-
Notifications
You must be signed in to change notification settings - Fork 17
/
Justfile
399 lines (353 loc) · 15.4 KB
/
Justfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
#!/usr/bin/env just --justfile
# Load variables from a .env file in the working directory (e.g.
# TERRAFORM_STATE_BUCKET_NAME, AWS_DEFAULT_REGION, SSH_KEY_NAME) so recipes
# can reference them as environment variables.
set dotenv-load := true
# NOTE(review): enable_client is declared but not referenced by any recipe
# visible in this file (callers pass explicit values) — confirm it is still needed.
enable_client := "false"
# URL and filename of the prebuilt release node binary, used when no custom
# binary is requested.
default_node_url := "https://sn-node.s3.eu-west-2.amazonaws.com/safenode-latest-x86_64-unknown-linux-musl.tar.gz"
default_node_archive_filename := "safenode-latest-x86_64-unknown-linux-musl.tar.gz"
# Archive name used when a custom safenode binary is built from a fork/branch.
custom_bin_archive_filename := "safenode-custom-x86_64-unknown-linux-musl.tar.gz"
# =============================================
# Public targets intended to be called by users
# =============================================

# Initialise the prerequisites for creating the testnet:
# * Create a workspace for Terraform
# * Build the RPC client
# * Generate the inventory based on the template
# * Create an EC2 keypair based on the key at $HOME/.ssh/SSH_KEY_NAME
#
# This should be an idempotent target, in that it won't produce errors if any
# of the components already exist.
init env provider:
    #!/usr/bin/env bash
    echo "Checking for logs from previous testnet with name '{{env}}'..."
    # A successful `ls` means logs from a previous run are still in the bucket.
    if aws s3 ls s3://sn-testnet/testnet-logs/{{ env }}/; then
        echo "The logs from a previous testnet run with the same name still exist."
        echo "Please remove these logs if you don't need them, or retrieve them, then remove them."
        exit 1
    fi
    echo "No logs exist. Continuing."
    (
        cd terraform/{{provider}}
        terraform init \
            -backend-config="bucket=$TERRAFORM_STATE_BUCKET_NAME" \
            -backend-config="region=$AWS_DEFAULT_REGION"
        if terraform workspace list | grep -q "{{env}}"; then
            echo "Workspace '{{env}}' already exists"
        else
            echo "Creating new Terraform workspace {{env}}"
            terraform workspace new {{env}}
        fi
    )
    just build-rpc-client
    just create-{{provider}}-inventory {{env}}
    just create-{{provider}}-keypair {{env}}
# Launch a testnet: apply the Terraform plan, fill in the per-testnet Ansible
# extra-vars placeholders, then provision the genesis node followed by the
# remaining nodes. When use_custom_bin is "true", a safenode binary is first
# built from the given org/branch on a dedicated build machine.
testnet env provider node_count node_instance_count="10" use_custom_bin="false" org="" branch="":
    #!/usr/bin/env bash
    set -e
    (
        # Ensure subsequent terraform commands operate on this testnet's workspace.
        cd terraform/{{provider}}
        terraform workspace select {{env}}
    )
    # NOTE(review): this passes four arguments; confirm terraform-apply-aws
    # accepts a fourth (use_custom_bin) argument as the digital-ocean one does.
    just terraform-apply-{{provider}} "{{env}}" {{node_count}} false {{use_custom_bin}}
    if [[ {{use_custom_bin}} == true ]]; then
        echo "Custom safenode binary will be used"
        # Substitute the placeholders so provisioning pulls the custom archive
        # from the org/branch path on S3.
        sed "s|__ORG__|{{org}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__BRANCH__|{{branch}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__NODE_ARCHIVE__|{{custom_bin_archive_filename}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        url="https://sn-node.s3.eu-west-2.amazonaws.com/{{org}}/{{branch}}/{{custom_bin_archive_filename}}"
        sed "s|__NODE_URL__|$url|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__NODE_INSTANCE_COUNT__|{{node_instance_count}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__TESTNET_NAME__|{{env}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        # Build the custom binary on the build machine before provisioning nodes.
        just wait-for-ssh "{{env}}" "{{provider}}" "build"
        just run-ansible-against-build-machine "{{env}}" "{{provider}}"
    else
        # Use the prebuilt release archive and URL declared at the top of this file.
        sed "s|__NODE_ARCHIVE__|{{default_node_archive_filename}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__NODE_URL__|{{default_node_url}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__NODE_INSTANCE_COUNT__|{{node_instance_count}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
        sed "s|__TESTNET_NAME__|{{env}}|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
    fi
    # Genesis must come up first: the remaining nodes need its multiaddr.
    just wait-for-ssh "{{env}}" "{{provider}}" "genesis"
    just run-ansible-against-nodes "{{env}}" "{{provider}}" "true" # genesis
    just run-ansible-against-nodes "{{env}}" "{{provider}}" "false" # remaining nodes
# List the name and IP address of each node in the testnet, which can be used for SSH access.
ssh-details env provider:
    #!/usr/bin/env bash
    case "{{provider}}" in
        aws)
            ansible-inventory --inventory ansible/inventory/.{{env}}_inventory_aws_ec2.yml --list | \
                jq -r '._meta.hostvars | to_entries[] | [.value.tags.Name, .value.public_dns_name] | @tsv' | \
                column -t | \
                sort
            echo "The user for the EC2 instances is 'ubuntu'"
            ;;
        digital-ocean)
            ansible-inventory --inventory ansible/inventory/.{{env}}_inventory_digital_ocean.yml --list | \
                jq -r '._meta.hostvars | to_entries[] | [.value.do_name, .value.ansible_host] | @tsv' | \
                column -t | \
                sort
            echo "The user for the droplets is 'root'"
            ;;
        *)
            echo "Provider {{provider}} is not supported"
            echo "Please use 'aws' or 'digital-ocean' as the provider"
            exit 1
            ;;
    esac
# Delete the uploaded logs of a previous testnet run with this name from S3.
clean-logs env:
    aws s3 rm s3://sn-testnet/testnet-logs/{{env}}/ --recursive
# Tear down all the EC2 instances or droplets in the testnet and delete the Terraform workspace.
clean env provider:
    #!/usr/bin/env bash
    set -e
    just terraform-destroy-{{provider}} "{{env}}"
    (
        cd terraform/{{provider}}
        # A workspace cannot be deleted while it is selected, so switch to 'dev' first.
        terraform workspace select dev
        output=$(terraform workspace list)
        if [[ "$output" == *"{{env}}"* ]]; then
            echo "Deleting {{env}} workspace..."
            terraform workspace delete -force {{env}}
        fi
    )
    # Remove the generated inventory/extra-vars files (and keypair, for AWS).
    just clean-{{provider}} "{{env}}"
# =============================================================
# Private helper utility targets intended to reduce duplication
# =============================================================

# Put the user's custom node binary on S3.
#
# The binary will be placed in a tar archive and placed in a folder within the bucket.
# Using the folder enables each testnet user to have their own binary without interfering
# with each other.
#
# The extra vars file will also be populated with the resulting URL of the archive and
# it will be pulled during the provisioning process. Uploading it once prevents Ansible from
# having to upload it to each node from the client, which is very time consuming.
upload-custom-node-bin env provider node_bin_path:
    #!/usr/bin/env bash
    set -e
    # Use the shared archive filename so it matches the __NODE_ARCHIVE__ value
    # substituted by the `testnet` recipe. The previous hard-coded name dropped
    # the '-musl' suffix and diverged from `custom_bin_archive_filename`.
    archive_name="{{custom_bin_archive_filename}}"
    rm -rf /tmp/custom_node && mkdir /tmp/custom_node
    cp {{node_bin_path}} /tmp/custom_node
    (
        cd /tmp/custom_node
        tar -zcvf $archive_name safenode
    )
    aws s3 cp /tmp/custom_node/$archive_name s3://sn-node/{{env}}/$archive_name --acl public-read
    url="https://sn-node.s3.eu-west-2.amazonaws.com/{{env}}/${archive_name}"
    sed "s|__NODE_URL__|$url|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
    echo "Custom binary available at $url"
# Set the multiaddr of the genesis node for provisioning the remaining nodes.
#
# The IP of the genesis node is obtained, then we use that with the RPC service to get
# its peer ID.
#
# The placeholder value in the variables file is then replaced. This file gets provided
# to Ansible.
set-genesis-multiaddr env provider:
    #!/usr/bin/env bash
    set -e
    if [[ "{{provider}}" == "aws" ]]; then
        inventory_path="inventory/.{{env}}_genesis_inventory_aws_ec2.yml"
        cd ansible
        genesis_ip=$(ansible-inventory --inventory $inventory_path --list | \
            jq -r '.["_meta"]["hostvars"][]["public_ip_address"]')
        cd ..
    elif [[ "{{provider}}" == "digital-ocean" ]]; then
        inventory_path="inventory/.{{env}}_genesis_inventory_digital_ocean.yml"
        cd ansible
        genesis_ip=$(ansible-inventory --inventory $inventory_path --list | \
            jq -r '.["_meta"]["hostvars"][]["ansible_host"]')
        cd ..
    else
        # Previously an unsupported provider fell through with genesis_ip
        # unset, producing a confusing RPC failure below. Fail fast instead.
        echo "Provider {{provider}} is not supported"
        exit 1
    fi
    # Ask the genesis node's RPC service for its peer ID and build the multiaddr.
    peer_id=$(./safenode_rpc_client $genesis_ip:12001 info | \
        grep "Peer Id" | awk -F ':' '{ print $2 }' | xargs)
    multiaddr="/ip4/$genesis_ip/tcp/12000/p2p/$peer_id"
    echo "Multiaddr for genesis node is $multiaddr"
    sed "s|__MULTIADDR__|$multiaddr|g" -i ansible/extra_vars/.{{env}}_{{provider}}.json
# Poll until an SSH connection can be made to the machine selected by `type`
# (e.g. "genesis" or "build"), giving up after max_retries attempts.
wait-for-ssh env provider type:
    #!/usr/bin/env bash
    if [[ "{{provider}}" == "aws" ]]; then
        inventory_path="inventory/.{{env}}_{{type}}_inventory_aws_ec2.yml"
        cd ansible
        genesis_ip=$(ansible-inventory --inventory $inventory_path --list | \
            jq -r '.["_meta"]["hostvars"][]["public_ip_address"]')
        cd ..
        user="ubuntu"
    elif [[ "{{provider}}" == "digital-ocean" ]]; then
        inventory_path="inventory/.{{env}}_{{type}}_inventory_digital_ocean.yml"
        cd ansible
        genesis_ip=$(ansible-inventory --inventory $inventory_path --list | \
            jq -r '.["_meta"]["hostvars"][]["ansible_host"]')
        cd ..
        user="root"
    else
        # Previously an unsupported provider fell through with genesis_ip and
        # user unset, looping uselessly on a malformed ssh target. Fail fast.
        echo "Provider {{provider}} is not supported"
        exit 1
    fi
    max_retries=10
    count=0
    # Deliberately no `set -e`: the ssh probe is expected to fail until the
    # host finishes booting.
    until ssh -i "${HOME}/.ssh/${SSH_KEY_NAME}" \
        -q -oBatchMode=yes -oConnectTimeout=5 -oStrictHostKeyChecking=no $user@$genesis_ip "bash --version"; do
        sleep 5
        count=$((count + 1))
        if [[ $count -gt $max_retries ]]; then
            echo "SSH command failed after $count attempts. Exiting."
            exit 1
        fi
        echo "SSH still not available. Attempt $count of $max_retries. Retrying in 5 seconds..."
    done
    echo "SSH connection now available"
# Build a copy of the RPC client, which is used for obtaining the genesis peer ID.
# If the binary is already in the current directory we will skip.
build-rpc-client:
    #!/usr/bin/env bash
    set -e
    if [[ ! -f ./safenode_rpc_client ]]; then
        # Remove any stale checkout from a previous (possibly failed) run;
        # otherwise `git clone` fails because the directory already exists.
        rm -rf /tmp/safe_network
        (
            cd /tmp
            git clone https://github.com/maidsafe/safe_network
            cd safe_network
            cargo build --release --example safenode_rpc_client
        )
        # `set -e` above ensures we never copy a binary from a failed build.
        cp /tmp/safe_network/target/release/examples/safenode_rpc_client .
    else
        echo "The safenode_rpc_client binary is already present"
    fi
# Run the build playbook against the dedicated build machine for this testnet.
run-ansible-against-build-machine env="" provider="":
    #!/usr/bin/env bash
    playbook="build.yml"
    extra_vars_path="extra_vars/.{{env}}_{{provider}}.json"
    inventory_path="inventory/.{{env}}_build_inventory"
    case "{{provider}}" in
        aws)
            user="ubuntu"
            inventory_path="${inventory_path}_aws_ec2.yml"
            ;;
        digital-ocean)
            user="root"
            inventory_path="${inventory_path}_digital_ocean.yml"
            ;;
        *)
            echo "Provider {{provider}} is not supported"
            exit 1
            ;;
    esac
    just run-ansible "$user" "$inventory_path" "$playbook" "$extra_vars_path"
# Provision either the genesis node (is_genesis=true) or the remaining nodes.
# For the remaining nodes, the genesis multiaddr placeholder must be resolved
# in the extra-vars file before the playbook runs.
run-ansible-against-nodes env="" provider="" is_genesis="":
    #!/usr/bin/env bash
    set -e
    if [[ "{{is_genesis}}" == "true" ]]; then
        playbook="genesis_node.yml"
        inventory_path="inventory/.{{env}}_genesis_inventory"
    else
        # Non-genesis nodes connect to the genesis node, so resolve its
        # multiaddr first.
        just set-genesis-multiaddr "{{env}}" "{{provider}}"
        playbook="nodes.yml"
        inventory_path="inventory/.{{env}}_node_inventory"
    fi
    # Pick the SSH user and inventory suffix for the provider.
    if [[ "{{provider}}" == "aws" ]]; then
        user="ubuntu"
        inventory_path="${inventory_path}_aws_ec2.yml"
    elif [[ "{{provider}}" == "digital-ocean" ]]; then
        user="root"
        inventory_path="${inventory_path}_digital_ocean.yml"
    else
        echo "Provider {{provider}} is not supported"
        exit 1
    fi
    extra_vars_path="extra_vars/.{{env}}_{{provider}}.json"
    just run-ansible "$user" "$inventory_path" "$playbook" "$extra_vars_path"
# Common ansible-playbook invocation used by the build/genesis/node runners.
# Paths are relative to the ansible/ directory. Requires $SSH_KEY_NAME (from
# the .env file) and a vault password file at $HOME/.ansible/vault-password.
run-ansible user inventory_path playbook extra_vars_path:
    #!/usr/bin/env bash
    set -e
    (
        cd ansible
        ansible-playbook --inventory {{inventory_path}} \
            --private-key $HOME/.ssh/$SSH_KEY_NAME \
            --user {{user}} \
            --extra-vars "@{{extra_vars_path}}" \
            --vault-password-file $HOME/.ansible/vault-password \
            {{playbook}}
    )
# ===========
# AWS helpers
# ===========

# Import the user's public key as an EC2 keypair named testnet-<env>.
# Idempotent: the import is skipped when the keypair already exists.
create-aws-keypair env:
    #!/usr/bin/env bash
    key_name="testnet-{{env}}"
    if ! aws ec2 describe-key-pairs --key-names "$key_name" > /dev/null 2>&1; then
        pub_key=$(cat $HOME/.ssh/${SSH_KEY_NAME}.pub | base64 -w0 | xargs)
        echo "Creating new key pair for the testnet..."
        # Reuse $key_name rather than repeating the literal, so the imported
        # name can never drift from the one checked above.
        aws ec2 import-key-pair \
            --key-name "$key_name" --public-key-material "$pub_key"
    else
        echo "An EC2 keypair for {{env}} already exists"
    fi
# Generate per-testnet AWS EC2 dynamic-inventory files from the 'dev'
# templates (genesis, node, client, and combined), plus a fresh extra-vars
# file whose placeholders are substituted later by the `testnet` recipe.
create-aws-inventory env:
    cp ansible/inventory/dev_genesis_inventory_aws_ec2.yml \
        ansible/inventory/.{{env}}_genesis_inventory_aws_ec2.yml
    sed "s/dev/{{env}}/g" -i ansible/inventory/.{{env}}_genesis_inventory_aws_ec2.yml
    cp ansible/inventory/dev_node_inventory_aws_ec2.yml \
        ansible/inventory/.{{env}}_node_inventory_aws_ec2.yml
    sed "s/dev/{{env}}/g" -i ansible/inventory/.{{env}}_node_inventory_aws_ec2.yml
    cp ansible/inventory/dev_client_inventory_aws_ec2.yml \
        ansible/inventory/.{{env}}_client_inventory_aws_ec2.yml
    sed "s/dev/{{env}}/g" -i ansible/inventory/.{{env}}_client_inventory_aws_ec2.yml
    cp ansible/inventory/dev_inventory_aws_ec2.yml \
        ansible/inventory/.{{env}}_inventory_aws_ec2.yml
    sed "s/dev/{{env}}/g" -i ansible/inventory/.{{env}}_inventory_aws_ec2.yml
    cp ansible/extra_vars/aws.json ansible/extra_vars/.{{env}}_aws.json
# Apply the AWS Terraform plan for this testnet.
#
# The optional use_custom_bin parameter exists so that the `testnet` recipe can
# invoke terraform-apply-<provider> with a uniform four-argument list; the AWS
# configuration does not currently consume it, but accepting it prevents a
# "too many arguments" failure when the provider is aws.
terraform-apply-aws env node_count enable_client use_custom_bin="false":
    #!/usr/bin/env bash
    cd terraform/aws
    terraform apply -auto-approve \
        -var node_count={{node_count}} \
        -var key_pair_name=testnet-{{env}} \
        -var vpc_subnet_id="$SN_TESTNET_DEV_SUBNET_ID" \
        -var vpc_security_group_id="$SN_TESTNET_DEV_SECURITY_GROUP_ID" \
        -var enable_client={{enable_client}}
# Destroy the AWS resources in this testnet's Terraform workspace.
# The same -var values supplied at apply time must be provided again here.
terraform-destroy-aws env:
    #!/usr/bin/env bash
    cd terraform/aws
    terraform workspace select {{env}}
    terraform destroy -auto-approve \
        -var key_pair_name=testnet-{{env}} \
        -var vpc_subnet_id="$SN_TESTNET_DEV_SUBNET_ID" \
        -var vpc_security_group_id="$SN_TESTNET_DEV_SECURITY_GROUP_ID"
# Delete the EC2 keypair and every generated inventory/extra-vars file for
# this testnet.
clean-aws env:
    #!/usr/bin/env bash
    output=$(aws ec2 describe-key-pairs | jq -r '.KeyPairs[].KeyName')
    if [[ "$output" == *"testnet-{{env}}"* ]]; then
        echo -n "Deleting keypair..."
        aws ec2 delete-key-pair --key-name testnet-{{env}}
        echo "Done"
    fi
    rm -f ansible/inventory/.{{env}}_genesis_inventory_aws_ec2.yml
    rm -f ansible/inventory/.{{env}}_node_inventory_aws_ec2.yml
    # The client inventory is generated by create-aws-inventory but was
    # previously never removed here, leaving a stale file behind.
    rm -f ansible/inventory/.{{env}}_client_inventory_aws_ec2.yml
    rm -f ansible/inventory/.{{env}}_inventory_aws_ec2.yml
    rm -f ansible/extra_vars/.{{env}}_aws.json
# ===========
# DO helpers
# ===========

# No-op counterpart of create-aws-keypair, kept so `init` can invoke
# create-<provider>-keypair uniformly for any provider.
create-digital-ocean-keypair env:
    @echo "Digital Ocean does not require the creation of a keypair"
# Generate per-testnet Digital Ocean dynamic-inventory files (build, genesis,
# node, and combined) from the single dev template by substituting the
# env_value/type_value placeholders, plus a fresh extra-vars file.
create-digital-ocean-inventory env:
    cp ansible/inventory/dev_inventory_digital_ocean.yml \
        ansible/inventory/.{{env}}_build_inventory_digital_ocean.yml
    sed "s/env_value/{{env}}/g" -i ansible/inventory/.{{env}}_build_inventory_digital_ocean.yml
    sed "s/type_value/build/g" -i ansible/inventory/.{{env}}_build_inventory_digital_ocean.yml
    cp ansible/inventory/dev_inventory_digital_ocean.yml \
        ansible/inventory/.{{env}}_genesis_inventory_digital_ocean.yml
    sed "s/env_value/{{env}}/g" -i ansible/inventory/.{{env}}_genesis_inventory_digital_ocean.yml
    sed "s/type_value/genesis/g" -i ansible/inventory/.{{env}}_genesis_inventory_digital_ocean.yml
    cp ansible/inventory/dev_inventory_digital_ocean.yml \
        ansible/inventory/.{{env}}_node_inventory_digital_ocean.yml
    sed "s/env_value/{{env}}/g" -i ansible/inventory/.{{env}}_node_inventory_digital_ocean.yml
    sed "s/type_value/node/g" -i ansible/inventory/.{{env}}_node_inventory_digital_ocean.yml
    cp ansible/inventory/dev_inventory_digital_ocean.yml \
        ansible/inventory/.{{env}}_inventory_digital_ocean.yml
    sed "s/env_value/{{env}}/g" -i ansible/inventory/.{{env}}_inventory_digital_ocean.yml
    # Drop the template's last line for the combined inventory — presumably
    # the type_value filter line, so this inventory matches all droplet types.
    # NOTE(review): confirm against the dev template.
    sed '$d' -i ansible/inventory/.{{env}}_inventory_digital_ocean.yml
    cp ansible/extra_vars/digital_ocean.json ansible/extra_vars/.{{env}}_digital-ocean.json
# Apply the Digital Ocean Terraform plan for this testnet.
# NOTE(review): enable_client is accepted here only for a uniform call
# signature and is not passed through to terraform — confirm whether the DO
# configuration supports a client machine.
terraform-apply-digital-ocean env node_count enable_client use_custom_bin:
    #!/usr/bin/env bash
    cd terraform/digital-ocean
    terraform apply -auto-approve -var node_count={{node_count}} -var use_custom_bin={{use_custom_bin}}
# Destroy the Digital Ocean resources in this testnet's Terraform workspace.
terraform-destroy-digital-ocean env:
    #!/usr/bin/env bash
    cd terraform/digital-ocean
    terraform workspace select {{env}}
    terraform destroy -auto-approve
# Delete every generated Digital Ocean inventory/extra-vars file for this testnet.
clean-digital-ocean env:
    #!/usr/bin/env bash
    # The build inventory is generated by create-digital-ocean-inventory but
    # was previously never removed here, leaving a stale file behind.
    rm -f ansible/inventory/.{{env}}_build_inventory_digital_ocean.yml
    rm -f ansible/inventory/.{{env}}_genesis_inventory_digital_ocean.yml
    rm -f ansible/inventory/.{{env}}_node_inventory_digital_ocean.yml
    rm -f ansible/inventory/.{{env}}_inventory_digital_ocean.yml
    rm -f ansible/extra_vars/.{{env}}_digital-ocean.json