#!/bin/bash
# debug
# set -x
location=$(dirname "$0")
source "$location/shared.sh"
# Who are you?
whoareyou() {
    if [ -n "$1" ]; then
        figlet "$1"
    else
        figlet "$(whoami)"
    fi
}
# AWS Login
aws-login() {
    aws sso login
}
alogin() {
    aws sso login
}
# AWS Login for ghabchi
glogin() {
    # shellcheck disable=SC2034
    aws sso login --profile personal && export AWS_PROFILE=personal
}
# Pulumi login
plogin() {
    pnpm pulumi:login
}
# Delete a Pulumi lock file from S3
pdelete() {
    aws s3 rm "$1"
}
# List things on the network
lnetwork() {
    arp -a
}
dockerKill() {
    # Old Way commented out
    args=$1
    if [[ $args == 'main' ]]; then
        echo "Killing Docker/Orb"
        # killall Docker
        killall OrbStack
    elif [[ $args == 'all' ]]; then
        # shellcheck disable=SC2207
        running=($(docker ps -q))
        docker ps --format '{{.Names}}'
        for container in "${running[@]}"; do
            if [ -n "$container" ]; then
                echo "Stopping and Removing:"
                docker stop "$container"
                docker rm "$container"
            fi
        done
        # shellcheck disable=SC2207
        exited=($(docker ps -a -q -f status=exited))
        for container in "${exited[@]}"; do
            if [ -n "$container" ]; then
                echo "Removing:"
                docker rm "$container"
            fi
        done
        echo "Killing Docker"
        killall Docker
    else
        echo "${RED}ERROR - No Argument"
        echo "${PURPLE}Please try:"
        echo "${GREEN} dockerKill all|main${NC}"
    fi
}
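# Example usage (illustrative):
#   dockerKill main   # just kill the OrbStack app
#   dockerKill all    # stop and remove every container, then kill Docker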
dockerStop() {
    orb stop
}
dockerCheck() {
    if ! orb status &>/dev/null; then
        echo "Starting Orb"
        orb start
    fi
    orb &>/dev/null
}
fathomDB() {
    export PULUMI_SKIP_UPDATE_CHECK="true"
    environment=$1
    folder=$(basename "$PWD")
    dockerCheck
    if { [[ $environment == 'staging' ]] || [[ $environment == 'production' ]]; } && [[ "$folder" == "fathom" ]]; then
        echo "${GREEN}Updating FathomDB permissions for $environment${NC}"
        pulumi login s3://dabble-pulumi-state/cloud/dabble-accounts
        DB_HOST=$(pulumi --cwd .deploy --stack "$environment" stack output rdsAddress) DB_PASS=$(pulumi --cwd .deploy --stack "$environment" stack output --show-secrets rdsMasterPassword) DB_USER=master DB_SECURE=true ./server/scripts/configure-database.sh setup:platform
        pulumi login s3://dabble-pulumi-state/cloud/internal-accounts
        DB_HOST=$(pulumi --cwd .deploy --stack "$environment-shared-database" stack output rdsAddress) DB_PASS=$(pulumi --cwd .deploy --stack "$environment-shared-database" stack output --show-secrets rdsMasterPassword) DB_USER=master DB_SECURE=true ./server/scripts/configure-database.sh setup:internal
    elif { [[ $environment == 'staging-use2' ]] || [[ $environment == 'production-use2' ]]; } && [[ "$folder" == "fathom" ]]; then
        echo "${GREEN}Updating FathomDB permissions for $environment${NC}"
        pulumi login s3://dabble-pulumi-state/cloud/dabble-accounts
        DB_HOST=$(pulumi --cwd .deploy --stack "$environment" stack output rdsAddress) DB_PASS=$(pulumi --cwd .deploy --stack "$environment" stack output --show-secrets rdsMasterPassword) DB_USER=master DB_SECURE=true ./server/scripts/configure-database-us.sh setup:platform
        pulumi login s3://dabble-pulumi-state/cloud/internal-accounts
        DB_HOST=$(pulumi --cwd .deploy --stack "$environment-shared-database" stack output rdsAddress) DB_PASS=$(pulumi --cwd .deploy --stack "$environment-shared-database" stack output --show-secrets rdsMasterPassword) DB_USER=master DB_SECURE=true ./server/scripts/configure-database-us.sh setup:internal
    else
        echo "Either Environment not set or you're not in the right directory:"
        echo "${RED}Environment:${NC} $environment"
        echo "${RED}Directory:${NC} $folder"
    fi
}
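# Example usage (must be run from a checkout whose directory is named "fathom"):
#   fathomDB staging
#   fathomDB production-use2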
# SSHUTTLE - KUTTLE
kuttle_init() {
    kuttle_env=${1:-"production-internal"}
    kubectl config use-context "${kuttle_env}/main"
    kubectl config set-context --current --namespace=default
    sshuttle -r "$(kubectl get pod -l app.kubernetes.io/name=kuttle -o jsonpath="{.items[0].metadata.name}" -n default)" -e kuttle 10.0.0.0/8
}
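# Example usage (context names follow the "<env>/main" convention above):
#   kuttle_init                    # defaults to production-internal
#   kuttle_init staging-internal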
# Airflow pod clean
airflow_pod_clean() {
    kube_action=${1:-"list"}
    kube_env=${2:-"staging"}
    if [ "$kube_action" == "delete" ]; then
        kubectl --context "$kube_env-internal/main" -n airflow get pods --field-selector 'status.phase=Failed' -o name | xargs kubectl --context "$kube_env-internal/main" -n airflow delete
    elif [ "$kube_action" == "help" ]; then
        figlet 👌abble
        echo "airflow_pod_clean <action> <env>"
        echo " action: list | delete"
        echo " env: staging | production"
    else
        kubectl --context "$kube_env-internal/main" -n airflow get pods
    fi
}
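# Example usage:
#   airflow_pod_clean                    # list pods in staging
#   airflow_pod_clean delete production  # delete failed pods in production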
alias PWgen="uuidgen | tr '[:upper:]' '[:lower:]' | pbcopy"
git_update_dir() {
    directory=${1:-"."}
    for subdir in "$directory"/*/; do
        if [ -d "$subdir.git" ]; then
            echo "Processing repository: $subdir"
            (
                cd "$subdir" || exit
                git checkout HEAD
                git checkout -f master
                git pull origin master
            )
        fi
    done
}
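# Example usage (assumes each repo has a master branch; path is illustrative):
#   git_update_dir ~/code   # force-checkout and pull master in every repo under ~/code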
cognito_search() {
    PHONE_NUMBER="$1"
    if [ -z "$PHONE_NUMBER" ]; then
        error "Missing Phone Number"
        return
    fi
    ENVIRONMENT="${2:-production}"
    case "$ENVIRONMENT" in
        production | staging)
            COUNTRY_CODE="+61"
            ;;
        production-us | staging-us)
            COUNTRY_CODE="+1"
            ;;
        count)
            ENVIRONMENT="production"
            COUNTRY_CODE="+61"
            ;;
        *)
            echo "Invalid environment. Defaulting to production."
            ENVIRONMENT="production"
            COUNTRY_CODE="+61"
            ;;
    esac
    CLEAN_ENVIRONMENT=$(awk -v env="$ENVIRONMENT" 'BEGIN { split(env, parts, "-"); print toupper(parts[1]) "-" (index(env, "-us") ? "US" : "AU") }')
    if [[ "${PHONE_NUMBER:0:1}" != "+" ]]; then
        PHONE_NUMBER="${PHONE_NUMBER/#0/}"
        PHONE_NUMBER="${COUNTRY_CODE}${PHONE_NUMBER}"
    fi
    echo "${GREEN}Searching for Phone Number: ${PURPLE}$PHONE_NUMBER${NC} in ${GREEN}$CLEAN_ENVIRONMENT${NC}"
    USER_POOL=$(aws cognito-idp list-user-pools --max-results 2 --profile "$ENVIRONMENT" | jq -r '.UserPools[].Id')
    query=(aws cognito-idp list-users --user-pool-id "$USER_POOL" --profile "$ENVIRONMENT" --limit 20 --filter "phone_number=\"$PHONE_NUMBER\"")
    if [[ "$3" == "count" || "$2" == "count" ]]; then
        "${query[@]}" | jq '.Users | length'
    else
        "${query[@]}" | jq '.Users'
    fi
}
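# Example usage (phone number is hypothetical; local numbers get the environment's
# country code prepended, e.g. 0412345678 -> +61412345678):
#   cognito_search 0412345678 staging
#   cognito_search 0412345678 production count   # just the match count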
kubectl_secrets() {
    NAME=$1
    ENVIRONMENT=$2
    NAMESPACE=$3
    if [ -n "$NAME" ] && [ -n "$ENVIRONMENT" ] && [ -n "$NAMESPACE" ]; then
        kubectl get secret "$NAME" -o jsonpath='{.data}' --context "$ENVIRONMENT/main" -n "$NAMESPACE" | jq 'to_entries | map("\(.key): \(.value | @base64d)") | .[]'
    elif [ -n "$NAME" ]; then
        kubectl get secret "$NAME" -o jsonpath='{.data}' | jq 'to_entries | map("\(.key): \(.value | @base64d)") | .[]'
    else
        error "Missing NAME"
        echo "${GREEN}NAME:${NC} ${NAME}"
    fi
}
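# Example usage (secret/context/namespace names are illustrative):
#   kubectl_secrets my-secret staging-internal default   # explicit context and namespace
#   kubectl_secrets my-secret                            # current context and namespace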
pstack() {
    ENVIRONMENT=$1
    NAME=$(basename "$PWD")
    export PULUMI_SKIP_UPDATE_CHECK="true"
    if [[ "$NAME" =~ ^(dabble-accounts|internal-accounts|root-account)$ ]]; then
        NAME=""
    elif [[ "$NAME" = ".deploy" ]]; then
        NAME="$(basename "$(dirname "$PWD")")"
        NAME="-$NAME"
    else
        NAME="-$NAME"
    fi
    if [ -n "$ENVIRONMENT" ]; then
        echo "Changing to the following pulumi stack - ${GREEN}$ENVIRONMENT$NAME${NC}"
        pulumi stack select "$ENVIRONMENT$NAME"
    else
        echo "${RED}Missing ENVIRONMENT${NC}"
        echo "${GREEN} ENVIRONMENT:${NC} ${ENVIRONMENT}"
    fi
}
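# Example usage (the stack name becomes "<env>-<dir>"; directory is illustrative):
#   pstack staging   # in ~/code/fathom this selects the "staging-fathom" stack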
penv() {
    ENVIRONMENT=$1
    export PULUMI_SKIP_UPDATE_CHECK="true"
    ENVIRONMENTS=(dabble internal root)
    if [ -n "$ENVIRONMENT" ] && [[ "${ENVIRONMENTS[*]}" =~ ${ENVIRONMENT} ]]; then
        if [ "$ENVIRONMENT" = "root" ]; then
            SUFFIX="-account"
        else
            SUFFIX='-accounts'
        fi
        pulumi login s3://dabble-pulumi-state/cloud/"$ENVIRONMENT$SUFFIX"
    else
        echo "${RED}ENVIRONMENT Error${NC}"
        echo "${GREEN} ENVIRONMENT: ${RED}${ENVIRONMENT}"
        echo "${GREEN}Allowed Environments:"
        for ENV in "${ENVIRONMENTS[@]}"; do
            echo "${PURPLE}$ENV${NC}"
        done
    fi
}
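# Example usage:
#   penv dabble   # logs in to s3://dabble-pulumi-state/cloud/dabble-accounts
#   penv root     # -> .../root-account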
psecrets() {
    ENVIRONMENT=$1
    if [ -n "$ENVIRONMENT" ]; then
        pstack "$ENVIRONMENT"
    fi
    export PULUMI_SKIP_UPDATE_CHECK="true"
    name=$(pulumi stack --show-name)
    echo "${GREEN}Showing Secrets for Stack: ${PURPLE}$name${NC}"
    pulumi config --show-secrets
}
nodeCount() {
    local environment=$1
    if [ -n "$environment" ]; then
        NODES=$(kubectl get nodes --context "$environment/main" -o json)
        COUNT=$(echo "$NODES" | jq -r '.items | length')
        echo "${GREEN}$environment:${NC} $COUNT"
        echo "$NODES" | jq -r '.items[] | .metadata.labels.purpose' | sort | uniq -c | sort -nr
    else
        NODES=$(kubectl get nodes -o json)
        COUNT=$(echo "$NODES" | jq -r '.items | length')
        CURRENT_CONTEXT=$(kubectl config current-context)
        echo "${PURPLE}Current Context - ${GREEN} ${CURRENT_CONTEXT}:${NC} $COUNT"
        echo "$NODES" | jq -r '.items[] | .metadata.labels.purpose' | sort | uniq -c | sort -nr
    fi
}
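# Example usage:
#   nodeCount production-internal   # node count plus per-purpose breakdown for that context
#   nodeCount                       # same, for the current context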
podCheck() {
    # shellcheck disable=SC2207
    environments=($(envCheck "$@"))
    if [ "${#environments[@]}" -eq 1 ]; then
        environments=("$environments")
    fi
    for environment in "${environments[@]}"; do
        echo "${PURPLE}Checking $environment...${NC}"
        errors=$(kubectl get pods --context "$environment/main" --no-headers -A | grep -v "Running\|Completed")
        if [[ -z $errors ]]; then
            echo "No errors"
        else
            echo "$errors"
        fi
    done
}
jobCheck() {
    # shellcheck disable=SC2207
    environments=($(envCheck "$@"))
    if [ "${#environments[@]}" -eq 1 ]; then
        environments=("$environments")
    fi
    for environment in "${environments[@]}"; do
        echo "${PURPLE}Checking $environment...${NC}"
        jobs=$(kubectl get jobs --context "$environment/main" --no-headers -A | grep -v "1/1")
        if [[ -n "$jobs" && "$jobs" != "No resources found" ]]; then
            echo "$jobs"
        fi
        echo ""
    done
}
# vmagent logs for a specific monitoring pod (the deployment-level vmLogs is defined further down)
vmPodLogs() {
    local pod=$1
    # shellcheck disable=SC2317
    if [ -n "$pod" ]; then
        kubectl logs "$pod" -n monitoring -c vmagent
    else
        kubectl logs "$(kubectl get pods -n monitoring -o json | jq -r '.items[] | select(.metadata.labels."app.kubernetes.io/name" == "vmagent") | .metadata.name')" -n monitoring -c vmagent
    fi
}
getUnhealthyPods() {
    # Stub: arguments are captured but no lookup is implemented yet
    local environment=$1
    local namespace=$2
}
podClean() {
    local environment=$1
    local namespace=$2
    if [ -n "$environment" ] && [ -n "$namespace" ]; then
        # shellcheck disable=SC2046
        kubectl delete pod $(kubectl get pods -o json -n "$namespace" --context "$environment/main" | jq -r '.items[] | select(.status.phase == "Failed" or .status.phase == "Error") | .metadata.name') -n "$namespace" --context "$environment/main"
    elif [ -n "$environment" ]; then
        # shellcheck disable=SC2046
        kubectl delete pod $(kubectl get pods -o json --context "$environment/main" | jq -r '.items[] | select(.status.phase == "Failed" or .status.phase == "Error") | .metadata.name') --context "$environment/main"
    else
        # shellcheck disable=SC2046
        kubectl delete pod $(kubectl get pods -o json | jq -r '.items[] | select(.status.phase == "Failed" or .status.phase == "Error") | .metadata.name')
    fi
}
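# Example usage (environment/namespace are illustrative):
#   podClean production-internal airflow   # delete Failed/Error pods in that namespace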
jobClean() {
    local environment=$1
    local namespace=$2
    if [ -n "$environment" ] && [ -n "$namespace" ]; then
        # shellcheck disable=SC2046
        kubectl delete job $(kubectl get jobs -o json -n "$namespace" --context "$environment/main" | jq -r '.items[] | select(.status.conditions[0].type == "Failed") | .metadata.name') -n "$namespace" --context "$environment/main"
    elif [ -n "$environment" ]; then
        # shellcheck disable=SC2046
        kubectl delete job $(kubectl get jobs -o json --context "$environment/main" | jq -r '.items[] | select(.status.conditions[0].type == "Failed") | .metadata.name') --context "$environment/main"
    else
        # shellcheck disable=SC2046
        kubectl delete job $(kubectl get jobs -o json | jq -r '.items[] | select(.status.conditions[0].type == "Failed") | .metadata.name')
    fi
}
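# Example usage, mirroring podClean:
#   jobClean production-internal airflow   # delete jobs whose first condition is Failed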
vmLogs() {
    local follow=$1
    if [[ "$follow" == "follow" || "$follow" == "f" ]]; then
        kubectl logs deployments/vmagent-vm -c vmagent -n monitoring -f
    else
        kubectl logs deployments/vmagent-vm -c vmagent -n monitoring
    fi
}
# Kick off a one-off job from a cronjob so its behaviour and logs can be checked
logsCronjob() {
    local job=$1
    if [ -n "$job" ]; then
        kubectl create job --from=cronjob/"$job" test-"$job"
    else
        echo "${RED}Missing Argument${NC}"
        echo "${GREEN} JOB:${NC} $job"
    fi
}
run_wait() {
    local timeout="$1"
    shift
    local ignore="false"
    if [[ "$1" == "true" ]]; then
        ignore="true"
        echo "${GREEN}Ignoring errors${NC}"
        shift
    fi
    local cmd="${*}"
    if [[ -z "$timeout" || -z "$cmd" ]]; then
        echo "${RED}Missing <TIMEOUT> or <CMD>${NC}"
        echo "${GREEN} ARGS:${NC} TIMEOUT - ${timeout}"
        echo "COMMAND - ${cmd}"
        return 1
    fi
    while true; do
        echo "${GREEN}Running: ${PURPLE}$cmd${NC}"
        if ! eval "$cmd"; then
            error "${RED}ERROR${NC} - ${PURPLE}$cmd${NC}"
            if [[ "$ignore" != "true" ]]; then
                break
            fi
        fi
        echo "${PURPLE}Waiting for $timeout seconds...${NC}"
        sleep "$timeout"
    done
}
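# Example usage (commands are illustrative):
#   run_wait 30 kubectl get pods          # re-run every 30s, stop on first failure
#   run_wait 30 true kubectl get pods     # keep looping even if the command fails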
html-live() {
    local port="${1:-8080}"
    local directory="${2:-.}"
    python3 -m http.server "$port" --directory "$directory"
}
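# Example usage:
#   html-live              # serve the current directory on :8080
#   html-live 3000 ./docs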
ghistory() {
    DAYS=${1:-1} # Default to 1 day if no argument is passed
    DATE=$(date -v -"${DAYS}"d +'%Y-%m-%d') # Format the date as 'YYYY-MM-DD'
    # Convert the date to a Unix timestamp range covering the whole day
    start_date=$(date -j -f "%Y-%m-%d %H:%M:%S" "$DATE 00:00:00" "+%s")
    end_date=$(date -j -f "%Y-%m-%d %H:%M:%S" "$DATE 23:59:59" "+%s")
    # Filter the Zsh history, convert timestamps to readable format, and clean up the output
    awk -F: -v start="$start_date" -v end="$end_date" '{
        if ($2 >= start && $2 <= end) {
            command = "date -r " $2 " +%d-%m-%Y\\ %H:%M:%S"
            command | getline timestamp
            close(command)
            # Extract and clean up the command part
            split($0, parts, ";")
            # Print readable timestamp and command
            print timestamp " : " parts[length(parts)]
        }
    }' ~/.zsh_history
}
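# Example usage (relies on zsh extended-history timestamps and BSD date):
#   ghistory      # commands run 1 day ago
#   ghistory 7    # commands run 7 days ago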
glock() {
    host=$(hostname)
    echo "Locking this Mac - ${host}"
    sleep 0.5
    osascript -e 'tell application "System Events" to keystroke "q" using {control down, command down}'
}
gbright() {
    info "Setting brightness to 100%"
    for i in {1..23}; do
        osascript <<EOD
tell application "System Events"
    key code 144 -- increase brightness
end tell
EOD
    done
}