chore(yaskfile): minor improvements here and there
Signed-off-by: Devin Buhl <[email protected]>
onedr0p committed Oct 15, 2024
1 parent 0839a9b commit 412535d
Showing 8 changed files with 104 additions and 81 deletions.
21 changes: 13 additions & 8 deletions .taskfiles/bootstrap/Taskfile.yaml
@@ -4,8 +4,6 @@ version: '3'
 
 vars:
   BOOTSTRAP_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/bootstrap/resources'
-  MINIJINJA_CMD: '{{.ROOT_DIR}}/.bin/minijinja --env --trim-blocks --lstrip-blocks --autoescape=none --strict'
-  OP_CMD: op run --env-file {{.CLUSTER_DIR}}/bootstrap/op.env --no-masking
   TALOS_CONTROLLER:
     sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1

@@ -14,7 +12,7 @@ tasks:
   kubernetes:
     desc: Bootstrap a Talos Kubernetes cluster backed by flux, sops, and rook
     prompt: Bootstrap a Talos Kubernetes cluster ... continue?
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (default: main)
       NODES: Nodes in the cluster to reset Rook on (required, comma delimited, e.g. k8s-0,k8s-1)
       DISK: Disk to reset Rook on (required, e.g. /dev/nvme0n1)
@@ -29,7 +27,7 @@ tasks:
       - { task: rook, vars: *vars }
       - { task: flux, vars: *vars }
     requires:
-      vars: ['CLUSTER', 'NODES', 'DISK']
+      vars: [CLUSTER, NODES, DISK]
     preconditions:
       - talosctl config info &>/dev/null
       - test -f {{.CLUSTER_DIR}}/talosconfig
@@ -64,12 +62,15 @@ tasks:
     cmds:
       - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/bootstrap/flux
       - for: { var: BOOTSTRAP_TEMPLATES }
-        cmd: '{{.OP_CMD}} -- {{.MINIJINJA_CMD}} {{.ITEM}} | kubectl apply --server-side --filename -'
+        cmd: >
+          op run --env-file {{.CLUSTER_DIR}}/bootstrap/op.env --no-masking --
+          minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.ITEM}}
+          | kubectl apply --server-side --filename -
       - kubectl apply --server-side --filename {{.CLUSTER_DIR}}/flux/vars/cluster-settings.yaml
       - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/flux/config
     vars:
       BOOTSTRAP_TEMPLATES:
-        sh: find {{.CLUSTER_DIR}}/bootstrap -type f -name '*.j2'
+        sh: ls {{.CLUSTER_DIR}}/bootstrap/**/*.j2
     env:
       VAULT: '{{if eq .CLUSTER "main"}}kubernetes{{else}}{{.CLUSTER}}{{end}}' # ¯\_(ツ)_/¯
     preconditions:
@@ -92,7 +93,9 @@ tasks:
   rook-disk:
     internal: true
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.BOOTSTRAP_RESOURCES_DIR}}/templates/rook-disk-job.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.BOOTSTRAP_RESOURCES_DIR}}/templates/rook-disk-job.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.BOOTSTRAP_RESOURCES_DIR}}/scripts/wait-for-job.sh {{.JOB}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for condition=complete --timeout=1m
       - kubectl --namespace {{.NS}} logs job/{{.JOB}}
@@ -112,7 +115,9 @@ tasks:
   rook-data:
     internal: true
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.BOOTSTRAP_RESOURCES_DIR}}/templates/rook-data-job.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.BOOTSTRAP_RESOURCES_DIR}}/templates/rook-data-job.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.BOOTSTRAP_RESOURCES_DIR}}/scripts/wait-for-job.sh {{.JOB}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for condition=complete --timeout=1m
      - kubectl --namespace {{.NS}} logs job/{{.JOB}}
32 changes: 16 additions & 16 deletions .taskfiles/kubernetes/Taskfile.yaml
@@ -6,19 +6,19 @@ tasks:
 
   apply-ks:
     desc: Apply a Flux Kustomization resource for a cluster
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       PATH: Path to the Flux Kustomization resource from the apps base dir (required, e.g. default/plex)
       NS: Namespace the Flux Kustomization exists in (default: flux-system)
-    cmd: |
-      flux build --namespace {{.NS}} ks {{base .PATH}} \
-        --kustomization-file {{.CLUSTER_DIR}}/apps/{{.PATH}}/ks.yaml \
-        --path {{.CLUSTER_DIR}}/apps/{{.PATH}} \
-        {{- if contains "not found" .KS }}--dry-run \{{ end }} | \
-      yq 'with(select(.apiVersion == "kustomize.toolkit.fluxcd.io/v1" and .kind == "Kustomization"); .metadata.namespace = "{{.NS}}")' - | \
-      kubectl apply --server-side --field-manager=kustomize-controller -f -
+    cmd: >
+      flux build --namespace {{.NS}} ks {{base .PATH}}
+      --kustomization-file {{.CLUSTER_DIR}}/apps/{{.PATH}}/ks.yaml
+      --path {{.CLUSTER_DIR}}/apps/{{.PATH}}
+      {{- if contains "not found" .KS }}--dry-run \{{ end }}
+      | yq 'with(select(.apiVersion == "kustomize.toolkit.fluxcd.io/v1" and .kind == "Kustomization"); .metadata.namespace = "{{.NS}}")' -
+      | kubectl apply --server-side --field-manager=kustomize-controller -f -
     requires:
-      vars: ['CLUSTER', 'PATH']
+      vars: [CLUSTER, PATH]
     vars:
       NS: '{{.NS | default "flux-system"}}'
       KS:
@@ -28,7 +28,7 @@ tasks:
 
   browse-pvc:
     desc: Exec into a container to browse a PersistentVolumeClaim
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (default: main)
       NS: Namespace to browse PersistentVolumeClaims in (default: default)
       CLAIM: PersistentVolumeClaim to browse (required)
@@ -38,22 +38,22 @@
       CLUSTER: '{{.CLUSTER}}'
       NS: '{{.NS | default "default"}}'
     requires:
-      vars: ['CLAIM']
+      vars: [CLAIM]
     preconditions:
       - kubectl --namespace {{.NS}} get persistentvolumeclaims {{.CLAIM}}
 
   sync-secrets:
     desc: Sync ExternalSecret resources from provider
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (default: main)
       NS: Namespace to sync secret in (default: default)
       SECRET: Secret to sync (default: all secrets)
-    cmd: |
+    cmd: >
       {{if eq .SECRET ""}}
-      kubectl get externalsecret --all-namespaces --no-headers -A | awk '{print $1, $2}' \
-        | xargs --max-procs=2 -l bash -c 'kubectl -n $0 annotate externalsecret $1 force-sync=$(date +%s) --overwrite'
+      kubectl get externalsecret --all-namespaces --no-headers -A | awk '{print $1, $2}'
+      | xargs --max-procs=2 -l bash -c 'kubectl -n $0 annotate externalsecret $1 force-sync=$(date +%s) --overwrite'
       {{else}}
-        kubectl --namespace {{.NS}} annotate externalsecret {{.SECRET}} force-sync=$(date +%s) --overwrite
+      kubectl --namespace {{.NS}} annotate externalsecret {{.SECRET}} force-sync=$(date +%s) --overwrite
       {{end}}
     vars:
       SECRET: '{{ .SECRET | default ""}}'
43 changes: 24 additions & 19 deletions .taskfiles/talos/Taskfile.yaml
@@ -16,20 +16,19 @@ vars:
   HOME_SERVICE_ADDR: voyager.internal # Ref: https://github.com/onedr0p/home-service
   HOME_SERVICE_USER: devin
   HOME_SERVICE_MATCHBOX_DIR: /var/opt/home-service/apps/matchbox/data/config
-  MINIJINJA_CMD: '{{.ROOT_DIR}}/.bin/minijinja --env --trim-blocks --lstrip-blocks --autoescape=none'
 
 tasks:
 
   apply-config:
     desc: Apply Talos configuration to a node
-    cmd: |
-      sops exec-file {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml "{{.MINIJINJA_CMD}} {}" | \
-      talosctl apply-config --mode={{.MODE}} --nodes {{.HOSTNAME}} --file /dev/stdin
+    cmd: >
+      sops exec-file {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml "minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {}"
+      | talosctl apply-config --mode={{.MODE}} --nodes {{.HOSTNAME}} --file /dev/stdin
     vars:
       MODE: '{{.MODE | default "no-reboot"}}'
     env: *vars
     requires:
-      vars: ['CLUSTER', 'HOSTNAME']
+      vars: [CLUSTER, HOSTNAME]
     preconditions:
       - test -f {{.CLUSTER_DIR}}/talosconfig
       - test -f {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml
@@ -44,7 +43,7 @@ tasks:
       - until kubectl wait --timeout=5m --for=jsonpath=.status.ceph.health=HEALTH_OK cephcluster --all --all-namespaces; do sleep 10; done
     vars: *vars
     requires:
-      vars: ['CLUSTER', 'HOSTNAME']
+      vars: [CLUSTER, HOSTNAME]
     preconditions:
       - test -f {{.CLUSTER_DIR}}/talosconfig
       - talosctl config info &>/dev/null
@@ -57,7 +56,7 @@ tasks:
       - talosctl --nodes {{.TALOS_CONTROLLER}} upgrade-k8s --to {{.KUBERNETES_VERSION}}
     vars: *vars
     requires:
-      vars: ['CLUSTER']
+      vars: [CLUSTER]
     preconditions:
       - test -f {{.CLUSTER_DIR}}/talosconfig
       - talosctl config info &>/dev/null
@@ -68,7 +67,7 @@ tasks:
     prompt: Reset Talos '{{.HOSTNAME}}' node on the '{{.CLUSTER}}' cluster ... continue?
     cmd: talosctl reset --nodes {{.HOSTNAME}} --graceful=false
     requires:
-      vars: ['CLUSTER', 'HOSTNAME']
+      vars: [CLUSTER, HOSTNAME]
     preconditions:
       - test -f {{.CLUSTER_DIR}}/talosconfig
       - talosctl config info &>/dev/null
@@ -82,26 +81,32 @@ tasks:
       NODES:
         sh: talosctl config info --output json | jq --join-output '[.nodes[]] | join(",")'
     requires:
-      vars: ['CLUSTER']
+      vars: [CLUSTER]
     preconditions:
       - test -f {{.CLUSTER_DIR}}/talosconfig
       - talosctl config info &>/dev/null
       - talosctl --nodes {{.NODES}} get machineconfig &>/dev/null
 
   bootstrap-matchbox:
-    desc: Bootstrap required Matchbox configuration to PXE Boot machine
+    desc: Sync required Matchbox configuration to PXE Boot machine
     cmds:
-      - for: ['kernel-amd64', 'initramfs-amd64.xz']
-        cmd: |
-          curl -skL https://factory.talos.dev/image/{{.TALOS_SCHEMATIC_ID}}/{{.TALOS_VERSION}}/{{.ITEM}} | \
-          curl -skT - -u "{{.HOME_SERVICE_USER}}:" \
-            sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/assets/{{.ITEM}}
-      - find {{.CLUSTER_DIR}}/bootstrap/talos/assets -type f | xargs -I{} sh -c "sops --decrypt {} | {{.MINIJINJA_CMD}} - | curl -skT - -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/assets/\$(basename {} | sed 's/\.secret\.sops//')"
-      - find {{.CLUSTER_DIR}}/bootstrap/talos/groups -type f | xargs -I{} curl -skT {} -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/groups/
-      - find {{.CLUSTER_DIR}}/bootstrap/talos/profiles -type f | xargs -I{} curl -skT {} -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/profiles/
+      - for: [kernel-amd64, initramfs-amd64.xz]
+        cmd: >
+          curl -skL https://factory.talos.dev/image/{{.TALOS_SCHEMATIC_ID}}/{{.TALOS_VERSION}}/{{.ITEM}}
+          | curl -skT - -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/assets/{{.ITEM}}
+      - >
+        find {{.CLUSTER_DIR}}/bootstrap/talos/assets -type f
+        | xargs -I{} sh -c "sops --decrypt {} | minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none -
+        | curl -skT - -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/assets/\$(basename {} | sed 's/\.secret\.sops//')"
+      - >
+        find {{.CLUSTER_DIR}}/bootstrap/talos/groups -type f
+        | xargs -I{} curl -skT {} -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/groups/
+      - >
+        find {{.CLUSTER_DIR}}/bootstrap/talos/profiles -type f
+        | xargs -I{} curl -skT {} -u "{{.HOME_SERVICE_USER}}:" sftp://{{.HOME_SERVICE_ADDR}}/{{.HOME_SERVICE_MATCHBOX_DIR}}/profiles/
       - ssh -l {{.HOME_SERVICE_USER}} {{.HOME_SERVICE_ADDR}} "cd /var/opt/home-service ; go-task restart-matchbox"
     vars: *vars
     requires:
-      vars: ['CLUSTER']
+      vars: [CLUSTER]
     preconditions:
       - ping -c1 {{.HOME_SERVICE_ADDR}}
47 changes: 27 additions & 20 deletions .taskfiles/volsync/Taskfile.yaml
@@ -22,13 +22,12 @@ x-vars: &vars
 
 vars:
   VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
-  MINIJINJA_CMD: '{{.ROOT_DIR}}/.bin/minijinja --env --trim-blocks --lstrip-blocks --autoescape=none'
 
 tasks:
 
   state-*:
     desc: Suspend or Resume Volsync
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       STATE: resume or suspend (required)
     cmds:
@@ -40,16 +39,18 @@ tasks:
       STATE: '{{index .MATCH 0}}'
     env: *vars
     requires:
-      vars: ['CLUSTER']
+      vars: [CLUSTER]
 
   list:
     desc: List snapshots for an application
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       NS: Namespace the PVC is in (default: default)
       APP: Application to list snapshots for (required)
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.VOLSYNC_RESOURCES_DIR}}/templates/list.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/templates/list.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh volsync-list-{{.APP}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/volsync-list-{{.APP}} --for condition=complete --timeout=1m
       - kubectl --namespace {{.NS}} logs job/volsync-list-{{.APP}} --container main
@@ -60,20 +61,22 @@
       APP: '{{.APP}}'
       NS: '{{.NS | default "default"}}'
     requires:
-      vars: ['CLUSTER', 'APP']
+      vars: [CLUSTER, APP]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/templates/list.yaml.j2
     silent: true
 
   unlock:
     desc: Unlock a Restic repository for an application
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       NS: Namespace the PVC is in (default: default)
       APP: Application to unlock (required)
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.VOLSYNC_RESOURCES_DIR}}/templates/unlock.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/templates/unlock.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh volsync-unlock-{{.APP}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/volsync-unlock-{{.APP}} --for condition=complete --timeout=1m
       - kubectl --namespace {{.NS}} logs job/volsync-unlock-{{.APP}} --container minio
@@ -83,7 +86,7 @@ tasks:
       NS: '{{.NS | default "default"}}'
     env: *vars
     requires:
-      vars: ['CLUSTER', 'APP']
+      vars: [CLUSTER, APP]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/templates/unlock.yaml.j2
@@ -93,7 +96,7 @@
   # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
   snapshot:
     desc: Snapshot a PVC for an application
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       NS: Namespace the PVC is in (default: default)
       APP: Application to snapshot (required)
@@ -108,7 +111,7 @@
         sh: '{{.VOLSYNC_RESOURCES_DIR}}/scripts/which-controller.sh {{.APP}} {{.NS}}'
     env: *vars
     requires:
-      vars: ['CLUSTER', 'APP']
+      vars: [CLUSTER, APP]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/scripts/which-controller.sh
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh
@@ -118,7 +121,7 @@
   # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
   restore:
     desc: Restore a PVC for an application
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
       NS: Namespace the PVC is in (default: default)
       APP: Application to restore (required)
@@ -146,27 +149,27 @@
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
     env: *vars
     requires:
-      vars: ['CLUSTER', 'APP']
+      vars: [CLUSTER, APP]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/scripts/which-controller.sh
 
   cleanup:
     desc: Delete volume populator PVCs in all namespaces
-    summary: |
+    summary: |-
       CLUSTER: Cluster to run command against (required)
     cmds:
       - for: { var: DEST }
         cmd: |
           {{- $items := (split "/" .ITEM) }}
-          kubectl delete pvc --namespace {{ $items._0 }} {{ $items._1 }}
+          kubectl --namespace {{ $items._0 }} delete pvc {{ $items._1 }}
       - for: { var: CACHE }
         cmd: |
           {{- $items := (split "/" .ITEM) }}
-          kubectl delete pvc --namespace {{ $items._0 }} {{ $items._1 }}
+          kubectl --namespace {{ $items._0 }} delete pvc {{ $items._1 }}
       - for: { var: SNAPS }
         cmd: |
           {{- $items := (split "/" .ITEM) }}
-          kubectl delete volumesnapshot --namespace {{ $items._0 }} {{ $items._1 }}
+          kubectl --namespace {{ $items._0 }} delete volumesnapshot {{ $items._1 }}
     vars:
       DEST:
         sh: kubectl get pvc --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}'
@@ -176,7 +179,7 @@ tasks:
         sh: kubectl get volumesnapshot --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}'
     env: *vars
     requires:
-      vars: ['CLUSTER']
+      vars: [CLUSTER]
 
   # Suspend the Flux ks and hr
   .suspend:
@@ -192,7 +195,9 @@ tasks:
   .wipe:
     internal: true
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.VOLSYNC_RESOURCES_DIR}}/templates/wipe.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/templates/wipe.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh volsync-wipe-{{.APP}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/volsync-wipe-{{.APP}} --for condition=complete --timeout=120m
       - kubectl --namespace {{.NS}} logs job/volsync-wipe-{{.APP}} --container main
@@ -206,7 +211,9 @@ tasks:
   .restore:
     internal: true
     cmds:
-      - '{{.MINIJINJA_CMD}} {{.VOLSYNC_RESOURCES_DIR}}/templates/replicationdestination.yaml.j2 | kubectl apply -f -'
+      - >
+        minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/templates/replicationdestination.yaml.j2
+        | kubectl apply --server-side --filename -
       - bash {{.VOLSYNC_RESOURCES_DIR}}/scripts/wait-for-job.sh volsync-dst-{{.APP}} {{.NS}}
       - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}} --for condition=complete --timeout=120m
       - kubectl --namespace {{.NS}} delete replicationdestination volsync-dst-{{.APP}}