From 573530f66938fcce0fc6de28617344aa27f6755e Mon Sep 17 00:00:00 2001 From: Oscar Romeu Date: Sun, 19 May 2024 17:18:48 +0200 Subject: [PATCH] feat: migrate to talos --- .devcontainer/ci/Dockerfile | 2 + .devcontainer/ci/devcontainer.json | 27 + .../ci/features/devcontainer-feature.json | 6 + .devcontainer/ci/features/install.sh | 79 + .devcontainer/devcontainer.json | 11 + .devcontainer/postCreateCommand.sh | 19 + .editorconfig | 6 +- .envrc | 12 +- .gitattributes | 1 - .github/labeler.yaml | 27 +- .github/labels.yaml | 70 +- .github/release.yaml | 4 + .github/renovate.json5 | 258 +- .github/renovate/autoMerge.json5 | 21 - .github/renovate/commitMessage.json5 | 16 - .github/renovate/groups.json5 | 16 - .github/renovate/labels.json5 | 37 - .github/renovate/semanticCommits.json5 | 96 - .github/tests/config-k3s-ipv4.yaml | 42 + .github/tests/config-k3s-ipv6.yaml | 42 + .github/tests/config-talos.yaml | 46 + .github/workflows/ci.yaml | 25 - .github/workflows/devcontainer.yaml | 58 + .github/workflows/e2e.yaml | 108 + .github/workflows/flux-diff.yaml | 52 +- .github/workflows/kubeconform.yaml | 29 + ...{meta-sync-labels.yaml => label-sync.yaml} | 12 +- .../{meta-labeler.yaml => labeler.yaml} | 14 +- .github/workflows/link-check.yaml | 39 - .github/workflows/release.yaml | 44 + .github/workflows/renovate.yaml | 62 - .github/workflows/trivy.yaml.disable | 43 - .gitignore | 14 + .lycheeignore | 2 - .sops.yaml | 12 +- .taskfiles/Ansible/Taskfile.yaml | 88 + .taskfiles/AnsibleTasks.yaml | 59 - .taskfiles/BrewTasks.yaml | 25 - .taskfiles/ClusterTasks.yaml | 82 - .taskfiles/Flux/Taskfile.yaml | 68 + .taskfiles/Kubernetes/Taskfile.yaml | 35 + .taskfiles/Repository/Taskfile.yaml | 42 + .taskfiles/Sops/Taskfile.yaml | 41 + .taskfiles/Talos/Taskfile.yaml | 110 + .taskfiles/Workstation/Archfile | 17 + .taskfiles/Workstation/Brewfile | 20 + .taskfiles/Workstation/Taskfile.yaml | 71 + .vscode/extensions.json | 2 + .vscode/settings.json | 33 +- README copy.md | 529 +++ Taskfile.yaml | 109 +- .../inventory/group_vars/kubernetes/main.yaml | 34 - .../group_vars/kubernetes/supplemental.yaml | 8 - ansible/inventory/group_vars/master/main.yaml | 31 - ansible/inventory/group_vars/worker/main.yaml | 7 - ansible/inventory/host_vars/.gitkeep | 0 ansible/inventory/host_vars/lpkm1.sops.yaml | 21 - ansible/inventory/host_vars/lpkw1.sops.yaml | 21 - ansible/inventory/host_vars/lpkw2.sops.yaml | 21 - ansible/inventory/hosts.yaml | 24 - ansible/playbooks/cluster-ceph-reset.yaml | 31 - ansible/playbooks/cluster-installation.yaml | 72 - ansible/playbooks/cluster-kube-vip.yaml | 23 - ansible/playbooks/cluster-nuke.yaml | 73 - ansible/playbooks/cluster-prepare.yaml | 120 - ansible/playbooks/cluster-reboot.yaml | 15 - ansible/playbooks/cluster-rollout-update.yaml | 84 - .../playbooks/files/stale-containers.service | 6 - .../playbooks/files/stale-containers.timer | 11 - ansible/playbooks/roles/requirements.yml | 19 - ansible/playbooks/tasks/cilium.yaml | 56 - ansible/playbooks/tasks/coredns.yaml | 56 - ansible/playbooks/tasks/cruft.yaml | 32 - ansible/playbooks/tasks/kubeconfig.yaml | 26 - ansible/playbooks/tasks/stale_containers.yaml | 36 - .../templates/custom-cilium-helmchart.yaml.j2 | 54 - .../templates/custom-cilium-l2.yaml.j2 | 22 - .../custom-coredns-helmchart.yaml.j2 | 77 - .../templates/kube-vip-static-pod.yaml.j2 | 57 - bootstrap/configure.yaml | 34 - bootstrap/overrides/readme.partial.yaml.j2 | 5 + bootstrap/scripts/plugin.py | 67 + bootstrap/scripts/validation.py | 138 + bootstrap/tasks/addons/csi_driver_nfs.yaml | 
34 - .../addons/discord_template_notifier.yaml | 34 - bootstrap/tasks/addons/grafana.yaml | 34 - bootstrap/tasks/addons/hajimari.yaml | 35 - .../tasks/addons/kube_prometheus_stack.yaml | 34 - .../tasks/addons/kubernetes_dashboard.yaml | 34 - bootstrap/tasks/addons/main.yaml | 33 - .../addons/system_upgrade_controller.yaml | 34 - bootstrap/tasks/addons/weave_gitops.yaml | 34 - bootstrap/tasks/ansible/main.yaml | 39 - bootstrap/tasks/kubernetes/main.yaml | 66 - bootstrap/tasks/sops/main.yaml | 6 - bootstrap/tasks/validation/age.yaml | 21 - bootstrap/tasks/validation/cli.yaml | 9 - bootstrap/tasks/validation/cloudflare.yaml | 34 - bootstrap/tasks/validation/github.yaml | 42 - bootstrap/tasks/validation/main.yaml | 6 - bootstrap/tasks/validation/net.yaml | 157 - bootstrap/tasks/validation/vars.yaml | 37 - bootstrap/templates/.sops.yaml.j2 | 18 +- .../csi-driver-nfs/app/helmrelease.yaml.j2 | 29 - .../csi-driver-nfs/app/kustomization.yaml.j2 | 7 - .../csi-driver-nfs/app/storageclass.yaml.j2 | 15 - .../app/helmrelease.yaml.j2 | 55 - .../app/secret.sops.yaml.j2 | 15 - .../addons/grafana/app/helmrelease.yaml.j2 | 174 - .../addons/grafana/app/secret.sops.yaml.j2 | 9 - .../addons/hajimari/app/helmrelease.yaml.j2 | 66 - .../app/helmrelease.yaml.j2 | 150 - .../app/helmrelease.yaml.j2 | 44 - .../app/kustomization.yaml.j2 | 7 - .../kubernetes-dashboard/app/rbac.yaml.j2 | 41 - .../app/kustomization.yaml.j2 | 22 - .../system-upgrade-controller/ks.yaml.j2 | 34 - .../plans/agent.yaml.j2 | 19 - .../plans/server.yaml.j2 | 24 - .../weave-gitops/app/helmrelease.yaml.j2 | 53 - .../weave-gitops/app/kustomization.yaml.j2 | 7 - .../weave-gitops/app/secret.sops.yaml.j2 | 10 - .../templates/addons/weave-gitops/ks.yaml.j2 | 16 - bootstrap/templates/ansible/.ansible-lint.j2 | 9 + bootstrap/templates/ansible/.mjfilter.py | 1 + .../group_vars/controllers/main.yaml.j2 | 36 + .../group_vars/kubernetes/main.yaml.j2 | 52 +- .../kubernetes/supplemental.yaml.j2 | 13 - .../inventory/group_vars/master/main.yaml.j2 | 36 - .../inventory/group_vars/worker/main.yaml.j2 | 12 - .../inventory/group_vars/workers/.mjfilter.py | 10 + .../inventory/group_vars/workers/main.yaml.j2 | 8 + .../ansible/inventory/host_vars/.gitkeep.j2 | 0 .../templates/ansible/inventory/hosts.yaml.j2 | 39 +- .../playbooks/cluster-installation.yaml.j2 | 62 +- .../playbooks/cluster-kube-vip.yaml.j2 | 23 - .../ansible/playbooks/cluster-nuke.yaml.j2 | 66 +- .../ansible/playbooks/cluster-prepare.yaml.j2 | 96 +- .../ansible/playbooks/cluster-reboot.yaml.j2 | 4 +- .../playbooks/cluster-rollout-update.yaml.j2 | 52 +- .../files/stale-containers.service.j2 | 6 - .../playbooks/files/stale-containers.timer.j2 | 11 - .../ansible/playbooks/tasks/coredns.yaml.j2 | 56 - .../ansible/playbooks/tasks/cruft.yaml.j2 | 15 +- .../playbooks/tasks/kubeconfig.yaml.j2 | 6 +- .../playbooks/tasks/stale_containers.yaml.j2 | 36 - .../playbooks/tasks/version-check.yaml.j2 | 17 + .../templates/custom-cilium-helmchart.yaml.j2 | 17 + .../custom-cilium-helmchart.yaml.j2.j2 | 64 - .../templates/custom-cilium-l2.yaml.j2.j2 | 22 - .../custom-coredns-helmchart.yaml.j2.j2 | 78 - .../templates/custom-kube-vip-ds.yaml.j2 | 2 + .../templates/custom-kube-vip-rbac.yaml.j2 | 2 + .../templates/kube-vip-static-pod.yaml.j2.j2 | 57 - .../templates/ansible/requirements.txt.j2 | 4 + .../templates/ansible/requirements.yaml.j2 | 14 + .../cert-manager/app/helmrelease.yaml.j2 | 14 +- .../cert-manager/app/kustomization.yaml.j2 | 2 - .../cert-manager/app/prometheusrule.yaml.j2 | 62 - .../cert-manager/issuers/.mjfilter.py 
| 1 + .../cert-manager/issuers/secret.sops.yaml.j2 | 3 +- .../apps/cert-manager/cert-manager/ks.yaml.j2 | 16 +- .../apps/default/kustomization.yaml.j2 | 12 - .../apps/flux-system/kustomization.yaml.j2 | 6 +- .../app}/github/ingress.yaml.j2 | 9 +- .../app}/github/kustomization.yaml.j2 | 2 + .../app}/github/receiver.yaml.j2 | 1 - .../app}/github/secret.sops.yaml.j2 | 3 +- .../app}/kustomization.yaml.j2 | 0 .../webhooks}/ks.yaml.j2 | 8 +- .../cilium/app/helmrelease.yaml.j2 | 111 +- .../cilium/app/kustomization.yaml.j2 | 5 - .../cilium/{app => config}/cilium-l2.yaml.j2 | 12 +- .../cilium/config/cilium-l3.yaml.j2 | 40 + .../cilium/config/kustomization.yaml.j2 | 11 + .../apps/kube-system/cilium/ks.yaml.j2 | 28 +- .../coredns/app/helmrelease.yaml.j2 | 90 - .../coredns/app/kustomization.yaml.j2 | 6 - .../apps/kube-system/kube-vip/.mjfilter.py | 1 + .../kube-vip/app/daemonset.yaml.j2 | 2 + .../kube-vip/app/kustomization.yaml.j2 | 6 + .../kube-system/kube-vip/app/rbac.yaml.j2 | 2 + .../apps/kube-system/kube-vip}/ks.yaml.j2 | 8 +- .../kubelet-csr-approver/.mjfilter.py | 1 + .../app/helmrelease.yaml.j2 | 30 + .../app/kustomization.yaml.j2 | 1 - .../ks.yaml.j2 | 8 +- .../apps/kube-system/kustomization.yaml.j2 | 13 +- .../app/helmrelease.yaml.j2 | 71 - .../app/kustomization.yaml.j2 | 6 - .../local-path-provisioner/ks.yaml.j2 | 18 - .../metrics-server/app/helmrelease.yaml.j2 | 10 +- .../metrics-server/app/kustomization.yaml.j2 | 1 - .../kube-system/metrics-server/ks.yaml.j2 | 6 +- .../reloader/app/helmrelease.yaml.j2 | 12 +- .../reloader/app/kustomization.yaml.j2 | 1 - .../apps/kube-system/reloader/ks.yaml.j2 | 6 +- .../apps/kube-system/spegel/.mjfilter.py | 1 + .../spegel/app/helmrelease.yaml.j2 | 31 + .../spegel}/app/kustomization.yaml.j2 | 1 - .../apps/kube-system/spegel}/ks.yaml.j2 | 8 +- .../apps/monitoring/kustomization.yaml.j2 | 15 - .../kubernetes/apps/network/.mjfilter.py | 1 + .../cloudflared/app/configs/config.yaml.j2 | 10 + .../cloudflared/app/dnsendpoint.yaml.j2 | 1 - .../cloudflared/app/helmrelease.yaml.j2 | 110 + .../cloudflared/app/kustomization.yaml.j2 | 1 - .../cloudflared/app/secret.sops.yaml.j2 | 13 + .../apps/network/cloudflared}/ks.yaml.j2 | 11 +- .../echo-server/app/helmrelease.yaml.j2 | 91 + .../echo-server}/app/kustomization.yaml.j2 | 1 - .../apps/network/echo-server}/ks.yaml.j2 | 8 +- .../external-dns/app/helmrelease.yaml.j2 | 9 +- .../external-dns}/app/kustomization.yaml.j2 | 1 - .../external-dns/app/secret.sops.yaml.j2 | 3 +- .../external-dns}/ks.yaml.j2 | 8 +- .../certificates/kustomization.yaml.j2 | 8 + .../certificates/production.yaml.j2 | 1 - .../certificates/staging.yaml.j2 | 1 - .../external/helmrelease.yaml.j2 | 34 +- .../external/kustomization.yaml.j2 | 5 + .../internal/helmrelease.yaml.j2 | 34 +- .../internal/kustomization.yaml.j2 | 5 + .../ingress-nginx}/ks.yaml.j2 | 30 +- .../k8s-gateway/app/helmrelease.yaml.j2 | 9 +- .../k8s-gateway/app/kustomization.yaml.j2 | 5 + .../apps/network/k8s-gateway}/ks.yaml.j2 | 8 +- .../kustomization.yaml.j2 | 2 +- .../{default => network}/namespace.yaml.j2 | 2 +- .../cloudflared/app/configs/config.yaml.j2 | 14 - .../cloudflared/app/helmrelease.yaml.j2 | 101 - .../cloudflared/app/secret.sops.yaml.j2 | 14 - .../echo-server/app/helmrelease.yaml.j2 | 74 - .../echo-server/app/kustomization.yaml.j2 | 6 - .../apps/networking/echo-server/ks.yaml.j2 | 16 - .../external-dns/app/dnsendpoint-crd.yaml.j2 | 93 - .../external-dns/app/kustomization.yaml.j2 | 8 - .../apps/networking/k8s-gateway/ks.yaml.j2 | 16 - .../apps/networking/namespace.yaml.j2 | 
7 - .../nginx/certificates/kustomization.yaml.j2 | 9 - .../nginx/external/kustomization.yaml.j2 | 6 - .../nginx/internal/kustomization.yaml.j2 | 6 - .../apps/openebs-system/kustomization.yaml.j2 | 1 + .../namespace.yaml.j2 | 2 +- .../openebs/app/helmrelease.yaml.j2 | 45 + .../openebs/app/kustomization.yaml.j2 | 5 + .../apps/openebs-system/openebs/ks.yaml.j2 | 20 + .../apps/system-upgrade/.mjfilter.py | 1 + .../k3s/app}/kustomization.yaml.j2 | 3 +- .../apps/system-upgrade/k3s/app/plan.yaml.j2 | 50 + .../apps/system-upgrade/k3s/ks.yaml.j2 | 26 + .../apps/system-upgrade/kustomization.yaml.j2 | 4 +- .../app/helmrelease.yaml.j2 | 101 + .../app/kustomization.yaml.j2 | 8 + .../app/rbac.yaml.j2 | 13 + .../system-upgrade-controller}/ks.yaml.j2 | 8 +- .../flux/github-deploy-key.sops.yaml.j2 | 17 + .../bootstrap/flux/kustomization.yaml.j2 | 61 + .../bootstrap/kustomization.yaml.j2 | 18 - .../kubernetes/bootstrap/talos/.mjfilter.py | 1 + .../talos/apps/cilium-values.yaml.j2 | 4 + .../bootstrap/talos/apps/helmfile.yaml.j2 | 26 + .../apps/kubelet-csr-approver-values.yaml.j2 | 4 + .../bootstrap/talos/talconfig.yaml.j2 | 232 ++ .../templates/kubernetes/flux/apps.yaml.j2 | 4 + .../kubernetes/flux/config/cluster.yaml.j2 | 8 +- .../kubernetes/flux/config/flux.yaml.j2 | 14 +- .../repositories/git/kustomization.yaml.j2 | 3 +- .../git/local-path-provisioner.yaml.j2 | 16 - .../flux/repositories/helm/bitnami.yaml.j2 | 10 - .../flux/repositories/helm/bjw-s.yaml.j2 | 2 +- .../flux/repositories/helm/cilium.yaml.j2 | 2 +- .../flux/repositories/helm/coredns.yaml.j2 | 9 - .../repositories/helm/csi-driver-nfs.yaml.j2 | 9 - .../repositories/helm/external-dns.yaml.j2 | 4 +- .../flux/repositories/helm/grafana.yaml.j2 | 9 - .../flux/repositories/helm/hajimari.yaml.j2 | 9 - .../repositories/helm/ingress-nginx.yaml.j2 | 4 +- .../flux/repositories/helm/jetstack.yaml.j2 | 4 +- .../repositories/helm/k8s-gateway.yaml.j2 | 6 +- .../helm/kubernetes-dashboard.yaml.j2 | 9 - .../repositories/helm/kustomization.yaml.j2 | 17 +- .../repositories/helm/metrics-server.yaml.j2 | 2 +- .../flux/repositories/helm/openebs.yaml.j2 | 9 + .../repositories/helm/postfinance.yaml.j2 | 11 + .../helm/prometheus-community.yaml.j2 | 10 - .../flux/repositories/helm/spegel.yaml.j2 | 12 + .../flux/repositories/helm/stakater.yaml.j2 | 2 +- .../repositories/helm/weave-gitops.yaml.j2 | 10 - .../flux/repositories/kustomization.yaml.j2 | 2 +- .../kubernetes/flux/repositories/oci/.gitkeep | 0 .../repositories/oci/kustomization.yaml.j2 | 4 + .../vars/cluster-secrets-user.sops.yaml.j2 | 8 - .../flux/vars/cluster-secrets.sops.yaml.j2 | 9 +- .../flux/vars/cluster-settings-user.yaml.j2 | 8 - .../flux/vars/cluster-settings.yaml.j2 | 20 +- .../flux/vars/kustomization.yaml.j2 | 2 - bootstrap/templates/node.sops.yaml.j2 | 2 - .../cilium-values-full.partial.yaml.j2 | 129 + .../cilium-values-init.partial.yaml.j2 | 79 + .../partials/kube-vip-ds.partial.yaml.j2 | 74 + .../partials/kube-vip-rbac.partial.yaml.j2 | 41 + ...ubelet-csr-approver-values.partial.yaml.j2 | 2 + bootstrap/vars/.gitignore | 2 - bootstrap/vars/addons.sample.yaml | 51 - bootstrap/vars/config.sample.yaml | 73 - config.sample.yaml | 222 ++ .../cert-manager/app/helmrelease.yaml | 14 +- .../cert-manager/app/kustomization.yaml | 2 - .../cert-manager/app/prometheusrule.yaml | 61 - .../cert-manager/issuers/secret.sops.yaml | 19 +- .../apps/cert-manager/cert-manager/ks.yaml | 14 +- .../monitoring/servicemonitor.yaml | 25 - .../apps/cicd/disk-images/app/dv_fedora.yaml | 15 - kubernetes/apps/cicd/disk-images/ks.yaml 
| 19 - kubernetes/apps/cicd/kustomization.yaml | 6 - kubernetes/apps/cicd/namespace.yaml | 5 - .../addons/webhooks/github/secret.sops.yaml | 27 - .../flux-system/capacitor/app/ingress.yaml | 28 - .../capacitor/app/kustomization.yaml | 8 - kubernetes/apps/flux-system/capacitor/ks.yaml | 20 - .../apps/flux-system/kustomization.yaml | 2 +- .../app}/github/ingress.yaml | 7 +- .../app}/github/kustomization.yaml | 0 .../app}/github/receiver.yaml | 1 - .../webhooks/app/github/secret.sops.yaml | 26 + .../app}/kustomization.yaml | 0 .../flux-system/{addons => webhooks}/ks.yaml | 8 +- .../kube-system/cilium/app/helmrelease.yaml | 65 +- .../kube-system/cilium/app/kustomization.yaml | 2 - .../cilium/{app => config}/cilium-l2.yaml | 10 +- .../cilium/config}/kustomization.yaml | 2 +- kubernetes/apps/kube-system/cilium/ks.yaml | 28 +- .../monitoring/agent-servicemonitor.yaml | 31 - .../monitoring/hubble-servicemonitor.yaml | 29 - .../monitoring/operator-servicemonitor.yaml | 26 - .../kube-system/coredns/app/helmrelease.yaml | 89 - .../coredns/app/kustomization.yaml | 6 - .../kubelet-csr-approver/app/helmrelease.yaml | 29 + .../app/kustomization.yaml | 1 - .../{coredns => kubelet-csr-approver}/ks.yaml | 8 +- .../apps/kube-system/kustomization.yaml | 4 +- .../app/helmrelease.yaml | 71 - .../app/kustomization.yaml | 6 - .../metrics-server/app/helmrelease.yaml | 7 +- .../metrics-server/app/kustomization.yaml | 1 - .../apps/kube-system/metrics-server/ks.yaml | 6 +- .../kube-system/reloader/app/helmrelease.yaml | 12 +- .../reloader/app/kustomization.yaml | 1 - kubernetes/apps/kube-system/reloader/ks.yaml | 6 +- .../kube-system/spegel/app/helmrelease.yaml | 31 + .../spegel}/app/kustomization.yaml | 1 - .../spegel}/ks.yaml | 10 +- kubernetes/apps/minio/kustomization.yaml | 6 - .../monitoring/dashboards/kustomization.yaml | 14 - .../monitoring/dashboards/minio-overview.json | 2908 ----------------- .../apps/minio/monitoring/kustomization.yaml | 6 - .../apps/minio/operator/app/helmrelease.yaml | 61 - .../apps/minio/operator/app/namespace.yaml | 7 - kubernetes/apps/minio/operator/ks.yaml | 49 - .../cloudflared/app/configs/config.yaml | 10 + .../cloudflared/app/dnsendpoint.yaml | 1 - .../network/cloudflared/app/helmrelease.yaml | 110 + .../cloudflared/app/kustomization.yaml | 1 - .../network/cloudflared/app/secret.sops.yaml | 27 + .../apps/network/cloudflared/ks.yaml | 10 +- .../network/echo-server/app/helmrelease.yaml | 91 + .../echo-server}/app/kustomization.yaml | 1 - .../echo-server/ks.yaml | 8 +- .../external-dns/app/helmrelease.yaml | 9 +- .../external-dns/app/kustomization.yaml | 1 - .../network/external-dns/app/secret.sops.yaml | 26 + .../external-dns/ks.yaml | 8 +- .../certificates/kustomization.yaml | 1 - .../certificates/production.yaml | 1 - .../ingress-nginx}/certificates/staging.yaml | 1 - .../ingress-nginx}/external/helmrelease.yaml | 25 +- .../ingress-nginx/external/kustomization.yaml | 5 + .../ingress-nginx}/internal/helmrelease.yaml | 32 +- .../ingress-nginx/internal/kustomization.yaml | 5 + .../nginx => network/ingress-nginx}/ks.yaml | 30 +- .../k8s-gateway/app/helmrelease.yaml | 7 +- .../k8s-gateway/app/kustomization.yaml | 5 + .../k8s-gateway/ks.yaml | 8 +- .../kustomization.yaml | 3 +- .../{networking => network}/namespace.yaml | 2 +- .../smtp-relay/app/externalsecret.yaml | 0 .../smtp-relay/app/helmrelease.yaml | 0 .../smtp-relay/app/kustomization.yaml | 0 .../smtp-relay/app/resources/maddy.conf | 0 .../smtp-relay/ks.yaml | 0 .../cloudflared/app/configs/config.yaml | 14 - 
.../cloudflared/app/helmrelease.yaml | 101 - .../cloudflared/app/secret.sops.yaml | 28 - .../echo-server/app/helmrelease.yaml | 73 - .../external-dns/app/dnsendpoint-crd.yaml | 93 - .../external-dns/app/kustomization.yaml | 8 - .../external-dns/app/secret.sops.yaml | 27 - .../nginx/external/kustomization.yaml | 6 - .../nginx/internal/kustomization.yaml | 6 - .../monitoring/dashboards/kustomization.yaml | 21 - .../monitoring/dashboards/nginx-review.json | 2164 ------------ .../nginx/monitoring/dashboards/nginx.json | 1630 --------- .../request-handling-performance.json | 985 ------ .../nginx/monitoring/kustomization.yaml | 6 - .../kustomization.yaml | 1 + .../namespace.yaml | 2 +- .../openebs/app/helmrelease.yaml | 45 + .../openebs/app/kustomization.yaml | 5 + .../openebs}/ks.yaml | 10 +- kubernetes/apps/system-upgrade/namespace.yaml | 7 - kubernetes/bootstrap/flux/kustomization.yaml | 61 + kubernetes/bootstrap/kustomization.yaml | 18 - .../bootstrap/talos/apps/cilium-values.yaml | 59 + kubernetes/bootstrap/talos/apps/helmfile.yaml | 26 + .../apps/kubelet-csr-approver-values.yaml | 3 + kubernetes/bootstrap/talos/talconfig.yaml | 187 ++ kubernetes/flux/apps.yaml | 4 + kubernetes/flux/config/cluster.yaml | 4 +- kubernetes/flux/config/flux.yaml | 14 +- kubernetes/flux/repositories/git/elastic.yaml | 16 - .../git/kubernetes-csi-addons.yaml | 18 - .../flux/repositories/git/kustomization.yaml | 5 +- .../git/local-path-provisioner.yaml | 16 - .../helm/actions-runner-controller.yaml | 19 - .../flux/repositories/helm/authelia.yaml | 10 - .../flux/repositories/helm/backube.yaml | 10 - .../flux/repositories/helm/bitnami.yaml | 10 - kubernetes/flux/repositories/helm/bjw-s.yaml | 2 +- .../flux/repositories/helm/botkube.yaml | 10 - kubernetes/flux/repositories/helm/calico.yaml | 9 - .../flux/repositories/helm/chaos-mesh.yaml | 9 - kubernetes/flux/repositories/helm/cilium.yaml | 2 +- .../repositories/helm/cloudnative-pg.yaml | 10 - .../flux/repositories/helm/coredns.yaml | 9 - .../flux/repositories/helm/crossplane.yaml | 9 - .../repositories/helm/csi-driver-nfs.yaml | 9 - .../flux/repositories/helm/ddosify.yaml | 10 - .../flux/repositories/helm/deliveryhero.yaml | 9 - .../flux/repositories/helm/descheduler.yaml | 9 - .../flux/repositories/helm/external-dns.yaml | 2 +- .../repositories/helm/external-secrets.yaml | 9 - .../flux/repositories/helm/fairwinds.yaml | 9 - .../flux/repositories/helm/flanksource.yaml | 9 - kubernetes/flux/repositories/helm/fleet.yaml | 8 - kubernetes/flux/repositories/helm/gitea.yaml | 10 - .../flux/repositories/helm/grafana.yaml | 9 - .../flux/repositories/helm/hajimari.yaml | 9 - kubernetes/flux/repositories/helm/harbor.yaml | 9 - .../flux/repositories/helm/influxdata.yaml | 10 - .../flux/repositories/helm/ingress-nginx.yaml | 2 +- kubernetes/flux/repositories/helm/intel.yaml | 9 - kubernetes/flux/repositories/helm/istio.yaml | 9 - .../flux/repositories/helm/jetstack.yaml | 4 +- .../flux/repositories/helm/jupyter.yaml | 10 - .../flux/repositories/helm/k8s-gateway.yaml | 4 +- kubernetes/flux/repositories/helm/kafka.yaml | 9 - kubernetes/flux/repositories/helm/keda.yaml | 9 - .../helm/kubernetes-dashboard.yaml | 9 - .../flux/repositories/helm/kustomization.yaml | 68 +- .../flux/repositories/helm/kyverno.yaml | 10 - .../flux/repositories/helm/longhorn.yaml | 10 - .../flux/repositories/helm/marketplane.yaml | 9 - .../flux/repositories/helm/metallb.yaml | 9 - .../repositories/helm/metrics-server.yaml | 2 +- .../flux/repositories/helm/microcks.yaml | 9 - 
.../repositories/helm/minio-operator.yaml | 10 - kubernetes/flux/repositories/helm/minio.yaml | 9 - .../flux/repositories/helm/mongodb.yaml | 9 - .../helm/nfs-subdir-external-provisioner.yaml | 8 - .../helm/node-feature-discovery.yaml | 9 - .../helm/nvidia-gpu-feature-discovery.yaml | 9 - kubernetes/flux/repositories/helm/nvidia.yaml | 10 - .../flux/repositories/helm/openebs.yaml | 9 + .../flux/repositories/helm/ot-helm.yaml | 10 - .../flux/repositories/helm/passbolt.yaml | 10 - .../flux/repositories/helm/piraeus.yaml | 10 - .../flux/repositories/helm/podinfo.yaml | 8 - .../flux/repositories/helm/postfinance.yaml | 9 + .../helm/prometheus-community.yaml | 10 - .../flux/repositories/helm/pyroscope.yaml | 8 - .../flux/repositories/helm/questdb.yaml | 9 - .../flux/repositories/helm/redpanda.yaml | 9 - .../flux/repositories/helm/robusta.yaml | 9 - .../flux/repositories/helm/rook-ceph.yaml | 9 - kubernetes/flux/repositories/helm/runix.yaml | 9 - kubernetes/flux/repositories/helm/sloth.yaml | 10 - kubernetes/flux/repositories/helm/spegel.yaml | 10 + .../flux/repositories/helm/stakater.yaml | 2 +- .../flux/repositories/helm/tf-controller.yaml | 9 - .../flux/repositories/helm/timescale.yaml | 10 - kubernetes/flux/repositories/helm/twuni.yaml | 10 - kubernetes/flux/repositories/helm/vector.yaml | 9 - kubernetes/flux/repositories/helm/vm.yaml | 10 - kubernetes/flux/repositories/helm/vmware.yaml | 10 - .../flux/repositories/helm/weave-gitops.yaml | 10 - kubernetes/flux/repositories/helm/wikijs.yaml | 9 - .../flux/repositories/helm/woodpecker.yaml | 9 - .../flux/repositories/kustomization.yaml | 2 +- kubernetes/flux/repositories/oci/.gitkeep | 0 .../flux/repositories/oci/flamingo.yaml | 15 - .../flux/repositories/oci/kustomization.yaml | 3 +- .../flux/vars/cluster-secrets-user.sops.yaml | 46 - .../flux/vars/cluster-secrets.sops.yaml | 38 +- .../flux/vars/cluster-settings-user.yaml | 8 - kubernetes/flux/vars/cluster-settings.yaml | 7 +- kubernetes/flux/vars/kustomization.yaml | 3 +- makejinja.toml | 18 + requirements.txt | 11 +- requirements.yaml | 19 - scripts/kubeconform.sh | 52 + 511 files changed, 5471 insertions(+), 14869 deletions(-) create mode 100644 .devcontainer/ci/Dockerfile create mode 100644 .devcontainer/ci/devcontainer.json create mode 100644 .devcontainer/ci/features/devcontainer-feature.json create mode 100644 .devcontainer/ci/features/install.sh create mode 100644 .devcontainer/devcontainer.json create mode 100755 .devcontainer/postCreateCommand.sh create mode 100644 .github/release.yaml delete mode 100644 .github/renovate/autoMerge.json5 delete mode 100644 .github/renovate/commitMessage.json5 delete mode 100644 .github/renovate/groups.json5 delete mode 100644 .github/renovate/labels.json5 delete mode 100644 .github/renovate/semanticCommits.json5 create mode 100644 .github/tests/config-k3s-ipv4.yaml create mode 100644 .github/tests/config-k3s-ipv6.yaml create mode 100644 .github/tests/config-talos.yaml delete mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/devcontainer.yaml create mode 100644 .github/workflows/e2e.yaml create mode 100644 .github/workflows/kubeconform.yaml rename .github/workflows/{meta-sync-labels.yaml => label-sync.yaml} (51%) rename .github/workflows/{meta-labeler.yaml => labeler.yaml} (51%) delete mode 100644 .github/workflows/link-check.yaml create mode 100644 .github/workflows/release.yaml delete mode 100644 .github/workflows/renovate.yaml delete mode 100644 .github/workflows/trivy.yaml.disable delete mode 100644 .lycheeignore create 
mode 100644 .taskfiles/Ansible/Taskfile.yaml delete mode 100644 .taskfiles/AnsibleTasks.yaml delete mode 100644 .taskfiles/BrewTasks.yaml delete mode 100644 .taskfiles/ClusterTasks.yaml create mode 100644 .taskfiles/Flux/Taskfile.yaml create mode 100644 .taskfiles/Kubernetes/Taskfile.yaml create mode 100644 .taskfiles/Repository/Taskfile.yaml create mode 100644 .taskfiles/Sops/Taskfile.yaml create mode 100644 .taskfiles/Talos/Taskfile.yaml create mode 100644 .taskfiles/Workstation/Archfile create mode 100644 .taskfiles/Workstation/Brewfile create mode 100644 .taskfiles/Workstation/Taskfile.yaml create mode 100644 README copy.md delete mode 100644 ansible/inventory/group_vars/kubernetes/main.yaml delete mode 100644 ansible/inventory/group_vars/kubernetes/supplemental.yaml delete mode 100644 ansible/inventory/group_vars/master/main.yaml delete mode 100644 ansible/inventory/group_vars/worker/main.yaml delete mode 100644 ansible/inventory/host_vars/.gitkeep delete mode 100644 ansible/inventory/host_vars/lpkm1.sops.yaml delete mode 100644 ansible/inventory/host_vars/lpkw1.sops.yaml delete mode 100644 ansible/inventory/host_vars/lpkw2.sops.yaml delete mode 100644 ansible/inventory/hosts.yaml delete mode 100644 ansible/playbooks/cluster-ceph-reset.yaml delete mode 100644 ansible/playbooks/cluster-installation.yaml delete mode 100644 ansible/playbooks/cluster-kube-vip.yaml delete mode 100644 ansible/playbooks/cluster-nuke.yaml delete mode 100644 ansible/playbooks/cluster-prepare.yaml delete mode 100644 ansible/playbooks/cluster-reboot.yaml delete mode 100644 ansible/playbooks/cluster-rollout-update.yaml delete mode 100644 ansible/playbooks/files/stale-containers.service delete mode 100644 ansible/playbooks/files/stale-containers.timer delete mode 100644 ansible/playbooks/roles/requirements.yml delete mode 100644 ansible/playbooks/tasks/cilium.yaml delete mode 100644 ansible/playbooks/tasks/coredns.yaml delete mode 100644 ansible/playbooks/tasks/cruft.yaml delete mode 100644 ansible/playbooks/tasks/kubeconfig.yaml delete mode 100644 ansible/playbooks/tasks/stale_containers.yaml delete mode 100644 ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 delete mode 100644 ansible/playbooks/templates/custom-cilium-l2.yaml.j2 delete mode 100644 ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2 delete mode 100644 ansible/playbooks/templates/kube-vip-static-pod.yaml.j2 delete mode 100644 bootstrap/configure.yaml create mode 100644 bootstrap/overrides/readme.partial.yaml.j2 create mode 100644 bootstrap/scripts/plugin.py create mode 100644 bootstrap/scripts/validation.py delete mode 100644 bootstrap/tasks/addons/csi_driver_nfs.yaml delete mode 100644 bootstrap/tasks/addons/discord_template_notifier.yaml delete mode 100644 bootstrap/tasks/addons/grafana.yaml delete mode 100644 bootstrap/tasks/addons/hajimari.yaml delete mode 100644 bootstrap/tasks/addons/kube_prometheus_stack.yaml delete mode 100644 bootstrap/tasks/addons/kubernetes_dashboard.yaml delete mode 100644 bootstrap/tasks/addons/main.yaml delete mode 100644 bootstrap/tasks/addons/system_upgrade_controller.yaml delete mode 100644 bootstrap/tasks/addons/weave_gitops.yaml delete mode 100644 bootstrap/tasks/ansible/main.yaml delete mode 100644 bootstrap/tasks/kubernetes/main.yaml delete mode 100644 bootstrap/tasks/sops/main.yaml delete mode 100644 bootstrap/tasks/validation/age.yaml delete mode 100644 bootstrap/tasks/validation/cli.yaml delete mode 100644 bootstrap/tasks/validation/cloudflare.yaml delete mode 100644 
bootstrap/tasks/validation/github.yaml delete mode 100644 bootstrap/tasks/validation/main.yaml delete mode 100644 bootstrap/tasks/validation/net.yaml delete mode 100644 bootstrap/tasks/validation/vars.yaml delete mode 100644 bootstrap/templates/addons/csi-driver-nfs/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/csi-driver-nfs/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/addons/csi-driver-nfs/app/storageclass.yaml.j2 delete mode 100644 bootstrap/templates/addons/discord-template-notifier/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/discord-template-notifier/app/secret.sops.yaml.j2 delete mode 100644 bootstrap/templates/addons/grafana/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/grafana/app/secret.sops.yaml.j2 delete mode 100644 bootstrap/templates/addons/hajimari/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/kube-prometheus-stack/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/kubernetes-dashboard/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/kubernetes-dashboard/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/addons/kubernetes-dashboard/app/rbac.yaml.j2 delete mode 100644 bootstrap/templates/addons/system-upgrade-controller/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/addons/system-upgrade-controller/ks.yaml.j2 delete mode 100644 bootstrap/templates/addons/system-upgrade-controller/plans/agent.yaml.j2 delete mode 100644 bootstrap/templates/addons/system-upgrade-controller/plans/server.yaml.j2 delete mode 100644 bootstrap/templates/addons/weave-gitops/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/addons/weave-gitops/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/addons/weave-gitops/app/secret.sops.yaml.j2 delete mode 100644 bootstrap/templates/addons/weave-gitops/ks.yaml.j2 create mode 100644 bootstrap/templates/ansible/.ansible-lint.j2 create mode 100644 bootstrap/templates/ansible/.mjfilter.py create mode 100644 bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 delete mode 100644 bootstrap/templates/ansible/inventory/group_vars/kubernetes/supplemental.yaml.j2 delete mode 100644 bootstrap/templates/ansible/inventory/group_vars/master/main.yaml.j2 delete mode 100644 bootstrap/templates/ansible/inventory/group_vars/worker/main.yaml.j2 create mode 100644 bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py create mode 100644 bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 delete mode 100644 bootstrap/templates/ansible/inventory/host_vars/.gitkeep.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/files/stale-containers.service.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/files/stale-containers.timer.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/tasks/coredns.yaml.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/tasks/stale_containers.yaml.j2 create mode 100644 bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 create mode 100644 bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/templates/custom-cilium-l2.yaml.j2.j2 delete mode 100644 
bootstrap/templates/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2.j2 create mode 100644 bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 create mode 100644 bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 delete mode 100644 bootstrap/templates/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2.j2 create mode 100644 bootstrap/templates/ansible/requirements.txt.j2 create mode 100644 bootstrap/templates/ansible/requirements.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py delete mode 100644 bootstrap/templates/kubernetes/apps/default/kustomization.yaml.j2 rename bootstrap/templates/kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/ingress.yaml.j2 (75%) rename bootstrap/templates/kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/kustomization.yaml.j2 (72%) rename bootstrap/templates/kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/receiver.yaml.j2 (95%) rename bootstrap/templates/kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/secret.sops.yaml.j2 (53%) rename bootstrap/templates/kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/kustomization.yaml.j2 (100%) rename bootstrap/templates/kubernetes/apps/{networking/external-dns => flux-system/webhooks}/ks.yaml.j2 (58%) rename bootstrap/templates/kubernetes/apps/kube-system/cilium/{app => config}/cilium-l2.yaml.j2 (56%) create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l3.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/cilium/config/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 rename bootstrap/templates/{addons/kube-prometheus-stack => kubernetes/apps/kube-system/kube-vip}/ks.yaml.j2 (59%) create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 rename bootstrap/templates/{addons/hajimari => kubernetes/apps/kube-system/kubelet-csr-approver}/app/kustomization.yaml.j2 (84%) rename bootstrap/templates/kubernetes/apps/kube-system/{coredns => kubelet-csr-approver}/ks.yaml.j2 (58%) delete mode 100644 bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py create mode 100644 bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 rename bootstrap/templates/{addons/kube-prometheus-stack => 
kubernetes/apps/kube-system/spegel}/app/kustomization.yaml.j2 (82%) rename bootstrap/templates/{addons/hajimari => kubernetes/apps/kube-system/spegel}/ks.yaml.j2 (60%) delete mode 100644 bootstrap/templates/kubernetes/apps/monitoring/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/network/.mjfilter.py create mode 100644 bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking => network}/cloudflared/app/dnsendpoint.yaml.j2 (91%) create mode 100644 bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking => network}/cloudflared/app/kustomization.yaml.j2 (92%) create mode 100644 bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 rename bootstrap/templates/{addons/kubernetes-dashboard => kubernetes/apps/network/cloudflared}/ks.yaml.j2 (57%) create mode 100644 bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking/k8s-gateway => network/echo-server}/app/kustomization.yaml.j2 (82%) rename bootstrap/templates/{addons/discord-template-notifier => kubernetes/apps/network/echo-server}/ks.yaml.j2 (59%) rename bootstrap/templates/kubernetes/apps/{networking => network}/external-dns/app/helmrelease.yaml.j2 (89%) rename bootstrap/templates/{addons/discord-template-notifier => kubernetes/apps/network/external-dns}/app/kustomization.yaml.j2 (86%) rename bootstrap/templates/kubernetes/apps/{networking => network}/external-dns/app/secret.sops.yaml.j2 (53%) rename bootstrap/templates/kubernetes/apps/{flux-system/addons => network/external-dns}/ks.yaml.j2 (59%) create mode 100644 bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking/nginx => network/ingress-nginx}/certificates/production.yaml.j2 (93%) rename bootstrap/templates/kubernetes/apps/{networking/nginx => network/ingress-nginx}/certificates/staging.yaml.j2 (93%) rename bootstrap/templates/kubernetes/apps/{networking/nginx => network/ingress-nginx}/external/helmrelease.yaml.j2 (75%) create mode 100644 bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking/nginx => network/ingress-nginx}/internal/helmrelease.yaml.j2 (73%) create mode 100644 bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 rename bootstrap/templates/kubernetes/apps/{networking/nginx => network/ingress-nginx}/ks.yaml.j2 (53%) rename bootstrap/templates/kubernetes/apps/{networking => network}/k8s-gateway/app/helmrelease.yaml.j2 (74%) create mode 100644 bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 rename bootstrap/templates/{addons/grafana => kubernetes/apps/network/k8s-gateway}/ks.yaml.j2 (59%) rename bootstrap/templates/kubernetes/apps/{networking => network}/kustomization.yaml.j2 (87%) rename bootstrap/templates/kubernetes/apps/{default => network}/namespace.yaml.j2 (86%) delete mode 100644 bootstrap/templates/kubernetes/apps/networking/cloudflared/app/configs/config.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml.j2 delete mode 100644 
bootstrap/templates/kubernetes/apps/networking/echo-server/app/helmrelease.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/echo-server/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/echo-server/ks.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/external-dns/app/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/k8s-gateway/ks.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/namespace.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/nginx/certificates/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/nginx/external/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/apps/networking/nginx/internal/kustomization.yaml.j2 rename kubernetes/apps/system-upgrade/kustomization.yaml => bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 (81%) rename bootstrap/templates/kubernetes/apps/{monitoring => openebs-system}/namespace.yaml.j2 (81%) create mode 100644 bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py rename bootstrap/templates/{addons/system-upgrade-controller/plans => kubernetes/apps/system-upgrade/k3s/app}/kustomization.yaml.j2 (69%) create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 rename bootstrap/templates/{addons/csi-driver-nfs => kubernetes/apps/system-upgrade/system-upgrade-controller}/ks.yaml.j2 (54%) create mode 100644 bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/bootstrap/kustomization.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py create mode 100644 bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/git/local-path-provisioner.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/bitnami.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml.j2 delete mode 100644 
bootstrap/templates/kubernetes/flux/repositories/helm/grafana.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/hajimari.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 create mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/helm/weave-gitops.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/repositories/oci/.gitkeep create mode 100644 bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/vars/cluster-secrets-user.sops.yaml.j2 delete mode 100644 bootstrap/templates/kubernetes/flux/vars/cluster-settings-user.yaml.j2 delete mode 100644 bootstrap/templates/node.sops.yaml.j2 create mode 100644 bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 create mode 100644 bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 create mode 100644 bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 create mode 100644 bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 create mode 100644 bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 delete mode 100644 bootstrap/vars/.gitignore delete mode 100644 bootstrap/vars/addons.sample.yaml delete mode 100644 bootstrap/vars/config.sample.yaml create mode 100644 config.sample.yaml delete mode 100644 kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml delete mode 100644 kubernetes/apps/cert-manager/cert-manager/monitoring/servicemonitor.yaml delete mode 100644 kubernetes/apps/cicd/disk-images/app/dv_fedora.yaml delete mode 100644 kubernetes/apps/cicd/disk-images/ks.yaml delete mode 100644 kubernetes/apps/cicd/kustomization.yaml delete mode 100644 kubernetes/apps/cicd/namespace.yaml delete mode 100644 kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml delete mode 100644 kubernetes/apps/flux-system/capacitor/app/ingress.yaml delete mode 100644 kubernetes/apps/flux-system/capacitor/app/kustomization.yaml delete mode 100644 kubernetes/apps/flux-system/capacitor/ks.yaml rename kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/ingress.yaml (75%) rename kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/kustomization.yaml (100%) rename kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/github/receiver.yaml (95%) create mode 100644 kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml rename kubernetes/apps/flux-system/{addons/webhooks => webhooks/app}/kustomization.yaml (100%) rename kubernetes/apps/flux-system/{addons => webhooks}/ks.yaml (58%) rename kubernetes/apps/kube-system/cilium/{app => config}/cilium-l2.yaml (67%) rename kubernetes/apps/{cicd/disk-images/app => kube-system/cilium/config}/kustomization.yaml (79%) delete mode 100644 kubernetes/apps/kube-system/cilium/monitoring/agent-servicemonitor.yaml delete mode 100644 kubernetes/apps/kube-system/cilium/monitoring/hubble-servicemonitor.yaml delete mode 100644 kubernetes/apps/kube-system/cilium/monitoring/operator-servicemonitor.yaml delete mode 100644 kubernetes/apps/kube-system/coredns/app/helmrelease.yaml delete 
mode 100644 kubernetes/apps/kube-system/coredns/app/kustomization.yaml create mode 100644 kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml rename kubernetes/apps/{minio/operator => kube-system/kubelet-csr-approver}/app/kustomization.yaml (82%) rename kubernetes/apps/kube-system/{coredns => kubelet-csr-approver}/ks.yaml (58%) delete mode 100644 kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml delete mode 100644 kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml create mode 100644 kubernetes/apps/kube-system/spegel/app/helmrelease.yaml rename kubernetes/apps/{networking/echo-server => kube-system/spegel}/app/kustomization.yaml (82%) rename kubernetes/apps/{networking/cloudflared => kube-system/spegel}/ks.yaml (60%) delete mode 100644 kubernetes/apps/minio/kustomization.yaml delete mode 100644 kubernetes/apps/minio/monitoring/dashboards/kustomization.yaml delete mode 100644 kubernetes/apps/minio/monitoring/dashboards/minio-overview.json delete mode 100644 kubernetes/apps/minio/monitoring/kustomization.yaml delete mode 100644 kubernetes/apps/minio/operator/app/helmrelease.yaml delete mode 100644 kubernetes/apps/minio/operator/app/namespace.yaml delete mode 100644 kubernetes/apps/minio/operator/ks.yaml create mode 100644 kubernetes/apps/network/cloudflared/app/configs/config.yaml rename kubernetes/apps/{networking => network}/cloudflared/app/dnsendpoint.yaml (91%) create mode 100644 kubernetes/apps/network/cloudflared/app/helmrelease.yaml rename kubernetes/apps/{networking => network}/cloudflared/app/kustomization.yaml (92%) create mode 100644 kubernetes/apps/network/cloudflared/app/secret.sops.yaml rename bootstrap/templates/kubernetes/apps/networking/cloudflared/ks.yaml.j2 => kubernetes/apps/network/cloudflared/ks.yaml (57%) create mode 100644 kubernetes/apps/network/echo-server/app/helmrelease.yaml rename kubernetes/apps/{networking/k8s-gateway => network/echo-server}/app/kustomization.yaml (82%) rename kubernetes/apps/{networking => network}/echo-server/ks.yaml (59%) rename kubernetes/apps/{networking => network}/external-dns/app/helmrelease.yaml (89%) rename bootstrap/templates/addons/grafana/app/kustomization.yaml.j2 => kubernetes/apps/network/external-dns/app/kustomization.yaml (85%) create mode 100644 kubernetes/apps/network/external-dns/app/secret.sops.yaml rename kubernetes/apps/{networking => network}/external-dns/ks.yaml (59%) rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/certificates/kustomization.yaml (81%) rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/certificates/production.yaml (93%) rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/certificates/staging.yaml (93%) rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/external/helmrelease.yaml (82%) create mode 100644 kubernetes/apps/network/ingress-nginx/external/kustomization.yaml rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/internal/helmrelease.yaml (73%) create mode 100644 kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml rename kubernetes/apps/{networking/nginx => network/ingress-nginx}/ks.yaml (53%) rename kubernetes/apps/{networking => network}/k8s-gateway/app/helmrelease.yaml (83%) create mode 100644 kubernetes/apps/network/k8s-gateway/app/kustomization.yaml rename kubernetes/apps/{networking => network}/k8s-gateway/ks.yaml (59%) rename kubernetes/apps/{networking => network}/kustomization.yaml (79%) rename kubernetes/apps/{networking => 
network}/namespace.yaml (84%) rename kubernetes/apps/{networking => network}/smtp-relay/app/externalsecret.yaml (100%) rename kubernetes/apps/{networking => network}/smtp-relay/app/helmrelease.yaml (100%) rename kubernetes/apps/{networking => network}/smtp-relay/app/kustomization.yaml (100%) rename kubernetes/apps/{networking => network}/smtp-relay/app/resources/maddy.conf (100%) rename kubernetes/apps/{networking => network}/smtp-relay/ks.yaml (100%) delete mode 100644 kubernetes/apps/networking/cloudflared/app/configs/config.yaml delete mode 100644 kubernetes/apps/networking/cloudflared/app/helmrelease.yaml delete mode 100644 kubernetes/apps/networking/cloudflared/app/secret.sops.yaml delete mode 100644 kubernetes/apps/networking/echo-server/app/helmrelease.yaml delete mode 100644 kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml delete mode 100644 kubernetes/apps/networking/external-dns/app/kustomization.yaml delete mode 100644 kubernetes/apps/networking/external-dns/app/secret.sops.yaml delete mode 100644 kubernetes/apps/networking/nginx/external/kustomization.yaml delete mode 100644 kubernetes/apps/networking/nginx/internal/kustomization.yaml delete mode 100644 kubernetes/apps/networking/nginx/monitoring/dashboards/kustomization.yaml delete mode 100644 kubernetes/apps/networking/nginx/monitoring/dashboards/nginx-review.json delete mode 100644 kubernetes/apps/networking/nginx/monitoring/dashboards/nginx.json delete mode 100644 kubernetes/apps/networking/nginx/monitoring/dashboards/request-handling-performance.json delete mode 100644 kubernetes/apps/networking/nginx/monitoring/kustomization.yaml rename kubernetes/apps/{monitoring => openebs-system}/kustomization.yaml (81%) rename kubernetes/apps/{monitoring => openebs-system}/namespace.yaml (81%) create mode 100644 kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml create mode 100644 kubernetes/apps/openebs-system/openebs/app/kustomization.yaml rename kubernetes/apps/{kube-system/local-path-provisioner => openebs-system/openebs}/ks.yaml (59%) delete mode 100644 kubernetes/apps/system-upgrade/namespace.yaml create mode 100644 kubernetes/bootstrap/flux/kustomization.yaml delete mode 100644 kubernetes/bootstrap/kustomization.yaml create mode 100644 kubernetes/bootstrap/talos/apps/cilium-values.yaml create mode 100644 kubernetes/bootstrap/talos/apps/helmfile.yaml create mode 100644 kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml create mode 100644 kubernetes/bootstrap/talos/talconfig.yaml delete mode 100644 kubernetes/flux/repositories/git/elastic.yaml delete mode 100644 kubernetes/flux/repositories/git/kubernetes-csi-addons.yaml delete mode 100644 kubernetes/flux/repositories/git/local-path-provisioner.yaml delete mode 100644 kubernetes/flux/repositories/helm/actions-runner-controller.yaml delete mode 100644 kubernetes/flux/repositories/helm/authelia.yaml delete mode 100644 kubernetes/flux/repositories/helm/backube.yaml delete mode 100644 kubernetes/flux/repositories/helm/bitnami.yaml delete mode 100644 kubernetes/flux/repositories/helm/botkube.yaml delete mode 100644 kubernetes/flux/repositories/helm/calico.yaml delete mode 100644 kubernetes/flux/repositories/helm/chaos-mesh.yaml delete mode 100644 kubernetes/flux/repositories/helm/cloudnative-pg.yaml delete mode 100644 kubernetes/flux/repositories/helm/coredns.yaml delete mode 100644 kubernetes/flux/repositories/helm/crossplane.yaml delete mode 100644 kubernetes/flux/repositories/helm/csi-driver-nfs.yaml delete mode 100644 
kubernetes/flux/repositories/helm/ddosify.yaml delete mode 100644 kubernetes/flux/repositories/helm/deliveryhero.yaml delete mode 100644 kubernetes/flux/repositories/helm/descheduler.yaml delete mode 100644 kubernetes/flux/repositories/helm/external-secrets.yaml delete mode 100644 kubernetes/flux/repositories/helm/fairwinds.yaml delete mode 100644 kubernetes/flux/repositories/helm/flanksource.yaml delete mode 100644 kubernetes/flux/repositories/helm/fleet.yaml delete mode 100644 kubernetes/flux/repositories/helm/gitea.yaml delete mode 100644 kubernetes/flux/repositories/helm/grafana.yaml delete mode 100644 kubernetes/flux/repositories/helm/hajimari.yaml delete mode 100644 kubernetes/flux/repositories/helm/harbor.yaml delete mode 100644 kubernetes/flux/repositories/helm/influxdata.yaml delete mode 100644 kubernetes/flux/repositories/helm/intel.yaml delete mode 100644 kubernetes/flux/repositories/helm/istio.yaml delete mode 100644 kubernetes/flux/repositories/helm/jupyter.yaml delete mode 100644 kubernetes/flux/repositories/helm/kafka.yaml delete mode 100644 kubernetes/flux/repositories/helm/keda.yaml delete mode 100644 kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml delete mode 100644 kubernetes/flux/repositories/helm/kyverno.yaml delete mode 100644 kubernetes/flux/repositories/helm/longhorn.yaml delete mode 100644 kubernetes/flux/repositories/helm/marketplane.yaml delete mode 100644 kubernetes/flux/repositories/helm/metallb.yaml delete mode 100644 kubernetes/flux/repositories/helm/microcks.yaml delete mode 100644 kubernetes/flux/repositories/helm/minio-operator.yaml delete mode 100644 kubernetes/flux/repositories/helm/minio.yaml delete mode 100644 kubernetes/flux/repositories/helm/mongodb.yaml delete mode 100644 kubernetes/flux/repositories/helm/nfs-subdir-external-provisioner.yaml delete mode 100644 kubernetes/flux/repositories/helm/node-feature-discovery.yaml delete mode 100644 kubernetes/flux/repositories/helm/nvidia-gpu-feature-discovery.yaml delete mode 100644 kubernetes/flux/repositories/helm/nvidia.yaml create mode 100644 kubernetes/flux/repositories/helm/openebs.yaml delete mode 100644 kubernetes/flux/repositories/helm/ot-helm.yaml delete mode 100644 kubernetes/flux/repositories/helm/passbolt.yaml delete mode 100644 kubernetes/flux/repositories/helm/piraeus.yaml delete mode 100644 kubernetes/flux/repositories/helm/podinfo.yaml create mode 100644 kubernetes/flux/repositories/helm/postfinance.yaml delete mode 100644 kubernetes/flux/repositories/helm/prometheus-community.yaml delete mode 100644 kubernetes/flux/repositories/helm/pyroscope.yaml delete mode 100644 kubernetes/flux/repositories/helm/questdb.yaml delete mode 100644 kubernetes/flux/repositories/helm/redpanda.yaml delete mode 100644 kubernetes/flux/repositories/helm/robusta.yaml delete mode 100644 kubernetes/flux/repositories/helm/rook-ceph.yaml delete mode 100644 kubernetes/flux/repositories/helm/runix.yaml delete mode 100644 kubernetes/flux/repositories/helm/sloth.yaml create mode 100644 kubernetes/flux/repositories/helm/spegel.yaml delete mode 100644 kubernetes/flux/repositories/helm/tf-controller.yaml delete mode 100644 kubernetes/flux/repositories/helm/timescale.yaml delete mode 100644 kubernetes/flux/repositories/helm/twuni.yaml delete mode 100644 kubernetes/flux/repositories/helm/vector.yaml delete mode 100644 kubernetes/flux/repositories/helm/vm.yaml delete mode 100644 kubernetes/flux/repositories/helm/vmware.yaml delete mode 100644 kubernetes/flux/repositories/helm/weave-gitops.yaml delete mode 100644 
kubernetes/flux/repositories/helm/wikijs.yaml
 delete mode 100644 kubernetes/flux/repositories/helm/woodpecker.yaml
 delete mode 100644 kubernetes/flux/repositories/oci/.gitkeep
 delete mode 100644 kubernetes/flux/repositories/oci/flamingo.yaml
 delete mode 100644 kubernetes/flux/vars/cluster-secrets-user.sops.yaml
 delete mode 100644 kubernetes/flux/vars/cluster-settings-user.yaml
 create mode 100644 makejinja.toml
 delete mode 100644 requirements.yaml
 create mode 100755 scripts/kubeconform.sh

diff --git a/.devcontainer/ci/Dockerfile b/.devcontainer/ci/Dockerfile
new file mode 100644
index 000000000..e6e945b41
--- /dev/null
+++ b/.devcontainer/ci/Dockerfile
@@ -0,0 +1,2 @@
+# Ref: https://github.com/devcontainers/ci/issues/191
+FROM mcr.microsoft.com/devcontainers/base:alpine
diff --git a/.devcontainer/ci/devcontainer.json b/.devcontainer/ci/devcontainer.json
new file mode 100644
index 000000000..b38aa2b51
--- /dev/null
+++ b/.devcontainer/ci/devcontainer.json
@@ -0,0 +1,27 @@
+{
+  "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json",
+  "name": "Flux Cluster Template (CI)",
+  "build": {
+    "dockerfile": "./Dockerfile",
+    "context": "."
+  },
+  "features": {
+    "./features": {}
+  },
+  "customizations": {
+    "vscode": {
+      "settings": {
+        "terminal.integrated.profiles.linux": {
+          "bash": {
+            "path": "/usr/bin/fish"
+          }
+        },
+        "terminal.integrated.defaultProfile.linux": "fish"
+      },
+      "extensions": [
+        "redhat.ansible",
+        "redhat.vscode-yaml"
+      ]
+    }
+  }
+}
diff --git a/.devcontainer/ci/features/devcontainer-feature.json b/.devcontainer/ci/features/devcontainer-feature.json
new file mode 100644
index 000000000..5f771e345
--- /dev/null
+++ b/.devcontainer/ci/features/devcontainer-feature.json
@@ -0,0 +1,6 @@
+{
+  "name": "Flux Cluster Template (Tools)",
+  "id": "cluster-template",
+  "version": "1.0.0",
+  "description": "Install Tools"
+}
diff --git a/.devcontainer/ci/features/install.sh b/.devcontainer/ci/features/install.sh
new file mode 100644
index 000000000..c21d79b0c
--- /dev/null
+++ b/.devcontainer/ci/features/install.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+set -e
+set -o noglob
+
+apk add --no-cache \
+    bash bind-tools ca-certificates curl gettext python3 \
+    py3-pip moreutils jq git iputils openssh-client \
+    starship fzf fish
+
+apk add --no-cache \
+    --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \
+    age helm kubectl sops
+
+sudo apk add --no-cache \
+    --repository=https://dl-cdn.alpinelinux.org/alpine/edge/testing \
+    lsd
+
+for app in \
+    "budimanjojo/talhelper!" \
\ + "cilium/cilium-cli!!?as=cilium&type=script" \ + "cli/cli!!?as=gh&type=script" \ + "cloudflare/cloudflared!!?as=cloudflared&type=script" \ + "derailed/k9s!!?as=k9s&type=script" \ + "direnv/direnv!!?as=direnv&type=script" \ + "fluxcd/flux2!!?as=flux&type=script" \ + "go-task/task!!?as=task&type=script" \ + "helmfile/helmfile!!?as=helmfile&type=script" \ + "kubecolor/kubecolor!!?as=kubecolor&type=script" \ + "kubernetes-sigs/krew!!?as=krew&type=script" \ + "kubernetes-sigs/kustomize!!?as=kustomize&type=script" \ + "stern/stern!!?as=stern&type=script" \ + "siderolabs/talos!!?as=talosctl&type=script" \ + "yannh/kubeconform!!?as=kubeconform&type=script" \ + "mikefarah/yq!!?as=yq&type=script" +do + echo "=== Installing ${app} ===" + curl -fsSL "https://i.jpillora.com/${app}" | bash +done + +# Create the fish configuration directory +mkdir -p /home/vscode/.config/fish/{completions,conf.d} + +# Setup autocompletions for fish +for tool in cilium flux helm helmfile k9s kubectl kustomize talhelper talosctl; do + $tool completion fish > /home/vscode/.config/fish/completions/$tool.fish +done +gh completion --shell fish > /home/vscode/.config/fish/completions/gh.fish +stern --completion fish > /home/vscode/.config/fish/completions/stern.fish +yq shell-completion fish > /home/vscode/.config/fish/completions/yq.fish + +# Add hooks into fish +tee /home/vscode/.config/fish/conf.d/hooks.fish > /dev/null < /dev/null < /dev/null < /dev/null <- - Changes made in the ansible directory -- name: area/github - color: "72ccf3" - description: >- - Changes made in the github directory -- name: area/kubernetes - color: "72ccf3" - description: >- - Changes made in the kubernetes directory -- name: area/addons - color: "72ccf3" - description: >- - Changes made in the addons directory -- name: area/bootstrap - color: "72ccf3" - description: >- - Changes made in the bootstrap directory +- { name: "area/ansible", color: "0e8a16" } +- { name: "area/bootstrap", color: "0e8a16" } +- { name: "area/github", color: "0e8a16" } +- { name: "area/kubernetes", color: "0e8a16" } +- { name: "area/taskfile", color: "0e8a16" } +# Distro +- { name: "distro/k3s", color: "ffc300" } +- { name: "distro/talos", color: "ffc300" } # Renovate -- name: renovate/ansible - color: "ffc300" -- name: renovate/container - color: "ffc300" -- name: renovate/github-action - color: "ffc300" -- name: renovate/github-release - color: "ffc300" -- name: renovate/helm - color: "ffc300" +- { name: "renovate/ansible", color: "027fa0" } +- { name: "renovate/container", color: "027fa0" } +- { name: "renovate/github-action", color: "027fa0" } +- { name: "renovate/github-release", color: "027fa0" } +- { name: "renovate/helm", color: "027fa0" } # Semantic Type -- name: type/patch - color: "FFEC19" -- name: type/minor - color: "FF9800" -- name: type/major - color: "F6412D" -- name: type/break - color: "F6412D" +- { name: "type/patch", color: "ffec19" } +- { name: "type/minor", color: "ff9800" } +- { name: "type/major", color: "f6412d" } +- { name: "type/break", color: "f6412d" } # Uncategorized -- name: bug - color: "ee0701" -- name: do-not-merge - color: "ee0701" -- name: docs - color: "F4D1B7" -- name: enhancement - color: "84b6eb" -- name: broken-links - color: "7B55D7" -- name: question - color: "cc317c" -- name: community - color: "0e8a16" +- { name: "hold/upstream", color: "ee0701" } diff --git a/.github/release.yaml b/.github/release.yaml new file mode 100644 index 000000000..1598e66bf --- /dev/null +++ b/.github/release.yaml @@ -0,0 +1,4 @@ +changelog: + 
exclude: + authors: + - renovate diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 9caf4bec7..0d94f3c73 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,69 +1,261 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:base", + "config:recommended", "docker:enableMajor", ":disableRateLimiting", ":dependencyDashboard", ":semanticCommits", - ":automergeDigest", - ":automergeBranch", - "github>oscaromeu/home-ops//.github/renovate/autoMerge.json5", - "github>oscaromeu/home-ops//.github/renovate/commitMessage.json5", - "github>oscaromeu/home-ops//.github/renovate/groups.json5", - "github>oscaromeu/home-ops//.github/renovate/labels.json5", - "github>oscaromeu/home-ops//.github/renovate/semanticCommits.json5", - "helpers:pinGitHubActionDigests" + ":automergeBranch" ], "dependencyDashboard": true, "dependencyDashboardTitle": "Renovate Dashboard 🤖", - "suppressNotifications": ["prIgnoreNotification"], + "suppressNotifications": ["prEditedNotification", "prIgnoreNotification"], "rebaseWhen": "conflicted", "schedule": ["on saturday"], "flux": { "fileMatch": [ - "(^|/)addons/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)ansible/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?(\\.j2)?$" + "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" ] }, "helm-values": { "fileMatch": [ - "(^|/)addons/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)ansible/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?(\\.j2)?$" + "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + ] + }, + "helmfile": { + "fileMatch": [ + "(^|/)helmfile\\.ya?ml(?:\\.j2)?$", + "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" ] }, "kubernetes": { "fileMatch": [ - "(^|/)addons/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)ansible/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?(\\.j2)?$" + "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" ] }, "kustomize": { "fileMatch": [ - "(^|/)kustomization\\.ya?ml(\\.j2)?$" + "(^|/)kustomization\\.ya?ml(?:\\.j2)?$" + ] + }, + "pip_requirements": { + "fileMatch": [ + "(^|/)[\\w-]*requirements(-\\w+)?\\.(txt|pip)(?:\\.j2)?$" ] }, - "regexManagers": [ + "ansible-galaxy": { + "fileMatch": [ + "(^|/)(galaxy|requirements)(\\.ansible)?\\.ya?ml(?:\\.j2)?$" + ] + }, + // commit message topics + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "to {{newVersion}}", + "commitMessageSuffix": "", + // package rules + "packageRules": [ + // automerge + { + "description": ["Auto merge Github Actions"], + "matchManagers": ["github-actions"], + "automerge": true, + "automergeType": "branch", + "ignoreTests": true, + "matchUpdateTypes": ["minor", "patch"] + }, + // groups + { + "description": ["Flux Group"], + "groupName": "Flux", + "matchPackagePatterns": ["flux"], + "matchDatasources": ["docker", "github-tags"], + "versioning": "semver", + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["System Upgrade Controller Group"], + "groupName": "System Upgrade Controller", + "matchPackagePatterns": ["system-upgrade-controller"], + "matchDatasources": ["docker", "github-releases"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + // custom versioning + { + "description": ["Use custom versioning for 
k3s"], + "matchDatasources": ["github-releases"], + "versioning": "regex:^v(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)(?<compatibility>\\+k.s)\\.?(?<build>\\d+)$", + "matchPackagePatterns": ["k3s"] + }, + // commit message topics + { + "matchDatasources": ["helm"], + "commitMessageTopic": "chart {{depName}}" + }, + { + "matchDatasources": ["docker"], + "commitMessageTopic": "image {{depName}}" + }, + // commit messages + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(container)!: " + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["digest"], + "semanticCommitType": "chore", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(helm)!: " + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(ansible)!: " + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-release)!: " + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-release" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-release" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-action)!: " + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-action" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-action" + }, + // labels + { + "matchUpdateTypes": ["major"], + "labels": ["type/major"] + }, + { + "matchUpdateTypes": ["minor"], + "labels": ["type/minor"] + }, + { + "matchUpdateTypes": ["patch"], + "labels": ["type/patch"] + }, + { + "matchDatasources": ["docker"], + "addLabels": ["renovate/container"] + }, + { + "matchDatasources": ["helm"], + "addLabels": ["renovate/helm"] + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "addLabels": ["renovate/ansible"] + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "addLabels": ["renovate/github-release"] + }, + { + "matchManagers": ["github-actions"], + "addLabels": ["renovate/github-action"] + } + ], + // custom managers + "customManagers": [ { - "description": "Process various other dependencies", + "customType": "regex", + 
"description": ["Process custom dependencies"], "fileMatch": [ - "(^|/)addons/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)ansible/.+\\.ya?ml(\\.j2)?(\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?(\\.j2)?$" + "(^|/).taskfiles/.+\\.ya?ml$", + "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", + "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" ], "matchStrings": [ - // Example: `k3s_release_version: "v1.27.3+k3s1"` - "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( versioning=(?<versioning>\\S+))?\n.*?\"(?<currentValue>.*)\"\n", - // Example: `- https://github.com/rancher/system-upgrade-controller/releases/download/v0.11.0/crd.yaml` - "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( versioning=(?<versioning>\\S+))?\n.*?-\\s(.*?)\/(?<currentValue>[^/]+)\/[^/]+\n", - // Example: apiVersion=helm.cattle.io/v1 kind=HelmChart - "datasource=(?<datasource>\\S+)\n.*?repo: (?<registryUrl>\\S+)\n.*?chart: (?<depName>\\S+)\n.*?version: (?<currentValue>\\S+)\n" + // # renovate: datasource=github-releases depName=k3s-io/k3s + // k3s_release_version: &version v1.29.0+k3s1 + // # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io + // version: 1.15.1 + "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\\n.+: (&\\S+\\s)?(?<currentValue>\\S+)", + // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller + // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml + "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)\\n.+/(?<currentValue>(v|\\d)[^/]+)" ], - "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}", - "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}" + "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}" } ] } diff --git a/.github/renovate/autoMerge.json5 b/.github/renovate/autoMerge.json5 deleted file mode 100644 index c8d5fef3a..000000000 --- a/.github/renovate/autoMerge.json5 +++ /dev/null @@ -1,21 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "packageRules": [ - { - "description": "Auto merge Github Actions", - "matchManagers": ["github-actions"], - "automerge": true, - "automergeType": "branch", - "ignoreTests": true, - "matchUpdateTypes": ["minor", "patch", "digest"] - }, - { - "description": "Auto merge container digests", - "matchDatasources": ["docker"], - "automerge": true, - "automergeType": "branch", - "ignoreTests": true, - "matchUpdateTypes": ["digest"] - } - ] -} diff --git a/.github/renovate/commitMessage.json5 b/.github/renovate/commitMessage.json5 deleted file mode 100644 index 3fea62872..000000000 --- a/.github/renovate/commitMessage.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "commitMessageTopic": "{{depName}}", - "commitMessageExtra": "to {{newVersion}}", - "commitMessageSuffix": "", - "packageRules": [ - { - "matchDatasources": ["helm"], - "commitMessageTopic": "chart {{depName}}" - }, - { - "matchDatasources": ["docker"], - "commitMessageTopic": "image {{depName}}" - } - ] -} diff --git a/.github/renovate/groups.json5 b/.github/renovate/groups.json5 deleted file mode 100644 index 98e4468d9..000000000 --- a/.github/renovate/groups.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "packageRules": [ - { - "description": "Flux Group", - "groupName": "Flux", - "matchPackagePatterns": ["flux"], - "matchDatasources": ["docker", "github-tags"], - "versioning": "semver", - "group": { - "commitMessageTopic": "{{{groupName}}} group" - }, - "separateMinorPatch": true - } - ] -} diff --git 
a/.github/renovate/labels.json5 b/.github/renovate/labels.json5 deleted file mode 100644 index 494472cb6..000000000 --- a/.github/renovate/labels.json5 +++ /dev/null @@ -1,37 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "packageRules": [ - { - "matchUpdateTypes": ["major"], - "labels": ["type/major"] - }, - { - "matchUpdateTypes": ["minor"], - "labels": ["type/minor"] - }, - { - "matchUpdateTypes": ["patch"], - "labels": ["type/patch"] - }, - { - "matchDatasources": ["docker"], - "addLabels": ["renovate/container"] - }, - { - "matchDatasources": ["helm"], - "addLabels": ["renovate/helm"] - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "addLabels": ["renovate/ansible"] - }, - { - "matchDatasources": ["github-releases", "github-tags"], - "addLabels": ["renovate/github-release"] - }, - { - "matchManagers": ["github-actions"], - "addLabels": ["renovate/github-action"] - } - ] -} diff --git a/.github/renovate/semanticCommits.json5 b/.github/renovate/semanticCommits.json5 deleted file mode 100644 index 614e44b39..000000000 --- a/.github/renovate/semanticCommits.json5 +++ /dev/null @@ -1,96 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "packageRules": [ - { - "matchDatasources": ["docker"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(container)!: " - }, - { - "matchDatasources": ["docker"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "container" - }, - { - "matchDatasources": ["docker"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": "fix", - "semanticCommitScope": "container" - }, - { - "matchDatasources": ["docker"], - "matchUpdateTypes": ["digest"], - "semanticCommitType": "chore", - "semanticCommitScope": "container" - }, - { - "matchDatasources": ["helm"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(helm)!: " - }, - { - "matchDatasources": ["helm"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "helm" - }, - { - "matchDatasources": ["helm"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": "fix", - "semanticCommitScope": "helm" - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(ansible)!: " - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "ansible" - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": "fix", - "semanticCommitScope": "ansible" - }, - { - "matchDatasources": ["github-releases", "github-tags"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(github-release)!: " - }, - { - "matchDatasources": ["github-releases", "github-tags"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "github-release" - }, - { - "matchDatasources": ["github-releases", "github-tags"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": "fix", - "semanticCommitScope": "github-release" - }, - { - "matchManagers": ["github-actions"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(github-action)!: " - }, - { - "matchManagers": ["github-actions"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "github-action" - }, - { - "matchManagers": ["github-actions"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": 
"fix", - "semanticCommitScope": "github-action" - } - ] -} diff --git a/.github/tests/config-k3s-ipv4.yaml b/.github/tests/config-k3s-ipv4.yaml new file mode 100644 index 000000000..7948fee58 --- /dev/null +++ b/.github/tests/config-k3s-ipv4.yaml @@ -0,0 +1,42 @@ +--- +skip_tests: true + +bootstrap_timezone: Etc/UTC +bootstrap_distribution: k3s +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: + - name: k8s-controller-0 + address: 10.10.10.100 + controller: true + ssh_user: fake + - name: k8s-worker-0 + address: 10.10.10.101 + controller: false + ssh_user: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.69.0.0/16 +bootstrap_service_network: 10.96.0.0/16 +bootstrap_controllers_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: + enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: + enabled: true + domain: fake + token: take + acme: + email: fake@example.com + production: false + tunnel: + account_id: fake + id: fake + secret: fake + ingress_vip: 10.10.10.252 + ingress_vip: 10.10.10.251 + gateway_vip: 10.10.10.253 diff --git a/.github/tests/config-k3s-ipv6.yaml b/.github/tests/config-k3s-ipv6.yaml new file mode 100644 index 000000000..5efa50c6c --- /dev/null +++ b/.github/tests/config-k3s-ipv6.yaml @@ -0,0 +1,42 @@ +--- +skip_tests: true + +bootstrap_timezone: Etc/UTC +bootstrap_distribution: k3s +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: + - name: k8s-controller-0 + address: 10.10.10.100 + controller: true + ssh_user: fake + - name: k8s-worker-0 + address: 10.10.10.101 + controller: false + ssh_user: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.42.0.0/16,fd7f:8f5:e87c:a::/64 +bootstrap_service_network: 10.43.0.0/16,fd7f:8f5:e87c:e::/112 +bootstrap_controllers_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: + enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: + enabled: true + domain: fake + token: take + acme: + email: fake@example.com + production: false + tunnel: + account_id: fake + id: fake + secret: fake + ingress_vip: 10.10.10.252 + ingress_vip: 10.10.10.251 + gateway_vip: 10.10.10.253 diff --git a/.github/tests/config-talos.yaml b/.github/tests/config-talos.yaml new file mode 100644 index 000000000..387c606d2 --- /dev/null +++ b/.github/tests/config-talos.yaml @@ -0,0 +1,46 @@ +--- +skip_tests: true + +bootstrap_timezone: Etc/UTC +bootstrap_distribution: talos +boostrap_talos: + schematic_id: "df491c50a5acc05b977ef00c32050e1ceb0df746e40b33c643ac8a9bfb7c7263" +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: + - name: k8s-controller-0 + address: 10.10.10.100 + controller: true + talos_disk: fake + talos_nic: fake + - name: k8s-worker-0 + address: 10.10.10.101 + controller: false + talos_disk: fake + talos_nic: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.69.0.0/16 +bootstrap_service_network: 10.96.0.0/16 +bootstrap_controllers_vip: 10.10.10.254 
+bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: + enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: + enabled: true + domain: fake + token: take + acme: + email: fake@example.com + production: false + tunnel: + account_id: fake + id: fake + secret: fake + ingress_vip: 10.10.10.252 + ingress_vip: 10.10.10.251 + gateway_vip: 10.10.10.253 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index 6a68e4b8f..000000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: ci -on: - push: - branches: - - master - - main -permissions: - contents: write -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5 - with: - python-version: 3.x - - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV - - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - key: mkdocs-material-${{ env.cache_id }} - path: .cache - restore-keys: | - mkdocs-material- - - run: pip install mkdocs-material - - run: mkdocs gh-deploy --force diff --git a/.github/workflows/devcontainer.yaml b/.github/workflows/devcontainer.yaml new file mode 100644 index 000000000..4729d6abe --- /dev/null +++ b/.github/workflows/devcontainer.yaml @@ -0,0 +1,58 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "devcontainer" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".devcontainer/ci/**"] + pull_request: + branches: ["main"] + paths: [".devcontainer/ci/**"] + schedule: + - cron: "0 0 * * 1" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + devcontainer: + if: ${{ github.repository == 'onedr0p/cluster-template' }} + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - if: ${{ github.event_name != 'pull_request' }} + name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push + uses: devcontainers/ci@v0.3 + env: + BUILDX_NO_DEFAULT_ATTESTATIONS: true + with: + imageName: ghcr.io/${{ github.repository }}/devcontainer + # cacheFrom: ghcr.io/${{ github.repository }}/devcontainer + imageTag: base,latest + platform: linux/amd64,linux/arm64 + configFile: .devcontainer/ci/devcontainer.json + push: ${{ github.event_name == 'pull_request' && 'never' || 'always' }} diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 000000000..1ebbb1393 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,108 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "e2e" + +on: + workflow_dispatch: + pull_request: + branches: ["main"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + configure: + if: ${{ github.repository == 'onedr0p/cluster-template' }} + name: 
configure + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + config-files: + - k3s-ipv4 + - k3s-ipv6 + - talos + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + id: setup-homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Python + uses: actions/setup-python@v5 + id: setup-python + with: + python-version: "3.11" # minimum supported version + + - name: Cache homebrew packages + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + id: cache-homebrew-packages + with: + key: homebrew-${{ runner.os }}-${{ steps.setup-homebrew.outputs.gems-hash }}-${{ hashFiles('.taskfiles/Workstation/Brewfile') }} + path: /home/linuxbrew/.linuxbrew + + - name: Cache venv + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + with: + key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('requirements.txt', 'requirements.yaml') }} + path: .venv + + - name: Setup Workflow Tools + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: brew install go-task + + - name: Run Workstation Brew tasks + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: task workstation:brew + + - name: Run Workstation venv tasks + shell: bash + run: task workstation:venv + + - name: Run Workstation direnv tasks + shell: bash + run: task workstation:direnv + + - name: Run Sops Age key task + shell: bash + run: task sops:age-keygen + + - name: Run init tasks + shell: bash + run: | + task init + cp ./.github/tests/config-${{ matrix.config-files }}.yaml ./config.yaml + export BOOTSTRAP_AGE_PUBLIC_KEY=$(sed -n 's/# public key: //gp' age.key) + envsubst < ./config.yaml | sponge ./config.yaml + + - name: Run configure task + shell: bash + run: task configure --yes + + - name: Run Talos tasks + if: ${{ startsWith(matrix.config-files, 'talos') }} + shell: bash + run: | + task talos:bootstrap-gensecret + task talos:bootstrap-genconfig + + - name: Run Ansible tasks + if: ${{ startsWith(matrix.config-files, 'k3s') }} + shell: bash + run: | + task ansible:deps force=false + task ansible:lint + task ansible:list + + - name: Run repo clean and reset tasks + shell: bash + run: | + task repository:clean + task repository:reset --yes diff --git a/.github/workflows/flux-diff.yaml b/.github/workflows/flux-diff.yaml index b0002f976..7548800f3 100644 --- a/.github/workflows/flux-diff.yaml +++ b/.github/workflows/flux-diff.yaml @@ -1,36 +1,66 @@ --- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json name: "Flux Diff" on: pull_request: branches: ["main"] - paths: ["kubernetes/**.yaml"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true jobs: flux-diff: name: Flux Diff runs-on: ubuntu-latest permissions: + contents: read pull-requests: write strategy: matrix: - path: ["kubernetes"] - resource: ["helmrelease", "kustomization"] + paths: ["kubernetes"] + resources: ["helmrelease", "kustomization"] steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: pull + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + ref: "${{ github.event.repository.default_branch }}" + path: default + - name: Diff Resources - uses: allenporter/flux-local/action/diff@e630daec1a1bcd24dc80f5100c6e1c7b6a091153 # 4.3.1 - id: diff + uses: 
docker://ghcr.io/allenporter/flux-local:main with: - sources: home-kubernetes - path: "${{ matrix.path }}" - resource: "${{ matrix.resource }}" + args: >- + diff ${{ matrix.resources }} + --unified 6 + --path /github/workspace/pull/${{ matrix.paths }}/flux + --path-orig /github/workspace/default/${{ matrix.paths }}/flux + --strip-attrs "helm.sh/chart,checksum/config,app.kubernetes.io/version,chart" + --limit-bytes 10000 + --all-namespaces + --sources "home-kubernetes" + --output-file diff.patch + + - name: Generate Diff + id: diff + run: | + cat diff.patch + echo "diff<> $GITHUB_OUTPUT + cat diff.patch >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT - if: ${{ steps.diff.outputs.diff != '' }} name: Add comment - uses: mshick/add-pr-comment@b8f338c590a895d50bcbfa6c5859251edc8952fc # v2.8.2 + uses: mshick/add-pr-comment@v2 with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - message-id: "${{ github.event.pull_request.number }}/${{ matrix.path }}/${{ matrix.resource }}" + message-id: "${{ github.event.pull_request.number }}/${{ matrix.paths }}/${{ matrix.resources }}" message-failure: Diff was not successful message: | ```diff diff --git a/.github/workflows/kubeconform.yaml b/.github/workflows/kubeconform.yaml new file mode 100644 index 000000000..58a63cc17 --- /dev/null +++ b/.github/workflows/kubeconform.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Kubeconform" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +env: + KUBERNETES_DIR: ./kubernetes + +jobs: + kubeconform: + name: Kubeconform + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + run: brew install fluxcd/tap/flux kubeconform kustomize + + - name: Run kubeconform + shell: bash + run: bash ./scripts/kubeconform.sh ${{ env.KUBERNETES_DIR }} diff --git a/.github/workflows/meta-sync-labels.yaml b/.github/workflows/label-sync.yaml similarity index 51% rename from .github/workflows/meta-sync-labels.yaml rename to .github/workflows/label-sync.yaml index c0b91717f..90804e0af 100644 --- a/.github/workflows/meta-sync-labels.yaml +++ b/.github/workflows/label-sync.yaml @@ -1,5 +1,6 @@ --- -name: "Meta Sync labels" +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Label Sync" on: workflow_dispatch: @@ -8,16 +9,15 @@ on: paths: [".github/labels.yaml"] jobs: - labels: - name: Sync Labels + label-sync: + name: Label Sync runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + uses: actions/checkout@v4 - name: Sync Labels - uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3 + uses: EndBug/label-sync@v2 with: config-file: .github/labels.yaml - token: "${{ secrets.GITHUB_TOKEN }}" delete-other-labels: true diff --git a/.github/workflows/meta-labeler.yaml b/.github/workflows/labeler.yaml similarity index 51% rename from .github/workflows/meta-labeler.yaml rename to .github/workflows/labeler.yaml index ad7209b52..d658c1d96 100644 --- a/.github/workflows/meta-labeler.yaml +++ b/.github/workflows/labeler.yaml @@ -1,23 +1,21 @@ --- -name: "Meta Labeler" +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Labeler" on: workflow_dispatch: pull_request_target: branches: ["main"] -permissions: - checks: write - contents: read - pull-requests: write - jobs: 
labeler: name: Labeler runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write steps: - name: Labeler - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 + uses: actions/labeler@v5 with: configuration-path: .github/labeler.yaml - repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/link-check.yaml b/.github/workflows/link-check.yaml deleted file mode 100644 index d48220796..000000000 --- a/.github/workflows/link-check.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -name: "Link Check" - -on: - workflow_dispatch: - schedule: - - cron: "0 0 * * 0" - -jobs: - link-check: - name: Link Check - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - - - name: Link Checker - uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # v1.10.0 - id: lychee - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - - - name: Find Link Checker Issue - id: link-checker-issue - uses: micalevisk/last-issue-action@0d40124cc99ac8601c2516007f0c98ef3d27537b # v2.3.0 - with: - state: open - labels: | - broken-links - - - name: Update Issue - uses: peter-evans/create-issue-from-file@433e51abf769039ee20ba1293a088ca19d573b7f # renovate: tag=v4.0.1 - with: - title: Broken links detected 🔗 - issue-number: "${{ steps.link-checker-issue.outputs.issue-number }}" - content-filepath: ./lychee/out.md - token: "${{ secrets.GITHUB_TOKEN }}" - labels: | - broken-links diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..ff8eabe44 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 1 * *" + +jobs: + release: + if: ${{ github.repository == 'onedr0p/cluster-template' }} + name: Release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Release + shell: bash + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + run: | + # Retrieve previous release tag + previous_tag="$(gh release list --limit 1 | awk '{ print $1 }')" + previous_major="${previous_tag%%\.*}" + previous_minor="${previous_tag#*.}" + previous_minor="${previous_minor%.*}" + previous_patch="${previous_tag##*.}" + # Determine next release tag + next_major_minor="$(date +'%Y').$(date +'%-m')" + if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then + echo "Month release already exists for year, incrementing patch number by 1" + next_patch="$((previous_patch + 1))" + else + echo "Month release does not exist for year, setting patch number to 0" + next_patch="0" + fi + # Create release + release_tag="${next_major_minor}.${next_patch}" + gh release create "${release_tag}" \ + --repo="${GITHUB_REPOSITORY}" \ + --title="${release_tag}" \ + --generate-notes diff --git a/.github/workflows/renovate.yaml b/.github/workflows/renovate.yaml deleted file mode 100644 index c9761629b..000000000 --- a/.github/workflows/renovate.yaml +++ /dev/null @@ -1,62 +0,0 @@ ---- - name: "Renovate" - - on: - workflow_dispatch: - inputs: - dryRun: - description: Dry Run - default: "false" - required: false - logLevel: - description: Log Level - default: debug - required: false - version: - description: Renovate version - default: latest - required: false - schedule: - - cron: "0 * * * *" # Every hour - push: - branches: ["main"] - paths: - - 
.github/renovate.json5 - - .github/renovate/**.json5 - - concurrency: - group: ${{ github.workflow }}-${{ github.event.number || github.ref }} - cancel-in-progress: true - - env: - LOG_LEVEL: "${{ inputs.logLevel || 'debug' }}" - RENOVATE_AUTODISCOVER: true - RENOVATE_AUTODISCOVER_FILTER: "${{ github.repository }}" - RENOVATE_DRY_RUN: "${{ inputs.dryRun == true }}" - RENOVATE_PLATFORM: github - RENOVATE_PLATFORM_COMMIT: true - WORKFLOW_RENOVATE_VERSION: "${{ inputs.version || 'latest' }}" - - jobs: - renovate: - name: Renovate - runs-on: self-hosted - steps: - - name: Generate Token - uses: actions/create-github-app-token@a0de6af83968303c8c955486bf9739a57d23c7f1 # v1 - id: app-token - with: - app-id: "${{ secrets.BOT_APP_ID }}" - private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" - - - name: Checkout - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 - with: - token: "${{ steps.app-token.outputs.token }}" - - - name: Renovate - uses: renovatebot/github-action@063e0c946b9c1af35ef3450efc44114925d6e8e6 # v40.1.11 - with: - configurationFile: .github/renovate.json5 - token: "${{ steps.app-token.outputs.token }}" - renovate-version: "${{ env.WORKFLOW_RENOVATE_VERSION }}" diff --git a/.github/workflows/trivy.yaml.disable b/.github/workflows/trivy.yaml.disable deleted file mode 100644 index b3bfd591d..000000000 --- a/.github/workflows/trivy.yaml.disable +++ /dev/null @@ -1,43 +0,0 @@ ---- - name: Run Aqua Security Trivy vulnerability scanner - - on: - push: - branches: - - main - pull_request: - - jobs: - trivy: - name: Trivy - runs-on: self-hosted - steps: - # https://github.com/marketplace/actions/harden-runner - - name: Harden Runner - uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0 - with: - egress-policy: audit - - # https://github.com/marketplace/actions/checkout - - name: Checkout Repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - # https://github.com/marketplace/actions/aqua-security-trivy - - name: Run Trivy vulnerability scanner in IaC mode - uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # 0.19.0 - with: - scan-type: config - hide-progress: false - format: sarif - output: trivy-results.sarif - exit-code: "1" - ignore-unfixed: true - severity: CRITICAL,HIGH - scan-ref: infra/terraform - - # Upload the results to GitHub's code scanning dashboard. 
- # https://github.com/github/codeql-action - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 - with: - sarif_file: trivy-results.sarif diff --git a/.gitignore b/.gitignore index 3967685d1..4f71d416f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,10 +3,24 @@ Thumbs.db # k8s kubeconfig +talosconfig .decrypted~*.yaml .config.env *.agekey *.pub *.key +# Private +.private +.bin # Ansible .venv* +# Taskfile +.task +# Brew +Brewfile.lock.json +# intellij +.idea +# wiki +wiki +# Bootstrap +/config.yaml diff --git a/.lycheeignore b/.lycheeignore deleted file mode 100644 index 8cbc880a9..000000000 --- a/.lycheeignore +++ /dev/null @@ -1,2 +0,0 @@ -https://dash.cloudflare.com/profile/api-tokens -https://www.mend.io/free-developer-tools/renovate/ diff --git a/.sops.yaml b/.sops.yaml index 5ff49a9d3..ebc5c9b1f 100644 --- a/.sops.yaml +++ b/.sops.yaml @@ -1,16 +1,12 @@ --- creation_rules: - - path_regex: kubernetes/.*\.sops\.ya?ml - encrypted_regex: "^(data|stringData)$" + - # IMPORTANT: This rule MUST be above the others + path_regex: talos/.*\.sops\.ya?ml key_groups: - age: - "age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8" - - path_regex: ansible/.*\.sops\.ya?ml - key_groups: - - age: - - "age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8" - # https://github.com/ansible-collections/community.sops/issues/153 - - path_regex: /dev/stdin + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" key_groups: - age: - "age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8" diff --git a/.taskfiles/Ansible/Taskfile.yaml b/.taskfiles/Ansible/Taskfile.yaml new file mode 100644 index 000000000..02322eafa --- /dev/null +++ b/.taskfiles/Ansible/Taskfile.yaml @@ -0,0 +1,88 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + ANSIBLE_LINT_FILE: "{{.ANSIBLE_DIR}}/.ansible-lint" + ANSIBLE_INVENTORY_FILE: "{{.ANSIBLE_DIR}}/inventory/hosts.yaml" + ANSIBLE_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.yaml" + ANSIBLE_PIP_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.txt" + +env: + ANSIBLE_COLLECTIONS_PATH: "{{.VIRTUAL_ENV}}/galaxy" + ANSIBLE_ROLES_PATH: "{{.VIRTUAL_ENV}}/galaxy/ansible_roles" + ANSIBLE_VARS_ENABLED: "host_group_vars" + ANSIBLE_LOCALHOST_WARNING: "False" + ANSIBLE_INVENTORY_UNPARSED_WARNING: "False" + +tasks: + + deps: + desc: Set up Ansible dependencies + deps: [":workstation:venv"] + cmds: + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}"' + - '{{.VIRTUAL_ENV}}/bin/ansible-galaxy install --role-file "{{.ANSIBLE_REQUIREMENTS_FILE}}" {{if eq .force "true"}}--force{{end}}' + preconditions: + - { msg: "Missing Ansible requirements file", sh: "test -f {{.ANSIBLE_REQUIREMENTS_FILE}}" } + - { msg: "Missing Pip requirements file", sh: "test -f {{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" } + sources: + - "{{.ANSIBLE_REQUIREMENTS_FILE}}" + - "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" + generates: + - "{{.VIRTUAL_ENV}}/bin/ansible" + - "{{.VIRTUAL_ENV}}/bin/ansible-galaxy" + vars: + force: '{{.force | default "true"}}' + + run: + desc: Run an Ansible playbook for configuring a cluster + summary: | + Args: + playbook: Playbook to run (required) + prompt: Run Ansible playbook '{{.playbook}}'... continue? 
+ deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible-playbook --inventory {{.ANSIBLE_INVENTORY_FILE}} {{.ANSIBLE_DIR}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}" + requires: + vars: ["playbook"] + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + poweroff: + desc: Shutdown all the k8s nodes + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} -a '/usr/bin/systemctl poweroff' --become" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + list: + desc: List all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --list-hosts" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + ping: + desc: Ping all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -m 'ping'" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + uptime: + desc: Uptime of all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -a 'uptime'" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + lint: + desc: Lint Ansible + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible-lint --config-file {{.ANSIBLE_LINT_FILE}} {{.ANSIBLE_DIR}}/**/*.yaml" + preconditions: + - { msg: "Missing Ansible lint file", sh: "test -f {{.ANSIBLE_LINT_FILE}}" } + + .reset: + internal: true + cmd: rm -rf {{.ANSIBLE_DIR}} diff --git a/.taskfiles/AnsibleTasks.yaml b/.taskfiles/AnsibleTasks.yaml deleted file mode 100644 index 0cc5786a2..000000000 --- a/.taskfiles/AnsibleTasks.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- -version: "3" - -vars: - ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks" - ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory" - -tasks: - - prepare: - desc: Prepare all the k8s nodes for running k3s - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-prepare.yaml - - install: - desc: Install Kubernetes on the nodes - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-installation.yaml - - rollout-update: - desc: Preform operating system updates and rollout restart the cluster - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-rollout-update.yaml - - nuke: - desc: Uninstall Kubernetes on the nodes - dir: "{{.ANSIBLE_DIR}}" - interactive: true - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-nuke.yaml - - reboot: - desc: Reboot all the k8s nodes - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-reboot.yaml - - poweroff: - desc: Shutdown all the k8s nodes - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible kubernetes -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml -a '/usr/bin/systemctl poweroff' --become - - list: - desc: List all the hosts - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --list-hosts - - ping: - desc: Ping all the hosts - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -m 'ping' - 
- uptime: - desc: Uptime of all the hosts - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -a 'uptime' - - reset-ceph: - desc: Reset Ceph Drives - dir: "{{.ANSIBLE_DIR}}" - cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-ceph-reset.yaml diff --git a/.taskfiles/BrewTasks.yaml b/.taskfiles/BrewTasks.yaml deleted file mode 100644 index fc8c4b1fb..000000000 --- a/.taskfiles/BrewTasks.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -version: "3" - -tasks: - - deps: - desc: Install workstation dependencies with Brew - cmd: brew install {{.DEPS}} {{.CLI_ARGS}} - preconditions: - - sh: command -v brew - msg: | - Homebrew is not installed. Using MacOS, Linux or WSL? - Head over to https://brew.sh to get up and running. - vars: - DEPS: >- - age - cilium-cli - cloudflared - fluxcd/tap/flux - helm - kubernetes-cli - kustomize - sops - stern - yq diff --git a/.taskfiles/ClusterTasks.yaml b/.taskfiles/ClusterTasks.yaml deleted file mode 100644 index 22b842028..000000000 --- a/.taskfiles/ClusterTasks.yaml +++ /dev/null @@ -1,82 +0,0 @@ ---- -version: "3" - -tasks: - - verify: - desc: Verify flux meets the prerequisites - cmd: flux check --pre - - install: - desc: Install Flux into your cluster - cmds: - - kubectl apply --kustomize {{.KUBERNETES_DIR}}/bootstrap - #- kubectl apply --kustomize {{.KUBERNETES_DIR}}/bootstrap-extra - - cat {{.SOPS_AGE_KEY_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin - - sops --decrypt {{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml | kubectl apply -f - - - sops --decrypt {{.KUBERNETES_DIR}}/flux/vars/cluster-secrets-user.sops.yaml | kubectl apply -f - - - kubectl apply -f {{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml - - kubectl apply -f {{.KUBERNETES_DIR}}/flux/vars/cluster-settings-user.yaml - - kubectl apply --kustomize {{.KUBERNETES_DIR}}/flux/config - preconditions: - - sh: test -f {{.SOPS_AGE_KEY_FILE}} - msg: | - Age key file is not found. Did you forget to create it? 
- vars: - SOPS_AGE_KEY_FILE: "{{.ROOT_DIR}}/age.key" - - reconcile: - desc: Force update Flux to pull in changes from your Git repository - cmd: flux reconcile -n flux-system kustomization cluster --with-source - - hr-restart: - desc: Restart all failed Helm Releases - cmds: - - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -L1 bash -c 'flux suspend hr $0 -n $1' - - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -L1 bash -c 'flux resume hr $0 -n $1' - - nodes: - desc: List all the nodes in your cluster - cmd: kubectl get nodes {{.CLI_ARGS | default "-o wide"}} - - pods: - desc: List all the pods in your cluster - cmd: kubectl get pods {{.CLI_ARGS | default "-A"}} - - kustomizations: - desc: List all the kustomizations in your cluster - cmd: kubectl get kustomizations {{.CLI_ARGS | default "-A"}} - - helmreleases: - desc: List all the helmreleases in your cluster - cmd: kubectl get helmreleases {{.CLI_ARGS | default "-A"}} - - helmrepositories: - desc: List all the helmrepositories in your cluster - cmd: kubectl get helmrepositories {{.CLI_ARGS | default "-A"}} - - gitrepositories: - desc: List all the gitrepositories in your cluster - cmd: kubectl get gitrepositories {{.CLI_ARGS | default "-A"}} - - certificates: - desc: List all the certificates in your cluster - cmds: - - kubectl get certificates {{.CLI_ARGS | default "-A"}} - - kubectl get certificaterequests {{.CLI_ARGS | default "-A"}} - - ingresses: - desc: List all the ingresses in your cluster - cmd: kubectl get ingress {{.CLI_ARGS | default "-A"}} - - resources: - desc: Gather common resources in your cluster, useful when asking for support - cmds: - - task: nodes - - task: kustomizations - - task: helmreleases - - task: helmrepositories - - task: gitrepositories - - task: certificates - - task: ingresses - - task: pods diff --git a/.taskfiles/Flux/Taskfile.yaml b/.taskfiles/Flux/Taskfile.yaml new file mode 100644 index 000000000..8f0c95ba5 --- /dev/null +++ b/.taskfiles/Flux/Taskfile.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + # renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator + PROMETHEUS_OPERATOR_VERSION: v0.74.0 + CLUSTER_SECRET_SOPS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml" + CLUSTER_SETTINGS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml" + GITHUB_DEPLOY_KEY_FILE: "{{.KUBERNETES_DIR}}/bootstrap/flux/github-deploy-key.sops.yaml" + +tasks: + + bootstrap: + desc: Bootstrap Flux into a Kubernetes cluster + cmds: + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename 
https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/bootstrap/flux + - cat {{.AGE_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin + - sops --decrypt {{.CLUSTER_SECRET_SOPS_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename {{.CLUSTER_SETTINGS_FILE}} + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/flux/config + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + + apply: + desc: Apply a Flux Kustomization resource for a cluster + summary: | + Args: + path: Path under apps containing the Flux Kustomization resource (ks.yaml) (required) + ns: Namespace the Flux Kustomization exists in (default: flux-system) + cmd: | + flux --kubeconfig {{.KUBECONFIG_FILE}} build ks $(basename {{.path}}) \ + --namespace {{.ns}} \ + --kustomization-file {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml \ + --path {{.KUBERNETES_DIR}}/apps/{{.path}} \ + {{- if contains "not found" .ks }}--dry-run \{{ end }} + | \ + kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side \ + --field-manager=kustomize-controller -f - + requires: + vars: ["path"] + vars: + ns: '{{.ns | default "flux-system"}}' + ks: + sh: flux --kubeconfig {{.KUBECONFIG_FILE}} --namespace {{.ns}} get kustomizations $(basename {{.path}}) 2>&1 + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Flux Kustomization for app {{.path}}", sh: "test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml" } + + reconcile: + desc: Force update Flux to pull in changes from your Git repository + cmd: flux --kubeconfig {{.KUBECONFIG_FILE}} reconcile --namespace flux-system kustomization cluster --with-source + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + + github-deploy-key: + cmds: + - kubectl create namespace flux-system --dry-run=client -o yaml | kubectl --kubeconfig {{.KUBECONFIG_FILE}} apply --filename - + - sops --decrypt {{.GITHUB_DEPLOY_KEY_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + - { msg: "Missing Github deploy key file", sh: "test -f {{.GITHUB_DEPLOY_KEY_FILE}}" } diff --git a/.taskfiles/Kubernetes/Taskfile.yaml b/.taskfiles/Kubernetes/Taskfile.yaml new file mode 100644 index 000000000..e4f52e0cb --- /dev/null +++ b/.taskfiles/Kubernetes/Taskfile.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + KUBECONFORM_SCRIPT: "{{.SCRIPTS_DIR}}/kubeconform.sh" + +tasks: + + resources: + desc: Gather common resources in your cluster, useful when asking for support + cmds: + - for: { var: resource } + cmd: kubectl get {{.ITEM}} {{.CLI_ARGS | default "-A"}} + vars: + resource: >- + nodes + gitrepositories + kustomizations + helmrepositories + helmreleases + certificates + certificaterequests + ingresses + pods + + kubeconform: + desc: Validate Kubernetes manifests with kubeconform + 
cmd: bash {{.KUBECONFORM_SCRIPT}} {{.KUBERNETES_DIR}} + preconditions: + - { msg: "Missing kubeconform script", sh: "test -f {{.KUBECONFORM_SCRIPT}}" } + + .reset: + internal: true + cmd: rm -rf {{.KUBERNETES_DIR}} diff --git a/.taskfiles/Repository/Taskfile.yaml b/.taskfiles/Repository/Taskfile.yaml new file mode 100644 index 000000000..e1e5f68c2 --- /dev/null +++ b/.taskfiles/Repository/Taskfile.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +tasks: + + clean: + desc: Clean files and directories no longer needed after cluster bootstrap + cmds: + - mkdir -p {{.PRIVATE_DIR}} + # Clean up CI + - rm -rf {{.ROOT_DIR}}/.github/tests + - rm -rf {{.ROOT_DIR}}/.github/workflows/e2e.yaml + # Clean up devcontainer + - rm -rf {{.ROOT_DIR}}/.devcontainer/ci + - rm -rf {{.ROOT_DIR}}/.github/workflows/devcontainer.yaml + # Move bootstrap directory to gitignored directory + - mv {{.BOOTSTRAP_DIR}} {{.PRIVATE_DIR}}/bootstrap-{{now | date "150405"}} + - mv {{.MAKEJINJA_CONFIG_FILE}} {{.PRIVATE_DIR}}/makejinja-{{now | date "150405"}}.toml + # Update renovate.json5 + - sed -i {{if eq OS "darwin"}}''{{end}} 's/(..\.j2)\?//g' {{.ROOT_DIR}}/.github/renovate.json5 + preconditions: + - { msg: "Missing bootstrap directory", sh: "test -d {{.BOOTSTRAP_DIR}}" } + - { msg: "Missing Renovate config file", sh: "test -f {{.ROOT_DIR}}/.github/renovate.json5" } + + reset: + desc: Reset templated configuration files + prompt: Reset templated configuration files... continue? + cmds: + - task: :ansible:.reset + - task: :kubernetes:.reset + - task: :sops:.reset + - task: :talos:.reset + + force-reset: + desc: Reset repo back to HEAD + prompt: Reset repo back to HEAD... continue? + cmds: + - task: reset + - git reset --hard HEAD + - git clean -f -d + - git pull origin main diff --git a/.taskfiles/Sops/Taskfile.yaml b/.taskfiles/Sops/Taskfile.yaml new file mode 100644 index 000000000..373957657 --- /dev/null +++ b/.taskfiles/Sops/Taskfile.yaml @@ -0,0 +1,41 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + SOPS_CONFIG_FILE: "{{.ROOT_DIR}}/.sops.yaml" + +tasks: + + age-keygen: + desc: Initialize Age Key for Sops + cmd: age-keygen --output {{.AGE_FILE}} + status: + - test -f "{{.AGE_FILE}}" + + encrypt: + desc: Encrypt all Kubernetes SOPS secrets that are not already encrypted + cmds: + - for: { var: file } + task: .encrypt-file + vars: + file: "{{.ITEM}}" + vars: + file: + sh: | + if [ -d "{{.KUBERNETES_DIR}}" ]; then + find "{{.KUBERNETES_DIR}}" -type f -name "*.sops.*" -exec grep -L "ENC\[AES256_GCM" {} \; + fi + + .encrypt-file: + internal: true + cmd: sops --encrypt --in-place {{.file}} + requires: + vars: ["file"] + preconditions: + - { msg: "Missing Sops config file", sh: "test -f {{.SOPS_CONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + + .reset: + internal: true + cmd: rm -rf {{.SOPS_CONFIG_FILE}} diff --git a/.taskfiles/Talos/Taskfile.yaml b/.taskfiles/Talos/Taskfile.yaml new file mode 100644 index 000000000..97133078e --- /dev/null +++ b/.taskfiles/Talos/Taskfile.yaml @@ -0,0 +1,110 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + TALOS_DIR: "{{.KUBERNETES_DIR}}/bootstrap/talos" + TALHELPER_SECRET_FILE: "{{.TALOS_DIR}}/talsecret.sops.yaml" + TALHELPER_CONFIG_FILE: "{{.TALOS_DIR}}/talconfig.yaml" + +env: + TALOSCONFIG: "{{.TALOS_DIR}}/clusterconfig/talosconfig" + +tasks: + + bootstrap: + desc: Bootstrap the Talos cluster + 
dir: "{{.TALOS_DIR}}" + cmds: + - task: bootstrap-gensecret + - task: bootstrap-genconfig + - task: bootstrap-apply + - task: bootstrap-install + - task: fetch-kubeconfig + - task: bootstrap-apps + - talosctl health --server=false + + bootstrap-gensecret: + desc: Generate the Talos secrets + dir: "{{.TALOS_DIR}}" + cmds: + - talhelper gensecret > {{.TALHELPER_SECRET_FILE}} + - task: :sops:.encrypt-file + vars: + file: "{{.TALHELPER_SECRET_FILE}}" + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + status: + - test -f "{{.TALHELPER_SECRET_FILE}}" + + bootstrap-genconfig: + desc: Generate the Talos configs + dir: "{{.TALOS_DIR}}" + cmd: talhelper genconfig --secret-file {{.TALHELPER_SECRET_FILE}} + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + - { msg: "Missing talhelper secret file", sh: "test -f {{.TALHELPER_SECRET_FILE}}" } + + bootstrap-apply: + desc: Apply the Talos configs to the nodes + dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand apply --extra-flags=--insecure | bash + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + + bootstrap-install: + desc: Install the Talos cluster + dir: "{{.TALOS_DIR}}" + cmds: + - echo "Installing Talos... ignore the errors and be patient" + - until talhelper gencommand bootstrap | bash; do sleep 10; done + - sleep 10 + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + + bootstrap-apps: + desc: Bootstrap core apps needed for Talos + dir: "{{.TALOS_DIR}}" + cmds: + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done + - helmfile --kubeconfig {{.KUBECONFIG_FILE}} --file ./apps/helmfile.yaml apply --skip-diff-on-install --suppress-diff + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + + upgrade-talos: + desc: Upgrade talos on a node + cmd: talosctl --nodes {{.node}} upgrade --image {{.image}} --preserve=true --reboot-mode=default + requires: + vars: ["node", "image"] + preconditions: + - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + + upgrade-k8s: + desc: Upgrade k8s on a node + cmd: talosctl --nodes {{.node}} upgrade-k8s --to {{.to}} + requires: + vars: ["node", "to"] + preconditions: + - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + + fetch-kubeconfig: + desc: Generate talos kubeconfig + dir: "{{.TALOS_DIR}}" + cmd: until talhelper gencommand kubeconfig --extra-flags "{{.ROOT_DIR}} --force" | bash; do sleep 10; done + + soft-nuke: + desc: Resets nodes back to maintenance mode so you can re-deploy again straight after + prompt: This will destroy your cluster and reset the nodes back to maintenance mode... continue? + dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand reset --extra-flags "--reboot --system-labels-to-wipe STATE --system-labels-to-wipe EPHEMERAL --graceful=false --wait=false" | bash + + hard-nuke: + desc: Resets nodes back completely and reboots them + prompt: This will destroy your cluster and reset the nodes... continue? 
+ dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand reset --extra-flags "--reboot --graceful=false --wait=false" | bash + + .reset: + internal: true + cmd: rm -rf {{.TALOS_DIR}} diff --git a/.taskfiles/Workstation/Archfile b/.taskfiles/Workstation/Archfile new file mode 100644 index 000000000..b1ad3160c --- /dev/null +++ b/.taskfiles/Workstation/Archfile @@ -0,0 +1,17 @@ +age +cloudflared-bin +direnv +flux-bin +go-task +go-yq +helm +helmfile +jq +kubeconform +kubectl-bin +kustomize +moreutils +sops +stern-bin +talhelper-bin +talosctl diff --git a/.taskfiles/Workstation/Brewfile b/.taskfiles/Workstation/Brewfile new file mode 100644 index 000000000..59688345b --- /dev/null +++ b/.taskfiles/Workstation/Brewfile @@ -0,0 +1,20 @@ +tap "fluxcd/tap" +tap "go-task/tap" +tap "siderolabs/tap" +brew "age" +brew "cloudflared" +brew "direnv" +brew "fluxcd/tap/flux" +brew "go-task/tap/go-task" +brew "helm" +brew "helmfile" +brew "jq" +brew "kubeconform" +brew "kubernetes-cli" +brew "kustomize" +brew "moreutils" +brew "sops" +brew "stern" +brew "talhelper" +brew "talosctl" +brew "yq" diff --git a/.taskfiles/Workstation/Taskfile.yaml b/.taskfiles/Workstation/Taskfile.yaml new file mode 100644 index 000000000..09f309f6c --- /dev/null +++ b/.taskfiles/Workstation/Taskfile.yaml @@ -0,0 +1,71 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + ARCHFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Archfile" + BREWFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Brewfile" + GENERIC_BIN_DIR: "{{.ROOT_DIR}}/.bin" + +tasks: + + direnv: + desc: Run direnv hooks + cmd: direnv allow . + status: + - "[[ $(direnv status --json | jq '.state.foundRC.allowed') == 0 ]]" + - "[[ $(direnv status --json | jq '.state.loadedRC.allowed') == 0 ]]" + + venv: + desc: Set up virtual environment + cmds: + - "{{.PYTHON_BIN}} -m venv {{.VIRTUAL_ENV}}" + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade pip setuptools wheel' + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.PIP_REQUIREMENTS_FILE}}"' + sources: + - "{{.PIP_REQUIREMENTS_FILE}}" + generates: + - "{{.VIRTUAL_ENV}}/pyvenv.cfg" + preconditions: + - { msg: "Missing Pip requirements file", sh: "test -f {{.PIP_REQUIREMENTS_FILE}}" } + + brew: + desc: Install workstation dependencies with Brew + cmd: brew bundle --file {{.BREWFILE}} + preconditions: + - { msg: "Missing Homebrew", sh: "command -v brew" } + - { msg: "Missing Brewfile", sh: "test -f {{.BREWFILE}}" } + + arch: + desc: Install Arch workstation dependencies with Paru Or Yay + cmd: "{{.helper}} -Syu --needed --noconfirm --noprogressbar $(cat {{.ARCHFILE}} | xargs)" + vars: + helper: + sh: "command -v yay || command -v paru" + preconditions: + - { msg: "Missing Archfile", sh: "test -f {{.ARCHFILE}}" } + + generic-linux: + desc: Install CLI tools into the projects .bin directory using curl + dir: "{{.GENERIC_BIN_DIR}}" + platforms: ["linux/amd64", "linux/arm64"] + cmds: + - for: + - budimanjojo/talhelper?as=talhelper&type=script + - cloudflare/cloudflared?as=cloudflared&type=script + - FiloSottile/age?as=age&type=script + - fluxcd/flux2?as=flux&type=script + - getsops/sops?as=sops&type=script + - helmfile/helmfile?as=helmfile&type=script + - jqlang/jq?as=jq&type=script + - kubernetes-sigs/kustomize?as=kustomize&type=script + - siderolabs/talos?as=talosctl&type=script + - yannh/kubeconform?as=kubeconform&type=script + - mikefarah/yq?as=yq&type=script + cmd: curl -fsSL "https://i.jpillora.com/{{.ITEM}}" | bash + - cmd: curl -LO 
"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + platforms: ["linux/amd64"] + - cmd: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + platforms: ["linux/arm64"] + - cmd: chmod +x kubectl + - cmd: curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | USE_SUDO="false" HELM_INSTALL_DIR="." bash diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 3c10ab9d2..c8f112105 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -3,11 +3,13 @@ "albert.TabOut", "britesnow.vscode-toggle-quotes", "fcrespo82.markdown-table-formatter", + "mikestead.dotenv", "mitchdenny.ecdc", "redhat.ansible", "signageos.signageos-vscode-sops", "will-stone.in-any-case", "EditorConfig.editorconfig", "PKief.material-icon-theme", + "Gruntfuggly.todo-tree" ] } diff --git a/.vscode/settings.json b/.vscode/settings.json index 33a015e98..8f29572b7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,20 +1,27 @@ { + "ansible.ansible.path": ".venv/bin/ansible", + "ansible.python.activationScript": ".venv/bin/activate", + "ansible.python.interpreterPath": ".venv/bin/python3", + "ansible.validation.enabled": true, + "ansible.validation.lint.arguments": "-c ansible/.ansible-lint", + "ansible.validation.lint.enabled": true, + "ansible.validation.lint.path": ".venv/bin/ansible-lint", "files.associations": { "*.json5": "jsonc", - "**/ansible/**/*.yml": "ansible", - "**/ansible/**/*.sops.yml": "yaml", - "**/ansible/**/inventory/**/*.yml": "yaml", - "**/kubernetes/**/*.sops.toml": "plaintext" + "./ansible/**/*.yaml": "ansible", + "./ansible/**/*.sops.yaml": "yaml", + "./ansible/**/inventory/**/*.yaml": "yaml", + "./kubernetes/**/*.sops.toml": "plaintext" }, + "sops.defaults.ageKeyFile": "age.key", "yaml.schemas": { - "ansible": "ansible/*.yml", - "Kubernetes": "kubernetes/*.yaml" + "ansible": "./ansible/*.yaml", + "Kubernetes": "./kubernetes/*.yaml" }, - "editor.bracketPairColorization.enabled": true, - "editor.guides.bracketPairs": true, - "editor.guides.bracketPairsHorizontal": true, - "editor.guides.highlightActiveBracketPair": true, - "editor.hover.delay": 1500, - "files.trimTrailingWhitespace": true, - "ansible.python.interpreterPath": "/usr/bin/python3", + "vs-kubernetes": { + "vs-kubernetes.kubeconfig": "./kubeconfig", + "vs-kubernetes.knownKubeconfigs": [ + "./kubeconfig" + ] + } } diff --git a/README copy.md b/README copy.md new file mode 100644 index 000000000..4a57ecfdd --- /dev/null +++ b/README copy.md @@ -0,0 +1,529 @@ +# ⛵ Cluster Template + +Welcome to my opinionated and extensible template for deploying a single Kubernetes cluster. The goal of this project is to make it easier for people interested in using Kubernetes to deploy a cluster at home on bare-metal or VMs. + +At a high level this project makes use of [makejinja](https://github.com/mirkolenz/makejinja) to read in a [configuration file](./config.sample.yaml) which renders out templates that will allow you to install and manage your Kubernetes cluster with. + +## ✨ Features + +The features included will depend on the type of configuration you want to use. There are currently **2 different types** of **configurations** available with this template. + +1. **"Flux cluster"** - a Kubernetes distribution of your choosing: [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos). 
Deploys an opinionated implementation of [Flux](https://github.com/fluxcd/flux2) using [GitHub](https://github.com/) as the Git provider and [sops](https://github.com/getsops/sops) to manage secrets.
+
+    - **Required:** Debian 12 or Talos Linux installed on bare metal (or VMs) and some knowledge of [Containers](https://opencontainers.org/) and [YAML](https://yaml.org/). Some knowledge of [Git](https://git-scm.com/) practices & terminology is also required.
+    - **Components:** [Cilium](https://github.com/cilium/cilium) and [kube-vip](https://github.com/kube-vip/kube-vip) _(k3s)_. [flux](https://github.com/fluxcd/flux2), [cert-manager](https://github.com/cert-manager/cert-manager), [spegel](https://github.com/spegel-org/spegel), [reloader](https://github.com/stakater/Reloader), [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) _(k3s)_, and [openebs](https://github.com/openebs/openebs).
+
+2. **"Flux cluster with Cloudflare"** - An addition to "**Flux cluster**" that provides DNS and SSL with [Cloudflare](https://www.cloudflare.com/). [Cloudflare Tunnel](https://www.cloudflare.com/products/tunnel/) is also included to provide external access to certain applications deployed in your cluster.
+
+    - **Required:** A Cloudflare account with a domain managed in your Cloudflare account.
+    - **Components:** [ingress-nginx](https://github.com/kubernetes/ingress-nginx/), [external-dns](https://github.com/kubernetes-sigs/external-dns) and [cloudflared](https://github.com/cloudflare/cloudflared).
+
+**Other features include:**
+
+- A [Renovate](https://www.mend.io/renovate)-ready repository with pull request diffs provided by [flux-local](https://github.com/allenporter/flux-local)
+- Integrated [GitHub Actions](https://github.com/features/actions) with helpful workflows.
+
+## 💻 Machine Preparation
+
+Hopefully some of this piqued your interest! If you are marching forward, now is a good time to choose whether you will deploy a Kubernetes cluster with [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos).
+
+### System requirements
+
+> [!NOTE]
+> 1. The included behaviour of Talos or k3s is that all nodes are able to run workloads, **including** the controller nodes. **Worker nodes** are therefore **optional**.
+> 2. Do you have 3 or more nodes? It is highly recommended to make 3 of them controller nodes for a highly available control plane.
+> 3. Running the cluster on Proxmox VE? My thoughts and recommendations about that are documented [here](https://onedr0p.github.io/home-ops/notes/proxmox-considerations.html).
+
+| Role    | Cores    | Memory        | System Disk               |
+|---------|----------|---------------|---------------------------|
+| Control | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe |
+| Worker  | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe |
+| _\* recommended_ |
+
+### Talos
+
+1. Download the latest stable release of Talos from their [GitHub releases](https://github.com/siderolabs/talos/releases). You will want to grab either `metal-amd64.iso` or `metal-rpi_generic-arm64.raw.xz` depending on your system.
+
+2. Take note of the OS drive serial numbers; you will need them later on.
+
+3. Flash the iso or raw file to a USB drive and boot to Talos on your nodes with it.
+
+4. Continue on to 🚀 [**Getting Started**](#-getting-started)
+
+### k3s (AMD64)
+
+1.
Download the latest stable release of Debian from [here](https://cdimage.debian.org/debian-cd/current/amd64/iso-dvd), then follow [this guide](https://www.linuxtechi.com/how-to-install-debian-12-step-by-step) to get it installed. Deviations from the guide: + + ```txt + Choose "Guided - use entire disk" + Choose "All files in one partition" + Delete Swap partition + Uncheck all Debian desktop environment options + ``` + +2. [Post install] Remove CD/DVD as apt source + + ```sh + su - + sed -i '/deb cdrom/d' /etc/apt/sources.list + apt update + exit + ``` + +3. [Post install] Enable sudo for your non-root user + + ```sh + su - + apt update + apt install -y sudo + usermod -aG sudo ${username} + echo "${username} ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/${username} + exit + newgrp sudo + sudo apt update + ``` + +4. [Post install] Add SSH keys (or use `ssh-copy-id` on the client that is connecting) + + 📍 _First make sure your ssh keys are up-to-date and added to your github account as [instructed](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account)._ + + ```sh + mkdir -m 700 ~/.ssh + sudo apt install -y curl + curl https://github.com/${github_username}.keys > ~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + ``` + +### k3s (RasPi4) + +
+Click here to read about using a RasPi4 + + +> [!NOTE] +> 1. It is recommended to have an 8GB RasPi model. Most important is to **boot from an external SSD/NVMe** rather than an SD card. This is [supported natively](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html), however if you have an early model you may need to [update the bootloader](https://www.tomshardware.com/how-to/boot-raspberry-pi-4-usb) first. +> 2. Check the [power requirements](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html#power-supply) if using a PoE Hat and a SSD/NVMe dongle. + +1. Download the latest stable release of Debian from [here](https://raspi.debian.net/tested-images). _**Do not** use Raspbian or DietPi or any other flavor Linux OS._ + +2. Flash the image onto an SSD/NVMe drive. + +3. Re-mount the drive to your workstation and then do the following (per the [official documentation](https://raspi.debian.net/defaults-and-settings)): + + ```txt + Open 'sysconf.txt' in a text editor and save it upon updating the information below + - Change 'root_authorized_key' to your desired public SSH key + - Change 'root_pw' to your desired root password + - Change 'hostname' to your desired hostname + ``` + +4. Connect SSD/NVMe drive to the Raspberry Pi 4 and power it on. + +5. [Post install] SSH into the device with the `root` user and then create a normal user account with `adduser ${username}` + +6. [Post install] Follow steps 3 and 4 from [k3s (AMD64)](##k3s-amd64). + +7. [Post install] Install `python3` which is needed by Ansible. + + ```sh + sudo apt install -y python3 + ``` + +8. Continue on to 🚀 [**Getting Started**](#-getting-started) + +
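+
+📍 _Not sure where to find the OS drive serial numbers mentioned in the Talos preparation steps above? One option (a sketch, not part of the official flow) is to query a node while it is sitting in Talos maintenance mode after booting the installer image:_
+
+```sh
+# Maintenance mode has no credentials yet, hence --insecure.
+# Replace 192.168.1.10 with the IP shown on the node's console.
+# On older talosctl releases the equivalent command was `talosctl disks --insecure`.
+talosctl get disks --insecure --nodes 192.168.1.10
+```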
+
+## 🚀 Getting Started
+
+Once you have installed Talos or Debian on your nodes, there are six stages to getting a Flux-managed cluster up and running.
+
+> [!NOTE]
+> For all stages below the commands **MUST** be run on your personal workstation within your repository directory
+
+### 🎉 Stage 1: Create a Git repository
+
+1. Create a new **public** repository by clicking the big green "Use this template" button at the top of this page.
+
+2. Clone **your new repo** to your local workstation and `cd` into it.
+
+3. Continue on to 🌱 [**Stage 2**](#-stage-2-setup-your-local-workstation)
+
+### 🌱 Stage 2: Setup your local workstation
+
+You have two different options for setting up your local workstation.
+
+- First option is using a `devcontainer` which requires you to have Docker and VSCode installed. This method is the fastest to get going because all the required CLI tools are provided for you in my [devcontainer](https://github.com/onedr0p/cluster-template/pkgs/container/cluster-template%2Fdevcontainer) image.
+- The second option is setting up the CLI tools directly on your workstation.
+
+#### Devcontainer method
+
+1. Start Docker and open your repository in VSCode. There will be a pop-up asking you to use the `devcontainer`, click the button to start using it.
+
+2. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration)
+
+#### Non-devcontainer method
+
+1. Install the most recent version of [task](https://taskfile.dev/), see the [installation docs](https://taskfile.dev/installation/) for other supported platforms.
+
+    ```sh
+    # Homebrew
+    brew install go-task
+    # or, Arch
+    pacman -S --noconfirm go-task && ln -sf /usr/bin/go-task /usr/local/bin/task
+    ```
+
+2. Install the most recent version of [direnv](https://direnv.net/), see the [installation docs](https://direnv.net/docs/installation.html) for other supported platforms.
+
+    ```sh
+    # Homebrew
+    brew install direnv
+    # or, Arch
+    pacman -S --noconfirm direnv
+    ```
+
+3. [Hook `direnv` into your preferred shell](https://direnv.net/docs/hook.html), then run:
+
+    ```sh
+    task workstation:direnv
+    ```
+
+    📍 _**Verify** that `direnv` is set up properly by opening a new terminal and `cd`ing into your repository. You should see something like:_
+
+    ```sh
+    cd /path/to/repo
+    direnv: loading /path/to/repo/.envrc
+    direnv: export +ANSIBLE_COLLECTIONS_PATH ... +VIRTUAL_ENV ~PATH
+    ```
+
+4. Install the additional **required** CLI tools
+
+    📍 _**Not using Homebrew or ArchLinux?** Try using the generic Linux task below, if that fails check out the [Brewfile](.taskfiles/Workstation/Brewfile)/[Archfile](.taskfiles/Workstation/Archfile) for what CLI tools are needed and install them._
+
+    ```sh
+    # Homebrew
+    task workstation:brew
+    # or, Arch with yay/paru
+    task workstation:arch
+    # or, Generic Linux (YMMV, this pulls binaries in to ./.bin)
+    task workstation:generic-linux
+    ```
+
+5. Set up a Python virtual environment by running the following task command.
+
+    📍 _This command requires Python 3.11+ to be installed._
+
+    ```sh
+    task workstation:venv
+    ```
+
+6. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration)
+
+### 🔧 Stage 3: Bootstrap configuration
+
+> [!NOTE]
+> The [config.sample.yaml](./config.sample.yaml) file contains config that is **vital** to the bootstrap process.
+
+1. Generate the `config.yaml` from the [config.sample.yaml](./config.sample.yaml) configuration file.
+
+    ```sh
+    task init
+    ```
+
+2. Fill out the `config.yaml` configuration file using the comments in that file as a guide.
+
+3.
Run the following command which will generate all the files needed to continue.
+
+    ```sh
+    task configure
+    ```
+
+4. Push your changes to git
+
+    📍 _**Verify** all the `./kubernetes/**/*.sops.*` files are **encrypted** with SOPS_
+
+    ```sh
+    git add -A
+    git commit -m "Initial commit :rocket:"
+    git push
+    ```
+
+5. Continue on to ⚡ [**Stage 4**](#-stage-4-prepare-your-nodes-for-kubernetes)
+
+### ⚡ Stage 4: Prepare your nodes for Kubernetes
+
+> [!NOTE]
+> For **Talos** skip ahead to ⛵ [**Stage 5**](#-stage-5-install-kubernetes)
+
+#### k3s
+
+📍 _Here we will be running an Ansible playbook to prepare your nodes for running a Kubernetes cluster._
+
+1. Ensure you are able to SSH into your nodes from your workstation using a private SSH key **without a passphrase** (for example using an SSH agent). This lets Ansible interact with your nodes.
+
+2. Install the Ansible dependencies
+
+    ```sh
+    task ansible:deps
+    ```
+
+3. Verify Ansible can view your config and ping your nodes
+
+    ```sh
+    task ansible:list
+    task ansible:ping
+    ```
+
+4. Run the Ansible prepare playbook (nodes will reboot when done)
+
+    ```sh
+    task ansible:run playbook=cluster-prepare
+    ```
+
+5. Continue on to ⛵ [**Stage 5**](#-stage-5-install-kubernetes)
+
+### ⛵ Stage 5: Install Kubernetes
+
+#### Talos
+
+1. Deploy your cluster and bootstrap it. This generates secrets, generates the config files for your nodes and applies them. It bootstraps the cluster afterwards, fetches the kubeconfig file and installs Cilium and kubelet-csr-approver. It finishes with some health checks.
+
+    ```sh
+    task talos:bootstrap
+    ```
+
+2. ⚠️ It might take a while for the cluster to be set up (10+ minutes is normal), during which time you will see a variety of error messages like: "couldn't get current server API group list," "error: no matching resources found", etc. This is normal. If this step gets interrupted, e.g. by pressing Ctrl + C, you likely will need to [nuke the cluster](#-nuke) before trying again.
+
+#### k3s
+
+1. Install Kubernetes depending on the distribution you chose
+
+    ```sh
+    task ansible:run playbook=cluster-installation
+    ```
+
+#### Cluster validation
+
+1. The `kubeconfig` for interacting with your cluster should have been created in the root of your repository.
+
+2. Verify the nodes are online
+
+    📍 _If this command **fails** you likely haven't configured `direnv` as [mentioned previously](#non-devcontainer-method) in the guide._
+
+    ```sh
+    kubectl get nodes -o wide
+    # NAME    STATUS   ROLES                       AGE   VERSION
+    # k8s-0   Ready    control-plane,etcd,master   1h    v1.29.1
+    # k8s-1   Ready    worker                      1h    v1.29.1
+    ```
+
+3. Continue on to 🔹 [**Stage 6**](#-stage-6-install-flux-in-your-cluster)
+
+### 🔹 Stage 6: Install Flux in your cluster
+
+1. Verify Flux can be installed
+
+    ```sh
+    flux check --pre
+    # ► checking prerequisites
+    # ✔ kubectl 1.27.3 >=1.18.0-0
+    # ✔ Kubernetes 1.27.3+k3s1 >=1.16.0-0
+    # ✔ prerequisites checks passed
+    ```
+
+2. Install Flux and sync the cluster to the Git repository
+
+    📍 _Run `task flux:github-deploy-key` first if using a private repository._
+
+    ```sh
+    task flux:bootstrap
+    # namespace/flux-system configured
+    # customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created
+    # ...
+    ```
+
+3.
Verify Flux components are running in the cluster
+
+    ```sh
+    kubectl -n flux-system get pods -o wide
+    # NAME                                       READY   STATUS    RESTARTS   AGE
+    # helm-controller-5bbd94c75-89sb4            1/1     Running   0          1h
+    # kustomize-controller-7b67b6b77d-nqc67      1/1     Running   0          1h
+    # notification-controller-7c46575844-k4bvr   1/1     Running   0          1h
+    # source-controller-7d6875bcb4-zqw9f         1/1     Running   0          1h
+    ```
+
+### 🎤 Verification Steps
+
+_Mic check, 1, 2_ - In a few moments applications should be lighting up like Christmas in July 🎄
+
+1. Output all the common resources in your cluster.
+
+    📍 _Feel free to use the provided [kubernetes tasks](.taskfiles/Kubernetes/Taskfile.yaml) for validation of cluster resources or continue to get familiar with the `kubectl` and `flux` CLI tools._
+
+    ```sh
+    task kubernetes:resources
+    ```
+
+2. ⚠️ It might take `cert-manager` a while to generate certificates, this is normal so be patient.
+
+3. 🏆 **Congratulations**, if all goes smoothly you will have a Kubernetes cluster managed by Flux and your Git repository is driving the state of your cluster.
+
+4. 🧠 Now it's time to pause and go get some motel motor oil ☕ and admire that you made it this far!
+
+## 📣 Flux w/ Cloudflare post installation
+
+#### 🌐 Public DNS
+
+The `external-dns` application created in the `networking` namespace will handle creating public DNS records. By default, `echo-server` and the `flux-webhook` are the only subdomains reachable from the public internet. In order to make additional applications public you must set the correct ingress class name and ingress annotations like in the HelmRelease for `echo-server`.
+
+#### 🏠 Home DNS
+
+`k8s_gateway` will provide DNS resolution to external Kubernetes resources (i.e. points of entry to the cluster) from any device that uses your home DNS server. For this to work, your home DNS server must be configured to forward DNS queries for `${bootstrap_cloudflare.domain}` to `${bootstrap_cloudflare.gateway_vip}` instead of the upstream DNS server(s) it normally uses. This is a form of **split DNS** (aka split-horizon DNS / conditional forwarding).
+
+> [!TIP]
+> Below is how to configure a Pi-hole for split DNS. Other platforms should be similar.
+> 1. Apply this file on the Pi-hole server while substituting the variables
+> ```sh
+> # /etc/dnsmasq.d/99-k8s-gateway-forward.conf
+> server=/${bootstrap_cloudflare.domain}/${bootstrap_cloudflare.gateway_vip}
+> ```
+> 2. Restart dnsmasq on the server.
+> 3. Query an internal-only subdomain from your workstation (any `internal` class ingresses): `dig @${home-dns-server-ip} echo-server-internal.${bootstrap_cloudflare.domain}`. It should resolve to `${bootstrap_cloudflare.ingress_vip}`.
+
+If you're having trouble with DNS be sure to check out these two GitHub discussions: [Internal DNS](https://github.com/onedr0p/cluster-template/discussions/719) and [Pod DNS resolution broken](https://github.com/onedr0p/cluster-template/discussions/635).
+
+... Nothing working? That is expected, this is DNS after all!
+
+#### 📜 Certificates
+
+By default this template will deploy a wildcard certificate using the Let's Encrypt **staging environment**, which prevents you from getting rate-limited by the Let's Encrypt production servers if your cluster doesn't deploy properly (for example due to a misconfiguration). Once you are sure you will keep the cluster up for more than a few hours be sure to switch to the production servers as outlined in `config.yaml`.
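+
+📍 _One quick way to check which environment issued the certificate your ingress controller is currently serving is sketched below. It assumes the split DNS setup from the Home DNS section above and the default `echo-server-internal` hostname from this template; if DNS is not in place yet, point `-connect` at your ingress VIP instead and keep `-servername`._
+
+```sh
+# While the cluster is still on the staging environment the issuer line will contain "(STAGING)"
+openssl s_client -connect echo-server-internal.${bootstrap_cloudflare.domain}:443 \
+  -servername echo-server-internal.${bootstrap_cloudflare.domain} </dev/null 2>/dev/null \
+  | openssl x509 -noout -issuer -dates
+```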
+
+📍 _You will need a production certificate to reach internet-exposed applications through `cloudflared`._
+
+#### 🪝 Github Webhook
+
+By default Flux will periodically check your git repository for changes. In order to have Flux reconcile on `git push` you must configure Github to send `push` events to Flux.
+
+> [!NOTE]
+> This will only work after you have switched over certificates to the Let's Encrypt Production servers.
+
+1. Obtain the webhook path
+
+    📍 _Hook id and path should look like `/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123`_
+
+    ```sh
+    kubectl -n flux-system get receiver github-receiver -o jsonpath='{.status.webhookPath}'
+    ```
+
+2. Piece together the full URL with the webhook path appended
+
+    ```text
+    https://flux-webhook.${bootstrap_cloudflare.domain}/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123
+    ```
+
+3. Navigate to the settings of your repository on Github, under "Settings/Webhooks" press the "Add webhook" button. Fill in the webhook URL and your `bootstrap_github_webhook_token` secret and save.
+
+## 💥 Nuke
+
+There might be a situation where you want to destroy your Kubernetes cluster. This will completely clean the OS of all traces of the Kubernetes distribution you chose and then reboot the nodes.
+
+```sh
+# k3s: Remove all traces of k3s from the nodes
+task ansible:run playbook=cluster-nuke
+# Talos: Reset your nodes back to maintenance mode and reboot
+task talos:soft-nuke
+# Talos: Completely format the Talos installation and reboot
+task talos:hard-nuke
+```
+
+## 🤖 Renovate
+
+[Renovate](https://www.mend.io/renovate) is a tool that automates dependency management. It is designed to scan your repository around the clock and open PRs for out-of-date dependencies it finds. Common dependencies it can discover are Helm charts, container images, GitHub Actions, Ansible roles... even Flux itself! Merging a PR will cause Flux to apply the update to your cluster.
+
+To enable Renovate, click the 'Configure' button over at their [Github app page](https://github.com/apps/renovate) and select your repository. Renovate creates a "Dependency Dashboard" as an issue in your repository, giving an overview of the status of all updates. The dashboard has interactive checkboxes that let you do things like advance scheduling or reattempt update PRs you closed without merging.
+
+The base Renovate configuration in your repository can be viewed at [.github/renovate.json5](./.github/renovate.json5). By default it is scheduled to be active with PRs every weekend, but you can [change the schedule to anything you want](https://docs.renovatebot.com/presets-schedule), or remove it if you want Renovate to open PRs right away.
+
+## 🐛 Debugging
+
+Below is a general guide on trying to debug an issue with a resource or application. For example, if a workload/resource is not showing up or a pod has started but is in a `CrashLoopBackOff` or `Pending` state.
+
+1. Start by checking all Flux Kustomizations & Git Repository & OCI Repository and verify they are healthy.
+
+    ```sh
+    flux get sources oci -A
+    flux get sources git -A
+    flux get ks -A
+    ```
+
+2. Then check all the Flux Helm Releases and verify they are healthy.
+
+    ```sh
+    flux get hr -A
+    ```
+
+3. Then check if the pod is present.
+
+    ```sh
+    kubectl -n <namespace> get pods -o wide
+    ```
+
+4. Then check the logs of the pod if it's there.
+
+    ```sh
+    kubectl -n <namespace> logs <pod-name> -f
+    # or
+    stern -n <namespace> <pod-name>
+    ```
+
+5. If a resource exists try to describe it to see what problems it might have.
+
+    ```sh
+    kubectl -n <namespace> describe <resource> <name>
+    ```
+
+6. Check the namespace events
+
+    ```sh
+    kubectl -n <namespace> get events --sort-by='.metadata.creationTimestamp'
+    ```
+
+Resolving problems that you have could take some tweaking of your YAML manifests in order to get things working, other times it could be an external factor like permissions on NFS. If you are unable to figure out your problem, see the help section below.
+
+## 👉 Help
+
+- Make a post in this repository's Github [Discussions](https://github.com/onedr0p/cluster-template/discussions).
+- Start a thread in the `#support` or `#cluster-template` channels in the [Home Operations](https://discord.gg/home-operations) Discord server.
+
+## ❔ What's next
+
+The cluster is your oyster (or something like that). Below are some optional considerations you might want to review.
+
+#### Ship it
+
+To browse or get ideas on applications people are running, community member [@whazor](https://github.com/whazor) created [Kubesearch](https://kubesearch.dev) as a creative way to search Flux HelmReleases across Github and Gitlab.
+
+#### Storage
+
+The included CSI (openebs in local-hostpath mode) is a great start for storage but soon you might find you need more features like replicated block storage, or to connect to an NFS/SMB/iSCSI server. If you need any of those features be sure to check out projects like [rook-ceph](https://github.com/rook/rook), [longhorn](https://github.com/longhorn/longhorn), [openebs](https://github.com/openebs/openebs), [democratic-csi](https://github.com/democratic-csi/democratic-csi), [csi-driver-nfs](https://github.com/kubernetes-csi/csi-driver-nfs),
+and [synology-csi](https://github.com/SynologyOpenSource/synology-csi).
+
+## 🙌 Related Projects
+
+If this repo is too hot to handle or too cold to hold check out these following projects.
+
+- [khuedoan/homelab](https://github.com/khuedoan/homelab) - _Modern self-hosting framework, fully automated from empty disk to operating services with a single command._
+- [danmanners/aws-argo-cluster-template](https://github.com/danmanners/aws-argo-cluster-template) - _A community opinionated template for deploying Kubernetes clusters on-prem and in AWS using Pulumi, SOPS, Sealed Secrets, GitHub Actions, Renovate, Cilium and more!_
+- [ricsanfre/pi-cluster](https://github.com/ricsanfre/pi-cluster) - _Pi Kubernetes Cluster. Homelab kubernetes cluster automated with Ansible and ArgoCD_
+- [techno-tim/k3s-ansible](https://github.com/techno-tim/k3s-ansible) - _The easiest way to bootstrap a self-hosted High Availability Kubernetes cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB, and more_
+
+## ⭐ Stargazers
+
+ +[![Star History Chart](https://api.star-history.com/svg?repos=onedr0p/cluster-template&type=Date)](https://star-history.com/#onedr0p/cluster-template&Date) + +
+ +## 🤝 Thanks + +Big shout out to all the contributors, sponsors and everyone else who has helped on this project. diff --git a/Taskfile.yaml b/Taskfile.yaml index 8944b5e82..c504e74fb 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -1,60 +1,93 @@ --- +# yaml-language-server: $schema=https://taskfile.dev/schema.json version: "3" vars: - PYTHON_BIN: python3 - BOOTSTRAP_DIR: "{{.ROOT_DIR}}/bootstrap" + # Directories ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible" + BOOTSTRAP_DIR: "{{.ROOT_DIR}}/bootstrap" KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes" + PRIVATE_DIR: "{{.ROOT_DIR}}/.private" + SCRIPTS_DIR: "{{.ROOT_DIR}}/scripts" + # Files + AGE_FILE: "{{.ROOT_DIR}}/age.key" + BOOTSTRAP_CONFIG_FILE: "{{.ROOT_DIR}}/config.yaml" + KUBECONFIG_FILE: "{{.ROOT_DIR}}/kubeconfig" + MAKEJINJA_CONFIG_FILE: "{{.ROOT_DIR}}/makejinja.toml" + PIP_REQUIREMENTS_FILE: "{{.ROOT_DIR}}/requirements.txt" + # Binaries + PYTHON_BIN: python3 env: - KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" - SOPS_AGE_KEY_FILE: "{{.ROOT_DIR}}/age.key" - PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH" + KUBECONFIG: "{{.KUBECONFIG_FILE}}" + PYTHONDONTWRITEBYTECODE: "1" + SOPS_AGE_KEY_FILE: "{{.AGE_FILE}}" VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv" - ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy" - ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles" - ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops" - K8S_AUTH_KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" includes: - ansible: .taskfiles/AnsibleTasks.yaml - brew: .taskfiles/BrewTasks.yaml - cluster: .taskfiles/ClusterTasks.yaml + ansible: .taskfiles/Ansible/Taskfile.yaml + kubernetes: + aliases: ["k8s"] + taskfile: .taskfiles/Kubernetes/Taskfile.yaml + flux: .taskfiles/Flux/Taskfile.yaml + repository: + aliases: ["repo"] + taskfile: .taskfiles/Repository/Taskfile.yaml + talos: .taskfiles/Talos/Taskfile.yaml + sops: .taskfiles/Sops/Taskfile.yaml + workstation: .taskfiles/Workstation/Taskfile.yaml + user: + taskfile: .taskfiles/User + optional: true tasks: default: task -l - deps: - desc: Create a Python virtual env and install required packages - cmds: - - "{{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv" - - .venv/bin/python3 -m pip install --upgrade pip setuptools wheel - - .venv/bin/python3 -m pip install --upgrade --requirement "{{.ROOT_DIR}}/requirements.txt" - - .venv/bin/ansible-galaxy install --role-file "{{.ROOT_DIR}}/requirements.yaml" --force - init: desc: Initialize configuration files - dir: "{{.BOOTSTRAP_DIR}}" cmds: - - cp -n vars/addons.sample.yaml vars/addons.yaml - - cp -n vars/config.sample.yaml vars/config.yaml - - cmd: echo "=== Configuration files copied ===" - silent: true - - cmd: echo "Proceed with updating the configuration files..." - silent: true - - cmd: echo "{{.BOOTSTRAP_DIR}}/vars/config.yaml" - silent: true - - cmd: echo "{{.BOOTSTRAP_DIR}}/vars/addons.yaml" - silent: true + - cp -n {{.BOOTSTRAP_CONFIG_FILE | replace ".yaml" ".sample.yaml"}} {{.BOOTSTRAP_CONFIG_FILE}} + - cmd: echo === Configuration file copied === + - cmd: echo Proceed with updating the configuration files... 
+ - cmd: echo {{.BOOTSTRAP_CONFIG_FILE}} status: - - test -f "{{.BOOTSTRAP_DIR}}/vars/addons.yaml" - - test -f "{{.BOOTSTRAP_DIR}}/vars/config.yaml" + - test -f "{{.BOOTSTRAP_CONFIG_FILE}}" + silent: true configure: - desc: Configure repository from Ansible vars - dir: "{{.BOOTSTRAP_DIR}}" - cmd: ansible-playbook configure.yaml - env: - ANSIBLE_DISPLAY_SKIPPED_HOSTS: "false" + desc: Configure repository from bootstrap vars + prompt: Any conflicting config in the root kubernetes and ansible directories will be overwritten... continue? + deps: ["workstation:direnv", "workstation:venv", "sops:age-keygen", "init"] + cmds: + - task: .template + - task: sops:encrypt + - task: .validate + + .template: + internal: true + cmd: "{{.VIRTUAL_ENV}}/bin/makejinja" + preconditions: + - { msg: "Missing virtual environment", sh: "test -d {{.VIRTUAL_ENV}}" } + - { msg: "Missing Makejinja config file", sh: "test -f {{.MAKEJINJA_CONFIG_FILE}}" } + - { msg: "Missing Makejinja plugin file", sh: "test -f {{.BOOTSTRAP_DIR}}/scripts/plugin.py" } + - { msg: "Missing bootstrap config file", sh: "test -f {{.BOOTSTRAP_CONFIG_FILE}}" } + + .validate: + internal: true + cmds: + - task: kubernetes:kubeconform + - cmd: echo === Done rendering and validating YAML === + - cmd: | + if [[ $KUBECONFIG != "{{.KUBECONFIG_FILE}}" ]]; then + echo WARNING: KUBECONFIG is not set to the expected value, this may cause conflicts. + fi + - cmd: | + if [[ $SOPS_AGE_KEY_FILE != "{{.AGE_FILE}}" ]]; then + echo WARNING: SOPS_AGE_KEY_FILE is not set to the expected value, this may cause conflicts. + fi + - cmd: | + if test -f ~/.config/sops/age/keys.txt; then + echo WARNING: SOPS Age key found in home directory, this may cause conflicts. + fi + silent: true diff --git a/ansible/inventory/group_vars/kubernetes/main.yaml b/ansible/inventory/group_vars/kubernetes/main.yaml deleted file mode 100644 index 9161a4a4f..000000000 --- a/ansible/inventory/group_vars/kubernetes/main.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# -# Below vars are for the xanmanning.k3s role -# ...see https://github.com/PyratLabs/ansible-role-k3s -# - -# renovate: datasource=github-releases depName=k3s-io/k3s -k3s_release_version: "v1.29.3+k3s1" -k3s_install_hard_links: true -k3s_become: true -k3s_etcd_datastore: true -k3s_use_unsupported_config: true -k3s_registration_address: "{{ kube_vip_addr }}" -k3s_server_manifests_urls: - # Kube-vip RBAC - - url: https://raw.githubusercontent.com/kube-vip/website/main/content/manifests/rbac.yaml - filename: kube-vip-rbac.yaml - # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release) - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml - filename: custom-prometheus-podmonitors.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml - filename: custom-prometheus-prometheusrules.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml - filename: custom-prometheus-scrapeconfigs.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml - filename: custom-prometheus-servicemonitors.yaml -# /var/lib/rancher/k3s/server/manifests 
-k3s_server_manifests_templates: - - custom-cilium-helmchart.yaml.j2 - - custom-cilium-l2.yaml.j2 - - custom-coredns-helmchart.yaml.j2 -# /var/lib/rancher/k3s/agent/pod-manifests -k3s_server_pod_manifests_templates: - - kube-vip-static-pod.yaml.j2 diff --git a/ansible/inventory/group_vars/kubernetes/supplemental.yaml b/ansible/inventory/group_vars/kubernetes/supplemental.yaml deleted file mode 100644 index 3662e73a6..000000000 --- a/ansible/inventory/group_vars/kubernetes/supplemental.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -timezone: "Etc/UTC" -github_username: "oscaromeu" -coredns_addr: "10.43.0.10" -kube_vip_addr: "10.69.3.154" -cluster_cidr: "10.42.0.0/16" -service_cidr: "10.43.0.0/16" -node_cidr: "10.69.3.0/24" diff --git a/ansible/inventory/group_vars/master/main.yaml b/ansible/inventory/group_vars/master/main.yaml deleted file mode 100644 index 053969f78..000000000 --- a/ansible/inventory/group_vars/master/main.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ -# https://github.com/PyratLabs/ansible-role-k3s - -k3s_control_node: true -k3s_server: - node-ip: "{{ ansible_host }}" - tls-san: - - "{{ kube_vip_addr }}" - docker: false - flannel-backend: "none" # This needs to be in quotes - disable: - - coredns # Disable coredns - replaced with Coredns Helm Chart - - flannel # Disable flannel - replaced with Cilium Helm Chart - - local-storage # Disable local-path-provisioner - installed with Flux - - metrics-server # Disable metrics-server - installed with Flux - - servicelb # Disable servicelb - replaced with Cilium Helm Chart - - traefik # Disable traefik - replaced with ingress-nginx and installed with Flux - disable-network-policy: true - disable-cloud-controller: true - disable-kube-proxy: true # Cilium uses eBPF - write-kubeconfig-mode: "644" - cluster-cidr: "{{ cluster_cidr }}" - service-cidr: "{{ service_cidr }}" - etcd-expose-metrics: true # Required to monitor etcd with kube-prometheus-stack - kube-controller-manager-arg: - - "bind-address=0.0.0.0" # Required to monitor kube-controller-manager with kube-prometheus-stack - kube-scheduler-arg: - - "bind-address=0.0.0.0" # Required to monitor kube-scheduler with kube-prometheus-stack - kube-apiserver-arg: - - "anonymous-auth=true" # Required for HAProxy health-checks diff --git a/ansible/inventory/group_vars/worker/main.yaml b/ansible/inventory/group_vars/worker/main.yaml deleted file mode 100644 index 476825717..000000000 --- a/ansible/inventory/group_vars/worker/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ -# https://github.com/PyratLabs/ansible-role-k3s - -k3s_control_node: false -k3s_agent: - node-ip: "{{ ansible_host }}" diff --git a/ansible/inventory/host_vars/.gitkeep b/ansible/inventory/host_vars/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/ansible/inventory/host_vars/lpkm1.sops.yaml b/ansible/inventory/host_vars/lpkm1.sops.yaml deleted file mode 100644 index 6dab1bcba..000000000 --- a/ansible/inventory/host_vars/lpkm1.sops.yaml +++ /dev/null @@ -1,21 +0,0 @@ -ansible_become_pass: ENC[AES256_GCM,data:T9y2NRs=,iv:rlDI4Ujjwl7ckMaMc7ZYzjZZVc+WrE25EwF6rsjZApo=,tag:InDM4bhsShOvmOyiN9sEeg==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - 
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsWHNaQkFUUlBxSDVYK2hv - MDVEajNUL285d3ZTaTFoaHIvcGJ3TU1XckhrCitvbTRPdVBNejBGVFdqOUE5K2tj - SmlRYmVuYTJVcmpNQmVzNVBkeTlyY0UKLS0tIEZ3UktkQW1JQlhQNnpheG5CUVZH - OXJEVXg4ZGVzYjNnUDNMdGs5dDFtaTQKOcy4dTiz/kCpwyljYmu630n8+pTtw4mX - vPuxKpGeRxfR+/d/dNmz6CZSegbH97JfJKQu6BWSkBRoplIzOBnJJQ== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:05Z" - mac: ENC[AES256_GCM,data:gv20yCxuSfSRFLV2f6VfygZL5/odF7L1ym9T34rBBHyvzEsV0ekdUQQW1wV/ac39tiCn7huksPGWRx9bSW4iZFTqK7DCoB/yoCiBQWGR9V/PPmhbdnAYsrBPpj0OaijEyScCjlaI7tZJUCeE6yY/XHCFwqhyH7P8pjYufz3k/Wg=,iv:lLABp8Q+mxM4KNc8xWVBFxsj6Yx7ETbmJO5QD9a9r/k=,tag:RQ8Js1dNzkWm5ZyZse3YNg==,type:str] - pgp: [] - unencrypted_suffix: _unencrypted - version: 3.7.3 diff --git a/ansible/inventory/host_vars/lpkw1.sops.yaml b/ansible/inventory/host_vars/lpkw1.sops.yaml deleted file mode 100644 index 4ea1df1d5..000000000 --- a/ansible/inventory/host_vars/lpkw1.sops.yaml +++ /dev/null @@ -1,21 +0,0 @@ -ansible_become_pass: ENC[AES256_GCM,data:KZrd6AQ=,iv:uQ03ZFdr3LYS5cJ+nIV17DD8QbQeBCMFBqi3SJTyM8k=,tag:RRJA6QPZgqA1dXKrIJEm/Q==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBteWt1enJZWTNZbW4wYXBk - T0c0UGFFVDF2eSs2cmtNaUxFZ24yVEZaR1NVCkp5NkpMWUVqcDVwR3JpZ25CaUZs - Zi83V2RRMUF1Z05yaDdYUWc1alhrSHcKLS0tIDV2am5oUUExbTNaZ0k2bDFScWtI - cFZkSERRV0xpZzJSQnorcG13cnRiS3MKZ2BVctczG3NzUwi7ogHPvGnHl0oGK6/J - DX7hwtepJOTN2zmMiM/0qkD20bC0BKIcvkIwR6G157PREjQp3c/AcA== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:05Z" - mac: ENC[AES256_GCM,data:QIXR/d5WA8v20ScEmj8p1ldkWqDwa7G9tMczFDRXm6DPxnMJK0UkbSrVT5cEmdgy4EjLH/M7I6eOwc/GumfMO5UTTFn+zQftYN1AtmmeHqqifglTrQc30gK1KVcCL8qbJArTh3TlTf7vhSyycjKLGjH8jFED/FWthowB6YaAJBY=,iv:KNs8POEIqsC7Iz2HZdBqct2E8TRjNz2xaCtQQj62CIs=,tag:kIhfQ2s3zUViW7+IegsOxw==,type:str] - pgp: [] - unencrypted_suffix: _unencrypted - version: 3.7.3 diff --git a/ansible/inventory/host_vars/lpkw2.sops.yaml b/ansible/inventory/host_vars/lpkw2.sops.yaml deleted file mode 100644 index 42f05973c..000000000 --- a/ansible/inventory/host_vars/lpkw2.sops.yaml +++ /dev/null @@ -1,21 +0,0 @@ -ansible_become_pass: ENC[AES256_GCM,data:1eadXvY=,iv:XhCM1pwEP4wl2ox/bUIo7VbEyBBQ074rwkqYchVasVA=,tag:PC51f9HPDNzNSj9j0kYHaw==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjbmc2VVkydG5UTk53TElL - NSsvdHZHdTcvY1BQcXNRRUlqRW15TVNXL0g0Ckh0QnNEUGh0UEhsUUdyMy9IdE1l - TU5yTHhRWmgwbVJpRkFKTE0yRjNzZmMKLS0tIFlOMVF4ZC9pRVF5QmtLWGhJNkFB - UjROOUluMk1TbWY2aFFaNW50VzdWNTAKKCM+UTAVlSg1PFsWa3Ei/EXXkHturHcu - I32E+n3s7WawVvwYaTsNu4nxe0SoSjeTtzI1OMHpODD+zUQxfwE4XA== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:05Z" - mac: ENC[AES256_GCM,data:25n4ttrLtqGWStVuZoSQv0EVhBnvlAKFC+GVdpcBsUWT72kTz5wrSF24QXxnm2HHAd2/lBrlVPWMIlUy648W0PHdHcq5HXI5fkX/KtE9MyGFtZpVef4GQORS8Iey8vs62ce4U2A3CiQglyicacmmnQSYt2wQ0pNmcF7DE7Kb9uQ=,iv:5rYqxNh+5IslcMS4Nf7ia31F3RfT4+9A9OBF1qGd1vM=,tag:yEcrn2GEF6cNqmIVevAuMg==,type:str] - pgp: [] - unencrypted_suffix: _unencrypted - version: 3.7.3 diff --git a/ansible/inventory/hosts.yaml b/ansible/inventory/hosts.yaml deleted file mode 100644 index 34484354e..000000000 --- a/ansible/inventory/hosts.yaml +++ /dev/null 
@@ -1,24 +0,0 @@ ---- -kubernetes: - children: - master: - hosts: - lpkm1: - ansible_user: oscar - ansible_host: 10.69.3.26 - ceph_drives: - - /dev/disk/by-id/ata-KINGSTON_SA400S37480G_50026B7283215569 - - worker: - hosts: - lpkw1: - ansible_user: oscar - ansible_host: 10.69.3.27 - ceph_drives: - - /dev/disk/by-id/ata-SSDPR-CL100-480-G3_G0Z047886 - lpkw2: - ansible_user: oscar - ansible_host: 10.69.3.25 - ceph_drives: - - /dev/disk/by-id/ata-SSDPR-CL100-480-G3_GXE092715 - diff --git a/ansible/playbooks/cluster-ceph-reset.yaml b/ansible/playbooks/cluster-ceph-reset.yaml deleted file mode 100644 index a9898356f..000000000 --- a/ansible/playbooks/cluster-ceph-reset.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Reset Ceph Drives - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 2 seconds... - ansible.builtin.pause: - seconds: 2 - tasks: - - name: Reset Ceph Drives # noqa: ignore-errors - ignore_errors: true - when: ceph_drives | default([]) | length > 0 - block: - - name: Delete (/var/lib/rook) - ansible.builtin.file: - state: absent - path: /var/lib/rook - # Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean) - - name: Wipe (sgdisk) # noqa: no-changed-when - ansible.builtin.command: "sgdisk --zap-all {{ item }}" - loop: "{{ ceph_drives }}" - # SSDs may be better cleaned with blkdiscard instead of dd - - name: Wipe (blkdiscard) # noqa: no-changed-when - ansible.builtin.command: "blkdiscard {{ item }}" - loop: "{{ ceph_drives }}" - # Inform the OS of partition table changes - - name: Wipe (partprobe) # noqa: no-changed-when - ansible.builtin.command: "partprobe {{ item }}" - loop: "{{ ceph_drives }}" diff --git a/ansible/playbooks/cluster-installation.yaml b/ansible/playbooks/cluster-installation.yaml deleted file mode 100644 index 7151d3e73..000000000 --- a/ansible/playbooks/cluster-installation.yaml +++ /dev/null @@ -1,72 +0,0 @@ ---- -- name: Cluster Installation - hosts: all - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - tasks: - - name: Check if cluster is installed - check_mode: false - ansible.builtin.stat: - path: /etc/rancher/k3s/config.yaml - register: k3s_installed - - - name: Ignore manifests templates and urls if the cluster is already installed - when: k3s_installed.stat.exists - ansible.builtin.set_fact: - k3s_server_manifests_templates: [] - k3s_server_manifests_urls: [] - - - name: Install Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: installed - - - name: Kubeconfig - ansible.builtin.include_tasks: tasks/kubeconfig.yaml - - - name: Wait for custom manifests to rollout - when: - - k3s_primary_control_node - - (k3s_server_manifests_templates | length > 0 - or k3s_server_manifests_urls | length > 0) - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - kind: "{{ item.kind }}" - name: "{{ item.name }}" - namespace: "{{ item.namespace | default('') }}" - wait: true - wait_sleep: 10 - wait_timeout: 360 - loop: - - { name: cilium, kind: HelmChart, namespace: kube-system } - - { name: coredns, kind: HelmChart, namespace: kube-system } - - { name: policy, kind: CiliumL2AnnouncementPolicy } - - { name: pool, kind: CiliumLoadBalancerIPPool } - - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition } - - - name: Coredns - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/coredns.yaml - - - name: Cilium - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/cilium.yaml - - - name: Cruft - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/cruft.yaml - - - name: Stale Containers - ansible.builtin.include_tasks: tasks/stale_containers.yaml - vars: - stale_containers_state: enabled diff --git a/ansible/playbooks/cluster-kube-vip.yaml b/ansible/playbooks/cluster-kube-vip.yaml deleted file mode 100644 index d6fe862b2..000000000 --- a/ansible/playbooks/cluster-kube-vip.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Cluster kube-vip - hosts: master - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Ensure Kubernetes is running - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: started - - - name: Upgrade kube-vip - ansible.builtin.template: - src: templates/kube-vip-static-pod.yaml.j2 - dest: "{{ k3s_server_pod_manifests_dir }}/kube-vip-static-pod.yaml" - mode: preserve diff --git a/ansible/playbooks/cluster-nuke.yaml b/ansible/playbooks/cluster-nuke.yaml deleted file mode 100644 index c1f5bbfec..000000000 --- a/ansible/playbooks/cluster-nuke.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -- name: Cluster Nuke - hosts: all - become: true - gather_facts: true - any_errors_fatal: true - vars_prompt: - - name: nuke - prompt: |- - Are you sure you want to nuke this cluster? - Type 'YES I WANT TO DESTROY THIS CLUSTER' to proceed - default: "n" - private: false - pre_tasks: - - name: Check for confirmation - ansible.builtin.fail: - msg: Aborted nuking the cluster - when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER' - - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - tasks: - - name: Stop Kubernetes # noqa: ignore-errors - ignore_errors: true - block: - - name: Stop Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: stopped - - # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md - - name: Networking - block: - - name: Networking | Delete Cilium links - ansible.builtin.command: - cmd: "ip link delete {{ item }}" - removes: "/sys/class/net/{{ item }}" - loop: ["cilium_host", "cilium_net", "cilium_vxlan"] - - name: Networking | Flush iptables - ansible.builtin.iptables: - table: "{{ item }}" - flush: true - loop: ["filter", "nat", "mangle", "raw"] - - name: Networking | Flush ip6tables - ansible.builtin.iptables: - table: "{{ item }}" - flush: true - ip_version: ipv6 - loop: ["filter", "nat", "mangle", "raw"] - - name: Networking | Delete CNI directory - ansible.builtin.file: - path: /etc/cni/net.d - state: absent - - - name: Uninstall Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: uninstalled - - - name: Stale Containers - ansible.builtin.include_tasks: tasks/stale_containers.yaml - vars: - stale_containers_state: disabled - - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting nodes - reboot_timeout: 3600 diff --git a/ansible/playbooks/cluster-prepare.yaml b/ansible/playbooks/cluster-prepare.yaml deleted file mode 100644 index f22948aa7..000000000 --- a/ansible/playbooks/cluster-prepare.yaml +++ /dev/null @@ -1,120 +0,0 @@ ---- -- name: Prepare System - hosts: all - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Locale - block: - - name: Locale | Set timezone - community.general.timezone: - name: "{{ timezone | default('Etc/UTC') }}" - - - name: Packages - block: - - name: Packages | Install - ansible.builtin.apt: - name: apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,gnupg,hdparm,htop, - iptables,iputils-ping,ipvsadm,libseccomp2,lm-sensors,neofetch,net-tools,nfs-common, - nvme-cli,open-iscsi,parted,psmisc,python3,python3-apt,python3-kubernetes,python3-yaml, - smartmontools,socat,software-properties-common,unzip,util-linux - install_recommends: false - - - name: User Configuration - block: - - name: User Configuration | SSH keys - ansible.posix.authorized_key: - user: "{{ ansible_user }}" - key: "https://github.com/{{ github_username }}.keys" - - name: User Configuration | Silence login - ansible.builtin.file: - dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.hushlogin" - state: touch - owner: "{{ ansible_user }}" - group: "{{ ansible_user }}" - mode: "0644" - modification_time: preserve - access_time: preserve - - - name: Network Configuration - notify: Reboot - block: - - name: Network Configuration | Set hostname - ansible.builtin.hostname: - name: "{{ inventory_hostname }}" - - name: Network Configuration | Update hosts - ansible.builtin.copy: - content: | - 127.0.0.1 localhost - 127.0.1.1 {{ inventory_hostname }} - # The following lines are desirable for IPv6 capable hosts - ::1 localhost ip6-localhost ip6-loopback - ff02::1 ip6-allnodes - ff02::2 ip6-allrouters - dest: /etc/hosts - mode: preserve - # https://github.com/cilium/cilium/issues/18706 - - name: Network Configuration | Cilium (1) - ansible.builtin.lineinfile: - dest: /etc/systemd/networkd.conf - regexp: ManageForeignRoutingPolicyRules - line: 
ManageForeignRoutingPolicyRules=no - - name: Network Configuration | Cilium (2) - ansible.builtin.lineinfile: - dest: /etc/systemd/networkd.conf - regexp: ManageForeignRoutes - line: ManageForeignRoutes=no - - - name: System Configuration - notify: Reboot - block: - - name: System Configuration | Neofetch - ansible.builtin.copy: - dest: /etc/profile.d/neofetch.sh - mode: "0755" - content: neofetch --config none - - name: System Configuration | Disable apparmor - ansible.builtin.systemd: - name: apparmor - state: stopped - masked: true - - name: System Configuration | Disable swap - ansible.posix.mount: - name: "{{ item }}" - fstype: swap - state: absent - loop: ["none", "swap"] - - name: System Configuration | Kernel modules (1) - community.general.modprobe: - name: "{{ item }}" - state: present - loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] - - name: System Configuration | Kernel modules (2) - ansible.builtin.copy: - dest: "/etc/modules-load.d/{{ item }}.conf" - mode: "0644" - content: "{{ item }}" - loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] - - name: System Configuration | Sysctl - ansible.posix.sysctl: - name: "{{ item.key }}" - value: "{{ item.value }}" - sysctl_file: /etc/sysctl.d/99-kubernetes.conf - reload: true - with_dict: "{{ sysctl_config }}" - vars: - sysctl_config: - fs.inotify.max_queued_events: 65536 - fs.inotify.max_user_watches: 524288 - fs.inotify.max_user_instances: 8192 - - handlers: - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting nodes - reboot_timeout: 3600 diff --git a/ansible/playbooks/cluster-reboot.yaml b/ansible/playbooks/cluster-reboot.yaml deleted file mode 100644 index 4adcfe435..000000000 --- a/ansible/playbooks/cluster-reboot.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Reboot - hosts: all - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting nodes - reboot_timeout: 3600 diff --git a/ansible/playbooks/cluster-rollout-update.yaml b/ansible/playbooks/cluster-rollout-update.yaml deleted file mode 100644 index 2a2735aa7..000000000 --- a/ansible/playbooks/cluster-rollout-update.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -# https://github.com/kevincoakley/ansible-role-k8s-rolling-update -- name: Cluster rollout update - hosts: all - become: true - gather_facts: true - any_errors_fatal: true - serial: 1 - pre_tasks: - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - tasks: - - name: Details - ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json" - register: kubectl_get_node - delegate_to: "{{ groups['master'][0] }}" - failed_when: false - changed_when: false - - - name: Update - when: - # When status.conditions[x].type == Ready then check stats.conditions[x].status for True|False - - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status") - # If spec.unschedulable is defined then the node is cordoned - - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined - block: - - name: Cordon - kubernetes.core.k8s_drain: - name: "{{ inventory_hostname }}" - kubeconfig: /etc/rancher/k3s/k3s.yaml - state: cordon - delegate_to: "{{ groups['master'][0] }}" - - - name: Drain - ansible.builtin.command: "kubectl drain --pod-selector='app!=rook-ceph-osd,app!=csi-attacher,app!=csi-provisioner' --ignore-daemonsets --delete-emptydir-data --force --grace-period=300 {{ inventory_hostname }}" - delegate_to: "{{ groups['master'][0] }}" - changed_when: false - - ### pod_selectors feature in upcoming kubernetes.core 2.5.0 ### - # - name: Drain - # kubernetes.core.k8s_drain: - # name: "{{ inventory_hostname }}" - # kubeconfig: /etc/rancher/k3s/k3s.yaml - # state: drain - # delete_options: - # delete_emptydir_data: true - # ignore_daemonsets: true - # terminate_grace_period: 600 - # wait_timeout: 900 - # force: true - # pod_selectors: - # # Rook Ceph - # - app!=rook-ceph-osd - # # Longhorn - # - app!=csi-attacher - # # Longhorn - # - app!=csi-provisioner - # delegate_to: "{{ groups['master'][0] }}" - - - name: Update - ansible.builtin.apt: - upgrade: dist - update_cache: true - - - name: Check if reboot is required - ansible.builtin.stat: - path: /var/run/reboot-required - register: reboot_required - - - name: Reboot - when: reboot_required.stat.exists - ansible.builtin.reboot: - msg: Rebooting node - post_reboot_delay: 60 - reboot_timeout: 3600 - when: reboot_required.stat.exists - - - name: Uncordon - kubernetes.core.k8s_drain: - name: "{{ inventory_hostname }}" - kubeconfig: /etc/rancher/k3s/k3s.yaml - state: uncordon - delegate_to: "{{ groups['master'][0] }}" diff --git a/ansible/playbooks/files/stale-containers.service b/ansible/playbooks/files/stale-containers.service deleted file mode 100644 index 5136df2f6..000000000 --- a/ansible/playbooks/files/stale-containers.service +++ /dev/null @@ -1,6 +0,0 @@ -[Unit] -Description=Stale containers - -[Service] -Type=oneshot -ExecStart=/usr/local/bin/k3s crictl rmi --prune diff --git a/ansible/playbooks/files/stale-containers.timer b/ansible/playbooks/files/stale-containers.timer deleted file mode 100644 index 731885a14..000000000 --- a/ansible/playbooks/files/stale-containers.timer +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Stale containers - -[Timer] -OnCalendar=weekly -AccuracySec=1h -Persistent=true -RandomizedDelaySec=6000 - -[Install] -WantedBy=timers.target diff --git a/ansible/playbooks/roles/requirements.yml b/ansible/playbooks/roles/requirements.yml deleted file mode 100644 index 8aec996bc..000000000 --- a/ansible/playbooks/roles/requirements.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -collections: - - name: ansible.posix - version: 1.5.4 - - name: ansible.utils - version: 2.10.3 - - name: community.general - version: 7.3.0 - - name: community.sops - version: 1.6.7 - - name: kubernetes.core - version: 2.4.0 -roles: - - name: xanmanning.k3s - version: v3.4.2 - - name: geerlingguy.docker - 
version: 7.0.2 - - name: geerlingguy.pip - version: 3.0.1 diff --git a/ansible/playbooks/tasks/cilium.yaml b/ansible/playbooks/tasks/cilium.yaml deleted file mode 100644 index ca242bb03..000000000 --- a/ansible/playbooks/tasks/cilium.yaml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Cilium - block: - - name: Cilium | Check if Cilium HelmChart exists - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium - kind: HelmChart - namespace: kube-system - register: cilium_helmchart - - - name: Cilium | Wait for Cilium to rollout - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: helm-install-cilium - kind: Job - namespace: kube-system - wait: true - wait_condition: - type: Complete - status: true - wait_timeout: 360 - - - name: Cilium | Patch the Cilium HelmChart to unmanage it - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s_json_patch: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium - kind: HelmChart - namespace: kube-system - patch: - - op: add - path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged - value: "true" - - - name: Cilium | Delete the Cilium HelmChart CR - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium - kind: HelmChart - namespace: kube-system - state: absent - - - name: Cilium | Force delete the Cilium HelmChart - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium - kind: HelmChart - namespace: kube-system - state: patched - definition: - metadata: - finalizers: [] diff --git a/ansible/playbooks/tasks/coredns.yaml b/ansible/playbooks/tasks/coredns.yaml deleted file mode 100644 index d18383a75..000000000 --- a/ansible/playbooks/tasks/coredns.yaml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Coredns - block: - - name: Coredns | Check if Coredns HelmChart exists - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - register: coredns_helmchart - - - name: Coredns | Wait for Coredns to rollout - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: helm-install-coredns - kind: Job - namespace: kube-system - wait: true - wait_condition: - type: Complete - status: true - wait_timeout: 360 - - - name: Coredns | Patch the Coredns HelmChart to unmanage it - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s_json_patch: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - patch: - - op: add - path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged - value: "true" - - - name: Coredns | Delete the Coredns HelmChart CR - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - state: absent - - - name: Coredns | Force delete the Coredns HelmChart - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - state: patched - definition: - metadata: - finalizers: [] diff --git a/ansible/playbooks/tasks/cruft.yaml b/ansible/playbooks/tasks/cruft.yaml deleted file mode 100644 index 66ae984f2..000000000 --- a/ansible/playbooks/tasks/cruft.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# 
https://github.com/k3s-io/k3s/issues/1971 -- name: Cruft - block: - - name: Cruft | Get list of custom mantifests - ansible.builtin.find: - paths: "{{ k3s_server_manifests_dir }}" - file_type: file - use_regex: true - patterns: ["^custom-.*"] - register: custom_manifest - - - name: Cruft | Delete custom mantifests - ansible.builtin.file: - path: "{{ item.path }}" - state: absent - loop: "{{ custom_manifest.files }}" - - - name: Cruft | Get list of custom addons - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - kind: Addon - register: addons_list - - - name: Cruft | Delete addons - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: "{{ item.metadata.name }}" - kind: Addon - namespace: kube-system - state: absent - loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}" diff --git a/ansible/playbooks/tasks/kubeconfig.yaml b/ansible/playbooks/tasks/kubeconfig.yaml deleted file mode 100644 index 56bf684e5..000000000 --- a/ansible/playbooks/tasks/kubeconfig.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Get absolute path to this Git repository # noqa: command-instead-of-module - ansible.builtin.command: git rev-parse --show-toplevel - delegate_to: localhost - become: false - run_once: true - register: repository_path - changed_when: false - check_mode: false - failed_when: repository_path.rc != 0 - -- name: Copy kubeconfig to the project directory - when: k3s_primary_control_node - ansible.builtin.fetch: - src: /etc/rancher/k3s/k3s.yaml - dest: "{{ repository_path.stdout }}/kubeconfig" - flat: true - -- name: Update kubeconfig with the correct load balancer address - delegate_to: localhost - become: false - run_once: true - ansible.builtin.replace: - path: "{{ repository_path.stdout }}/kubeconfig" - regexp: https://127.0.0.1:6443 - replace: "https://{{ k3s_registration_address }}:6443" diff --git a/ansible/playbooks/tasks/stale_containers.yaml b/ansible/playbooks/tasks/stale_containers.yaml deleted file mode 100644 index 9857d6bce..000000000 --- a/ansible/playbooks/tasks/stale_containers.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# https://github.com/k3s-io/k3s/issues/1900 -- name: Enabled Stale containers - when: stale_containers_state == "enabled" - block: - - name: Stale containers | Create systemd unit - ansible.builtin.copy: - src: files/stale-containers.service - dest: /etc/systemd/system/stale-containers.service - owner: root - group: root - mode: "0644" - - - name: Stale containers | Create systemd timer - ansible.builtin.copy: - src: files/stale-containers.timer - dest: /etc/systemd/system/stale-containers.timer - owner: root - group: root - mode: "0644" - - - name: Stale containers | Start the systemd timer - ansible.builtin.systemd: - name: stale-containers.timer - enabled: true - daemon_reload: true - masked: false - state: started - -- name: Disable Stale containers - when: stale_containers_state == "disabled" - block: - - name: Stale containers | Mask the systemd timer - ansible.builtin.systemd: - name: stale-containers.timer - masked: true diff --git a/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 deleted file mode 100644 index 9122c4f82..000000000 --- a/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 +++ /dev/null @@ -1,54 +0,0 @@ ---- -# https://docs.k3s.io/helm -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: cilium - namespace: kube-system -spec: - # renovate: datasource=helm - repo: 
https://helm.cilium.io/ - chart: cilium - version: 1.14.0 - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - autoDirectNodeRoutes: true - bpf: - masquerade: true - bgp: - enabled: false - cluster: - name: home-cluster - id: 1 - containerRuntime: - integration: containerd - socketPath: /var/run/k3s/containerd/containerd.sock - endpointRoutes: - enabled: true - hubble: - enabled: false - ipam: - mode: kubernetes - ipv4NativeRoutingCIDR: "{{ cluster_cidr }}" - k8sServiceHost: "{{ kube_vip_addr }}" - k8sServicePort: 6443 - kubeProxyReplacement: strict - kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 - l2announcements: - enabled: true - # https://github.com/cilium/cilium/issues/26586 - leaseDuration: 120s - leaseRenewDeadline: 60s - leaseRetryPeriod: 1s - loadBalancer: - algorithm: maglev - mode: dsr - localRedirectPolicy: true - operator: - replicas: 1 - rollOutPods: true - rollOutCiliumPods: true - securityContext: - privileged: true - tunnel: disabled diff --git a/ansible/playbooks/templates/custom-cilium-l2.yaml.j2 b/ansible/playbooks/templates/custom-cilium-l2.yaml.j2 deleted file mode 100644 index 4c889e628..000000000 --- a/ansible/playbooks/templates/custom-cilium-l2.yaml.j2 +++ /dev/null @@ -1,22 +0,0 @@ ---- -# https://docs.cilium.io/en/latest/network/l2-announcements -apiVersion: cilium.io/v2alpha1 -kind: CiliumL2AnnouncementPolicy -metadata: - name: policy -spec: - loadBalancerIPs: true - # NOTE: This might need to be set if you have more than one active NIC on your nodes - # interfaces: - # - ^eno[0-9]+ - nodeSelector: - matchLabels: - kubernetes.io/os: linux ---- -apiVersion: cilium.io/v2alpha1 -kind: CiliumLoadBalancerIPPool -metadata: - name: pool -spec: - cidrs: - - cidr: "{{ node_cidr }}" diff --git a/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2 b/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2 deleted file mode 100644 index 4a14e510a..000000000 --- a/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2 +++ /dev/null @@ -1,77 +0,0 @@ ---- -# https://docs.k3s.io/helm -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: coredns - namespace: kube-system -spec: - # renovate: datasource=helm - repo: https://coredns.github.io/helm - chart: coredns - version: 1.29.0 - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - fullnameOverride: coredns - replicaCount: 1 - k8sAppLabelOverride: kube-dns - service: - name: kube-dns - clusterIP: "{{ coredns_addr }}" - serviceAccount: - create: true - deployment: - annotations: - reloader.stakater.com/auto: "true" - servers: - - zones: - - zone: . - scheme: dns:// - use_tcp: true - port: 53 - plugins: - - name: log - - name: errors - - name: health - configBlock: |- - lameduck 5s - - name: ready - - name: kubernetes - parameters: cluster.local in-addr.arpa ip6.arpa - configBlock: |- - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - - name: prometheus - parameters: 0.0.0.0:9153 - - name: forward - parameters: . 
/etc/resolv.conf - - name: cache - parameters: 30 - - name: loop - - name: reload - - name: loadbalance - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - app.kubernetes.io/instance: coredns diff --git a/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2 b/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2 deleted file mode 100644 index 91c22005d..000000000 --- a/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2 +++ /dev/null @@ -1,57 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - name: kube-vip - namespace: kube-system - labels: - app.kubernetes.io/instance: kube-vip - app.kubernetes.io/name: kube-vip -spec: - containers: - - name: kube-vip - image: ghcr.io/kube-vip/kube-vip:v0.6.1 - imagePullPolicy: IfNotPresent - args: ["manager"] - env: - - name: address - value: "{{ kube_vip_addr }}" - - name: vip_arp - value: "true" - - name: port - value: "6443" - - name: vip_cidr - value: "32" - - name: cp_enable - value: "true" - - name: cp_namespace - value: kube-system - - name: vip_ddns - value: "false" - - name: svc_enable - value: "false" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - - name: prometheus_server - value: :2112 - securityContext: - capabilities: - add: ["NET_ADMIN", "NET_RAW"] - volumeMounts: - - mountPath: /etc/kubernetes/admin.conf - name: kubeconfig - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - hostNetwork: true - volumes: - - name: kubeconfig - hostPath: - path: /etc/rancher/k3s/k3s.yaml diff --git a/bootstrap/configure.yaml b/bootstrap/configure.yaml deleted file mode 100644 index 0f7689b71..000000000 --- a/bootstrap/configure.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Cluster Installation - hosts: localhost - connection: local - vars_files: - - defaults/main.yaml - - vars/config.yaml - - vars/addons.yaml - tasks: - - name: Get absolute path to this Git repository # noqa: command-instead-of-module - ansible.builtin.command: git rev-parse --show-toplevel - changed_when: false - check_mode: false - register: repository - failed_when: repository.rc != 0 - - - name: Set facts - ansible.builtin.set_fact: - repository_path: "{{ repository.stdout }}" - - - name: Verify configuration - ansible.builtin.include_tasks: tasks/validation/main.yaml - - - name: Template Sops configuration - ansible.builtin.include_tasks: tasks/sops/main.yaml - - - name: Template Ansible configuration - ansible.builtin.include_tasks: tasks/ansible/main.yaml - - - name: Template Kubernetes configuration - ansible.builtin.include_tasks: tasks/kubernetes/main.yaml - - - name: Template Kubernetes addon configuration - ansible.builtin.include_tasks: tasks/addons/main.yaml diff --git a/bootstrap/overrides/readme.partial.yaml.j2 b/bootstrap/overrides/readme.partial.yaml.j2 new file mode 100644 index 000000000..36dac44d3 --- /dev/null +++ b/bootstrap/overrides/readme.partial.yaml.j2 @@ -0,0 +1,5 @@ +<% Place user jinja template overrides in 
this file's directory %> +<% Docs: https://mirkolenz.github.io/makejinja/makejinja.html %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/makejinja.toml %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input1/not-empty.yaml.jinja %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input2/not-empty.yaml.jinja %> diff --git a/bootstrap/scripts/plugin.py b/bootstrap/scripts/plugin.py new file mode 100644 index 000000000..57a0682ba --- /dev/null +++ b/bootstrap/scripts/plugin.py @@ -0,0 +1,67 @@ +import importlib.util +import sys +from collections.abc import Callable +from pathlib import Path +from typing import Any + +from typing import Any +from netaddr import IPNetwork +from bcrypt import hashpw, gensalt + +import makejinja +import validation + +def encrypt(value: str) -> str: + return hashpw(value.encode(), gensalt(rounds=10)).decode("ascii") + + +def nthhost(value: str, query: int) -> str: + value = IPNetwork(value) + try: + nth = int(query) + if value.size > nth: + return str(value[nth]) + except ValueError: + return False + return value + + +def import_filter(file: Path) -> Callable[[dict[str, Any]], bool]: + module_path = file.relative_to(Path.cwd()).with_suffix("") + module_name = str(module_path).replace("/", ".") + spec = importlib.util.spec_from_file_location(module_name, file) + assert spec is not None + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + assert spec.loader is not None + spec.loader.exec_module(module) + return module.main + + +class Plugin(makejinja.plugin.Plugin): + def __init__(self, data: dict[str, Any], config: makejinja.config.Config): + self._data = data + self._config = config + + self._excluded_dirs: set[Path] = set() + for input_path in config.inputs: + for filter_file in input_path.rglob(".mjfilter.py"): + filter_func = import_filter(filter_file) + if filter_func(data) is False: + self._excluded_dirs.add(filter_file.parent) + + validation.validate(data) + + + def filters(self) -> makejinja.plugin.Filters: + return [encrypt, nthhost] + + + def path_filters(self): + return [self._mjfilter_func] + + + def _mjfilter_func(self, path: Path) -> bool: + return not any( + path.is_relative_to(excluded_dir) for excluded_dir in self._excluded_dirs + ) diff --git a/bootstrap/scripts/validation.py b/bootstrap/scripts/validation.py new file mode 100644 index 000000000..f0bd685ac --- /dev/null +++ b/bootstrap/scripts/validation.py @@ -0,0 +1,138 @@ +from functools import wraps +from shutil import which +from typing import Callable, cast +from zoneinfo import available_timezones +import netaddr +import re +import socket +import sys + +DISTRIBUTIONS = ["k3s", "talos"] +GLOBAL_CLI_TOOLS = ["age", "flux", "helmfile", "sops", "jq", "kubeconform", "kustomize"] +TALOS_CLI_TOOLS = ["talosctl", "talhelper"] +CLOUDFLARE_TOOLS = ["cloudflared"] + + +def required(*keys: str): + def wrapper_outter(func: Callable): + @wraps(func) + def wrapper(data: dict, *_, **kwargs) -> None: + for key in keys: + if data.get(key) is None: + raise ValueError(f"Missing required key {key}") + return func(*[data[key] for key in keys], **kwargs) + + return wrapper + + return wrapper_outter + + +def validate_python_version() -> None: + required_version = (3, 11, 0) + if sys.version_info < required_version: + raise ValueError(f"Python {sys.version_info} is below 3.11. 
Please upgrade.") + + +def validate_ip(ip: str) -> str: + try: + netaddr.IPAddress(ip) + except netaddr.core.AddrFormatError as e: + raise ValueError(f"Invalid IP address {ip}") from e + return ip + + +def validate_network(cidr: str, family: int) -> str: + try: + network = netaddr.IPNetwork(cidr) + if network.version != family: + raise ValueError(f"Invalid CIDR family {network.version}") + except netaddr.core.AddrFormatError as e: + raise ValueError(f"Invalid CIDR {cidr}") from e + return cidr + + +def validate_node(node: dict, node_cidr: str, distribution: str) -> None: + if not node.get("name"): + raise ValueError(f"A node is missing a name") + if not re.match(r"^[a-z0-9-\.]+$", node.get('name')): + raise ValueError(f"Node {node.get('name')} has an invalid name") + if distribution in ["k3s"]: + if not node.get("ssh_user") : + raise ValueError(f"Node {node.get('name')} is missing ssh_user") + if distribution in ["talos"]: + if not node.get("talos_disk"): + raise ValueError(f"Node {node.get('name')} is missing talos_disk") + if not node.get("talos_nic"): + raise ValueError(f"Node {node.get('name')} is missing talos_nic") + if not re.match(r"(?:[0-9a-fA-F]:?){12}", node.get("talos_nic")): + raise ValueError(f"Node {node.get('name')} has an invalid talos_nic, is this a MAC address?") + ip = validate_ip(node.get("address")) + if netaddr.IPAddress(ip, 4) not in netaddr.IPNetwork(node_cidr): + raise ValueError(f"Node {node.get('name')} is not in the node CIDR {node_cidr}") + port = 50000 if distribution in ["talos"] else 22 + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.settimeout(5) + result = sock.connect_ex((ip, port)) + if result != 0: + raise ValueError(f"Node {node.get('name')} port {port} is not open") + + +@required("bootstrap_distribution", "bootstrap_cloudflare") +def validate_cli_tools(distribution: str, cloudflare: dict, **_) -> None: + if distribution not in DISTRIBUTIONS: + raise ValueError(f"Invalid distribution {distribution}") + for tool in GLOBAL_CLI_TOOLS: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + for tool in TALOS_CLI_TOOLS if distribution in ["talos"] else []: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + for tool in CLOUDFLARE_TOOLS if cloudflare.get("enabled", False) else []: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + + +@required("bootstrap_distribution") +def validate_distribution(distribution: str, **_) -> None: + if distribution not in DISTRIBUTIONS: + raise ValueError(f"Invalid distribution {distribution}") + + +@required("bootstrap_timezone") +def validate_timezone(timezone: str, **_) -> None: + if timezone not in available_timezones(): + raise ValueError(f"Invalid timezone {timezone}") + + +@required("bootstrap_sops_age_pubkey") +def validate_age(key: str, **_) -> None: + if not re.match(r"^age1[a-z0-9]{0,58}$", key): + raise ValueError(f"Invalid Age public key {key}") + + +@required("bootstrap_node_network", "bootstrap_node_inventory", "bootstrap_distribution") +def validate_nodes(node_cidr: str, nodes: dict[list], distribution: str, **_) -> None: + node_cidr = validate_network(node_cidr, 4) + + controllers = [node for node in nodes if node.get('controller') == True] + if len(controllers) < 1: + raise ValueError(f"Must have at least one controller node") + if len(controllers) % 2 == 0: + raise ValueError(f"Must have an odd number of controller nodes") + for node in controllers: + validate_node(node, node_cidr, distribution) + + 
workers = [node for node in nodes if node.get('controller') == False] + for node in workers: + validate_node(node, node_cidr, distribution) + + +def validate(data: dict) -> None: + validate_python_version() + validate_cli_tools(data) + validate_distribution(data) + validate_timezone(data) + validate_age(data) + + if not data.get("skip_tests", False): + validate_nodes(data) diff --git a/bootstrap/tasks/addons/csi_driver_nfs.yaml b/bootstrap/tasks/addons/csi_driver_nfs.yaml deleted file mode 100644 index cfd0291a3..000000000 --- a/bootstrap/tasks/addons/csi_driver_nfs.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: csi-driver-nfs - addon_namespace: kube-system - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/discord_template_notifier.yaml b/bootstrap/tasks/addons/discord_template_notifier.yaml deleted file mode 100644 index 734065c32..000000000 --- a/bootstrap/tasks/addons/discord_template_notifier.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: discord-template-notifier - addon_namespace: default - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', 
'') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/grafana.yaml b/bootstrap/tasks/addons/grafana.yaml deleted file mode 100644 index 8ae41c2f7..000000000 --- a/bootstrap/tasks/addons/grafana.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: grafana - addon_namespace: monitoring - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/hajimari.yaml b/bootstrap/tasks/addons/hajimari.yaml deleted file mode 100644 index c8b976105..000000000 --- a/bootstrap/tasks/addons/hajimari.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: hajimari - addon_namespace: default - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -# https://github.com/ansible-collections/community.sops/issues/153 -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ 
lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/kube_prometheus_stack.yaml b/bootstrap/tasks/addons/kube_prometheus_stack.yaml deleted file mode 100644 index 0e7531da9..000000000 --- a/bootstrap/tasks/addons/kube_prometheus_stack.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: kube-prometheus-stack - addon_namespace: monitoring - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/kubernetes_dashboard.yaml b/bootstrap/tasks/addons/kubernetes_dashboard.yaml deleted file mode 100644 index 778c79679..000000000 --- a/bootstrap/tasks/addons/kubernetes_dashboard.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: kubernetes-dashboard - addon_namespace: monitoring - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true 
- with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/main.yaml b/bootstrap/tasks/addons/main.yaml deleted file mode 100644 index 23e4092bc..000000000 --- a/bootstrap/tasks/addons/main.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- - -- name: Process addon csi-driver-nfs - when: csi_driver_nfs.enabled | default(false) - ansible.builtin.include_tasks: csi_driver_nfs.yaml - -- name: Process addon hajimari - when: hajimari.enabled | default(false) - ansible.builtin.include_tasks: hajimari.yaml - -- name: Process addon grafana - when: grafana.enabled | default(false) - ansible.builtin.include_tasks: grafana.yaml - -- name: Process addon kubernetes-dashboard - when: kubernetes_dashboard.enabled | default(false) - ansible.builtin.include_tasks: kubernetes_dashboard.yaml - -- name: Process addon kube-prometheus-stack - when: kube_prometheus_stack.enabled | default(false) - ansible.builtin.include_tasks: kube_prometheus_stack.yaml - -- name: Process addon system-upgrade-controller - when: system_upgrade_controller.enabled | default(false) - ansible.builtin.include_tasks: system_upgrade_controller.yaml - -- name: Process addon weave-gitops - when: weave_gitops.enabled | default(false) - ansible.builtin.include_tasks: weave_gitops.yaml - -- name: Process addon discord-template-notifier - when: discord_template_notifier.enabled | default(false) - ansible.builtin.include_tasks: discord_template_notifier.yaml diff --git a/bootstrap/tasks/addons/system_upgrade_controller.yaml b/bootstrap/tasks/addons/system_upgrade_controller.yaml deleted file mode 100644 index f3ed64b79..000000000 --- a/bootstrap/tasks/addons/system_upgrade_controller.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: system-upgrade-controller - addon_namespace: system-upgrade - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/addons/weave_gitops.yaml b/bootstrap/tasks/addons/weave_gitops.yaml deleted file mode 100644 index 12d612406..000000000 --- a/bootstrap/tasks/addons/weave_gitops.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Set addon facts - ansible.builtin.set_fact: - addon_name: weave-gitops - 
addon_namespace: flux-system - -- name: Ensure directories exist for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template unencrypted files for {{ addon_namespace }}/{{ addon_name }} - when: item.state == 'file' and 'sops' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] - -- name: Template encrypted files for {{ addon_namespace }}/{{ addon_name }} - block: - - name: Template encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/apps/{{ addon_namespace }}/{{ addon_name }}/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/addons/{{ addon_name }}/"] diff --git a/bootstrap/tasks/ansible/main.yaml b/bootstrap/tasks/ansible/main.yaml deleted file mode 100644 index 5c5a286fd..000000000 --- a/bootstrap/tasks/ansible/main.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Ensure Kubernetes directories exist - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ repository_path }}/ansible/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/ansible/"] - -- name: Template Ansible unencrypted files - when: item.state == 'file' and 'sops' not in item.path and '.DS_Store' not in item.path - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/ansible/{{ item.path | regex_replace('.j2$', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/ansible/"] - -- name: Template Ansible encrypted files - block: - - name: Template Ansible encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/ansible/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/ansible/"] - - name: Template encrypted node secrets - community.sops.sops_encrypt: - path: "{{ repository_path }}/ansible/inventory/host_vars/{{ item.name }}.sops.yaml" - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', 'templates/node.sops.yaml.j2', template_vars=dict(password=item.password)) | from_yaml }}" - mode: "0644" - force: true - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.address }}" diff --git a/bootstrap/tasks/kubernetes/main.yaml b/bootstrap/tasks/kubernetes/main.yaml deleted file mode 100644 index 9c6f27ab0..000000000 --- a/bootstrap/tasks/kubernetes/main.yaml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- name: Ensure Kubernetes directories exist - when: item.state == 'directory' - ansible.builtin.file: - path: "{{ 
repository_path }}/kubernetes/{{ item.path }}" - state: directory - mode: "0755" - with_community.general.filetree: ["../templates/kubernetes/"] - -- name: Template Kubernetes unencrypted files - when: - - item.state == 'file' - - "'.DS_Store' not in item.path" - - "'sops' not in item.path" - - "'cluster-settings-user.yaml.j2' not in item.path" - - "'cluster-secrets-user.yaml.j2' not in item.path" - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/{{ item.path | regex_replace('.j2$', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/kubernetes/"] - -- name: Check if the cluster user settings file already exists - stat: - path: "{{ repository_path }}/kubernetes/flux/vars/cluster-settings-user.yaml" - register: cluster_settings_user - -- name: Template Kubernetes user cluster settings - when: - - item.state == 'file' - - "'cluster-settings-user.yaml' in item.path" - - not cluster_settings_user.stat.exists - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/{{ item.path | regex_replace('.j2$', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/kubernetes/"] - -- name: Check if the cluster user secrets file already exists - stat: - path: "{{ repository_path }}/kubernetes/flux/vars/cluster-secrets-user.yaml" - register: cluster_secrets_user - -- name: Template Kubernetes user cluster secrets - when: - - item.state == 'file' - - "'cluster-secrets-user.yaml' in item.path" - - not cluster_secrets_user.stat.exists - ansible.builtin.template: - src: "{{ item.src }}" - dest: "{{ repository_path }}/kubernetes/{{ item.path | regex_replace('.j2$', '') }}" - mode: "0644" - with_community.general.filetree: ["../templates/kubernetes/"] - -- name: Template Kubernetes encrypted files - block: - - name: Template Kubernetes encrypted files - when: item.state == 'file' and 'sops' in item.path - community.sops.sops_encrypt: - path: "{{ repository_path }}/kubernetes/{{ item.path | replace('.j2', '') }}" - encrypted_regex: ^(data|stringData)$ - age: ["{{ bootstrap_age_public_key }}"] - content_yaml: "{{ lookup('ansible.builtin.template', item.src) | from_yaml }}" - mode: "0644" - force: true - with_community.general.filetree: ["../templates/kubernetes/"] diff --git a/bootstrap/tasks/sops/main.yaml b/bootstrap/tasks/sops/main.yaml deleted file mode 100644 index 7aaaf6b9e..000000000 --- a/bootstrap/tasks/sops/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Template Sops configuration file - ansible.builtin.template: - src: "templates/.sops.yaml.j2" - dest: "{{ repository_path }}/.sops.yaml" - mode: "0644" diff --git a/bootstrap/tasks/validation/age.yaml b/bootstrap/tasks/validation/age.yaml deleted file mode 100644 index 81d6ae8e7..000000000 --- a/bootstrap/tasks/validation/age.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Query age key file - ansible.builtin.stat: - path: "{{ repository_path }}/age.key" - register: result - -- name: Check if age key file exists - ansible.builtin.assert: - that: result.stat.exists - success_msg: Age file {{ repository_path }}/age.key exists - fail_msg: Age file {{ repository_path }}/age.key does not exist - -- name: Query age key file contents - ansible.builtin.set_fact: - age_contents: "{{ lookup('file', repository_path + '/age.key') }}" - -- name: Check if age public keys match - ansible.builtin.assert: - that: bootstrap_age_public_key in age_contents - success_msg: Age public key {{ bootstrap_age_public_key }} exists - fail_msg: Age public 
key {{ bootstrap_age_public_key }} does not exist diff --git a/bootstrap/tasks/validation/cli.yaml b/bootstrap/tasks/validation/cli.yaml deleted file mode 100644 index 7e0215b9f..000000000 --- a/bootstrap/tasks/validation/cli.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Check for required CLI tools - ansible.builtin.shell: | - command -v {{ item }} >/dev/null 2>&1 - loop: [age, cloudflared, flux, sops] - changed_when: false - check_mode: false - register: result - failed_when: result.rc != 0 and result.rc != 127 diff --git a/bootstrap/tasks/validation/cloudflare.yaml b/bootstrap/tasks/validation/cloudflare.yaml deleted file mode 100644 index 4c2885ebe..000000000 --- a/bootstrap/tasks/validation/cloudflare.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Query Cloudflare zone - ansible.builtin.uri: - url: https://api.cloudflare.com/client/v4/zones?name={{ bootstrap_cloudflare_domain }}&status=active - headers: - Authorization: Bearer {{ bootstrap_cloudflare_token }} - Content-Type: application/json - timeout: 5 - return_content: true - body_format: json - register: result - -- name: Check if Cloudflare zone exists - ansible.builtin.assert: - that: result.json.success is true - success_msg: Cloudflare zone {{ bootstrap_cloudflare_domain }} exists - fail_msg: Cloudflare zone {{ bootstrap_cloudflare_domain }} does not exist - -- name: Query Cloudflared tunnel - ansible.builtin.uri: - url: https://api.cloudflare.com/client/v4/accounts/{{ bootstrap_cloudflare_account_tag }}/cfd_tunnel/{{ bootstrap_cloudflare_tunnel_id }} - headers: - Authorization: Bearer {{ bootstrap_cloudflare_token }} - Content-Type: application/json - timeout: 5 - return_content: true - body_format: json - register: result - -- name: Check if Cloudflared tunnel exists - ansible.builtin.assert: - that: result.json.success is true - success_msg: Cloudflared tunnel {{ bootstrap_cloudflare_tunnel_id }} exists - fail_msg: Cloudflared tunnel {{ bootstrap_cloudflare_tunnel_id }} does not exist diff --git a/bootstrap/tasks/validation/github.yaml b/bootstrap/tasks/validation/github.yaml deleted file mode 100644 index af514962e..000000000 --- a/bootstrap/tasks/validation/github.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Query Github username - ansible.builtin.uri: - url: https://api.github.com/users/{{ bootstrap_github_username }} - timeout: 5 - return_content: true - body_format: json - register: result - -- name: Check if username exists - ansible.builtin.assert: - that: result.json.login == bootstrap_github_username - success_msg: Github user {{ bootstrap_github_username }} exists - fail_msg: Github user {{ bootstrap_github_username }} does not exist - -- name: Query Github repo - ansible.builtin.uri: - url: https://api.github.com/repos/{{ bootstrap_github_username }}/{{ bootstrap_github_repository_name }} - timeout: 5 - return_content: true - body_format: json - register: result - -- name: Check if repo exists - ansible.builtin.assert: - that: result.json.full_name == bootstrap_github_username + '/' + bootstrap_github_repository_name - success_msg: Github repo {{ bootstrap_github_username }}/{{ bootstrap_github_repository_name }} exists - fail_msg: Github repo {{ bootstrap_github_username }}/{{ bootstrap_github_repository_name }} does not exist - -- name: Query Github repo branch - ansible.builtin.uri: - url: https://api.github.com/repos/{{ bootstrap_github_username }}/{{ bootstrap_github_repository_name }}/branches/{{ bootstrap_github_repository_branch }} - timeout: 5 - return_content: true - body_format: json - 
register: result - -- name: Check if repo branch exists - ansible.builtin.assert: - that: result.json.name == bootstrap_github_repository_branch - success_msg: Github repo branch {{ bootstrap_github_repository_branch }} exists - fail_msg: Github repo branch {{ bootstrap_github_repository_branch }} does not exist diff --git a/bootstrap/tasks/validation/main.yaml b/bootstrap/tasks/validation/main.yaml deleted file mode 100644 index 38b0db197..000000000 --- a/bootstrap/tasks/validation/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Verify configuration - ansible.builtin.include_tasks: "{{ task }}.yaml" - loop: [vars, age, cli, net, cloudflare, github] - loop_control: - loop_var: task diff --git a/bootstrap/tasks/validation/net.yaml b/bootstrap/tasks/validation/net.yaml deleted file mode 100644 index 98f81b120..000000000 --- a/bootstrap/tasks/validation/net.yaml +++ /dev/null @@ -1,157 +0,0 @@ ---- -- name: Verify master node count - ansible.builtin.assert: - that: - - bootstrap_nodes.master | length > 0 - - bootstrap_nodes.master | length is odd - success_msg: Master node count {{ bootstrap_nodes.master | length }} is correct. - fail_msg: Master node count {{ bootstrap_nodes.master | length }} is not greater than 0 or is not odd. - -- name: Verify node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr is ansible.utils.ipv4 - success_msg: Node CIDR {{ bootstrap_node_cidr }} is valid. - fail_msg: Node CIDR {{ bootstrap_node_cidr }} is invalid. - -- name: Verify cluster CIDR is ipv4 OR ipv6 - when: not bootstrap_ipv6_enabled | default(false) - ansible.builtin.assert: - that: bootstrap_cluster_cidr is ansible.utils.ipv4 or bootstrap_cluster_cidr is ansible.utils.ipv6 - success_msg: Cluster CIDR {{ bootstrap_cluster_cidr }} is valid. - fail_msg: Cluster CIDR {{ bootstrap_cluster_cidr }} is invalid. - -- name: Verify service CIDR is ipv4 OR ipv6 - when: not bootstrap_ipv6_enabled | default(false) - ansible.builtin.assert: - that: bootstrap_service_cidr is ansible.utils.ipv4 or bootstrap_service_cidr is ansible.utils.ipv6 - success_msg: Service CIDR {{ bootstrap_service_cidr }} is valid. - fail_msg: Service CIDR {{ bootstrap_service_cidr }} is invalid. - -- name: Verify cluster CIDR is ipv4 AND ipv6 - when: bootstrap_ipv6_enabled | default(false) - ansible.builtin.assert: - that: > - ( - bootstrap_cluster_cidr.split(',')[0] is ansible.utils.ipv4 or - bootstrap_cluster_cidr.split(',')[1] is ansible.utils.ipv4 - ) and ( - bootstrap_cluster_cidr.split(',')[1] is ansible.utils.ipv6 or - bootstrap_cluster_cidr.split(',')[0] is ansible.utils.ipv6 - ) - success_msg: Cluster CIDR {{ bootstrap_cluster_cidr }} is valid. - fail_msg: Cluster CIDR {{ bootstrap_cluster_cidr }} is invalid. - -- name: Verify service CIDR is ipv4 AND ipv6 - when: bootstrap_ipv6_enabled | default(false) - ansible.builtin.assert: - that: > - ( - bootstrap_service_cidr.split(',')[0] is ansible.utils.ipv4 or - bootstrap_service_cidr.split(',')[1] is ansible.utils.ipv4 - ) and ( - bootstrap_service_cidr.split(',')[1] is ansible.utils.ipv6 or - bootstrap_service_cidr.split(',')[0] is ansible.utils.ipv6 - ) - success_msg: Service CIDR {{ bootstrap_service_cidr }} is valid. - fail_msg: Service CIDR {{ bootstrap_service_cidr }} is invalid. - -- name: Verify k8s_gateway - ansible.builtin.assert: - that: bootstrap_k8s_gateway_addr is ansible.utils.ipv4 - success_msg: k8s_gateway address {{ bootstrap_k8s_gateway_addr }} is valid. - fail_msg: k8s_gateway address {{ bootstrap_k8s_gateway_addr }} is invalid. 
- -- name: Verify k8s_gateway in node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr | ansible.utils.network_in_usable(bootstrap_k8s_gateway_addr) - success_msg: k8s_gateway address {{ bootstrap_k8s_gateway_addr }} is within {{ bootstrap_node_cidr }}. - fail_msg: k8s_gateway address {{ bootstrap_k8s_gateway_addr }} is not within {{ bootstrap_node_cidr }}. - -- name: Verify internal ingress - ansible.builtin.assert: - that: bootstrap_internal_ingress_addr is ansible.utils.ipv4 - success_msg: internal ingress address {{ bootstrap_internal_ingress_addr }} is valid. - fail_msg: internal ingress address {{ bootstrap_internal_ingress_addr }} is invalid. - -- name: Verify internal ingress in node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr | ansible.utils.network_in_usable(bootstrap_internal_ingress_addr) - success_msg: internal ingress address {{ bootstrap_internal_ingress_addr }} is within {{ bootstrap_node_cidr }}. - fail_msg: internal ingress address {{ bootstrap_internal_ingress_addr }} is not within {{ bootstrap_node_cidr }}. - -- name: Verify external ingress - ansible.builtin.assert: - that: bootstrap_external_ingress_addr is ansible.utils.ipv4 - success_msg: external ingress address {{ bootstrap_external_ingress_addr }} is valid. - fail_msg: external ingress address {{ bootstrap_external_ingress_addr }} is invalid. - -- name: Verify external ingress in node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr | ansible.utils.network_in_usable(bootstrap_external_ingress_addr) - success_msg: external ingress address {{ bootstrap_external_ingress_addr }} is within {{ bootstrap_node_cidr }}. - fail_msg: external ingress address {{ bootstrap_external_ingress_addr }} is not within {{ bootstrap_node_cidr }}. - -- name: Verify kube-vip - ansible.builtin.assert: - that: bootstrap_kube_vip_addr is ansible.utils.ipv4 - success_msg: kube-vip address {{ bootstrap_kube_vip_addr }} is valid. - fail_msg: kube-vip address {{ bootstrap_kube_vip_addr }} is invalid. - -- name: Verify kube-vip in node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr | ansible.utils.network_in_usable(bootstrap_kube_vip_addr) - success_msg: kube-vip address {{ bootstrap_kube_vip_addr }} is within {{ bootstrap_node_cidr }}. - fail_msg: kube-vip address {{ bootstrap_kube_vip_addr }} is not within {{ bootstrap_node_cidr }}. - -- name: Verify all IP addresses are unique - ansible.builtin.assert: - that: > - [ - bootstrap_k8s_gateway_addr, - bootstrap_external_ingress_addr, - bootstrap_internal_ingress_addr, - bootstrap_kube_vip_addr - ] | unique | length == 4 - success_msg: All IP addresses are unique. - fail_msg: All IP addresses are not unique. - -- name: Verify nodes are not the same IPs as k8s_gateway, ingress external/internal or kube-vip - ansible.builtin.assert: - that: item.address not in (bootstrap_k8s_gateway_addr, bootstrap_external_ingress_addr, bootstrap_internal_ingress_addr, bootstrap_kube_vip_addr) - success_msg: Node address {{ item.address }} is different than k8s_gateway, ingress-nginx or kube-vip. - fail_msg: Node address {{ item.address }} is not different than k8s_gateway, ingress-nginx or kube-vip. - quiet: true - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.address }}" - -- name: Verify nodes are ipv4 - ansible.builtin.assert: - that: item.address is ansible.utils.ipv4 - success_msg: Node address {{ item.address }} is valid. - fail_msg: Node address {{ item.address }} is invalid. 
- quiet: true - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.address }}" - -- name: Verify nodes are in node CIDR - ansible.builtin.assert: - that: bootstrap_node_cidr | ansible.utils.network_in_usable(item.address) - success_msg: Node address {{ item.address }} is within {{ bootstrap_node_cidr }}. - fail_msg: Node address {{ item.address }} is not within {{ bootstrap_node_cidr }}. - quiet: true - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.address }}" - -- name: Verify SSH port is reachable - ansible.builtin.wait_for: - host: "{{ item.address }}" - port: 22 - search_regex: OpenSSH - timeout: 10 - connection: local - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.address }}" diff --git a/bootstrap/tasks/validation/vars.yaml b/bootstrap/tasks/validation/vars.yaml deleted file mode 100644 index 9e81b7ee7..000000000 --- a/bootstrap/tasks/validation/vars.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - -- name: Verify required bootstrap vars are set - ansible.builtin.assert: - that: item | default('', true) | trim != '' - success_msg: Required bootstrap var {{ item }} exists and is defined - fail_msg: Required bootstrap var {{ item }} does not exists or is not defined - loop: - - bootstrap_acme_email - - bootstrap_age_public_key - - bootstrap_cloudflare_account_tag - - bootstrap_cloudflare_domain - - bootstrap_cloudflare_token - - bootstrap_cloudflare_tunnel_id - - bootstrap_cloudflare_tunnel_secret - - bootstrap_cluster_cidr - - bootstrap_flux_github_webhook_token - - bootstrap_github_repository_name - - bootstrap_github_repository_branch - - bootstrap_github_username - - bootstrap_external_ingress_addr - - bootstrap_internal_ingress_addr - - bootstrap_ipv6_enabled - - bootstrap_k8s_gateway_addr - - bootstrap_kube_vip_addr - - bootstrap_node_cidr - - bootstrap_service_cidr - - bootstrap_timezone - -- name: Verify bootstrap node names are valid - ansible.builtin.assert: - that: item.name is match('^[a-z0-9-]+$') - success_msg: Node name {{ item.name }} is valid - fail_msg: Node name {{ item.name }} is not valid - loop: "{{ bootstrap_nodes.master + bootstrap_nodes.worker | default([]) }}" - loop_control: - label: "{{ item.name }}" diff --git a/bootstrap/templates/.sops.yaml.j2 b/bootstrap/templates/.sops.yaml.j2 index 8a86fb3b3..4cec52614 100644 --- a/bootstrap/templates/.sops.yaml.j2 +++ b/bootstrap/templates/.sops.yaml.j2 @@ -1,16 +1,20 @@ --- creation_rules: + #% if bootstrap_distribution in ["talos"] %# + - # IMPORTANT: This rule MUST be above the others + path_regex: talos/.*\.sops\.ya?ml + key_groups: + - age: + - "#{ bootstrap_sops_age_pubkey }#" + #% endif %# - path_regex: kubernetes/.*\.sops\.ya?ml encrypted_regex: "^(data|stringData)$" key_groups: - age: - - "{{ bootstrap_age_public_key }}" + - "#{ bootstrap_sops_age_pubkey }#" + #% if bootstrap_distribution in ["k3s"] %# - path_regex: ansible/.*\.sops\.ya?ml key_groups: - age: - - "{{ bootstrap_age_public_key }}" - # https://github.com/ansible-collections/community.sops/issues/153 - - path_regex: /dev/stdin - key_groups: - - age: - - "{{ bootstrap_age_public_key }}" + - "#{ bootstrap_sops_age_pubkey }#" + #% endif %# diff --git a/bootstrap/templates/addons/csi-driver-nfs/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/csi-driver-nfs/app/helmrelease.yaml.j2 deleted file mode 100644 index 69acf6dd8..000000000 --- 
a/bootstrap/templates/addons/csi-driver-nfs/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: csi-driver-nfs - namespace: kube-system -spec: - interval: 30m - chart: - spec: - chart: csi-driver-nfs - version: v4.6.0 - sourceRef: - kind: HelmRepository - name: csi-driver-nfs - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - externalSnapshotter: - enabled: false diff --git a/bootstrap/templates/addons/csi-driver-nfs/app/kustomization.yaml.j2 b/bootstrap/templates/addons/csi-driver-nfs/app/kustomization.yaml.j2 deleted file mode 100644 index 53906e15b..000000000 --- a/bootstrap/templates/addons/csi-driver-nfs/app/kustomization.yaml.j2 +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: kube-system -resources: - - ./helmrelease.yaml - - ./storageclass.yaml diff --git a/bootstrap/templates/addons/csi-driver-nfs/app/storageclass.yaml.j2 b/bootstrap/templates/addons/csi-driver-nfs/app/storageclass.yaml.j2 deleted file mode 100644 index d62dc847c..000000000 --- a/bootstrap/templates/addons/csi-driver-nfs/app/storageclass.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True -{% for item in csi_driver_nfs.storage_class %} ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ item.name }} -provisioner: nfs.csi.k8s.io -parameters: - server: {{ item.server }} - share: {{ item.share }} -reclaimPolicy: Delete -volumeBindingMode: Immediate -mountOptions: ["hard", "noatime"] -{% endfor %} diff --git a/bootstrap/templates/addons/discord-template-notifier/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/discord-template-notifier/app/helmrelease.yaml.j2 deleted file mode 100644 index 70837431e..000000000 --- a/bootstrap/templates/addons/discord-template-notifier/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,55 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: discord-template-notifier - namespace: default -spec: - interval: 15m - chart: - spec: - chart: app-template - version: 1.5.1 - interval: 30m - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - maxHistory: 2 - install: - createNamespace: true - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - image: - repository: ghcr.io/morphy2k/rss-forwarder - tag: 0.6.1 - env: - TZ: "${TIMEZONE}" - service: - main: &disableSvc - enabled: false - ingress: - main: *disableSvc - probes: - liveness: *disableSvc - readiness: *disableSvc - startup: *disableSvc - persistence: - data: - enabled: true - type: secret - name: discord-template-notifier-secret - resources: - requests: - cpu: 15m - memory: 110M - limits: - cpu: 15m - memory: 110M diff --git a/bootstrap/templates/addons/discord-template-notifier/app/secret.sops.yaml.j2 b/bootstrap/templates/addons/discord-template-notifier/app/secret.sops.yaml.j2 deleted file mode 100644 index 837d614de..000000000 --- a/bootstrap/templates/addons/discord-template-notifier/app/secret.sops.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: discord-template-notifier-secret - namespace: default -type: Opaque -stringData: - config.toml: |- - [feeds.github-template] - url = 
"https://github.com/onedr0p/flux-cluster-template/commits/main/.atom" - interval = "10m" - retry_limit = 5 - sink.type = "discord" - sink.url = "{{ discord_template_notifier.webhook_url }}" diff --git a/bootstrap/templates/addons/grafana/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/grafana/app/helmrelease.yaml.j2 deleted file mode 100644 index e5559dd86..000000000 --- a/bootstrap/templates/addons/grafana/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,174 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: grafana - namespace: monitoring -spec: - interval: 30m - chart: - spec: - chart: grafana - version: 7.3.7 - sourceRef: - kind: HelmRepository - name: grafana - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - dependsOn: - - name: local-path-provisioner - namespace: kube-system - values: - deploymentStrategy: - type: Recreate - admin: - existingSecret: grafana-admin-secret - env: - GF_EXPLORE_ENABLED: true - GF_SERVER_ROOT_URL: "https://grafana.${SECRET_DOMAIN}" - grafana.ini: - analytics: - check_for_updates: false - check_for_plugin_updates: false - reporting_enabled: false - dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: default - orgId: 1 - folder: "" - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default - - name: flux - orgId: 1 - folder: Flux - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/flux - - name: kubernetes - orgId: 1 - folder: Kubernetes - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/kubernetes - - name: nginx - orgId: 1 - folder: Nginx - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/nginx - datasources: - datasources.yaml: - apiVersion: 1 - deleteDatasources: - - { name: Prometheus, orgId: 1 } - datasources: - - name: Prometheus - type: prometheus - uid: prometheus - access: proxy - url: http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090 - jsonData: - prometheusType: Prometheus - isDefault: true - dashboards: - default: - cloudflared: - gnetId: 17457 # https://grafana.com/grafana/dashboards/17457?tab=revisions - revision: 6 - datasource: - - { name: DS_PROMETHEUS, value: Prometheus } - external-dns: - gnetId: 15038 # https://grafana.com/grafana/dashboards/15038?tab=revisions - revision: 1 - datasource: Prometheus - cert-manager: - url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json - datasource: Prometheus - node-exporter-full: - gnetId: 1860 # https://grafana.com/grafana/dashboards/1860?tab=revisions - revision: 31 - datasource: Prometheus - flux: - flux-cluster: - url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/cluster.json - datasource: Prometheus - flux-control-plane: - url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/control-plane.json - datasource: Prometheus - kubernetes: - kubernetes-api-server: - url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json - datasource: Prometheus - kubernetes-coredns: - url: 
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json - datasource: Prometheus - kubernetes-global: - url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json - datasource: Prometheus - kubernetes-namespaces: - url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json - datasource: Prometheus - kubernetes-nodes: - url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json - datasource: Prometheus - kubernetes-pods: - url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json - datasource: Prometheus - nginx: - nginx: - url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json - datasource: Prometheus - nginx-request-handling-performance: - url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json - datasource: Prometheus - sidecar: - dashboards: - enabled: true - searchNamespace: ALL - labelValue: "" - label: grafana_dashboard - folderAnnotation: grafana_folder - provider: - disableDelete: true - foldersFromFilesStructure: true - datasources: - enabled: true - searchNamespace: ALL - labelValue: "" - serviceMonitor: - enabled: true - ingress: - enabled: true - ingressClassName: internal - annotations: - hajimari.io/icon: simple-icons:grafana - hosts: - - &host "grafana.${SECRET_DOMAIN}" - tls: - - hosts: - - *host - persistence: - enabled: true - storageClassName: local-path - testFramework: - enabled: false diff --git a/bootstrap/templates/addons/grafana/app/secret.sops.yaml.j2 b/bootstrap/templates/addons/grafana/app/secret.sops.yaml.j2 deleted file mode 100644 index f458b9ce4..000000000 --- a/bootstrap/templates/addons/grafana/app/secret.sops.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: grafana-admin-secret - namespace: monitoring -stringData: - admin-user: admin - admin-password: "{{ grafana.password }}" diff --git a/bootstrap/templates/addons/hajimari/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/hajimari/app/helmrelease.yaml.j2 deleted file mode 100644 index d1b4de807..000000000 --- a/bootstrap/templates/addons/hajimari/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,66 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: hajimari - namespace: default -spec: - interval: 30m - chart: - spec: - chart: hajimari - version: 2.0.2 - sourceRef: - kind: HelmRepository - name: hajimari - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - hajimari: - title: Apps - darkTheme: espresso - alwaysTargetBlank: true - showGreeting: false - showAppGroups: false - showAppStatus: false - showBookmarkGroups: false - showGlobalBookmarks: false - showAppUrls: false - defaultEnable: true - namespaceSelector: - matchNames: - - default - - monitoring - ingress: - main: - enabled: true - ingressClassName: internal - annotations: - hajimari.io/enable: "false" - hosts: - - host: &host "hajimari.${SECRET_DOMAIN}" - paths: - - path: / - pathType: Prefix - tls: - - hosts: - - *host - podAnnotations: - configmap.reloader.stakater.com/reload: hajimari-settings - persistence: - data: - enabled: 
true - type: emptyDir - resources: - requests: - cpu: 100m - memory: 128M diff --git a/bootstrap/templates/addons/kube-prometheus-stack/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/kube-prometheus-stack/app/helmrelease.yaml.j2 deleted file mode 100644 index 20890db2a..000000000 --- a/bootstrap/templates/addons/kube-prometheus-stack/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,150 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: kube-prometheus-stack - namespace: monitoring -spec: - interval: 30m - timeout: 15m - chart: - spec: - chart: kube-prometheus-stack - version: 48.1.2 - sourceRef: - kind: HelmRepository - name: prometheus-community - namespace: flux-system - maxHistory: 2 - install: - crds: CreateReplace - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - crds: CreateReplace - remediation: - retries: 3 - uninstall: - keepHistory: false - dependsOn: - - name: local-path-provisioner - namespace: kube-system - values: - crds: - enabled: true - cleanPrometheusOperatorObjectNames: true - alertmanager: - enabled: false - kube-state-metrics: - metricLabelsAllowlist: - - "deployments=[*]" - - "persistentvolumeclaims=[*]" - - "pods=[*]" - prometheus: - monitor: - enabled: true - relabelings: - - action: replace - sourceLabels: ["__meta_kubernetes_pod_node_name"] - regex: ^(.*)$ - replacement: $1 - targetLabel: kubernetes_node - kubelet: - enabled: true - serviceMonitor: - metricRelabelings: - # Remove duplicate labels provided by k3s - - action: keep - sourceLabels: ["__name__"] - regex: (apiserver_audit|apiserver_client|apiserver_delegated|apiserver_envelope|apiserver_storage|apiserver_webhooks|authentication_token|cadvisor_version|container_blkio|container_cpu|container_fs|container_last|container_memory|container_network|container_oom|container_processes|container|csi_operations|disabled_metric|get_token|go|hidden_metric|kubelet_certificate|kubelet_cgroup|kubelet_container|kubelet_containers|kubelet_cpu|kubelet_device|kubelet_graceful|kubelet_http|kubelet_lifecycle|kubelet_managed|kubelet_node|kubelet_pleg|kubelet_pod|kubelet_run|kubelet_running|kubelet_runtime|kubelet_server|kubelet_started|kubelet_volume|kubernetes_build|kubernetes_feature|machine_cpu|machine_memory|machine_nvm|machine_scrape|node_namespace|plugin_manager|prober_probe|process_cpu|process_max|process_open|process_resident|process_start|process_virtual|registered_metric|rest_client|scrape_duration|scrape_samples|scrape_series|storage_operation|volume_manager|volume_operation|workqueue)_(.+) - - action: replace - sourceLabels: ["node"] - targetLabel: instance - # Drop high cardinality labels - - action: labeldrop - regex: (uid) - - action: labeldrop - regex: (id|name) - - action: drop - sourceLabels: ["__name__"] - regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count) - kubeApiServer: - enabled: true - serviceMonitor: - metricRelabelings: - # Remove duplicate labels provided by k3s - - action: keep - sourceLabels: ["__name__"] - regex: 
(aggregator_openapi|aggregator_unavailable|apiextensions_openapi|apiserver_admission|apiserver_audit|apiserver_cache|apiserver_cel|apiserver_client|apiserver_crd|apiserver_current|apiserver_envelope|apiserver_flowcontrol|apiserver_init|apiserver_kube|apiserver_longrunning|apiserver_request|apiserver_requested|apiserver_response|apiserver_selfrequest|apiserver_storage|apiserver_terminated|apiserver_tls|apiserver_watch|apiserver_webhooks|authenticated_user|authentication|disabled_metric|etcd_bookmark|etcd_lease|etcd_request|field_validation|get_token|go|grpc_client|hidden_metric|kube_apiserver|kubernetes_build|kubernetes_feature|node_authorizer|pod_security|process_cpu|process_max|process_open|process_resident|process_start|process_virtual|registered_metric|rest_client|scrape_duration|scrape_samples|scrape_series|serviceaccount_legacy|serviceaccount_stale|serviceaccount_valid|watch_cache|workqueue)_(.+) - # Drop high cardinality labels - - action: drop - sourceLabels: ["__name__"] - regex: (apiserver|etcd|rest_client)_request(|_sli|_slo)_duration_seconds_bucket - - action: drop - sourceLabels: ["__name__"] - regex: (apiserver_response_sizes_bucket|apiserver_watch_events_sizes_bucket) - kubeControllerManager: - enabled: true - endpoints: &cp - {% for item in bootstrap_nodes.master %} - - {{ item.address }} - {% endfor %} - serviceMonitor: - metricRelabelings: - # Remove duplicate labels provided by k3s - - action: keep - sourceLabels: ["__name__"] - regex: "(apiserver_audit|apiserver_client|apiserver_delegated|apiserver_envelope|apiserver_storage|apiserver_webhooks|attachdetach_controller|authenticated_user|authentication|cronjob_controller|disabled_metric|endpoint_slice|ephemeral_volume|garbagecollector_controller|get_token|go|hidden_metric|job_controller|kubernetes_build|kubernetes_feature|leader_election|node_collector|node_ipam|process_cpu|process_max|process_open|process_resident|process_start|process_virtual|pv_collector|registered_metric|replicaset_controller|rest_client|retroactive_storageclass|root_ca|running_managed|scrape_duration|scrape_samples|scrape_series|service_controller|storage_count|storage_operation|ttl_after|volume_operation|workqueue)_(.+)" - kubeEtcd: - enabled: true - endpoints: *cp - kubeScheduler: - enabled: true - endpoints: *cp - serviceMonitor: - metricRelabelings: - # Remove duplicate labels provided by k3s - - action: keep - sourceLabels: ["__name__"] - regex: "(apiserver_audit|apiserver_client|apiserver_delegated|apiserver_envelope|apiserver_storage|apiserver_webhooks|authenticated_user|authentication|disabled_metric|go|hidden_metric|kubernetes_build|kubernetes_feature|leader_election|process_cpu|process_max|process_open|process_resident|process_start|process_virtual|registered_metric|rest_client|scheduler|scrape_duration|scrape_samples|scrape_series|workqueue)_(.+)" - kubeProxy: - enabled: false # Disabled due to eBPF - prometheus: - ingress: - enabled: true - ingressClassName: internal - annotations: - hajimari.io/appName: Prometheus - hajimari.io/icon: simple-icons:prometheus - pathType: Prefix - hosts: - - &host "prometheus.${SECRET_DOMAIN}" - tls: - - hosts: - - *host - prometheusSpec: - ruleSelectorNilUsesHelmValues: false - serviceMonitorSelectorNilUsesHelmValues: false - podMonitorSelectorNilUsesHelmValues: false - probeSelectorNilUsesHelmValues: false - scrapeConfigSelectorNilUsesHelmValues: false - enableAdminAPI: true - walCompression: true - retentionSize: 8GB - storageSpec: - volumeClaimTemplate: - spec: - storageClassName: local-path - resources: 
- requests: - storage: 10Gi - grafana: - enabled: false - forceDeployDashboards: true - sidecar: - dashboards: - multicluster: - etcd: - enabled: true diff --git a/bootstrap/templates/addons/kubernetes-dashboard/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/kubernetes-dashboard/app/helmrelease.yaml.j2 deleted file mode 100644 index c7ca1bbdc..000000000 --- a/bootstrap/templates/addons/kubernetes-dashboard/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,44 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: kubernetes-dashboard - namespace: monitoring -spec: - interval: 30m - chart: - spec: - chart: kubernetes-dashboard - version: 7.4.0 - sourceRef: - kind: HelmRepository - name: kubernetes-dashboard - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - extraArgs: - - --enable-insecure-login - - --enable-skip-login - - --disable-settings-authorizer - ingress: - enabled: true - className: internal - annotations: - hajimari.io/icon: mdi:kubernetes - hosts: - - &host "kubernetes.${SECRET_DOMAIN}" - tls: - - hosts: - - *host - secretName: kubernetes-dashboard-tls - metricsScraper: - enabled: true diff --git a/bootstrap/templates/addons/kubernetes-dashboard/app/kustomization.yaml.j2 b/bootstrap/templates/addons/kubernetes-dashboard/app/kustomization.yaml.j2 deleted file mode 100644 index f3f3cf605..000000000 --- a/bootstrap/templates/addons/kubernetes-dashboard/app/kustomization.yaml.j2 +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: monitoring -resources: - - ./rbac.yaml - - ./helmrelease.yaml diff --git a/bootstrap/templates/addons/kubernetes-dashboard/app/rbac.yaml.j2 b/bootstrap/templates/addons/kubernetes-dashboard/app/rbac.yaml.j2 deleted file mode 100644 index 4aecb4385..000000000 --- a/bootstrap/templates/addons/kubernetes-dashboard/app/rbac.yaml.j2 +++ /dev/null @@ -1,41 +0,0 @@ -# For dashboard sign in token: -# kubectl -n monitoring get secret kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kubernetes-dashboard - namespace: monitoring - labels: - app.kubernetes.io/managed-by: Helm - annotations: - meta.helm.sh/release-name: kubernetes-dashboard - meta.helm.sh/release-namespace: monitoring -secrets: - - name: kubernetes-dashboard ---- -apiVersion: v1 -kind: Secret -type: kubernetes.io/service-account-token -metadata: - name: kubernetes-dashboard - namespace: monitoring - labels: - app.kubernetes.io/managed-by: Helm - annotations: - meta.helm.sh/release-name: kubernetes-dashboard - meta.helm.sh/release-namespace: monitoring - kubernetes.io/service-account.name: kubernetes-dashboard ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:kubernetes-dashboard -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: monitoring diff --git a/bootstrap/templates/addons/system-upgrade-controller/app/kustomization.yaml.j2 b/bootstrap/templates/addons/system-upgrade-controller/app/kustomization.yaml.j2 deleted file mode 100644 index d4bee706f..000000000 --- a/bootstrap/templates/addons/system-upgrade-controller/app/kustomization.yaml.j2 +++ /dev/null @@ -1,22 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: 
Kustomization -resources: - # renovate: datasource=github-releases depName=rancher/system-upgrade-controller - - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.4/crd.yaml - - https://github.com/rancher/system-upgrade-controller?ref=v0.13.4 -images: - - name: rancher/system-upgrade-controller - newTag: v0.13.4 -labels: - - includeSelectors: true - pairs: - app.kubernetes.io/instance: system-upgrade-controller - app.kubernetes.io/name: system-upgrade-controller -patches: - - patch: | - $patch: delete - apiVersion: v1 - kind: Namespace - metadata: - name: system-upgrade diff --git a/bootstrap/templates/addons/system-upgrade-controller/ks.yaml.j2 b/bootstrap/templates/addons/system-upgrade-controller/ks.yaml.j2 deleted file mode 100644 index e4ff61911..000000000 --- a/bootstrap/templates/addons/system-upgrade-controller/ks.yaml.j2 +++ /dev/null @@ -1,34 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-system-upgrade-controller - namespace: flux-system -spec: - path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: true - interval: 30m - retryInterval: 1m - timeout: 5m ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-system-upgrade-controller-plans - namespace: flux-system -spec: - dependsOn: - - name: cluster-apps-system-upgrade-controller - path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/plans - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/bootstrap/templates/addons/system-upgrade-controller/plans/agent.yaml.j2 b/bootstrap/templates/addons/system-upgrade-controller/plans/agent.yaml.j2 deleted file mode 100644 index fb8d8d1fd..000000000 --- a/bootstrap/templates/addons/system-upgrade-controller/plans/agent.yaml.j2 +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: agent - namespace: system-upgrade -spec: - # renovate: datasource=github-releases depName=k3s-io/k3s - version: "v1.29.3+k3s1" - serviceAccountName: system-upgrade - concurrency: 1 - nodeSelector: - matchExpressions: - - {key: node-role.kubernetes.io/control-plane, operator: DoesNotExist} - prepare: - image: rancher/k3s-upgrade - args: ["prepare", "server"] - upgrade: - image: rancher/k3s-upgrade diff --git a/bootstrap/templates/addons/system-upgrade-controller/plans/server.yaml.j2 b/bootstrap/templates/addons/system-upgrade-controller/plans/server.yaml.j2 deleted file mode 100644 index db7923533..000000000 --- a/bootstrap/templates/addons/system-upgrade-controller/plans/server.yaml.j2 +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: server - namespace: system-upgrade -spec: - # renovate: datasource=github-releases depName=k3s-io/k3s - version: "v1.29.3+k3s1" - serviceAccountName: system-upgrade - concurrency: 1 - cordon: true - nodeSelector: - matchExpressions: - - { key: node-role.kubernetes.io/control-plane, operator: Exists } - tolerations: - - { effect: NoSchedule, operator: Exists } - - { effect: NoExecute, operator: Exists } - - { key: node-role.kubernetes.io/control-plane, effect: NoSchedule, operator: Exists } - - { key: node-role.kubernetes.io/master, effect: NoSchedule, operator: Exists } - - { key: node-role.kubernetes.io/etcd, effect: NoExecute, operator: Exists } - - 
{ key: CriticalAddonsOnly, operator: Exists } - upgrade: - image: rancher/k3s-upgrade diff --git a/bootstrap/templates/addons/weave-gitops/app/helmrelease.yaml.j2 b/bootstrap/templates/addons/weave-gitops/app/helmrelease.yaml.j2 deleted file mode 100644 index 27a1f08e6..000000000 --- a/bootstrap/templates/addons/weave-gitops/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,53 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: weave-gitops - namespace: flux-system -spec: - interval: 30m - chart: - spec: - chart: weave-gitops - version: 4.0.36 - sourceRef: - kind: HelmRepository - name: weave-gitops - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - adminUser: - create: true - createSecret: false - username: admin - ingress: - enabled: true - className: internal - annotations: - hajimari.io/icon: sawtooth-wave - hosts: - - host: &host "gitops.${SECRET_DOMAIN}" - paths: - - path: / - pathType: Prefix - tls: - - hosts: - - *host - networkPolicy: - create: false - metrics: - enabled: true - rbac: - create: true - impersonationResourceNames: ["admin"] - podAnnotations: - secret.reloader.stakater.com/reload: cluster-user-auth diff --git a/bootstrap/templates/addons/weave-gitops/app/kustomization.yaml.j2 b/bootstrap/templates/addons/weave-gitops/app/kustomization.yaml.j2 deleted file mode 100644 index 3805ad867..000000000 --- a/bootstrap/templates/addons/weave-gitops/app/kustomization.yaml.j2 +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: flux-system -resources: - - ./secret.sops.yaml - - ./helmrelease.yaml diff --git a/bootstrap/templates/addons/weave-gitops/app/secret.sops.yaml.j2 b/bootstrap/templates/addons/weave-gitops/app/secret.sops.yaml.j2 deleted file mode 100644 index ae0237843..000000000 --- a/bootstrap/templates/addons/weave-gitops/app/secret.sops.yaml.j2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: cluster-user-auth - namespace: flux-system -type: Opaque -stringData: - username: admin - password: "{{ weave_gitops.password | password_hash('bcrypt', rounds=10) }}" diff --git a/bootstrap/templates/addons/weave-gitops/ks.yaml.j2 b/bootstrap/templates/addons/weave-gitops/ks.yaml.j2 deleted file mode 100644 index efb4a0c6d..000000000 --- a/bootstrap/templates/addons/weave-gitops/ks.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-weave-gitops - namespace: flux-system -spec: - path: ./kubernetes/apps/flux-system/weave-gitops/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/bootstrap/templates/ansible/.ansible-lint.j2 b/bootstrap/templates/ansible/.ansible-lint.j2 new file mode 100644 index 000000000..36f6b4414 --- /dev/null +++ b/bootstrap/templates/ansible/.ansible-lint.j2 @@ -0,0 +1,9 @@ +skip_list: + - yaml[commas] + - yaml[line-length] + - var-naming +warn_list: + - command-instead-of-shell + - deprecated-command-syntax + - experimental + - no-changed-when diff --git a/bootstrap/templates/ansible/.mjfilter.py b/bootstrap/templates/ansible/.mjfilter.py new file mode 100644 index 000000000..0979f9a64 --- /dev/null +++ b/bootstrap/templates/ansible/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: 
data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 new file mode 100644 index 000000000..875c4c615 --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 @@ -0,0 +1,36 @@ +--- +k3s_control_node: true +k3s_server: + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + cluster-cidr: "#{ bootstrap_pod_network.split(',')[0] }#,#{ bootstrap_pod_network.split(',')[1] }#" + service-cidr: "#{ bootstrap_service_network.split(',')[0] }#,#{ bootstrap_service_network.split(',')[1] }#" + #% else %# + cluster-cidr: "#{ bootstrap_pod_network }#" + service-cidr: "#{ bootstrap_service_network }#" + #% endif %# + disable: ["flannel", "local-storage", "metrics-server", "servicelb", "traefik"] + disable-cloud-controller: true + disable-kube-proxy: true + disable-network-policy: true + docker: false + embedded-registry: true + etcd-expose-metrics: true + flannel-backend: "none" + kube-apiserver-arg: + - "anonymous-auth=true" + kube-controller-manager-arg: + - "bind-address=0.0.0.0" + kube-scheduler-arg: + - "bind-address=0.0.0.0" + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" + #% else %# + node-ip: "{{ ansible_host }}" + #% endif %# + secrets-encryption: true + tls-san: + - "#{ bootstrap_controllers_vip }#" + #% for item in bootstrap_tls_sans %# + - "#{ item }#" + #% endfor %# + write-kubeconfig-mode: "644" diff --git a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 index 5df486f5e..fcac87dc7 100644 --- a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 +++ b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 @@ -1,37 +1,23 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- -# -# Below vars are for the xanmanning.k3s role -# ...see https://github.com/PyratLabs/ansible-role-k3s -# - -# renovate: datasource=github-releases depName=k3s-io/k3s -k3s_release_version: "v1.29.3+k3s1" -k3s_install_hard_links: true k3s_become: true k3s_etcd_datastore: true -k3s_use_unsupported_config: true -k3s_registration_address: "{% raw %}{{ kube_vip_addr }}{% endraw %}" -k3s_server_manifests_urls: - # Kube-vip RBAC - - url: https://raw.githubusercontent.com/kube-vip/website/main/content/manifests/rbac.yaml - filename: kube-vip-rbac.yaml - # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release) - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml - filename: custom-prometheus-podmonitors.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml - filename: custom-prometheus-prometheusrules.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml - filename: custom-prometheus-scrapeconfigs.yaml - - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml - filename: custom-prometheus-servicemonitors.yaml -# 
/var/lib/rancher/k3s/server/manifests +k3s_install_hard_links: true +k3s_registration_address: "#{ bootstrap_controllers_vip }#" +k3s_registries: + mirrors: + docker.io: + gcr.io: + ghcr.io: + k8s.gcr.io: + lscr.io: + mcr.microsoft.com: + public.ecr.aws: + quay.io: + registry.k8s.io: +# renovate: datasource=github-releases depName=k3s-io/k3s +k3s_release_version: v1.30.0+k3s1 k3s_server_manifests_templates: - - custom-cilium-helmchart.yaml.j2 - {% if not bootstrap_ipv6_enabled | default(false) %} - - custom-cilium-l2.yaml.j2 - {% endif %} - - custom-coredns-helmchart.yaml.j2 -# /var/lib/rancher/k3s/agent/pod-manifests -k3s_server_pod_manifests_templates: - - kube-vip-static-pod.yaml.j2 + - custom-cilium-helmchart.yaml + - custom-kube-vip-ds.yaml + - custom-kube-vip-rbac.yaml +k3s_use_unsupported_config: true diff --git a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/supplemental.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/supplemental.yaml.j2 deleted file mode 100644 index 816dc3f58..000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/supplemental.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -timezone: "{{ bootstrap_timezone }}" -github_username: "{{ bootstrap_github_username }}" -coredns_addr: "{{ bootstrap_service_cidr.split(',')[0] | ansible.utils.nthhost(10) }}" -kube_vip_addr: "{{ bootstrap_kube_vip_addr }}" -cluster_cidr: "{{ bootstrap_cluster_cidr.split(',')[0] }}" -service_cidr: "{{ bootstrap_service_cidr.split(',')[0] }}" -node_cidr: "{{ bootstrap_node_cidr }}" -{% if bootstrap_ipv6_enabled | default(false) %} -cluster_cidr_v6: "{{ bootstrap_cluster_cidr.split(',')[1] }}" -service_cidr_v6: "{{ bootstrap_service_cidr.split(',')[1] }}" -{% endif %} diff --git a/bootstrap/templates/ansible/inventory/group_vars/master/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/master/main.yaml.j2 deleted file mode 100644 index 70764f632..000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/master/main.yaml.j2 +++ /dev/null @@ -1,36 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ -# https://github.com/PyratLabs/ansible-role-k3s - -k3s_control_node: true -k3s_server: -{% if bootstrap_ipv6_enabled | default(false) %} - node-ip: "{% raw %}{{ ansible_host }},{{ ansible_default_ipv6.address }}{% endraw %}" -{% else %} - node-ip: "{% raw %}{{ ansible_host }}{% endraw %}" -{% endif %} - tls-san: - - "{% raw %}{{ kube_vip_addr }}{% endraw %}" - docker: false - flannel-backend: "none" # This needs to be in quotes - disable: - - coredns # Disable coredns - replaced with Coredns Helm Chart - - flannel # Disable flannel - replaced with Cilium Helm Chart - - local-storage # Disable local-path-provisioner - installed with Flux - - metrics-server # Disable metrics-server - installed with Flux - - servicelb # Disable servicelb - replaced with Cilium Helm Chart - - traefik # Disable traefik - replaced with ingress-nginx and installed with Flux - disable-network-policy: true - disable-cloud-controller: true - disable-kube-proxy: true # Cilium uses eBPF - write-kubeconfig-mode: "644" - cluster-cidr: "{% raw %}{{ cluster_cidr }}{% endraw %}" - service-cidr: "{% raw %}{{ service_cidr }}{% endraw %}" - etcd-expose-metrics: true # Required to monitor etcd with kube-prometheus-stack - kube-controller-manager-arg: - - "bind-address=0.0.0.0" # Required to monitor kube-controller-manager 
with kube-prometheus-stack - kube-scheduler-arg: - - "bind-address=0.0.0.0" # Required to monitor kube-scheduler with kube-prometheus-stack - kube-apiserver-arg: - - "anonymous-auth=true" # Required for HAProxy health-checks diff --git a/bootstrap/templates/ansible/inventory/group_vars/worker/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/worker/main.yaml.j2 deleted file mode 100644 index 687cf0dc3..000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/worker/main.yaml.j2 +++ /dev/null @@ -1,12 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ -# https://github.com/PyratLabs/ansible-role-k3s - -k3s_control_node: false -k3s_agent: -{% if bootstrap_ipv6_enabled | default(false) %} - node-ip: "{% raw %}{{ ansible_host }},{{ ansible_default_ipv6.address }}{% endraw %}" -{% else %} - node-ip: "{% raw %}{{ ansible_host }}{% endraw %}" -{% endif %} diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py b/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py new file mode 100644 index 000000000..8fb17eac5 --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py @@ -0,0 +1,10 @@ +main = lambda data: ( + data.get("bootstrap_distribution", "k3s") in ["k3s"] and + len( + list( + filter( + lambda item: "controller" in item and item["controller"] is False, data.get("bootstrap_node_inventory") + ) + ) + ) > 0 +) diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 new file mode 100644 index 000000000..61622c98b --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 @@ -0,0 +1,8 @@ +--- +k3s_control_node: false +k3s_agent: + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" + #% else %# + node-ip: "{{ ansible_host }}" + #% endif %# diff --git a/bootstrap/templates/ansible/inventory/host_vars/.gitkeep.j2 b/bootstrap/templates/ansible/inventory/host_vars/.gitkeep.j2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/bootstrap/templates/ansible/inventory/hosts.yaml.j2 b/bootstrap/templates/ansible/inventory/hosts.yaml.j2 index 4499a0229..4df83a280 100644 --- a/bootstrap/templates/ansible/inventory/hosts.yaml.j2 +++ b/bootstrap/templates/ansible/inventory/hosts.yaml.j2 @@ -1,20 +1,29 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- kubernetes: children: - master: + controllers: hosts: - {% for item in bootstrap_nodes.master %} - {{ item.name }}: - ansible_user: {{ item.username }} - ansible_host: {{ item.address }} - {% endfor %} - {% if bootstrap_nodes.worker | default([]) | length > 0 %} - worker: + #% for item in bootstrap_node_inventory %# + #% if item.controller %# + "#{ item.name }#": + ansible_user: "#{ item.ssh_user }#" + ansible_host: "#{ item.address }#" + #% if item.ssh_key %# + ansible_ssh_private_key_file: "#{ item.ssh_key }#" + #% endif %# + #% endif %# + #% endfor %# + #% if bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length %# + workers: hosts: - {% for item in bootstrap_nodes.worker %} - {{ item.name }}: - ansible_user: {{ item.username }} - ansible_host: {{ item.address }} - {% endfor %} - {% endif %} + #% for item in bootstrap_node_inventory %# + #% if not item.controller %# + "#{ item.name }#": + ansible_user: "#{ item.ssh_user }#" 
+ ansible_host: "#{ item.address }#" + #% if item.ssh_key %# + ansible_ssh_private_key_file: "#{ item.ssh_key }#" + #% endif %# + #% endif %# + #% endfor %# + #% endif %# diff --git a/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 index 24e502bb8..507b7b295 100644 --- a/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 @@ -1,7 +1,6 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- - name: Cluster Installation - hosts: all + hosts: kubernetes become: true gather_facts: true any_errors_fatal: true @@ -22,6 +21,40 @@ k3s_server_manifests_templates: [] k3s_server_manifests_urls: [] + - name: Prevent downgrades + when: k3s_installed.stat.exists + ansible.builtin.include_tasks: tasks/version-check.yaml + + - name: Ensure that the /opt/cni directory exists + ansible.builtin.file: + path: /opt/cni + mode: '755' + state: directory + - name: Ensure that the /opt/cni/bin is a link to /var/lib/rancher/k3s/data/current/bin + ansible.builtin.file: + src: /var/lib/rancher/k3s/data/current/bin + dest: /opt/cni/bin + follow: false + force: true + state: link + + - name: Ensure that the /etc/cni directory exists + ansible.builtin.file: + path: /etc/cni + mode: '755' + state: directory + - name: Ensure that the /var/lib/rancher/k3s/agent/etc/cni/net.d directory exists + ansible.builtin.file: + path: /var/lib/rancher/k3s/agent/etc/cni/net.d + mode: '755' + state: directory + - name: Ensure that the /etc/cni/net.d is a link to /var/lib/rancher/k3s/agent/etc/cni/net.d + ansible.builtin.file: + src: /var/lib/rancher/k3s/agent/etc/cni/net.d + dest: /etc/cni/net.d + force: true + state: link + - name: Install Kubernetes ansible.builtin.include_role: name: xanmanning.k3s @@ -39,27 +72,15 @@ or k3s_server_manifests_urls | length > 0) kubernetes.core.k8s_info: kubeconfig: /etc/rancher/k3s/k3s.yaml - kind: "{% raw %}{{ item.kind }}{% endraw %}" - name: "{% raw %}{{ item.name }}{% endraw %}" - namespace: "{% raw %}{{ item.namespace | default('') }}{% endraw %}" + kind: "{{ item.kind }}" + name: "{{ item.name }}" + namespace: "{{ item.namespace | default('') }}" wait: true wait_sleep: 10 wait_timeout: 360 loop: - { name: cilium, kind: HelmChart, namespace: kube-system } - - { name: coredns, kind: HelmChart, namespace: kube-system } - {% if not bootstrap_ipv6_enabled | default(false) %} - - { name: policy, kind: CiliumL2AnnouncementPolicy } - - { name: pool, kind: CiliumLoadBalancerIPPool } - {% endif %} - - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition } - - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition } - - - name: Coredns - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/coredns.yaml + - { name: kube-vip, kind: DaemonSet, namespace: kube-system } - name: Cilium when: k3s_primary_control_node @@ -68,8 +89,3 @@ - name: Cruft when: k3s_primary_control_node ansible.builtin.include_tasks: tasks/cruft.yaml - - - name: Stale Containers - ansible.builtin.include_tasks: tasks/stale_containers.yaml - vars: - stale_containers_state: enabled diff --git a/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 deleted file mode 100644 
index be497de94..000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Cluster kube-vip - hosts: master - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Ensure Kubernetes is running - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: started - - - name: Upgrade kube-vip - ansible.builtin.template: - src: templates/kube-vip-static-pod.yaml.j2 - dest: "{% raw %}{{ k3s_server_pod_manifests_dir }}{% endraw %}/kube-vip-static-pod.yaml" - mode: preserve diff --git a/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 index 4400269e4..415e98ed0 100644 --- a/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 @@ -1,6 +1,6 @@ --- - name: Cluster Nuke - hosts: all + hosts: kubernetes become: true gather_facts: true any_errors_fatal: true @@ -36,38 +36,70 @@ block: - name: Networking | Delete Cilium links ansible.builtin.command: - cmd: "ip link delete {% raw %}{{ item }}{% endraw %}" - removes: "/sys/class/net/{% raw %}{{ item }}{% endraw %}" + cmd: "ip link delete {{ item }}" + removes: "/sys/class/net/{{ item }}" loop: ["cilium_host", "cilium_net", "cilium_vxlan"] - name: Networking | Flush iptables ansible.builtin.iptables: - table: "{% raw %}{{ item }}{% endraw %}" + table: "{{ item }}" flush: true loop: ["filter", "nat", "mangle", "raw"] - name: Networking | Flush ip6tables ansible.builtin.iptables: - table: "{% raw %}{{ item }}{% endraw %}" + table: "{{ item }}" flush: true ip_version: ipv6 loop: ["filter", "nat", "mangle", "raw"] - - name: Networking | Delete CNI directory + - name: Networking | Delete CNI bin link + ansible.builtin.file: + path: /opt/cni/bin + state: absent + - name: Networking | Delete CNI conf link ansible.builtin.file: path: /etc/cni/net.d state: absent - - name: Uninstall Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: uninstalled + - name: Check to see if k3s-killall.sh exists + ansible.builtin.stat: + path: /usr/local/bin/k3s-killall.sh + register: check_k3s_killall_script + + - name: Check to see if k3s-uninstall.sh exists + ansible.builtin.stat: + path: /usr/local/bin/k3s-uninstall.sh + register: check_k3s_uninstall_script + + - name: Run k3s-killall.sh + when: check_k3s_killall_script.stat.exists + ansible.builtin.command: + cmd: /usr/local/bin/k3s-killall.sh + register: k3s_killall + changed_when: k3s_killall.rc == 0 + + - name: Run k3s-uninstall.sh + when: check_k3s_uninstall_script.stat.exists + ansible.builtin.command: + cmd: /usr/local/bin/k3s-uninstall.sh + args: + removes: /usr/local/bin/k3s-uninstall.sh + register: k3s_uninstall + changed_when: k3s_uninstall.rc == 0 + + - name: Ensure hard links are removed + when: + - k3s_install_hard_links + - not ansible_check_mode + ansible.builtin.file: + path: "{{ k3s_install_dir }}/{{ item }}" + state: absent + loop: ["kubectl", "crictl", "ctr"] - - name: Stale Containers - ansible.builtin.include_tasks: tasks/stale_containers.yaml - vars: - stale_containers_state: disabled + - name: Remove local storage path + ansible.builtin.file: + path: /var/openebs/local + state: absent - name: Reboot ansible.builtin.reboot: - msg: Rebooting nodes + msg: Rebooting hosts reboot_timeout: 3600 diff --git
a/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 index 77e2c15d8..126138c4f 100644 --- a/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 @@ -1,6 +1,6 @@ --- - name: Prepare System - hosts: all + hosts: kubernetes become: true gather_facts: true any_errors_fatal: true @@ -8,50 +8,36 @@ - name: Pausing for 5 seconds... ansible.builtin.pause: seconds: 5 + - name: Populate service facts + ansible.builtin.service_facts: tasks: - name: Locale block: - name: Locale | Set timezone community.general.timezone: - name: "{% raw %}{{ timezone | default('Etc/UTC') }}{% endraw %}" + name: "#{ bootstrap_timezone }#" - name: Packages block: - name: Packages | Install ansible.builtin.apt: name: apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,gnupg,hdparm,htop, - iptables,iputils-ping,ipvsadm,libseccomp2,lm-sensors,neofetch,net-tools,nfs-common, + iptables,iputils-ping,ipvsadm,libseccomp2,lm-sensors,net-tools,nfs-common, nvme-cli,open-iscsi,parted,psmisc,python3,python3-apt,python3-kubernetes,python3-yaml, smartmontools,socat,software-properties-common,unzip,util-linux install_recommends: false - - name: User Configuration - block: - - name: User Configuration | SSH keys - ansible.posix.authorized_key: - user: "{% raw %}{{ ansible_user }}{% endraw %}" - key: "https://github.com/{% raw %}{{ github_username }}{% endraw %}.keys" - - name: User Configuration | Silence login - ansible.builtin.file: - dest: "{% raw %}{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}{% endraw %}/.hushlogin" - state: touch - owner: "{% raw %}{{ ansible_user }}{% endraw %}" - group: "{% raw %}{{ ansible_user }}{% endraw %}" - mode: "0644" - modification_time: preserve - access_time: preserve - - name: Network Configuration notify: Reboot block: - name: Network Configuration | Set hostname ansible.builtin.hostname: - name: "{% raw %}{{ inventory_hostname }}{% endraw %}" + name: "{{ inventory_hostname }}" - name: Network Configuration | Update hosts ansible.builtin.copy: content: | 127.0.0.1 localhost - 127.0.1.1 {% raw %}{{ inventory_hostname }}{% endraw %} + 127.0.1.1 {{ inventory_hostname }} # The following lines are desirable for IPv6 capable hosts ::1 localhost ip6-localhost ip6-loopback @@ -59,63 +45,71 @@ ff02::2 ip6-allrouters dest: /etc/hosts mode: preserve - # https://github.com/cilium/cilium/issues/18706 - - name: Network Configuration | Cilium (1) - ansible.builtin.lineinfile: - dest: /etc/systemd/networkd.conf - regexp: ManageForeignRoutingPolicyRules - line: ManageForeignRoutingPolicyRules=no - - name: Network Configuration | Cilium (2) - ansible.builtin.lineinfile: - dest: /etc/systemd/networkd.conf - regexp: ManageForeignRoutes - line: ManageForeignRoutes=no + # https://github.com/onedr0p/cluster-template/discussions/635 + - name: Network Configuration | Remove immutable flag from /etc/resolv.conf + ansible.builtin.file: + attributes: -i + path: /etc/resolv.conf + - name: Network Configuration | Remove /etc/resolv.conf + ansible.builtin.file: + attributes: -i + path: /etc/resolv.conf + state: absent + - name: Network Configuration | Add custom /etc/resolv.conf + ansible.builtin.copy: + attributes: +i + mode: '0644' + dest: /etc/resolv.conf + content: | + search #{ bootstrap_search_domain|default('.', true) }# + #% for item in bootstrap_dns_servers | default(['1.1.1.1', '1.0.0.1']) %# + nameserver #{ item }# + #% endfor %# - 
name: System Configuration notify: Reboot block: - - name: System Configuration | Neofetch - ansible.builtin.copy: - dest: /etc/profile.d/neofetch.sh - mode: "0755" - content: neofetch --config none - name: System Configuration | Disable apparmor + when: ansible_facts.services['apparmor.service'] is defined ansible.builtin.systemd: name: apparmor state: stopped masked: true - name: System Configuration | Disable swap ansible.posix.mount: - name: "{% raw %}{{ item }}{% endraw %}" + name: "{{ item }}" fstype: swap state: absent loop: ["none", "swap"] - - name: System Configuration | Kernel modules (1) - community.general.modprobe: - name: "{% raw %}{{ item }}{% endraw %}" - state: present - loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] - - name: System Configuration | Kernel modules (2) + - name: System Configuration | Create Kernel modules ansible.builtin.copy: - dest: "/etc/modules-load.d/{% raw %}{{ item }}{% endraw %}.conf" + dest: "/etc/modules-load.d/{{ item }}.conf" mode: "0644" - content: "{% raw %}{{ item }}{% endraw %}" - loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] + content: "{{ item }}" + loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "iptable_mangle", "iptable_raw", "nbd", "overlay", "rbd", "xt_socket"] + register: modules_status + - name: System Configuration | Reload Kernel modules # noqa: no-changed-when no-handler + when: modules_status.changed + ansible.builtin.systemd: + name: systemd-modules-load + state: restarted - name: System Configuration | Sysctl ansible.posix.sysctl: - name: "{% raw %}{{ item.key }}{% endraw %}" - value: "{% raw %}{{ item.value }}{% endraw %}" + name: "{{ item.key }}" + value: "{{ item.value }}" sysctl_file: /etc/sysctl.d/99-kubernetes.conf reload: true - with_dict: "{% raw %}{{ sysctl_config }}{% endraw %}" + with_dict: "{{ sysctl_config }}" vars: sysctl_config: fs.inotify.max_queued_events: 65536 fs.inotify.max_user_watches: 524288 fs.inotify.max_user_instances: 8192 + net.core.rmem_max: 2500000 + net.core.wmem_max: 2500000 handlers: - name: Reboot ansible.builtin.reboot: - msg: Rebooting nodes + msg: Rebooting hosts reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 index 4adcfe435..6fe1fd0df 100644 --- a/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 @@ -1,6 +1,6 @@ --- - name: Reboot - hosts: all + hosts: kubernetes become: true gather_facts: true any_errors_fatal: true @@ -11,5 +11,5 @@ tasks: - name: Reboot ansible.builtin.reboot: - msg: Rebooting nodes + msg: Rebooting hosts reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 index 086c7dced..acad8fd60 100644 --- a/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 @@ -1,7 +1,6 @@ --- -# https://github.com/kevincoakley/ansible-role-k8s-rolling-update - name: Cluster rollout update - hosts: all + hosts: kubernetes become: true gather_facts: true any_errors_fatal: true @@ -12,9 +11,9 @@ seconds: 5 tasks: - name: Details - ansible.builtin.command: "kubectl get node {% raw %}{{ inventory_hostname }}{% endraw %} -o json" + ansible.builtin.command: "k3s kubectl get node {{ inventory_hostname }} -o json" register: kubectl_get_node - 
delegate_to: "{% raw %}{{ groups['master'][0] }}{% endraw %}" + delegate_to: "{{ groups['controllers'][0] }}" failed_when: false changed_when: false @@ -27,36 +26,24 @@ block: - name: Cordon kubernetes.core.k8s_drain: - name: "{% raw %}{{ inventory_hostname }}{% endraw %}" + name: "{{ inventory_hostname }}" kubeconfig: /etc/rancher/k3s/k3s.yaml state: cordon - delegate_to: "{% raw %}{{ groups['master'][0] }}{% endraw %}" + delegate_to: "{{ groups['controllers'][0] }}" - name: Drain - ansible.builtin.command: "kubectl drain --pod-selector='app!=rook-ceph-osd,app!=csi-attacher,app!=csi-provisioner' --ignore-daemonsets --delete-emptydir-data --force --grace-period=300 {% raw %}{{ inventory_hostname }}{% endraw %}" - delegate_to: "{% raw %}{{ groups['master'][0] }}{% endraw %}" - changed_when: false - - ### pod_selectors feature in upcoming kubernetes.core 2.5.0 ### - # - name: Drain - # kubernetes.core.k8s_drain: - # name: "{% raw %}{{ inventory_hostname }}{% endraw %}" - # kubeconfig: /etc/rancher/k3s/k3s.yaml - # state: drain - # delete_options: - # delete_emptydir_data: true - # ignore_daemonsets: true - # terminate_grace_period: 600 - # wait_timeout: 900 - # force: true - # pod_selectors: - # # Rook Ceph - # - app!=rook-ceph-osd - # # Longhorn - # - app!=csi-attacher - # # Longhorn - # - app!=csi-provisioner - # delegate_to: "{% raw %}{{ groups['master'][0] }}{% endraw %}" + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: drain + delete_options: + delete_emptydir_data: true + ignore_daemonsets: true + terminate_grace_period: 600 + wait_timeout: 900 + pod_selectors: + - app!=rook-ceph-osd # Rook Ceph + delegate_to: "{{ groups['controllers'][0] }}" - name: Update ansible.builtin.apt: @@ -74,11 +61,10 @@ msg: Rebooting node post_reboot_delay: 60 reboot_timeout: 3600 - when: reboot_required.stat.exists - name: Uncordon kubernetes.core.k8s_drain: - name: "{% raw %}{{ inventory_hostname }}{% endraw %}" + name: "{{ inventory_hostname }}" kubeconfig: /etc/rancher/k3s/k3s.yaml state: uncordon - delegate_to: "{% raw %}{{ groups['master'][0] }}{% endraw %}" + delegate_to: "{{ groups['controllers'][0] }}" diff --git a/bootstrap/templates/ansible/playbooks/files/stale-containers.service.j2 b/bootstrap/templates/ansible/playbooks/files/stale-containers.service.j2 deleted file mode 100644 index 5136df2f6..000000000 --- a/bootstrap/templates/ansible/playbooks/files/stale-containers.service.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[Unit] -Description=Stale containers - -[Service] -Type=oneshot -ExecStart=/usr/local/bin/k3s crictl rmi --prune diff --git a/bootstrap/templates/ansible/playbooks/files/stale-containers.timer.j2 b/bootstrap/templates/ansible/playbooks/files/stale-containers.timer.j2 deleted file mode 100644 index 731885a14..000000000 --- a/bootstrap/templates/ansible/playbooks/files/stale-containers.timer.j2 +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Stale containers - -[Timer] -OnCalendar=weekly -AccuracySec=1h -Persistent=true -RandomizedDelaySec=6000 - -[Install] -WantedBy=timers.target diff --git a/bootstrap/templates/ansible/playbooks/tasks/coredns.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/coredns.yaml.j2 deleted file mode 100644 index d18383a75..000000000 --- a/bootstrap/templates/ansible/playbooks/tasks/coredns.yaml.j2 +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Coredns - block: - - name: Coredns | Check if Coredns HelmChart exists - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: 
coredns - kind: HelmChart - namespace: kube-system - register: coredns_helmchart - - - name: Coredns | Wait for Coredns to rollout - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: helm-install-coredns - kind: Job - namespace: kube-system - wait: true - wait_condition: - type: Complete - status: true - wait_timeout: 360 - - - name: Coredns | Patch the Coredns HelmChart to unmanage it - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s_json_patch: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - patch: - - op: add - path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged - value: "true" - - - name: Coredns | Delete the Coredns HelmChart CR - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - state: absent - - - name: Coredns | Force delete the Coredns HelmChart - when: coredns_helmchart.resources | count > 0 - kubernetes.core.k8s: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: coredns - kind: HelmChart - namespace: kube-system - state: patched - definition: - metadata: - finalizers: [] diff --git a/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 index f22ad98a2..736974763 100644 --- a/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 @@ -1,20 +1,19 @@ --- -# https://github.com/k3s-io/k3s/issues/1971 - name: Cruft block: - - name: Cruft | Get list of custom mantifests + - name: Cruft | Get list of custom manifests ansible.builtin.find: - paths: "{% raw %}{{ k3s_server_manifests_dir }}{% endraw %}" + paths: "{{ k3s_server_manifests_dir }}" file_type: file use_regex: true patterns: ["^custom-.*"] register: custom_manifest - - name: Cruft | Delete custom mantifests + - name: Cruft | Delete custom manifests ansible.builtin.file: - path: "{% raw %}{{ item.path }}{% endraw %}" + path: "{{ item.path }}" state: absent - loop: "{% raw %}{{ custom_manifest.files }}{% endraw %}" + loop: "{{ custom_manifest.files }}" - name: Cruft | Get list of custom addons kubernetes.core.k8s_info: @@ -25,8 +24,8 @@ - name: Cruft | Delete addons kubernetes.core.k8s: kubeconfig: /etc/rancher/k3s/k3s.yaml - name: "{% raw %}{{ item.metadata.name }}{% endraw %}" + name: "{{ item.metadata.name }}" kind: Addon namespace: kube-system state: absent - loop: "{% raw %}{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}{% endraw %}" + loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}" diff --git a/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 index a3a0681aa..56bf684e5 100644 --- a/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 +++ b/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 @@ -13,7 +13,7 @@ when: k3s_primary_control_node ansible.builtin.fetch: src: /etc/rancher/k3s/k3s.yaml - dest: "{% raw %}{{ repository_path.stdout }}{% endraw %}/kubeconfig" + dest: "{{ repository_path.stdout }}/kubeconfig" flat: true - name: Update kubeconfig with the correct load balancer address @@ -21,6 +21,6 @@ become: false run_once: true ansible.builtin.replace: - path: "{% raw %}{{ repository_path.stdout }}{% endraw %}/kubeconfig" + path: "{{ 
repository_path.stdout }}/kubeconfig" regexp: https://127.0.0.1:6443 - replace: "https://{% raw %}{{ k3s_registration_address }}{% endraw %}:6443" + replace: "https://{{ k3s_registration_address }}:6443" diff --git a/bootstrap/templates/ansible/playbooks/tasks/stale_containers.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/stale_containers.yaml.j2 deleted file mode 100644 index 9857d6bce..000000000 --- a/bootstrap/templates/ansible/playbooks/tasks/stale_containers.yaml.j2 +++ /dev/null @@ -1,36 +0,0 @@ ---- -# https://github.com/k3s-io/k3s/issues/1900 -- name: Enabled Stale containers - when: stale_containers_state == "enabled" - block: - - name: Stale containers | Create systemd unit - ansible.builtin.copy: - src: files/stale-containers.service - dest: /etc/systemd/system/stale-containers.service - owner: root - group: root - mode: "0644" - - - name: Stale containers | Create systemd timer - ansible.builtin.copy: - src: files/stale-containers.timer - dest: /etc/systemd/system/stale-containers.timer - owner: root - group: root - mode: "0644" - - - name: Stale containers | Start the systemd timer - ansible.builtin.systemd: - name: stale-containers.timer - enabled: true - daemon_reload: true - masked: false - state: started - -- name: Disable Stale containers - when: stale_containers_state == "disabled" - block: - - name: Stale containers | Mask the systemd timer - ansible.builtin.systemd: - name: stale-containers.timer - masked: true diff --git a/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 new file mode 100644 index 000000000..56e567026 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 @@ -0,0 +1,17 @@ +--- +- name: Version Check + block: + - name: Get deployed k3s version + ansible.builtin.command: k3s --version + register: k3s_version + changed_when: false + failed_when: false + + - name: Extract k3s version + ansible.builtin.set_fact: + current_k3s_version: "{{ k3s_version.stdout | regex_replace('(?im)k3s version (?P<version>[a-z0-9\\.\\+]+).*\n.*', '\\g<version>') }}" + + - name: Check if upgrades are allowed + ansible.builtin.assert: + that: ["k3s_release_version is version(current_k3s_version, '>=')"] + fail_msg: "Unable to upgrade k3s because the deployed version is higher than the one specified in the configuration" diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 new file mode 100644 index 000000000..3054a59fa --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: cilium + namespace: kube-system +spec: + repo: https://helm.cilium.io/ + chart: cilium + # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io + version: 1.15.5 + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + #% filter indent(width=4, first=True) %# + #% include 'partials/cilium-values-init.partial.yaml.j2' %# + #% endfilter %# diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2.j2 deleted file mode 100644 index e43c0a9a4..000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2.j2 +++ /dev/null @@ -1,64 +0,0 @@ -#jinja2: trim_blocks: True,
lstrip_blocks: True ---- -# https://docs.k3s.io/helm -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: cilium - namespace: kube-system -spec: - # renovate: datasource=helm - repo: https://helm.cilium.io/ - chart: cilium - version: 1.14.0 - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - autoDirectNodeRoutes: true - bpf: - masquerade: true - bgp: - enabled: false - cluster: - name: home-cluster - id: 1 - containerRuntime: - integration: containerd - socketPath: /var/run/k3s/containerd/containerd.sock - endpointRoutes: - enabled: true - hubble: - enabled: false - ipam: - mode: kubernetes - ipv4NativeRoutingCIDR: "{% raw %}{{ cluster_cidr }}{% endraw %}" - {% if bootstrap_ipv6_enabled | default(false) %} - ipv6NativeRoutingCIDR: "{% raw %}{{ cluster_cidr_v6 }}{% endraw %}" - ipv6: - enabled: true - {% endif %} - k8sServiceHost: "{% raw %}{{ kube_vip_addr }}{% endraw %}" - k8sServicePort: 6443 - kubeProxyReplacement: strict - kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 - l2announcements: - {% if bootstrap_ipv6_enabled | default(false) %} - enabled: false - {% else %} - enabled: true - # https://github.com/cilium/cilium/issues/26586 - leaseDuration: 120s - leaseRenewDeadline: 60s - leaseRetryPeriod: 1s - {% endif %} - loadBalancer: - algorithm: maglev - mode: dsr - localRedirectPolicy: true - operator: - replicas: 1 - rollOutPods: true - rollOutCiliumPods: true - securityContext: - privileged: true - tunnel: disabled diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-l2.yaml.j2.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-l2.yaml.j2.j2 deleted file mode 100644 index 51415a665..000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-l2.yaml.j2.j2 +++ /dev/null @@ -1,22 +0,0 @@ ---- -# https://docs.cilium.io/en/latest/network/l2-announcements -apiVersion: cilium.io/v2alpha1 -kind: CiliumL2AnnouncementPolicy -metadata: - name: policy -spec: - loadBalancerIPs: true - # NOTE: This might need to be set if you have more than one active NIC on your nodes - # interfaces: - # - ^eno[0-9]+ - nodeSelector: - matchLabels: - kubernetes.io/os: linux ---- -apiVersion: cilium.io/v2alpha1 -kind: CiliumLoadBalancerIPPool -metadata: - name: pool -spec: - cidrs: - - cidr: "{% raw %}{{ node_cidr }}{% endraw %}" diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2.j2 deleted file mode 100644 index 53617f8b9..000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2.j2 +++ /dev/null @@ -1,78 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -# https://docs.k3s.io/helm -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: coredns - namespace: kube-system -spec: - # renovate: datasource=helm - repo: https://coredns.github.io/helm - chart: coredns - version: 1.29.0 - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - fullnameOverride: coredns - replicaCount: 1 - k8sAppLabelOverride: kube-dns - service: - name: kube-dns - clusterIP: "{% raw %}{{ coredns_addr }}{% endraw %}" - serviceAccount: - create: true - deployment: - annotations: - reloader.stakater.com/auto: "true" - servers: - - zones: - - zone: . 
- scheme: dns:// - use_tcp: true - port: 53 - plugins: - - name: log - - name: errors - - name: health - configBlock: |- - lameduck 5s - - name: ready - - name: kubernetes - parameters: cluster.local in-addr.arpa ip6.arpa - configBlock: |- - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - - name: prometheus - parameters: 0.0.0.0:9153 - - name: forward - parameters: . /etc/resolv.conf - - name: cache - parameters: 30 - - name: loop - - name: reload - - name: loadbalance - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - app.kubernetes.io/instance: coredns diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 new file mode 100644 index 000000000..f62cab4d9 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-ds.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 new file mode 100644 index 000000000..481c2e822 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-rbac.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2.j2 b/bootstrap/templates/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2.j2 deleted file mode 100644 index 5ec399263..000000000 --- a/bootstrap/templates/ansible/playbooks/templates/kube-vip-static-pod.yaml.j2.j2 +++ /dev/null @@ -1,57 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - name: kube-vip - namespace: kube-system - labels: - app.kubernetes.io/instance: kube-vip - app.kubernetes.io/name: kube-vip -spec: - containers: - - name: kube-vip - image: ghcr.io/kube-vip/kube-vip:v0.6.1 - imagePullPolicy: IfNotPresent - args: ["manager"] - env: - - name: address - value: "{% raw %}{{ kube_vip_addr }}{% endraw %}" - - name: vip_arp - value: "true" - - name: port - value: "6443" - - name: vip_cidr - value: "32" - - name: cp_enable - value: "true" - - name: cp_namespace - value: kube-system - - name: vip_ddns - value: "false" - - name: svc_enable - value: "false" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - - name: prometheus_server - value: :2112 - securityContext: - capabilities: - add: ["NET_ADMIN", "NET_RAW"] - volumeMounts: - - mountPath: /etc/kubernetes/admin.conf - name: kubeconfig - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - hostNetwork: true - volumes: - - name: kubeconfig - hostPath: - path: /etc/rancher/k3s/k3s.yaml diff --git a/bootstrap/templates/ansible/requirements.txt.j2 b/bootstrap/templates/ansible/requirements.txt.j2 new file mode 100644 index 000000000..ef5a6fc3c --- /dev/null +++ 
b/bootstrap/templates/ansible/requirements.txt.j2 @@ -0,0 +1,4 @@ +ansible-lint==24.5.0 +ansible==9.5.1 +jmespath==1.0.1 +openshift==0.13.2 diff --git a/bootstrap/templates/ansible/requirements.yaml.j2 b/bootstrap/templates/ansible/requirements.yaml.j2 new file mode 100644 index 000000000..91c6e5449 --- /dev/null +++ b/bootstrap/templates/ansible/requirements.yaml.j2 @@ -0,0 +1,14 @@ +--- +collections: + - name: ansible.posix + version: 1.5.4 + - name: ansible.utils + version: 4.1.0 + - name: community.general + version: 8.6.0 + - name: kubernetes.core + version: 3.1.0 +roles: + - name: xanmanning.k3s + src: https://github.com/PyratLabs/ansible-role-k3s + version: v3.4.4 diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 index a7eb30d13..5a0496483 100644 --- a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: cert-manager - namespace: cert-manager spec: interval: 30m chart: spec: chart: cert-manager - version: v1.14.4 + version: v1.14.5 sourceRef: kind: HelmRepository name: jetstack namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,13 +20,10 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: installCRDs: true - extraArgs: - - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 - - --dns01-recursive-nameservers-only + dns01RecursiveNameservers: 1.1.1.1:53,9.9.9.9:53 + dns01RecursiveNameserversOnly: true podDnsPolicy: None podDnsConfig: nameservers: @@ -38,4 +33,3 @@ spec: enabled: true servicemonitor: enabled: true - prometheusInstance: monitoring diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 index d7e7064ca..5dd7baca7 100644 --- a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 @@ -1,7 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: cert-manager resources: - ./helmrelease.yaml - - ./prometheusrule.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml.j2 deleted file mode 100644 index 34eb6887a..000000000 --- a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml.j2 +++ /dev/null @@ -1,62 +0,0 @@ -#jinja2: trim_blocks: True ---- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: cert-manager.rules - namespace: cert-manager -spec: - groups: - - name: cert-manager - rules: - - alert: CertManagerAbsent - expr: | - absent(up{job="cert-manager"}) - for: 15m - labels: - severity: critical - annotations: - description: > - New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent - summary: "Cert Manager has dissapeared from Prometheus service discovery." 
- - name: certificates - rules: - - alert: CertManagerCertExpirySoon - expr: | - avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600) - for: 15m - labels: - severity: warning - annotations: - description: > - The domain that this cert covers will be unavailable after - {% raw %}{{ $value | humanizeDuration }}{% endraw %}. Clients using endpoints that this cert - protects will start to fail in {% raw %}{{ $value | humanizeDuration }}{% endraw %}. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon - summary: | - The cert {% raw %}{{ $labels.name }}{% endraw %} is {% raw %}{{ $value | humanizeDuration }}{% endraw %} from expiry, it should have renewed over a week ago. - - alert: CertManagerCertNotReady - expr: | - max by (name, exported_namespace, namespace, condition) (certmanager_certificate_ready_status{condition!="True"} == 1) - for: 15m - labels: - severity: critical - annotations: - description: > - This certificate has not been ready to serve traffic for at least - 10m. If the cert is being renewed or there is another valid cert, the ingress - controller _may_ be able to serve that instead. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready - summary: "The cert {% raw %}{{ $labels.name }}{% endraw %} is not ready to serve traffic." - - alert: CertManagerHittingRateLimits - expr: | - sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) > 0 - for: 15m - labels: - severity: critical - annotations: - description: > - Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits - summary: "Cert manager hitting LetsEncrypt rate limits." 
diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py new file mode 100644 index 000000000..d9ae82b4b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 index 18b1db7b4..f5bf887f9 100644 --- a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 @@ -3,6 +3,5 @@ apiVersion: v1 kind: Secret metadata: name: cert-manager-secret - namespace: cert-manager stringData: - api-token: "{{ bootstrap_cloudflare_token }}" + api-token: "#{ bootstrap_cloudflare.token }#" diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 index c5fb6a6c5..3efe99d81 100644 --- a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cert-manager + name: &app cert-manager namespace: flux-system spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/cert-manager/cert-manager/app prune: true sourceRef: @@ -14,15 +18,20 @@ spec: interval: 30m retryInterval: 1m timeout: 5m +#% if bootstrap_cloudflare.enabled %# --- apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cert-manager-issuers + name: &app cert-manager-issuers namespace: flux-system spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-cert-manager + - name: cert-manager path: ./kubernetes/apps/cert-manager/cert-manager/issuers prune: true sourceRef: @@ -32,3 +41,4 @@ spec: interval: 30m retryInterval: 1m timeout: 5m +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/default/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/default/kustomization.yaml.j2 deleted file mode 100644 index 23a8573e7..000000000 --- a/bootstrap/templates/kubernetes/apps/default/kustomization.yaml.j2 +++ /dev/null @@ -1,12 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ./namespace.yaml - {% if hajimari.enabled | default(false) %} - - ./hajimari/ks.yaml - {% endif %} - {% if discord_template_notifier.enabled | default(false) %} - - ./discord-template-notifier/ks.yaml - {% endif %} diff --git a/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 index bf4d7e096..10587f8c9 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 @@ -1,10 +1,6 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml - - 
./addons/ks.yaml - {% if weave_gitops.enabled | default(false) %} - - ./weave-gitops/ks.yaml - {% endif %} + - ./webhooks/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 similarity index 75% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 index c6d007099..171716743 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 @@ -1,16 +1,15 @@ +#% if bootstrap_cloudflare.enabled %# --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: flux-webhook - namespace: flux-system annotations: external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" - hajimari.io/enable: "false" spec: ingressClassName: external rules: - - host: &host "flux-webhook.${SECRET_DOMAIN}" + - host: "flux-webhook.${SECRET_DOMAIN}" http: paths: - path: /hook/ @@ -20,6 +19,4 @@ spec: name: webhook-receiver port: number: 80 - tls: - - hosts: - - *host +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 similarity index 72% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 index 786e654a5..75fc5841c 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 @@ -3,5 +3,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./secret.sops.yaml + #% if bootstrap_cloudflare.enabled %# - ./ingress.yaml + #% endif %# - ./receiver.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 similarity index 95% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 index b4f78ca65..cca5931bd 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 @@ -3,7 +3,6 @@ apiVersion: notification.toolkit.fluxcd.io/v1 kind: Receiver metadata: name: github-receiver - namespace: flux-system spec: type: github events: diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 similarity index 53% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 index 4a63b880a..34ac7daff 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml.j2 +++ 
b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 @@ -3,6 +3,5 @@ apiVersion: v1 kind: Secret metadata: name: github-webhook-token-secret - namespace: flux-system stringData: - token: "{{ bootstrap_flux_github_webhook_token }}" + token: "#{ bootstrap_github_webhook_token }#" diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 similarity index 100% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 diff --git a/bootstrap/templates/kubernetes/apps/networking/external-dns/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 similarity index 58% rename from bootstrap/templates/kubernetes/apps/networking/external-dns/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 index 9dcaf8c01..e80c50b23 100644 --- a/bootstrap/templates/kubernetes/apps/networking/external-dns/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-external-dns + name: &app flux-webhooks namespace: flux-system spec: - path: ./kubernetes/apps/networking/external-dns/app + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/flux-system/webhooks/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 index b4c896240..a78691008 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 @@ -1,21 +1,18 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: cilium - namespace: kube-system spec: interval: 30m chart: spec: chart: cilium - version: 1.14.0 + version: 1.15.5 sourceRef: kind: HelmRepository name: cilium namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -23,105 +20,7 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: - autoDirectNodeRoutes: true - bpf: - masquerade: true - bgp: - enabled: false - cluster: - name: home-cluster - id: 1 - containerRuntime: - integration: containerd - socketPath: /var/run/k3s/containerd/containerd.sock - endpointRoutes: - enabled: true - hubble: - enabled: true - metrics: - enabled: - - dns:query - - drop - - tcp - - flow - - port-distribution - - icmp - - http - serviceMonitor: - enabled: true - dashboards: - enabled: true - annotations: - grafana_folder: Cilium - relay: - enabled: true - rollOutPods: true - prometheus: - serviceMonitor: - enabled: true - ui: - enabled: true - rollOutPods: true - ingress: - enabled: true - className: internal - annotations: - hajimari.io/icon: simple-icons:cilium - hosts: - - &host "hubble.${SECRET_DOMAIN}" - tls: - - hosts: - - *host - ipam: - mode: kubernetes - ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" - {% if bootstrap_ipv6_enabled | default(false) %} - ipv6NativeRoutingCIDR: "${CLUSTER_CIDR_V6}" - ipv6: - enabled: true - {% 
endif %} - k8sServiceHost: "${KUBE_VIP_ADDR}" - k8sServicePort: 6443 - kubeProxyReplacement: strict - kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 - l2announcements: - {% if bootstrap_ipv6_enabled | default(false) %} - enabled: false - {% else %} - enabled: true - # https://github.com/cilium/cilium/issues/26586 - leaseDuration: 120s - leaseRenewDeadline: 60s - leaseRetryPeriod: 1s - {% endif %} - loadBalancer: - algorithm: maglev - mode: dsr - localRedirectPolicy: true - operator: - replicas: 1 - rollOutPods: true - prometheus: - enabled: true - serviceMonitor: - enabled: true - dashboards: - enabled: true - annotations: - grafana_folder: Cilium - prometheus: - enabled: true - serviceMonitor: - enabled: true - trustCRDsExist: true - dashboards: - enabled: true - annotations: - grafana_folder: Cilium - rollOutCiliumPods: true - securityContext: - privileged: true - tunnel: disabled + #% filter indent(width=4, first=True) %# + #% include 'partials/cilium-values-full.partial.yaml.j2' %# + #% endfilter %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 index aa7aace1e..5dd7baca7 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 @@ -1,10 +1,5 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - {% if not bootstrap_ipv6_enabled | default(false) %} - - ./cilium-l2.yaml - {% endif %} - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml.j2 similarity index 56% rename from bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml.j2 index e8eba26e1..7065f1fad 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml.j2 @@ -1,14 +1,16 @@ +#% if ((not bootstrap_bgp.enabled) and (not bootstrap_feature_gates.dual_stack_ipv4_first)) %# --- # https://docs.cilium.io/en/latest/network/l2-announcements apiVersion: cilium.io/v2alpha1 kind: CiliumL2AnnouncementPolicy metadata: - name: policy + name: l2-policy spec: loadBalancerIPs: true - # NOTE: This might need to be set if you have more than one active NIC on your nodes + # NOTE: interfaces might need to be set if you have more than one active NIC on your hosts # interfaces: # - ^eno[0-9]+ + # - ^eth[0-9]+ nodeSelector: matchLabels: kubernetes.io/os: linux @@ -16,7 +18,9 @@ spec: apiVersion: cilium.io/v2alpha1 kind: CiliumLoadBalancerIPPool metadata: - name: pool + name: l2-pool spec: - cidrs: + allowFirstLastIPs: "Yes" + blocks: - cidr: "${NODE_CIDR}" +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l3.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l3.yaml.j2 new file mode 100644 index 000000000..fa51e2daa --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/cilium-l3.yaml.j2 @@ -0,0 +1,40 @@ +#% if bootstrap_bgp.enabled %# +--- +# https://docs.cilium.io/en/latest/network/bgp-control-plane/ +apiVersion: cilium.io/v2alpha1 +kind: 
CiliumBGPPeeringPolicy +metadata: + name: l3-policy +spec: + nodeSelector: + matchLabels: + kubernetes.io/os: linux + virtualRouters: + - localASN: #{ bootstrap_bgp.local_asn }# + neighbors: + #% if bootstrap_bgp.peers %# + #% for item in bootstrap_bgp.peers %# + - peerAddress: "#{ item }#/32" + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endfor %# + #% else %# + #% if bootstrap_node_default_gateway %# + - peerAddress: "#{ bootstrap_node_default_gateway }#/32" + #% else %# + - peerAddress: "#{ bootstrap_node_network | nthhost(1) }#/32" + #% endif %# + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endif %# + serviceSelector: + matchExpressions: + - {key: somekey, operator: NotIn, values: ['never-used-value']} +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: l3-pool +spec: + allowFirstLastIPs: "Yes" + blocks: + - cidr: "${BGP_ADVERTISED_CIDR}" +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/kustomization.yaml.j2 new file mode 100644 index 000000000..4fc169b4d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/config/kustomization.yaml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + #% if bootstrap_bgp.enabled %# + - ./cilium-l3.yaml + #% elif not bootstrap_feature_gates.dual_stack_ipv4_first %# + - ./cilium-l2.yaml + #% else %# + [] + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 index 7d29d9821..2522f1dfe 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 @@ -2,11 +2,37 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cilium + name: &app cilium namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/cilium/app prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cilium + path: ./kubernetes/apps/kube-system/cilium/config + prune: false # never should be deleted sourceRef: kind: GitRepository name: home-kubernetes diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 deleted file mode 100644 index 65d0f520e..000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,90 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: coredns - namespace: kube-system -spec: - interval: 30m - chart: - spec: - chart: coredns - version: 1.29.0 - sourceRef: - kind: HelmRepository - name: coredns - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - fullnameOverride: 
coredns - replicaCount: 1 - k8sAppLabelOverride: kube-dns - service: - name: kube-dns - clusterIP: "${COREDNS_ADDR}" - serviceAccount: - create: true - deployment: - annotations: - reloader.stakater.com/auto: "true" - servers: - - zones: - - zone: . - scheme: dns:// - use_tcp: true - port: 53 - plugins: - - name: log - - name: errors - - name: health - configBlock: |- - lameduck 5s - - name: ready - - name: kubernetes - parameters: cluster.local in-addr.arpa ip6.arpa - configBlock: |- - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - - name: prometheus - parameters: 0.0.0.0:9153 - - name: forward - parameters: . /etc/resolv.conf - - name: cache - parameters: 30 - - name: loop - - name: reload - - name: loadbalance - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - app.kubernetes.io/instance: coredns diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 deleted file mode 100644 index 1c3fdb04d..000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: kube-system -resources: - - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py new file mode 100644 index 000000000..0979f9a64 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 new file mode 100644 index 000000000..f62cab4d9 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-ds.partial.yaml.j2' %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 new file mode 100644 index 000000000..cbede8284 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./rbac.yaml + - ./daemonset.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 new file mode 100644 index 000000000..481c2e822 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-rbac.partial.yaml.j2' %# diff --git a/bootstrap/templates/addons/kube-prometheus-stack/ks.yaml.j2 
b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 similarity index 59% rename from bootstrap/templates/addons/kube-prometheus-stack/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 index 74c535889..fcd2c8add 100644 --- a/bootstrap/templates/addons/kube-prometheus-stack/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-kube-prometheus-stack + name: &app kube-vip namespace: flux-system spec: - path: ./kubernetes/apps/monitoring/kube-prometheus-stack/app + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kube-vip/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py new file mode 100644 index 000000000..3ace63dfa --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..86aa49047 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.1.0 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + #% filter indent(width=4, first=True) %# + #% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# + #% endfilter %# + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/addons/hajimari/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 similarity index 84% rename from bootstrap/templates/addons/hajimari/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 index c0cd21834..5dd7baca7 100644 --- a/bootstrap/templates/addons/hajimari/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: default resources: - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 similarity index 58% rename from bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 index c036bad99..adfb4940a 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization 
metadata: - name: cluster-apps-coredns + name: &app kubelet-csr-approver namespace: flux-system spec: - path: ./kubernetes/apps/kube-system/coredns/app + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kubelet-csr-approver/app prune: false # never should be deleted sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 index 6b9dc1e7c..289af80e9 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 @@ -1,14 +1,15 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml - ./cilium/ks.yaml - - ./coredns/ks.yaml - - ./local-path-provisioner/ks.yaml - ./metrics-server/ks.yaml - ./reloader/ks.yaml - {% if csi_driver_nfs.enabled | default(false) %} - - ./csi-driver-nfs/ks.yaml - {% endif %} + #% if bootstrap_distribution in ["talos"] %# + - ./kubelet-csr-approver/ks.yaml + - ./spegel/ks.yaml + #% endif %# + #% if bootstrap_distribution in ["k3s"] %# + - ./kube-vip/ks.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml.j2 deleted file mode 100644 index be4f0f6fc..000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,71 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: local-path-provisioner - namespace: kube-system -spec: - interval: 30m - chart: - spec: - chart: ./deploy/chart/local-path-provisioner - sourceRef: - kind: GitRepository - name: local-path-provisioner - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - helperImage: - repository: public.ecr.aws/docker/library/busybox - tag: latest - storageClass: - defaultClass: false - nodePathMap: - - node: DEFAULT_PATH_FOR_NON_LISTED_NODES - paths: ["/var/lib/rancher/k3s/storage"] - # NOTE: Do not enable Flux variable substitution on this HelmRelease - configmap: - setup: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - mkdir -m 0777 -p ${absolutePath} - chmod 701 ${absolutePath}/.. 
- teardown: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - rm -rf ${absolutePath} diff --git a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml.j2 deleted file mode 100644 index 1c3fdb04d..000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: kube-system -resources: - - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml.j2 deleted file mode 100644 index 985be51e7..000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml.j2 +++ /dev/null @@ -1,18 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-local-path-provisioner - namespace: flux-system - labels: - substitution.flux.home.arpa/disabled: "true" -spec: - path: ./kubernetes/apps/kube-system/local-path-provisioner/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 index ae719d340..64f412e3d 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 @@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: metrics-server - namespace: kube-system spec: interval: 30m chart: @@ -14,7 +13,6 @@ spec: kind: HelmRepository name: metrics-server namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,14 +20,14 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: args: - - --kubelet-insecure-tls - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --metric-resolution=15s + #% if bootstrap_distribution in ["k3s"] %# + - --kubelet-insecure-tls + #% endif %# metrics: enabled: true serviceMonitor: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 index 1c3fdb04d..5dd7baca7 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 index d10ca1fbe..244f53c16 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 +++ 
b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-metrics-server + name: &app metrics-server namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/metrics-server/app prune: true sourceRef: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 index b33220653..f5cd4317d 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: reloader - namespace: &namespace kube-system spec: interval: 30m chart: spec: chart: reloader - version: 1.0.36 + version: 1.0.97 sourceRef: kind: HelmRepository name: stakater namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,12 +20,10 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: fullnameOverride: reloader reloader: - reloadStrategy: annotations + readOnlyRootFileSystem: true podMonitor: enabled: true - namespace: *namespace + namespace: "{{ .Release.Namespace }}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 index 1c3fdb04d..5dd7baca7 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 index 27a247c5b..9aa429934 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-reloader + name: &app reloader namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/reloader/app prune: true sourceRef: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py new file mode 100644 index 000000000..3ace63dfa --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..5c960bbaa --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: spegel +spec: + interval: 30m + chart: + spec: + chart: 
spegel + version: v0.0.22 + sourceRef: + kind: HelmRepository + name: spegel + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts + service: + registry: + hostPort: 29999 + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/addons/kube-prometheus-stack/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 similarity index 82% rename from bootstrap/templates/addons/kube-prometheus-stack/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 index 2469b52a3..5dd7baca7 100644 --- a/bootstrap/templates/addons/kube-prometheus-stack/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: monitoring resources: - ./helmrelease.yaml diff --git a/bootstrap/templates/addons/hajimari/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 similarity index 60% rename from bootstrap/templates/addons/hajimari/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 index 1fe9bdeef..83c730b07 100644 --- a/bootstrap/templates/addons/hajimari/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-hajimari + name: &app spegel namespace: flux-system spec: - path: ./kubernetes/apps/default/hajimari/app + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/spegel/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/monitoring/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/monitoring/kustomization.yaml.j2 deleted file mode 100644 index 982a0929a..000000000 --- a/bootstrap/templates/kubernetes/apps/monitoring/kustomization.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ./namespace.yaml - {% if grafana.enabled | default(false) %} - - ./grafana/ks.yaml - {% endif %} - {% if kube_prometheus_stack.enabled | default(false) %} - - ./kube-prometheus-stack/ks.yaml - {% endif %} - {% if kubernetes_dashboard.enabled | default(false) %} - - ./kubernetes-dashboard/ks.yaml - {% endif %} diff --git a/bootstrap/templates/kubernetes/apps/network/.mjfilter.py b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py new file mode 100644 index 000000000..d9ae82b4b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 new file mode 100644 index 000000000..05bcef5cf --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: "external.${SECRET_DOMAIN}" + +ingress: + - hostname: "${SECRET_DOMAIN}" + service: 
https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 similarity index 91% rename from bootstrap/templates/kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 index 2a748f949..43d7d7b29 100644 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 @@ -3,7 +3,6 @@ apiVersion: externaldns.k8s.io/v1alpha1 kind: DNSEndpoint metadata: name: cloudflared - namespace: networking spec: endpoints: - dnsName: "external.${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..f15dd501c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 @@ -0,0 +1,110 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + cloudflared: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.5.0 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + TUNNEL_ID: + valueFrom: + secretKeyRef: + name: cloudflared-secret + key: TUNNEL_ID + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "$(TUNNEL_ID)" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 similarity index 92% rename from bootstrap/templates/kubernetes/apps/networking/cloudflared/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 index 0536740db..891a864ad 100644 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 @@ -1,7 +1,6 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: networking resources: - ./dnsendpoint.yaml - ./secret.sops.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 new file mode 100644 index 000000000..67d169ed7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudflared-secret +stringData: + TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + credentials.json: | + { + "AccountTag": "#{ bootstrap_cloudflare.tunnel.account_id }#", + "TunnelSecret": "#{ bootstrap_cloudflare.tunnel.secret }#", + "TunnelID": "#{ bootstrap_cloudflare.tunnel.id }#" + } diff --git a/bootstrap/templates/addons/kubernetes-dashboard/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 similarity index 57% rename from bootstrap/templates/addons/kubernetes-dashboard/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 index 2220146cf..eb8d8da0b 100644 --- a/bootstrap/templates/addons/kubernetes-dashboard/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 @@ -2,13 +2,16 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-kubernetes-dashboard + name: &app cloudflared namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-cert-manager - - name: cluster-apps-metrics-server - path: ./kubernetes/apps/monitoring/kubernetes-dashboard/app + - name: external-dns + path: ./kubernetes/apps/network/cloudflared/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..b35cdd06d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 @@ -0,0 +1,91 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: echo-server +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + echo-server: + strategy: RollingUpdate + containers: + app: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: 33 + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: true + LOG_IGNORE_PATH: /healthz + PROMETHEUS_ENABLED: true + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + 
securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 64Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: echo-server + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: echo-server + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http diff --git a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 similarity index 82% rename from bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 index c83d92a87..5dd7baca7 100644 --- a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: networking resources: - ./helmrelease.yaml diff --git a/bootstrap/templates/addons/discord-template-notifier/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 similarity index 59% rename from bootstrap/templates/addons/discord-template-notifier/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 index 55e19556a..2984f219c 100644 --- a/bootstrap/templates/addons/discord-template-notifier/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-discord-template-notifier + name: &app echo-server namespace: flux-system spec: - path: ./kubernetes/apps/default/discord-template-notifier/app + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/echo-server/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 similarity index 89% rename from bootstrap/templates/kubernetes/apps/networking/external-dns/app/helmrelease.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 index 71b532f21..f9b23788e 100644 --- a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 @@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: &app external-dns - namespace: networking spec: interval: 30m chart: @@ -14,16 +13,16 @@ spec: kind: HelmRepository name: external-dns namespace: flux-system - maxHistory: 2 install: + crds: CreateReplace remediation: retries: 3 upgrade: cleanupOnFail: true + crds: CreateReplace remediation: + strategy: rollback retries: 3 - uninstall: - keepHistory: 
false values: fullnameOverride: *app provider: cloudflare diff --git a/bootstrap/templates/addons/discord-template-notifier/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 similarity index 86% rename from bootstrap/templates/addons/discord-template-notifier/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 index 9444f7fa8..95bf4747f 100644 --- a/bootstrap/templates/addons/discord-template-notifier/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 @@ -1,7 +1,6 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: default resources: - ./secret.sops.yaml - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 similarity index 53% rename from bootstrap/templates/kubernetes/apps/networking/external-dns/app/secret.sops.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 index 7d62527da..c067b3293 100644 --- a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/secret.sops.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 @@ -3,6 +3,5 @@ apiVersion: v1 kind: Secret metadata: name: external-dns-secret - namespace: networking stringData: - api-token: "{{ bootstrap_cloudflare_token }}" + api-token: "#{ bootstrap_cloudflare.token }#" diff --git a/bootstrap/templates/kubernetes/apps/flux-system/addons/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 similarity index 59% rename from bootstrap/templates/kubernetes/apps/flux-system/addons/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 index 3eef3c510..eaed4b566 100644 --- a/bootstrap/templates/kubernetes/apps/flux-system/addons/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-flux-webhooks + name: &app external-dns namespace: flux-system spec: - path: ./kubernetes/apps/flux-system/addons/webhooks + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/external-dns/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 new file mode 100644 index 000000000..94d1afbf2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./staging.yaml + #% if bootstrap_cloudflare.acme.production %# + - ./production.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/production.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 similarity index 93% rename from bootstrap/templates/kubernetes/apps/networking/nginx/certificates/production.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 index 952f26490..b5afdf419 100644 --- 
a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/production.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 @@ -3,7 +3,6 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: "${SECRET_DOMAIN/./-}-production" - namespace: networking spec: secretName: "${SECRET_DOMAIN/./-}-production-tls" issuerRef: diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/staging.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 similarity index 93% rename from bootstrap/templates/kubernetes/apps/networking/nginx/certificates/staging.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 index 7b3914fa1..9c8694251 100644 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/staging.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 @@ -3,7 +3,6 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: "${SECRET_DOMAIN/./-}-staging" - namespace: networking spec: secretName: "${SECRET_DOMAIN/./-}-staging-tls" issuerRef: diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/external/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 similarity index 75% rename from bootstrap/templates/kubernetes/apps/networking/nginx/external/helmrelease.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 index 3f32b5581..60b83c6b6 100644 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/external/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 @@ -1,21 +1,18 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: nginx-external - namespace: networking + name: ingress-nginx-external spec: interval: 30m chart: spec: chart: ingress-nginx - version: 4.7.1 + version: 4.10.1 sourceRef: kind: HelmRepository name: ingress-nginx namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -23,19 +20,17 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false dependsOn: - name: cloudflared - namespace: networking + namespace: network values: - fullnameOverride: nginx-external + fullnameOverride: ingress-nginx-external controller: replicaCount: 1 service: annotations: external-dns.alpha.kubernetes.io/hostname: "external.${SECRET_DOMAIN}" - io.cilium/lb-ipam-ips: "{{ bootstrap_external_ingress_addr }}" + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.tunnel.ingress_vip }#" externalTrafficPolicy: Cluster ingressClassResource: name: external @@ -70,27 +65,26 @@ spec: enabled: true serviceMonitor: enabled: true - namespace: networking namespaceSelector: any: true extraArgs: - {% if bootstrap_acme_production_enabled | default(false) %} - default-ssl-certificate: "networking/${SECRET_DOMAIN/./-}-production-tls" - {% else %} - default-ssl-certificate: "networking/${SECRET_DOMAIN/./-}-staging-tls" - {% endif %} + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname 
whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: - app.kubernetes.io/name: nginx-external + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-external app.kubernetes.io/component: controller resources: requests: - cpu: 10m - memory: 250Mi + cpu: 100m limits: memory: 500Mi defaultBackend: diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/internal/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 similarity index 73% rename from bootstrap/templates/kubernetes/apps/networking/nginx/internal/helmrelease.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 index ad0d4ca51..045eed32a 100644 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/internal/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 @@ -1,21 +1,19 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: nginx-internal - namespace: networking + name: ingress-nginx-internal + namespace: network spec: interval: 30m chart: spec: chart: ingress-nginx - version: 4.7.1 + version: 4.10.1 sourceRef: kind: HelmRepository name: ingress-nginx namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -23,16 +21,13 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: - fullnameOverride: nginx-internal + fullnameOverride: ingress-nginx-internal controller: replicaCount: 1 service: annotations: - external-dns.alpha.kubernetes.io/hostname: "internal.${SECRET_DOMAIN}" - io.cilium/lb-ipam-ips: "{{ bootstrap_internal_ingress_addr }}" + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.ingress_vip }#" externalTrafficPolicy: Cluster ingressClassResource: name: internal @@ -67,27 +62,26 @@ spec: enabled: true serviceMonitor: enabled: true - namespace: networking namespaceSelector: any: true extraArgs: - {% if bootstrap_acme_production_enabled | default(false) %} - default-ssl-certificate: "networking/${SECRET_DOMAIN/./-}-production-tls" - {% else %} - default-ssl-certificate: "networking/${SECRET_DOMAIN/./-}-staging-tls" - {% endif %} + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: - app.kubernetes.io/name: nginx-internal + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-internal app.kubernetes.io/component: controller resources: requests: - cpu: 10m - memory: 250Mi + cpu: 100m limits: memory: 500Mi defaultBackend: diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 similarity index 53% rename from bootstrap/templates/kubernetes/apps/networking/nginx/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 index 60439e48b..99b1abb58 100644 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 @@ -2,12 +2,16 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-certificates + name: &app ingress-nginx-certificates namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-cert-manager-issuers - path: ./kubernetes/apps/networking/nginx/certificates + - name: cert-manager-issuers + path: ./kubernetes/apps/network/ingress-nginx/certificates prune: true sourceRef: kind: GitRepository @@ -20,12 +24,16 @@ spec: apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-external + name: &app ingress-nginx-internal namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-nginx-certificates - path: ./kubernetes/apps/networking/nginx/external + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/internal prune: true sourceRef: kind: GitRepository @@ -38,12 +46,16 @@ spec: apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-internal + name: &app ingress-nginx-external namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-nginx-certificates - path: ./kubernetes/apps/networking/nginx/internal + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/external prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 similarity index 74% rename from bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 index 09f91c5a3..bf7b917f6 100644 --- a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 @@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: k8s-gateway - namespace: networking spec: interval: 30m chart: @@ -14,7 +13,6 @@ spec: kind: HelmRepository name: k8s-gateway namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,8 +20,6 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: fullnameOverride: k8s-gateway domain: 
"${SECRET_DOMAIN}" @@ -32,5 +28,6 @@ spec: type: LoadBalancer port: 53 annotations: - io.cilium/lb-ipam-ips: "{{ bootstrap_k8s_gateway_addr }}" + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.gateway_vip }#" externalTrafficPolicy: Cluster + watchedResources: ["Ingress", "Service"] diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/addons/grafana/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 similarity index 59% rename from bootstrap/templates/addons/grafana/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 index c115caa83..06f442555 100644 --- a/bootstrap/templates/addons/grafana/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-grafana + name: &app k8s-gateway namespace: flux-system spec: - path: ./kubernetes/apps/monitoring/grafana/app + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/k8s-gateway/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/apps/networking/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 similarity index 87% rename from bootstrap/templates/kubernetes/apps/networking/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 index 4ad531cd6..e6f8ddc1b 100644 --- a/bootstrap/templates/kubernetes/apps/networking/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 @@ -6,5 +6,5 @@ resources: - ./cloudflared/ks.yaml - ./echo-server/ks.yaml - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml - ./k8s-gateway/ks.yaml - - ./nginx/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/default/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 similarity index 86% rename from bootstrap/templates/kubernetes/apps/default/namespace.yaml.j2 rename to bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 index f659b055d..4d78d7b11 100644 --- a/bootstrap/templates/kubernetes/apps/default/namespace.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: Namespace metadata: - name: default + name: network labels: kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/configs/config.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/configs/config.yaml.j2 deleted file mode 100644 index fdb39f3d1..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/configs/config.yaml.j2 +++ /dev/null @@ -1,14 +0,0 @@ ---- -originRequest: - http2Origin: true - -ingress: - - hostname: "${SECRET_DOMAIN}" - service: https://nginx-external-controller.networking.svc.cluster.local:443 - originRequest: - originServerName: "external.${SECRET_DOMAIN}" - - hostname: "*.${SECRET_DOMAIN}" - service: 
https://nginx-external-controller.networking.svc.cluster.local:443 - originRequest: - originServerName: "external.${SECRET_DOMAIN}" - - service: http_status:404 diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml.j2 deleted file mode 100644 index 08d27ba17..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,101 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: cloudflared - namespace: networking -spec: - interval: 30m - chart: - spec: - chart: app-template - version: 1.5.1 - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - controller: - replicas: 1 - strategy: RollingUpdate - annotations: - reloader.stakater.com/auto: "true" - image: - repository: docker.io/cloudflare/cloudflared - tag: 2023.7.3 - env: - NO_AUTOUPDATE: "true" - TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json - TUNNEL_METRICS: 0.0.0.0:8080 - TUNNEL_TRANSPORT_PROTOCOL: auto - TUNNEL_ID: - valueFrom: - secretKeyRef: - name: cloudflared-secret - key: TUNNEL_ID - args: - - tunnel - - --config - - /etc/cloudflared/config/config.yaml - - run - - "$(TUNNEL_ID)" - service: - main: - ports: - http: - port: 8080 - serviceMonitor: - main: - enabled: true - endpoints: - - port: http - scheme: http - path: /metrics - interval: 1m - scrapeTimeout: 30s - probes: - liveness: &probes - enabled: true - custom: true - spec: - httpGet: - path: /ready - port: http - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - readiness: *probes - startup: - enabled: false - persistence: - config: - enabled: true - type: configMap - name: cloudflared-configmap - subPath: config.yaml - mountPath: /etc/cloudflared/config/config.yaml - readOnly: true - creds: - enabled: true - type: secret - name: cloudflared-secret - subPath: credentials.json - mountPath: /etc/cloudflared/creds/credentials.json - readOnly: true - resources: - requests: - cpu: 5m - memory: 10Mi - limits: - memory: 256Mi diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml.j2 deleted file mode 100644 index ad63f1ef5..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml.j2 +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: cloudflared-secret - namespace: networking -stringData: - TUNNEL_ID: "{{ bootstrap_cloudflare_tunnel_id }}" - credentials.json: | - { - "AccountTag": "{{ bootstrap_cloudflare_account_tag }}", - "TunnelSecret": "{{ bootstrap_cloudflare_tunnel_secret }}", - "TunnelID": "{{ bootstrap_cloudflare_tunnel_id }}" - } diff --git a/bootstrap/templates/kubernetes/apps/networking/echo-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/echo-server/app/helmrelease.yaml.j2 deleted file mode 100644 index 08ac8a097..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/echo-server/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,74 +0,0 @@ -#jinja2: trim_blocks: True ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: echo-server - namespace: networking -spec: - 
interval: 30m - chart: - spec: - chart: app-template - version: 1.5.1 - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - controller: - strategy: RollingUpdate - image: - repository: docker.io/jmalloc/echo-server - tag: 0.3.5 - service: - main: - ports: - http: - port: &port 8080 - probes: - liveness: &probes - enabled: true - custom: true - spec: - httpGet: - path: /health - port: *port - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - readiness: *probes - startup: - enabled: false - ingress: - main: - enabled: true - ingressClassName: external - annotations: - external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" - hajimari.io/icon: video-input-antenna - hosts: - - host: &host "{% raw %}{{ .Release.Name }}{% endraw %}.${SECRET_DOMAIN}" - paths: - - path: / - pathType: Prefix - tls: - - hosts: - - *host - resources: - requests: - cpu: 5m - memory: 10Mi - limits: - memory: 50Mi diff --git a/bootstrap/templates/kubernetes/apps/networking/echo-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/echo-server/app/kustomization.yaml.j2 deleted file mode 100644 index c83d92a87..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/echo-server/app/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/echo-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/echo-server/ks.yaml.j2 deleted file mode 100644 index 0fe3d81a6..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/echo-server/ks.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-echo-server - namespace: flux-system -spec: - path: ./kubernetes/apps/networking/echo-server/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml.j2 deleted file mode 100644 index 9254f89d1..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml.j2 +++ /dev/null @@ -1,93 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.0 - api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007" - creationTimestamp: null - name: dnsendpoints.externaldns.k8s.io -spec: - group: externaldns.k8s.io - names: - kind: DNSEndpoint - listKind: DNSEndpointList - plural: dnsendpoints - singular: dnsendpoint - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DNSEndpointSpec defines the desired state of DNSEndpoint - properties: - endpoints: - items: - description: Endpoint is a high-level way of a connection between a service and an IP - properties: - dnsName: - description: The hostname of the DNS record - type: string - labels: - additionalProperties: - type: string - description: Labels stores labels defined for the Endpoint - type: object - providerSpecific: - description: ProviderSpecific stores provider specific config - items: - description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers - properties: - name: - type: string - value: - type: string - type: object - type: array - recordTTL: - description: TTL for the record - format: int64 - type: integer - recordType: - description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc - type: string - setIdentifier: - description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple') - type: string - targets: - description: The targets the DNS record points to - items: - type: string - type: array - type: object - type: array - type: object - status: - description: DNSEndpointStatus defines the observed state of DNSEndpoint - properties: - observedGeneration: - description: The generation observed by the external-dns controller. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/external-dns/app/kustomization.yaml.j2 deleted file mode 100644 index 1278dd8b5..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/external-dns/app/kustomization.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./dnsendpoint-crd.yaml - - ./secret.sops.yaml - - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/ks.yaml.j2 deleted file mode 100644 index 502e1f2f8..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/k8s-gateway/ks.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: cluster-apps-k8s-gateway - namespace: flux-system -spec: - path: ./kubernetes/apps/networking/k8s-gateway/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/networking/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/namespace.yaml.j2 deleted file mode 100644 index b9e4a4161..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/namespace.yaml.j2 +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: networking - labels: - kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/kustomization.yaml.j2 deleted file mode 100644 index 81f57a480..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/certificates/kustomization.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ./staging.yaml - {% if bootstrap_acme_production_enabled | default(false) %} - - ./production.yaml - {% endif %} diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/external/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/nginx/external/kustomization.yaml.j2 deleted file mode 100644 index c83d92a87..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/external/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/networking/nginx/internal/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/networking/nginx/internal/kustomization.yaml.j2 deleted file mode 100644 index c83d92a87..000000000 --- a/bootstrap/templates/kubernetes/apps/networking/nginx/internal/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./helmrelease.yaml diff --git a/kubernetes/apps/system-upgrade/kustomization.yaml b/bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 similarity 
index 81% rename from kubernetes/apps/system-upgrade/kustomization.yaml rename to bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 index 5413fe6af..9cd8d4e4f 100644 --- a/kubernetes/apps/system-upgrade/kustomization.yaml +++ b/bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 @@ -3,3 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml + - ./openebs/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/monitoring/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 similarity index 81% rename from bootstrap/templates/kubernetes/apps/monitoring/namespace.yaml.j2 rename to bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 index ef4dd87a4..f173c6c9c 100644 --- a/bootstrap/templates/kubernetes/apps/monitoring/namespace.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: Namespace metadata: - name: monitoring + name: openebs-system labels: kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..975bff303 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 @@ -0,0 +1,45 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: openebs +spec: + interval: 30m + chart: + spec: + chart: openebs + version: 4.0.1 + sourceRef: + kind: HelmRepository + name: openebs + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + engines: + local: + lvm: + enabled: false + zfs: + enabled: false + replicated: + mayastor: + enabled: false + openebs-crds: + csi: + volumeSnapshots: + enabled: false + localpv-provisioner: + localpv: + image: + registry: quay.io/ + hostpathClass: + enabled: true + name: openebs-hostpath + isDefaultClass: false + basePath: /var/openebs/local diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 new file mode 100644 index 000000000..170feca91 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app openebs + namespace: flux-system +spec: + targetNamespace: openebs-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/openebs-system/openebs/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py new file mode 100644 index 000000000..0979f9a64 --- /dev/null 
+++ b/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/addons/system-upgrade-controller/plans/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 similarity index 69% rename from bootstrap/templates/addons/system-upgrade-controller/plans/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 index 2a658c35b..c159f45bc 100644 --- a/bootstrap/templates/addons/system-upgrade-controller/plans/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 @@ -2,5 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./server.yaml - - ./agent.yaml + - ./plan.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 new file mode 100644 index 000000000..38784cd5a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 @@ -0,0 +1,50 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: controllers +spec: + version: "${KUBE_VERSION}" + upgrade: + image: rancher/k3s-upgrade + serviceAccountName: system-upgrade + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/etcd + effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: workers +spec: + version: "${KUBE_VERSION}" + serviceAccountName: system-upgrade + concurrency: 1 + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + prepare: + image: rancher/k3s-upgrade + args: ["prepare", "controllers"] + upgrade: + image: rancher/k3s-upgrade diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 new file mode 100644 index 000000000..3ee72dac7 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 @@ -0,0 +1,26 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-k3s + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/apps/system-upgrade/k3s/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=github-releases depName=k3s-io/k3s + KUBE_VERSION: v1.30.0+k3s1 diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 index 07efa24a4..e0b2bf29a 100644 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 
@@ -1,9 +1,7 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml - {% if system_upgrade_controller.enabled | default(false) %} - ./system-upgrade-controller/ks.yaml - {% endif %} + - ./k3s/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 new file mode 100644 index 000000000..a9e48714a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 @@ -0,0 +1,101 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app system-upgrade-controller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + system-upgrade-controller: + strategy: RollingUpdate + containers: + app: + image: + repository: docker.io/rancher/system-upgrade-controller + tag: v0.13.4 + env: + SYSTEM_UPGRADE_CONTROLLER_DEBUG: false + SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 + SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 + SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 + SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent + SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.30.1 + SYSTEM_UPGRADE_JOB_PRIVILEGED: true + SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 + SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m + SYSTEM_UPGRADE_CONTROLLER_NAME: *app + SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccount: + create: true + name: system-upgrade + persistence: + tmp: + type: emptyDir + globalMounts: + - path: /tmp + etc-ssl: + type: hostPath + hostPath: /etc/ssl + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ssl + readOnly: true + etc-pki: + type: hostPath + hostPath: /etc/pki + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/pki + readOnly: true + etc-ca-certificates: + type: hostPath + hostPath: /etc/ca-certificates + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ca-certificates + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 new file mode 100644 index 000000000..49f355119 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # 
renovate: datasource=github-releases depName=rancher/system-upgrade-controller + - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.4/crd.yaml + - helmrelease.yaml + - rbac.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 new file mode 100644 index 000000000..123677c2a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-upgrade +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: system-upgrade + namespace: system-upgrade diff --git a/bootstrap/templates/addons/csi-driver-nfs/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 similarity index 54% rename from bootstrap/templates/addons/csi-driver-nfs/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 index 18e933c0c..7fe74b4af 100644 --- a/bootstrap/templates/addons/csi-driver-nfs/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-csi-driver-nfs + name: &app system-upgrade-controller namespace: flux-system spec: - path: ./kubernetes/apps/kube-system/csi-driver-nfs/app + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app prune: true sourceRef: kind: GitRepository diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 new file mode 100644 index 000000000..0ef1f6e8d --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 @@ -0,0 +1,17 @@ +#% if bootstrap_github_private_key %# +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-deploy-key + namespace: flux-system +stringData: + identity: | + #% filter indent(width=4, first=False) %# + #{ bootstrap_github_private_key }# + #%- endfilter %# + known_hosts: | + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= +#% endif %# diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 new file mode 100644 index 000000000..4a669d63e --- /dev/null +++ 
b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 @@ -0,0 +1,61 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components and CRDs into your cluster. +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.3.0 +patches: + # Remove the default network policies + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/bootstrap/templates/kubernetes/bootstrap/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/kustomization.yaml.j2 deleted file mode 100644 index ca6f64993..000000000 --- a/bootstrap/templates/kubernetes/bootstrap/kustomization.yaml.j2 +++ /dev/null @@ -1,18 +0,0 @@ -# IMPORTANT: This file is not tracked by flux and should never be. Its -# purpose is to only install the Flux components and CRDs into your cluster. 
---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - github.com/fluxcd/flux2/manifests/install?ref=v2.2.3 -patches: - # Remove the default network policies - - patch: |- - $patch: delete - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: not-used - target: - group: networking.k8s.io - kind: NetworkPolicy diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py b/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py new file mode 100644 index 000000000..3ace63dfa --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 new file mode 100644 index 000000000..ecaa09176 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 @@ -0,0 +1,4 @@ +--- +#% filter indent(width=0, first=True) %# +#% include 'partials/cilium-values-init.partial.yaml.j2' %# +#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 new file mode 100644 index 000000000..8308db2e6 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 @@ -0,0 +1,26 @@ +--- +repositories: + - name: cilium + url: https://helm.cilium.io + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + recreatePods: true + force: true + +releases: + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.5 + values: ["./cilium-values.yaml"] + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.1.0 + values: ["./kubelet-csr-approver-values.yaml"] + needs: ["cilium"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 new file mode 100644 index 000000000..d63b98451 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 @@ -0,0 +1,4 @@ +--- +#% filter indent(width=0, first=True) %# +#% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# +#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 new file mode 100644 index 000000000..084030c89 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 @@ -0,0 +1,232 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json +--- +# renovate: datasource=docker depName=ghcr.io/siderolabs/installer +talosVersion: v1.7.2 +# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet +kubernetesVersion: v1.30.1 + +clusterName: &cluster #{ bootstrap_cluster_name|default('home-kubernetes', true) }# +endpoint: https://#{ bootstrap_controllers_vip }#:6443 +clusterPodNets: + - "#{ bootstrap_pod_network.split(',')[0] }#" +clusterSvcNets: + - "#{ bootstrap_service_network.split(',')[0] }#" +additionalApiServerCertSans: &sans + - "#{ bootstrap_controllers_vip }#" + - 127.0.0.1 # KubePrism + #% for item in 
bootstrap_tls_sans %# + - "#{ item }#" + #% endfor %# +additionalMachineCertSans: *sans +cniConfig: + name: none + +nodes: + #% for item in bootstrap_node_inventory %# + - hostname: "#{ item.name }#" + ipAddress: "#{ item.address }#" + #% if item.talos_disk.startswith('/') %# + installDisk: "#{ item.talos_disk }#" + #% else %# + installDiskSelector: + serial: "#{ item.talos_disk }#" + #% endif %# + #% if bootstrap_talos.secureboot.enabled %# + machineSpec: + secureboot: true + talosImageURL: factory.talos.dev/installer-secureboot/#{ bootstrap_talos.schematic_id }# + #% else %# + talosImageURL: factory.talos.dev/installer/#{ bootstrap_talos.schematic_id }# + #% endif %# + controlPlane: #{ (item.controller) | string | lower }# + networkInterfaces: + - deviceSelector: + hardwareAddr: "#{ item.talos_nic | lower }#" + dhcp: false + #% if bootstrap_talos.vlan %# + vlans: + - vlanId: #{ bootstrap_talos.vlan }# + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controllers_vip }#" + #% endif %# + #% else %# + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controllers_vip }#" + #% endif %# + #% endif %# + #% if bootstrap_talos.user_patches %# + patches: + - "@./patches/node_#{ item.name }#.yaml" + #% endif %# + #% endfor %# + +patches: + # Configure containerd + - |- + machine: + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: |- + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + + # Disable search domain everywhere + - |- + machine: + network: + disableSearchDomain: true + + # Enable cluster discovery + - |- + cluster: + discovery: + registries: + kubernetes: + disabled: false + service: + disabled: false + + # Configure kubelet + - |- + machine: + kubelet: + extraArgs: + rotate-server-certificates: true + nodeIP: + validSubnets: ["#{ bootstrap_node_network }#"] + + # Force nameserver + - |- + machine: + network: + nameservers: + #% for item in bootstrap_dns_servers | default(['1.1.1.1', '1.0.0.1']) %# + - #{ item }# + #% endfor %# + + # Configure NTP + - |- + machine: + time: + disabled: false + servers: ["time.cloudflare.com"] + + # Custom sysctl settings + - |- + machine: + sysctls: + fs.inotify.max_queued_events: "65536" + fs.inotify.max_user_watches: "524288" + fs.inotify.max_user_instances: "8192" + net.core.rmem_max: "2500000" + net.core.wmem_max: "2500000" + + # Mount openebs-hostpath in kubelet + - |- + machine: + kubelet: + extraMounts: + - destination: /var/openebs/local + type: bind + source: /var/openebs/local + options: ["bind", "rshared", "rw"] + + #% if bootstrap_talos.secureboot.enabled and bootstrap_talos.secureboot.encrypt_disk_with_tpm %# + # Encrypt system disk with TPM + - |- + machine: + 
systemDiskEncryption: + ephemeral: + provider: luks2 + keys: + - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} + #% endif %# + + #% if bootstrap_talos.user_patches %# + # User specified global patches + - "@./patches/global.yaml" + #% endif %# + +controlPlane: + patches: + # Cluster configuration + - |- + cluster: + allowSchedulingOnControlPlanes: true + controllerManager: + extraArgs: + bind-address: 0.0.0.0 + proxy: + disabled: true + scheduler: + extraArgs: + bind-address: 0.0.0.0 + + # ETCD configuration + - |- + cluster: + etcd: + extraArgs: + listen-metrics-urls: http://0.0.0.0:2381 + advertisedSubnets: + - "#{ bootstrap_node_network }#" + + # Disable default API server admission plugins. + - |- + - op: remove + path: /cluster/apiServer/admissionControl + + # Enable K8s Talos API Access + - |- + machine: + features: + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: ["os:admin"] + allowedKubernetesNamespaces: ["system-upgrade"] + + #% if bootstrap_talos.user_patches %# + # User specified controlPlane patches + - "@./patches/controlPlane.yaml" + #% endif %# + +#% if ((bootstrap_talos.user_patches) and (bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length)) %# +worker: + patches: + # User specified worker patches + - "@./patches/worker.yaml" +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/apps.yaml.j2 b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 index d557f8286..2284be624 100644 --- a/bootstrap/templates/kubernetes/flux/apps.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 @@ -23,8 +23,10 @@ spec: name: cluster-secrets - kind: ConfigMap name: cluster-settings-user + optional: true - kind: Secret name: cluster-secrets-user + optional: true patches: - patch: |- apiVersion: kustomize.toolkit.fluxcd.io/v1 @@ -44,8 +46,10 @@ spec: name: cluster-secrets - kind: ConfigMap name: cluster-settings-user + optional: true - kind: Secret name: cluster-secrets-user + optional: true target: group: kustomize.toolkit.fluxcd.io kind: Kustomization diff --git a/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 index cbabaddb3..bae21e831 100644 --- a/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 @@ -6,9 +6,13 @@ metadata: namespace: flux-system spec: interval: 30m + url: "#{ bootstrap_github_address }#" + #% if bootstrap_github_private_key %# + secretRef: + name: github-deploy-key + #% endif %# ref: - branch: {{ bootstrap_github_repository_branch }} - url: "https://github.com/{{ bootstrap_github_username }}/{{ bootstrap_github_repository_name }}.git" + branch: "#{ bootstrap_github_branch|default('main', true) }#" ignore: | # exclude all /* diff --git a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 index 9f9ffd321..b6889a4c3 100644 --- a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 @@ -8,7 +8,7 @@ spec: interval: 10m url: oci://ghcr.io/fluxcd/flux-manifests ref: - tag: v2.2.3 + tag: v2.3.0 --- apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization @@ -69,18 +69,6 @@ spec: target: kind: Deployment name: (kustomize-controller|helm-controller|source-controller) - # Enable drift detection for HelmReleases and set the log level to debug - # https://fluxcd.io/flux/components/helm/helmreleases/#drift-detection - - 
patch: | - - op: add - path: /spec/template/spec/containers/0/args/- - value: --feature-gates=DetectDrift=true,CorrectDrift=false - - op: add - path: /spec/template/spec/containers/0/args/- - value: --log-level=debug - target: - kind: Deployment - name: helm-controller # Enable Helm near OOM detection # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection - patch: | diff --git a/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 index bb78ecf0d..fe0f332a9 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 @@ -1,5 +1,4 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -resources: - - ./local-path-provisioner.yaml +resources: [] diff --git a/bootstrap/templates/kubernetes/flux/repositories/git/local-path-provisioner.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/git/local-path-provisioner.yaml.j2 deleted file mode 100644 index 669fb0533..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/git/local-path-provisioner.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1 -kind: GitRepository -metadata: - name: local-path-provisioner - namespace: flux-system -spec: - interval: 30m - url: https://github.com/rancher/local-path-provisioner - ref: - tag: v0.0.24 - ignore: | - # exclude all - /* - # include kubernetes directory - !/deploy/chart/local-path-provisioner diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/bitnami.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/bitnami.yaml.j2 deleted file mode 100644 index eca160a1a..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/bitnami.yaml.j2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: bitnami - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://registry-1.docker.io/bitnamicharts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 index df0c6474a..a40b5d778 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: bjw-s diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 index 51c65d691..3aee36788 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: cilium diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 deleted file mode 100644 index e3a16bd18..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: coredns - namespace: flux-system -spec: - interval: 1h - 
url: https://coredns.github.io/helm diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml.j2 deleted file mode 100644 index b48140d78..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: csi-driver-nfs - namespace: flux-system -spec: - interval: 1h - url: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 index b76b9662c..b5b66a36a 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 @@ -1,5 +1,6 @@ +#% if bootstrap_cloudflare.enabled %# --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: external-dns @@ -7,3 +8,4 @@ metadata: spec: interval: 1h url: https://kubernetes-sigs.github.io/external-dns +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/grafana.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/grafana.yaml.j2 deleted file mode 100644 index 5c3939d53..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/grafana.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: grafana - namespace: flux-system -spec: - interval: 1h - url: https://grafana.github.io/helm-charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/hajimari.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/hajimari.yaml.j2 deleted file mode 100644 index e246f09be..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/hajimari.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: hajimari - namespace: flux-system -spec: - interval: 1h - url: https://hajimari.io diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 index 4dcf5eeac..db1ddad32 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 @@ -1,5 +1,6 @@ +#% if bootstrap_cloudflare.enabled %# --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: ingress-nginx @@ -7,3 +8,4 @@ metadata: spec: interval: 1h url: https://kubernetes.github.io/ingress-nginx +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 index d7e64ffc7..737e06af0 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 @@ -1,9 +1,9 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: jetstack namespace: flux-system spec: interval: 1h - url: https://charts.jetstack.io/ + url: https://charts.jetstack.io diff 
--git a/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 index a18177eb4..abfa8c14f 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 @@ -1,9 +1,11 @@ +#% if bootstrap_cloudflare.enabled %# --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: k8s-gateway namespace: flux-system spec: interval: 1h - url: https://ori-edge.github.io/k8s_gateway/ + url: https://ori-edge.github.io/k8s_gateway +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml.j2 deleted file mode 100644 index d63e74b70..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: kubernetes-dashboard - namespace: flux-system -spec: - interval: 1h - url: https://kubernetes.github.io/dashboard/ diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 index 64a99c3f7..2207b7737 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 @@ -2,19 +2,18 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./bitnami.yaml - ./bjw-s.yaml - ./cilium.yaml - - ./coredns.yaml - - ./csi-driver-nfs.yaml + #% if bootstrap_cloudflare.enabled %# - ./external-dns.yaml - - ./grafana.yaml - - ./hajimari.yaml - ./ingress-nginx.yaml - - ./jetstack.yaml - ./k8s-gateway.yaml - - ./kubernetes-dashboard.yaml + #% endif %# + - ./jetstack.yaml - ./metrics-server.yaml - - ./prometheus-community.yaml + - ./openebs.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./postfinance.yaml + - ./spegel.yaml + #% endif %# - ./stakater.yaml - - ./weave-gitops.yaml diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 index 57e7aa0c5..27a44828a 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: metrics-server diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 new file mode 100644 index 000000000..4f48013ee --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: openebs + namespace: flux-system +spec: + interval: 1h + url: https://openebs.github.io/openebs diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 new file mode 100644 index 000000000..390e6b708 --- /dev/null +++ 
b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 @@ -0,0 +1,11 @@ +#% if bootstrap_distribution in ["talos"] %# +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 1h + url: https://postfinance.github.io/kubelet-csr-approver +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 deleted file mode 100644 index a97a3d445..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: prometheus-community - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://ghcr.io/prometheus-community/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 new file mode 100644 index 000000000..c43e0b268 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_distribution in ["talos"] %# +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: spegel + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/spegel-org/helm-charts +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 index 1846e8ae4..98a3f6455 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: stakater diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/weave-gitops.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/weave-gitops.yaml.j2 deleted file mode 100644 index f325c18ba..000000000 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/weave-gitops.yaml.j2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: weave-gitops - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://ghcr.io/weaveworks/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 index cb57fda12..d158d426e 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 @@ -4,4 +4,4 @@ kind: Kustomization resources: - ./git - ./helm - # - ./oci + - ./oci diff --git a/bootstrap/templates/kubernetes/flux/repositories/oci/.gitkeep b/bootstrap/templates/kubernetes/flux/repositories/oci/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 new file mode 100644 index 000000000..fe0f332a9 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 @@ -0,0 +1,4 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] 
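Note: the new repositories/oci/kustomization.yaml.j2 above is deliberately an empty placeholder (resources: []) so that the "- ./oci" entry now enabled in repositories/kustomization.yaml.j2 resolves. If OCI-based sources are added later, a Flux OCIRepository placed in that directory might look like the sketch below; the name, URL, and tag are illustrative assumptions, not part of this patch.

---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
  name: example-manifests # hypothetical name, not defined by this template
  namespace: flux-system
spec:
  interval: 30m
  # hypothetical OCI artifact; replace with a real oci:// URL
  url: oci://ghcr.io/example-org/manifests
  ref:
    tag: 1.0.0

Any such file would also need to be listed under resources in oci/kustomization.yaml.j2 for Flux to apply it.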
diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets-user.sops.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets-user.sops.yaml.j2 deleted file mode 100644 index 281accfb4..000000000 --- a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets-user.sops.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: cluster-secrets-user - namespace: flux-system -stringData: - SECRET_PLACEHOLDER: "secret-value" diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 index 54f10d93f..71a496d7e 100644 --- a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 @@ -5,6 +5,9 @@ metadata: name: cluster-secrets namespace: flux-system stringData: - SECRET_DOMAIN: "{{ bootstrap_cloudflare_domain }}" - SECRET_ACME_EMAIL: "{{ bootstrap_acme_email }}" - SECRET_CLOUDFLARE_TUNNEL_ID: "{{ bootstrap_cloudflare_tunnel_id }}" + SECRET_EXAMPLE: Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit... + #% if bootstrap_cloudflare.enabled %# + SECRET_DOMAIN: "#{ bootstrap_cloudflare.domain }#" + SECRET_ACME_EMAIL: "#{ bootstrap_cloudflare.acme.email }#" + SECRET_CLOUDFLARE_TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-settings-user.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-settings-user.yaml.j2 deleted file mode 100644 index 7b8176166..000000000 --- a/bootstrap/templates/kubernetes/flux/vars/cluster-settings-user.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-settings-user - namespace: flux-system -data: - SETTINGS_PLACEHOLDER: "settings-value" diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 index 511c2bd27..f176c7f55 100644 --- a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 @@ -1,4 +1,3 @@ -#jinja2: trim_blocks: True, lstrip_blocks: True --- apiVersion: v1 kind: ConfigMap @@ -6,13 +5,12 @@ metadata: name: cluster-settings namespace: flux-system data: - TIMEZONE: "{{ bootstrap_timezone }}" - COREDNS_ADDR: "{{ bootstrap_service_cidr.split(',')[0] | ansible.utils.nthhost(10) }}" - KUBE_VIP_ADDR: "{{ bootstrap_kube_vip_addr }}" - CLUSTER_CIDR: "{{ bootstrap_cluster_cidr.split(',')[0] }}" - SERVICE_CIDR: "{{ bootstrap_service_cidr.split(',')[0] }}" - NODE_CIDR: "{{ bootstrap_node_cidr }}" - {% if bootstrap_ipv6_enabled | default(false) %} - CLUSTER_CIDR_V6: "{{ bootstrap_cluster_cidr.split(',')[1] }}" - SERVICE_CIDR_V6: "{{ bootstrap_service_cidr.split(',')[1] }}" - {% endif %} + TIMEZONE: "#{ bootstrap_timezone }#" + CLUSTER_CIDR: "#{ bootstrap_pod_network.split(',')[0] }#" + NODE_CIDR: "#{ bootstrap_node_network }#" + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + CLUSTER_CIDR_V6: "#{ bootstrap_pod_network.split(',')[1] }#" + #% endif %# + #% if bootstrap_bgp.enabled %# + BGP_ADVERTISED_CIDR: "#{ bootstrap_bgp.advertised_network }#" + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 index dd93387ae..8db2fe911 100644 --- 
a/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 @@ -2,6 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./cluster-settings.yaml - - ./cluster-settings-user.yaml - ./cluster-secrets.sops.yaml - - ./cluster-secrets-user.sops.yaml diff --git a/bootstrap/templates/node.sops.yaml.j2 b/bootstrap/templates/node.sops.yaml.j2 deleted file mode 100644 index e538fd2e1..000000000 --- a/bootstrap/templates/node.sops.yaml.j2 +++ /dev/null @@ -1,2 +0,0 @@ ---- -ansible_become_pass: "{{ password }}" diff --git a/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 b/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 new file mode 100644 index 000000000..7553cf1b9 --- /dev/null +++ b/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 @@ -0,0 +1,129 @@ +autoDirectNodeRoutes: true +bgpControlPlane: + enabled: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: #{ bootstrap_cluster_name|default('home-kubernetes', true) }# +cni: + exclusive: false + #% if bootstrap_distribution in ["k3s"] %# + binPath: /var/lib/rancher/k3s/data/current/bin + confPath: /var/lib/rancher/k3s/agent/etc/cni/net.d + #% endif %# +containerRuntime: + integration: containerd + #% if bootstrap_distribution in ["k3s"] %# + socketPath: /var/run/k3s/containerd/containerd.sock + #% endif %# +# NOTE: devices might need to be set if you have more than one active NIC on your hosts +# devices: eno+ eth+ +endpointRoutes: + enabled: true +#% if bootstrap_cloudflare.enabled %# +hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + className: internal + hosts: ["hubble.${SECRET_DOMAIN}"] +#% else %# +hubble: + enabled: false +#% endif %# +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" +#% if bootstrap_feature_gates.dual_stack_ipv4_first %# +ipv6NativeRoutingCIDR: "${CLUSTER_CIDR_V6}" +ipv6: + enabled: true +#% endif %# +#% if bootstrap_distribution in ["k3s"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 6444 +#% elif bootstrap_distribution in ["talos"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +#% endif %# +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + #% if ((bootstrap_bgp.enabled) or (bootstrap_feature_gates.dual_stack_ipv4_first)) %# + enabled: false # https://github.com/cilium/cilium/issues/28985 + #% else %# + enabled: true + #% endif %# +loadBalancer: + algorithm: maglev + mode: snat +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium +prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true +dashboards: + enabled: true + annotations: + grafana_folder: Cilium +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - 
NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 b/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 new file mode 100644 index 000000000..b1b0c3531 --- /dev/null +++ b/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 @@ -0,0 +1,79 @@ +autoDirectNodeRoutes: true +bgpControlPlane: + enabled: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: #{ bootstrap_cluster_name|default('home-kubernetes', true) }# +cni: + exclusive: false + #% if bootstrap_distribution in ["k3s"] %# + binPath: /var/lib/rancher/k3s/data/current/bin + confPath: /var/lib/rancher/k3s/agent/etc/cni/net.d + #% endif %# +containerRuntime: + integration: containerd + #% if bootstrap_distribution in ["k3s"] %# + socketPath: /var/run/k3s/containerd/containerd.sock + #% endif %# +# NOTE: devices might need to be set if you have more than one active NIC on your hosts +# devices: eno+ eth+ +endpointRoutes: + enabled: true +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "#{ bootstrap_pod_network }#" +#% if bootstrap_feature_gates.dual_stack_ipv4_first %# +ipv6NativeRoutingCIDR: "#{ bootstrap_pod_network_v6 }#" +ipv6: + enabled: true +#% endif %# +#% if bootstrap_distribution in ["k3s"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 6444 +#% elif bootstrap_distribution in ["talos"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +#% endif %# +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + #% if ((bootstrap_bgp.enabled) or (bootstrap_feature_gates.dual_stack_ipv4_first)) %# + enabled: false # https://github.com/cilium/cilium/issues/28985 + #% else %# + enabled: true + #% endif %# +loadBalancer: + algorithm: maglev + mode: snat +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 new file mode 100644 index 000000000..9d77947cd --- /dev/null +++ b/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-vip + namespace: kube-system + labels: + app.kubernetes.io/name: kube-vip +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-vip + template: + metadata: + labels: + app.kubernetes.io/name: kube-vip + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.8.0 + imagePullPolicy: IfNotPresent + args: ["manager"] + env: + - name: address + value: "#{ bootstrap_controllers_vip }#" + - name: vip_arp + value: "true" + - name: lb_enable + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: prometheus_server + value: :2112 + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW", 
"SYS_TIME"] + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + serviceAccountName: kube-vip + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists diff --git a/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 new file mode 100644 index 000000000..d6ecc9367 --- /dev/null +++ b/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-vip + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:kube-vip-role +rules: + - apiGroups: [""] + resources: ["services/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["services", "endpoints"] + verbs: ["list","get","watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","get","watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "get", "watch", "update", "create"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["list","get","watch", "update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-vip-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-vip-role +subjects: +- kind: ServiceAccount + name: kube-vip + namespace: kube-system diff --git a/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 b/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 new file mode 100644 index 000000000..0bf92493c --- /dev/null +++ b/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 @@ -0,0 +1,2 @@ +providerRegex: ^(#{ (bootstrap_node_inventory | map(attribute='name') | join('|')) }#)$ +bypassDnsResolution: true diff --git a/bootstrap/vars/.gitignore b/bootstrap/vars/.gitignore deleted file mode 100644 index 35ba105e5..000000000 --- a/bootstrap/vars/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -addons.yaml -config.yaml diff --git a/bootstrap/vars/addons.sample.yaml b/bootstrap/vars/addons.sample.yaml deleted file mode 100644 index 83ab4826e..000000000 --- a/bootstrap/vars/addons.sample.yaml +++ /dev/null @@ -1,51 +0,0 @@ ---- -# -# Addons configuration - addons.yaml is gitignored -# - -# https://github.com/toboshii/hajimari -hajimari: - enabled: false - -# https://github.com/grafana/grafana -grafana: - enabled: false - # password: # password for `admin` user - -# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack -kube_prometheus_stack: - enabled: false - -# https://github.com/kubernetes/dashboard -kubernetes_dashboard: - enabled: false - # Password can be obtained by running the following command once it is deployed: - # kubectl -n monitoring get secret kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d - -# https://github.com/weaveworks/weave-gitops -weave_gitops: - enabled: false - # password: # password for `admin` user - -# https://github.com/kubernetes-csi/csi-driver-nfs -csi_driver_nfs: - enabled: false - storage_class: - # - 
name: # name of the storage class (must match [a-z0-9-]+) - # server: # dns name or ip address of nfs server - # share: # exported share (path) - # ... - -# https://github.com/rancher/system-upgrade-controller -system_upgrade_controller: - # WARNING: Only enable this if you also track the version of k3s in the - # ansible configuration files. Running ansible against an already provisioned - # cluster with this enabled might cause your cluster to be downgraded. - enabled: false - -# https://github.com/morphy2k/rss-forwarder -discord_template_notifier: - # Will post commits from the template repository to the specified discord channel - # so it's easier to keep track of changes. - enabled: false - webhook_url: # Discord webhook url diff --git a/bootstrap/vars/config.sample.yaml b/bootstrap/vars/config.sample.yaml deleted file mode 100644 index 0e71c64ae..000000000 --- a/bootstrap/vars/config.sample.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -# -# Bootstrap configuration - config.yaml is gitignored -# - -# Github username (e.g. onedr0p) -bootstrap_github_username: -# Github repository (e.g. flux-cluster-template) -bootstrap_github_repository_name: -# Github repository branch (e.g. main) -bootstrap_github_repository_branch: -# Age Public Key (e.g. age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta) -bootstrap_age_public_key: -# Choose your timezone (e.g. America/New_York) -bootstrap_timezone: Etc/UTC -# Email you want to be associated with the ACME account (used for TLS certificates via letsencrypt.org) -bootstrap_acme_email: -# Use the ACME production env when requesting the wildcard certificate, -# the default here is `false` which means ACME staging env will be used instead. -# This is to prevent being rate-limited. Update this option to `true` when you -# have verified the staging certificate works and then re-run `task configure` -# and push your changes to Github. -bootstrap_acme_production_enabled: false - -# Flux github webhook token (openssl rand -hex 12) -bootstrap_flux_github_webhook_token: - -# Cloudflare domain -bootstrap_cloudflare_domain: -# Cloudflare API Token (not API Key) -bootstrap_cloudflare_token: -# Cloudflared Account Tag (cat ~/.cloudflared/*.json | jq -r .AccountTag) -bootstrap_cloudflare_account_tag: -# Cloudflared Tunnel Secret (cat ~/.cloudflared/*.json | jq -r .TunnelSecret) -bootstrap_cloudflare_tunnel_secret: -# Cloudflared Tunnel ID (cat ~/.cloudflared/*.json | jq -r .TunnelID) -bootstrap_cloudflare_tunnel_id: - -# CIDR your nodes are on (e.g. 
192.168.1.0/24) -bootstrap_node_cidr: -# The IP address to use with kube-vip, choose an available IP in your nodes network that is not being used -bootstrap_kube_vip_addr: -# The Load balancer IP for k8s_gateway, choose an available IP in your nodes network that is not being used -bootstrap_k8s_gateway_addr: -# The Load balancer IP for external ingress, choose an available IP in your nodes network that is not being used -bootstrap_external_ingress_addr: -# The Load balancer IP for internal ingress, choose an available IP in your nodes network that is not being used -bootstrap_internal_ingress_addr: - -# Keep the next three options default unless you know what you are doing -# (Advanced) Enable ipv6 -bootstrap_ipv6_enabled: false -# (Advanced) For ipv6 use format 10.42.0.0/16,fd78:c889:47fb:10::/60 -# /60 IPv6 block is enough for 16 nodes -bootstrap_cluster_cidr: 10.42.0.0/16 -# (Advanced) For ipv6 use format 10.43.0.0/16,fd78:c889:47fb:e0::/112 -bootstrap_service_cidr: 10.43.0.0/16 - -# Node information -bootstrap_nodes: - # Use only 1, 3 or more odd master nodes, recommended is 3 - master: - # - name: # name of the master node (must match [a-z0-9-]+) - # address: # ip address of the master node - # username: # ssh username of the master node - # password: # password of ssh username for the master node - # ... - worker: # set to [] or omit if no workers are needed - # - name: # name of the worker node (must match [a-z0-9-]+) - # address: # ip address of the worker node - # username: # ssh username of the worker node - # password: # password of ssh username for the worker node - # ... diff --git a/config.sample.yaml b/config.sample.yaml new file mode 100644 index 000000000..d40840af7 --- /dev/null +++ b/config.sample.yaml @@ -0,0 +1,222 @@ +--- + +# +# 1. (Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations +# + +# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York) +bootstrap_timezone: "" + +# (Required) Distribution can either be k3s or talos +bootstrap_distribution: k3s + +# (Optional) Cluster name; affects Cilium and Talos +bootstrap_cluster_name: "" + +# (Required: Talos) Talos Specific Options +bootstrap_talos: + # (Optional: Talos) Go to https://factory.talos.dev/ and choose any System Extensions, and/or add kernel arguments you need. + # Copy the generated schematic id and paste it below. + # IMPORTANT: The default ID given here means no System Extensions or Kernel args will be used. + schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba" + # (Optional: Talos) Add vlan tag to network master device, this is not needed if you tag ports on your switch with the VLAN + # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans + vlan: "" + # (Optional: Talos) Secureboot and TPM-based disk encryption + secureboot: + # (Optional) Enable secureboot on UEFI systems. Not supported on x86 platforms in BIOS mode. + # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot + enabled: false + # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0 + # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm + encrypt_disk_with_tpm: false + # (Optional) Add includes for user provided patches to generated talconfig.yaml. + # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml + # Patches are applied in this order. 
(global overrides cp/worker which overrides node-specific). + Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok). + kubernetes/bootstrap/talos/patches/node_.yaml # Patches for individual nodes + kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes + kubernetes/bootstrap/talos/patches/worker.yaml # Patches for worker nodes + kubernetes/bootstrap/talos/patches/global.yaml # Patches for ALL nodes + user_patches: false + +# (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24) +bootstrap_node_network: "" + +# (Optional) The default gateway for the nodes +# Default is .1 derived from bootstrap_node_network: 'x.x.x.1' +bootstrap_node_default_gateway: "" + +# (Required) Use only 1, 3, or a higher ODD number of controller nodes; 3 is recommended +# Worker nodes are optional +bootstrap_node_inventory: [] + # - name: "" # (Required) Name of the node (must match [a-z0-9-\.]+) + # address: "" # (Required) IP address of the node + # controller: true # (Required) Set to true if this is a controller node + # talos_disk: "" # (Required: Talos) Device path or serial number of the disk for this node (talosctl disks -n --insecure) + # talos_nic: "" # (Required: Talos) MAC address of the NIC for this node (talosctl get links -n --insecure) + # ssh_user: "" # (Required: k3s) SSH username of the node + # ssh_key: "" # (Optional: k3s) Set specific SSH key for this node + # ... + +# (Optional) The DNS server to use for the cluster, this can be an existing +# local DNS server or a public one. +# Default is ["1.1.1.1", "1.0.0.1"] +# If using a local DNS server make sure it meets the following requirements: +# 1. your nodes can reach it +# 2. it is configured to forward requests to a public DNS server +# 3. you are not force redirecting DNS requests to it - this will break cert generation over DNS01 +# If using multiple DNS servers make sure they are set up the same way; there is no +# guarantee that the first DNS server will always be used for every lookup. +bootstrap_dns_servers: [] + +# (Optional) The DNS search domain to use for the nodes. +# Default is "." +# Use the default or leave empty to avoid possible DNS issues inside the cluster. +bootstrap_search_domain: "" + +# (Required) The pod CIDR for the cluster, this must NOT overlap with any +# existing networks and is usually a /16 (64K IPs). +# If you want to use IPv6 check the advanced flags below and be aware of +# https://github.com/onedr0p/cluster-template/issues/1148 +bootstrap_pod_network: "10.69.0.0/16" + +# (Required) The service CIDR for the cluster, this must NOT overlap with any +# existing networks and is usually a /16 (64K IPs). +# If you want to use IPv6 check the advanced flags below and be aware of +# https://github.com/onedr0p/cluster-template/issues/1148 +bootstrap_service_network: "10.96.0.0/16" + +# (Required) The IP address of the Kube API, choose an available IP in +# your nodes host network that is NOT being used. This is announced over L2. +# For k3s kube-vip is used; built-in functionality is used with Talos +bootstrap_controllers_vip: "" + +# (Optional) Add additional SANs to the Kube API cert, this is useful +# if you want to call the Kube API by hostname rather than IP +bootstrap_tls_sans: [] + +# (Required) Age Public Key (e.g. age1...) +# 1. Generate a new key with the following command: +# > task sops:age-keygen +# 2.
Copy the PUBLIC key and paste it below +bootstrap_sops_age_pubkey: "" + +# (Optional) Use cilium BGP control plane when L2 announcements won't traverse VLAN network segments. +# Needs a BGP capable router set up with the node IPs as peers. +# See: https://docs.cilium.io/en/latest/network/bgp-control-plane/ +bootstrap_bgp: + enabled: false + # (Optional) If using multiple BGP peers add them here. + # Default is .1 derived from host_network: ['x.x.x.1'] + peers: [] + # (Required) Set the BGP Autonomous System Number for the router(s) and nodes. + # If these match, iBGP will be used. If not, eBGP will be used. + peer_asn: "" # Router(s) AS + local_asn: "" # Node(s) AS + # (Required) The advertised CIDR for the cluster, this must NOT overlap with any + # existing networks and is usually a /16 (64K IPs). + # If you want to use IPv6 check the advanced flags below + advertised_network: "" + +# +# 2. (Required) Flux details - Flux is used to manage the cluster configuration. +# + +# (Required) GitHub repository URL +# For a public repo use the 'https://' URL (e.g. "https://github.com/onedr0p/cluster-template.git") +# For a private repo use the 'ssh://' URL (e.g. "ssh://git@github.com/onedr0p/cluster-template.git") +# If using a private repo make sure to follow the instructions for the 'bootstrap_github_private_key' option below. +bootstrap_github_address: "" + +# (Required) GitHub repository branch +bootstrap_github_branch: "main" + +# (Required) Token for GitHub push-based sync +# 1. Generate a new token with the following command: +# > openssl rand -hex 16 +# 2. Copy the token and paste it below +bootstrap_github_webhook_token: "" + +# (Optional) Private key for Flux to access the GitHub repository +# 1. Generate a new key with the following command: +# > ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f github-deploy.key -q -P "" +# 2. Make sure to paste the public key from "github-deploy.key.pub" into +# the deploy keys section of your GitHub repository settings. +# 3. Uncomment and paste the private key below +# 4. Optionally set your repository on GitHub to private +# bootstrap_github_private_key: | +# -----BEGIN OPENSSH PRIVATE KEY----- +# ... +# -----END OPENSSH PRIVATE KEY----- + +# +# 3. (Optional) Cloudflare details - Cloudflare is used for DNS, TLS certificates and tunneling. +# + +bootstrap_cloudflare: + # (Required) Disable to manually set up and use a different DNS provider - setting this + # to false will not deploy a network namespace or the workloads contained within. + enabled: true + # (Required) Cloudflare Domain + domain: "" + # (Required) Cloudflare API Token (NOT API Key) + # 1. Head over to Cloudflare and create an API Token by going to + # https://dash.cloudflare.com/profile/api-tokens + # 2. Under the `API Tokens` section click the blue `Create Token` button. + # 3. Click the blue `Use template` button for the `Edit zone DNS` template. + # 4. Name your token something like `home-kubernetes` + # 5. Under `Permissions`, click `+ Add More` and add each permission below: + # `Zone - DNS - Edit` + # `Account - Cloudflare Tunnel - Read` + # 6. Limit the permissions to a specific account and zone resources. + # 7. Click the blue `Continue to Summary` button and then the blue `Create Token` button. + # 8. Copy the token and paste it below.
+ token: "" + # (Required) Options for Cloudflare ACME + acme: + # (Required) Any email you want to be associated with the ACME account (used for TLS certs via letsencrypt.org) + email: "" + # (Required) Use the ACME production server when requesting the wildcard certificate. + # By default the ACME staging server is used. This is to prevent being rate-limited. + # Update this option to `true` when you have verified the staging certificate + # works and then re-run `task configure` and push your changes to GitHub. + production: false + # (Required) Provide LAN access to the cluster ingresses for internal ingress classes + # The Load balancer IP for internal ingress, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. + ingress_vip: "" + # (Required) Gateway is used for providing DNS to your cluster on LAN + # The Load balancer IP for k8s_gateway, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. + gateway_vip: "" + # (Required) Options for Cloudflare Tunnel + # There are two methods to create a tunnel: via the CLI or the Cloudflare dashboard. + # 1. Authenticate cloudflared to your domain with the following command: + # > cloudflared tunnel login + # 2. Create the tunnel with the following command: + # > cloudflared tunnel create k8s + tunnel: + # (Required) Get the Cloudflared Tunnel ID with the following command: + # > jq -r .TunnelID ~/.cloudflared/*.json + id: "" + # (Required) Get the Cloudflare Account ID with the following command: + # > jq -r .AccountTag ~/.cloudflared/*.json + account_id: "" + # (Required) Get the Cloudflared Tunnel Secret with the following command: + # > jq -r .TunnelSecret ~/.cloudflared/*.json + secret: "" + # (Required) Provide WAN access to the cluster ingresses for external ingress classes + # The Load balancer IP for external ingress, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. + ingress_vip: "" + +# (Optional) Feature gates are used to enable experimental features +# bootstrap_feature_gates: +# # Enable Dual Stack IPv4 first +# # IMPORTANT: I am looking for people to help maintain IPv6 support since I cannot test it. +# # Ref: https://github.com/onedr0p/cluster-template/issues/1148 +# # Keep in mind that Cilium does not currently support IPv6 L2 announcements. +# # Make sure you set cluster.pod_cidr and cluster.service_cidr +# # to valid dual-stack CIDRs, e.g.
"10.42.0.0/16,fd00:10:244::/64" +# dual_stack_ipv4_first: false diff --git a/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml index a7eb30d13..5a0496483 100644 --- a/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml +++ b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: cert-manager - namespace: cert-manager spec: interval: 30m chart: spec: chart: cert-manager - version: v1.14.4 + version: v1.14.5 sourceRef: kind: HelmRepository name: jetstack namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,13 +20,10 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: installCRDs: true - extraArgs: - - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 - - --dns01-recursive-nameservers-only + dns01RecursiveNameservers: 1.1.1.1:53,9.9.9.9:53 + dns01RecursiveNameserversOnly: true podDnsPolicy: None podDnsConfig: nameservers: @@ -38,4 +33,3 @@ spec: enabled: true servicemonitor: enabled: true - prometheusInstance: monitoring diff --git a/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml index d7e7064ca..5dd7baca7 100644 --- a/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml +++ b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -1,7 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: cert-manager resources: - ./helmrelease.yaml - - ./prometheusrule.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml b/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml deleted file mode 100644 index 71e17677f..000000000 --- a/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: cert-manager.rules - namespace: cert-manager -spec: - groups: - - name: cert-manager - rules: - - alert: CertManagerAbsent - expr: | - absent(up{job="cert-manager"}) - for: 15m - labels: - severity: critical - annotations: - description: > - New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent - summary: "Cert Manager has dissapeared from Prometheus service discovery." - - name: certificates - rules: - - alert: CertManagerCertExpirySoon - expr: | - avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600) - for: 15m - labels: - severity: warning - annotations: - description: > - The domain that this cert covers will be unavailable after - {{ $value | humanizeDuration }}. Clients using endpoints that this cert - protects will start to fail in {{ $value | humanizeDuration }}. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon - summary: | - The cert {{ $labels.name }} is {{ $value | humanizeDuration }} from expiry, it should have renewed over a week ago. 
- - alert: CertManagerCertNotReady - expr: | - max by (name, exported_namespace, namespace, condition) (certmanager_certificate_ready_status{condition!="True"} == 1) - for: 15m - labels: - severity: critical - annotations: - description: > - This certificate has not been ready to serve traffic for at least - 10m. If the cert is being renewed or there is another valid cert, the ingress - controller _may_ be able to serve that instead. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready - summary: "The cert {{ $labels.name }} is not ready to serve traffic." - - alert: CertManagerHittingRateLimits - expr: | - sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) > 0 - for: 15m - labels: - severity: critical - annotations: - description: > - Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week. - runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits - summary: "Cert manager hitting LetsEncrypt rate limits." diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml index 3de50c50b..a308c6fcb 100644 --- a/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml @@ -2,9 +2,8 @@ apiVersion: v1 kind: Secret metadata: name: cert-manager-secret - namespace: cert-manager stringData: - api-token: ENC[AES256_GCM,data:hD6IiLPpUaBX8Myv2spWLUPXELisqCLL0p/MdZovbSrabx8sG4+Dpg==,iv:TqdB5JaHCJcyqZxAZYvHLSLYlVY7KS7C5wrqqf4BuPw=,tag:nwgxKWsNgiea+3FM3PB4pw==,type:str] + api-token: ENC[AES256_GCM,data:ccx6qCnE8lNC9equDqK61O7+Y2Fzj+TK3dNyuLa01BvWp4+7ULsr3g==,iv:fzkWByaNbmqzCNmbTRlINddtAsPLIBEETSxTxYcrVR4=,tag:1eCYs+OwygmtJ29xYcwAhQ==,type:str] sops: kms: [] gcp_kms: [] @@ -14,14 +13,14 @@ sops: - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 enc: | -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnSWZ5dDNxL3JzYmU3blJC - VURCbnJUdUUwOUlmTTc0WWwrdkZ2cHNtdUYwCnNJREtUMGFxZmVTVWpacGFLQVNn - ZlNkMUc2Rm9RM1pQaWxxWDM1UVQzL3cKLS0tIHhpTS9TVGU2WUUxRFFrUXJQZWYw - TXBISUtsa2dKTmM0SFRCdDRaTVJtS1UKzgXxKYiXPbZOETK8x2ezLCAyn2ngc3C8 - lX7rrZtEDzbfN6cvLivNTop4t6sjXB2L8tLvM+P1BmTAi1WoU89ksA== + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpWnlYSlYzRk9XZDRKYWRB + YWx0TzZGYkJEa1dwZW5EYSs1dktySW9mbUZZCk1mWnJvWXkrMGNEaStQZDVKWUlN + QUZXcVp2T3BLMlZIK20vZjdFVmlyblkKLS0tIEkwVnlkbHlxVmtudEM4Y29uSy9V + d3RsWjdVVXhXZUR5S2UxWWwvLzJJL0kK0VkVbE5+8wkwrKTQFFZ+FiPgLXvFA+b7 + czEZ9qz2ORKJXrmjkYK3bc7He+QBgq8GoKcSqofrAqsu//KKLwpOjQ== -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:35Z" - mac: ENC[AES256_GCM,data:qerkSaKj5EBqUfZPaZddLcWmfOG2b4L7RJY4JOiR6Ev1WGQfe8IaXFuK1PkxkRWnnp0WRQPwQ7PCN0PsP4jG4PUOW3qSrjfcpfs/0JoCe0vNASm9Fg2+t/0fJugFsQ5LaM6fKu65BHlVTnyJy/c0B9fBtW10DzFZIc2x0l4m1vA=,iv:IRC5LHgmnoaxkMjM0iIwBcyVGRJmwvOpvVQkMbmAQrQ=,tag:+fu8q9R4nmAOBVSzhUlSqw==,type:str] + lastmodified: "2024-05-19T15:03:02Z" + mac: ENC[AES256_GCM,data:VAxk82687jKxZncWYZ8dW9HPiZwN4Z4iW32L2QuVfYwzOwcwqhUW5sJMBw5ZbPmWzbSDAIYg1TW7o6E/IcGfNF6/VeG5EoccNA7Picc6GVlV6etwfqJi6So42WHiI82yOVBJ6Ior9TbuGyKWVeaWWtGdCOKMmcEOE9vlpexE3gY=,iv:xsSisKci8X8oLWJTPADFUJ4i6iK+PAQgv9ggF8MgAEM=,tag:tjvs6zt79S/Qf5z+pFdWtw==,type:str] pgp: [] encrypted_regex: ^(data|stringData)$ - version: 3.7.3 + version: 3.8.1 diff --git 
a/kubernetes/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/apps/cert-manager/cert-manager/ks.yaml index c5fb6a6c5..ea0d2af9a 100644 --- a/kubernetes/apps/cert-manager/cert-manager/ks.yaml +++ b/kubernetes/apps/cert-manager/cert-manager/ks.yaml @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cert-manager + name: &app cert-manager namespace: flux-system spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/cert-manager/cert-manager/app prune: true sourceRef: @@ -18,11 +22,15 @@ spec: apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cert-manager-issuers + name: &app cert-manager-issuers namespace: flux-system spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-cert-manager + - name: cert-manager path: ./kubernetes/apps/cert-manager/cert-manager/issuers prune: true sourceRef: diff --git a/kubernetes/apps/cert-manager/cert-manager/monitoring/servicemonitor.yaml b/kubernetes/apps/cert-manager/cert-manager/monitoring/servicemonitor.yaml deleted file mode 100644 index 1cfa1c6fd..000000000 --- a/kubernetes/apps/cert-manager/cert-manager/monitoring/servicemonitor.yaml +++ /dev/null @@ -1,25 +0,0 @@ - -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cert-manager - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/version: v1.12.3 - name: cert-manager - namespace: cert-manager -spec: - endpoints: - - honorLabels: false - interval: 60s - path: /metrics - scrapeTimeout: 30s - targetPort: 9402 - jobLabel: cert-manager - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager diff --git a/kubernetes/apps/cicd/disk-images/app/dv_fedora.yaml b/kubernetes/apps/cicd/disk-images/app/dv_fedora.yaml deleted file mode 100644 index 0d9f42bcd..000000000 --- a/kubernetes/apps/cicd/disk-images/app/dv_fedora.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: cdi.kubevirt.io/v1beta1 -kind: DataVolume -metadata: - name: "fedora" - namespace: "cicd" -spec: - storage: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 5Gi - source: - http: - url: "https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/x86_64/images/Fedora-Cloud-Base-37-1.7.x86_64.raw.xz" diff --git a/kubernetes/apps/cicd/disk-images/ks.yaml b/kubernetes/apps/cicd/disk-images/ks.yaml deleted file mode 100644 index e05211ce6..000000000 --- a/kubernetes/apps/cicd/disk-images/ks.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 -kind: Kustomization -metadata: - name: cluster-apps-disk-images - namespace: flux-system - labels: - substitution.flux.home.arpa/enabled: "true" -spec: - dependsOn: - - name: cluster-apps-cdi - path: ./kubernetes/apps/cicd/disk-images/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - interval: 30m - retryInterval: 1m - timeout: 3m diff --git a/kubernetes/apps/cicd/kustomization.yaml b/kubernetes/apps/cicd/kustomization.yaml deleted file mode 100644 index 4926a883d..000000000 --- a/kubernetes/apps/cicd/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ./namespace.yaml - #- 
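The standalone cert-manager ServiceMonitor removed above is redundant rather than lost: the HelmRelease keeps the chart's prometheus.servicemonitor values enabled, so the chart renders an equivalent monitor itself. Restating that values block with the effect as comments (the keys are as shown earlier in this patch):

    prometheus:
      enabled: true        # keep exposing the controller's /metrics endpoint
      servicemonitor:
        enabled: true      # the chart now owns the ServiceMonitor, replacing the
                           # hand-written manifest deleted here

The prometheusInstance: monitoring value was dropped as well, presumably because the cluster's Prometheus no longer selects monitors by that label.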
./disk-images/ks.yaml diff --git a/kubernetes/apps/cicd/namespace.yaml b/kubernetes/apps/cicd/namespace.yaml deleted file mode 100644 index 55d6a3b40..000000000 --- a/kubernetes/apps/cicd/namespace.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: cicd diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml b/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml deleted file mode 100644 index b353191ad..000000000 --- a/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: github-webhook-token-secret - namespace: flux-system -stringData: - token: ENC[AES256_GCM,data:KKsc3usxSw1bgtnoeiM8pR/ugygPsmfM,iv:WVlh9/D0WsccoUhWDQH4Y/ZaoRSlPdfrrKLNGUFKYs0=,tag:58FEZVQH2QilC8GnqfO1JQ==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBaNFprTVhoL1QwVmVsTUZw - dFhGWmJOaHpobUMxR20yUE10YXhoRWkyU1ZnCjl4TERkczVNTnVyVlBUWENpUTlJ - VGdndE5ydUtJMmMzNXJKQ3BLVFdadjQKLS0tIDZrcDlpcTF1RDU0WlpHSEFOTFov - Vmp3UVJEajVyaDBIRzJBdm9OM3F5ejQK34asXnooz9lpUPLM/IQD/nPf7GwC8ypb - pVD2u2wHcrwLyQxPFpkZD402HpXCCa9B5WiyvOf54CNCIhMm43eYpQ== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:35Z" - mac: ENC[AES256_GCM,data:pwSHvTC54BtmV0jEODPMTloLwrAlYZbJ+fHYsatZKfNabzLpxYmpWJuCRkFFbPf1+wh2F6TA6CztfqKwUL9dg7ZX8zCeo+UlKMnn74r+AsOZ2lN46tkA7gZQK/Nt6FJTN1fmS0J1nGfNS79szMJlRdoEM/Y39IEKvWVEZFIJyv4=,iv:bU7y7LwT/FLQAqayrYKtvj4rxuE7VXkF7VvKb9vuT18=,tag:iwUzb6+oBY2YgTZnSOvnfA==,type:str] - pgp: [] - encrypted_regex: ^(data|stringData)$ - version: 3.7.3 diff --git a/kubernetes/apps/flux-system/capacitor/app/ingress.yaml b/kubernetes/apps/flux-system/capacitor/app/ingress.yaml deleted file mode 100644 index d9f2f9c60..000000000 --- a/kubernetes/apps/flux-system/capacitor/app/ingress.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - hajimari.io/enable: "true" - hajimari.io/icon: simple-icons:capacitor - hajimari.io/group: "Public" - #nginx.ingress.kubernetes.io/auth-url: http://authelia.kube-system.svc.cluster.local/api/verify - #nginx.ingress.kubernetes.io/auth-signin: https://auth.${SECRET_DOMAIN} - name: capacitor - namespace: tools -spec: - ingressClassName: internal - rules: - - host: &host "capacitor.${SECRET_DOMAIN}" - http: - paths: - - backend: - service: - name: capacitor - port: - number: 9000 - path: / - pathType: Prefix - tls: - - hosts: - - *host diff --git a/kubernetes/apps/flux-system/capacitor/app/kustomization.yaml b/kubernetes/apps/flux-system/capacitor/app/kustomization.yaml deleted file mode 100644 index 38ba111a7..000000000 --- a/kubernetes/apps/flux-system/capacitor/app/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: tools -resources: - - https://raw.githubusercontent.com/gimlet-io/capacitor/main/deploy/k8s/rbac.yaml - - https://raw.githubusercontent.com/gimlet-io/capacitor/main/deploy/k8s/manifest.yaml - - ./ingress.yaml diff --git a/kubernetes/apps/flux-system/capacitor/ks.yaml b/kubernetes/apps/flux-system/capacitor/ks.yaml deleted file mode 100644 index 9d2619544..000000000 --- a/kubernetes/apps/flux-system/capacitor/ks.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: 
kustomize.toolkit.fluxcd.io/v1beta2 -kind: Kustomization -metadata: - name: cluster-apps-capacitor - namespace: flux-system - labels: - substitution.flux.home.arpa/enabled: "true" -spec: - dependsOn: - - name: cluster-apps-external-secrets-stores - - name: cluster-apps-nginx-internal - path: ./kubernetes/apps/flux-system/capacitor/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - interval: 30m - retryInterval: 1m - timeout: 3m diff --git a/kubernetes/apps/flux-system/kustomization.yaml b/kubernetes/apps/flux-system/kustomization.yaml index e979b370a..10587f8c9 100644 --- a/kubernetes/apps/flux-system/kustomization.yaml +++ b/kubernetes/apps/flux-system/kustomization.yaml @@ -3,4 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml - - ./addons/ks.yaml + - ./webhooks/ks.yaml diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml b/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml similarity index 75% rename from kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml rename to kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml index c6d007099..e20604f04 100644 --- a/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml +++ b/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml @@ -3,14 +3,12 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: flux-webhook - namespace: flux-system annotations: external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" - hajimari.io/enable: "false" spec: ingressClassName: external rules: - - host: &host "flux-webhook.${SECRET_DOMAIN}" + - host: "flux-webhook.${SECRET_DOMAIN}" http: paths: - path: /hook/ @@ -20,6 +18,3 @@ spec: name: webhook-receiver port: number: 80 - tls: - - hosts: - - *host diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml b/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml similarity index 100% rename from kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml rename to kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml b/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml similarity index 95% rename from kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml rename to kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml index b4f78ca65..cca5931bd 100644 --- a/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml +++ b/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml @@ -3,7 +3,6 @@ apiVersion: notification.toolkit.fluxcd.io/v1 kind: Receiver metadata: name: github-receiver - namespace: flux-system spec: type: github events: diff --git a/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml b/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml new file mode 100644 index 000000000..baf7b5604 --- /dev/null +++ b/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: github-webhook-token-secret +stringData: + token: ENC[AES256_GCM,data:6sFv7v1Of8USJAHTreRyhRoM9qGIBcPU,iv:plbzaR5OAq/eIgIo0Uy7wZsn3G2HK8GZYWcGBh4Y+7M=,tag:pTps3TkkYmAAhIJYKrv6Ng==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + 
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOOGdMaVdqVzJCVjEzd3Nl + cDYyc3JLZmdzK2drMHNDOFBUalN5WmIwQUdRCmdoT0c0S2FITzVQcjhMcVFLOHFC + Q2RDRlR5S0hsZjd5djhyVmhTR0FKMW8KLS0tIDJhQU5Mb3RNMWJjOThFMnp3Snp1 + VzJySlRpaHp5T1Q5c2VYeGlxcVNTZ1kKIVFFXr2kaPbt9+au6KTEJnPA2aC7cLBW + StV18RMEGZHRYusrMwMfOhFjTwB586adZe9ejEBwQkwNHcpIxc4gXw== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-05-19T15:03:02Z" + mac: ENC[AES256_GCM,data:tFDY4hzc7ExPKRj1eLvNE80W7fA7oQc99FOMsmdpoK63t8AkhwjYUDQP5RX3x5Vfp429HwYWsY9mUeCtZU37x4X/GMXHLc+0s9LYIFYLPOOkic7rV/oP4kYDNe/h1uystEn1U8Mtxbm4uRTJC7K+BW+Nxgr44bCcO8MgDgt7FeE=,iv:Ig4y1GN2NX7dkeJuqVD/dRXk8ZnpOM4ep0IwUUKd3n0=,tag:y4qOy+GMhpxBosXMoD5k9w==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml b/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml similarity index 100% rename from kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml rename to kubernetes/apps/flux-system/webhooks/app/kustomization.yaml diff --git a/kubernetes/apps/flux-system/addons/ks.yaml b/kubernetes/apps/flux-system/webhooks/ks.yaml similarity index 58% rename from kubernetes/apps/flux-system/addons/ks.yaml rename to kubernetes/apps/flux-system/webhooks/ks.yaml index 3eef3c510..e80c50b23 100644 --- a/kubernetes/apps/flux-system/addons/ks.yaml +++ b/kubernetes/apps/flux-system/webhooks/ks.yaml @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-flux-webhooks + name: &app flux-webhooks namespace: flux-system spec: - path: ./kubernetes/apps/flux-system/addons/webhooks + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/flux-system/webhooks/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml index b703217b7..76f16ec3a 100644 --- a/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml +++ b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: cilium - namespace: kube-system spec: interval: 30m chart: spec: chart: cilium - version: 1.14.0 + version: 1.15.5 sourceRef: kind: HelmRepository name: cilium namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,20 +20,25 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: autoDirectNodeRoutes: true + bgpControlPlane: + enabled: true bpf: - masquerade: true - bgp: - enabled: false + masquerade: false + cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup cluster: - name: home-cluster id: 1 + name: apps + cni: + exclusive: false containerRuntime: integration: containerd - socketPath: /var/run/k3s/containerd/containerd.sock + # NOTE: devices might need to be set if you have more than one active NIC on your hosts + # devices: eno+ eth+ endpointRoutes: enabled: true hubble: @@ -67,29 +70,19 @@ spec: ingress: enabled: true className: internal - annotations: - hajimari.io/icon: simple-icons:cilium - hosts: - - &host "hubble.${SECRET_DOMAIN}" - tls: - - hosts: - - *host + hosts: ["hubble.${SECRET_DOMAIN}"] ipam: mode: kubernetes ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" - k8sServiceHost: "${KUBE_VIP_ADDR}" - k8sServicePort: 6443 - kubeProxyReplacement: strict + k8sServiceHost: 127.0.0.1 + 
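In the Cilium values above, k8sServiceHost switches from the kube-vip address to 127.0.0.1 and, together with the k8sServicePort that follows, points the agent at Talos's KubePrism endpoint: a node-local load balancer in front of the control-plane API servers, which removes the need for an external VIP. A sketch of the Talos machine-config side, assuming KubePrism's default port; this block lives in the Talos configuration, not in this repository's manifests:

    machine:
      features:
        kubePrism:
          enabled: true   # node-local apiserver load balancer on loopback
          port: 7445      # must match k8sServicePort in the Cilium values

With kubeProxyReplacement turned on, Cilium has no kube-proxy-provided service VIP to fall back on, so a stable loopback API endpoint like this is needed before its own datapath comes up.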
k8sServicePort: 7445 + kubeProxyReplacement: true kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 l2announcements: enabled: true - # https://github.com/cilium/cilium/issues/26586 - leaseDuration: 120s - leaseRenewDeadline: 60s - leaseRetryPeriod: 1s loadBalancer: algorithm: maglev - mode: dsr + mode: snat localRedirectPolicy: true operator: replicas: 1 @@ -112,6 +105,22 @@ spec: annotations: grafana_folder: Cilium rollOutCiliumPods: true + routingMode: native securityContext: - privileged: true - tunnel: disabled + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/kubernetes/apps/kube-system/cilium/app/kustomization.yaml b/kubernetes/apps/kube-system/cilium/app/kustomization.yaml index d98c72b7a..5dd7baca7 100644 --- a/kubernetes/apps/kube-system/cilium/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/cilium/app/kustomization.yaml @@ -1,7 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - - ./cilium-l2.yaml - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml b/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml similarity index 67% rename from kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml rename to kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml index e8eba26e1..e2de5889b 100644 --- a/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml +++ b/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml @@ -3,12 +3,13 @@ apiVersion: cilium.io/v2alpha1 kind: CiliumL2AnnouncementPolicy metadata: - name: policy + name: l2-policy spec: loadBalancerIPs: true - # NOTE: This might need to be set if you have more than one active NIC on your nodes + # NOTE: interfaces might need to be set if you have more than one active NIC on your hosts # interfaces: # - ^eno[0-9]+ + # - ^eth[0-9]+ nodeSelector: matchLabels: kubernetes.io/os: linux @@ -16,7 +17,8 @@ spec: apiVersion: cilium.io/v2alpha1 kind: CiliumLoadBalancerIPPool metadata: - name: pool + name: l2-pool spec: - cidrs: + allowFirstLastIPs: "Yes" + blocks: - cidr: "${NODE_CIDR}" diff --git a/kubernetes/apps/cicd/disk-images/app/kustomization.yaml b/kubernetes/apps/kube-system/cilium/config/kustomization.yaml similarity index 79% rename from kubernetes/apps/cicd/disk-images/app/kustomization.yaml rename to kubernetes/apps/kube-system/cilium/config/kustomization.yaml index 67504790c..f68996538 100644 --- a/kubernetes/apps/cicd/disk-images/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/cilium/config/kustomization.yaml @@ -2,4 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./dv_fedora.yaml + - ./cilium-l2.yaml diff --git a/kubernetes/apps/kube-system/cilium/ks.yaml b/kubernetes/apps/kube-system/cilium/ks.yaml index 7d29d9821..2522f1dfe 100644 --- a/kubernetes/apps/kube-system/cilium/ks.yaml +++ b/kubernetes/apps/kube-system/cilium/ks.yaml @@ -2,11 +2,37 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cilium + name: &app cilium namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/cilium/app prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + 
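The l2-policy / l2-pool rename above also changes behaviour slightly: blocks supersedes the older cidrs field, and allowFirstLastIPs: "Yes" lets the first and last address of the block be handed out. A hedged sketch of how a workload consumes the pool; the Service name and address here are hypothetical and not part of this patch:

    apiVersion: v1
    kind: Service
    metadata:
      name: example-lb                        # hypothetical
      annotations:
        io.cilium/lb-ipam-ips: 192.168.1.50   # optional: request a specific IP from the pool
    spec:
      type: LoadBalancer                      # Cilium LB-IPAM allocates from the CiliumLoadBalancerIPPool
      selector:
        app.kubernetes.io/name: example
      ports:
        - port: 80
          targetPort: 8080

Without the annotation LB-IPAM simply picks the next free address, and the CiliumL2AnnouncementPolicy makes the owning node answer ARP for it on the local network.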
timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cilium + path: ./kubernetes/apps/kube-system/cilium/config + prune: false # never should be deleted sourceRef: kind: GitRepository name: home-kubernetes diff --git a/kubernetes/apps/kube-system/cilium/monitoring/agent-servicemonitor.yaml b/kubernetes/apps/kube-system/cilium/monitoring/agent-servicemonitor.yaml deleted file mode 100644 index 54bb1711c..000000000 --- a/kubernetes/apps/kube-system/cilium/monitoring/agent-servicemonitor.yaml +++ /dev/null @@ -1,31 +0,0 @@ - -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cilium - app.kubernetes.io/name: cilium - app.kubernetes.io/component: agent - app.kubernetes.io/version: 1.14.2 - name: cilium-agent - namespace: kube-system -spec: - endpoints: - - honorLabels: true - interval: 10s - path: /metrics - port: metrics - relabelings: - - action: replace - replacement: ${1} - sourceLabels: - - __meta_kubernetes_pod_node_name - targetLabel: node - namespaceSelector: - matchNames: - - kube-system - selector: - matchLabels: - k8s-app: cilium - targetLabels: - - k8s-app diff --git a/kubernetes/apps/kube-system/cilium/monitoring/hubble-servicemonitor.yaml b/kubernetes/apps/kube-system/cilium/monitoring/hubble-servicemonitor.yaml deleted file mode 100644 index 75885c4fc..000000000 --- a/kubernetes/apps/kube-system/cilium/monitoring/hubble-servicemonitor.yaml +++ /dev/null @@ -1,29 +0,0 @@ - -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cilium - app.kubernetes.io/name: cilium - app.kubernetes.io/component: hubble - app.kubernetes.io/version: 1.14.2 - name: hubble - namespace: kube-system -spec: - endpoints: - - honorLabels: true - interval: 10s - path: /metrics - port: hubble-metrics - relabelings: - - action: replace - replacement: ${1} - sourceLabels: - - __meta_kubernetes_pod_node_name - targetLabel: node - namespaceSelector: - matchNames: - - kube-system - selector: - matchLabels: - k8s-app: hubble diff --git a/kubernetes/apps/kube-system/cilium/monitoring/operator-servicemonitor.yaml b/kubernetes/apps/kube-system/cilium/monitoring/operator-servicemonitor.yaml deleted file mode 100644 index 7b2659a52..000000000 --- a/kubernetes/apps/kube-system/cilium/monitoring/operator-servicemonitor.yaml +++ /dev/null @@ -1,26 +0,0 @@ - -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cilium - app.kubernetes.io/name: cilium - app.kubernetes.io/component: operator - app.kubernetes.io/version: 1.14.2 - name: cilium-operator - namespace: kube-system -spec: - endpoints: - - honorLabels: true - interval: 10s - path: /metrics - port: metrics - namespaceSelector: - matchNames: - - kube-system - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - targetLabels: - - io.cilium/app diff --git a/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml b/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml deleted file mode 100644 index f083cd929..000000000 --- a/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml +++ /dev/null @@ -1,89 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: coredns - namespace: kube-system -spec: - interval: 30m - chart: - spec: - chart: coredns - version: 1.29.0 - sourceRef: - kind: 
HelmRepository - name: coredns - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - fullnameOverride: coredns - replicaCount: 1 - k8sAppLabelOverride: kube-dns - service: - name: kube-dns - clusterIP: "${COREDNS_ADDR}" - serviceAccount: - create: true - deployment: - annotations: - reloader.stakater.com/auto: "true" - servers: - - zones: - - zone: . - scheme: dns:// - use_tcp: true - port: 53 - plugins: - - name: log - - name: errors - - name: health - configBlock: |- - lameduck 5s - - name: ready - - name: kubernetes - parameters: cluster.local in-addr.arpa ip6.arpa - configBlock: |- - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - - name: prometheus - parameters: 0.0.0.0:9153 - - name: forward - parameters: . /etc/resolv.conf - - name: cache - parameters: 30 - - name: loop - - name: reload - - name: loadbalance - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - app.kubernetes.io/instance: coredns diff --git a/kubernetes/apps/kube-system/coredns/app/kustomization.yaml b/kubernetes/apps/kube-system/coredns/app/kustomization.yaml deleted file mode 100644 index 1c3fdb04d..000000000 --- a/kubernetes/apps/kube-system/coredns/app/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: kube-system -resources: - - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml new file mode 100644 index 000000000..3cad66ade --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.1.0 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + providerRegex: ^(lpkm1|lpkw1|lpkw2)$ + bypassDnsResolution: true + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/minio/operator/app/kustomization.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml similarity index 82% rename from kubernetes/apps/minio/operator/app/kustomization.yaml rename to kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml index e03a69adc..5dd7baca7 100644 --- a/kubernetes/apps/minio/operator/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml @@ -2,5 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./namespace.yaml - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/coredns/ks.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml similarity index 
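kubelet-csr-approver, introduced above, is the piece that lets the kubelets' serving-certificate CSRs be approved automatically: providerRegex whitelists exactly the three node names, and bypassDnsResolution: true skips the reverse-DNS check of the node address, which rarely resolves on a home network. It pairs with serving-certificate rotation on the kubelets; a sketch of that side as a Talos machine-config fragment, which is an assumption here and not part of this diff:

    machine:
      kubelet:
        extraArgs:
          rotate-server-certificates: "true"   # kubelet requests a CA-signed serving cert;
                                               # kubelet-csr-approver approves the CSR when
                                               # the node name matches providerRegex

Later in this patch metrics-server drops --kubelet-insecure-tls, which only works once kubelet serving certificates are signed and approved this way.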
58% rename from kubernetes/apps/kube-system/coredns/ks.yaml rename to kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml index c036bad99..adfb4940a 100644 --- a/kubernetes/apps/kube-system/coredns/ks.yaml +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-coredns + name: &app kubelet-csr-approver namespace: flux-system spec: - path: ./kubernetes/apps/kube-system/coredns/app + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kubelet-csr-approver/app prune: false # never should be deleted sourceRef: kind: GitRepository diff --git a/kubernetes/apps/kube-system/kustomization.yaml b/kubernetes/apps/kube-system/kustomization.yaml index 1c92dcfbb..1e903cd5f 100644 --- a/kubernetes/apps/kube-system/kustomization.yaml +++ b/kubernetes/apps/kube-system/kustomization.yaml @@ -5,11 +5,11 @@ resources: - ./namespace.yaml - ./authelia/ks.yaml - ./cilium/ks.yaml - - ./coredns/ks.yaml - ./external-secrets/ks.yaml - ./glauth/ks.yaml - - ./local-path-provisioner/ks.yaml + - ./kubelet-csr-approver/ks.yaml - ./metrics-server/ks.yaml - ./nfs-subdir-external-provisioner/ks.yaml - ./reloader/ks.yaml - ./snapshot-controller/ks.yaml + - ./spegel/ks.yaml diff --git a/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml b/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml deleted file mode 100644 index be4f0f6fc..000000000 --- a/kubernetes/apps/kube-system/local-path-provisioner/app/helmrelease.yaml +++ /dev/null @@ -1,71 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: local-path-provisioner - namespace: kube-system -spec: - interval: 30m - chart: - spec: - chart: ./deploy/chart/local-path-provisioner - sourceRef: - kind: GitRepository - name: local-path-provisioner - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - helperImage: - repository: public.ecr.aws/docker/library/busybox - tag: latest - storageClass: - defaultClass: false - nodePathMap: - - node: DEFAULT_PATH_FOR_NON_LISTED_NODES - paths: ["/var/lib/rancher/k3s/storage"] - # NOTE: Do not enable Flux variable substitution on this HelmRelease - configmap: - setup: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - mkdir -m 0777 -p ${absolutePath} - chmod 701 ${absolutePath}/.. 
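The kubelet-csr-approver ks.yaml above follows the Flux Kustomization shape used throughout this migration, and it explains why metadata.namespace lines are being deleted from the app manifests: the namespace is injected once per Kustomization instead. The recurring pattern, with a placeholder name:

    metadata:
      name: &app some-app                # YAML anchor, reused below
      namespace: flux-system
    spec:
      targetNamespace: kube-system       # stamped onto every resource, so manifests
                                         # no longer hard-code metadata.namespace
      commonMetadata:
        labels:
          app.kubernetes.io/name: *app   # one consistent label on everything Flux applies

The cluster-apps- prefix on Kustomization names is dropped at the same time, which is why dependsOn references elsewhere in the patch use the short names.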
- teardown: |- - #!/bin/sh - while getopts "m:s:p:" opt - do - case $opt in - p) - absolutePath=$OPTARG - ;; - s) - sizeInBytes=$OPTARG - ;; - m) - volMode=$OPTARG - ;; - esac - done - rm -rf ${absolutePath} diff --git a/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml b/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml deleted file mode 100644 index 1c3fdb04d..000000000 --- a/kubernetes/apps/kube-system/local-path-provisioner/app/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: kube-system -resources: - - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml index ae719d340..60298df66 100644 --- a/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml +++ b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: metrics-server - namespace: kube-system spec: interval: 30m chart: @@ -14,7 +13,6 @@ spec: kind: HelmRepository name: metrics-server namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,11 +20,8 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: args: - - --kubelet-insecure-tls - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --metric-resolution=15s diff --git a/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml index 1c3fdb04d..5dd7baca7 100644 --- a/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/ks.yaml b/kubernetes/apps/kube-system/metrics-server/ks.yaml index d10ca1fbe..244f53c16 100644 --- a/kubernetes/apps/kube-system/metrics-server/ks.yaml +++ b/kubernetes/apps/kube-system/metrics-server/ks.yaml @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-metrics-server + name: &app metrics-server namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/metrics-server/app prune: true sourceRef: diff --git a/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml index b33220653..f5cd4317d 100644 --- a/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml +++ b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: reloader - namespace: &namespace kube-system spec: interval: 30m chart: spec: chart: reloader - version: 1.0.36 + version: 1.0.97 sourceRef: kind: HelmRepository name: stakater namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,12 +20,10 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: fullnameOverride: reloader reloader: - reloadStrategy: annotations + 
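Following on from the kubelet-csr-approver note: the metrics-server hunk above removes --kubelet-insecure-tls, so kubelet scrapes now happen over verified TLS. The remaining arguments, restated with the reasoning as comments (the argument list itself is unchanged context from the hunk):

    args:
      - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  # prefer reaching kubelets by IP
      - --kubelet-use-node-status-port                                    # scrape the port the kubelet reports in its status
      - --metric-resolution=15s                                           # how often node/pod metrics are collected
      # --kubelet-insecure-tls is gone: kubelet serving certs are CA-signed and
      # auto-approved, so certificate verification stays enabled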
readOnlyRootFileSystem: true podMonitor: enabled: true - namespace: *namespace + namespace: "{{ .Release.Namespace }}" diff --git a/kubernetes/apps/kube-system/reloader/app/kustomization.yaml b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml index 1c3fdb04d..5dd7baca7 100644 --- a/kubernetes/apps/kube-system/reloader/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: kube-system resources: - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/reloader/ks.yaml b/kubernetes/apps/kube-system/reloader/ks.yaml index 27a247c5b..9aa429934 100644 --- a/kubernetes/apps/kube-system/reloader/ks.yaml +++ b/kubernetes/apps/kube-system/reloader/ks.yaml @@ -2,9 +2,13 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-reloader + name: &app reloader namespace: flux-system spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app path: ./kubernetes/apps/kube-system/reloader/app prune: true sourceRef: diff --git a/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml b/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml new file mode 100644 index 000000000..5c960bbaa --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: spegel +spec: + interval: 30m + chart: + spec: + chart: spegel + version: v0.0.22 + sourceRef: + kind: HelmRepository + name: spegel + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts + service: + registry: + hostPort: 29999 + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/networking/echo-server/app/kustomization.yaml b/kubernetes/apps/kube-system/spegel/app/kustomization.yaml similarity index 82% rename from kubernetes/apps/networking/echo-server/app/kustomization.yaml rename to kubernetes/apps/kube-system/spegel/app/kustomization.yaml index c83d92a87..5dd7baca7 100644 --- a/kubernetes/apps/networking/echo-server/app/kustomization.yaml +++ b/kubernetes/apps/kube-system/spegel/app/kustomization.yaml @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: networking resources: - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/cloudflared/ks.yaml b/kubernetes/apps/kube-system/spegel/ks.yaml similarity index 60% rename from kubernetes/apps/networking/cloudflared/ks.yaml rename to kubernetes/apps/kube-system/spegel/ks.yaml index 11e3a4711..83c730b07 100644 --- a/kubernetes/apps/networking/cloudflared/ks.yaml +++ b/kubernetes/apps/kube-system/spegel/ks.yaml @@ -2,12 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cloudflared + name: &app spegel namespace: flux-system spec: - dependsOn: - - name: cluster-apps-external-dns - path: ./kubernetes/apps/networking/cloudflared/app + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/spegel/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/minio/kustomization.yaml b/kubernetes/apps/minio/kustomization.yaml deleted file mode 100644 index cc3ff95b0..000000000 --- 
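Spegel, added above, makes every node a stateless pull-through registry mirror so peers can fetch image layers from each other instead of from the upstream registry. The values are restated here with the Talos-specific paths explained; the paths themselves are literal values from the hunk:

    spegel:
      containerdSock: /run/containerd/containerd.sock       # Talos's CRI containerd socket
      containerdRegistryConfigPath: /etc/cri/conf.d/hosts    # where Talos writes containerd hosts.toml
                                                             # mirror configuration
    service:
      registry:
        hostPort: 29999                                      # node-local port peers pull blobs from

One prerequisite that is not visible in this diff: containerd has to keep unpacked layers in its content store (discard_unpacked_layers = false), which on Talos is usually applied as a small machine-config patch.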
a/kubernetes/apps/minio/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - # Flux-Kustomizations - - ./operator/ks.yaml diff --git a/kubernetes/apps/minio/monitoring/dashboards/kustomization.yaml b/kubernetes/apps/minio/monitoring/dashboards/kustomization.yaml deleted file mode 100644 index 96b8de12a..000000000 --- a/kubernetes/apps/minio/monitoring/dashboards/kustomization.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: observability - -configMapGenerator: - - - name: minio-overview - files: - - ./minio-overview.json - options: - labels: - home_ops: minio-overview - disableNameSuffixHash: true diff --git a/kubernetes/apps/minio/monitoring/dashboards/minio-overview.json b/kubernetes/apps/minio/monitoring/dashboards/minio-overview.json deleted file mode 100644 index d2d62ab49..000000000 --- a/kubernetes/apps/minio/monitoring/dashboards/minio-overview.json +++ /dev/null @@ -1,2908 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "panel", - "id": "bargauge", - "name": "Bar gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.0.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph (old)", - "version": "" - }, - { - "type": "panel", - "id": "piechart", - "name": "Pie chart", - "version": "" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "MinIO Grafana Dashboard - https://min.io/", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 13502, - "graphTooltip": 0, - "id": null, - "links": [ - { - "icon": "external link", - "includeVars": true, - "keepTime": true, - "tags": [ - "minio" - ], - "type": "dashboards" - } - ], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "percentage", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "dtdurations" - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 3, - "x": 0, - "y": 0 - }, - "id": 1, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "time() - 
max(minio_node_process_starttime_seconds{job=\"$scrape_jobs\"})", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{instance}}", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Uptime", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 3, - "y": 0 - }, - "id": 65, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (instance) (minio_s3_traffic_received_bytes{job=\"$scrape_jobs\"})", - "format": "table", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{instance}}", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total S3 Traffic Inbound", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "unit": "bytes" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Free" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "green", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Used" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "orange", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 4, - "x": 6, - "y": 0 - }, - "id": 50, - "interval": "1m", - "links": [], - "maxDataPoints": 100, - "options": { - "displayLabels": [], - "legend": { - "displayMode": "table", - "placement": "bottom", - "showLegend": true, - "values": [ - "percent" - ] - }, - "pieType": "donut", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.2.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "topk(1, sum(minio_cluster_capacity_usable_total_bytes{job=\"$scrape_jobs\"}) by (instance)) - topk(1, sum(minio_cluster_capacity_usable_free_bytes{job=\"$scrape_jobs\"}) by (instance))", - "format": "time_series", - "instant": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "Used", - "refId": "A", - "step": 300 - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "topk(1, sum(minio_cluster_capacity_usable_free_bytes{job=\"$scrape_jobs\"}) by (instance)) ", - "hide": false, - 
"interval": "1m", - "legendFormat": "Free", - "refId": "B" - } - ], - "title": "Capacity", - "type": "piechart" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": true, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Objects" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "Usage" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 6, - "x": 10, - "y": 0 - }, - "id": 68, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.2.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "minio_cluster_usage_total_bytes{job=\"$scrape_jobs\"}", - "interval": "", - "legendFormat": "Usage", - "range": true, - "refId": "A" - } - ], - "title": "Data Usage Growth", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "semi-dark-red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 5, - "x": 16, - "y": 0 - }, - "id": 52, - "links": [], - "options": { - "displayMode": "basic", - "minVizHeight": 10, - "minVizWidth": 0, - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": false, - "text": {}, - "valueMode": "color" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "minio_cluster_objects_size_distribution{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{range}}", - "refId": "A", - "step": 300 - } - ], - "title": "Object size distribution", - "type": "bargauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" 
- } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 2000 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 21, - "y": 0 - }, - "id": 61, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum (minio_node_file_descriptor_open_total{job=\"$scrape_jobs\"})", - "format": "table", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Open FDs", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 3, - "y": 3 - }, - "id": 64, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (instance) (minio_s3_traffic_sent_bytes{job=\"$scrape_jobs\"})", - "format": "table", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total S3 Traffic Outbound", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 2000 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 21, - "y": 3 - }, - "id": 62, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum without (server,instance) (minio_node_go_routine_total{job=\"$scrape_jobs\"})", - "format": "table", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Goroutines", - 
"type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 0, - "y": 6 - }, - "id": 53, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_nodes_online_total{job=\"$scrape_jobs\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Online Servers", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 3, - "y": 6 - }, - "id": 9, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_drive_online_total{job=\"$scrape_jobs\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Total online drives in MinIO Cluster", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Online Drives", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "dark-yellow", - "value": 75000000 - }, - { - "color": "dark-red", - "value": 100000000 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 6, - "y": 6 - }, - "id": 66, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "minio_cluster_bucket_total{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "title": "Number of Buckets", - "type": "stat" - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 7, - "x": 9, - "y": 6 - }, - "hiddenSeries": false, - "id": 63, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (rate(minio_s3_traffic_received_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "Data Received [{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "S3 API Data Received Rate ", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 6 - }, - "hiddenSeries": false, - "id": 70, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (rate(minio_s3_traffic_sent_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "Data Sent [{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "S3 API Data Sent Rate ", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - 
"datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 0, - "y": 8 - }, - "id": 69, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_nodes_offline_total{job=\"$scrape_jobs\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Offline Servers", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 3, - "y": 8 - }, - "id": 78, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_drive_offline_total{job=\"$scrape_jobs\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Total Offline Drives", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "dark-yellow", - "value": 75000000 - }, - { - "color": "dark-red", - "value": 100000000 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 6, - "y": 9 - }, - "id": 44, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": 
true, - "expr": "minio_cluster_usage_object_total{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "title": "Number of Objects", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 0, - "y": 10 - }, - "id": 80, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_heal_time_last_activity_nano_seconds{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{server}}", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Time Since Last Heal Activity", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 3, - "x": 3, - "y": 10 - }, - "id": 81, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "10.0.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_usage_last_activity_nano_seconds{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{server}}", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - } - ], - "title": "Time Since Last Scan Activity", - "type": "stat" - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 10, - "w": 10, - "x": 0, - "y": 12 - }, - "hiddenSeries": false, - "id": 60, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by 
(server,api) (increase(minio_s3_requests_total{job=\"$scrape_jobs\"}[$__rate_interval]))", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server,api}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "S3 API Request Rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 10, - "w": 7, - "x": 10, - "y": 12 - }, - "hiddenSeries": false, - "id": 88, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server,api) (increase(minio_s3_requests_4xx_errors_total{job=\"$scrape_jobs\"}[$__rate_interval]))", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server,api}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "S3 API Request 4xx Error Rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 10, - "w": 7, - "x": 17, - "y": 12 - }, - "hiddenSeries": false, - "id": 86, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server,api) (increase(minio_s3_requests_5xx_errors_total{job=\"$scrape_jobs\"}[$__rate_interval]))", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server,api}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "S3 API Request 5xx Error Rate", - "tooltip": { - "shared": true, - 
"sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "10.13.1.25:9000 DELETE": "red", - "10.13.1.25:9000 GET": "green", - "10.13.1.25:9000 POST": "blue" - }, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Total number of bytes received and sent among all MinIO server instances", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 10, - "fillGradient": 1, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 22 - }, - "hiddenSeries": false, - "id": 17, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_inter_node_traffic_sent_bytes{job=\"$scrape_jobs\"}[$__rate_interval])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Internode Bytes Received [{{server}}]", - "metric": "minio_http_requests_duration_seconds_count", - "refId": "A", - "step": 4 - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_inter_node_traffic_received_bytes{job=\"$scrape_jobs\"}[$__rate_interval])", - "interval": "", - "legendFormat": "Internode Bytes Sent [{{server}}]", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Internode Data Transfer", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:211", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:212", - "format": "s", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 22 - }, - "hiddenSeries": false, - "id": 84, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (instance) (minio_heal_objects_heal_total{job=\"$scrape_jobs\"})", - 
"interval": "", - "legendFormat": "Objects healed in current self heal run", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (instance) (minio_heal_objects_error_total{job=\"$scrape_jobs\"})", - "hide": false, - "interval": "", - "legendFormat": "Heal errors in current self heal run", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (instance) (minio_heal_objects_total{job=\"$scrape_jobs\"}) ", - "hide": false, - "interval": "", - "legendFormat": "Objects scanned in current self heal run", - "refId": "C" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Healing", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:846", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:847", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 32 - }, - "hiddenSeries": false, - "id": 77, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_node_process_cpu_total_seconds{job=\"$scrape_jobs\"}[$__rate_interval])", - "interval": "", - "legendFormat": "CPU Usage Rate [{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Node CPU Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:1043", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:1044", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 32 - }, - "hiddenSeries": false, - "id": 76, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": 
"minio_node_process_resident_memory_bytes{job=\"$scrape_jobs\"}", - "interval": "", - "legendFormat": "Memory Used [{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Node Memory Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:1043", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:1044", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 41 - }, - "hiddenSeries": false, - "id": 74, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_node_drive_used_bytes{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Used Capacity [{{server}}:{{drive}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Drive Used Capacity", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:381", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:382", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 41 - }, - "hiddenSeries": false, - "id": 82, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_node_drive_free_inodes{job=\"$scrape_jobs\"}", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Free Inodes [{{server}}:{{drive}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Drives Free Inodes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:381", - "format": "none", - "logBase": 1, - 
"show": true - }, - { - "$$hashKey": "object:382", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Offline 10.13.1.25:9000": "dark-red", - "Total 10.13.1.25:9000": "blue" - }, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Number of online drives per MinIO Server", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 49 - }, - "hiddenSeries": false, - "id": 11, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_node_syscall_read_total{job=\"$scrape_jobs\"}[$__rate_interval])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Read Syscalls [{{server}}]", - "metric": "process_start_time_seconds", - "refId": "A", - "step": 60 - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_node_syscall_write_total{job=\"$scrape_jobs\"}[$__rate_interval])", - "interval": "", - "legendFormat": "Write Syscalls [{{server}}]", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Node Syscalls", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:185", - "decimals": 0, - "format": "short", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:186", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "available 10.13.1.25:9000": "green", - "used 10.13.1.25:9000": "blue" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 49 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_node_file_descriptor_open_total{job=\"$scrape_jobs\"}", - "interval": "", - "legendFormat": "Open FDs [{{server}}]", - "refId": "B" - } - ], - "thresholds": [], 
- "timeRegions": [], - "title": "Node File Descriptors", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 58 - }, - "hiddenSeries": false, - "id": 73, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_node_io_rchar_bytes{job=\"$scrape_jobs\"}[$__rate_interval])", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Node RChar [{{server}}]", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "rate(minio_node_io_wchar_bytes{job=\"$scrape_jobs\"}[$__rate_interval])", - "interval": "", - "legendFormat": "Node WChar [{{server}}]", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Node IO", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:381", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:382", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - } - ], - "refresh": "", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "minio" - ], - "templating": { - "list": [ - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "label_values(job)", - "hide": 0, - "includeAll": true, - "multi": true, - "name": "scrape_jobs", - "options": [], - "query": { - "query": "label_values(job)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "MinIO Dashboard", - "uid": "TgmJnqnnk", - "version": 1, - "weekStart": "" -} \ No newline at end of file diff --git a/kubernetes/apps/minio/monitoring/kustomization.yaml b/kubernetes/apps/minio/monitoring/kustomization.yaml deleted file mode 100644 index 888ce1420..000000000 --- a/kubernetes/apps/minio/monitoring/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: 
Kustomization -namespace: observability -resources: - - ./dashboards diff --git a/kubernetes/apps/minio/operator/app/helmrelease.yaml b/kubernetes/apps/minio/operator/app/helmrelease.yaml deleted file mode 100644 index d63ab0cc7..000000000 --- a/kubernetes/apps/minio/operator/app/helmrelease.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: operator - namespace: minio-operator -spec: - interval: 5m - chart: - spec: - # renovate: registryUrl=https://charts.longhorn.io - chart: operator - version: 5.0.8 - sourceRef: - kind: HelmRepository - name: minio-operator - namespace: flux-system - interval: 5m - - install: - createNamespace: false - remediation: - retries: 5 - - upgrade: - crds: CreateReplace - remediation: - retries: 5 - - values: - operator: - replicaCount: 1 - env: - - name: PROMETHEUS_NAMESPACE - value: "monitoring" - - name: MINIO_PROMETHEUS_URL - value: "http://kube-prometheus-stack-prometheus.monitoring:9090" - - name: MINIO_PROMETHEUS_AUTH_TYPE - value: "public" - resources: - requests: - cpu: 200m - memory: 256Mi - ephemeral-storage: 500Mi - limits: - memory: 1Gi - ephemeral-storage: 500Gi - console: - ingress: - enabled: true - ingressClassName: internal - annotations: - external-dns.alpha.kubernetes.io/target: internal.${SECRET_DOMAIN} - #nginx.ingress.kubernetes.io/auth-url: http://authelia.kube-system.svc.cluster.local/api/verify - #nginx.ingress.kubernetes.io/auth-signin: https://auth.${SECRET_DOMAIN} - host: &host "minio-operator.${SECRET_DOMAIN}" - path: / - pathType: Prefix - tls: - - hosts: - - *host diff --git a/kubernetes/apps/minio/operator/app/namespace.yaml b/kubernetes/apps/minio/operator/app/namespace.yaml deleted file mode 100644 index 7b33b3468..000000000 --- a/kubernetes/apps/minio/operator/app/namespace.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: minio-operator - labels: - kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/minio/operator/ks.yaml b/kubernetes/apps/minio/operator/ks.yaml deleted file mode 100644 index 53f11e8f4..000000000 --- a/kubernetes/apps/minio/operator/ks.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 -kind: Kustomization -metadata: - name: cluster-apps-minio-operator - namespace: flux-system - labels: - substitution.flux.home.arpa/enabled: "true" -spec: - dependsOn: - - name: cluster-apps-nginx-internal - - name: cluster-apps-external-secrets - path: ./kubernetes/apps/minio/operator/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - healthChecks: - - apiVersion: helm.toolkit.fluxcd.io/v2beta1 - kind: HelmRelease - name: operator - namespace: minio-operator - interval: 30m - retryInterval: 1m - timeout: 5m ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 -kind: Kustomization -metadata: - name: cluster-apps-minio-monitoring - namespace: flux-system - labels: - substitution.flux.home.arpa/enabled: "true" -spec: - dependsOn: - - name: cluster-apps-minio-operator - path: ./kubernetes/apps/minio/monitoring/ - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - healthChecks: - - apiVersion: helm.toolkit.fluxcd.io/v2beta1 - kind: HelmRelease - name: operator - namespace: minio-operator - interval: 30m - retryInterval: 1m - timeout: 5m diff --git a/kubernetes/apps/network/cloudflared/app/configs/config.yaml b/kubernetes/apps/network/cloudflared/app/configs/config.yaml new file mode 100644 index 
000000000..05bcef5cf --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/configs/config.yaml @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: "external.${SECRET_DOMAIN}" + +ingress: + - hostname: "${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml b/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml similarity index 91% rename from kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml rename to kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml index 2a748f949..43d7d7b29 100644 --- a/kubernetes/apps/networking/cloudflared/app/dnsendpoint.yaml +++ b/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml @@ -3,7 +3,6 @@ apiVersion: externaldns.k8s.io/v1alpha1 kind: DNSEndpoint metadata: name: cloudflared - namespace: networking spec: endpoints: - dnsName: "external.${SECRET_DOMAIN}" diff --git a/kubernetes/apps/network/cloudflared/app/helmrelease.yaml b/kubernetes/apps/network/cloudflared/app/helmrelease.yaml new file mode 100644 index 000000000..f15dd501c --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/helmrelease.yaml @@ -0,0 +1,110 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + cloudflared: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.5.0 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + TUNNEL_ID: + valueFrom: + secretKeyRef: + name: cloudflared-secret + key: TUNNEL_ID + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "$(TUNNEL_ID)" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/kubernetes/apps/networking/cloudflared/app/kustomization.yaml 
b/kubernetes/apps/network/cloudflared/app/kustomization.yaml similarity index 92% rename from kubernetes/apps/networking/cloudflared/app/kustomization.yaml rename to kubernetes/apps/network/cloudflared/app/kustomization.yaml index 0536740db..891a864ad 100644 --- a/kubernetes/apps/networking/cloudflared/app/kustomization.yaml +++ b/kubernetes/apps/network/cloudflared/app/kustomization.yaml @@ -1,7 +1,6 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: networking resources: - ./dnsendpoint.yaml - ./secret.sops.yaml diff --git a/kubernetes/apps/network/cloudflared/app/secret.sops.yaml b/kubernetes/apps/network/cloudflared/app/secret.sops.yaml new file mode 100644 index 000000000..1bfa96160 --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/secret.sops.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cloudflared-secret +stringData: + TUNNEL_ID: ENC[AES256_GCM,data:bYmZE2MHHS7SZsIxSbYWCEEGKbXlz93BB0wFQf3ICN2LHBLI,iv:hLWJn/jk2DsZBE+hf4WbY8wcvxcz0QACkntvnfcbhfc=,tag:x2T5CtZjyM1GJA9QqP7b6g==,type:str] + credentials.json: ENC[AES256_GCM,data:Mfjy9ylzNMK0ZAOiAA/PO7DUV39KKsUCAktGYiMotzPZC3ngzNafTXuQ5EiMreeqjKZTX/eHHKm6EXYNiX09ER1MyretXpJpQLEl5jHV0obwpPinbLCtUblR517Otnx693CR8qO54T5ycq50HN7aoj7zRDvC+9P3WKfgCdHZobipgmCDtEGHzV/rBVavOiMFN6GYbK4JKXqxoxqgBRLvM8jO5pdc0I3JPJef3BuyjQ==,iv:ii1YZQlSp4lHluz2vaLcgDc+tajWabgd3WhiZ/H8O+c=,tag:lX+IQ5zcW8AHjFIYy1wiNg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0OStKNzcwUElGb04zMTBW + Ny95bUhmZWorRm43NTdYZVY3alJZQkwxTmx3ClBOYk13VS9LeERWNDJBU051eHl5 + UWtkQ3IxVDNCT3oxeHQrdU9udWJHZzAKLS0tIHdibnVMRXQwekdoeHkwTlNyUmVC + Y2NmaWJyNWVrekRDVDk5cWpIUUo1UkkKt1pjK+9Z00NmPuEGVGEHxkSDI6YIjbQg + /PfTKj2Nf3zehGmiRzm/T2iqGs48sloSGEoc6abB1rUD+t30/yohwQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-05-19T15:03:02Z" + mac: ENC[AES256_GCM,data:44t42tiwfJJ0rrI6ajFNlZvreFklRroA0181gRSHYiY21ZbMcwa7EFGGnYgIF5CwKcmadAu9HZKwZ5w8QuHQRbFoxgaVB8tXODX5caOJJmvs+Am4qdim5jfXX0cAR970hdLA8yVwFTXgB561oC6lYODTpcvvysQ2r2YdGlhyeeA=,iv:A80TJH/d1QLQutV+Ax1thE765moHgkFJofbXX0J5vVg=,tag:6bYMlPsn+0+wQRDNFVVw3w==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/bootstrap/templates/kubernetes/apps/networking/cloudflared/ks.yaml.j2 b/kubernetes/apps/network/cloudflared/ks.yaml similarity index 57% rename from bootstrap/templates/kubernetes/apps/networking/cloudflared/ks.yaml.j2 rename to kubernetes/apps/network/cloudflared/ks.yaml index 11e3a4711..eb8d8da0b 100644 --- a/bootstrap/templates/kubernetes/apps/networking/cloudflared/ks.yaml.j2 +++ b/kubernetes/apps/network/cloudflared/ks.yaml @@ -2,12 +2,16 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-cloudflared + name: &app cloudflared namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-external-dns - path: ./kubernetes/apps/networking/cloudflared/app + - name: external-dns + path: ./kubernetes/apps/network/cloudflared/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/network/echo-server/app/helmrelease.yaml b/kubernetes/apps/network/echo-server/app/helmrelease.yaml new file mode 100644 index 000000000..b35cdd06d --- /dev/null +++ 
b/kubernetes/apps/network/echo-server/app/helmrelease.yaml @@ -0,0 +1,91 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: echo-server +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + echo-server: + strategy: RollingUpdate + containers: + app: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: 33 + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: true + LOG_IGNORE_PATH: /healthz + PROMETHEUS_ENABLED: true + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 64Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: echo-server + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: echo-server + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http diff --git a/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml b/kubernetes/apps/network/echo-server/app/kustomization.yaml similarity index 82% rename from kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml rename to kubernetes/apps/network/echo-server/app/kustomization.yaml index c83d92a87..5dd7baca7 100644 --- a/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml +++ b/kubernetes/apps/network/echo-server/app/kustomization.yaml @@ -1,6 +1,5 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: networking resources: - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/echo-server/ks.yaml b/kubernetes/apps/network/echo-server/ks.yaml similarity index 59% rename from kubernetes/apps/networking/echo-server/ks.yaml rename to kubernetes/apps/network/echo-server/ks.yaml index 0fe3d81a6..2984f219c 100644 --- a/kubernetes/apps/networking/echo-server/ks.yaml +++ b/kubernetes/apps/network/echo-server/ks.yaml @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-echo-server + name: &app echo-server namespace: flux-system spec: - path: ./kubernetes/apps/networking/echo-server/app + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/echo-server/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/networking/external-dns/app/helmrelease.yaml b/kubernetes/apps/network/external-dns/app/helmrelease.yaml similarity index 89% rename from kubernetes/apps/networking/external-dns/app/helmrelease.yaml rename to kubernetes/apps/network/external-dns/app/helmrelease.yaml index 71b532f21..f9b23788e 100644 --- a/kubernetes/apps/networking/external-dns/app/helmrelease.yaml +++ b/kubernetes/apps/network/external-dns/app/helmrelease.yaml 
@@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: &app external-dns - namespace: networking spec: interval: 30m chart: @@ -14,16 +13,16 @@ spec: kind: HelmRepository name: external-dns namespace: flux-system - maxHistory: 2 install: + crds: CreateReplace remediation: retries: 3 upgrade: cleanupOnFail: true + crds: CreateReplace remediation: + strategy: rollback retries: 3 - uninstall: - keepHistory: false values: fullnameOverride: *app provider: cloudflare diff --git a/bootstrap/templates/addons/grafana/app/kustomization.yaml.j2 b/kubernetes/apps/network/external-dns/app/kustomization.yaml similarity index 85% rename from bootstrap/templates/addons/grafana/app/kustomization.yaml.j2 rename to kubernetes/apps/network/external-dns/app/kustomization.yaml index b80069e78..95bf4747f 100644 --- a/bootstrap/templates/addons/grafana/app/kustomization.yaml.j2 +++ b/kubernetes/apps/network/external-dns/app/kustomization.yaml @@ -1,7 +1,6 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: monitoring resources: - ./secret.sops.yaml - ./helmrelease.yaml diff --git a/kubernetes/apps/network/external-dns/app/secret.sops.yaml b/kubernetes/apps/network/external-dns/app/secret.sops.yaml new file mode 100644 index 000000000..323516ea5 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/secret.sops.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: external-dns-secret +stringData: + api-token: ENC[AES256_GCM,data:W4iTeouED0FcsyIBXo0QA4NbpGtTBKmrMm9ouep0FGY9PkrhMUzizw==,iv:TiZJRcWESrrRUASIW1vheBSvozmvyg0wLsm3i2n9en4=,tag:dLjGufetJk/VNVZAVN2fAw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4MXZONWEzb0x0VnZQRkxK + OTlORU9PT0l2V0VaNmZTZjk3TVhBUTJUYUZrCnZHUlN6QnZ5NDl2RFJ2SStCMlBI + b3ZwZXpMWU9XYTY3dUpJV2ljb285elUKLS0tIFpXNmxKQWJBa1IvR2JVZlF6bTlw + ZTAySzRTOE52VnM2VE9td1FsNVNIT1kK5uDY0NS+zJRJcQlbyyVfERNxsmu1nXAf + jLiq7ZnQ1h+IXe5NJPP5ragqXqLfWwsKEwPgzI7OidryOP/37RU+zw== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-05-19T15:03:02Z" + mac: ENC[AES256_GCM,data:8dEjpxvvVBMKYDSBtktdoRYAcsknGTHiTXP4sfnEpXoPqVkwhbUbQkpv6kI+HSPPEbf8HTAk3R+HD7GkJf8wvHTxCr86+HM+Rl0O4lqw8FTE30Y003RDi9TW+H3TJflRgjpsit0mdOYfrxJzoZ5YEUEQrSJnxKNW+bqsPNFmf/g=,iv:YEUtX4hR27bD1hqJN9k6mHLEG6t88ePYe38o5i0GDE0=,tag:wH7glnbW96p7H5bC9/S+jA==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/kubernetes/apps/networking/external-dns/ks.yaml b/kubernetes/apps/network/external-dns/ks.yaml similarity index 59% rename from kubernetes/apps/networking/external-dns/ks.yaml rename to kubernetes/apps/network/external-dns/ks.yaml index 9dcaf8c01..eaed4b566 100644 --- a/kubernetes/apps/networking/external-dns/ks.yaml +++ b/kubernetes/apps/network/external-dns/ks.yaml @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-external-dns + name: &app external-dns namespace: flux-system spec: - path: ./kubernetes/apps/networking/external-dns/app + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/external-dns/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/networking/nginx/certificates/kustomization.yaml 
b/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml similarity index 81% rename from kubernetes/apps/networking/nginx/certificates/kustomization.yaml rename to kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml index f58e4a76f..e7892580d 100644 --- a/kubernetes/apps/networking/nginx/certificates/kustomization.yaml +++ b/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml @@ -3,4 +3,3 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./staging.yaml - - ./production.yaml diff --git a/kubernetes/apps/networking/nginx/certificates/production.yaml b/kubernetes/apps/network/ingress-nginx/certificates/production.yaml similarity index 93% rename from kubernetes/apps/networking/nginx/certificates/production.yaml rename to kubernetes/apps/network/ingress-nginx/certificates/production.yaml index 952f26490..b5afdf419 100644 --- a/kubernetes/apps/networking/nginx/certificates/production.yaml +++ b/kubernetes/apps/network/ingress-nginx/certificates/production.yaml @@ -3,7 +3,6 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: "${SECRET_DOMAIN/./-}-production" - namespace: networking spec: secretName: "${SECRET_DOMAIN/./-}-production-tls" issuerRef: diff --git a/kubernetes/apps/networking/nginx/certificates/staging.yaml b/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml similarity index 93% rename from kubernetes/apps/networking/nginx/certificates/staging.yaml rename to kubernetes/apps/network/ingress-nginx/certificates/staging.yaml index 7b3914fa1..9c8694251 100644 --- a/kubernetes/apps/networking/nginx/certificates/staging.yaml +++ b/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml @@ -3,7 +3,6 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: "${SECRET_DOMAIN/./-}-staging" - namespace: networking spec: secretName: "${SECRET_DOMAIN/./-}-staging-tls" issuerRef: diff --git a/kubernetes/apps/networking/nginx/external/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml similarity index 82% rename from kubernetes/apps/networking/nginx/external/helmrelease.yaml rename to kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml index abc665475..683d624ee 100644 --- a/kubernetes/apps/networking/nginx/external/helmrelease.yaml +++ b/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml @@ -1,20 +1,18 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: nginx-external - namespace: networking + name: ingress-nginx-external spec: interval: 30m chart: spec: chart: ingress-nginx - version: 4.7.1 + version: 4.10.1 sourceRef: kind: HelmRepository name: ingress-nginx namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,19 +20,17 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false dependsOn: - name: cloudflared - namespace: networking + namespace: network values: - fullnameOverride: nginx-external + fullnameOverride: ingress-nginx-external controller: replicaCount: 1 service: annotations: external-dns.alpha.kubernetes.io/hostname: "external.${SECRET_DOMAIN}" - io.cilium/lb-ipam-ips: "10.69.3.121" + io.cilium/lb-ipam-ips: "10.69.3.122" externalTrafficPolicy: Cluster ingressClassResource: name: external @@ -69,23 +65,22 @@ spec: enabled: true serviceMonitor: enabled: true - namespace: networking namespaceSelector: any: true extraArgs: - default-ssl-certificate: 
"networking/${SECRET_DOMAIN/./-}-production-tls" + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: - app.kubernetes.io/name: nginx-external + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-external app.kubernetes.io/component: controller resources: requests: - cpu: 10m - memory: 250Mi + cpu: 100m limits: memory: 500Mi defaultBackend: diff --git a/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/nginx/internal/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml similarity index 73% rename from kubernetes/apps/networking/nginx/internal/helmrelease.yaml rename to kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml index e92989b08..f7c8b65f7 100644 --- a/kubernetes/apps/networking/nginx/internal/helmrelease.yaml +++ b/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml @@ -1,20 +1,19 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: - name: nginx-internal - namespace: networking + name: ingress-nginx-internal + namespace: network spec: interval: 30m chart: spec: chart: ingress-nginx - version: 4.10.0 + version: 4.10.1 sourceRef: kind: HelmRepository name: ingress-nginx namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,24 +21,18 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: - fullnameOverride: nginx-internal + fullnameOverride: ingress-nginx-internal controller: replicaCount: 1 service: annotations: - external-dns.alpha.kubernetes.io/hostname: "internal.${SECRET_DOMAIN}" io.cilium/lb-ipam-ips: "10.69.3.122" externalTrafficPolicy: Cluster ingressClassResource: name: internal default: true controllerValue: k8s.io/internal - opentelemetry: - enabled: true - resources: {} admissionWebhooks: objectSelector: matchExpressions: @@ -64,35 +57,28 @@ spec: "http_user_agent": "$http_user_agent"} proxy-body-size: 0 proxy-buffer-size: 16k - proxy-read-timeout: 2400 # Elastic reindexing testing ssl-protocols: TLSv1.3 TLSv1.2 metrics: enabled: true serviceMonitor: enabled: true - namespace: networking namespaceSelector: any: true extraArgs: - default-ssl-certificate: "networking/${SECRET_DOMAIN/./-}-production-tls" + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" topologySpreadConstraints: - maxSkew: 1 topologyKey: kubernetes.io/hostname whenUnsatisfiable: DoNotSchedule labelSelector: matchLabels: - app.kubernetes.io/name: nginx-internal + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-internal app.kubernetes.io/component: controller resources: requests: - cpu: 10m - memory: 250Mi + cpu: 100m limits: memory: 500Mi defaultBackend: enabled: false - #tcp: - #"6379": "datahub/redis-standalone-master:6379" - #"9200": "observability/elk-elasticsearch:9200" - #"9095": "datahub/datahub-cluster-kafka-external-bootstrap:9095" - #"22": "tools/gitea-ssh:22" diff --git 
a/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/nginx/ks.yaml b/kubernetes/apps/network/ingress-nginx/ks.yaml similarity index 53% rename from kubernetes/apps/networking/nginx/ks.yaml rename to kubernetes/apps/network/ingress-nginx/ks.yaml index 60439e48b..99b1abb58 100644 --- a/kubernetes/apps/networking/nginx/ks.yaml +++ b/kubernetes/apps/network/ingress-nginx/ks.yaml @@ -2,12 +2,16 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-certificates + name: &app ingress-nginx-certificates namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-cert-manager-issuers - path: ./kubernetes/apps/networking/nginx/certificates + - name: cert-manager-issuers + path: ./kubernetes/apps/network/ingress-nginx/certificates prune: true sourceRef: kind: GitRepository @@ -20,12 +24,16 @@ spec: apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-external + name: &app ingress-nginx-internal namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-nginx-certificates - path: ./kubernetes/apps/networking/nginx/external + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/internal prune: true sourceRef: kind: GitRepository @@ -38,12 +46,16 @@ spec: apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-nginx-internal + name: &app ingress-nginx-external namespace: flux-system spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app dependsOn: - - name: cluster-apps-nginx-certificates - path: ./kubernetes/apps/networking/nginx/internal + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/external prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml b/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml similarity index 83% rename from kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml rename to kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml index 4d7e44f1e..0b334f52e 100644 --- a/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml +++ b/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml @@ -1,9 +1,8 @@ --- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 +apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: name: k8s-gateway - namespace: networking spec: interval: 30m chart: @@ -14,7 +13,6 @@ spec: kind: HelmRepository name: k8s-gateway namespace: flux-system - maxHistory: 2 install: remediation: retries: 3 @@ -22,8 +20,6 @@ spec: cleanupOnFail: true remediation: retries: 3 - uninstall: - keepHistory: false values: fullnameOverride: k8s-gateway domain: "${SECRET_DOMAIN}" @@ -34,3 +30,4 @@ spec: annotations: io.cilium/lb-ipam-ips: "10.69.3.120" externalTrafficPolicy: Cluster + watchedResources: ["Ingress", "Service"] diff --git a/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml 
b/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/k8s-gateway/ks.yaml b/kubernetes/apps/network/k8s-gateway/ks.yaml similarity index 59% rename from kubernetes/apps/networking/k8s-gateway/ks.yaml rename to kubernetes/apps/network/k8s-gateway/ks.yaml index 502e1f2f8..06f442555 100644 --- a/kubernetes/apps/networking/k8s-gateway/ks.yaml +++ b/kubernetes/apps/network/k8s-gateway/ks.yaml @@ -2,10 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-k8s-gateway + name: &app k8s-gateway namespace: flux-system spec: - path: ./kubernetes/apps/networking/k8s-gateway/app + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/k8s-gateway/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/networking/kustomization.yaml b/kubernetes/apps/network/kustomization.yaml similarity index 79% rename from kubernetes/apps/networking/kustomization.yaml rename to kubernetes/apps/network/kustomization.yaml index 4ad531cd6..db7ef4d55 100644 --- a/kubernetes/apps/networking/kustomization.yaml +++ b/kubernetes/apps/network/kustomization.yaml @@ -6,5 +6,6 @@ resources: - ./cloudflared/ks.yaml - ./echo-server/ks.yaml - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml - ./k8s-gateway/ks.yaml - - ./nginx/ks.yaml + - ./smtp-relay/ks.yaml diff --git a/kubernetes/apps/networking/namespace.yaml b/kubernetes/apps/network/namespace.yaml similarity index 84% rename from kubernetes/apps/networking/namespace.yaml rename to kubernetes/apps/network/namespace.yaml index b9e4a4161..4d78d7b11 100644 --- a/kubernetes/apps/networking/namespace.yaml +++ b/kubernetes/apps/network/namespace.yaml @@ -2,6 +2,6 @@ apiVersion: v1 kind: Namespace metadata: - name: networking + name: network labels: kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/networking/smtp-relay/app/externalsecret.yaml b/kubernetes/apps/network/smtp-relay/app/externalsecret.yaml similarity index 100% rename from kubernetes/apps/networking/smtp-relay/app/externalsecret.yaml rename to kubernetes/apps/network/smtp-relay/app/externalsecret.yaml diff --git a/kubernetes/apps/networking/smtp-relay/app/helmrelease.yaml b/kubernetes/apps/network/smtp-relay/app/helmrelease.yaml similarity index 100% rename from kubernetes/apps/networking/smtp-relay/app/helmrelease.yaml rename to kubernetes/apps/network/smtp-relay/app/helmrelease.yaml diff --git a/kubernetes/apps/networking/smtp-relay/app/kustomization.yaml b/kubernetes/apps/network/smtp-relay/app/kustomization.yaml similarity index 100% rename from kubernetes/apps/networking/smtp-relay/app/kustomization.yaml rename to kubernetes/apps/network/smtp-relay/app/kustomization.yaml diff --git a/kubernetes/apps/networking/smtp-relay/app/resources/maddy.conf b/kubernetes/apps/network/smtp-relay/app/resources/maddy.conf similarity index 100% rename from kubernetes/apps/networking/smtp-relay/app/resources/maddy.conf rename to kubernetes/apps/network/smtp-relay/app/resources/maddy.conf diff --git a/kubernetes/apps/networking/smtp-relay/ks.yaml b/kubernetes/apps/network/smtp-relay/ks.yaml similarity index 100% rename from kubernetes/apps/networking/smtp-relay/ks.yaml rename to 
kubernetes/apps/network/smtp-relay/ks.yaml diff --git a/kubernetes/apps/networking/cloudflared/app/configs/config.yaml b/kubernetes/apps/networking/cloudflared/app/configs/config.yaml deleted file mode 100644 index fdb39f3d1..000000000 --- a/kubernetes/apps/networking/cloudflared/app/configs/config.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -originRequest: - http2Origin: true - -ingress: - - hostname: "${SECRET_DOMAIN}" - service: https://nginx-external-controller.networking.svc.cluster.local:443 - originRequest: - originServerName: "external.${SECRET_DOMAIN}" - - hostname: "*.${SECRET_DOMAIN}" - service: https://nginx-external-controller.networking.svc.cluster.local:443 - originRequest: - originServerName: "external.${SECRET_DOMAIN}" - - service: http_status:404 diff --git a/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml b/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml deleted file mode 100644 index 08d27ba17..000000000 --- a/kubernetes/apps/networking/cloudflared/app/helmrelease.yaml +++ /dev/null @@ -1,101 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: cloudflared - namespace: networking -spec: - interval: 30m - chart: - spec: - chart: app-template - version: 1.5.1 - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - controller: - replicas: 1 - strategy: RollingUpdate - annotations: - reloader.stakater.com/auto: "true" - image: - repository: docker.io/cloudflare/cloudflared - tag: 2023.7.3 - env: - NO_AUTOUPDATE: "true" - TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json - TUNNEL_METRICS: 0.0.0.0:8080 - TUNNEL_TRANSPORT_PROTOCOL: auto - TUNNEL_ID: - valueFrom: - secretKeyRef: - name: cloudflared-secret - key: TUNNEL_ID - args: - - tunnel - - --config - - /etc/cloudflared/config/config.yaml - - run - - "$(TUNNEL_ID)" - service: - main: - ports: - http: - port: 8080 - serviceMonitor: - main: - enabled: true - endpoints: - - port: http - scheme: http - path: /metrics - interval: 1m - scrapeTimeout: 30s - probes: - liveness: &probes - enabled: true - custom: true - spec: - httpGet: - path: /ready - port: http - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - readiness: *probes - startup: - enabled: false - persistence: - config: - enabled: true - type: configMap - name: cloudflared-configmap - subPath: config.yaml - mountPath: /etc/cloudflared/config/config.yaml - readOnly: true - creds: - enabled: true - type: secret - name: cloudflared-secret - subPath: credentials.json - mountPath: /etc/cloudflared/creds/credentials.json - readOnly: true - resources: - requests: - cpu: 5m - memory: 10Mi - limits: - memory: 256Mi diff --git a/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml b/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml deleted file mode 100644 index 90992b3bd..000000000 --- a/kubernetes/apps/networking/cloudflared/app/secret.sops.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: cloudflared-secret - namespace: networking -stringData: - TUNNEL_ID: ENC[AES256_GCM,data:Qd+oedMJFecZ1j5y6QkAPa/l+gSyddqSg/+6y4DgnBBMsuVg,iv:qRTGo95xbS5RadSI//KKViCWEW8vTGU8NbtCLeBIx4I=,tag:L39F696ZwP5gXJ1jNYGBVA==,type:str] - credentials.json: 
ENC[AES256_GCM,data:31HiQOD96f7dpCe0kyeVHePz+JPMCIYfvn9vPo3rJf5gZ3lQJTyNlXu9Pwvja4lY909MCQlOBP4gbwZcgHj4pS4G4tzePY1u8711vFSx0PenLzvzFTpOpYIT/F8pZUZ1hPSt24sozvFaxQw28EzOwVe8mHAmvmoi7ktA9+5rMMAuKAgC4wqxI+0k5DgU+cGaMn1uUx0By3dzK4E2BUzyn0qR6I8FovU77EBtZewACA==,iv:cquF6u341bhTQCURYBcHtKMfuEBmqiHCcoTEVzez8C4=,tag:buvEQVK9qhNk4pQI0bycgg==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLdERTWVFxSERucnBzWWhv - QlBQbmM0R0FjdFdMTEpZaWFDeUtJUGdFRmpBCmx1eVQwSjY3RkswVmdlVi9xQkVX - VGZESFVJNW5reW5EcWozODZ4QXE0N3cKLS0tIEdZRUczMkwrQ2drVXhUd2xHVGo5 - K2JzUXFhckZnc2lyWE93MHozWEhSWVkKln9noeiR87wYEBecEY1PzUqQIulXcMkM - 9ZPUNdIGdN6QUIqfUMuhibXJi6YsbOcRY8tPrzH65/OMIVJZRJhO+w== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:35Z" - mac: ENC[AES256_GCM,data:33NOmYsU6UphhnZI6CkSboH0fdxEuzQfborG6QbJTezKNqqMDUqqiW5H3fbY9t3aF+Gf+Gqsbwpdda91jmSQuz39j/7knJwNsOY4eGs3VMg+4bUzDeXKwXQ+3fLE3dGRz1CxfTY0K/Ao4tkn7h1GDfRtnoCIcTIF4RRnAaOtwgM=,iv:Sou9bEzzGEzRcMl7+bgT7jWH6nMYvOQPZaz66ovnsJo=,tag:7n6kxFMzxtrwcNhbyEUA/w==,type:str] - pgp: [] - encrypted_regex: ^(data|stringData)$ - version: 3.7.3 diff --git a/kubernetes/apps/networking/echo-server/app/helmrelease.yaml b/kubernetes/apps/networking/echo-server/app/helmrelease.yaml deleted file mode 100644 index d782c6f25..000000000 --- a/kubernetes/apps/networking/echo-server/app/helmrelease.yaml +++ /dev/null @@ -1,73 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: echo-server - namespace: networking -spec: - interval: 30m - chart: - spec: - chart: app-template - version: 1.5.1 - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - maxHistory: 2 - install: - remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - uninstall: - keepHistory: false - values: - controller: - strategy: RollingUpdate - image: - repository: docker.io/jmalloc/echo-server - tag: 0.3.5 - service: - main: - ports: - http: - port: &port 8080 - probes: - liveness: &probes - enabled: true - custom: true - spec: - httpGet: - path: /health - port: *port - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - readiness: *probes - startup: - enabled: false - ingress: - main: - enabled: true - ingressClassName: external - annotations: - external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" - hajimari.io/icon: video-input-antenna - hosts: - - host: &host "{{ .Release.Name }}.${SECRET_DOMAIN}" - paths: - - path: / - pathType: Prefix - tls: - - hosts: - - *host - resources: - requests: - cpu: 5m - memory: 10Mi - limits: - memory: 50Mi diff --git a/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml b/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml deleted file mode 100644 index 9254f89d1..000000000 --- a/kubernetes/apps/networking/external-dns/app/dnsendpoint-crd.yaml +++ /dev/null @@ -1,93 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.5.0 - api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007" - creationTimestamp: null - name: dnsendpoints.externaldns.k8s.io -spec: - group: externaldns.k8s.io - names: - kind: DNSEndpoint - listKind: DNSEndpointList - plural: dnsendpoints - singular: 
dnsendpoint - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DNSEndpointSpec defines the desired state of DNSEndpoint - properties: - endpoints: - items: - description: Endpoint is a high-level way of a connection between a service and an IP - properties: - dnsName: - description: The hostname of the DNS record - type: string - labels: - additionalProperties: - type: string - description: Labels stores labels defined for the Endpoint - type: object - providerSpecific: - description: ProviderSpecific stores provider specific config - items: - description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers - properties: - name: - type: string - value: - type: string - type: object - type: array - recordTTL: - description: TTL for the record - format: int64 - type: integer - recordType: - description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc - type: string - setIdentifier: - description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple') - type: string - targets: - description: The targets the DNS record points to - items: - type: string - type: array - type: object - type: array - type: object - status: - description: DNSEndpointStatus defines the observed state of DNSEndpoint - properties: - observedGeneration: - description: The generation observed by the external-dns controller. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/kubernetes/apps/networking/external-dns/app/kustomization.yaml b/kubernetes/apps/networking/external-dns/app/kustomization.yaml deleted file mode 100644 index 1278dd8b5..000000000 --- a/kubernetes/apps/networking/external-dns/app/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./dnsendpoint-crd.yaml - - ./secret.sops.yaml - - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/external-dns/app/secret.sops.yaml b/kubernetes/apps/networking/external-dns/app/secret.sops.yaml deleted file mode 100644 index da5bb3c40..000000000 --- a/kubernetes/apps/networking/external-dns/app/secret.sops.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: external-dns-secret - namespace: networking -stringData: - api-token: ENC[AES256_GCM,data:LH3+uGoAVjOqBj7rsYvNCIIzqrh0seTEynQR1YYE4YU5YlvjSWg6kA==,iv:ibJ6UIBec/sR294qqxpyYY9jPesCp6T06kMK4nwQg/k=,tag:YiFsrPmmVULLdFsOFpe5RA==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEaXIrbFBPVzhCTlRwWnZT - Q0p5YUl3MXhMYmI4dm1PZHk2RkZoWUFjYlZrCi9RVHg1T1JzYmkzNzU1bXJVaDM5 - OFRtdlNHT0FBVXdqZGt2dHU4WGY3dDQKLS0tIGkyblNadVV4Y25QU2ZBUUZuNTFD - cXl2ZnlrNFJCUVpwVEh5azZYSGpSencK6+2W9llCDiADlmTf29qNB5MqAKpmxWBf - NqHuoex0oD3l1d1+K3xMr8YxoXHuF8kXzD8H6u11jCLwLp8li54UzA== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-13T18:05:35Z" - mac: ENC[AES256_GCM,data:BuOpLJgcFNxqkjBkaOb8Mnfs3yFdnJLWW4BLBK3TrBzkWENPyrp90kBuQx5wc8RZmKSFer26daKsB+SogBkQ7x1WNTL2yzc6T0G6/9809jIDfEoRg+LD6RLWGl86vC0F37/UNMvP8U5g5p999POWaKcygueezAYap2AM1iXNCS4=,iv:q3nzADthkxjHLZZHr10SzlbSY+IfrVR/uzI2eBkxMw4=,tag:JMS3djY0YRCTUqnJGQNUMA==,type:str] - pgp: [] - encrypted_regex: ^(data|stringData)$ - version: 3.7.3 diff --git a/kubernetes/apps/networking/nginx/external/kustomization.yaml b/kubernetes/apps/networking/nginx/external/kustomization.yaml deleted file mode 100644 index c83d92a87..000000000 --- a/kubernetes/apps/networking/nginx/external/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/nginx/internal/kustomization.yaml b/kubernetes/apps/networking/nginx/internal/kustomization.yaml deleted file mode 100644 index c83d92a87..000000000 --- a/kubernetes/apps/networking/nginx/internal/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: networking -resources: - - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/nginx/monitoring/dashboards/kustomization.yaml b/kubernetes/apps/networking/nginx/monitoring/dashboards/kustomization.yaml deleted file mode 100644 index cc1b83e22..000000000 --- a/kubernetes/apps/networking/nginx/monitoring/dashboards/kustomization.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -configMapGenerator: - - - name: nginx - files: - - ./nginx.json - options: - labels: - home_ops: nginx - disableNameSuffixHash: 
true - - - name: request-handling-performance - files: - - ./request-handling-performance.json - options: - labels: - home_ops: request-handling-performance - disableNameSuffixHash: true diff --git a/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx-review.json b/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx-review.json deleted file mode 100644 index 0b9b93a23..000000000 --- a/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx-review.json +++ /dev/null @@ -1,2164 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.7.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - }, - { - "datasource": "${DS_PROMETHEUS}", - "enable": true, - "expr": "sum(changes(nginx_ingress_controller_config_last_reload_successful_timestamp_seconds{instance!=\"unknown\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[30s])) by (controller_class)", - "hide": false, - "iconColor": "rgba(255, 96, 96, 1)", - "limit": 100, - "name": "Config Reloads", - "showIn": 0, - "step": "30s", - "tagKeys": "controller_class", - "tags": [], - "titleFormat": "Config Reloaded", - "type": "tags" - } - ] - }, - "editable": true, - "gnetId": 14314, - "graphTooltip": 0, - "id": 35, - "iteration": 1619515274866, - "links": [], - "panels": [ - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 31, - "panels": [], - "title": "Overview", - "type": "row" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 1, - "description": "This is the total number of requests made in this period (top-right period selected)", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 8, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - 
"thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Requests (period)", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": null, - "decimals": 1, - "description": "This is the percentage of successful requests over the entire period in the top-right hand corner.\n\nNOTE: Ignoring 404s in this metric, since a 404 is a normal response for errant/invalid request. This helps prevent this percentage from being affected by typical web scanners and security probes.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 3, - "y": 1 - }, - "id": 14, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(\n rate(\n nginx_ingress_controller_requests{status!~\"[4-5].*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) \n/ \n(\n sum(\n rate(\n nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) - \n (\n sum(\n rate(\n nginx_ingress_controller_requests{status=~\"404|499\", controller_class=~\"$controller_class\", ingress=~\"$ingress\",namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) \n or vector(0)\n )\n)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "% Success (period)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": null, - "decimals": 0, - "description": "This is the number of new connections made to the controller in the last minute. NOTE: This metric does not support the Ingress, Namespace variables, as this is at a lower-level than the actual application. 
It does support the others though (Env, Controller Class, Pod)", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 5, - "y": 1 - }, - "id": 6, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{state=~\"active\", state=~\"active\", controller_class=~\"$controller_class\", controller_pod=~\"$pod\"}[$__interval]))", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "{{ingress}}", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Conns (2m)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": null, - "decimals": 0, - "description": "The number of HTTP requests made in the last 1 minute window", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 7, - "y": 1 - }, - "id": 7, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval]))", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeFrom": null, - "timeShift": null, - "title": "Reqs (2m)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#d44a3a", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": null, - "description": "This is the percentage of successful requests over the last minute.\n\nNOTE: Ignoring 404s in this metric, since a 404 
is a normal response for errant requests", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 9, - "y": 1 - }, - "id": 13, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(nginx_ingress_controller_requests{status!~\"[4-5].*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) / \n(sum(rate(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) - \n(sum(rate(nginx_ingress_controller_requests{status=~\"404|499\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) or vector(0)))", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "0.8,0.9", - "timeFrom": null, - "timeShift": null, - "title": "% Success (2m)", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#73BF69", - "#73BF69", - "#73BF69" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "description": "This is the number of successful requests in the last minute. 
Successful being 1xx or 2xx by the standard HTTP definition.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 12, - "y": 1 - }, - "id": 12, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{status=~\"(1|2).*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) or vector(0)", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "HTTP 1/2xx (2m)", - "transparent": true, - "type": "singlestat", - "valueFontSize": "150%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPrefix": false, - "colorValue": true, - "colors": [ - "#3274D9", - "#3274D9", - "#3274D9" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "description": "This is the number of 3xx requests in the last minute.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 15, - "y": 1 - }, - "id": 10, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{status=~\"3.*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[2m])) or vector(0)", - "format": "time_series", - "interval": "$__interval", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "HTTP 3xx (2m)", - "transparent": true, - "type": "singlestat", - "valueFontSize": "150%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#FF9830", - "#FF9830", - "#FF9830" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "description": "This is the number of 4xx requests in the last minute.", - "fieldConfig": { 
- "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 18, - "y": 1 - }, - "id": 18, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{status=~\"4.*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) or vector(0)", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "HTTP 4xx (2m)", - "transparent": true, - "type": "singlestat", - "valueFontSize": "150%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#F2495C", - "#F2495C", - "#F2495C" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "description": "This is the number of 5xx requests in the last minute.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 21, - "y": 1 - }, - "id": 11, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{status=~\"5.*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) or vector(0)", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "HTTP 5xx (2m)", - "transparent": true, - "type": "singlestat", - "valueFontSize": "150%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "This is a total number of requests broken down by the ingress. 
This can help get a sense of scale in relation to each other.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 4 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) by (ingress)", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "{{ingress}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP Requests / Ingress", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3838", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:3839", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "HTTP 101": "dark-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "The breakdown of the various HTTP status codes of the requests handled within' this period that matches the variables chosen above.\n\nThis chart helps notice and dive into which service is having failures and of what kind.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 4 - }, - "hiddenSeries": false, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "$$hashKey": "object:154", - "alias": "/HTTP [1-2].*/i", - "color": "#37872D" - }, - { - "$$hashKey": "object:155", - "alias": "/HTTP 4.*/i", - "color": "#C4162A" - }, - { - "$$hashKey": "object:156", - "alias": "HTTP 404", - "color": "#FF9830" - }, - { - "$$hashKey": "object:285", - "alias": "HTTP 499", - "color": "#FA6400" - }, - { - "$$hashKey": "object:293", - "alias": "/HTTP 5.*/i", - "color": "#C4162A" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval])) by (status)", - "format": 
"time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "HTTP {{status}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP Status Codes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:182", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:183", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "The total number of HTTP requests made within' each period", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 4 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[$__interval]))", - "format": "time_series", - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "{{ingress}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Total HTTP Requests", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": false, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 12 - }, - "id": 33, - "panels": [], - "title": "Latency", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "decimals": 1, - "description": "This graph can help assess and help us meet SLA requirements as far as the responsive time of our services.\n\nFor a more detailed latency graph broken out by ingress please open the closed tab at the bottom because it is very CPU intensive.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 13 - }, - "hiddenSeries": false, - "id": 29, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": true, - "min": 
true, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "$$hashKey": "object:294", - "alias": "Average", - "color": "#F2495C", - "fill": 0, - "points": true - }, - { - "$$hashKey": "object:316", - "alias": "0.95", - "color": "rgb(44, 0, 182)" - }, - { - "$$hashKey": "object:422", - "alias": "0.9", - "color": "#1F60C4" - }, - { - "$$hashKey": "object:430", - "alias": "0.75", - "color": "#8AB8FF", - "fill": 1 - }, - { - "$$hashKey": "object:440", - "alias": "0.5", - "color": "rgb(255, 255, 255)", - "fill": 0 - }, - { - "$$hashKey": "object:4144", - "alias": "0.99", - "color": "#8F3BB8", - "fill": 0 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.99,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304|499\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\"\n }[$__interval]\n )\n )\n)", - "format": "time_series", - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "0.99", - "refId": "A" - }, - { - "expr": "histogram_quantile(\n 0.95,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304|499\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\"\n }[$__interval]\n )\n )\n)", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "0.95", - "refId": "B" - }, - { - "expr": "histogram_quantile(\n 0.9,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304|499\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\"\n }[$__interval]\n )\n )\n)", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "0.9", - "refId": "C" - }, - { - "expr": "histogram_quantile(\n 0.5,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304|499\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\"\n }[$__interval]\n )\n )\n)", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "0.5", - "refId": "D" - }, - { - "expr": "histogram_quantile(\n 0.75,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304|499\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\"\n }[$__interval]\n )\n )\n)", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "0.75", - "refId": "E" - }, - { - "expr": "(\n\n(sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"0.01\"\n}[$__interval]))\n* 0.01)\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n 
status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"0.1\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"0.01\"\n}[$__interval])))\n* 0.1)\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"1\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"0.1\"\n}[$__interval])))\n* 1)\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"10\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"1\"\n}[$__interval])))\n* 10 )\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"30\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"10\"\n}[$__interval])))\n* 30 )\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"60\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"30\"\n}[$__interval])))\n* 60 )\n\n+\n\n((sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"+Inf\"\n}[$__interval]))\n-\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"60\"\n}[$__interval])))\n* 120 )\n\n) / \n\nsum(increase(nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404|500|304\",\n controller_class=~\"$controller_class\",\n ingress=~\"$ingress\",\n namespace=~\"$namespace\",\n controller_pod=~\"$pod\",\n le=\"+Inf\"\n}[$__interval]))\n", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "Average", - "refId": "F" - } - ], - "thresholds": [], - "timeFrom": null, - 
"timeRegions": [], - "timeShift": null, - "title": "Latency (Average Percentiles)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:1035", - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:1036", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#C4162A", - "colorScale": "linear", - "colorScheme": "interpolateTurbo", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": null, - "description": "This graph can help assess and help us meet SLA requirements as far as the responsive time of our services.\n\nFor a more detailed latency graph broken out by ingress please open the closed tab at the bottom because it is very CPU intensive.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 13 - }, - "heatmap": {}, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 27, - "legend": { - "show": true - }, - "links": [], - "pluginVersion": "7.4.3", - "reverseYBuckets": false, - "targets": [ - { - "expr": "sum by (le)(\n increase(\n nginx_ingress_controller_request_duration_seconds_bucket{\n status!=\"404\",status!=\"500\",\n controller_class =~ \"$controller_class\",\n namespace =~ \"$namespace\",\n ingress =~ \"$ingress\"\n }[$__interval]\n )\n)", - "format": "time_series", - "hide": false, - "interval": "5m", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "D" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Latency Heatmap", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": 0, - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 35, - "panels": [], - "title": "Connections", - "type": "row" - }, - { - "aliasColors": { - "New Connections": "purple" - }, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "NOTE: This does not work per ingress/namespace\n\nThis is the number of new connections opened by the controller", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 22 - }, - "hiddenSeries": false, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(increase(nginx_ingress_controller_nginx_process_connections{state=~\"active\", controller_class=~\"$controller_class\", controller_pod=~\"$pod\"}[$__interval]))", - "format": "time_series", - "interval": "2m", - "intervalFactor": 1, - "legendFormat": "New Connections", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "New Connections Opened (Controller / Ingress Pod)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": false, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3252", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:3253", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "Connections": "rgb(255, 200, 4)" - }, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "NOTE: This does not work per ingress/namespace\n\nThe total number of connections opened to our ingresses. If you have a CDN in front of our services, it is not unusual for this to be very low. If/when we use something like websockets with a persistent connection this can/will be very high.", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 22 - }, - "hiddenSeries": false, - "id": 22, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{state=~\"active\", state=~\"active\", controller_class=~\"$controller_class\", controller_pod=~\"$pod\"}[$__range]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Connections", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Total Connections Open (Controller / Ingress Pod)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": false, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3098", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:3099", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": true, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 30 - }, - "id": 24, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 
1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 38 - }, - "hiddenSeries": false, - "id": 25, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(\n rate(\n nginx_ingress_controller_requests{status!~\"[4-5].*\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) by (ingress)\n/ \n(\n sum(\n rate(\n nginx_ingress_controller_requests{ controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) by (ingress)\n - \n (\n sum(\n rate(\n nginx_ingress_controller_requests{status=~\"404|499\", controller_class=~\"$controller_class\", ingress=~\"$ingress\",namespace=~\"$namespace\", controller_pod=~\"$pod\"}[${__range_s}s]\n )\n ) by (ingress)\n or vector(0)\n )\n)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ingress}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Percentage of Success (non-2xx) - By Ingress", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:108", - "decimals": null, - "format": "percentunit", - "label": null, - "logBase": 1, - "max": "1", - "min": "0", - "show": true - }, - { - "$$hashKey": "object:109", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 13, - "w": 24, - "x": 0, - "y": 47 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "sort": "avg", - "sortDesc": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.4.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{status!=\"404\",status!=\"500\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[5m])) by (le, ingress))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "p99 {{ ingress }}", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.95, 
sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{status!=\"404\",status!=\"500\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[5m])) by (le, ingress))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "p95 {{ ingress }}", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{status!=\"404\",status!=\"500\", controller_class=~\"$controller_class\", ingress=~\"$ingress\", namespace=~\"$namespace\", controller_pod=~\"$pod\"}[5m])) by (le, ingress))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "p90 {{ ingress }}", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Latency (per ingress)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "CPU Intensive / Optional Graphs", - "type": "row" - } - ], - "refresh": "1m", - "schemaVersion": 27, - "style": "dark", - "tags": [ - "ingress", - "nginx", - "networking", - "services", - "k8s" - ], - "templating": { - "list": [ - { - "allValue": ".*", - "current": { - "selected": true, - "text": [ - "All" - ], - "value": [ - "$__all" - ] - }, - "datasource": "${DS_PROMETHEUS}", - "definition": "label_values(nginx_ingress_controller_config_hash, controller_class) ", - "description": null, - "error": null, - "hide": 0, - "includeAll": true, - "label": "Controller Class", - "multi": true, - "name": "controller_class", - "options": [], - "query": { - "query": "label_values(nginx_ingress_controller_config_hash, controller_class) ", - "refId": "prometheus-controller_class-Variable-Query" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".*", - "current": { - "selected": true, - "tags": [], - "text": [ - "All" - ], - "value": [ - "$__all" - ] - }, - "datasource": "${DS_PROMETHEUS}", - "definition": "label_values(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\"},namespace)", - "description": null, - "error": null, - "hide": 0, - "includeAll": true, - "label": "Namespace", - "multi": true, - "name": "namespace", - "options": [], - "query": { - "query": "label_values(nginx_ingress_controller_requests{ controller_class=~\"$controller_class\"},namespace)", - "refId": "prometheus-namespace-Variable-Query" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "selected": true, - "text": [ - "All" - ], - "value": [ - "$__all" - ] - }, - "datasource": "${DS_PROMETHEUS}", - "definition": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\"}, ingress) ", - "description": null, - "error": null, - "hide": 0, - "includeAll": true, - "label": "Ingress", - "multi": true, - "name": 
"ingress", - "options": [], - "query": { - "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\"}, ingress) ", - "refId": "prometheus-ingress-Variable-Query" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".*", - "current": { - "selected": true, - "text": [ - "All" - ], - "value": [ - "$__all" - ] - }, - "datasource": "${DS_PROMETHEUS}", - "definition": "label_values(nginx_ingress_controller_config_hash{controller_class=~\"$controller_class\"}, controller_pod) ", - "description": null, - "error": null, - "hide": 0, - "includeAll": true, - "label": "Ingress Pod", - "multi": true, - "name": "pod", - "options": [], - "query": { - "query": "label_values(nginx_ingress_controller_config_hash{controller_class=~\"$controller_class\"}, controller_pod) ", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-3h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes Nginx Ingress Prometheus NextGen", - "description": "Nginx Ingress Controller via Prometheus Metrics Dashboard created for DevOps Nirvana @ https://github.com/DevOps-Nirvana", - "uid": "k8s-nginx-ingress-prometheus-ng", - "version": 27 -} diff --git a/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx.json b/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx.json deleted file mode 100644 index 3c3205b4a..000000000 --- a/kubernetes/apps/networking/nginx/monitoring/dashboards/nginx.json +++ /dev/null @@ -1,1630 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.2.1" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - }, - { - "datasource": "${DS_PROMETHEUS}", - "enable": true, - "expr": "sum(changes(nginx_ingress_controller_config_last_reload_successful_timestamp_seconds{instance!=\"unknown\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[30s])) by (controller_class)", - "hide": false, - "iconColor": "rgba(255, 96, 96, 1)", - "limit": 100, - "name": "Config Reloads", - "showIn": 0, - "step": "30s", - "tagKeys": "controller_class", - "tags": [], - "titleFormat": "Config Reloaded", - "type": "tags" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "iteration": 1534359654832, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - 
"rgba(50, 172, 45, 0.97)" - ], - "datasource": "${DS_PROMETHEUS}", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 0, - "y": 0 - }, - "id": 20, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m])), 0.001)", - "format": "time_series", - "intervalFactor": 1, - "refId": "A", - "step": 4 - } - ], - "thresholds": "", - "title": "Controller Request Volume", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "${DS_PROMETHEUS}", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 6, - "y": 0 - }, - "id": 82, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",state=\"active\"}[2m]))", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A", - "step": 4 - } - ], - "thresholds": "", - "title": "Controller Connections", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "${DS_PROMETHEUS}", - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 80, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": false - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 12, - "y": 0 - }, - "id": 21, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": 
"range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",status!~\"[4-5].*\"}[2m])) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m]))", - "format": "time_series", - "intervalFactor": 1, - "refId": "A", - "step": 4 - } - ], - "thresholds": "95, 99, 99.5", - "title": "Controller Success Rate (non-4|5xx responses)", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 18, - "y": 0 - }, - "id": 81, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "avg(irate(nginx_ingress_controller_success{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[1m])) * 60", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A", - "step": 4 - } - ], - "thresholds": "", - "title": "Config Reloads", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "total" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "${DS_PROMETHEUS}", - "decimals": 0, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 21, - "y": 0 - }, - "id": 83, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": 
"N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(nginx_ingress_controller_config_last_reload_successful{controller_pod=~\"$controller\",controller_namespace=~\"$namespace\"} == 0)", - "format": "time_series", - "instant": true, - "intervalFactor": 1, - "refId": "A", - "step": 4 - } - ], - "thresholds": "", - "title": "Last Config Failed", - "transparent": false, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 3 - }, - "height": "200px", - "id": 86, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": false, - "hideZero": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 300, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress), 0.001)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "metric": "network", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Ingress Request Volume", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "reqps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "max - istio-proxy": "#890f02", - "max - master": "#bf1b00", - "max - prometheus": "#bf1b00" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": false, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 3 - }, - "id": 87, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "sideWidth": 300, - "sort": "avg", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - 
"stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\",status!~\"[4-5].*\"}[2m])) by (ingress) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", - "format": "time_series", - "instant": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Ingress Success Rate (non-4|5xx responses)", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 1, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 10 - }, - "height": "200px", - "id": 32, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (irate (nginx_ingress_controller_request_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "Received", - "metric": "network", - "refId": "A", - "step": 10 - }, - { - "expr": "- sum (irate (nginx_ingress_controller_response_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", - "format": "time_series", - "hide": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "Sent", - "metric": "network", - "refId": "B", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Network I/O pressure", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "max - istio-proxy": 
"#890f02", - "max - master": "#bf1b00", - "max - prometheus": "#bf1b00" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 2, - "editable": false, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 10 - }, - "id": 77, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sideWidth": 200, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg(nginx_ingress_controller_nginx_process_resident_memory_bytes{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}) ", - "format": "time_series", - "instant": false, - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "nginx", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average Memory Usage", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "max - istio-proxy": "#890f02", - "max - master": "#bf1b00" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "decimals": 3, - "editable": false, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 10 - }, - "height": "", - "id": 79, - "isNew": true, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg (rate (nginx_ingress_controller_nginx_process_cpu_seconds_total{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m])) ", - "format": "time_series", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "nginx", - "metric": "container_cpu", - "refId": "A", - "step": 10 - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Average CPU Usage", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": 
"none", - "label": "cores", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "columns": [], - "datasource": "${DS_PROMETHEUS}", - "description": "This data is real time, independent of dashboard time range", - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 16 - }, - "hideTimeOverride": false, - "id": 75, - "links": [], - "pageSize": 7, - "repeat": null, - "repeatDirection": "h", - "scroll": true, - "showHeader": true, - "sort": { - "col": 1, - "desc": true - }, - "styles": [ - { - "alias": "Ingress", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "ingress", - "preserveFormat": false, - "sanitize": false, - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Requests", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "Value #A", - "thresholds": [ - "" - ], - "type": "number", - "unit": "ops" - }, - { - "alias": "Errors", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "Value #B", - "thresholds": [], - "type": "number", - "unit": "ops" - }, - { - "alias": "P50 Latency", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": false, - "pattern": "Value #C", - "thresholds": [], - "type": "number", - "unit": "dtdurations" - }, - { - "alias": "P90 Latency", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "Value #D", - "thresholds": [], - "type": "number", - "unit": "dtdurations" - }, - { - "alias": "P99 Latency", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "Value #E", - "thresholds": [], - "type": "number", - "unit": "dtdurations" - }, - { - "alias": "IN", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "Value #F", - "thresholds": [ - "" - ], - "type": "number", - "unit": "Bps" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "Time", - "thresholds": [], - "type": "hidden", - "unit": "short" - }, - { - "alias": "OUT", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "Value #G", - "thresholds": [], - "type": "number", - "unit": "Bps" - } - ], - "targets": [ - { - "expr": "histogram_quantile(0.50, 
sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", - "format": "table", - "hide": false, - "instant": true, - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", - "format": "table", - "hide": false, - "instant": true, - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "refId": "D" - }, - { - "expr": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", - "format": "table", - "hide": false, - "instant": true, - "intervalFactor": 1, - "legendFormat": "{{ destination_service }}", - "refId": "E" - }, - { - "expr": "sum(irate(nginx_ingress_controller_request_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "refId": "F" - }, - { - "expr": "sum(irate(nginx_ingress_controller_response_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", - "format": "table", - "instant": true, - "intervalFactor": 1, - "legendFormat": "{{ ingress }}", - "refId": "G" - } - ], - "timeFrom": null, - "title": "Ingress Percentile Response Times and Transfer Rates", - "transform": "table", - "transparent": false, - "type": "table" - }, - { - "datasource": "${DS_PROMETHEUS}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 24 - }, - "hideTimeOverride": false, - "id": 91, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "8.3.4", - "repeatDirection": "h", - "targets": [ - { - "exemplar": true, - "expr": "histogram_quantile(0.80, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) 
by (le))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "P80", - "refId": "C" - }, - { - "exemplar": true, - "expr": "histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "P90", - "refId": "D" - }, - { - "editorMode": "code", - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "P99", - "refId": "E" - } - ], - "title": "Ingress Percentile Response Times", - "type": "timeseries" - }, - { - "cards": {}, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateWarm", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", - "description": "", - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 24 - }, - "heatmap": {}, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 89, - "legend": { - "show": true - }, - "reverseYBuckets": false, - "targets": [ - { - "exemplar": true, - "expr": "sum(increase(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le)", - "format": "heatmap", - "interval": "", - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "title": "Ingress Request Latency Heatmap", - "tooltip": { - "show": true, - "showHistogram": true - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "yAxis": { - "format": "s", - "logBase": 1, - "show": true - }, - "yBucketBound": "auto" - }, - { - "columns": [ - { - "text": "Current", - "value": "current" - } - ], - "datasource": "${DS_PROMETHEUS}", - "fontSize": "100%", - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 31 - }, - "height": "1024", - "id": 85, - "links": [], - "pageSize": 7, - "scroll": true, - "showHeader": true, - "sort": { - "col": 1, - "desc": false - }, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "date" - }, - { - "alias": "TTL", - "colorMode": "cell", - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "pattern": "Current", - "thresholds": [ - "0", - "691200" - ], - "type": "number", - "unit": "s" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "number", - "unit": "short" - } - ], - "targets": [ - { - "expr": "avg(nginx_ingress_controller_ssl_expire_time_seconds{kubernetes_pod_name=~\"$controller\",namespace=~\"$namespace\",ingress=~\"$ingress\"}) by (host) - time()", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{ host }}", - "metric": 
"gke_letsencrypt_cert_expiration", - "refId": "A", - "step": 1 - } - ], - "title": "Ingress Certificate Expiry", - "transform": "timeseries_aggregations", - "type": "table" - } - ], - "refresh": "5s", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "nginx" - ], - "templating": { - "list": [ - { - "hide": 0, - "label": "datasource", - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": ".*", - "current": { - "text": "All", - "value": "$__all" - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": true, - "label": "Namespace", - "multi": false, - "name": "namespace", - "options": [], - "query": "label_values(nginx_ingress_controller_config_hash, controller_namespace)", - "refresh": 1, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".*", - "current": { - "text": "All", - "value": "$__all" - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": true, - "label": "Controller Class", - "multi": false, - "name": "controller_class", - "options": [], - "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\"}, controller_class) ", - "refresh": 1, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".*", - "current": { - "text": "All", - "value": "$__all" - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": true, - "label": "Controller", - "multi": false, - "name": "controller", - "options": [], - "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\",controller_class=~\"$controller_class\"}, controller_pod) ", - "refresh": 1, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": ".*", - "current": { - "tags": [], - "text": "All", - "value": "$__all" - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": true, - "label": "Ingress", - "multi": false, - "name": "ingress", - "options": [], - "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\",controller_pod=~\"$controller\"}, ingress) ", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "2m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "NGINX Ingress controller", - "uid": "nginx", - "version": 1 -} diff --git a/kubernetes/apps/networking/nginx/monitoring/dashboards/request-handling-performance.json b/kubernetes/apps/networking/nginx/monitoring/dashboards/request-handling-performance.json deleted file mode 100644 index 1422336ae..000000000 --- a/kubernetes/apps/networking/nginx/monitoring/dashboards/request-handling-performance.json +++ /dev/null @@ -1,985 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": [], - "__requires": [ - { - "type": "grafana", - "id": 
"grafana", - "name": "Grafana", - "version": "8.3.4" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph (old)", - "version": "" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 9614, - "graphTooltip": 1, - "id": null, - "iteration": 1646929474557, - "links": [], - "liveNow": false, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "description": "Total time for NGINX and upstream servers to process a request and send a response", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 91, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.5,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "legendFormat": ".5", - "refId": "D" - }, - { - "expr": "histogram_quantile(\n 0.95,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "legendFormat": ".95", - "refId": "B" - }, - { - "expr": "histogram_quantile(\n 0.99,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "legendFormat": ".99", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Request Latency Percentiles", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "description": "The time spent on receiving the response from the upstream server", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 94, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - 
"points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n 0.5,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": ".5", - "refId": "D" - }, - { - "expr": "histogram_quantile(\n 0.95,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "legendFormat": ".95", - "refId": "B" - }, - { - "expr": "histogram_quantile(\n 0.99,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "legendFormat": ".99", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Upstream Response Latency Percentiles", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 8 - }, - "hiddenSeries": false, - "id": 93, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": " sum by (method, host, path)(\n rate(\n nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Request Rate by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "reqps", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "description": "For each path observed, its median upstream response time", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 98, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - 
"options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(\n .5,\n sum by (le, method, host, path)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[5m]\n )\n )\n)", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Median Upstream Response Time by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "description": "Percentage of 4xx and 5xx responses among all responses.", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 16 - }, - "hiddenSeries": false, - "id": 100, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (method, host, path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n status =~ \"[4-5].*\"\n}[5m])) / sum by (method, host, path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n}[5m]))", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Response Error Rate by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "description": "For each path observed, the sum of upstream request time", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 102, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - 
"pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (method, host, path) (rate(nginx_ingress_controller_response_duration_seconds_sum{ingress =~ \"$ingress\"}[5m]))", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Upstream Response Time by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 24 - }, - "hiddenSeries": false, - "id": 101, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": " sum (\n rate(\n nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n status =~\"[4-5].*\",\n }[5m]\n )\n ) by(method, host, path, status)\n", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }} {{ status }}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Response Error Rate by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "reqps", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 24 - }, - "hiddenSeries": false, - "id": 99, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (\n rate (\n nginx_ingress_controller_response_size_sum {\n ingress =~ \"$ingress\",\n }[5m]\n )\n) by (method, host, path) / sum (\n rate(\n nginx_ingress_controller_response_size_count {\n 
ingress =~ \"$ingress\",\n }[5m]\n )\n) by (method, host, path)\n", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ method }} {{ host }}{{path }}", - "refId": "D" - }, - { - "expr": " sum (rate(nginx_ingress_controller_response_size_bucket{\n ingress =~ \"$ingress\",\n }[5m])) by (le)\n", - "hide": true, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Average Response Size by Method and Path", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 32 - }, - "hiddenSeries": false, - "id": 96, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.4", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_sum {\n ingress =~ \"$ingress\",\n }[5m]\n)) / sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_count {\n ingress =~ \"$ingress\",\n }[5m]\n )\n)\n", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "average", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Upstream Service Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - } - ], - "refresh": "30s", - "schemaVersion": 34, - "style": "dark", - "tags": [ - "nginx" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "includeAll": false, - "label": "datasource", - "multi": false, - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": ".*", - "current": {}, - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "definition": "label_values(nginx_ingress_controller_requests, ingress) ", - "hide": 0, - "includeAll": true, - "label": "Service Ingress", - "multi": false, - "name": "ingress", - "options": [], - "query": { - "query": "label_values(nginx_ingress_controller_requests, ingress) ", - "refId": "Prometheus-ingress-Variable-Query" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 2, - "tagValuesQuery": "", - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - 
}, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "2m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "Request Handling Performance", - "uid": "4GFbkOsZk", - "version": 1, - "weekStart": "" -} diff --git a/kubernetes/apps/networking/nginx/monitoring/kustomization.yaml b/kubernetes/apps/networking/nginx/monitoring/kustomization.yaml deleted file mode 100644 index a3d9836c9..000000000 --- a/kubernetes/apps/networking/nginx/monitoring/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: monitoring -resources: - - ./dashboards diff --git a/kubernetes/apps/monitoring/kustomization.yaml b/kubernetes/apps/openebs-system/kustomization.yaml similarity index 81% rename from kubernetes/apps/monitoring/kustomization.yaml rename to kubernetes/apps/openebs-system/kustomization.yaml index 5413fe6af..9cd8d4e4f 100644 --- a/kubernetes/apps/monitoring/kustomization.yaml +++ b/kubernetes/apps/openebs-system/kustomization.yaml @@ -3,3 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml + - ./openebs/ks.yaml diff --git a/kubernetes/apps/monitoring/namespace.yaml b/kubernetes/apps/openebs-system/namespace.yaml similarity index 81% rename from kubernetes/apps/monitoring/namespace.yaml rename to kubernetes/apps/openebs-system/namespace.yaml index ef4dd87a4..f173c6c9c 100644 --- a/kubernetes/apps/monitoring/namespace.yaml +++ b/kubernetes/apps/openebs-system/namespace.yaml @@ -2,6 +2,6 @@ apiVersion: v1 kind: Namespace metadata: - name: monitoring + name: openebs-system labels: kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml b/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml new file mode 100644 index 000000000..975bff303 --- /dev/null +++ b/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: openebs +spec: + interval: 30m + chart: + spec: + chart: openebs + version: 4.0.1 + sourceRef: + kind: HelmRepository + name: openebs + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + engines: + local: + lvm: + enabled: false + zfs: + enabled: false + replicated: + mayastor: + enabled: false + openebs-crds: + csi: + volumeSnapshots: + enabled: false + localpv-provisioner: + localpv: + image: + registry: quay.io/ + hostpathClass: + enabled: true + name: openebs-hostpath + isDefaultClass: false + basePath: /var/openebs/local diff --git a/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml b/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml b/kubernetes/apps/openebs-system/openebs/ks.yaml similarity index 59% rename from kubernetes/apps/kube-system/local-path-provisioner/ks.yaml rename to kubernetes/apps/openebs-system/openebs/ks.yaml index 985be51e7..170feca91 100644 --- 
a/kubernetes/apps/kube-system/local-path-provisioner/ks.yaml +++ b/kubernetes/apps/openebs-system/openebs/ks.yaml @@ -2,12 +2,14 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: cluster-apps-local-path-provisioner + name: &app openebs namespace: flux-system - labels: - substitution.flux.home.arpa/disabled: "true" spec: - path: ./kubernetes/apps/kube-system/local-path-provisioner/app + targetNamespace: openebs-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/openebs-system/openebs/app prune: true sourceRef: kind: GitRepository diff --git a/kubernetes/apps/system-upgrade/namespace.yaml b/kubernetes/apps/system-upgrade/namespace.yaml deleted file mode 100644 index 5ea024dde..000000000 --- a/kubernetes/apps/system-upgrade/namespace.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: system-upgrade - labels: - kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/bootstrap/flux/kustomization.yaml b/kubernetes/bootstrap/flux/kustomization.yaml new file mode 100644 index 000000000..4a669d63e --- /dev/null +++ b/kubernetes/bootstrap/flux/kustomization.yaml @@ -0,0 +1,61 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components and CRDs into your cluster. +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.3.0 +patches: + # Remove the default network policies + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/kubernetes/bootstrap/kustomization.yaml b/kubernetes/bootstrap/kustomization.yaml deleted file mode 100644 index ca6f64993..000000000 --- a/kubernetes/bootstrap/kustomization.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# IMPORTANT: This file is not tracked by flux and should never be. Its -# purpose is to only install the Flux components and CRDs into your cluster. 
---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - github.com/fluxcd/flux2/manifests/install?ref=v2.2.3 -patches: - # Remove the default network policies - - patch: |- - $patch: delete - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: not-used - target: - group: networking.k8s.io - kind: NetworkPolicy diff --git a/kubernetes/bootstrap/talos/apps/cilium-values.yaml b/kubernetes/bootstrap/talos/apps/cilium-values.yaml new file mode 100644 index 000000000..b7257357c --- /dev/null +++ b/kubernetes/bootstrap/talos/apps/cilium-values.yaml @@ -0,0 +1,59 @@ +--- +autoDirectNodeRoutes: true +bgpControlPlane: + enabled: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: apps +cni: + exclusive: false +containerRuntime: + integration: containerd +# NOTE: devices might need to be set if you have more than one active NIC on your hosts +# devices: eno+ eth+ +endpointRoutes: + enabled: true +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "172.16.0.0/16" +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + enabled: true +loadBalancer: + algorithm: maglev + mode: snat +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/kubernetes/bootstrap/talos/apps/helmfile.yaml b/kubernetes/bootstrap/talos/apps/helmfile.yaml new file mode 100644 index 000000000..8308db2e6 --- /dev/null +++ b/kubernetes/bootstrap/talos/apps/helmfile.yaml @@ -0,0 +1,26 @@ +--- +repositories: + - name: cilium + url: https://helm.cilium.io + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + recreatePods: true + force: true + +releases: + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.5 + values: ["./cilium-values.yaml"] + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.1.0 + values: ["./kubelet-csr-approver-values.yaml"] + needs: ["cilium"] diff --git a/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml b/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml new file mode 100644 index 000000000..9b116d8ad --- /dev/null +++ b/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml @@ -0,0 +1,3 @@ +--- +providerRegex: ^(lpkm1|lpkw1|lpkw2)$ +bypassDnsResolution: true diff --git a/kubernetes/bootstrap/talos/talconfig.yaml b/kubernetes/bootstrap/talos/talconfig.yaml new file mode 100644 index 000000000..6ab8f95fa --- /dev/null +++ b/kubernetes/bootstrap/talos/talconfig.yaml @@ -0,0 +1,187 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json +--- +# renovate: datasource=docker depName=ghcr.io/siderolabs/installer +talosVersion: v1.7.2 +# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet +kubernetesVersion: v1.30.1 + +clusterName: &cluster apps +endpoint: https://10.69.3.154:6443 +clusterPodNets: + - "172.16.0.0/16" +clusterSvcNets: + - "10.96.0.0/16" 
+additionalApiServerCertSans: &sans + - "10.69.3.154" + - 127.0.0.1 # KubePrism +additionalMachineCertSans: *sans +cniConfig: + name: none + +nodes: + - hostname: "lpkm1" + ipAddress: "10.69.3.26" + installDiskSelector: + serial: "S649NL0TC70241T" + talosImageURL: factory.talos.dev/installer/376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba + controlPlane: true + networkInterfaces: + - deviceSelector: + hardwareAddr: "58:47:ca:71:5f:dd" + dhcp: false + addresses: + - "10.69.3.26/24" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + gateway: "10.69.3.1" + vip: + ip: "10.69.3.154" + - hostname: "lpkw1" + ipAddress: "10.69.3.27" + installDiskSelector: + serial: "50026B7381956EDD" + talosImageURL: factory.talos.dev/installer/376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba + controlPlane: false + networkInterfaces: + - deviceSelector: + hardwareAddr: "1c:83:41:31:db:42" + dhcp: false + addresses: + - "10.69.3.27/24" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + gateway: "10.69.3.1" + - hostname: "lpkw2" + ipAddress: "10.69.3.25" + installDiskSelector: + serial: "S649NL0TC64476T" + talosImageURL: factory.talos.dev/installer/376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba + controlPlane: false + networkInterfaces: + - deviceSelector: + hardwareAddr: "84:47:09:10:90:15" + dhcp: false + addresses: + - "10.69.3.25/24" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + gateway: "10.69.3.1" + +patches: + # Configure containerd + - |- + machine: + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: |- + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + + # Disable search domain everywhere + - |- + machine: + network: + disableSearchDomain: true + + # Enable cluster discovery + - |- + cluster: + discovery: + registries: + kubernetes: + disabled: false + service: + disabled: false + + # Configure kubelet + - |- + machine: + kubelet: + extraArgs: + rotate-server-certificates: true + nodeIP: + validSubnets: ["10.69.3.0/24"] + + # Force nameserver + - |- + machine: + network: + nameservers: + + # Configure NTP + - |- + machine: + time: + disabled: false + servers: ["time.cloudflare.com"] + + # Custom sysctl settings + - |- + machine: + sysctls: + fs.inotify.max_queued_events: "65536" + fs.inotify.max_user_watches: "524288" + fs.inotify.max_user_instances: "8192" + net.core.rmem_max: "2500000" + net.core.wmem_max: "2500000" + + # Mount openebs-hostpath in kubelet + - |- + machine: + kubelet: + extraMounts: + - destination: /var/openebs/local + type: bind + source: /var/openebs/local + options: ["bind", "rshared", "rw"] + + + +controlPlane: + patches: + # Cluster configuration + - |- + cluster: + allowSchedulingOnControlPlanes: true + controllerManager: + extraArgs: + bind-address: 0.0.0.0 + proxy: + disabled: true + scheduler: + extraArgs: + bind-address: 0.0.0.0 + + # ETCD configuration + - |- + cluster: + etcd: + extraArgs: + listen-metrics-urls: http://0.0.0.0:2381 + advertisedSubnets: + - "10.69.3.0/24" + + # Disable default API server admission plugins. 
+ - |- + - op: remove + path: /cluster/apiServer/admissionControl + + # Enable K8s Talos API Access + - |- + machine: + features: + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: ["os:admin"] + allowedKubernetesNamespaces: ["system-upgrade"] + + diff --git a/kubernetes/flux/apps.yaml b/kubernetes/flux/apps.yaml index d557f8286..2284be624 100644 --- a/kubernetes/flux/apps.yaml +++ b/kubernetes/flux/apps.yaml @@ -23,8 +23,10 @@ spec: name: cluster-secrets - kind: ConfigMap name: cluster-settings-user + optional: true - kind: Secret name: cluster-secrets-user + optional: true patches: - patch: |- apiVersion: kustomize.toolkit.fluxcd.io/v1 @@ -44,8 +46,10 @@ spec: name: cluster-secrets - kind: ConfigMap name: cluster-settings-user + optional: true - kind: Secret name: cluster-secrets-user + optional: true target: group: kustomize.toolkit.fluxcd.io kind: Kustomization diff --git a/kubernetes/flux/config/cluster.yaml b/kubernetes/flux/config/cluster.yaml index a49db441d..77b9bd691 100644 --- a/kubernetes/flux/config/cluster.yaml +++ b/kubernetes/flux/config/cluster.yaml @@ -6,9 +6,9 @@ metadata: namespace: flux-system spec: interval: 30m + url: "https://github.com/oscaromeu/home-ops" ref: - branch: main - url: "https://github.com/oscaromeu/home-ops.git" + branch: "main" ignore: | # exclude all /* diff --git a/kubernetes/flux/config/flux.yaml b/kubernetes/flux/config/flux.yaml index 9f9ffd321..b6889a4c3 100644 --- a/kubernetes/flux/config/flux.yaml +++ b/kubernetes/flux/config/flux.yaml @@ -8,7 +8,7 @@ spec: interval: 10m url: oci://ghcr.io/fluxcd/flux-manifests ref: - tag: v2.2.3 + tag: v2.3.0 --- apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization @@ -69,18 +69,6 @@ spec: target: kind: Deployment name: (kustomize-controller|helm-controller|source-controller) - # Enable drift detection for HelmReleases and set the log level to debug - # https://fluxcd.io/flux/components/helm/helmreleases/#drift-detection - - patch: | - - op: add - path: /spec/template/spec/containers/0/args/- - value: --feature-gates=DetectDrift=true,CorrectDrift=false - - op: add - path: /spec/template/spec/containers/0/args/- - value: --log-level=debug - target: - kind: Deployment - name: helm-controller # Enable Helm near OOM detection # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection - patch: | diff --git a/kubernetes/flux/repositories/git/elastic.yaml b/kubernetes/flux/repositories/git/elastic.yaml deleted file mode 100644 index 433e151be..000000000 --- a/kubernetes/flux/repositories/git/elastic.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: GitRepository -metadata: - name: eck-operator - namespace: flux-system -spec: - interval: 5m - url: https://github.com/elastic/cloud-on-k8s - ref: - branch: '2.6' - ignore: | - # exclude all - /* - # include eck-operator helm chart directory - !/deploy/eck-operator diff --git a/kubernetes/flux/repositories/git/kubernetes-csi-addons.yaml b/kubernetes/flux/repositories/git/kubernetes-csi-addons.yaml deleted file mode 100644 index b0e5bb0ed..000000000 --- a/kubernetes/flux/repositories/git/kubernetes-csi-addons.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1 -kind: GitRepository -metadata: - name: kubernetes-csi-addons - namespace: flux-system -spec: - interval: 30m - url: https://github.com/csi-addons/kubernetes-csi-addons - ref: - tag: v0.8.0 - ignore: | - # exclude all - /* - # include files - !/deploy/controller/crds.yaml - !/deploy/controller/rbac.yaml - 
!/deploy/controller/setup-controller.yaml diff --git a/kubernetes/flux/repositories/git/kustomization.yaml b/kubernetes/flux/repositories/git/kustomization.yaml index fe0fddca1..fe0f332a9 100644 --- a/kubernetes/flux/repositories/git/kustomization.yaml +++ b/kubernetes/flux/repositories/git/kustomization.yaml @@ -1,7 +1,4 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -resources: - - ./elastic.yaml - - ./kubernetes-csi-addons.yaml - - ./local-path-provisioner.yaml +resources: [] diff --git a/kubernetes/flux/repositories/git/local-path-provisioner.yaml b/kubernetes/flux/repositories/git/local-path-provisioner.yaml deleted file mode 100644 index 669fb0533..000000000 --- a/kubernetes/flux/repositories/git/local-path-provisioner.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1 -kind: GitRepository -metadata: - name: local-path-provisioner - namespace: flux-system -spec: - interval: 30m - url: https://github.com/rancher/local-path-provisioner - ref: - tag: v0.0.24 - ignore: | - # exclude all - /* - # include kubernetes directory - !/deploy/chart/local-path-provisioner diff --git a/kubernetes/flux/repositories/helm/actions-runner-controller.yaml b/kubernetes/flux/repositories/helm/actions-runner-controller.yaml deleted file mode 100644 index 0c3a14cd3..000000000 --- a/kubernetes/flux/repositories/helm/actions-runner-controller.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: actions-runner-controller - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://ghcr.io/actions/actions-runner-controller-charts ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: actions-runner-controller-2 - namespace: flux-system -spec: - interval: 120m0s - url: https://actions-runner-controller.github.io/actions-runner-controller diff --git a/kubernetes/flux/repositories/helm/authelia.yaml b/kubernetes/flux/repositories/helm/authelia.yaml deleted file mode 100644 index e28d9aa32..000000000 --- a/kubernetes/flux/repositories/helm/authelia.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: authelia - namespace: flux-system -spec: - interval: 2h - url: https://charts.authelia.com - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/backube.yaml b/kubernetes/flux/repositories/helm/backube.yaml deleted file mode 100644 index 452a3e3ca..000000000 --- a/kubernetes/flux/repositories/helm/backube.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: backube - namespace: flux-system -spec: - interval: 2h - url: https://backube.github.io/helm-charts/ diff --git a/kubernetes/flux/repositories/helm/bitnami.yaml b/kubernetes/flux/repositories/helm/bitnami.yaml deleted file mode 100644 index eca160a1a..000000000 --- a/kubernetes/flux/repositories/helm/bitnami.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: bitnami - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://registry-1.docker.io/bitnamicharts diff --git a/kubernetes/flux/repositories/helm/bjw-s.yaml b/kubernetes/flux/repositories/helm/bjw-s.yaml index df0c6474a..a40b5d778 100644 --- 
a/kubernetes/flux/repositories/helm/bjw-s.yaml +++ b/kubernetes/flux/repositories/helm/bjw-s.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: bjw-s diff --git a/kubernetes/flux/repositories/helm/botkube.yaml b/kubernetes/flux/repositories/helm/botkube.yaml deleted file mode 100644 index 71dbb1ccf..000000000 --- a/kubernetes/flux/repositories/helm/botkube.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: botkube - namespace: flux-system -spec: - interval: 24h - url: https://charts.botkube.io - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/calico.yaml b/kubernetes/flux/repositories/helm/calico.yaml deleted file mode 100644 index c8ca6d7b4..000000000 --- a/kubernetes/flux/repositories/helm/calico.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: calico - namespace: flux-system -spec: - interval: 2h - url: https://projectcalico.docs.tigera.io/charts diff --git a/kubernetes/flux/repositories/helm/chaos-mesh.yaml b/kubernetes/flux/repositories/helm/chaos-mesh.yaml deleted file mode 100644 index 8ebdc1aa7..000000000 --- a/kubernetes/flux/repositories/helm/chaos-mesh.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: chaos-mesh - namespace: flux-system -spec: - interval: 1h - url: https://charts.chaos-mesh.org diff --git a/kubernetes/flux/repositories/helm/cilium.yaml b/kubernetes/flux/repositories/helm/cilium.yaml index 51c65d691..3aee36788 100644 --- a/kubernetes/flux/repositories/helm/cilium.yaml +++ b/kubernetes/flux/repositories/helm/cilium.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: cilium diff --git a/kubernetes/flux/repositories/helm/cloudnative-pg.yaml b/kubernetes/flux/repositories/helm/cloudnative-pg.yaml deleted file mode 100644 index b7812e283..000000000 --- a/kubernetes/flux/repositories/helm/cloudnative-pg.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helmrepository_v1beta2.json -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: cloudnative-pg - namespace: flux-system -spec: - interval: 2h - url: https://cloudnative-pg.github.io/charts diff --git a/kubernetes/flux/repositories/helm/coredns.yaml b/kubernetes/flux/repositories/helm/coredns.yaml deleted file mode 100644 index e3a16bd18..000000000 --- a/kubernetes/flux/repositories/helm/coredns.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: coredns - namespace: flux-system -spec: - interval: 1h - url: https://coredns.github.io/helm diff --git a/kubernetes/flux/repositories/helm/crossplane.yaml b/kubernetes/flux/repositories/helm/crossplane.yaml deleted file mode 100644 index 285669cdb..000000000 --- a/kubernetes/flux/repositories/helm/crossplane.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: crossplane - namespace: flux-system -spec: - interval: 2h - url: https://charts.crossplane.io/stable diff --git a/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml b/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml deleted file mode 100644 index 
b48140d78..000000000 --- a/kubernetes/flux/repositories/helm/csi-driver-nfs.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: csi-driver-nfs - namespace: flux-system -spec: - interval: 1h - url: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts diff --git a/kubernetes/flux/repositories/helm/ddosify.yaml b/kubernetes/flux/repositories/helm/ddosify.yaml deleted file mode 100644 index 9fc5b62dc..000000000 --- a/kubernetes/flux/repositories/helm/ddosify.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: ddosify - namespace: flux-system -spec: - interval: 2h - url: https://ddosify.github.io/ddosify-helm-charts/ diff --git a/kubernetes/flux/repositories/helm/deliveryhero.yaml b/kubernetes/flux/repositories/helm/deliveryhero.yaml deleted file mode 100644 index dd05ed653..000000000 --- a/kubernetes/flux/repositories/helm/deliveryhero.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: deliveryhero - namespace: flux-system -spec: - interval: 2h - url: https://charts.deliveryhero.io/ diff --git a/kubernetes/flux/repositories/helm/descheduler.yaml b/kubernetes/flux/repositories/helm/descheduler.yaml deleted file mode 100644 index 64d63f6e2..000000000 --- a/kubernetes/flux/repositories/helm/descheduler.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: descheduler - namespace: flux-system -spec: - interval: 1h - url: https://kubernetes-sigs.github.io/descheduler diff --git a/kubernetes/flux/repositories/helm/external-dns.yaml b/kubernetes/flux/repositories/helm/external-dns.yaml index b76b9662c..a44512667 100644 --- a/kubernetes/flux/repositories/helm/external-dns.yaml +++ b/kubernetes/flux/repositories/helm/external-dns.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: external-dns diff --git a/kubernetes/flux/repositories/helm/external-secrets.yaml b/kubernetes/flux/repositories/helm/external-secrets.yaml deleted file mode 100644 index 65405ea3a..000000000 --- a/kubernetes/flux/repositories/helm/external-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: external-secrets - namespace: flux-system -spec: - interval: 1h - url: https://charts.external-secrets.io diff --git a/kubernetes/flux/repositories/helm/fairwinds.yaml b/kubernetes/flux/repositories/helm/fairwinds.yaml deleted file mode 100644 index 410a0345d..000000000 --- a/kubernetes/flux/repositories/helm/fairwinds.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: fairwinds - namespace: flux-system -spec: - interval: 2h - url: https://charts.fairwinds.com/stable diff --git a/kubernetes/flux/repositories/helm/flanksource.yaml b/kubernetes/flux/repositories/helm/flanksource.yaml deleted file mode 100644 index f74777190..000000000 --- a/kubernetes/flux/repositories/helm/flanksource.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: flanksource - namespace: flux-system 
-spec: - interval: 2h - url: https://flanksource.github.io/charts diff --git a/kubernetes/flux/repositories/helm/fleet.yaml b/kubernetes/flux/repositories/helm/fleet.yaml deleted file mode 100644 index 0203e20c3..000000000 --- a/kubernetes/flux/repositories/helm/fleet.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: fleet - namespace: flux-system -spec: - interval: 1h - url: https://fleetdm.github.io/fleet/charts diff --git a/kubernetes/flux/repositories/helm/gitea.yaml b/kubernetes/flux/repositories/helm/gitea.yaml deleted file mode 100644 index 97f072f09..000000000 --- a/kubernetes/flux/repositories/helm/gitea.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: gitea - namespace: flux-system -spec: - interval: 2h - url: https://dl.gitea.io/charts - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/grafana.yaml b/kubernetes/flux/repositories/helm/grafana.yaml deleted file mode 100644 index 5c3939d53..000000000 --- a/kubernetes/flux/repositories/helm/grafana.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: grafana - namespace: flux-system -spec: - interval: 1h - url: https://grafana.github.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/hajimari.yaml b/kubernetes/flux/repositories/helm/hajimari.yaml deleted file mode 100644 index e246f09be..000000000 --- a/kubernetes/flux/repositories/helm/hajimari.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: hajimari - namespace: flux-system -spec: - interval: 1h - url: https://hajimari.io diff --git a/kubernetes/flux/repositories/helm/harbor.yaml b/kubernetes/flux/repositories/helm/harbor.yaml deleted file mode 100644 index 1879f137d..000000000 --- a/kubernetes/flux/repositories/helm/harbor.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: harbor - namespace: flux-system -spec: - interval: 1h - url: https://helm.goharbor.io diff --git a/kubernetes/flux/repositories/helm/influxdata.yaml b/kubernetes/flux/repositories/helm/influxdata.yaml deleted file mode 100644 index f7df0cffa..000000000 --- a/kubernetes/flux/repositories/helm/influxdata.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: influxdata - namespace: flux-system -spec: - interval: 2h - url: https://helm.influxdata.com/ - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/flux/repositories/helm/ingress-nginx.yaml index 4dcf5eeac..82a0d0fff 100644 --- a/kubernetes/flux/repositories/helm/ingress-nginx.yaml +++ b/kubernetes/flux/repositories/helm/ingress-nginx.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: ingress-nginx diff --git a/kubernetes/flux/repositories/helm/intel.yaml b/kubernetes/flux/repositories/helm/intel.yaml deleted file mode 100644 index 9dc61e609..000000000 --- a/kubernetes/flux/repositories/helm/intel.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: intel - namespace: flux-system -spec: - interval: 2h - url: https://intel.github.io/helm-charts diff --git 
a/kubernetes/flux/repositories/helm/istio.yaml b/kubernetes/flux/repositories/helm/istio.yaml deleted file mode 100644 index 137062695..000000000 --- a/kubernetes/flux/repositories/helm/istio.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: istio - namespace: flux-system -spec: - interval: 1h - url: https://istio-release.storage.googleapis.com/charts diff --git a/kubernetes/flux/repositories/helm/jetstack.yaml b/kubernetes/flux/repositories/helm/jetstack.yaml index d7e64ffc7..737e06af0 100644 --- a/kubernetes/flux/repositories/helm/jetstack.yaml +++ b/kubernetes/flux/repositories/helm/jetstack.yaml @@ -1,9 +1,9 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: jetstack namespace: flux-system spec: interval: 1h - url: https://charts.jetstack.io/ + url: https://charts.jetstack.io diff --git a/kubernetes/flux/repositories/helm/jupyter.yaml b/kubernetes/flux/repositories/helm/jupyter.yaml deleted file mode 100644 index 5305840d0..000000000 --- a/kubernetes/flux/repositories/helm/jupyter.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: jupyterhub - namespace: flux-system -spec: - interval: 24h - url: https://jupyterhub.github.io/helm-chart/ - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/k8s-gateway.yaml b/kubernetes/flux/repositories/helm/k8s-gateway.yaml index a18177eb4..63a90615e 100644 --- a/kubernetes/flux/repositories/helm/k8s-gateway.yaml +++ b/kubernetes/flux/repositories/helm/k8s-gateway.yaml @@ -1,9 +1,9 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: k8s-gateway namespace: flux-system spec: interval: 1h - url: https://ori-edge.github.io/k8s_gateway/ + url: https://ori-edge.github.io/k8s_gateway diff --git a/kubernetes/flux/repositories/helm/kafka.yaml b/kubernetes/flux/repositories/helm/kafka.yaml deleted file mode 100644 index f609e9d52..000000000 --- a/kubernetes/flux/repositories/helm/kafka.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: strimzi - namespace: flux-system -spec: - interval: 1h - url: https://strimzi.io/charts/ diff --git a/kubernetes/flux/repositories/helm/keda.yaml b/kubernetes/flux/repositories/helm/keda.yaml deleted file mode 100644 index 774ca0081..000000000 --- a/kubernetes/flux/repositories/helm/keda.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: keda - namespace: flux-system -spec: - interval: 2h - url: https://kedacore.github.io/charts diff --git a/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml b/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml deleted file mode 100644 index d63e74b70..000000000 --- a/kubernetes/flux/repositories/helm/kubernetes-dashboard.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: kubernetes-dashboard - namespace: flux-system -spec: - interval: 1h - url: https://kubernetes.github.io/dashboard/ diff --git a/kubernetes/flux/repositories/helm/kustomization.yaml b/kubernetes/flux/repositories/helm/kustomization.yaml index 03aa25e13..86d1775f2 100644 --- a/kubernetes/flux/repositories/helm/kustomization.yaml +++ 
b/kubernetes/flux/repositories/helm/kustomization.yaml @@ -1,73 +1,15 @@ +--- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./actions-runner-controller.yaml - - ./authelia.yaml - - ./backube.yaml - - ./bitnami.yaml - ./bjw-s.yaml - - ./botkube.yaml - - ./calico.yaml - - ./chaos-mesh.yaml - ./cilium.yaml - - ./cloudnative-pg.yaml - - ./coredns.yaml - - ./crossplane.yaml - - ./csi-driver-nfs.yaml - - ./ddosify.yaml - - ./deliveryhero.yaml - - ./descheduler.yaml - ./external-dns.yaml - - ./external-secrets.yaml - - ./fairwinds.yaml - - ./flanksource.yaml - - ./fleet.yaml - - ./gitea.yaml - - ./grafana.yaml - - ./hajimari.yaml - - ./harbor.yaml - - ./influxdata.yaml - ./ingress-nginx.yaml - - ./intel.yaml - - ./istio.yaml - - ./jetstack.yaml - - ./jupyter.yaml - ./k8s-gateway.yaml - - ./kafka.yaml - - ./keda.yaml - - ./kubernetes-dashboard.yaml - - ./kyverno.yaml - - ./longhorn.yaml - - ./marketplane.yaml - - ./metallb.yaml + - ./jetstack.yaml - ./metrics-server.yaml - - ./microcks.yaml - - ./minio-operator.yaml - - ./minio.yaml - - ./mongodb.yaml - - ./nfs-subdir-external-provisioner.yaml - - ./node-feature-discovery.yaml - - ./nvidia-gpu-feature-discovery.yaml - - ./nvidia.yaml - - ./ot-helm.yaml - - ./passbolt.yaml - - ./piraeus.yaml - - ./podinfo.yaml - - ./prometheus-community.yaml - - ./pyroscope.yaml - - ./questdb.yaml - - ./redpanda.yaml - - ./robusta.yaml - - ./rook-ceph.yaml - - ./runix.yaml - - ./sloth.yaml + - ./openebs.yaml + - ./postfinance.yaml + - ./spegel.yaml - ./stakater.yaml - - ./tf-controller.yaml - - ./timescale.yaml - - ./twuni.yaml - - ./vector.yaml - - ./vm.yaml - - ./vmware.yaml - - ./weave-gitops.yaml - - ./wikijs.yaml - - ./woodpecker.yaml diff --git a/kubernetes/flux/repositories/helm/kyverno.yaml b/kubernetes/flux/repositories/helm/kyverno.yaml deleted file mode 100644 index 938caa687..000000000 --- a/kubernetes/flux/repositories/helm/kyverno.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: kyverno - namespace: flux-system -spec: - interval: 60m - url: https://kyverno.github.io/kyverno/ - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/longhorn.yaml b/kubernetes/flux/repositories/helm/longhorn.yaml deleted file mode 100644 index 5d020abc6..000000000 --- a/kubernetes/flux/repositories/helm/longhorn.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: longhorn - namespace: flux-system -spec: - interval: 15m - url: https://charts.longhorn.io - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/marketplane.yaml b/kubernetes/flux/repositories/helm/marketplane.yaml deleted file mode 100644 index b0f89c8c9..000000000 --- a/kubernetes/flux/repositories/helm/marketplane.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: marketplane - namespace: flux-system -spec: - interval: 1h - url: https://helm.plane.so/ diff --git a/kubernetes/flux/repositories/helm/metallb.yaml b/kubernetes/flux/repositories/helm/metallb.yaml deleted file mode 100644 index 61bf8079c..000000000 --- a/kubernetes/flux/repositories/helm/metallb.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: metallb - namespace: flux-system -spec: - interval: 1h - url: https://metallb.github.io/metallb diff --git 
a/kubernetes/flux/repositories/helm/metrics-server.yaml b/kubernetes/flux/repositories/helm/metrics-server.yaml index 57e7aa0c5..27a44828a 100644 --- a/kubernetes/flux/repositories/helm/metrics-server.yaml +++ b/kubernetes/flux/repositories/helm/metrics-server.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: metrics-server diff --git a/kubernetes/flux/repositories/helm/microcks.yaml b/kubernetes/flux/repositories/helm/microcks.yaml deleted file mode 100644 index e12054f01..000000000 --- a/kubernetes/flux/repositories/helm/microcks.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: microcks - namespace: flux-system -spec: - interval: 1h - url: https://microcks.io/helm diff --git a/kubernetes/flux/repositories/helm/minio-operator.yaml b/kubernetes/flux/repositories/helm/minio-operator.yaml deleted file mode 100644 index f455ca60b..000000000 --- a/kubernetes/flux/repositories/helm/minio-operator.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: minio-operator - namespace: flux-system -spec: - interval: 1h - url: https://operator.min.io/ - diff --git a/kubernetes/flux/repositories/helm/minio.yaml b/kubernetes/flux/repositories/helm/minio.yaml deleted file mode 100644 index 5e25d87b9..000000000 --- a/kubernetes/flux/repositories/helm/minio.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: minio - namespace: flux-system -spec: - interval: 1h - url: https://charts.min.io/ diff --git a/kubernetes/flux/repositories/helm/mongodb.yaml b/kubernetes/flux/repositories/helm/mongodb.yaml deleted file mode 100644 index 0e20c01e3..000000000 --- a/kubernetes/flux/repositories/helm/mongodb.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: mongodb - namespace: flux-system -spec: - interval: 1h - url: https://mongodb.github.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/nfs-subdir-external-provisioner.yaml b/kubernetes/flux/repositories/helm/nfs-subdir-external-provisioner.yaml deleted file mode 100644 index 7b1259501..000000000 --- a/kubernetes/flux/repositories/helm/nfs-subdir-external-provisioner.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: nfs-subdir-external-provisioner - namespace: flux-system -spec: - interval: 1h - url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner diff --git a/kubernetes/flux/repositories/helm/node-feature-discovery.yaml b/kubernetes/flux/repositories/helm/node-feature-discovery.yaml deleted file mode 100644 index f21ad6fa1..000000000 --- a/kubernetes/flux/repositories/helm/node-feature-discovery.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: node-feature-discovery - namespace: flux-system -spec: - interval: 2h - url: https://kubernetes-sigs.github.io/node-feature-discovery/charts diff --git a/kubernetes/flux/repositories/helm/nvidia-gpu-feature-discovery.yaml b/kubernetes/flux/repositories/helm/nvidia-gpu-feature-discovery.yaml deleted file mode 100644 index e20871446..000000000 --- a/kubernetes/flux/repositories/helm/nvidia-gpu-feature-discovery.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- 
-apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: nvgfd - namespace: flux-system -spec: - interval: 2h - url: https://nvidia.github.io/gpu-feature-discovery diff --git a/kubernetes/flux/repositories/helm/nvidia.yaml b/kubernetes/flux/repositories/helm/nvidia.yaml deleted file mode 100644 index 90f68cd31..000000000 --- a/kubernetes/flux/repositories/helm/nvidia.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# yaml-language-server: $schema=http://kubernetes-schemas.local.lan:8080/source.toolkit.fluxcd.io/helmrepository_v1beta2.json -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: nvidia - namespace: flux-system -spec: - interval: 1h - url: https://helm.ngc.nvidia.com/nvidia diff --git a/kubernetes/flux/repositories/helm/openebs.yaml b/kubernetes/flux/repositories/helm/openebs.yaml new file mode 100644 index 000000000..4f48013ee --- /dev/null +++ b/kubernetes/flux/repositories/helm/openebs.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: openebs + namespace: flux-system +spec: + interval: 1h + url: https://openebs.github.io/openebs diff --git a/kubernetes/flux/repositories/helm/ot-helm.yaml b/kubernetes/flux/repositories/helm/ot-helm.yaml deleted file mode 100644 index 3a5614edd..000000000 --- a/kubernetes/flux/repositories/helm/ot-helm.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: ot-helm - namespace: flux-system -spec: - interval: 2h - url: "https://ot-container-kit.github.io/helm-charts/" - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/passbolt.yaml b/kubernetes/flux/repositories/helm/passbolt.yaml deleted file mode 100644 index 0b4e4573a..000000000 --- a/kubernetes/flux/repositories/helm/passbolt.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: passbolt - namespace: flux-system -spec: - interval: 2h - url: "https://download.passbolt.com/charts/passbolt" - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/piraeus.yaml b/kubernetes/flux/repositories/helm/piraeus.yaml deleted file mode 100644 index 8ed7196e8..000000000 --- a/kubernetes/flux/repositories/helm/piraeus.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: piraeus - namespace: flux-system -spec: - interval: 2h - url: https://piraeus.io/helm-charts/ diff --git a/kubernetes/flux/repositories/helm/podinfo.yaml b/kubernetes/flux/repositories/helm/podinfo.yaml deleted file mode 100644 index 21abe9e0b..000000000 --- a/kubernetes/flux/repositories/helm/podinfo.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: podinfo - namespace: flux-system -spec: - interval: 30m - url: https://stefanprodan.github.io/podinfo diff --git a/kubernetes/flux/repositories/helm/postfinance.yaml b/kubernetes/flux/repositories/helm/postfinance.yaml new file mode 100644 index 000000000..b14a64d8e --- /dev/null +++ b/kubernetes/flux/repositories/helm/postfinance.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 1h + url: https://postfinance.github.io/kubelet-csr-approver diff --git 
a/kubernetes/flux/repositories/helm/prometheus-community.yaml b/kubernetes/flux/repositories/helm/prometheus-community.yaml deleted file mode 100644 index a97a3d445..000000000 --- a/kubernetes/flux/repositories/helm/prometheus-community.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: prometheus-community - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://ghcr.io/prometheus-community/charts diff --git a/kubernetes/flux/repositories/helm/pyroscope.yaml b/kubernetes/flux/repositories/helm/pyroscope.yaml deleted file mode 100644 index 09c4ceb32..000000000 --- a/kubernetes/flux/repositories/helm/pyroscope.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: pyroscope-io - namespace: flux-system -spec: - interval: 30m - url: https://pyroscope-io.github.io/helm-chart diff --git a/kubernetes/flux/repositories/helm/questdb.yaml b/kubernetes/flux/repositories/helm/questdb.yaml deleted file mode 100644 index 2c52e999e..000000000 --- a/kubernetes/flux/repositories/helm/questdb.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: questdb - namespace: flux-system -spec: - interval: 2h - url: https://helm.questdb.io/ diff --git a/kubernetes/flux/repositories/helm/redpanda.yaml b/kubernetes/flux/repositories/helm/redpanda.yaml deleted file mode 100644 index 85d83653e..000000000 --- a/kubernetes/flux/repositories/helm/redpanda.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: redpanda - namespace: flux-system -spec: - interval: 1h - url: https://charts.redpanda.com diff --git a/kubernetes/flux/repositories/helm/robusta.yaml b/kubernetes/flux/repositories/helm/robusta.yaml deleted file mode 100644 index d2d5f30db..000000000 --- a/kubernetes/flux/repositories/helm/robusta.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: robusta - namespace: flux-system -spec: - interval: 1h - url: https://robusta-charts.storage.googleapis.com diff --git a/kubernetes/flux/repositories/helm/rook-ceph.yaml b/kubernetes/flux/repositories/helm/rook-ceph.yaml deleted file mode 100644 index 5a87d9718..000000000 --- a/kubernetes/flux/repositories/helm/rook-ceph.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: rook-ceph - namespace: flux-system -spec: - interval: 1h - url: https://charts.rook.io/release diff --git a/kubernetes/flux/repositories/helm/runix.yaml b/kubernetes/flux/repositories/helm/runix.yaml deleted file mode 100644 index 1470c9f5e..000000000 --- a/kubernetes/flux/repositories/helm/runix.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: runix - namespace: flux-system -spec: - interval: 1h - url: https://helm.runix.net diff --git a/kubernetes/flux/repositories/helm/sloth.yaml b/kubernetes/flux/repositories/helm/sloth.yaml deleted file mode 100644 index 51a4b68a1..000000000 --- a/kubernetes/flux/repositories/helm/sloth.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: sloth - namespace: flux-system -spec: - interval: 2h - url: https://slok.github.io/sloth - timeout: 3m diff --git 
a/kubernetes/flux/repositories/helm/spegel.yaml b/kubernetes/flux/repositories/helm/spegel.yaml new file mode 100644 index 000000000..d9a8b2cd3 --- /dev/null +++ b/kubernetes/flux/repositories/helm/spegel.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: spegel + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/spegel-org/helm-charts diff --git a/kubernetes/flux/repositories/helm/stakater.yaml b/kubernetes/flux/repositories/helm/stakater.yaml index 1846e8ae4..98a3f6455 100644 --- a/kubernetes/flux/repositories/helm/stakater.yaml +++ b/kubernetes/flux/repositories/helm/stakater.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: stakater diff --git a/kubernetes/flux/repositories/helm/tf-controller.yaml b/kubernetes/flux/repositories/helm/tf-controller.yaml deleted file mode 100644 index 75ce67119..000000000 --- a/kubernetes/flux/repositories/helm/tf-controller.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: tf-controller - namespace: flux-system -spec: - interval: 2h - url: https://weaveworks.github.io/tf-controller/ diff --git a/kubernetes/flux/repositories/helm/timescale.yaml b/kubernetes/flux/repositories/helm/timescale.yaml deleted file mode 100644 index fe471fc61..000000000 --- a/kubernetes/flux/repositories/helm/timescale.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: timescale - namespace: flux-system -spec: - interval: 1m0s - url: https://charts.timescale.com - timeout: 3m \ No newline at end of file diff --git a/kubernetes/flux/repositories/helm/twuni.yaml b/kubernetes/flux/repositories/helm/twuni.yaml deleted file mode 100644 index 63169caf6..000000000 --- a/kubernetes/flux/repositories/helm/twuni.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: twuni - namespace: flux-system -spec: - interval: 2h - url: https://helm.twun.io - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/vector.yaml b/kubernetes/flux/repositories/helm/vector.yaml deleted file mode 100644 index e5090d6b2..000000000 --- a/kubernetes/flux/repositories/helm/vector.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: vector - namespace: flux-system -spec: - interval: 1h - url: https://helm.vector.dev diff --git a/kubernetes/flux/repositories/helm/vm.yaml b/kubernetes/flux/repositories/helm/vm.yaml deleted file mode 100644 index fae2a9d54..000000000 --- a/kubernetes/flux/repositories/helm/vm.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: vm - namespace: flux-system -spec: - interval: 24h - url: https://victoriametrics.github.io/helm-charts/ - timeout: 3m diff --git a/kubernetes/flux/repositories/helm/vmware.yaml b/kubernetes/flux/repositories/helm/vmware.yaml deleted file mode 100644 index 6291c6d73..000000000 --- a/kubernetes/flux/repositories/helm/vmware.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: vmware-tanzu - namespace: flux-system -spec: - interval: 30m - url: https://vmware-tanzu.github.io/helm-charts - timeout: 3m diff --git 
a/kubernetes/flux/repositories/helm/weave-gitops.yaml b/kubernetes/flux/repositories/helm/weave-gitops.yaml deleted file mode 100644 index f325c18ba..000000000 --- a/kubernetes/flux/repositories/helm/weave-gitops.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: weave-gitops - namespace: flux-system -spec: - type: oci - interval: 5m - url: oci://ghcr.io/weaveworks/charts diff --git a/kubernetes/flux/repositories/helm/wikijs.yaml b/kubernetes/flux/repositories/helm/wikijs.yaml deleted file mode 100644 index ba7a7b449..000000000 --- a/kubernetes/flux/repositories/helm/wikijs.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: requarks - namespace: flux-system -spec: - interval: 1h - url: https://charts.js.wiki diff --git a/kubernetes/flux/repositories/helm/woodpecker.yaml b/kubernetes/flux/repositories/helm/woodpecker.yaml deleted file mode 100644 index 7a9fec18e..000000000 --- a/kubernetes/flux/repositories/helm/woodpecker.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: HelmRepository -metadata: - name: woodpecker - namespace: flux-system -spec: - interval: 2h - url: https://woodpecker-ci.org/ diff --git a/kubernetes/flux/repositories/kustomization.yaml b/kubernetes/flux/repositories/kustomization.yaml index cb57fda12..d158d426e 100644 --- a/kubernetes/flux/repositories/kustomization.yaml +++ b/kubernetes/flux/repositories/kustomization.yaml @@ -4,4 +4,4 @@ kind: Kustomization resources: - ./git - ./helm - # - ./oci + - ./oci diff --git a/kubernetes/flux/repositories/oci/.gitkeep b/kubernetes/flux/repositories/oci/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/kubernetes/flux/repositories/oci/flamingo.yaml b/kubernetes/flux/repositories/oci/flamingo.yaml deleted file mode 100644 index c2aabe72c..000000000 --- a/kubernetes/flux/repositories/oci/flamingo.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: OCIRepository -metadata: - name: fsa-home-ops - namespace: flux-system - annotations: - metadata.weave.works/flamingo-default-app: "https://localhost:8080/applications/argocd/default-app?view=tree" - metadata.weave.works/flamingo-fsa-installation: "https://localhost:8080/applications/argocd/fsa-installation?view=tree" - link.argocd.argoproj.io/external-link: "http://localhost:9001/oci/details?clusterName=Default&name=home-ops&namespace=flux-system" -spec: - interval: 30s - url: oci://ghcr.io/flux-subsystem-argo/flamingo/manifests - ref: - tag: v2.8 diff --git a/kubernetes/flux/repositories/oci/kustomization.yaml b/kubernetes/flux/repositories/oci/kustomization.yaml index c64c3fde7..fe0f332a9 100644 --- a/kubernetes/flux/repositories/oci/kustomization.yaml +++ b/kubernetes/flux/repositories/oci/kustomization.yaml @@ -1,5 +1,4 @@ --- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -resources: - - ./flamingo.yaml +resources: [] diff --git a/kubernetes/flux/vars/cluster-secrets-user.sops.yaml b/kubernetes/flux/vars/cluster-secrets-user.sops.yaml deleted file mode 100644 index 4875220e4..000000000 --- a/kubernetes/flux/vars/cluster-secrets-user.sops.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: cluster-secrets-user - namespace: flux-system -stringData: - SECRET_PLACEHOLDER: 
ENC[AES256_GCM,data:qdTr8cioQCrmKDvj,iv:qPV+N3RAgeLqxtP5mAHF9Y74KEWKkIRpT7PXKrb9eZk=,tag:GvCi2ZiS7W4rmh58yumMYA==,type:str] - #ENC[AES256_GCM,data:0xTv5ws=,iv:FSWs/msnU5s0G25PiyWM0m8OrQFB57xHpXGBqKZW1B0=,tag:gvCUxECkskXKZ9f8NcrEOw==,type:comment] - SECRET_LDAP_USER: ENC[AES256_GCM,data:A95TeDtx/jbqTjYa,iv:aXj4jKbgUQ9eHzVef2Ha+/Zi6Bhvf/dE12UZ/LniqLs=,tag:BwWAlbdRC9pbiZ75MmcOHw==,type:str] - SECRET_LDAP_BASE_DN: ENC[AES256_GCM,data:jNq0oewEhw0DMp6OLIOr,iv:+xJL44TxABzo8kek4bu2rEDvfv1i9xcq5qp6R78elj8=,tag:hqW82iu35SCCdIYC1bPzNQ==,type:str] - #ENC[AES256_GCM,data:fm03LlMo,iv:Xr1g6Pts3raj1XB2sbrLlV4vQVtKub0rQ2GNamrzlbQ=,tag:98qRHTsRSN/qud97Et7iow==,type:comment] - SECRET_ACME_EMAIL: ENC[AES256_GCM,data:jcgpts0a9n4LIR7ttS5nWfPRQw==,iv:2C1/wSZ6Cp52EzKFmym/ex5Saq/oPF0m2Lb/neKXNGE=,tag:j1ghjk71T9JJ00DVFTsMaA==,type:str] - #ENC[AES256_GCM,data:Dt743i0=,iv:kdLSJzT01Mu3YjPcbmZ8/BHYMPQcJin/2/MeXWiXiww=,tag:L2VFuiZPPK+ZmFtvUWk8rg==,type:comment] - OUTLINE_OAUTH_CLIENT_SECRET: ENC[AES256_GCM,data:TWKFfj7k7kvWcmqrTyiVgor6Cb1Ljl+RQbjhVPpMkN/bPhbZvIZBK0NF2kjqELTmH2xBIR+FZDbPB011goDazw==,iv:XNkmu7eTsJ4TKwhgYmtlSvKN+JBfblgQfeCm9Frh4lE=,tag:jboB6psowl6Rd7CSlmzJXw==,type:str] - MINIO_ROOT_USER: ENC[AES256_GCM,data:SIhvtJk=,iv:vqcmH+EiSDwIlaqMthTMHVb6E5zy8bFMOAN0fNNx3Oo=,tag:AG6SkrNW8bb4ZFJtswff7Q==,type:str] - MINIO_ROOT_PASSWORD: ENC[AES256_GCM,data:jiszSXfxHm+4x1KEikmM1rjkOAZwHMjr6icL9+XsL3k=,iv:k1jE1hk3DsU0K80znkTKqF46zg4rtsSmWFOIFSBX5/M=,tag:CTfZNfIGKHhJ9Df8/4M5lA==,type:str] - MINIO_AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:WulUfMSX2p8/m8uN3GdALRNH0VU=,iv:pLmxK3fmsbdo/qL/MSdJh6OkA+/VNduyt9RzE9dsyJo=,tag:pe+c3ljsfmslkMx+QJ0EBQ==,type:str] - MINIO_AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:nWVpvL/HYrxOcWoM6DpnmtM+gMUO5nEZzNYkob7GcCoM9vGyVqnY0Q==,iv:eqESA++95JNeQBxVEQV+Ev6odWj7pdKB93EzFiVT/1E=,tag:C5X4sXsyqqcpFwMoOOEB2w==,type:str] - #ENC[AES256_GCM,data:oBOJpFSJ,iv:8pfvIJ5gHdCCEYK2oI3n4qFasGRUlLjISzSAoBfLif8=,tag:slVCjWDXMP68QM5BVXRCow==,type:comment] - KAFKA_USER: ENC[AES256_GCM,data:d3vuZQJ76jcIAQ==,iv:/wQIphay8paHPcADZldMKrbR0a/PBMDS3vN7gb5T5RE=,tag:pgvvxF9mpNAxO3Xs3NVhxg==,type:str] - KAFKA_PASS: ENC[AES256_GCM,data:RxaclJEXm1VfOmRLwH7bnocjOGgLfdCIgtAD6DjeeXI=,iv:6sPD142XBf+fvfpLt8tIDzmfn0lHj1fKI2Kfxw3hFko=,tag:hn7v9+vOlTEaNXbWa8xOsg==,type:str] - #ENC[AES256_GCM,data:GaQ+wPMeseSV,iv:aEErxzpTSZbi0awV4A4WvHJckVUBbkv0bewC/yhLnu0=,tag:hO+O0FJcIysI6DUcTJ4lEg==,type:comment] - CUSTOM_PUSHOVER_TOKEN: ENC[AES256_GCM,data:aL75izbozUUfZp7MVmZoNtRsuPp4vndzbSmKiSUh,iv:8/gejNBancSOINXKEYh5YPeMpNmII9rJdU673OpW8Dc=,tag:isbeLNhwMwv5v9p9oXh+Xw==,type:str] - CUSTOM_PUSHOVER_USER_KEY: ENC[AES256_GCM,data:xi2Sxr0YBawKQClyP8uIM9YUAQ0se2ysxGPXCZFe,iv:vAau7DWsnTENE658WCsRoUIoiiD92M1dvHvCDgAybhU=,tag:z8Q1jy6Q9pUtTim+lf+wTw==,type:str] - #ENC[AES256_GCM,data:MZZOwg==,iv:vbq65mFHA1SGLaY7cKkJSHPXnjgrs0SqpA7ifh1P1vE=,tag:pwRtqc8ukdXZF22wlty6Og==,type:comment] - SSH_KEY: ENC[AES256_GCM,data:IOTqA/32DvuVeBZQPPDgWKDRwY/hs57zqqOoFT5ZadOUIIKOI1mcYjBPJ0/FiPJjY7rUf606/lCmMlXy6J/u5u2QggJquKQCcJ5qXzjHdYVt+2WLhqlOoObU5CL0t/ilMsBkpA==,iv:jAsCJC1jyy1T2ljINUQmu3r3yOOlALnVO2XJ10RkeRQ=,tag:sMN+iJwXtASIQy8jxYAqHA==,type:str] -sops: - kms: [] - gcp_kms: [] - azure_kv: [] - hc_vault: [] - age: - - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 - enc: | - -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0RlNXQmhLT2pHYXUxR2lj - U3ZSalVUSDZORjJjVFVBRysvbzk2dzNQQzNRCnNpRVdnQXo2UVhnZVU2MnBCUVpN - NTdZYVhNaWo3WXBHdENsN3VuQ0xpNmsKLS0tIGZqZUE1ZFB1NU5WZTlzaE1BbXVN - 
TlU5NkdhWmtLK1B0SjBKN2NxcisrTEUK1bWvGBomxsxWVT27V6GIo2Hp6QdYXEnv - DSyzrdf1lLC6647NWF4HyB0/Lx7V/hyZo8g6AtSVU5U0+dgnHss32g== - -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-04T20:12:23Z" - mac: ENC[AES256_GCM,data:UUKv/e3tEJVjWSuniN7v8IWT9suhRPPRB+r12Obx5TBt59XgFwU6psOD9wKnk0DaDGzHaD0OsOSYcXeeLHHxfiRobSVuY0YCXqCD+yOCzjcZnTDNjkCDrFAJF79xhCMUOdINphElemi1W4hgdsZiZaUYEZuZm+9M1y4MSJgnjGo=,iv:s50Ff/qh3GdUMe2ysSomDywcGazGpENymg+g0G/L5Eg=,tag:hD2sVAPWyMk1Ua+NyhfBsQ==,type:str] - pgp: [] - encrypted_regex: ^(data|stringData)$ - version: 3.7.3 diff --git a/kubernetes/flux/vars/cluster-secrets.sops.yaml b/kubernetes/flux/vars/cluster-secrets.sops.yaml index 59aacf77d..1b02cad77 100644 --- a/kubernetes/flux/vars/cluster-secrets.sops.yaml +++ b/kubernetes/flux/vars/cluster-secrets.sops.yaml @@ -4,9 +4,25 @@ metadata: name: cluster-secrets namespace: flux-system stringData: - SECRET_ACME_EMAIL: ENC[AES256_GCM,data:x74JQspD0xKmUDDzvwpognGF7A==,iv:caNhmPjZo2rIX1WURUBhtDb/xcNWVoo1tVwqJqwOrNc=,tag:E294nxMFSi8bkXsseL7YkQ==,type:str] - SECRET_CLOUDFLARE_TUNNEL_ID: ENC[AES256_GCM,data:jlKhHOozNyG3Tf+z2IbDBxckzO+q9zBKSKGmIIaiokQXwLbe,iv:DgiHl8JUqw7J08xjGMvt5DWPZs0WHm1SU5hvb3o4mHg=,tag:Usah07gl8VZjNl8O1cc+xA==,type:str] - SECRET_DOMAIN: ENC[AES256_GCM,data:94YinQ1OIDbaFC3+,iv:kV/LmzKMdCAOIiUHEO9wqMfqSMZRSZnDMzcX41l+L6c=,tag:7s1M2n54wkYin8U2CtdMdA==,type:str] + SECRET_DOMAIN: ENC[AES256_GCM,data:GzH6CxU7KE+HHfhO,iv:5YlJHqj8r4qmTCPCM93OKr5VKjAjb3VRdAvDkRRo+yo=,tag:3AdB2j+srNi0w5Z0WwJadg==,type:str] + SECRET_ACME_EMAIL: ENC[AES256_GCM,data:W3QVTy7eL1i8EU9RVwgDIo6fqQ==,iv:Jgq/2Mq1yAzVreiGHk1/TW/4CwlbZmSzWVTj2Xkcino=,tag:q7nAMxJfVAwju6iDsYn+mg==,type:str] + SECRET_CLOUDFLARE_TUNNEL_ID: ENC[AES256_GCM,data:u5ze6TyxOQYMBAdoF40aAmaptvkd5gWtWaSq4xysvxB6t1v6,iv:yjRJfNrD86GWzQKmEIaIfTS1yG4A5+IKkf4quQs9L5k=,tag:7NYm3oqegQzgHeCXod7RNw==,type:str] + SECRET_LDAP_USER: ENC[AES256_GCM,data:t3nlp6fLNYkpSfqH,iv:LaYGuR5V1khcjkc9MD3HSmF0avWUwxJzyjjdPIR/rsk=,tag:7uHTINzhYxAWZN3vZ3S/AQ==,type:str] + SECRET_LDAP_BASE_DN: ENC[AES256_GCM,data:QJhEMxKVy7NRxm3XCMCg,iv:VVQzqaG9yvegC6GqdgY+qRSul7Pny5irt0l8tYhHG7M=,tag:aBmbUIQ4OfPnqS30v5zyUA==,type:str] + #ENC[AES256_GCM,data:TGUWew8=,iv:akwharOW6AEbN5XHLFWZFjJDKZVjjplHBOi20XRpL9c=,tag:R2AdgDMxZxiw8bbg30rMMQ==,type:comment] + OUTLINE_OAUTH_CLIENT_SECRET: ENC[AES256_GCM,data:1QbfyBXZcW7JSey37uCayUiXThmXnf6BPls6kl1w+/ZHFf24CphULUSxrVB1l6Bxx9rf6QOHLE37+/cvTyj2xw==,iv:MzzCktaz+KvjIrdvMS1BkeeJiijzvch3iOih0Zvabn8=,tag:hTwa277UvA/IBfHnM5FEnQ==,type:str] + MINIO_ROOT_USER: ENC[AES256_GCM,data:AMwhnpM=,iv:jRULNENejbU8Bakba8rVUbft/nK2X4/anAQLWzwjRBc=,tag:fZTWRAj+rekFPEXo3Bvh8w==,type:str] + MINIO_ROOT_PASSWORD: ENC[AES256_GCM,data:DUOZMNaUOoIm77mTNgHLu1uDPQm0x438WJqOI2rfJj0=,iv:mFB04Huv931xYxtr+XrTTO3uTb8Imi+NMn3alARda80=,tag:sd/tmk5qQLvnN2AtvDiehg==,type:str] + MINIO_AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:aIW0CYjD/ZpO75oAe4Y2n7pHllg=,iv:QAw/6x/n1vZUk2Ot8SnJHBvXlEbqjV/y6QWXNsw+zh4=,tag:6D88bfiuFJNmRm0SLdAF7A==,type:str] + MINIO_AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:Dxb0vkaPOTIFIG3BzMn6SmFHOOKZemf9bCpimhb45qZKk/GFmKQJ8A==,iv:mzsiUsQbr6QE32M2L5XmxroECFoiM0KlR/Zr2Auq68c=,tag:n+IWPxho+A25ZIccyEFmDw==,type:str] + #ENC[AES256_GCM,data:o12zoimb,iv:cRFTABhpZencYiQp2/v84Sl4uIdZ+Ch+JfjxdjiRAs8=,tag:Ky+ocVca8e8o4YdWcJTfLg==,type:comment] + KAFKA_USER: ENC[AES256_GCM,data:KERJFadEct6EPA==,iv:co+NoXyY169J79qBLfiLki1lqmlXiXdv2yBBx9DMCTo=,tag:QnHg9zN9NklOqDrkKwMXUg==,type:str] + KAFKA_PASS: 
ENC[AES256_GCM,data:YpIO5S7WIy/cFAdIk8eMThLyWlQJwKJ18SejwNC4JTE=,iv:w7UVgLnKZTcWphgXroHhyBPNoY5lN2HlZorJ960u0sg=,tag:SFmMqlEfjeMN+/BGZusjFg==,type:str] + #ENC[AES256_GCM,data:59sSx+NLYeGG,iv:WEHg5wmZU55NmZVxPfBlXXoazdLsYyuUyLohYpgb5Gc=,tag:06NksfJidtL8GJGaL/P45w==,type:comment] + CUSTOM_PUSHOVER_TOKEN: ENC[AES256_GCM,data:+trdIng0n7fKCNPaoaGp/LqCyTeXS/4Upaas7zTi,iv:QjzJPHZoV8f8LBMVa34FfJhCnWlbmHTm75ghcI6XhGE=,tag:wu/HoWArLQUwF1QHaq5H3A==,type:str] + CUSTOM_PUSHOVER_USER_KEY: ENC[AES256_GCM,data:H76pLu9lsJYWLvN5Rlinu2rJxfZ1FgWWN8xD5zsX,iv:vW99kZQfV0Xj9p482CsGyoA1h3xNnbf8PkzA8WkAUVM=,tag:vYlYxXDvKn7OdatjFLRl9w==,type:str] + #ENC[AES256_GCM,data:CWiHwA==,iv:W+3LXL9rKpylF1QZgxa6GU64JFaMhqQeTZnOGckqsFQ=,tag:zLh9/DVNH3IzvRzQZ9pVoQ==,type:comment] + SSH_KEY: ENC[AES256_GCM,data:lqXMUJlF1NdoYs3TpuU79p+9jXXhe9zDtHWRQi9jzi3Une1Oun126iTaFiNjV1uzFymR5cw2ugNaaLED/0gMtABzHikWHlfe/N65uiIuQb1Sq2FIDGMRYLiTUlKErgHwBlt2bw==,iv:OzbWJoGf3S7ViaH95xQ4Uiy03+hUfxGagketY+IeFos=,tag:sflHb31BCqtFjNLvJtzL3Q==,type:str] sops: kms: [] gcp_kms: [] @@ -16,14 +32,14 @@ sops: - recipient: age1ptththqpxnx0zuzmq0peq9x30vqgdedjsdlsuzxr5gfc36mnwqlsylrpr8 enc: | -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnZFN4TjFDdWpHS3JMbCsr - NGZDUVRBODdMNTlNQi9Fd3hWTHhFRlMrSUZVCjR2WEh1RlovR2pQTG9vWkluMWhr - WHNzL0pxUGpFY1pVWGtyYkFPMzJvRWcKLS0tIG9JTGlMT09EWUdEeVZUVUVCQy95 - V0JuckxaWWVLU056SWE0SVp3Sk1HV2cKVpOD5skI8cfG/FEkzxDUdei+nk/kYLA+ - vaAw7PvjnlQACzNuufT1ub+kwbdye56wuh+179UpkAwKV4xEqp0kUA== + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiVER6ZHBFQXRpaHF4SkdM + dHJRdmJSM3llTk80dnlBQjUrRURTZXArZ2dFCkVOT3QxamNoRUhCU3l2Z1VOd2Vp + R0JqaTgydUpQalVtL21VZ01QWTc4eXMKLS0tIG9YOVYybTJjWENwbFBaWkVJSUJT + TDV4MDEwTDRXek1nVTl4VjZnbHF3bjAK5IDFO0cN53SQuaX1czTSRz+jFPekv1in + mShNslNMfvzxOIOba5vaiBVDUXD8DZrQs/Ie4AFT0z3otZKyDyzoFQ== -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-05-04T19:39:41Z" - mac: ENC[AES256_GCM,data:nrhHWBnSlBrIKGs1OWCKbXolC7jP9YGgcNqYA10CmJZmYckIk0KpHjPYTAH+5Ot1OEFBHGD9+8R5ydOAib4Z91TM1UY4761sbNOsAv/b3rdOaorOf3/KT/emBToloP6HZ1+g+SMiJfE/748+tqEgkaLpJAhpB25Lg66+n5LgA+c=,iv:I3+BdFlYYzuST5IxuAe+ajG8NTpfvyFt0YHNgxszihc=,tag:Hr9JA797s+MYz/08s0IxoA==,type:str] + lastmodified: "2024-05-19T15:14:40Z" + mac: ENC[AES256_GCM,data:h0F08UPqf2teeH5OHZ8climcIYM4qycUyyrk9OLUti9o9z1dbMvq2guTK1yTRoaLv/h708WRhg33AU88mTJbqfkuPHFUnbIePuRNat0mjNl68zPK33JFIjmEVo/v2uBwxynOKgUYVsIaZz0mLZ3aJHlu8u9JJJaK1q8PW5nbZ5k=,iv:ZoxzNOGUlaaGC5fEQa3fF4Vk3pIBb2qszGjhrm6E5So=,tag:1T4aFHf12JXGLNCE8BuGBA==,type:str] pgp: [] encrypted_regex: ^(data|stringData)$ - version: 3.7.3 + version: 3.8.1 diff --git a/kubernetes/flux/vars/cluster-settings-user.yaml b/kubernetes/flux/vars/cluster-settings-user.yaml deleted file mode 100644 index 7b8176166..000000000 --- a/kubernetes/flux/vars/cluster-settings-user.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-settings-user - namespace: flux-system -data: - SETTINGS_PLACEHOLDER: "settings-value" diff --git a/kubernetes/flux/vars/cluster-settings.yaml b/kubernetes/flux/vars/cluster-settings.yaml index 6a84e5c15..491f854b6 100644 --- a/kubernetes/flux/vars/cluster-settings.yaml +++ b/kubernetes/flux/vars/cluster-settings.yaml @@ -5,9 +5,6 @@ metadata: name: cluster-settings namespace: flux-system data: - TIMEZONE: "Etc/UTC" - COREDNS_ADDR: "10.43.0.10" - KUBE_VIP_ADDR: "10.69.3.154" - CLUSTER_CIDR: "10.42.0.0/16" - SERVICE_CIDR: "10.43.0.0/16" + TIMEZONE: "Europe/Madrid" + CLUSTER_CIDR: "172.16.0.0/16" NODE_CIDR: "10.69.3.0/24" diff --git 
a/kubernetes/flux/vars/kustomization.yaml b/kubernetes/flux/vars/kustomization.yaml index dd93387ae..bca40ce03 100644 --- a/kubernetes/flux/vars/kustomization.yaml +++ b/kubernetes/flux/vars/kustomization.yaml @@ -2,6 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./cluster-settings.yaml - - ./cluster-settings-user.yaml - ./cluster-secrets.sops.yaml - - ./cluster-secrets-user.sops.yaml + - ./cluster-secrets-git.sops.yaml diff --git a/makejinja.toml b/makejinja.toml new file mode 100644 index 000000000..52845a379 --- /dev/null +++ b/makejinja.toml @@ -0,0 +1,18 @@ +[makejinja] +inputs = ["./bootstrap/overrides","./bootstrap/templates"] +output = "./" +exclude_patterns = [".mjfilter.py", "*.partial.yaml.j2"] +data = ["./config.yaml"] +import_paths = ["./bootstrap/scripts"] +loaders = ["plugin:Plugin"] +jinja_suffix = ".j2" +force = true +undefined = "chainable" + +[makejinja.delimiter] +block_start = "#%" +block_end = "%#" +comment_start = "#|" +comment_end = "#|" +variable_start = "#{" +variable_end = "}#" diff --git a/requirements.txt b/requirements.txt index 3a8d4341f..bdc5b50e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ -ansible==8.3.0 -ansible-lint==6.17.2 -bcrypt==4.0.1 -jmespath==1.0.1 -netaddr==0.8.0 -openshift==0.13.2 +bcrypt==4.1.3 +cloudflare==2.20.0 +email-validator==2.1.1 +makejinja==2.6.0 +netaddr==1.2.1 passlib==1.7.4 diff --git a/requirements.yaml b/requirements.yaml deleted file mode 100644 index 8aec996bc..000000000 --- a/requirements.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -collections: - - name: ansible.posix - version: 1.5.4 - - name: ansible.utils - version: 2.10.3 - - name: community.general - version: 7.3.0 - - name: community.sops - version: 1.6.7 - - name: kubernetes.core - version: 2.4.0 -roles: - - name: xanmanning.k3s - version: v3.4.2 - - name: geerlingguy.docker - version: 7.0.2 - - name: geerlingguy.pip - version: 3.0.1 diff --git a/scripts/kubeconform.sh b/scripts/kubeconform.sh new file mode 100755 index 000000000..a69308b1f --- /dev/null +++ b/scripts/kubeconform.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -o errexit +set -o pipefail + +KUBERNETES_DIR=$1 + +[[ -z "${KUBERNETES_DIR}" ]] && echo "Kubernetes location not specified" && exit 1 + +kustomize_args=("--load-restrictor=LoadRestrictionsNone") +kustomize_config="kustomization.yaml" +kubeconform_args=( + "-strict" + "-ignore-missing-schemas" + "-skip" + "Secret" + "-schema-location" + "default" + "-schema-location" + "https://kubernetes-schemas.pages.dev/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json" + "-verbose" +) + +echo "=== Validating standalone manifests in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -maxdepth 1 -type f -name '*.yaml' -print0 | while IFS= read -r -d $'\0' file; + do + kubeconform "${kubeconform_args[@]}" "${file}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/apps ===" +find "${KUBERNETES_DIR}/apps" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== 
Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done
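
Not part of the patch itself: a minimal sketch of running the new validation script locally, assuming kubeconform and kustomize are already installed and the repository root is the working directory.

    # Run the schema validation against the kubernetes/ tree (same entry point the script expects: a path argument)
    bash ./scripts/kubeconform.sh ./kubernetes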
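Likewise, a hedged sketch of re-rendering the bootstrap templates with the makejinja.toml added above, assuming makejinja picks up makejinja.toml from the working directory (the example variable name in the comment is illustrative, not something defined by this patch).

    # Render ./bootstrap/overrides and ./bootstrap/templates into ./ using the configured
    # delimiters, i.e. templates reference values as #{ example_value }# rather than {{ ... }}
    makejinja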