diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..1e510b7
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,151 @@
+name: Terraform plan and apply
+
+on:
+  push:
+    branches:
+      - 'master'
+      - 'develop'
+      - 'feature/**'
+
+
+env:
+  #TF_LOG: INFO #debug only
+  TF_INPUT: false
+
+jobs:
+  terraform-core:
+    environment: demogcp
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        shell: bash
+        working-directory: ./terraform/core
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Authenticate to GCP
+        uses: 'google-github-actions/auth@v2'
+        with:
+          credentials_json: '${{ secrets.SERVICE_ACCOUNT_KEY }}'
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: "1.7.5"
+
+      - name: Terraform Init
+        id: init
+        run: terraform init
+
+      - name: Terraform Validate
+        id: validate
+        # Run even if a previous step failed
+        if: success() || failure()
+        run: terraform validate
+
+      - name: Terraform Plan
+        id: plan
+        run: terraform plan
+
+      - name: Terraform Apply
+        id: apply
+        run: terraform apply -auto-approve
+        if: ${{ github.ref == 'refs/heads/master' }}
+
+  terraform-services:
+    environment: demogcp
+    runs-on: ubuntu-latest
+    needs: terraform-core
+    defaults:
+      run:
+        shell: bash
+        working-directory: ./terraform/services
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Authenticate to GCP
+        uses: 'google-github-actions/auth@v2'
+        with:
+          credentials_json: '${{ secrets.SERVICE_ACCOUNT_KEY }}'
+
+      # Needed for authenticating with GKE cluster
+      - name: 'Set up Cloud SDK'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'Install gcloud GKE auth plugin'
+        run: gcloud components install gke-gcloud-auth-plugin
+
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: "1.7.5"
+
+      - name: Terraform Init
+        id: init
+        run: terraform init
+
+      - name: Terraform Validate
+        id: validate
+        # Run even if a previous step failed
+        if: success() || failure()
+        run: terraform validate
+
+      - name: Terraform Plan
+        id: plan
+        run: terraform plan
+
+
+      - name: Terraform Apply
+        id: apply
+        run: terraform apply -auto-approve
+        if: ${{ github.ref == 'refs/heads/master' }}
+
+
+  terraform-application:
+    environment: demogcp
+    runs-on: ubuntu-latest
+    needs: terraform-services
+    defaults:
+      run:
+        shell: bash
+        working-directory: ./terraform/application
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Authenticate to GCP
+        uses: 'google-github-actions/auth@v2'
+        with:
+          credentials_json: '${{ secrets.SERVICE_ACCOUNT_KEY }}'
+
+      # Needed for authenticating with GKE cluster
+      - name: 'Set up Cloud SDK'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'Install gcloud GKE auth plugin'
+        run: gcloud components install gke-gcloud-auth-plugin
+
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: "1.7.5"
+
+      - name: Terraform Init
+        id: init
+        run: terraform init
+
+      - name: Terraform Validate
+        id: validate
+        # Run even if a previous step failed
+        if: success() || failure()
+        run: terraform validate
+
+      - name: Terraform Plan
+        id: plan
+        run: terraform plan
+
+
+      - name: Terraform Apply
+        id: apply
+        run: terraform apply -auto-approve
+        if: ${{ github.ref == 'refs/heads/master' }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2df3e00
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+#ignore testing files
+**/*.test
+#don't check in terraform dependencies
+**/.terraform
\ No newline at end of file
diff --git a/README.md b/README.md
index 35975cf..d1ef897 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,114 @@
 # k8s-demo-prod-infra
-Kubernetes cluster infra
+Terraform plans for deploying and configuring a GKE cluster, installing shared services, and deploying GCP infrastructure to support the production application ("demoapp").
-
-Features
-- Separation of concerns -- limit blast radius (core/services)
-- Reusable code -- parameterized/generalized; can be used to provision additional environments
-
-Assumptions
-- Env types: Prod, QA, Dev -- represented by the environment labels `prd`, `qal`, and `dev` respectively
-- State bucket `k8s-test-tfstate-u518zm` created outside of the TF plan
-- `google_container_node_pool.node_config.oauth_scopes` would be appropiately narrowed to follow PoLP in actual prod; currently grants GCP SA access to all APIs
-- TLS certs manually created via certbot CLI -- in prod, we'd automate their creation with certbot on k8s and GCP Cloud DNS01 challenge
\ No newline at end of file
+
+
+## Features
+- **Separation of Concerns**
+  - Limits "blast radius" by grouping infrastructure so that any given change touches as few components as possible
+- **Reusability**
+  - The Terraform plans are parameterized so that they can easily be used to deploy additional environments (e.g. nonprod)
+- **Secure and Transparent**
+  - Deployment is performed via a dedicated service account, access to which is tightly controlled via GCP IAM
+  - Deployments are performed only by this service account via GHA, making all changes to infrastructure visible and auditable via git history
+- **Observability and Alerting**
+  - Basic logging has been configured at the cluster infra and k8s level (both for the control plane and workloads)
+  - A proof-of-concept alert has been configured via Terraform to fire when cluster node CPU utilization crosses a threshold
+
+## Structure & Usage
+
+### Repo Structure
+
+```
+.
+└── terraform
+    ├── core           GKE cluster and related GCP resources
+    ├── services       Shared Kubernetes services
+    └── application    Production application GCP resources
+```
+
+Below is a description of what each plan does:
+
+- **core**
+  - Sets up basic Kubernetes infrastructure in GCP
+  - Deploys the following:
+    - GCP VPC and subnets
+    - GKE cluster
+    - GKE nodepool (defined separately for easier management)
+    - GCP Service Account for cluster access (see [TODO](#todo) below)
+- **services**
+  - Sets up shared services on the k8s cluster
+  - Deploys the following:
+    - ArgoCD
+    - Ingress Nginx controller
+    - kube-prometheus stack, including Prometheus and Grafana (not used, just an example of where this sort of service would go)
+    - Namespaces for the above applications
+    - DNS records pointing to the nginx ingresses for the above applications
+- **application**
+  - Sets up infrastructure to support the application
+  - Deploys the following:
+    - Namespace for the "demoapp" application
+    - DNS record pointing to the nginx ingress for the "demoapp" application
+
+
+### Usage
+
+The three plans described above are intended to be run in a specific order:
+
+1. `core`
+2. `services`
+3. `application`
+
+The GHA pipeline enforces this order via job dependencies (see Fig. 1 below).
+
+The `core` plan statefile is referenced by a `terraform_remote_state` data source in both `services` and `application` in order to retrieve cluster information required for making changes to the cluster (see Fig. 2 below).
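+
+For example, the excerpt below (from `terraform/application/data.tf` and `providers.tf`, abridged -- the `exec` plugin block is omitted; `services/data.tf` contains the same data source) shows the `core` state being read and fed into the Kubernetes provider:
+
+```hcl
+data "terraform_remote_state" "core" {
+  backend = "gcs"
+  config = {
+    bucket = "k8s-test-tfstate-c74f3a"
+    prefix = "env1/core/"
+  }
+}
+
+provider "kubernetes" {
+  host                   = "https://${data.terraform_remote_state.core.outputs.cluster_endpoint}"
+  token                  = data.google_client_config.provider.access_token
+  cluster_ca_certificate = base64decode(data.terraform_remote_state.core.outputs.cluster_ca_certificate)
+}
+```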
+
+
+Figure 1:
+![Figure 1](./docs/kdpi-gha-flow.png)
+
+Figure 2:
+![Figure 2](./docs/kdpi-tf-flow.png)
+
+
+
+### Monitoring and Alerting
+
+GCP Logging has been enabled for the GKE cluster infrastructure and the Kubernetes cluster, including both the control plane and the workloads. Logs can be viewed via the GCP console (see Fig. 3 below). A simple alert has been configured as a proof of concept to notify when cluster node CPU utilization exceeds 80% (see Fig. 4 below).
+
+Figure 3:
+[![Figure 3](./docs/gke_logging_t.png)](./docs/gke_logging.png)
+
+
+Figure 4:
+[![Figure 4](./docs/gke_alerting_t.png)](./docs/gke_alerting.png)
+
+
+
+## Additional Context
+
+### Assumptions
+- Naming convention: Prod, QA, and Dev are represented by the environment labels `prd`, `qal`, and `dev` respectively
+- Existing infrastructure (created outside of these plans):
+  - State bucket `k8s-test-tfstate-c74f3a`
+  - Service account `gha-access` for programmatic access from GHA
+  - Nginx Ingress TLS cert secrets manually created via the certbot CLI
+- GCP alert notification channel already exists
+
+### TODO
+- General
+  - Reconfigure networking so that internal services (e.g. Grafana) are only available on the private network
+- CI/CD
+  - Configure GCP OIDC provider so that GHA does not have to store a GCP SA service key
+  - Configure GHA pipeline to treat this repo as a monorepo using a cascading Terraform apply (see the sketch at the end of this README):
+    - `core` modified: `core -> services -> application`
+    - `services` modified: `services -> application`
+    - `application` modified: `application`
+  - Configure GHA pipeline to allow Terraform to apply the `application` plan after changes are made to the application code repo
+- Core
+  - Narrow `google_container_node_pool.node_config.oauth_scopes` in accordance with PoLP in prod; currently grants the GCP SA access to all APIs
+  - Parameterize GKE cluster config for horizontal/vertical cluster scaling
+- Services
+  - Automate creation and renewal of TLS certs with certbot (using the DNS01 challenge on GCP Cloud DNS)
+  - Add further IaC for ArgoCD configuration (project CRDs, etc.)
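+
+A minimal sketch of the cascading trigger for the `services` case, using standard GHA `paths` filters and `needs:` chaining (illustrative only -- not yet implemented, and the job bodies are placeholders rather than the real steps in `main.yml`):
+
+```yaml
+on:
+  push:
+    paths:
+      - 'terraform/services/**' # run only when the services plan changes
+
+jobs:
+  terraform-services:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "terraform init/plan/apply for services" # placeholder
+  terraform-application:
+    needs: terraform-services # the cascade is expressed via job dependencies
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "terraform init/plan/apply for application"
+```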
diff --git a/docs/gke_alerting.png b/docs/gke_alerting.png new file mode 100644 index 0000000..2242d98 Binary files /dev/null and b/docs/gke_alerting.png differ diff --git a/docs/gke_alerting_t.png b/docs/gke_alerting_t.png new file mode 100644 index 0000000..f86501d Binary files /dev/null and b/docs/gke_alerting_t.png differ diff --git a/docs/gke_logging.png b/docs/gke_logging.png new file mode 100644 index 0000000..e2f402a Binary files /dev/null and b/docs/gke_logging.png differ diff --git a/docs/gke_logging_t.png b/docs/gke_logging_t.png new file mode 100644 index 0000000..cad7045 Binary files /dev/null and b/docs/gke_logging_t.png differ diff --git a/docs/kdpi-gha-flow.excalidraw b/docs/kdpi-gha-flow.excalidraw new file mode 100644 index 0000000..f8cc00f --- /dev/null +++ b/docs/kdpi-gha-flow.excalidraw @@ -0,0 +1,533 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "a7UATJB5f2dIDfLx0-pC9", + "type": "rectangle", + "x": 457.23046875, + "y": 300.375, + "width": 240.53515625, + "height": 86.77343750000001, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a0", + "roundness": { + "type": 3 + }, + "seed": 132009924, + "version": 345, + "versionNonce": 686833348, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "qnBD7hzjSMzTaP-es8xsR" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + } + ], + "updated": 1727939263542, + "link": null, + "locked": false + }, + { + "id": "qnBD7hzjSMzTaP-es8xsR", + "type": "text", + "x": 558.3880601525307, + "y": 330.26171875, + "width": 38.21997344493866, + "height": 27, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a0V", + "roundness": null, + "seed": 1367381244, + "version": 175, + "versionNonce": 937916284, + "isDeleted": false, + "boundElements": null, + "updated": 1727939256075, + "link": null, + "locked": false, + "text": "core", + "fontSize": 20, + "fontFamily": 6, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "a7UATJB5f2dIDfLx0-pC9", + "originalText": "core", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "id": "Sp2cLqd1qw9Ym2pNHKfXE", + "type": "text", + "x": 466.8984375, + "y": 234.8046875, + "width": 630.08203125, + "height": 27, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a1", + "roundness": null, + "seed": 796275780, + "version": 229, + "versionNonce": 821079236, + "isDeleted": false, + "boundElements": null, + "updated": 1727939266275, + "link": null, + "locked": false, + "text": "GHA Pipeline Flow", + "fontSize": 20, + "fontFamily": 6, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "GHA Pipeline Flow", + "autoResize": false, + "lineHeight": 1.35 + }, + { + "type": "rectangle", + "version": 417, + "versionNonce": 825192060, + "index": "a2", + "isDeleted": false, + "id": "RNGdmJrw4ZJfZZ4iVmqMP", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 795.419921875, + 
"y": 300.23046875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "width": 240.53515625, + "height": 86.77343750000001, + "seed": 1853361276, + "groupIds": [], + "frameId": null, + "roundness": { + "type": 3 + }, + "boundElements": [ + { + "type": "text", + "id": "TLx7y0Wipmn1PHwPKJuTv" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + }, + { + "id": "bFdQUDe6VaOJZ74CIJM-P", + "type": "arrow" + } + ], + "updated": 1727939261877, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 258, + "versionNonce": 2005043524, + "index": "a3", + "isDeleted": false, + "id": "TLx7y0Wipmn1PHwPKJuTv", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 879.2975318729877, + "y": 330.1171875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 72.7799362540245, + "height": 27, + "seed": 1094262012, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939261108, + "link": null, + "locked": false, + "fontSize": 20, + "fontFamily": 6, + "text": "services", + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "RNGdmJrw4ZJfZZ4iVmqMP", + "originalText": "services", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "type": "rectangle", + "version": 573, + "versionNonce": 60504956, + "index": "a4", + "isDeleted": false, + "id": "sMMz0J30NV8-6UKSb_K4A", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1139.048828125, + "y": 300.07421875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#ffec99", + "width": 240.76171874999997, + "height": 86.77343750000001, + "seed": 809752772, + "groupIds": [], + "frameId": null, + "roundness": { + "type": 3 + }, + "boundElements": [ + { + "type": "text", + "id": "ms0vP584eU48Lz_ID42bB" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + }, + { + "id": "bFdQUDe6VaOJZ74CIJM-P", + "type": "arrow" + } + ], + "updated": 1727939259986, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 421, + "versionNonce": 279798724, + "index": "a5", + "isDeleted": false, + "id": "ms0vP584eU48Lz_ID42bB", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1209.6897430419922, + "y": 329.9609375, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 99.47988891601562, + "height": 27, + "seed": 2136219716, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939258808, + "link": null, + "locked": false, + "fontSize": 20, + "fontFamily": 6, + "text": "application", + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "sMMz0J30NV8-6UKSb_K4A", + "originalText": "application", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "type": "arrow", + "version": 510, + "versionNonce": 1013498052, + "index": "a7", + "isDeleted": false, + "id": "42l7cmU_K_UguBDN7gIwJ", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 702.765625, + "y": 343.66171875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 87.654296875, + "height": 0.14453125, + "seed": 607699652, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939261108, + "link": null, + "locked": false, + "startBinding": { + "elementId": 
"a7UATJB5f2dIDfLx0-pC9", + "focus": -0.0023048527955348715, + "gap": 5, + "fixedPoint": [ + 1.0207869821524271, + 0.4988475736022325 + ] + }, + "endBinding": { + "elementId": "RNGdmJrw4ZJfZZ4iVmqMP", + "focus": 0.0023048527955348715, + "gap": 5, + "fixedPoint": [ + -0.020786982152427042, + 0.4988475736022325 + ] + }, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": "arrow", + "points": [ + [ + 0, + 0 + ], + [ + 43.8271484375, + 0 + ], + [ + 43.8271484375, + -0.14453125 + ], + [ + 87.654296875, + -0.14453125 + ] + ], + "elbowed": true + }, + { + "id": "4uZMdstfPIPCfmJtmt-2l", + "type": "image", + "x": 467.44921875, + "y": 309.375, + "width": 29.730468750000007, + "height": 29.730468750000007, + "angle": 0, + "strokeColor": "transparent", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "aD", + "roundness": null, + "seed": 1364763772, + "version": 184, + "versionNonce": 1185049540, + "isDeleted": false, + "boundElements": null, + "updated": 1727939076557, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + }, + { + "type": "image", + "version": 291, + "versionNonce": 846323396, + "index": "aE", + "isDeleted": false, + "id": "Ug3t5srMXX1Qe2LSV8nX-", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 805.451171875, + "y": 308.603515625, + "strokeColor": "transparent", + "backgroundColor": "#a5d8ff", + "width": 29.730468750000007, + "height": 29.730468750000007, + "seed": 1275415236, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939213891, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + }, + { + "type": "image", + "version": 464, + "versionNonce": 1664707012, + "index": "aF", + "isDeleted": false, + "id": "idecp7V-0mAEuTtVC_DLw", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1147.576171875, + "y": 307.439453125, + "strokeColor": "transparent", + "backgroundColor": "#a5d8ff", + "width": 29.730468750000007, + "height": 29.730468750000007, + "seed": 1278493308, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939251409, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + }, + { + "type": "arrow", + "version": 686, + "versionNonce": 110889028, + "index": "aH", + "isDeleted": false, + "id": "bFdQUDe6VaOJZ74CIJM-P", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1040.955078125, + "y": 343.5171875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 93.09375, + "height": 0.15625, + "seed": 18922876, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939261108, + "link": null, + "locked": false, + "startBinding": { + "elementId": "RNGdmJrw4ZJfZZ4iVmqMP", + "focus": -0.0023048527955348715, + "gap": 5, + "fixedPoint": [ + 1.0207869821524271, + 0.4988475736022325 + ] + }, + "endBinding": { + "elementId": "sMMz0J30NV8-6UKSb_K4A", + "focus": 0.0023048527955348715, + "gap": 5, + 
"fixedPoint": [ + -0.020767421108136612, + 0.4988475736022325 + ] + }, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": "arrow", + "points": [ + [ + 0, + 0 + ], + [ + 46.546875, + 0 + ], + [ + 46.546875, + -0.15625 + ], + [ + 93.09375, + -0.15625 + ] + ], + "elbowed": true + } + ], + "appState": { + "gridSize": 20, + "gridStep": 5, + "gridModeEnabled": false, + "viewBackgroundColor": "#ffffff" + }, + "files": { + "701e71a9d2562a9de30388fb936a06539f331521": { + "mimeType": "image/svg+xml", + "id": "701e71a9d2562a9de30388fb936a06539f331521", + "dataURL": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxMjggMTI4IiB3aWR0aD0iMTI4IiBoZWlnaHQ9IjEyOCI+PGcgZmlsbC1ydWxlPSJldmVub2RkIj48cGF0aCBkPSJNNzcuOTQxIDQ0LjV2MzYuODM2TDQ2LjMyNCA2Mi45MThWMjYuMDgyem0wIDAiIGZpbGw9IiM1YzRlZTUiLz48cGF0aCBkPSJNODEuNDEgODEuMzM2bDMxLjYzMy0xOC40MThWMjYuMDgyTDgxLjQxIDQ0LjV6bTAgMCIgZmlsbD0iIzQwNDBiMiIvPjxwYXRoIGQ9Ik0xMS4yNDIgNDIuMzZMNDIuODYgNjAuNzc2VjIzLjk0MUwxMS4yNDIgNS41MjN6bTAgME03Ny45NDEgODUuMzc1TDQ2LjMyNCA2Ni45NTd2MzYuODJsMzEuNjE3IDE4LjQxOHptMCAwIiBmaWxsPSIjNWM0ZWU1Ii8+PC9nPjwvc3ZnPg==", + "created": 1727939065485, + "lastRetrieved": 1727939065485 + } + } +} \ No newline at end of file diff --git a/docs/kdpi-gha-flow.png b/docs/kdpi-gha-flow.png new file mode 100644 index 0000000..f38bfd2 Binary files /dev/null and b/docs/kdpi-gha-flow.png differ diff --git a/docs/kdpi-tf-flow.excalidraw b/docs/kdpi-tf-flow.excalidraw new file mode 100644 index 0000000..4f46b4b --- /dev/null +++ b/docs/kdpi-tf-flow.excalidraw @@ -0,0 +1,570 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "a7UATJB5f2dIDfLx0-pC9", + "type": "rectangle", + "x": 457.23046875, + "y": 300.8359375, + "width": 240.53515625, + "height": 86.77343750000001, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a0", + "roundness": { + "type": 3 + }, + "seed": 132009924, + "version": 340, + "versionNonce": 1627185348, + "isDeleted": false, + "boundElements": [ + { + "type": "text", + "id": "qnBD7hzjSMzTaP-es8xsR" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + }, + { + "id": "5sFFRV_LnciQG-St8NdnH", + "type": "arrow" + } + ], + "updated": 1727938827457, + "link": null, + "locked": false + }, + { + "id": "qnBD7hzjSMzTaP-es8xsR", + "type": "text", + "x": 558.3880601525307, + "y": 330.72265625, + "width": 38.21997344493866, + "height": 27, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a0V", + "roundness": null, + "seed": 1367381244, + "version": 173, + "versionNonce": 1135355972, + "isDeleted": false, + "boundElements": null, + "updated": 1727938827457, + "link": null, + "locked": false, + "text": "core", + "fontSize": 20, + "fontFamily": 6, + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "a7UATJB5f2dIDfLx0-pC9", + "originalText": "core", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "id": "Sp2cLqd1qw9Ym2pNHKfXE", + "type": "text", + "x": 457.23046875, + "y": 201.796875, + "width": 630.08203125, + "height": 27, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + 
"strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a1", + "roundness": null, + "seed": 796275780, + "version": 177, + "versionNonce": 572465604, + "isDeleted": false, + "boundElements": null, + "updated": 1727938878324, + "link": null, + "locked": false, + "text": "Terraform Data Flow", + "fontSize": 20, + "fontFamily": 6, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "Terraform Data Flow", + "autoResize": false, + "lineHeight": 1.35 + }, + { + "type": "rectangle", + "version": 295, + "versionNonce": 511786564, + "index": "a2", + "isDeleted": false, + "id": "RNGdmJrw4ZJfZZ4iVmqMP", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1054.986328125, + "y": 240.4375, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 240.53515625, + "height": 86.77343750000001, + "seed": 1853361276, + "groupIds": [], + "frameId": null, + "roundness": { + "type": 3 + }, + "boundElements": [ + { + "type": "text", + "id": "TLx7y0Wipmn1PHwPKJuTv" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + }, + { + "id": "5sFFRV_LnciQG-St8NdnH", + "type": "arrow" + } + ], + "updated": 1727938817790, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 140, + "versionNonce": 1539888580, + "index": "a3", + "isDeleted": false, + "id": "TLx7y0Wipmn1PHwPKJuTv", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1138.8639381229877, + "y": 270.32421875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 72.7799362540245, + "height": 27, + "seed": 1094262012, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727938817790, + "link": null, + "locked": false, + "fontSize": 20, + "fontFamily": 6, + "text": "services", + "textAlign": "center", + "verticalAlign": "middle", + "containerId": "RNGdmJrw4ZJfZZ4iVmqMP", + "originalText": "services", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "type": "rectangle", + "version": 364, + "versionNonce": 639186116, + "index": "a4", + "isDeleted": false, + "id": "sMMz0J30NV8-6UKSb_K4A", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1052.888671875, + "y": 373.9375, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 240.76171874999997, + "height": 86.77343750000001, + "seed": 809752772, + "groupIds": [], + "frameId": null, + "roundness": { + "type": 3 + }, + "boundElements": [ + { + "type": "text", + "id": "ms0vP584eU48Lz_ID42bB" + }, + { + "id": "42l7cmU_K_UguBDN7gIwJ", + "type": "arrow" + } + ], + "updated": 1727938817790, + "link": null, + "locked": false + }, + { + "type": "text", + "version": 215, + "versionNonce": 1243479108, + "index": "a5", + "isDeleted": false, + "id": "ms0vP584eU48Lz_ID42bB", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1123.5295867919922, + "y": 403.82421875, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 99.47988891601562, + "height": 27, + "seed": 2136219716, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727938817790, + "link": null, + "locked": false, + "fontSize": 20, + "fontFamily": 6, + "text": "application", + "textAlign": "center", + 
"verticalAlign": "middle", + "containerId": "sMMz0J30NV8-6UKSb_K4A", + "originalText": "application", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "id": "5sFFRV_LnciQG-St8NdnH", + "type": "arrow", + "x": 702.765625, + "y": 344.12265625, + "width": 347.220703125, + "height": 60.3984375, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a6", + "roundness": null, + "seed": 393798212, + "version": 315, + "versionNonce": 1579804484, + "isDeleted": false, + "boundElements": null, + "updated": 1727938827457, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + 173.6103515625, + 0 + ], + [ + 173.6103515625, + -60.3984375 + ], + [ + 347.220703125, + -60.3984375 + ] + ], + "lastCommittedPoint": null, + "startBinding": { + "elementId": "a7UATJB5f2dIDfLx0-pC9", + "focus": -0.002304852795534871, + "gap": 5, + "fixedPoint": [ + 1.0207869821524271, + 0.4988475736022325 + ] + }, + "endBinding": { + "elementId": "RNGdmJrw4ZJfZZ4iVmqMP", + "focus": 0.002304852795534871, + "gap": 5, + "fixedPoint": [ + -0.020786982152427042, + 0.4988475736022325 + ] + }, + "startArrowhead": null, + "endArrowhead": "arrow", + "elbowed": true + }, + { + "type": "arrow", + "version": 375, + "versionNonce": 805732292, + "index": "a7", + "isDeleted": false, + "id": "42l7cmU_K_UguBDN7gIwJ", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 702.765625, + "y": 344.12265625, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "width": 345.123046875, + "height": 73.1015625, + "seed": 607699652, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727938827457, + "link": null, + "locked": false, + "startBinding": { + "elementId": "a7UATJB5f2dIDfLx0-pC9", + "focus": -0.0023048527955348715, + "gap": 5, + "fixedPoint": [ + 1.0207869821524271, + 0.4988475736022325 + ] + }, + "endBinding": { + "elementId": "sMMz0J30NV8-6UKSb_K4A", + "focus": 0.0023048527955348715, + "gap": 5, + "fixedPoint": [ + -0.020767421108136612, + 0.4988475736022325 + ] + }, + "lastCommittedPoint": null, + "startArrowhead": null, + "endArrowhead": "arrow", + "points": [ + [ + 0, + 0 + ], + [ + 172.5615234375, + 0 + ], + [ + 172.5615234375, + 73.1015625 + ], + [ + 345.123046875, + 73.1015625 + ] + ], + "elbowed": true + }, + { + "id": "1sqCpEmARpHqrPbhtyCgO", + "type": "text", + "x": 708.6640625, + "y": 354.91015625, + "width": 161.7597974538803, + "height": 81, + "angle": 0, + "strokeColor": "#1e1e1e", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "a9", + "roundness": null, + "seed": 1293657412, + "version": 156, + "versionNonce": 880866940, + "isDeleted": false, + "boundElements": null, + "updated": 1727938841565, + "link": null, + "locked": false, + "text": "- cluster CA cert\n- cluster endpoint\n", + "fontSize": 20, + "fontFamily": 6, + "textAlign": "left", + "verticalAlign": "top", + "containerId": null, + "originalText": "- cluster CA cert\n- cluster endpoint\n", + "autoResize": true, + "lineHeight": 1.35 + }, + { + "id": "4uZMdstfPIPCfmJtmt-2l", + "type": "image", + "x": 467.44921875, + "y": 309.375, + "width": 29.730468750000007, + "height": 29.730468750000007, + "angle": 0, + "strokeColor": 
"transparent", + "backgroundColor": "#a5d8ff", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "frameId": null, + "index": "aD", + "roundness": null, + "seed": 1364763772, + "version": 184, + "versionNonce": 1185049540, + "isDeleted": false, + "boundElements": null, + "updated": 1727939076557, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + }, + { + "type": "image", + "version": 246, + "versionNonce": 1618833788, + "index": "aE", + "isDeleted": false, + "id": "Ug3t5srMXX1Qe2LSV8nX-", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1063.537109375, + "y": 250.119140625, + "strokeColor": "transparent", + "backgroundColor": "#a5d8ff", + "width": 29.730468750000007, + "height": 29.730468750000007, + "seed": 1275415236, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939079924, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + }, + { + "type": "image", + "version": 291, + "versionNonce": 711931332, + "index": "aF", + "isDeleted": false, + "id": "idecp7V-0mAEuTtVC_DLw", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "angle": 0, + "x": 1063.099609375, + "y": 385.462890625, + "strokeColor": "transparent", + "backgroundColor": "#a5d8ff", + "width": 29.730468750000007, + "height": 29.730468750000007, + "seed": 1278493308, + "groupIds": [], + "frameId": null, + "roundness": null, + "boundElements": [], + "updated": 1727939082907, + "link": null, + "locked": false, + "status": "saved", + "fileId": "701e71a9d2562a9de30388fb936a06539f331521", + "scale": [ + 1, + 1 + ] + } + ], + "appState": { + "gridSize": 20, + "gridStep": 5, + "gridModeEnabled": false, + "viewBackgroundColor": "#ffffff" + }, + "files": { + "701e71a9d2562a9de30388fb936a06539f331521": { + "mimeType": "image/svg+xml", + "id": "701e71a9d2562a9de30388fb936a06539f331521", + "dataURL": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxMjggMTI4IiB3aWR0aD0iMTI4IiBoZWlnaHQ9IjEyOCI+PGcgZmlsbC1ydWxlPSJldmVub2RkIj48cGF0aCBkPSJNNzcuOTQxIDQ0LjV2MzYuODM2TDQ2LjMyNCA2Mi45MThWMjYuMDgyem0wIDAiIGZpbGw9IiM1YzRlZTUiLz48cGF0aCBkPSJNODEuNDEgODEuMzM2bDMxLjYzMy0xOC40MThWMjYuMDgyTDgxLjQxIDQ0LjV6bTAgMCIgZmlsbD0iIzQwNDBiMiIvPjxwYXRoIGQ9Ik0xMS4yNDIgNDIuMzZMNDIuODYgNjAuNzc2VjIzLjk0MUwxMS4yNDIgNS41MjN6bTAgME03Ny45NDEgODUuMzc1TDQ2LjMyNCA2Ni45NTd2MzYuODJsMzEuNjE3IDE4LjQxOHptMCAwIiBmaWxsPSIjNWM0ZWU1Ii8+PC9nPjwvc3ZnPg==", + "created": 1727939065485, + "lastRetrieved": 1727939065485 + } + } +} \ No newline at end of file diff --git a/docs/kdpi-tf-flow.png b/docs/kdpi-tf-flow.png new file mode 100644 index 0000000..0640f14 Binary files /dev/null and b/docs/kdpi-tf-flow.png differ diff --git a/terraform/application/.terraform.lock.hcl b/terraform/application/.terraform.lock.hcl new file mode 100644 index 0000000..bd93b45 --- /dev/null +++ b/terraform/application/.terraform.lock.hcl @@ -0,0 +1,59 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/google" { + version = "6.5.0" + hashes = [ + "h1:qKHzN9B+9uOjHDXxanQJanBdsd0bzCP3egUjIcyYxSY=", + "zh:14101a38e880d4a1ef14c0a24476b008a3b577540a260a246a471bcfb5c31f4c", + "zh:478a7b0810956d39843e785262ab8162324a7412c3f6cf1ceb43977e2c05f22e", + "zh:6c9b583abcbaa2093b1b55494ac08851bd3364919fe86850a9c3e8f6c46851d4", + "zh:7c400eb5488221ba7ea48725ab43db1464cefd96cb29a24e63fe1950666b465f", + "zh:82931b2c186403753356a73878d36efc209c9e5ae46d0b609bb7ca38aece931d", + "zh:87e7966ef7067de3684f658251cdede057be419bbfeaaad935ab6f501024046a", + "zh:a2f4aaa3b9260732a53f78c8053eb2cbcee2abf11d3d245c58f3065423ad30ab", + "zh:bbc4c3ca9d51287e77130fc95880792007dd919b9b5396433f9eed737119c6c3", + "zh:edcda54d37be1b8d4cbe029e30df6a228e0be3887831b892c11536502d87e840", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f95d92ca2ac527442f6767a217b8a557ba6b2d1915c39efba412382e986e4c3e", + "zh:f96148c3742126219b810a687551284460a8d78aa66efbfd2c58880f48049dda", + ] +} + +provider "registry.terraform.io/hashicorp/helm" { + version = "2.15.0" + hashes = [ + "h1:VymvscRkDy0+zN2uKpKYY6njXPY8JROARuaL3VPsEos=", + "zh:18b94c7c83c30ad166722a61a412e3de6a67935772960e79aaa24c15f8ea0d0f", + "zh:4f07c929a71e8169f7471b7600bfcca36dfb295787e975e82ac0455a3ab68b47", + "zh:776b804a14c3c4ae6075b12176f81c1f1987214ee1cae4a542599389591cde11", + "zh:7c11e3adbe9bd26e88484dcdbd28c473ce3a5c58950a3e3c4f0a2caee225b845", + "zh:821e1a53415df0ae4ed523f098360d367a95d6ce3872ba841f22adfdd2f97664", + "zh:94c06e483f75a11c3f139c41b3f64b51a96d1d1485e7d1fd3c0f795e2e750945", + "zh:aa2040de0b8150ef40222a965445ec40e3df2997ffde1fb062ab4c226689115e", + "zh:ad73eebeffe20228656567963477d034b9ed3d1bd2075c1c81150def4927d810", + "zh:b77450a36807f3ad1d3ae736d1d165a94fa26f476504a280e9fb2ccb89f648d0", + "zh:d2ebd3c34c50c92106ce2df25d5598f47127dc7c60172b9e2fe56ac73dc863a8", + "zh:e565995e2614df5ddde75a743a674129288fb91669596a7b0b2580fa7ed49979", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.32.0" + hashes = [ + "h1:3j4XBR5UWQA7xXaiEnzZp0bHbcwOhWetHYKTWIrUTI0=", + "zh:0e715d7fb13a8ad569a5fdc937b488590633f6942e986196fdb17cd7b8f7720e", + "zh:495fc23acfe508ed981e60af9a3758218b0967993065e10a297fdbc210874974", + "zh:4b930a8619910ef528bc90dae739cb4236b9b76ce41367281e3bc3cf586101c7", + "zh:5344405fde7b1febf0734052052268ee24e7220818155702907d9ece1c0697c7", + "zh:92ee11e8c23bbac3536df7b124456407f35c6c2468bc0dbab15c3fc9f414bd0e", + "zh:a45488fe8d5bb59c49380f398da5d109a4ac02ebc10824567dabb87f6102fda8", + "zh:a4a0b57cf719a4c91f642436882b7bea24d659c08a5b6f4214ce4fe6a0204caa", + "zh:b7a27a6d11ba956a2d7b0f7389a46ec857ebe46ae3aeee537250e66cac15bf03", + "zh:bf94ce389028b686bfa70a90f536e81bb776c5c20ab70138bbe5c3d0a04c4253", + "zh:d965b2608da0212e26a65a0b3f33c5baae46cbe839196be15d93f70061516908", + "zh:f441fc793d03057a17af8bdca8b26d54916645bc5c148f54e22a54ed39089e83", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/terraform/application/data.tf b/terraform/application/data.tf new file mode 100644 index 0000000..3f2ff71 --- /dev/null +++ b/terraform/application/data.tf @@ -0,0 +1,23 @@ +data "terraform_remote_state" "core" { + backend = "gcs" + config = { + bucket = "k8s-test-tfstate-c74f3a" + prefix = "env1/core/" + } +} + +data "google_client_config" "provider" {} + +data "kubernetes_ingress_v1" "demoapp_ingress" { + count = local.app_deployed ? 
1 : 0
+
+  metadata {
+    name      = "demoapp-ingress"
+    namespace = "demoapp"
+  }
+}
+
+# Assumes consistent DNS zone naming scheme: foo.bar.baz -> foo-bar-baz
+data "google_dns_managed_zone" "this" {
+  name = replace(var.primary_domain, ".", "-")
+}
\ No newline at end of file
diff --git a/terraform/application/main.tf b/terraform/application/main.tf
new file mode 100644
index 0000000..80dea7d
--- /dev/null
+++ b/terraform/application/main.tf
@@ -0,0 +1,24 @@
+locals {
+  app_deployed = true # set to true once the app has been deployed via ArgoCD; false until then
+}
+
+resource "kubernetes_namespace" "demoapp" {
+  metadata {
+    name = "demoapp"
+  }
+}
+
+
+resource "google_dns_record_set" "demoapp" {
+  count = local.app_deployed ? 1 : 0
+
+  name = "demoapp.${var.primary_domain}."
+  type = "A"
+  ttl  = 300
+
+  managed_zone = data.google_dns_managed_zone.this.name
+
+  rrdatas = [
+    data.kubernetes_ingress_v1.demoapp_ingress[0].status[0].load_balancer[0].ingress[0].ip
+  ]
+}
\ No newline at end of file
diff --git a/terraform/application/providers.tf b/terraform/application/providers.tf
new file mode 100644
index 0000000..0b9b70d
--- /dev/null
+++ b/terraform/application/providers.tf
@@ -0,0 +1,47 @@
+provider "google" {
+  project = "demoproj-437500"
+  region  = "asia-northeast1"
+}
+
+terraform {
+  backend "gcs" {
+    bucket = "k8s-test-tfstate-c74f3a"
+    prefix = "env1/application/"
+  }
+}
+
+# Retrieve an access token as the Terraform runner
+provider "kubernetes" {
+  host                   = "https://${data.terraform_remote_state.core.outputs.cluster_endpoint}"
+  token                  = data.google_client_config.provider.access_token
+  cluster_ca_certificate = base64decode(data.terraform_remote_state.core.outputs.cluster_ca_certificate)
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "gke-gcloud-auth-plugin"
+  }
+}
+
+# For k8s custom resources deployed with kubectl_manifest (because kubernetes_manifest does not work well with CRDs)
+/*
+provider "kubectl" {
+  host                   = "https://${data.terraform_remote_state.core.outputs.cluster_endpoint}"
+  cluster_ca_certificate = base64decode(data.terraform_remote_state.core.outputs.cluster_ca_certificate)
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "gke-gcloud-auth-plugin"
+  }
+  load_config_file = false
+}
+*/
+
+provider "helm" {
+  kubernetes {
+    host                   = "https://${data.terraform_remote_state.core.outputs.cluster_endpoint}"
+    cluster_ca_certificate = base64decode(data.terraform_remote_state.core.outputs.cluster_ca_certificate)
+    exec {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "gke-gcloud-auth-plugin"
+    }
+  }
+}
+
diff --git a/terraform/application/terraform.tfvars b/terraform/application/terraform.tfvars
new file mode 100644
index 0000000..c98992a
--- /dev/null
+++ b/terraform/application/terraform.tfvars
@@ -0,0 +1 @@
+primary_domain = "gcp.jtreutel.io"
\ No newline at end of file
diff --git a/terraform/application/variables.tf b/terraform/application/variables.tf
new file mode 100644
index 0000000..3c21a45
--- /dev/null
+++ b/terraform/application/variables.tf
@@ -0,0 +1,4 @@
+variable "primary_domain" {
+  description = "DNS domain for this cluster"
+  type        = string
+}
diff --git a/terraform/core/.terraform.lock.hcl b/terraform/core/.terraform.lock.hcl
new file mode 100644
index 0000000..e117c1f
--- /dev/null
+++ b/terraform/core/.terraform.lock.hcl
@@ -0,0 +1,21 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+ +provider "registry.terraform.io/hashicorp/google" { + version = "6.5.0" + hashes = [ + "h1:qKHzN9B+9uOjHDXxanQJanBdsd0bzCP3egUjIcyYxSY=", + "zh:14101a38e880d4a1ef14c0a24476b008a3b577540a260a246a471bcfb5c31f4c", + "zh:478a7b0810956d39843e785262ab8162324a7412c3f6cf1ceb43977e2c05f22e", + "zh:6c9b583abcbaa2093b1b55494ac08851bd3364919fe86850a9c3e8f6c46851d4", + "zh:7c400eb5488221ba7ea48725ab43db1464cefd96cb29a24e63fe1950666b465f", + "zh:82931b2c186403753356a73878d36efc209c9e5ae46d0b609bb7ca38aece931d", + "zh:87e7966ef7067de3684f658251cdede057be419bbfeaaad935ab6f501024046a", + "zh:a2f4aaa3b9260732a53f78c8053eb2cbcee2abf11d3d245c58f3065423ad30ab", + "zh:bbc4c3ca9d51287e77130fc95880792007dd919b9b5396433f9eed737119c6c3", + "zh:edcda54d37be1b8d4cbe029e30df6a228e0be3887831b892c11536502d87e840", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f95d92ca2ac527442f6767a217b8a557ba6b2d1915c39efba412382e986e4c3e", + "zh:f96148c3742126219b810a687551284460a8d78aa66efbfd2c58880f48049dda", + ] +} diff --git a/terraform/core/main.tf b/terraform/core/main.tf new file mode 100644 index 0000000..3d471a0 --- /dev/null +++ b/terraform/core/main.tf @@ -0,0 +1,153 @@ +#------------------------------------------------------------------------------- +# GKE Cluster and Networking +#------------------------------------------------------------------------------- + +resource "google_compute_network" "this" { + name = "${var.identifier}-${var.environment}-network" + + auto_create_subnetworks = true +} + +resource "google_compute_subnetwork" "this" { + name = "${var.identifier}-${var.environment}-subnetwork" + + ip_cidr_range = "10.0.0.0/16" + region = var.gcp_region + + stack_type = "IPV4_ONLY" + + network = google_compute_network.this.id + + # Don't remove the secondary IP ranges added automatically by the cluster + lifecycle { + ignore_changes = [ + secondary_ip_range + ] + } +} + +resource "google_container_cluster" "this" { + name = "${var.identifier}-${var.environment}-cluster" + location = var.gcp_region + + remove_default_node_pool = true + initial_node_count = 1 + + network = google_compute_network.this.id + subnetwork = google_compute_subnetwork.this.id + + datapath_provider = "ADVANCED_DATAPATH" + + monitoring_config { + enable_components = [ + "SYSTEM_COMPONENTS", + "APISERVER", + "SCHEDULER", + "CONTROLLER_MANAGER", + "STORAGE", + "HPA", + "POD", + "DAEMONSET", + "DEPLOYMENT", + "STATEFULSET", + "KUBELET", + "CADVISOR" + ] + } + + logging_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + + + # Set deletion protection if this is a production environment + deletion_protection = contains(["prd", "qal"], var.environment) ? true : false +} + +resource "google_container_node_pool" "primary" { + name = "${var.identifier}-${var.environment}-pool-primary" + location = var.gcp_region + cluster = google_container_cluster.this.name + node_count = 1 + + # >1 and in multiple zones + autoscaling { + min_node_count = 2 + max_node_count = 4 + + location_policy = "BALANCED" + } + + node_config { + preemptible = false # we don't want VMs to be interrupted + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+    service_account = google_service_account.gke_access.email
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/cloud-platform",
+      "https://www.googleapis.com/auth/userinfo.email",
+      "https://www.googleapis.com/auth/monitoring.write",
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring"
+    ]
+  }
+}
+
+
+
+#-------------------------------------------------------------------------------
+# Supporting IAM resources
+#-------------------------------------------------------------------------------
+
+resource "google_service_account" "gke_access" {
+  account_id   = "${var.identifier}-${var.environment}-access"
+  display_name = "Service Account for accessing ${var.identifier}-${var.environment} GKE cluster"
+}
+
+# Grant the GKE service account its required IAM roles (monitoring, logging, and token creation for cluster access)
+resource "google_project_iam_member" "service_account_token_creator" {
+  for_each = var.gke_sa_roles
+
+  project = var.gcp_project
+  role    = each.value
+  member  = "serviceAccount:${google_service_account.gke_access.email}"
+}
+
+
+
+
+#-------------------------------------------------------------------------------
+# Monitoring and Alerting
+#-------------------------------------------------------------------------------
+
+resource "google_monitoring_alert_policy" "gke_node_cpu_utilization" {
+  display_name = "Demoapp GKE Node CPU Utilization"
+  combiner     = "OR"
+
+  notification_channels = [
+    "projects/demoproj-437500/notificationChannels/1196374165928758087" #TODO: Add resource to TF
+  ]
+  severity = "WARNING"
+
+  conditions {
+    display_name = "Kubernetes Node - CPU allocatable utilization"
+    condition_threshold {
+      filter          = "resource.type = \"k8s_node\" AND resource.labels.cluster_name = \"demoapp-prd-cluster\" AND metric.type = \"kubernetes.io/node/cpu/allocatable_utilization\""
+      duration        = "60s"
+      comparison      = "COMPARISON_GT"
+      threshold_value = 0.8
+      aggregations {
+        alignment_period   = "300s"
+        per_series_aligner = "ALIGN_MEAN"
+      }
+      trigger {
+        count   = 1
+        percent = 0
+      }
+    }
+  }
+  documentation {
+    subject = "Demoapp GKE Node CPU Utilization > 80%"
+  }
+}
\ No newline at end of file
diff --git a/terraform/core/outputs.tf b/terraform/core/outputs.tf
new file mode 100644
index 0000000..13e209f
--- /dev/null
+++ b/terraform/core/outputs.tf
@@ -0,0 +1,11 @@
+output "cluster_endpoint" {
+  value = google_container_cluster.this.endpoint
+}
+
+output "cluster_ca_certificate" {
+  value = google_container_cluster.this.master_auth[0].cluster_ca_certificate
+}
+
+output "cluster_access_service_account_email" {
+  value = google_service_account.gke_access.email
+}
\ No newline at end of file
diff --git a/terraform/core/providers.tf b/terraform/core/providers.tf
new file mode 100644
index 0000000..1ae9b04
--- /dev/null
+++ b/terraform/core/providers.tf
@@ -0,0 +1,11 @@
+provider "google" {
+  project = "demoproj-437500"
+  region  = "asia-northeast1"
+}
+
+terraform {
+  backend "gcs" {
+    bucket = "k8s-test-tfstate-c74f3a"
+    prefix = "env1/core/"
+  }
+}
\ No newline at end of file
diff --git a/terraform/core/terraform.tfvars b/terraform/core/terraform.tfvars
new file mode 100644
index 0000000..1829019
--- /dev/null
+++ b/terraform/core/terraform.tfvars
@@ -0,0 +1,4 @@
+identifier  = "demoapp"         #unique ID
+environment = "prd"             #environment
+gcp_region  = "asia-northeast1" #GCP region, chosen for geographic proximity
+gcp_project = "demoproj-437500"
\ No newline at end of file
diff --git a/terraform/core/variables.tf b/terraform/core/variables.tf
new file mode 100644
index 0000000..9e71dc4
--- /dev/null
+++ 
b/terraform/core/variables.tf @@ -0,0 +1,31 @@ +variable "identifier" { + description = "A unique identifier for this environment." + type = string +} + +variable "environment" { + description = "An environment signifier. Accepted values: dev|qal|prd" + type = string +} + +variable "gcp_region" { + description = "GCP region in which to deploy" + type = string +} + +variable "gcp_project" { + description = "GCP project in which to deploy" + type = string +} + +variable "gke_sa_roles" { + description = "List of roles to be provided for GKE SA" + type = set(string) + default = [ + "roles/monitoring.viewer", + "roles/monitoring.metricWriter", + "roles/logging.logWriter", + "roles/stackdriver.resourceMetadata.writer", + "roles/iam.serviceAccountTokenCreator" + ] +} \ No newline at end of file diff --git a/terraform/services/.terraform.lock.hcl b/terraform/services/.terraform.lock.hcl new file mode 100644 index 0000000..bd93b45 --- /dev/null +++ b/terraform/services/.terraform.lock.hcl @@ -0,0 +1,59 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "6.5.0" + hashes = [ + "h1:qKHzN9B+9uOjHDXxanQJanBdsd0bzCP3egUjIcyYxSY=", + "zh:14101a38e880d4a1ef14c0a24476b008a3b577540a260a246a471bcfb5c31f4c", + "zh:478a7b0810956d39843e785262ab8162324a7412c3f6cf1ceb43977e2c05f22e", + "zh:6c9b583abcbaa2093b1b55494ac08851bd3364919fe86850a9c3e8f6c46851d4", + "zh:7c400eb5488221ba7ea48725ab43db1464cefd96cb29a24e63fe1950666b465f", + "zh:82931b2c186403753356a73878d36efc209c9e5ae46d0b609bb7ca38aece931d", + "zh:87e7966ef7067de3684f658251cdede057be419bbfeaaad935ab6f501024046a", + "zh:a2f4aaa3b9260732a53f78c8053eb2cbcee2abf11d3d245c58f3065423ad30ab", + "zh:bbc4c3ca9d51287e77130fc95880792007dd919b9b5396433f9eed737119c6c3", + "zh:edcda54d37be1b8d4cbe029e30df6a228e0be3887831b892c11536502d87e840", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f95d92ca2ac527442f6767a217b8a557ba6b2d1915c39efba412382e986e4c3e", + "zh:f96148c3742126219b810a687551284460a8d78aa66efbfd2c58880f48049dda", + ] +} + +provider "registry.terraform.io/hashicorp/helm" { + version = "2.15.0" + hashes = [ + "h1:VymvscRkDy0+zN2uKpKYY6njXPY8JROARuaL3VPsEos=", + "zh:18b94c7c83c30ad166722a61a412e3de6a67935772960e79aaa24c15f8ea0d0f", + "zh:4f07c929a71e8169f7471b7600bfcca36dfb295787e975e82ac0455a3ab68b47", + "zh:776b804a14c3c4ae6075b12176f81c1f1987214ee1cae4a542599389591cde11", + "zh:7c11e3adbe9bd26e88484dcdbd28c473ce3a5c58950a3e3c4f0a2caee225b845", + "zh:821e1a53415df0ae4ed523f098360d367a95d6ce3872ba841f22adfdd2f97664", + "zh:94c06e483f75a11c3f139c41b3f64b51a96d1d1485e7d1fd3c0f795e2e750945", + "zh:aa2040de0b8150ef40222a965445ec40e3df2997ffde1fb062ab4c226689115e", + "zh:ad73eebeffe20228656567963477d034b9ed3d1bd2075c1c81150def4927d810", + "zh:b77450a36807f3ad1d3ae736d1d165a94fa26f476504a280e9fb2ccb89f648d0", + "zh:d2ebd3c34c50c92106ce2df25d5598f47127dc7c60172b9e2fe56ac73dc863a8", + "zh:e565995e2614df5ddde75a743a674129288fb91669596a7b0b2580fa7ed49979", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.32.0" + hashes = [ + "h1:3j4XBR5UWQA7xXaiEnzZp0bHbcwOhWetHYKTWIrUTI0=", + "zh:0e715d7fb13a8ad569a5fdc937b488590633f6942e986196fdb17cd7b8f7720e", + "zh:495fc23acfe508ed981e60af9a3758218b0967993065e10a297fdbc210874974", + "zh:4b930a8619910ef528bc90dae739cb4236b9b76ce41367281e3bc3cf586101c7", + 
"zh:5344405fde7b1febf0734052052268ee24e7220818155702907d9ece1c0697c7", + "zh:92ee11e8c23bbac3536df7b124456407f35c6c2468bc0dbab15c3fc9f414bd0e", + "zh:a45488fe8d5bb59c49380f398da5d109a4ac02ebc10824567dabb87f6102fda8", + "zh:a4a0b57cf719a4c91f642436882b7bea24d659c08a5b6f4214ce4fe6a0204caa", + "zh:b7a27a6d11ba956a2d7b0f7389a46ec857ebe46ae3aeee537250e66cac15bf03", + "zh:bf94ce389028b686bfa70a90f536e81bb776c5c20ab70138bbe5c3d0a04c4253", + "zh:d965b2608da0212e26a65a0b3f33c5baae46cbe839196be15d93f70061516908", + "zh:f441fc793d03057a17af8bdca8b26d54916645bc5c148f54e22a54ed39089e83", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/terraform/services/data.tf b/terraform/services/data.tf new file mode 100644 index 0000000..73a5447 --- /dev/null +++ b/terraform/services/data.tf @@ -0,0 +1,43 @@ +data "terraform_remote_state" "core" { + backend = "gcs" + config = { + bucket = "k8s-test-tfstate-c74f3a" + prefix = "env1/core/" + } +} + +data "google_client_config" "provider" {} + +#Generates a token for accessing the k8s cluster +#data "google_service_account_access_token" "my_kubernetes_sa" { +# target_service_account = data.terraform_remote_state.core.outputs.cluster_access_service_account_email +# scopes = ["userinfo-email", "cloud-platform"] +# lifetime = "3600s" +#} + +data "kubernetes_ingress_v1" "grafana_ingress" { + metadata { + name = "kube-prometheus-stack-grafana" + namespace = kubernetes_namespace.prometheus.id + } + + depends_on = [ + helm_release.kube_prometheus_stack + ] +} + +data "kubernetes_ingress_v1" "argocd_ingress" { + metadata { + name = "argo-cd-argocd-server" + namespace = kubernetes_namespace.argocd.id + } + + depends_on = [ + helm_release.argo_cd + ] +} + +# Assumes consistent DNS zone naming scheme: foo.bar.baz -> foo-bar-baz +data "google_dns_managed_zone" "this" { + name = replace(var.primary_domain, ".", "-") +} \ No newline at end of file diff --git a/terraform/services/helm-values/argo_cd.yaml b/terraform/services/helm-values/argo_cd.yaml new file mode 100644 index 0000000..035fac4 --- /dev/null +++ b/terraform/services/helm-values/argo_cd.yaml @@ -0,0 +1,3725 @@ +## Argo CD configuration +## Ref: https://github.com/argoproj/argo-cd +## + +# -- Provide a name in place of `argocd` +nameOverride: argocd +# -- String to fully override `"argo-cd.fullname"` +fullnameOverride: "" +# -- Override the namespace +# @default -- `.Release.Namespace` +namespaceOverride: "" +# -- Override the Kubernetes version, which is used to evaluate certain manifests +kubeVersionOverride: "" +# Override APIVersions +# If you want to template helm charts but cannot access k8s API server +# you can set api versions here +apiVersionOverrides: {} + +# -- Create aggregated roles that extend existing cluster roles to interact with argo-cd resources +## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles +createAggregateRoles: false +# -- Create cluster roles for cluster-wide installation. 
+## Used when you manage applications in the same cluster where Argo CD runs +createClusterRoles: true + +openshift: + # -- enables using arbitrary uid for argo repo server + enabled: false + +## Custom resource configuration +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + # -- Annotations to be added to all CRDs + annotations: {} + # -- Addtional labels to be added to all CRDs + additionalLabels: {} + +## Globally shared configuration +global: + # -- Default domain used by all components + ## Used for ingresses, certificates, SSO, notifications, etc. + domain: argocd.example.com + + # -- Runtime class name for all components + runtimeClassName: "" + + # -- Common labels for the all resources + additionalLabels: {} + # app: argo-cd + + # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected. + revisionHistoryLimit: 3 + + # Default image used by all components + image: + # -- If defined, a repository applied to all Argo CD deployments + repository: quay.io/argoproj/argocd + # -- Overrides the global Argo CD image tag whose default is the chart appVersion + tag: "" + # -- If defined, a imagePullPolicy applied to all Argo CD deployments + imagePullPolicy: IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + imagePullSecrets: [] + + # Default logging options used by all components + logging: + # -- Set the global logging format. Either: `text` or `json` + format: text + # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error` + level: info + + # -- Annotations for the all deployed Statefulsets + statefulsetAnnotations: {} + + # -- Annotations for the all deployed Deployments + deploymentAnnotations: {} + + # -- Annotations for the all deployed pods + podAnnotations: {} + + # -- Labels for the all deployed pods + podLabels: {} + + # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors. + addPrometheusAnnotations: false + + # -- Toggle and define pod-level security context. + # @default -- `{}` (See [values.yaml]) + securityContext: {} + # runAsUser: 999 + # runAsGroup: 999 + # fsGroup: 999 + + # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files + hostAliases: [] + # - ip: 10.20.30.40 + # hostnames: + # - git.myhostname + + # Configure dual-stack used by all component services + dualStack: + # -- IP family policy to configure dual-stack see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services) + ipFamilyPolicy: "" + # -- IP families that should be supported and the order in which they should be applied to ClusterIP as well. Can be IPv4 and/or IPv6. + ipFamilies: [] + + # Default network policy rules used by all components + networkPolicy: + # -- Create NetworkPolicy objects for all components + create: false + # -- Default deny all ingress traffic + defaultDenyIngress: false + + # -- Default priority class for all components + priorityClassName: "" + + # -- Default node selector for all components + nodeSelector: {} + + # -- Default tolerations for all components + tolerations: [] + + # Default affinity preset for all components + affinity: + # -- Default pod anti-affinity rules. Either: `none`, `soft` or `hard` + podAntiAffinity: soft + # Node affinity rules + nodeAffinity: + # -- Default node affinity rules. 
Either: `none`, `soft` or `hard` + type: hard + # -- Default match expressions for node affinity + matchExpressions: [] + # - key: topology.kubernetes.io/zone + # operator: In + # values: + # - antarctica-east1 + # - antarctica-west1 + + # -- Default [TopologySpreadConstraints] rules for all components + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector of the component + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Deployment strategy for all deployed Deployments + deploymentStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + + # -- Environment variables to pass to all deployed Deployments + env: [] + + # -- Annotations for all deployed Certificates + certificateAnnotations: {} + +## Argo Configs +configs: + # General Argo CD configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + cm: + # -- Create the argocd-cm configmap for [declarative setup] + create: true + + # -- Annotations to be added to argocd-cm configmap + annotations: {} + + # -- The name of tracking label used by Argo CD for resource pruning + application.instanceLabelKey: argocd.argoproj.io/instance + + # -- Enable logs RBAC enforcement + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement + server.rbac.log.enforce.enable: false + + # -- Enable exec feature in Argo UI + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource + exec.enabled: false + + # -- Enable local admin user + ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user + admin.enabled: true + + # -- Timeout to discover if a new manifest version got published to the repository + timeout.reconciliation: 180s + + # -- Timeout to refresh application data as well as target manifests cache + timeout.hard.reconciliation: 0s + + # -- Enable Status Badge + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/status-badge/ + statusbadge.enabled: false + + # Dex configuration + # dex.config: | + # connectors: + # # GitHub example + # - type: github + # id: github + # name: GitHub + # config: + # clientID: aabbccddeeff00112233 + # clientSecret: $dex.github.clientSecret # Alternatively $<some_K8S_secret>:dex.github.clientSecret + # orgs: + # - name: your-github-org + + # OIDC configuration as an alternative to dex (optional). + # oidc.config: | + # name: AzureAD + # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 + # clientID: CLIENT_ID + # clientSecret: $oidc.azuread.clientSecret + # rootCA: | + # -----BEGIN CERTIFICATE----- + # ... encoded certificate data here ...
+ # -----END CERTIFICATE----- + # requestedIDTokenClaims: + # groups: + # essential: true + # requestedScopes: + # - openid + # - profile + # - email + + # Extension Configuration + ## Ref: https://argo-cd.readthedocs.io/en/latest/developer-guide/extensions/proxy-extensions/ + # extension.config: | + # extensions: + # - name: httpbin + # backend: + # connectionTimeout: 2s + # keepAlive: 15s + # idleConnectionTimeout: 60s + # maxIdleConnections: 30 + # services: + # - url: http://httpbin.org + # headers: + # - name: some-header + # value: '$some.argocd.secret.key' + # cluster: + # name: some-cluster + # server: https://some-cluster + + # Argo CD configuration parameters + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml + params: + # -- Create the argocd-cmd-params-cm configmap + # If false, it is expected the configmap will be created by something else. + create: true + + # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap + annotations: {} + + ## Generic parameters + # -- Open-Telemetry collector address: (e.g. "otel-collector:4317") + otlp.address: '' + + ## Controller Properties + # -- Number of application status processors + controller.status.processors: 20 + # -- Number of application operation processors + controller.operation.processors: 10 + # -- Specifies timeout between application self heal attempts + controller.self.heal.timeout.seconds: 5 + # -- Repo server RPC call timeout seconds. + controller.repo.server.timeout.seconds: 60 + + ## Server properties + # -- Run server without TLS + ## NOTE: This value should be set when you generate params by other means as it changes ports used by ingress template. + server.insecure: true + # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / + server.basehref: / + # -- Used if Argo CD is running behind reverse proxy under subpath different from / + server.rootpath: '' + # -- Directory path that contains additional static assets + server.staticassets: /shared/app + # -- Disable Argo CD RBAC for user authentication + server.disable.auth: false + # -- Enable GZIP compression + server.enable.gzip: true + # -- Enable proxy extension feature. (proxy extension is in Alpha phase) + server.enable.proxy.extension: false + # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "". + server.x.frame.options: sameorigin + + ## Repo-server properties + # -- Limit on number of concurrent manifests generate requests. Any value less than 1 means no limit. + reposerver.parallelism.limit: 0 + + ## ApplicationSet Properties + # -- Modify how application is synced between the generator and the cluster. One of: `sync`, `create-only`, `create-update`, `create-delete` + applicationsetcontroller.policy: sync + # -- Enables use of the Progressive Syncs capability + applicationsetcontroller.enable.progressive.syncs: false + # -- A list of glob patterns specifying where to look for ApplicationSet resources. (e.g. `"argocd,argocd-appsets-*"`) + # @default -- `""` (default is only the ns where the controller is installed) + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Appset-Any-Namespace/ + applicationsetcontroller.namespaces: "" + + # -- Enables [Applications in any namespace] + ## List of additional namespaces where applications may be created in and reconciled from. + ## The namespace where Argo CD is installed to will always be allowed.
+ ## Set comma-separated list. (e.g. app-team-one, app-team-two) + application.namespaces: "" + + # -- JQ Path expression timeout + ## By default, the evaluation of a JQPathExpression is limited to one second. + ## If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression + ## that requires more time to evaluate, you can extend the timeout period. + controller.ignore.normalizer.jq.timeout: "1s" + + # Argo CD RBAC policy configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md + rbac: + # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions. + # If false, it is expected the configmap will be created by something else. + # Argo CD will not work if there is no configmap created with the name above. + create: true + + # -- Annotations to be added to argocd-rbac-cm configmap + annotations: {} + + # -- The name of the default role which Argo CD falls back to when authorizing API requests (optional). + # If omitted or empty, users may still be able to log in, but will see no apps, projects, etc... + policy.default: '' + + # -- File containing user-defined policies and role definitions. + # @default -- `''` (See [values.yaml]) + policy.csv: '' + # Policy rules are in the form: + # p, subject, resource, action, object, effect + # Role definitions and bindings are in the form: + # g, subject, inherited-subject + # policy.csv: | + # p, role:org-admin, applications, *, */*, allow + # p, role:org-admin, clusters, get, *, allow + # p, role:org-admin, repositories, *, *, allow + # p, role:org-admin, logs, get, *, allow + # p, role:org-admin, exec, create, */*, allow + # g, your-github-org:your-team, role:org-admin + + # -- OIDC scopes to examine during RBAC enforcement (in addition to `sub` scope). + # The scope value can be a string, or a list of strings. + scopes: "[groups]" + + # -- Matcher function for Casbin, `glob` for glob matcher and `regex` for regex matcher. + policy.matchMode: "glob" + + # GnuPG public keys for commit verification + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/gpg-verification/ + gpg: + # -- Annotations to be added to argocd-gpg-keys-cm configmap + annotations: {} + + # -- [GnuPG] public keys to add to the keyring + # @default -- `{}` (See [values.yaml]) + ## Note: Public keys should be exported with `gpg --export --armor <KeyID>` + keys: {} + # 4AEE18F83AFDEB23: | + # -----BEGIN PGP PUBLIC KEY BLOCK----- + # ... + # -----END PGP PUBLIC KEY BLOCK----- + + # SSH known hosts for Git repositories + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#ssh-known-host-public-keys + ssh: + # -- Annotations to be added to argocd-ssh-known-hosts-cm configmap + annotations: {} + + # -- Known hosts to be added to the known host list by default.
+ # @default -- See [values.yaml] + knownHosts: | + [ssh.github.com]:443 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + [ssh.github.com]:443 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + [ssh.github.com]:443 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE= + bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M= + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + + # -- Additional known hosts for private repositories + extraHosts: '' + + # Repository TLS certificates + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories-using-self-signed-tls-certificates-or-are-signed-by-custom-ca + tls: + # -- Annotations to be added to argocd-tls-certs-cm configmap + annotations: {} + + # -- TLS certificates for Git repositories + # @default -- `{}` (See [values.yaml]) + certificates: {} + # server.example.com: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + + # ConfigMap for Config Management Plugins + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/config-management-plugins/ + cmp: + # -- Create the argocd-cmp-cm configmap + create: false + + # -- Annotations to be added to argocd-cmp-cm configmap + annotations: {} + + # -- Plugin yaml files to be added to argocd-cmp-cm + plugins: {} + # --- First plugin + # my-plugin: + # init: + # command: [sh] + # args: [-c, 'echo "Initializing..."'] + # generate: + # command: [sh, -c] + # args: + # - | + # echo "{\"kind\": \"ConfigMap\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"$ARGOCD_APP_NAME\", \"namespace\": \"$ARGOCD_APP_NAMESPACE\", \"annotations\": {\"Foo\": \"$ARGOCD_ENV_FOO\", \"KubeVersion\": \"$KUBE_VERSION\", \"KubeApiVersion\": \"$KUBE_API_VERSIONS\",\"Bar\": \"baz\"}}}" + # discover: + # fileName: "./subdir/s*.yaml" + # find: + # glob: "**/Chart.yaml" + # command: [sh, -c, find . -name env.yaml] + + # --- Second plugin + # my-plugin2: + # init: + # command: [sh] + # args: [-c, 'echo "Initializing..."'] + # generate: + # command: [sh, -c] + # args: + # - | + # echo "{\"kind\": \"ConfigMap\", \"apiVersion\": \"v1\", \"metadata\": { \"name\": \"$ARGOCD_APP_NAME\", \"namespace\": \"$ARGOCD_APP_NAMESPACE\", \"annotations\": {\"Foo\": \"$ARGOCD_ENV_FOO\", \"KubeVersion\": \"$KUBE_VERSION\", \"KubeApiVersion\": \"$KUBE_API_VERSIONS\",\"Bar\": \"baz\"}}}" + # discover: + # fileName: "./subdir/s*.yaml" + # find: + # glob: "**/Chart.yaml" + # command: [sh, -c, find . 
-name env.yaml] + + # -- Provide one or multiple [external cluster credentials] + # @default -- `{}` (See [values.yaml]) + ## Ref: + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials + ## - https://argo-cd.readthedocs.io/en/stable/user-guide/projects/#project-scoped-repositories-and-clusters + clusterCredentials: {} + # mycluster: + # server: https://mycluster.example.com + # labels: {} + # annotations: {} + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # mycluster2: + # server: https://mycluster2.example.com + # labels: {} + # annotations: {} + # namespaces: namespace1,namespace2 + # clusterResources: true + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # mycluster3-project-scoped: + # server: https://mycluster3.example.com + # labels: {} + # annotations: {} + # project: my-project1 + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # mycluster4-sharded: + # shard: 1 + # server: https://mycluster4.example.com + # labels: {} + # annotations: {} + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + + # -- Repository credentials to be used as Templates for other repos + ## Creates a secret for each key/value specified below to create repository credentials + credentialTemplates: {} + # github-enterprise-creds-1: + # url: https://github.com/argoproj + # githubAppID: 1 + # githubAppInstallationID: 2 + # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 + # githubAppPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + # https-creds: + # url: https://github.com/argoproj + # password: my-password + # username: my-username + # ssh-creds: + # url: git@github.com:argoproj-labs + # sshPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + + # -- Annotations to be added to `configs.credentialTemplates` Secret + credentialTemplatesAnnotations: {} + + # -- Repositories list to be used by applications + ## Creates a secret for each key/value specified below to create repositories + ## Note: the last example in the list would use a repository credential template, configured under "configs.credentialTemplates". 
+ repositories: {} + # istio-helm-repo: + # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts + # name: istio.io + # type: helm + # private-helm-repo: + # url: https://my-private-chart-repo.internal + # name: private-repo + # type: helm + # password: my-password + # username: my-username + # private-repo: + # url: https://github.com/argoproj/private-repo + + # -- Annotations to be added to `configs.repositories` Secret + repositoriesAnnotations: {} + + # Argo CD sensitive data + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + secret: + # -- Create the argocd-secret + createSecret: true + # -- Labels to be added to argocd-secret + labels: {} + # -- Annotations to be added to argocd-secret + annotations: {} + + # -- Shared secret for authenticating GitHub webhook events + githubSecret: "" + # -- Shared secret for authenticating GitLab webhook events + gitlabSecret: "" + # -- Shared secret for authenticating BitbucketServer webhook events + bitbucketServerSecret: "" + # -- UUID for authenticating Bitbucket webhook events + bitbucketUUID: "" + # -- Shared secret for authenticating Gogs webhook events + gogsSecret: "" + ## Azure DevOps + azureDevops: + # -- Shared secret username for authenticating Azure DevOps webhook events + username: "" + # -- Shared secret password for authenticating Azure DevOps webhook events + password: "" + + # -- add additional secrets to be added to argocd-secret + ## Custom secrets. Useful for injecting SSO secrets into environment variables. + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + ## Note that all values must be non-empty. + extra: + {} + # LDAP_PASSWORD: "mypassword" + + # -- Bcrypt hashed admin password + ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with + ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` + argocdServerAdminPassword: "" + # -- Admin password modification time. Eg. `"2006-01-02T15:04:05Z"` + # @default -- `""` (defaults to current time) + argocdServerAdminPasswordMtime: "" + + # -- Define custom [CSS styles] for your argo instance. + # This setting will automatically mount the provided CSS and reference it in the argo configuration. + # @default -- `""` (See [values.yaml]) + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/ + styles: "" + # styles: | + # .sidebar { + # background: linear-gradient(to bottom, #999, #777, #333, #222, #111); + # } + +# -- Array of extra K8s manifests to deploy +## Note: Supports use of custom Helm templates +extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: argocd-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "argocd" + # objectType: "secretsmanager" + # jmesPath: + # - path: "client_id" + # objectAlias: "client_id" + # - path: "client_secret" + # objectAlias: "client_secret" + # secretObjects: + # - data: + # - key: client_id + # objectName: client_id + # - key: client_secret + # objectName: client_secret + # secretName: argocd-secrets-store + # type: Opaque + # labels: + # app.kubernetes.io/part-of: argocd + +## Application controller +controller: + # -- Application controller name string + name: application-controller + + # -- The number of application controller pods to run. 
+ # Additional replicas will cause sharding of managed clusters across number of replicas. + ## With dynamic cluster distribution turned on, sharding of the clusters will gracefully + ## rebalance if the number of replicas changes or one becomes unhealthy. (alpha) + replicas: 1 + + # -- Enable dynamic cluster distribution (alpha) + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution + ## This is done using a deployment instead of a statefulSet + ## When replicas are added or removed, the sharding algorithm is re-run to ensure that the + ## clusters are distributed according to the algorithm. If the algorithm is well-balanced, + ## like round-robin, then the shards will be well-balanced. + dynamicClusterDistribution: false + + # -- Runtime class name for the application controller + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + # -- Application controller heartbeat time + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution/#working-of-dynamic-distribution + heartbeatTime: 10 + + # -- Maximum number of controller revisions that will be maintained in StatefulSet history + revisionHistoryLimit: 5 + + ## Application controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the application controller + enabled: false + # -- Labels to be added to application controller pdb + labels: {} + # -- Annotations to be added to application controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `controller.pdb.minAvailable` + maxUnavailable: "" + + ## Application controller image + image: + # -- Repository to use for the application controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the application controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the application controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to application controller + extraArgs: [] + + # -- Environment variables to pass to application controller + env: [] + + # -- envFrom to pass to application controller + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the application controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the application controller pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the server pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + ## Note: Supports use of custom Helm templates + initContainers: [] + # - name: download-tools + # image: alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the application controller main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the application controller pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + ## Application controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for application controller + # @default -- `""` (defaults not set if not specified i.e. 
no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + + # -- Annotations for the application controller StatefulSet + statefulsetAnnotations: {} + + # -- Annotations for the application controller Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to application controller pods + podAnnotations: {} + + # -- Labels to be added to application controller pods + podLabels: {} + + # -- Resource limits and requests for the application controller pods + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # Application controller container ports + containerPorts: + # -- Metrics container port + metrics: 8082 + + # -- Host Network for application controller pods + hostNetwork: false + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for application controller pods + dnsPolicy: "ClusterFirst" + + # -- Application controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # Readiness probe for application controller + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- Priority class for the application controller pods + # @default -- `""` (defaults to global.priorityClassName) + priorityClassName: "" + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the application controller + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Automount API credentials for the Service Account into the pod. 
+ automountServiceAccountToken: true + + serviceAccount: + # -- Create a service account for the application controller + create: true + # -- Service account name + name: argocd-application-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + ## Application controller metrics configuration + metrics: + # -- Deploy metrics service + enabled: false + # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value, in which case the latter is used. + scrapeTimeout: "" + applicationLabels: + # -- Enables additional labels in argocd_app_labels metric + enabled: false + # -- Additional labels + labels: [] + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8082 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + rules: + # -- Deploy a PrometheusRule for the application controller + enabled: false + # -- PrometheusRule namespace + namespace: "" # "monitoring" + # -- PrometheusRule selector + selector: {} + # prometheus: kube-prometheus + + # -- PrometheusRule labels + additionalLabels: {} + # -- PrometheusRule annotations + annotations: {} + + # -- PrometheusRule.Spec for the application controller + spec: [] + # - alert: ArgoAppMissing + # expr: | + # absent(argocd_app_info) == 1 + # for: 15m + # labels: + # severity: critical + # annotations: + # summary: "[Argo CD] No reported applications" + # description: > + # Argo CD has not reported any applications data for the past 15 minutes which + # means that it must be down or not functioning properly. This needs to be + # resolved for this cloud to continue to maintain state. + # - alert: ArgoAppNotSynced + # expr: | + # argocd_app_info{sync_status!="Synced"} == 1 + # for: 12h + # labels: + # severity: warning + # annotations: + # summary: "[{{`{{$labels.name}}`}}] Application not synchronized" + # description: > + # The application [{{`{{$labels.name}}`}}] has not been synchronized for over + # 12 hours which means that the state of this cloud has drifted away from the + # state inside Git. + + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource.
+ ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the application controller's ClusterRole resource + enabled: false + # -- List of custom rules for the application controller's ClusterRole resource + rules: [] + +## Dex +dex: + # -- Enable dex + enabled: true + # -- Dex name + name: dex-server + + # -- Additional command line arguments to pass to the Dex server + extraArgs: [] + + # -- Runtime class name for Dex + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Dex Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Dex server + enabled: false + # -- Labels to be added to Dex server pdb + labels: {} + # -- Annotations to be added to Dex server pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `dex.pdb.minAvailable` + maxUnavailable: "" + + ## Dex image + image: + # -- Dex image repository + repository: ghcr.io/dexidp/dex + # -- Dex image tag + tag: v2.38.0 + # -- Dex imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # Argo CD init image that creates Dex config + initImage: + # -- Argo CD init image repository + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Argo CD init image tag + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Argo CD init image imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + # -- Argo CD init image resources + # @default -- `{}` (defaults to dex.resources) + resources: {} + # requests: + # cpu: 5m + # memory: 96Mi + # limits: + # cpu: 10m + # memory: 144Mi + + # -- Environment variables to pass to the Dex server + env: [] + + # -- envFrom to pass to the Dex server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the dex pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the dex pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- Additional volumeMounts to the dex main container + volumeMounts: [] + + # -- Additional volumes to the dex pod + volumes: [] + + ## Dex server emptyDir volumes + emptyDir: + # -- EmptyDir size limit for Dex server + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + + # TLS certificate configuration via Secret + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server + ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart Dex automatically without extra controllers. + certificateSecret: + # -- Create argocd-dex-server-tls secret + enabled: false + # -- Labels to be added to argocd-dex-server-tls secret + labels: {} + # -- Annotations to be added to argocd-dex-server-tls secret + annotations: {} + # -- Certificate authority. Required for self-signed certificates. + ca: '' + # -- Certificate private key + key: '' + # -- Certificate data.
Must contain SANs of Dex service (ie: argocd-dex-server, argocd-dex-server.argo-cd.svc) + crt: '' + + # -- Annotations to be added to the Dex server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Dex server pods + podAnnotations: {} + + # -- Labels to be added to the Dex server pods + podLabels: {} + + # -- Resource limits and requests for dex + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Dex container ports + # NOTE: These ports are currently hardcoded and cannot be changed + containerPorts: + # -- HTTP container port + http: 5556 + # -- gRPC container port + grpc: 5557 + # -- Metrics container port + metrics: 5558 + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for Dex server pods + dnsPolicy: "ClusterFirst" + + # -- Dex container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for Dex server + ## Supported from Dex >= 2.28.0 + livenessProbe: + # -- Enable Kubernetes liveness probe for Dex >= 2.28.0 + enabled: false + # -- Http path to use for the liveness probe + httpPath: /healthz/live + # -- Http port to use for the liveness probe + httpPort: metrics + # -- Scheme to use for the liveness probe (can be HTTP or HTTPS) + httpScheme: HTTP + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + readinessProbe: + # -- Enable Kubernetes readiness probe for Dex >= 2.28.0 + enabled: false + # -- Http path to use for the readiness probe + httpPath: /healthz/ready + # -- Http port to use for the readiness probe + httpPort: metrics + # -- Scheme to use for the readiness probe (can be HTTP or HTTPS) + httpScheme: HTTP + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- Automount API credentials for the Service Account into the pod.
+ automountServiceAccountToken: true + + serviceAccount: + # -- Create dex service account + create: true + # -- Dex service account name + name: argocd-dex-server + # -- Annotations applied to created service account + annotations: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Service port for HTTP access + servicePortHttp: 5556 + # -- Service port name for HTTP access + servicePortHttpName: http + # -- Service port for gRPC access + servicePortGrpc: 5557 + # -- Service port name for gRPC access + servicePortGrpcName: grpc + # -- Service port for metrics access + servicePortMetrics: 5558 + + # -- Priority class for the dex pods + # @default -- `""` (defaults to global.priorityClassName) + priorityClassName: "" + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to dex + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Deployment strategy to be added to the Dex server Deployment + deploymentStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + + # -- Dex log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- Dex log level. One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + +## Redis +redis: + # -- Enable redis + enabled: true + # -- Redis name + name: redis + + # -- Runtime class name for redis + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + ## Redis Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Redis + enabled: false + # -- Labels to be added to Redis pdb + labels: {} + # -- Annotations to be added to Redis pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `redis.pdb.minAvailable` + maxUnavailable: "" + + ## Redis image + image: + # -- Redis repository + repository: public.ecr.aws/docker/library/redis + # -- Redis tag + tag: 7.2.4-alpine + # -- Redis image pull policy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + ## Prometheus redis-exporter sidecar + exporter: + # -- Enable Prometheus redis-exporter sidecar + enabled: false + # -- Environment variables to pass to the Redis exporter + env: [] + ## Prometheus redis-exporter image + image: + # -- Repository to use for the redis-exporter + repository: public.ecr.aws/bitnami/redis-exporter + # -- Tag to use for the redis-exporter + tag: 1.58.0 + # -- Image pull policy for the redis-exporter + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Redis exporter security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for Redis exporter (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes readiness probe for Redis exporter (optional) + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 30 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 15 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 15 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 5 + livenessProbe: + # -- Enable Kubernetes liveness probe for Redis exporter + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 30 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 15 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 15 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 5 + + # -- Resource limits and requests for redis-exporter sidecar + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to redis-server + extraArgs: [] + # - --bind + # - "0.0.0.0" + + # -- Environment variables to pass to the Redis server + env: [] + + # -- envFrom to pass to the Redis server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + ## Probes for Redis server (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes readiness probe for Redis server + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 30 + # -- How often (in seconds) to
perform the [probe] + periodSeconds: 15 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 15 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 5 + livenessProbe: + # -- Enable Kubernetes liveness probe for Redis server + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 30 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 15 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 15 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 5 + + # -- Additional containers to be added to the redis pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the redis pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- Additional volumeMounts to the redis container + volumeMounts: [] + + # -- Additional volumes to the redis pod + volumes: [] + + # -- Annotations to be added to the Redis server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Redis server pods + podAnnotations: {} + + # -- Labels to be added to the Redis server pods + podLabels: {} + + # -- Resource limits and requests for redis + resources: {} + # limits: + # cpu: 200m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 64Mi + + # -- Redis pod-level security context + # @default -- See [values.yaml] + securityContext: + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault + + # Redis container ports + containerPorts: + # -- Redis container port + redis: 6379 + # -- Metrics container port + metrics: 9121 + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for Redis server pods + dnsPolicy: "ClusterFirst" + + # -- Redis container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + + # -- Redis service port + servicePort: 6379 + + # -- Priority class for redis pods + # @default -- `""` (defaults to global.priorityClassName) + priorityClassName: "" + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to redis + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- Automount API credentials for the Service Account into the pod. 
+ automountServiceAccountToken: true + + serviceAccount: + # -- Create a service account for the redis pod + create: false + # -- Service account name for redis pod + name: "" + # -- Annotations applied to created service account + annotations: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: false + + service: + # -- Redis service annotations + annotations: {} + # -- Additional redis service labels + labels: {} + + metrics: + # -- Deploy metrics service + enabled: false + + # Redis metrics service configuration + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: None + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 9121 + # -- Metrics service port name + portName: http-metrics + + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Interval at which metrics should be scraped + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + +## Redis-HA subchart replaces custom redis deployment when `redis-ha.enabled=true` +# Ref: https://github.com/DandyDeveloper/charts/blob/master/charts/redis-ha/values.yaml +redis-ha: + # -- Enables the Redis HA subchart and disables the custom Redis single node deployment + enabled: false + ## Redis image + image: + # -- Redis repository + repository: public.ecr.aws/docker/library/redis + # -- Redis tag + tag: 7.2.4-alpine + ## Prometheus redis-exporter sidecar + exporter: + # -- Enable Prometheus redis-exporter sidecar + enabled: false + # -- Repository to use for the redis-exporter + image: public.ecr.aws/bitnami/redis-exporter + # -- Tag to use for the redis-exporter + tag: 1.58.0 + persistentVolume: + # -- Configures persistence on Redis nodes + enabled: false + ## Redis specific configuration options + redis: + # -- Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated + masterGroupName: argocd + # -- Any valid redis config options in this section will be applied to each server (see `redis-ha` chart) + # @default -- See [values.yaml] + config: + # -- Will save the DB if both the given number of seconds and the given number of write operations against the DB occurred. `""` is disabled + # @default -- `'""'` + save: '""' + ## Enables a HA Proxy for better LoadBalancing / Sentinel Master support. Automatically proxies to Redis master. + haproxy: + # -- Enabled HAProxy LoadBalancing/Proxy + enabled: true + # -- Custom labels for the haproxy pod. This is relevant for Argo CD CLI. + labels: + app.kubernetes.io/name: argocd-redis-ha-haproxy + metrics: + # -- HAProxy enable prometheus metric scraping + enabled: true + # -- Whether the haproxy pods should be forced to run on separate nodes. + hardAntiAffinity: true + # -- Additional affinities to add to the haproxy pods. 
+ additionalAffinities: {} + # -- Assign custom [affinity] rules to the haproxy pods. + affinity: | + + # -- [Tolerations] for use with node taints for haproxy pods. + tolerations: [] + # -- HAProxy container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + readOnlyRootFilesystem: true + + # -- Configures redis-ha with AUTH + auth: true + # -- Existing Secret to use for redis-ha authentication. + # By default the redis-secret-init Job is generating this Secret. + existingSecret: argocd-redis + + # -- Whether the Redis server pods should be forced to run on separate nodes. + hardAntiAffinity: true + + # -- Additional affinities to add to the Redis server pods. + additionalAffinities: {} + + # -- Assign custom [affinity] rules to the Redis pods. + affinity: | + + # -- [Tolerations] for use with node taints for Redis pods. + tolerations: [] + + # -- Assign custom [TopologySpreadConstraints] rules to the Redis pods. + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: + # -- Enable Redis HA topology spread constraints + enabled: false + # -- Max skew of pods tolerated + # @default -- `""` (defaults to `1`) + maxSkew: "" + # -- Topology key for spread + # @default -- `""` (defaults to `topology.kubernetes.io/zone`) + topologyKey: "" + # -- Enforcement policy, hard or soft + # @default -- `""` (defaults to `ScheduleAnyway`) + whenUnsatisfiable: "" + # -- Redis HA statefulset container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + readOnlyRootFilesystem: true + +# External Redis parameters +externalRedis: + # -- External Redis server host + host: "" + # -- External Redis username + username: "" + # -- External Redis password + password: "" + # -- External Redis server port + port: 6379 + # -- The name of an existing secret with Redis (must contain key `redis-password`) and Sentinel credentials. + # When it's set, the `externalRedis.password` parameter is ignored + existingSecret: "" + # -- External Redis Secret annotations + secretAnnotations: {} + +redisSecretInit: + # -- Enable Redis secret initialization. 
If disabled, secret must be provisioned by alternative methods + enabled: true + # -- Redis secret-init name + name: redis-secret-init + + image: + # -- Repository to use for the Redis secret-init Job + # @default -- `""` (defaults to global.image.repository) + repository: "" # defaults to global.image.repository + # -- Tag to use for the Redis secret-init Job + # @default -- `""` (defaults to global.image.tag) + tag: "" # defaults to global.image.tag + # -- Image pull policy for the Redis secret-init Job + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" # IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Annotations to be added to the Redis secret-init Job + jobAnnotations: {} + + # -- Annotations to be added to the Redis secret-init Job + podAnnotations: {} + + # -- Labels to be added to the Redis secret-init Job + podLabels: {} + + # -- Resource limits and requests for Redis secret-init Job + resources: {} + # limits: + # cpu: 200m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 64Mi + + # -- Application controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # -- Redis secret-init Job pod-level security context + securityContext: {} + + serviceAccount: + # -- Create a service account for the redis pod + create: true + # -- Service account name for redis pod + name: "" + # -- Annotations applied to created service account + annotations: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Priority class for Redis secret-init Job + # @default -- `""` (defaults to global.priorityClassName) + priorityClassName: "" + + # -- Node selector to be added to the Redis secret-init Job + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- Tolerations to be added to the Redis secret-init Job + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + +## Server +server: + # -- Argo CD server name + name: server + + # -- The number of server pods to run + replicas: 1 + + # -- Runtime class name for the Argo CD server + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + ## Argo CD server Horizontal Pod Autoscaler + autoscaling: + # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server + enabled: false + # -- Minimum number of replicas for the Argo CD server [HPA] + minReplicas: 1 + # -- Maximum number of replicas for the Argo CD server [HPA] + maxReplicas: 5 + # -- Average CPU utilization percentage for the Argo CD server [HPA] + targetCPUUtilizationPercentage: 50 + # -- Average memory utilization percentage for the Argo CD server [HPA] + targetMemoryUtilizationPercentage: 50 + # -- Configures the scaling behavior of the target in both Up and Down directions. 
+## Server
+server:
+  # -- Argo CD server name
+  name: server
+
+  # -- The number of server pods to run
+  replicas: 1
+
+  # -- Runtime class name for the Argo CD server
+  # @default -- `""` (defaults to global.runtimeClassName)
+  runtimeClassName: ""
+
+  ## Argo CD server Horizontal Pod Autoscaler
+  autoscaling:
+    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server
+    enabled: false
+    # -- Minimum number of replicas for the Argo CD server [HPA]
+    minReplicas: 1
+    # -- Maximum number of replicas for the Argo CD server [HPA]
+    maxReplicas: 5
+    # -- Average CPU utilization percentage for the Argo CD server [HPA]
+    targetCPUUtilizationPercentage: 50
+    # -- Average memory utilization percentage for the Argo CD server [HPA]
+    targetMemoryUtilizationPercentage: 50
+    # -- Configures the scaling behavior of the target in both Up and Down directions.
+    behavior: {}
+    # scaleDown:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 1
+    #     periodSeconds: 180
+    # scaleUp:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 2
+    #     periodSeconds: 60
+    # -- Configures custom HPA metrics for the Argo CD server
+    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+    metrics: []
+
+  ## Argo CD server Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the Argo CD server
+    enabled: false
+    # -- Labels to be added to Argo CD server pdb
+    labels: {}
+    # -- Annotations to be added to Argo CD server pdb
+    annotations: {}
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Has higher precedence over `server.pdb.minAvailable`
+    maxUnavailable: ""
+
+  ## Argo CD server image
+  image:
+    # -- Repository to use for the Argo CD server
+    # @default -- `""` (defaults to global.image.repository)
+    repository: "" # defaults to global.image.repository
+    # -- Tag to use for the Argo CD server
+    # @default -- `""` (defaults to global.image.tag)
+    tag: "" # defaults to global.image.tag
+    # -- Image pull policy for the Argo CD server
+    # @default -- `""` (defaults to global.image.imagePullPolicy)
+    imagePullPolicy: "" # IfNotPresent
+
+  # -- Secrets with credentials to pull images from a private registry
+  # @default -- `[]` (defaults to global.imagePullSecrets)
+  imagePullSecrets: []
+
+  # -- Additional command line arguments to pass to Argo CD server
+  extraArgs: []
+
+  # -- Environment variables to pass to Argo CD server
+  env: []
+
+  # -- envFrom to pass to Argo CD server
+  # @default -- `[]` (See [values.yaml])
+  envFrom: []
+  # - configMapRef:
+  #     name: config-map-name
+  # - secretRef:
+  #     name: secret-name
+
+  # -- Specify postStart and preStop lifecycle hooks for your argo-cd-server container
+  lifecycle: {}
+
+  ## Argo CD extensions
+  ## This feature is in tech preview stage; expect instability or breaking changes in newer versions.
+  ## Ref: https://github.com/argoproj-labs/argocd-extension-installer
+  ## When you enable extensions, you need to configure RBAC for the logged-in Argo CD user.
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/#the-extensions-resource + extensions: + # -- Enable support for Argo CD extensions + enabled: false + + ## Argo CD extension installer image + image: + # -- Repository to use for extension installer image + repository: "quay.io/argoprojlabs/argocd-extension-installer" + # -- Tag to use for extension installer image + tag: "v0.0.5" + # -- Image pull policy for extensions + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Extensions for Argo CD + # @default -- `[]` (See [values.yaml]) + ## Ref: https://github.com/argoproj-labs/argocd-extension-metrics#install-ui-extension + extensionList: [] + # - name: extension-metrics + # env: + # - name: EXTENSION_URL + # value: https://github.com/argoproj-labs/argocd-extension-metrics/releases/download/v1.0.0/extension.tar.gz + # - name: EXTENSION_CHECKSUM_URL + # value: https://github.com/argoproj-labs/argocd-extension-metrics/releases/download/v1.0.0/extension_checksums.txt + + # -- Server UI extensions container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for the argocd-extensions container + resources: {} + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 64Mi + + # -- Additional containers to be added to the server pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Init containers to add to the server pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the application controller pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + initContainers: [] + # - name: download-tools + # image: alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the server main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the server pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + ## Argo CD server emptyDir volumes + emptyDir: + # -- EmptyDir size limit for the Argo CD server + # @default -- `""` (defaults not set if not specified i.e. 
no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + + # -- Annotations to be added to server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to server pods + podAnnotations: {} + + # -- Labels to be added to server pods + podLabels: {} + + # -- Resource limits and requests for the Argo CD server + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 50m + # memory: 64Mi + + # Server container ports + containerPorts: + # -- Server container port + server: 8080 + # -- Metrics container port + metrics: 8083 + + # -- Host Network for Server pods + hostNetwork: false + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for Server pods + dnsPolicy: "ClusterFirst" + + # -- Server container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + livenessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- Priority class for the Argo CD server pods + # @default -- `""` (defaults to global.priorityClassName) + priorityClassName: "" + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the Argo CD server + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Deployment strategy to be added to the server Deployment + deploymentStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + + # TLS certificate configuration via cert-manager + ## Ref: 
https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+  certificate:
+    # -- Deploy a Certificate resource (requires cert-manager)
+    enabled: false
+    # -- Certificate primary domain (commonName)
+    # @default -- `""` (defaults to global.domain)
+    domain: ""
+    # -- Certificate Subject Alternate Names (SANs)
+    additionalHosts: []
+    # -- The requested 'duration' (i.e. lifetime) of the certificate.
+    # @default -- `""` (defaults to 2160h = 90d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    duration: ""
+    # -- How long before the expiry a certificate should be renewed.
+    # @default -- `""` (defaults to 360h = 15d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    renewBefore: ""
+    # Certificate issuer
+    ## Ref: https://cert-manager.io/docs/concepts/issuer
+    issuer:
+      # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+      group: ""
+      # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+      kind: ""
+      # -- Certificate issuer name. Eg. `letsencrypt`
+      name: ""
+    # Private key of the certificate
+    privateKey:
+      # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+      rotationPolicy: Never
+      # -- The private key cryptography standards (PKCS) encoding for the private key. Either: `PKCS1` or `PKCS8`
+      encoding: PKCS1
+      # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+      algorithm: RSA
+      # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+      size: 2048
+    # -- Annotations to be applied to the Server Certificate
+    annotations: {}
+    # -- Usages for the certificate
+    ### Ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.KeyUsage
+    usages: []
+    # -- Annotations that allow the certificate to be composed from data residing in existing Kubernetes Resources
+    secretTemplateAnnotations: {}
+
+  # TLS certificate configuration via Secret
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+  certificateSecret:
+    # -- Create argocd-server-tls secret
+    enabled: false
+    # -- Annotations to be added to argocd-server-tls secret
+    annotations: {}
+    # -- Labels to be added to argocd-server-tls secret
+    labels: {}
+    # -- Private Key of the certificate
+    key: ''
+    # -- Certificate data
+    crt: ''
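+  # Example (hypothetical): wiring a pre-issued certificate into the
+  # argocd-server-tls secret. The PEM bodies are placeholders and would
+  # normally be injected from CI secrets rather than committed to the repo.
+  # certificateSecret:
+  #   enabled: true
+  #   key: |
+  #     -----BEGIN PRIVATE KEY-----
+  #     ...
+  #     -----END PRIVATE KEY-----
+  #   crt: |
+  #     -----BEGIN CERTIFICATE-----
+  #     ...
+  #     -----END CERTIFICATE-----
+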
+  ## Server service configuration
+  service:
+    # -- Server service annotations
+    annotations: {}
+    # -- Server service labels
+    labels: {}
+    # -- Server service type
+    type: ClusterIP
+    # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort")
+    nodePortHttp: 30080
+    # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort")
+    nodePortHttps: 30443
+    # -- Server service http port
+    servicePortHttp: 80
+    # -- Server service https port
+    servicePortHttps: 443
+    # -- Server service http port name, can be used to route traffic via istio
+    servicePortHttpName: http
+    # -- Server service https port name, can be used to route traffic via istio
+    servicePortHttpsName: https
+    # -- Server service https port appProtocol
+    ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol
+    servicePortHttpsAppProtocol: ""
+    # -- The class of the load balancer implementation
+    loadBalancerClass: ""
+    # -- LoadBalancer will get created with the IP specified in this field
+    loadBalancerIP: ""
+    # -- Source IP ranges to allow access to service from
+    ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+    loadBalancerSourceRanges: []
+    # -- Server service external IPs
+    externalIPs: []
+    # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+    ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+    externalTrafficPolicy: Cluster
+    # -- Used to maintain session affinity. Supports `ClientIP` and `None`
+    ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+    sessionAffinity: None
+
+  ## Server metrics service configuration
+  metrics:
+    # -- Deploy metrics service
+    enabled: false
+    service:
+      # -- Metrics service type
+      type: ClusterIP
+      # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+      clusterIP: ""
+      # -- Metrics service annotations
+      annotations: {}
+      # -- Metrics service labels
+      labels: {}
+      # -- Metrics service port
+      servicePort: 8083
+      # -- Metrics service port name
+      portName: http-metrics
+    serviceMonitor:
+      # -- Enable a prometheus ServiceMonitor
+      enabled: false
+      # -- Prometheus ServiceMonitor interval
+      interval: 30s
+      # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout, unless it is less than the target's scrape interval, in which case the latter is used.
+      scrapeTimeout: ""
+      # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+      relabelings: []
+      # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+      metricRelabelings: []
+      # -- Prometheus ServiceMonitor selector
+      selector: {}
+      # prometheus: kube-prometheus
+
+      # -- Prometheus ServiceMonitor scheme
+      scheme: ""
+      # -- Prometheus ServiceMonitor tlsConfig
+      tlsConfig: {}
+      # -- Prometheus ServiceMonitor namespace
+      namespace: "" # monitoring
+      # -- Prometheus ServiceMonitor labels
+      additionalLabels: {}
+      # -- Prometheus ServiceMonitor annotations
+      annotations: {}
+
+  # -- Automount API credentials for the Service Account into the pod.
+  automountServiceAccountToken: true
+
+  serviceAccount:
+    # -- Create server service account
+    create: true
+    # -- Server service account name
+    name: argocd-server
+    # -- Annotations applied to created service account
+    annotations: {}
+    # -- Labels applied to created service account
+    labels: {}
+    # -- Automount API credentials for the Service Account
+    automountServiceAccountToken: true
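+  # Example (hypothetical): scraping server metrics with a Prometheus Operator
+  # install. The `prometheus: kube-prometheus` selector label mirrors the hint
+  # above and would need to match the Prometheus instance actually deployed.
+  # metrics:
+  #   enabled: true
+  #   serviceMonitor:
+  #     enabled: true
+  #     selector:
+  #       prometheus: kube-prometheus
+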
+  # Argo CD server ingress configuration
+  ingress:
+    # -- Enable an ingress resource for the Argo CD server
+    enabled: true
+    # -- Specific implementation for ingress controller. One of `generic`, `aws` or `gke`
+    ## Additional configuration might be required in related configuration sections
+    controller: gke
+    # -- Additional ingress labels
+    labels: {}
+    # -- Additional ingress annotations
+    ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#option-1-ssl-passthrough
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      # nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+      # nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+
+    # -- Defines which ingress controller will implement the resource
+    ingressClassName: nginx
+
+    # -- Argo CD server hostname
+    # @default -- `""` (defaults to global.domain)
+    hostname: "argocd.${primary_domain}"
+
+    # -- The path to Argo CD server
+    path: /
+
+    # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+    pathType: Prefix
+
+    # -- Enable TLS configuration for the hostname defined at `server.ingress.hostname`
+    ## TLS certificate will be retrieved from a TLS secret `argocd-server-tls`
+    ## You can create this secret via `certificate` or `certificateSecret` option
+    tls: true
+
+    # -- The list of additional hostnames to be covered by ingress record
+    # @default -- `[]` (See [values.yaml])
+    extraHosts: []
+    # - name: argocd.example.com
+    #   path: /
+
+    # -- Additional ingress paths
+    # @default -- `[]` (See [values.yaml])
+    ## Note: Supports use of custom Helm templates
+    extraPaths: []
+    # - path: /*
+    #   pathType: Prefix
+    #   backend:
+    #     service:
+    #       name: ssl-redirect
+    #       port:
+    #         name: use-annotation
+
+    # -- Additional ingress rules
+    # @default -- `[]` (See [values.yaml])
+    ## Note: Supports use of custom Helm templates
+    extraRules: []
+    # - http:
+    #     paths:
+    #     - path: /
+    #       pathType: Prefix
+    #       backend:
+    #         service:
+    #           name: '{{ include "argo-cd.server.fullname" . }}'
+    #           port:
+    #             name: '{{ .Values.server.service.servicePortHttpsName }}'
+
+    # -- Additional TLS configuration
+    # @default -- `[]` (See [values.yaml])
+    extraTls: []
+    # - hosts:
+    #   - argocd.example.com
+    #   secretName: your-certificate-name
+
+    # AWS specific options for Application Load Balancer
+    # Applies only when `server.ingress.controller` is set to `aws`
+    ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#aws-application-load-balancers-albs-and-classic-elb-http-mode
+    aws:
+      # -- Backend protocol version for the AWS ALB gRPC service
+      ## This tells AWS to send traffic from the ALB using gRPC.
+      ## For more information: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#health-check-settings
+      backendProtocolVersion: GRPC
+      # -- Service type for the AWS ALB gRPC service
+      ## Can be of type NodePort or ClusterIP depending on which mode you are running.
+ ## Instance mode needs type NodePort, IP mode needs type ClusterIP + ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic + serviceType: NodePort + + # Google specific options for Google Application Load Balancer + # Applies only when `server.ingress.controller` is set to `gke` + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#google-cloud-load-balancers-with-kubernetes-ingress + gke: + # -- Google [BackendConfig] resource, for use with the GKE Ingress Controller + # @default -- `{}` (See [values.yaml]) + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters + backendConfig: {} + # iap: + # enabled: true + # oauthclientCredentials: + # secretName: argocd-secret + + # -- Google [FrontendConfig] resource, for use with the GKE Ingress Controller + # @default -- `{}` (See [values.yaml]) + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters + frontendConfig: {} + # redirectToHttps: + # enabled: true + # responseCodeName: RESPONSE_CODE + + # Managed GKE certificate for ingress hostname + managedCertificate: + # -- Create ManagedCertificate resource and annotations for Google Load balancer + ## Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs + create: true + # -- Additional domains for ManagedCertificate resource + extraDomains: [] + # - argocd.example.com + + # Dedicated gRPC ingress for ingress controllers that supports only single backend protocol per Ingress resource + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#option-2-multiple-ingress-objects-and-hosts + ingressGrpc: + # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress] + enabled: false + # -- Additional ingress annotations for dedicated [gRPC-ingress] + annotations: {} + # -- Additional ingress labels for dedicated [gRPC-ingress] + labels: {} + # -- Defines which ingress controller will implement the resource [gRPC-ingress] + ingressClassName: "" + + # -- Argo CD server hostname for dedicated [gRPC-ingress] + # @default -- `""` (defaults to grpc.`server.ingress.hostname`) + hostname: "" + + # -- Argo CD server ingress path for dedicated [gRPC-ingress] + path: / + + # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + + # -- Enable TLS configuration for the hostname defined at `server.ingressGrpc.hostname` + ## TLS certificate will be retrieved from a TLS secret with name: `argocd-server-grpc-tls` + tls: false + + # -- The list of additional hostnames to be covered by ingress record + # @default -- `[]` (See [values.yaml]) + extraHosts: [] + # - name: grpc.argocd.example.com + # path: / + + # -- Additional ingress paths for dedicated [gRPC-ingress] + # @default -- `[]` (See [values.yaml]) + ## Note: Supports use of custom Helm templates + extraPaths: [] + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Additional ingress rules + # @default -- `[]` (See [values.yaml]) + ## Note: Supports use of custom Helm templates + extraRules: [] + # - http: + # paths: + # - path: / + # pathType: Prefix + # backend: + # service: + # name: '{{ include "argo-cd.server.fullname" . 
}}'
+    #           port:
+    #             name: '{{ .Values.server.service.servicePortHttpName }}'
+
+    # -- Additional TLS configuration for dedicated [gRPC-ingress]
+    # @default -- `[]` (See [values.yaml])
+    extraTls: []
+    # - secretName: your-certificate-name
+    #   hosts:
+    #   - argocd.example.com
+
+  # Create an OpenShift Route with SSL passthrough for UI and CLI
+  # Consider setting 'hostname' (e.g. https://argocd.apps-crc.testing/) using your default Ingress Controller domain
+  # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain:
+  # If 'hostname' is an empty string "", OpenShift will create a hostname for you.
+  route:
+    # -- Enable an OpenShift Route for the Argo CD server
+    enabled: false
+    # -- Openshift Route annotations
+    annotations: {}
+    # -- Hostname of OpenShift Route
+    hostname: ""
+    # -- Termination type of Openshift Route
+    termination_type: passthrough
+    # -- Termination policy of Openshift Route
+    termination_policy: None
+
+  ## Enable this and set `rules` to whatever custom rules you want for the ClusterRole resource.
+  ## Defaults to off
+  clusterRoleRules:
+    # -- Enable custom rules for the server's ClusterRole resource
+    enabled: false
+    # -- List of custom rules for the server's ClusterRole resource
+    rules: []
+
+## Repo Server
+repoServer:
+  # -- Repo server name
+  name: repo-server
+
+  # -- The number of repo server pods to run
+  replicas: 1
+
+  # -- Runtime class name for the repo server
+  # @default -- `""` (defaults to global.runtimeClassName)
+  runtimeClassName: ""
+
+  ## Repo server Horizontal Pod Autoscaler
+  autoscaling:
+    # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server
+    enabled: false
+    # -- Minimum number of replicas for the repo server [HPA]
+    minReplicas: 1
+    # -- Maximum number of replicas for the repo server [HPA]
+    maxReplicas: 5
+    # -- Average CPU utilization percentage for the repo server [HPA]
+    targetCPUUtilizationPercentage: 50
+    # -- Average memory utilization percentage for the repo server [HPA]
+    targetMemoryUtilizationPercentage: 50
+    # -- Configures the scaling behavior of the target in both Up and Down directions.
+    behavior: {}
+    # scaleDown:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 1
+    #     periodSeconds: 180
+    # scaleUp:
+    #   stabilizationWindowSeconds: 300
+    #   policies:
+    #   - type: Pods
+    #     value: 2
+    #     periodSeconds: 60
+    # -- Configures custom HPA metrics for the Argo CD repo server
+    # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
+    metrics: []
+
+  ## Repo server Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the repo server
+    enabled: false
+    # -- Labels to be added to repo server pdb
+    labels: {}
+    # -- Annotations to be added to repo server pdb
+    annotations: {}
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `repoServer.pdb.minAvailable` + maxUnavailable: "" + + ## Repo server image + image: + # -- Repository to use for the repo server + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the repo server + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the repo server + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to repo server + extraArgs: [] + + # -- Environment variables to pass to repo server + env: [] + + # -- envFrom to pass to repo server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Specify postStart and preStop lifecycle hooks for your argo-repo-server container + lifecycle: {} + + # -- Additional containers to be added to the repo server pod + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/ + ## Note: Supports use of custom Helm templates + extraContainers: [] + # - name: cmp-my-plugin + # command: + # - "/var/run/argocd/argocd-cmp-server" + # image: busybox + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: my-plugin.yaml + # name: argocd-cmp-cm + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. + # - mountPath: /tmp + # name: cmp-tmp + # - name: cmp-my-plugin2 + # command: + # - "/var/run/argocd/argocd-cmp-server" + # image: busybox + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: my-plugin2.yaml + # name: argocd-cmp-cm + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. 
+  #     - mountPath: /tmp
+  #       name: cmp-tmp
+
+  # -- Init containers to add to the repo server pods
+  initContainers: []
+
+  # -- Additional volumeMounts to the repo server main container
+  volumeMounts: []
+
+  # -- Additional volumes to the repo server pod
+  volumes: []
+  # - name: argocd-cmp-cm
+  #   configMap:
+  #     name: argocd-cmp-cm
+  # - name: cmp-tmp
+  #   emptyDir: {}
+
+  # -- Volumes to use in place of the default emptyDir volumes
+  existingVolumes: {}
+  # gpgKeyring:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-keyring
+  # helmWorkingDir:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-workdir
+  # tmp:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-tmp
+  # varFiles:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-varfiles
+  # plugins:
+  #   persistentVolumeClaim:
+  #     claimName: pvc-argocd-repo-server-plugins
+
+  ## RepoServer emptyDir volumes
+  emptyDir:
+    # -- EmptyDir size limit for repo server
+    # @default -- `""` (defaults not set if not specified i.e. no size limit)
+    sizeLimit: ""
+    # sizeLimit: "1Gi"
+
+  # -- Toggle the usage of an ephemeral Helm working directory
+  useEphemeralHelmWorkingDir: true
+
+  # -- Annotations to be added to repo server Deployment
+  deploymentAnnotations: {}
+
+  # -- Annotations to be added to repo server pods
+  podAnnotations: {}
+
+  # -- Labels to be added to repo server pods
+  podLabels: {}
+
+  # -- Resource limits and requests for the repo server pods
+  resources: {}
+  # limits:
+  #   cpu: 50m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 10m
+  #   memory: 64Mi
+
+  # Repo server container ports
+  containerPorts:
+    # -- Repo server container port
+    server: 8081
+    # -- Metrics container port
+    metrics: 8084
+
+  # -- Host Network for Repo server pods
+  hostNetwork: false
+
+  # -- [DNS configuration]
+  dnsConfig: {}
+  # -- Alternative DNS policy for Repo server pods
+  dnsPolicy: "ClusterFirst"
+
+  # -- Repo server container-level security context
+  # @default -- See [values.yaml]
+  containerSecurityContext:
+    runAsNonRoot: true
+    readOnlyRootFilesystem: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+      - ALL
+
+  ## Readiness and liveness probes for the repo server
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  readinessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+
+  livenessProbe:
+    # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+    failureThreshold: 3
+    # -- Number of seconds after the container has started before [probe] is initiated
+    initialDelaySeconds: 10
+    # -- How often (in seconds) to perform the [probe]
+    periodSeconds: 10
+    # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+    successThreshold: 1
+    # -- Number of seconds after which the [probe] times out
+    timeoutSeconds: 1
+
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
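+  # Example (hypothetical): the scheduling knobs below could pin repo-server
+  # pods to a dedicated node pool, assuming a pool labeled and tainted with
+  # `workload=argocd`.
+  # nodeSelector:
+  #   workload: argocd
+  # tolerations:
+  #   - key: workload
+  #     operator: Equal
+  #     value: argocd
+  #     effect: NoSchedule
+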
+  # -- [Node selector]
+  # @default -- `{}` (defaults to global.nodeSelector)
+  nodeSelector: {}
+
+  # -- [Tolerations] for use with node taints
+  # @default -- `[]` (defaults to global.tolerations)
+  tolerations: []
+
+  # -- Assign custom [affinity] rules to the deployment
+  # @default -- `{}` (defaults to global.affinity preset)
+  affinity: {}
+
+  # -- Assign custom [TopologySpreadConstraints] rules to the repo server
+  # @default -- `[]` (defaults to global.topologySpreadConstraints)
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the repo server Deployment
+  deploymentStrategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 25%
+  #   maxUnavailable: 25%
+
+  # -- Priority class for the repo server pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # TLS certificate configuration via Secret
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server
+  ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart the repo server automatically without extra controllers.
+  certificateSecret:
+    # -- Create argocd-repo-server-tls secret
+    enabled: false
+    # -- Annotations to be added to argocd-repo-server-tls secret
+    annotations: {}
+    # -- Labels to be added to argocd-repo-server-tls secret
+    labels: {}
+    # -- Certificate authority. Required for self-signed certificates.
+    ca: ''
+    # -- Certificate private key
+    key: ''
+    # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc)
+    crt: ''
+
+  ## Repo server service configuration
+  service:
+    # -- Repo server service annotations
+    annotations: {}
+    # -- Repo server service labels
+    labels: {}
+    # -- Repo server service port
+    port: 8081
+    # -- Repo server service port name
+    portName: tcp-repo-server
+
+  ## Repo server metrics service configuration
+  metrics:
+    # -- Deploy metrics service
+    enabled: false
+    service:
+      # -- Metrics service type
+      type: ClusterIP
+      # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+      clusterIP: ""
+      # -- Metrics service annotations
+      annotations: {}
+      # -- Metrics service labels
+      labels: {}
+      # -- Metrics service port
+      servicePort: 8084
+      # -- Metrics service port name
+      portName: http-metrics
+    serviceMonitor:
+      # -- Enable a prometheus ServiceMonitor
+      enabled: false
+      # -- Prometheus ServiceMonitor interval
+      interval: 30s
+      # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout, unless it is less than the target's scrape interval, in which case the latter is used.
+ scrapeTimeout: "" + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Enable Custom Rules for the Repo server's Cluster Role resource + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the Repo server's Cluster Role resource + enabled: false + # -- List of custom rules for the Repo server's Cluster Role resource + rules: [] + + # -- Automount API credentials for the Service Account into the pod. + automountServiceAccountToken: true + + ## Repo server service account + ## If create is set to true, make sure to uncomment the name and update the rbac section below + serviceAccount: + # -- Create repo server service account + create: true + # -- Repo server service account name + name: "" # "argocd-repo-server" + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Repo server rbac rules + rbac: [] + # - apiGroups: + # - argoproj.io + # resources: + # - applications + # verbs: + # - get + # - list + # - watch + +## ApplicationSet controller +applicationSet: + # -- ApplicationSet controller name string + name: applicationset-controller + + # -- The number of ApplicationSet controller pods to run + replicas: 1 + + # -- Runtime class name for the ApplicationSet controller + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + + ## ApplicationSet controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller + enabled: false + # -- Labels to be added to ApplicationSet controller pdb + labels: {} + # -- Annotations to be added to ApplicationSet controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `applicationSet.pdb.minAvailable` + maxUnavailable: "" + + ## ApplicationSet controller image + image: + # -- Repository to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the ApplicationSet controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- If defined, uses a Secret to pull an image from a private Docker registry or repository. 
+ # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- ApplicationSet controller command line flags + extraArgs: [] + + # -- Environment variables to pass to the ApplicationSet controller + extraEnv: [] + # - name: "MY_VAR" + # value: "value" + + # -- envFrom to pass to the ApplicationSet controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the ApplicationSet controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + ## ApplicationSet controller emptyDir volumes + emptyDir: + # -- EmptyDir size limit for applicationSet controller + # @default -- `""` (defaults not set if not specified i.e. no size limit) + sizeLimit: "" + # sizeLimit: "1Gi" + + ## Metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8080 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used. + scrapeTimeout: "" + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## ApplicationSet service configuration + service: + # -- ApplicationSet service annotations + annotations: {} + # -- ApplicationSet service labels + labels: {} + # -- ApplicationSet service type + type: ClusterIP + # -- ApplicationSet service port + port: 7000 + # -- ApplicationSet service port name + portName: http-webhook + + # -- Automount API credentials for the Service Account into the pod. 
+ automountServiceAccountToken: true + + serviceAccount: + # -- Create ApplicationSet controller service account + create: true + # -- ApplicationSet controller service account name + name: argocd-applicationset-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Annotations to be added to ApplicationSet controller Deployment + deploymentAnnotations: {} + + # -- Annotations for the ApplicationSet controller pods + podAnnotations: {} + + # -- Labels for the ApplicationSet controller pods + podLabels: {} + + # -- Resource limits and requests for the ApplicationSet controller pods. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # ApplicationSet controller container ports + containerPorts: + # -- Metrics container port + metrics: 8080 + # -- Probe container port + probe: 8081 + # -- Webhook container port + webhook: 7000 + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for ApplicationSet controller pods + dnsPolicy: "ClusterFirst" + + # -- ApplicationSet controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for ApplicationSet controller (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes liveness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + livenessProbe: + # -- Enable Kubernetes liveness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + + # -- [Node selector] + # @default -- `{}` (defaults to global.nodeSelector) + nodeSelector: {} + + # -- [Tolerations] for use with node taints + # @default -- `[]` (defaults to global.tolerations) + tolerations: [] + + # -- Assign custom [affinity] rules + # @default -- `{}` (defaults to global.affinity preset) + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the ApplicationSet controller + # @default -- `[]` (defaults to global.topologySpreadConstraints) + ## Ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the ApplicationSet controller Deployment
+  deploymentStrategy: {}
+  # type: RollingUpdate
+  # rollingUpdate:
+  #   maxSurge: 25%
+  #   maxUnavailable: 25%
+
+  # -- Priority class for the ApplicationSet controller pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # TLS certificate configuration via cert-manager
+  ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-configuration
+  certificate:
+    # -- Deploy a Certificate resource (requires cert-manager)
+    enabled: false
+    # -- Certificate primary domain (commonName)
+    # @default -- `""` (defaults to global.domain)
+    domain: ""
+    # -- Certificate Subject Alternate Names (SANs)
+    additionalHosts: []
+    # -- The requested 'duration' (i.e. lifetime) of the certificate.
+    # @default -- `""` (defaults to 2160h = 90d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    duration: ""
+    # -- How long before the expiry a certificate should be renewed.
+    # @default -- `""` (defaults to 360h = 15d if not specified)
+    ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+    renewBefore: ""
+    # Certificate issuer
+    ## Ref: https://cert-manager.io/docs/concepts/issuer
+    issuer:
+      # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+      group: ""
+      # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+      kind: ""
+      # -- Certificate issuer name. Eg. `letsencrypt`
+      name: ""
+    # Private key of the certificate
+    privateKey:
+      # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+      rotationPolicy: Never
+      # -- The private key cryptography standards (PKCS) encoding for the private key. Either: `PKCS1` or `PKCS8`
+      encoding: PKCS1
+      # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+      algorithm: RSA
+      # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+      size: 2048
+    # -- Annotations to be applied to the ApplicationSet Certificate
+    annotations: {}
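+  # Example (hypothetical): exposing the Git Generator webhook described below
+  # through the nginx ingress controller; the hostname is a placeholder.
+  # ingress:
+  #   enabled: true
+  #   ingressClassName: nginx
+  #   hostname: argocd-appset.example.com
+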
+  ## Ingress for the Git Generator webhook
+  ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration
+  ingress:
+    # -- Enable an ingress resource for ApplicationSet webhook
+    enabled: false
+    # -- Additional ingress labels
+    labels: {}
+    # -- Additional ingress annotations
+    annotations: {}
+
+    # -- Defines which ingress ApplicationSet controller will implement the resource
+    ingressClassName: ""
+
+    # -- Argo CD ApplicationSet hostname
+    # @default -- `""` (defaults to global.domain)
+    hostname: ""
+
+    # -- List of ingress paths
+    path: /api/webhook
+
+    # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+    pathType: Prefix
+
+    # -- Enable TLS configuration for the hostname defined at `applicationSet.ingress.hostname`
+    ## TLS certificate will be retrieved from a TLS secret with name: `argocd-applicationset-controller-tls`
+    tls: false
+
+    # -- The list of additional hostnames to be covered by ingress record
+    # @default -- `[]` (See [values.yaml])
+    extraHosts: []
+    # - name: argocd.example.com
+    #   path: /
+
+    # -- Additional ingress paths
+    # @default -- `[]` (See [values.yaml])
+    extraPaths: []
+    # - path: /*
+    #   pathType: Prefix
+    #   backend:
+    #     service:
+    #       name: ssl-redirect
+    #       port:
+    #         name: use-annotation
+
+    # -- Additional ingress rules
+    # @default -- `[]` (See [values.yaml])
+    ## Note: Supports use of custom Helm templates
+    extraRules: []
+    # - http:
+    #     paths:
+    #     - path: /api/webhook
+    #       pathType: Prefix
+    #       backend:
+    #         service:
+    #           name: '{{ include "argo-cd.applicationSet.fullname" . }}'
+    #           port:
+    #             name: '{{ .Values.applicationSet.service.portName }}'
+
+    # -- Additional ingress TLS configuration
+    # @default -- `[]` (See [values.yaml])
+    extraTls: []
+    # - secretName: argocd-applicationset-tls
+    #   hosts:
+    #   - argocd-applicationset.example.com
+
+  # -- Enable ApplicationSet in any namespace feature
+  allowAnyNamespace: false
+
+## Notifications controller
+notifications:
+  # -- Enable notifications controller
+  enabled: true
+
+  # -- Notifications controller name string
+  name: notifications-controller
+
+  # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates
+  # @default -- `""` (defaults to https://`global.domain`)
+  argocdUrl: ""
+
+  # -- Runtime class name for the notifications controller
+  # @default -- `""` (defaults to global.runtimeClassName)
+  runtimeClassName: ""
+
+  ## Notifications controller Pod Disruption Budget
+  ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+  pdb:
+    # -- Deploy a [PodDisruptionBudget] for the notifications controller
+    enabled: false
+    # -- Labels to be added to notifications controller pdb
+    labels: {}
+    # -- Annotations to be added to notifications controller pdb
+    annotations: {}
+    # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+    # @default -- `""` (defaults to 0 if not specified)
+    minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Has higher precedence over `notifications.pdb.minAvailable`
+    maxUnavailable: ""
+
+  ## Notifications controller image
+  image:
+    # -- Repository to use for the notifications controller
+    # @default -- `""` (defaults to global.image.repository)
+    repository: ""
+    # -- Tag to use for the notifications controller
+    # @default -- `""` (defaults to global.image.tag)
+    tag: ""
+    # -- Image pull policy for the notifications controller
+    # @default -- `""` (defaults to global.image.imagePullPolicy)
+    imagePullPolicy: ""
+
+  # -- Secrets with credentials to pull images from a private registry
+  # @default -- `[]` (defaults to global.imagePullSecrets)
+  imagePullSecrets: []
+
+  # -- Notifications controller log format. Either `text` or `json`
+  # @default -- `""` (defaults to global.logging.format)
+  logFormat: ""
+  # -- Notifications controller log level.
One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + # -- Extra arguments to provide to the notifications controller + extraArgs: [] + + # -- Additional container environment variables + extraEnv: [] + + # -- envFrom to pass to the notifications controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the notifications controller pod + ## Note: Supports use of custom Helm templates + extraContainers: [] + + # -- Init containers to add to the notifications controller pod + ## Note: Supports use of custom Helm templates + initContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + # -- Define user-defined context + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/templates/#defining-user-defined-context + context: {} + # region: east + # environmentName: staging + + secret: + # -- Whether helm chart creates notifications controller secret + ## If true, will create a secret with the name below. Otherwise, will assume existence of a secret with that name. + create: true + + # -- notifications controller Secret name + name: "argocd-notifications-secret" + + # -- key:value pairs of annotations to be added to the secret + annotations: {} + + # -- key:value pairs of labels to be added to the secret + labels: {} + + # -- Generic key:value pairs to be inserted into the secret + ## Can be used for templates, notification services etc. Some examples given below. + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/overview/ + items: {} + # slack-token: + # # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/slack/ + + # grafana-apiKey: + # # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/grafana/ + + # webhooks-github-token: + + # email-username: + # email-password: + # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/email/ + + metrics: + # -- Enables prometheus metrics server + enabled: false + # -- Metrics port + port: 9001 + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. 
`None` makes a "headless service" (no virtual IP) + clusterIP: "" + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + + # -- Configures notification services such as slack, email or custom webhook + # @default -- See [values.yaml] + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/overview/ + notifiers: {} + # service.slack: | + # token: $slack-token + + # -- Annotations to be applied to the notifications controller Deployment + deploymentAnnotations: {} + + # -- Annotations to be applied to the notifications controller Pods + podAnnotations: {} + + # -- Labels to be applied to the notifications controller Pods + podLabels: {} + + # -- Resource limits and requests for the notifications controller + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Notification controller container ports + containerPorts: + # -- Metrics container port + metrics: 9001 + + # -- [DNS configuration] + dnsConfig: {} + # -- Alternative DNS policy for notifications controller Pods + dnsPolicy: "ClusterFirst" + + # -- Notification controller container-level security Context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for notifications controller Pods (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes liveness probe for notifications controller Pods + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + livenessProbe: + # -- Enable Kubernetes liveness probe for notifications controller Pods + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + 
failureThreshold: 3
+
+  # -- terminationGracePeriodSeconds for container lifecycle hook
+  terminationGracePeriodSeconds: 30
+
+  # -- [Node selector]
+  # @default -- `{}` (defaults to global.nodeSelector)
+  nodeSelector: {}
+
+  # -- [Tolerations] for use with node taints
+  # @default -- `[]` (defaults to global.tolerations)
+  tolerations: []
+
+  # -- Assign custom [affinity] rules
+  # @default -- `{}` (defaults to global.affinity preset)
+  affinity: {}
+
+  # -- Assign custom [TopologySpreadConstraints] rules to the notifications controller
+  # @default -- `[]` (defaults to global.topologySpreadConstraints)
+  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Deployment strategy to be added to the notifications controller Deployment
+  deploymentStrategy:
+    type: Recreate
+
+  # -- Priority class for the notifications controller pods
+  # @default -- `""` (defaults to global.priorityClassName)
+  priorityClassName: ""
+
+  # -- Automount API credentials for the Service Account into the pod.
+  automountServiceAccountToken: true
+
+  serviceAccount:
+    # -- Create notifications controller service account
+    create: true
+    # -- Notification controller service account name
+    name: argocd-notifications-controller
+    # -- Annotations applied to created service account
+    annotations: {}
+    # -- Labels applied to created service account
+    labels: {}
+    # -- Automount API credentials for the Service Account
+    automountServiceAccountToken: true
+
+  cm:
+    # -- Whether helm chart creates notifications controller config map
+    create: true
+
+  ## Enable this and set `rules` to whatever custom rules you want for the ClusterRole resource.
+  ## Defaults to off
+  clusterRoleRules:
+    # -- List of custom rules for the notifications controller's ClusterRole resource
+    rules: []
+
+  # -- Contains centrally managed global application subscriptions
+  ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/subscriptions/
+  subscriptions: []
+  # # subscription for on-sync-status-unknown trigger notifications
+  # - recipients:
+  #   - slack:test2
+  #   - email:test@gmail.com
+  #   triggers:
+  #   - on-sync-status-unknown
+  # # subscription restricted to applications with matching labels only
+  # - recipients:
+  #   - slack:test3
+  #   selector: test=true
+  #   triggers:
+  #   - on-sync-status-unknown
+
+  # -- The notification template is used to generate the notification content
+  ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/templates/
+  templates: {}
+  # template.app-deployed: |
+  #   email:
+  #     subject: New version of an application {{.app.metadata.name}} is up and running.
+  #     message: |
+  #       {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests.
+ # slack: + # attachments: | + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # }, + # { + # "title": "Revision", + # "value": "{{.app.status.sync.revision}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-health-degraded: | + # email: + # subject: Application {{.app.metadata.name}} has degraded. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#f4c030", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-failed: | + # email: + # subject: Failed to sync application {{.app.metadata.name}}. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}} + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-running: | + # email: + # subject: Start syncing application {{.app.metadata.name}}. + # message: | + # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . 
+ # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#0DADEA", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-status-unknown: | + # email: + # subject: Application {{.app.metadata.name}} sync status is 'Unknown' + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + # {{if ne .serviceType "slack"}} + # {{range $c := .app.status.conditions}} + # * {{$c.message}} + # {{end}} + # {{end}} + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-succeeded: | + # email: + # subject: Application {{.app.metadata.name}} has been successfully synced. + # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + + # -- The trigger defines the condition when the notification should be sent + ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/triggers/ + triggers: {} + # trigger.on-deployed: | + # - description: Application is synced and healthy. Triggered once per commit. 
+ # oncePer: app.status.sync.revision + # send: + # - app-deployed + # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + # trigger.on-health-degraded: | + # - description: Application has degraded + # send: + # - app-health-degraded + # when: app.status.health.status == 'Degraded' + # trigger.on-sync-failed: | + # - description: Application syncing has failed + # send: + # - app-sync-failed + # when: app.status.operationState.phase in ['Error', 'Failed'] + # trigger.on-sync-running: | + # - description: Application is being synced + # send: + # - app-sync-running + # when: app.status.operationState.phase in ['Running'] + # trigger.on-sync-status-unknown: | + # - description: Application status is 'Unknown' + # send: + # - app-sync-status-unknown + # when: app.status.sync.status == 'Unknown' + # trigger.on-sync-succeeded: | + # - description: Application syncing has succeeded + # send: + # - app-sync-succeeded + # when: app.status.operationState.phase in ['Succeeded'] + # + # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/triggers/#default-triggers + # defaultTriggers: | + # - on-sync-status-unknown + diff --git a/terraform/services/helm-values/kube_prometheus_stack.yaml b/terraform/services/helm-values/kube_prometheus_stack.yaml new file mode 100644 index 0000000..ac512f1 --- /dev/null +++ b/terraform/services/helm-values/kube_prometheus_stack.yaml @@ -0,0 +1,4813 @@ +# Default values for kube-prometheus-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Provide a name in place of kube-prometheus-stack for `app:` labels +## +nameOverride: "" + +## Override the deployment namespace +## +namespaceOverride: "${namespaceOverride}" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6 +## +kubeTargetVersionOverride: "" + +## Allow kubeVersion to be overridden while creating the ingress +## +kubeVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Checks if any deprecated values are used +## +checkDeprecation: true + +## Install Prometheus Operator CRDs +## +crds: + enabled: true + +## custom Rules to override "for" and "severity" in defaultRules +## +customRules: {} + # AlertmanagerFailedReload: + # for: 3m + # AlertmanagerMembersInconsistent: + # for: 5m + # severity: "warning" + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + configReloaders: true + general: true + k8sContainerCpuUsageSecondsTotal: true + k8sContainerMemoryCache: true + k8sContainerMemoryRss: true + k8sContainerMemorySwap: true + k8sContainerResource: true + k8sContainerMemoryWorkingSetBytes: true + k8sPodOwner: true + kubeApiserverAvailability: true + kubeApiserverBurnrate: true + kubeApiserverHistogram: true + kubeApiserverSlos: true + kubeControllerManager: true + kubelet: true + kubeProxy: true + kubePrometheusGeneral: true + kubePrometheusNodeRecording: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeSchedulerAlerting: true + kubeSchedulerRecording: true + kubeStateMetrics: true + network: true + node: true + nodeExporterAlerting: true + nodeExporterRecording: true + prometheus: true + prometheusOperator: true + windows: 
true + + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Set keep_firing_for for all alerts + keepFiringFor: "" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + + ## Additional annotations for PrometheusRule alerts + additionalRuleAnnotations: {} + + ## Additional labels for specific PrometheusRule alert groups + additionalRuleGroupLabels: + alertmanager: {} + etcd: {} + configReloaders: {} + general: {} + k8sContainerCpuUsageSecondsTotal: {} + k8sContainerMemoryCache: {} + k8sContainerMemoryRss: {} + k8sContainerMemorySwap: {} + k8sContainerResource: {} + k8sPodOwner: {} + kubeApiserverAvailability: {} + kubeApiserverBurnrate: {} + kubeApiserverHistogram: {} + kubeApiserverSlos: {} + kubeControllerManager: {} + kubelet: {} + kubeProxy: {} + kubePrometheusGeneral: {} + kubePrometheusNodeRecording: {} + kubernetesApps: {} + kubernetesResources: {} + kubernetesStorage: {} + kubernetesSystem: {} + kubeSchedulerAlerting: {} + kubeSchedulerRecording: {} + kubeStateMetrics: {} + network: {} + node: {} + nodeExporterAlerting: {} + nodeExporterRecording: {} + prometheus: {} + prometheusOperator: {} + + ## Additional annotations for specific PrometheusRule alerts groups + additionalRuleGroupAnnotations: + alertmanager: {} + etcd: {} + configReloaders: {} + general: {} + k8sContainerCpuUsageSecondsTotal: {} + k8sContainerMemoryCache: {} + k8sContainerMemoryRss: {} + k8sContainerMemorySwap: {} + k8sContainerResource: {} + k8sPodOwner: {} + kubeApiserverAvailability: {} + kubeApiserverBurnrate: {} + kubeApiserverHistogram: {} + kubeApiserverSlos: {} + kubeControllerManager: {} + kubelet: {} + kubeProxy: {} + kubePrometheusGeneral: {} + kubePrometheusNodeRecording: {} + kubernetesApps: {} + kubernetesResources: {} + kubernetesStorage: {} + kubernetesSystem: {} + kubeSchedulerAlerting: {} + kubeSchedulerRecording: {} + kubeStateMetrics: {} + network: {} + node: {} + nodeExporterAlerting: {} + nodeExporterRecording: {} + prometheus: {} + prometheusOperator: {} + + additionalAggregationLabels: [] + + ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. + runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" + + node: + fsSelector: 'fstype!=""' + # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"' + + ## Disabled PrometheusRule alerts + disabled: {} + # KubeAPIDown: true + # NodeRAIDDegraded: true + +## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. +## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. 
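+## As an illustrative sketch only (not enabled here), a custom node CPU alert
+## could be added via this map. The group name, threshold, and labels below
+## are hypothetical and assume node-exporter metrics are being scraped:
+# additionalPrometheusRulesMap:
+#   node-cpu-rules:
+#     groups:
+#       - name: node-cpu
+#         rules:
+#           - alert: NodeHighCpuUtilization
+#             expr: 1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) > 0.9
+#             for: 10m
+#             labels:
+#               severity: warning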
+## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + rbac: + create: true + + ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles + createAggregateClusterRoles: false + pspEnabled: false + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...) + ## + imageRegistry: "" + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + # or + # - "image-pull-secret" + +windowsMonitoring: + ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter') + enabled: false + +## Configuration for prometheus-windows-exporter +## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter +## +prometheus-windows-exporter: + ## Enable ServiceMonitor and set Kubernetes label to use as a job label + ## + prometheus: + monitor: + enabled: true + jobLabel: jobLabel + + releaseLabel: true + + ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards + ## + podLabels: + jobLabel: windows-exporter + + ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards + ## + config: |- + collectors: + enabled: '[defaults],memory,container' + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## +alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Annotations for Alertmanager + ## + annotations: {} + + ## API that Prometheus will use to communicate with Alertmanager. Possible values are v1, v2 + ## + apiVersion: v2 + + ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features. + ## + enableFeatures: [] + + ## Service account for Alertmanager to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + annotations: {} + automountServiceAccountToken: true + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + config: + global: + resolve_timeout: 5m + inhibit_rules: + - source_matchers: + - 'severity = critical' + target_matchers: + - 'severity =~ warning|info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'severity = warning' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'alertname = InfoInhibitor' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + - target_matchers: + - 'alertname = InfoInhibitor' + route: + group_by: ['namespace'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - receiver: 'null' + matchers: + - alertname = "Watchdog" + receivers: + - name: 'null' + templates: + - '/etc/alertmanager/config/*.tmpl' + + ## Alertmanager configuration directives (as string type, preferred over the config hash map) + ## stringConfig will be used only, if tplConfig is true + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + stringConfig: "" + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#tmpl_string + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## By default, templateFiles are placed in /etc/alertmanager/config/ and if + ## they have a .tmpl file suffix will be loaded. See config.templates above + ## to change, add other suffixes. If adding other suffixes, be sure to update + ## config.templates above to include those suffixes. + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: {} + # + ## An example template: + # template_1.tmpl: |- + # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} + # + # {{ define "slack.myorg.text" }} + # {{- $root := . 
-}} + # {{ range .Alerts }} + # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` + # *Cluster:* {{ template "cluster" $root }} + # *Description:* {{ .Annotations.description }} + # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> + # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> + # *Details:* + # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}` + # {{ end }} + # {{ end }} + # {{ end }} + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + + labels: {} + + ## Override ingress to a different defined port on the service + # servicePort: 8081 + ## Override ingress to a different service than the default; this is useful if you need to + ## point to a specific instance of the alertmanager (e.g. kube-prometheus-stack-alertmanager-0) + # serviceName: kube-prometheus-stack-alertmanager-0 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - alertmanager.domain.com + + ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Alertmanager Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: alertmanager-general-tls + # hosts: + # - alertmanager.example.com + + ## Configuration for Alertmanager secret + ## + secret: + annotations: {} + + ## Configuration for creating an Ingress that will map to each Alertmanager replica service + ## alertmanager.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for alertmanager per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress.
Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "alertmanager" + + ## Configuration for Alertmanager service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port for Alertmanager Service to listen on + ## + port: 9093 + ## To be used with a proxy extraContainer port + ## + targetPort: 9093 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30903 + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + + ## Additional ports to open for Alertmanager service + ## + additionalPorts: [] + # - name: oauth-proxy + # port: 8081 + # targetPort: 8081 + # - name: oauth-metrics + # port: 8082 + # targetPort: 8082 + + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## If you want to make sure that connections from a particular client are passed to the same Pod each time + ## Accepts 'ClientIP' or 'None' + ## + sessionAffinity: None + + ## If you want to modify the ClientIP sessionAffinity timeout + ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" + ## + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 + + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a separate Service for each statefulset Alertmanager replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Alertmanager Service per replica to listen on + ## + port: 9093 + + ## To be used with a proxy extraContainer port + targetPort: 9093 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30904 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a ServiceMonitor for AlertManager + ## + serviceMonitor: + ## If true, a ServiceMonitor will be created for the AlertManager service. + ## + selfMonitor: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
+ ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## enableHttp2: Whether to enable HTTP2. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + enableHttp2: true + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional Endpoints + ## + additionalEndpoints: [] + # - port: oauth-metrics + # path: /metrics + + ## Settings affecting alertmanagerSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec + ## + alertmanagerSpec: + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations get propagated to the Alertmanager pods. + ## + podMetadata: {} + + ## Image of Alertmanager + ## + image: + registry: quay.io + repository: prometheus/alertmanager + tag: v0.27.0 + sha: "" + + ## If true then the user will be responsible for providing a secret with alertmanager configuration + ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used + ## + useExistingSecret: false + + ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the + ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + ## + secrets: [] + + ## If false then the user will opt out of automounting API credentials. + ## + automountServiceAccountToken: true + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. + ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + ## + configMaps: [] + + ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for + ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config. + ## + # configSecret: + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec + web: {} + + ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
+ ## + alertmanagerConfigSelector: {} + ## Example which selects all alertmanagerConfig resources + ## with label "alertconfig" with values any of "example-config" or "example-config-2" + # alertmanagerConfigSelector: + # matchExpressions: + # - key: alertconfig + # operator: In + # values: + # - example-config + # - example-config-2 + # + ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config" + # alertmanagerConfigSelector: + # matchLabels: + # role: example-config + + ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. + ## + alertmanagerConfigNamespaceSelector: {} + ## Example which selects all namespaces + ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2" + # alertmanagerConfigNamespaceSelector: + # matchExpressions: + # - key: alertmanagerconfig + # operator: In + # values: + # - example-namespace + # - example-namespace-2 + + ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled" + # alertmanagerConfigNamespaceSelector: + # matchLabels: + # alertmanagerconfig: enabled + + ## AlertmanagerConfig to be used as top level configuration + ## + alertmanagerConfiguration: {} + ## Example which selects a global alertmanagerconfig + # alertmanagerConfiguration: + # name: global-alertmanager-Configuration + + ## Defines the strategy used by AlertmanagerConfig objects to match alerts, e.g.: + ## + alertmanagerConfigMatcherStrategy: {} + ## Example using the OnNamespace strategy + # alertmanagerConfigMatcherStrategy: + # type: OnNamespace + + ## Define Log Format + # Use logfmt (default) or json logging + logFormat: logfmt + + ## Log level for Alertmanager to be configured with. + ## + logLevel: info + + ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the + ## running cluster equal to the expected size. + replicas: 1 + + ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression + ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). + ## + retention: 120h + + ## Storage is the definition of how storage will be used by the Alertmanager instances. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + ## + storage: {} + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} + + + ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs, and is required if Alertmanager is not served from the root of a DNS name. + ## + externalUrl: + + ## The route prefix Alertmanager registers HTTP handlers for. This is useful if, when using ExternalURL, a proxy rewrites the HTTP routes of a request while the actual ExternalURL remains valid, + ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. + ## + routePrefix: / + + ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when connecting to the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. + ## + paused: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Define resources requests and limits for single Pods. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # requests: + # memory: 400Mi + + ## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + ## + podAntiAffinity: "" + + ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the alertmanager instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## If specified, the pod's tolerations. + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: alertmanager + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to a non-root user with uid 1000 and gid 2000. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + seccompProfile: + type: RuntimeDefault + + ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. + ## Note this is only for the Alertmanager UI, not the gossip communication. + ## + listenLocal: false + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. + ## + containers: [] + # containers: + # - name: oauth-proxy + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1 + # args: + # - --upstream=http://127.0.0.1:9093 + # - --http-address=0.0.0.0:8081 + # - --metrics-address=0.0.0.0:8082 + # - ... + # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # - containerPort: 8082 + # name: oauth-metrics + # protocol: TCP + # resources: {} + + # Additional volumes on the output StatefulSet definition.
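+  # As a hypothetical example (not used by this deployment), extra template
+  # files could be mounted from a ConfigMap; the names below are illustrative:
+  # volumes:
+  #   - name: custom-templates
+  #     configMap:
+  #       name: alertmanager-custom-templates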
+ volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + ## + additionalPeers: [] + + ## PortName to use for Alert Manager. + ## + portName: "http-web" + + ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 + ## + clusterAdvertiseAddress: false + + ## clusterGossipInterval determines interval between gossip attempts. + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + clusterGossipInterval: "" + + ## clusterPeerTimeout determines timeout for cluster peering. + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + clusterPeerTimeout: "" + + ## clusterPushpullInterval determines interval between pushpull attempts. + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + clusterPushpullInterval: "" + + ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster. + clusterLabel: "" + + ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. + ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + forceEnableClusterMode: false + + ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to + ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). + minReadySeconds: 0 + + ## Additional configuration which is not covered by the properties above. (passed through tpl) + additionalConfig: {} + + ## Additional configuration which is not covered by the properties above. + ## Useful, if you need advanced templating inside alertmanagerSpec. + ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl) + additionalConfigString: "" + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + +## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml +## +grafana: + enabled: true + namespaceOverride: "" + + ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled + ## + forceDeployDatasources: false + + ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled + ## + forceDeployDashboards: false + + ## Deploy default dashboards + ## + defaultDashboardsEnabled: true + + ## Timezone for the default dashboards + ## Other options are: browser or a specific timezone, i.e. 
Europe/Luxembourg + ## + defaultDashboardsTimezone: utc + + ## Editable flag for the default dashboards + ## + defaultDashboardsEditable: true + + adminPassword: prom-operator + + rbac: + ## If true, Grafana PSPs will be created + ## + pspEnabled: false + + ingress: + ## If true, Grafana Ingress will be created + ## + enabled: true + + ## IngressClassName for Grafana Ingress. + ## Should be provided if Ingress is enabled. + ## + ingressClassName: nginx + + ## Annotations for Grafana Ingress + ## + annotations: + kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + ## Labels to be added to the Ingress + ## + labels: {} + + ## Hostnames. + ## Must be provided if Ingress is enabled. + ## + hosts: + - grafana.${primary_domain} + + ## Path for grafana ingress + path: / + + ## TLS configuration for grafana Ingress + ## Secret must be manually created in the namespace + ## + tls: + - secretName: grafana-tls + hosts: + - grafana.${primary_domain} + + # # To make Grafana persistent (Using Statefulset) + # # + # persistence: + # enabled: true + # type: sts + # storageClassName: "storageClassName" + # accessModes: + # - ReadWriteOnce + # size: 20Gi + # finalizers: + # - kubernetes.io/pvc-protection + + serviceAccount: + create: true + autoMount: true + + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + labelValue: "1" + # Allow discovery in all namespaces for dashboards + searchNamespace: ALL + + # Support for new table panels; when enabled, grafana auto-migrates the old table panels to newer table panels + enableNewTablePanelSyntax: false + + ## Annotations for Grafana dashboard configmaps + ## + annotations: {} + multicluster: + global: + enabled: false + etcd: + enabled: false + provider: + allowUiUpdates: false + datasources: + enabled: true + defaultDatasourceEnabled: true + isDefaultDatasource: true + + name: Prometheus + uid: prometheus + + ## URL of prometheus datasource + ## + # url: http://prometheus-stack-prometheus:9090/ + + ## Prometheus request timeout in seconds + # timeout: 30 + + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default + # defaultDatasourceScrapeInterval: 15s + + ## Annotations for Grafana datasource configmaps + ## + annotations: {} + + ## Set method for HTTP to send query to datasource + httpMethod: POST + + ## Create datasource for each Pod of Prometheus StatefulSet; + ## this uses headless service `prometheus-operated` which is + ## created by Prometheus Operator + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286 + createPrometheusReplicasDatasources: false + label: grafana_datasource + labelValue: "1" + + ## Field with internal link pointing to existing data source in Grafana.
+ ## Can be provisioned via additionalDataSources + exemplarTraceIdDestinations: {} + # datasourceUid: Jaeger + # traceIdLabelName: trace_id + alertmanager: + enabled: true + name: Alertmanager + uid: alertmanager + handleGrafanaManagedAlerts: false + implementation: prometheus + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # configMap: certs-configmap + # readOnly: true + + deleteDatasources: [] + # - name: example-datasource + # orgId: 1 + + ## Configure additional grafana datasources (passed through tpl) + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + additionalDataSources: [] + # - name: prometheus-sample + # access: proxy + # basicAuth: true + # basicAuthPassword: pass + # basicAuthUser: daco + # editable: false + # jsonData: + # tlsSkipVerify: true + # orgId: 1 + # type: prometheus + # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 + # version: 1 + + # Flag to mark provisioned data sources for deletion if they are no longer configured. + # It takes no effect if data sources are already listed in the deleteDatasources section. + # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-config-file + prune: false + + ## Passed to grafana subchart and used by servicemonitor below + ## + service: + portName: http-web + ipFamilies: [] + ipFamilyPolicy: "" + + serviceMonitor: + # If true, a ServiceMonitor CRD is created for a prometheus operator + # https://github.com/coreos/prometheus-operator + # + enabled: true + + # Path to use for scraping metrics. Might be different if server.root_url is set + # in grafana.ini + path: "/metrics" + + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + + # labels for the ServiceMonitor + labels: {} + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + # + interval: "" + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Flag to disable all the kubernetes component scrapers +## +kubernetesServiceMonitors: + enabled: true + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. 
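+    ## e.g., to scrape through an egress proxy (hostname and port below are
+    ## illustrative only, not part of this deployment):
+    # proxyUrl: "http://proxy.internal.example:3128"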
+ ## + proxyUrl: "" + + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: + # Drop excessively noisy apiserver buckets. + - action: drop + regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50) + sourceLabels: + - __name__ + - le + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator. + ## + attachMetadata: + node: false + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## If true, Prometheus use (respect) labels provided by exporter. + ## + honorLabels: true + + ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape. + ## + honorTimestamps: true + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping the kubelet over https. For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Skip TLS certificate validation when scraping. 
+ ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs + ## + insecureSkipVerify: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## This is disabled by default because container metrics are already exposed by cAdvisor + ## + resource: false + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + cAdvisorMetricRelabelings: + # Drop less useful container CPU metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)' + # Drop less useful container / always zero filesystem metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)' + # Drop less useful / always zero container memory metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_memory_(mapped_file|swap)' + # Drop less useful container process metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_(file_descriptors|tasks_state|threads_max)' + # Drop container spec metrics that overlap with kube-state-metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_spec.*' + # Drop cgroup metrics with no pod. + - sourceLabels: [id, pod] + action: drop + regex: '.+;' + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + cAdvisorRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + resourceRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + relabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: true + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.22. 
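+    ## For example, on Kubernetes >= 1.22 the secure port could be pinned
+    ## explicitly (illustrative only; verify against your control plane):
+    # port: 10257
+    # targetPort: 10257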
+ ## + port: null + targetPort: null + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-controller-manager + + ## Enable scraping kube-controller-manager over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: true + service: + enabled: true + port: 9153 + targetPort: 9153 + + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-dns + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
+ ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping etcd +## +kubeEtcd: + enabled: true + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2381 + targetPort: 2381 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: etcd + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
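+    ## (Reviewer note: on GKE, etcd runs on Google-managed control-plane VMs and
+    ## cannot be scraped from inside the cluster; kubeEtcd.enabled: false is the
+    ## usual, illustrative override there.)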
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: true + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.23. + ## + port: null + targetPort: null + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-scheduler + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
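+    ## (Reviewer note: like the controller manager, the scheduler is unreachable on
+    ## managed control planes such as GKE, so kubeScheduler.enabled: false avoids a
+    ## permanently-down target. kubeProxy below has the extra caveat that, on
+    ## self-managed clusters, kube-proxy must bind its metrics endpoint to 0.0.0.0
+    ## (e.g. --metrics-bind-address=0.0.0.0) before it can be scraped.)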
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube proxy +## +kubeProxy: + enabled: true + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-proxy + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube state metrics +## +kubeStateMetrics: + enabled: true + +## Configuration for kube-state-metrics subchart +## +kube-state-metrics: + namespaceOverride: "" + rbac: + create: true + releaseLabel: true + prometheus: + monitor: + enabled: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
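+      ## (Reviewer note: an explicit value such as interval: "30s" is illustrative;
+      ## the empty string inherits the Prometheus global default.)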
+ ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + # Keep labels from scraped data, overriding server-side labels + ## + honorLabels: true + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + selfMonitor: + enabled: false + +## Deploy node exporter as a daemonset to all nodes +## +nodeExporter: + enabled: true + operatingSystems: + linux: + enabled: true + darwin: + enabled: true + + ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled + ## + forceDeployDashboards: false + +## Configuration for prometheus-node-exporter subchart +## +prometheus-node-exporter: + namespaceOverride: "" + podLabels: + ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards + ## + jobLabel: node-exporter + releaseLabel: true + extraArgs: + - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ + service: + portName: http-metrics + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + labels: + jobLabel: node-exporter + + prometheus: + monitor: + enabled: true + + jobLabel: jobLabel + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. 
Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__] + # separator: ; + # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + rbac: + ## If true, create PSPs for node-exporter + ## + pspEnabled: false + +## Manages Prometheus and Alertmanager components +## +prometheusOperator: + enabled: true + + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default + fullnameOverride: "" + + ## Number of old replicasets to retain ## + ## The default value is 10, 0 will garbage-collect old replicasets ## + revisionHistoryLimit: 10 + + ## Strategy of the deployment + ## + strategy: {} + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. + internalPort: 10250 + + ## Liveness probe for the prometheusOperator deployment + ## + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ## Readiness probe for the prometheusOperator deployment + ## + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted + ## rules from making their way into prometheus and potentially preventing the container from starting + admissionWebhooks: + ## Valid values: Fail, Ignore, IgnoreOnInstallOnly + ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail" + failurePolicy: "" + ## The default timeoutSeconds is 10 and the maximum value is 30. + timeoutSeconds: 10 + enabled: true + ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. + ## If unspecified, system trust roots on the apiserver are used. + caBundle: "" + ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. + ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own + ## certs ahead of time if you wish. 
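+    ## (Reviewer note: the certManager block further down offers a cert-manager-issued
+    ## alternative to this self-signed flow.)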
+ ## + annotations: {} + # argocd.argoproj.io/hook: PreSync + # argocd.argoproj.io/hook-delete-policy: HookSucceeded + + namespaceSelector: {} + objectSelector: {} + + mutatingWebhookConfiguration: + annotations: {} + # argocd.argoproj.io/hook: PreSync + + validatingWebhookConfiguration: + annotations: {} + # argocd.argoproj.io/hook: PreSync + + deployment: + enabled: false + + ## Number of replicas + ## + replicas: 1 + + ## Strategy of the deployment + ## + strategy: {} + + # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 + + ## Number of old replicasets to retain ## + ## The default value is 10, 0 will garbage-collect old replicasets ## + revisionHistoryLimit: 10 + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. + internalPort: 10250 + + ## Service account for Prometheus Operator Webhook to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + annotations: {} + automountServiceAccountToken: false + create: true + name: "" + + ## Configuration for Prometheus operator Webhook service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 31080 + + nodePortTls: 31443 + + ## Additional ports to open for Prometheus operator Webhook service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # ## Labels to add to the operator webhook deployment + # ## + labels: {} + + ## Annotations to add to the operator webhook deployment + ## + annotations: {} + + ## Labels to add to the operator webhook pod + ## + podLabels: {} + + ## Annotations to add to the operator webhook pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + ## Prometheus-operator webhook image + ## + image: + registry: quay.io + repository: prometheus-operator/admission-webhook + # if not set appVersion field from Chart.yaml is used + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + + ## Liveness probe + ## + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 
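+
+      ## (Reviewer note: the liveness/readiness settings in this deployment block only
+      ## take effect when deployment.enabled above is set to true; it defaults to false
+      ## in this file.)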
+ + ## Readiness probe + ## + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + + ## Container-specific security context configuration + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + ## If false then the user will opt out of automounting API credentials. + ## + automountServiceAccountToken: true + + patch: + enabled: true + image: + registry: registry.k8s.io + repository: ingress-nginx/kube-webhook-certgen + tag: v20221220-controller-v1.5.1-58-g787ea74b6 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + ttlSecondsAfterFinished: 60 + annotations: {} + # argocd.argoproj.io/hook: PreSync + # argocd.argoproj.io/hook-delete-policy: HookSucceeded + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + seccompProfile: + type: RuntimeDefault + ## Service account for Prometheus Operator Webhook Job Patch to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + annotations: {} + automountServiceAccountToken: true + + # Security context for create job container + createSecretJob: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Security context for patch job container + patchWebhookJob: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + duration: "" # default to be 5y + admissionCert: + duration: "" # default to be 1y + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). + ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + alertmanagerConfigNamespaces: [] + prometheusInstanceNamespaces: [] + thanosRulerInstanceNamespaces: [] + + ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. + ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) + ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 + ## + # clusterDomain: "cluster.local" + + networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## Flavor of the network policy to use. + # Can be: + # * kubernetes for networking.k8s.io/v1/NetworkPolicy + # * cilium for cilium.io/v2/CiliumNetworkPolicy + flavor: kubernetes + + # cilium: + # egress: + + ## match labels used in selector + # matchLabels: {} + + ## Service account for Prometheus Operator to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + annotations: {} + + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus operator service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # ## Labels to add to the operator deployment + # ## + labels: {} + + ## Annotations to add to the operator deployment + ## + annotations: {} + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + kubeletService: + ## If true, the operator will create and maintain a service for scraping kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md + ## + enabled: true + namespace: kube-system + selector: "" + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default + name: "" + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## If true, create a serviceMonitor for prometheus operator + ## + selfMonitor: true + + ## Labels for ServiceMonitor + additionalLabels: {} + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + ## Operator Environment + ## env: + ## VARIABLE: value + env: + GOGC: "30" + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + + ## Container-specific security context configuration + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Enable vertical pod autoscaler support for prometheus-operator + verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. + # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". 
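+      # Illustrative: updateMode: "Off" produces recommendations without evicting pods.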
+ updateMode: Auto + + ## Prometheus-operator image + ## + image: + registry: quay.io + repository: prometheus-operator/prometheus-operator + # if not set appVersion field from Chart.yaml is used + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Prometheus image to use for prometheuses managed by the operator + ## + # prometheusDefaultBaseImage: prometheus/prometheus + + ## Prometheus image registry to use for prometheuses managed by the operator + ## + # prometheusDefaultBaseImageRegistry: quay.io + + ## Alertmanager image to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImage: prometheus/alertmanager + + ## Alertmanager image registry to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImageRegistry: quay.io + + ## Prometheus-config-reloader + ## + prometheusConfigReloader: + image: + registry: quay.io + repository: prometheus-operator/prometheus-config-reloader + # if not set appVersion field from Chart.yaml is used + tag: "" + sha: "" + + # add prometheus config reloader liveness and readiness probe. Default: false + enableProbe: false + + # resource config for prometheusConfigReloader + resources: {} + # requests: + # cpu: 200m + # memory: 50Mi + # limits: + # cpu: 200m + # memory: 50Mi + + ## Thanos side-car image when configured + ## + thanosImage: + registry: quay.io + repository: thanos/thanos + tag: v0.36.1 + sha: "" + + ## Set a Label Selector to filter watched prometheus and prometheusAgent + ## + prometheusInstanceSelector: "" + + ## Set a Label Selector to filter watched alertmanager + ## + alertmanagerInstanceSelector: "" + + ## Set a Label Selector to filter watched thanosRuler + thanosRulerInstanceSelector: "" + + ## Set a Field Selector to filter watched secrets + ## + secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1" + + ## If false then the user will opt out of automounting API credentials. + ## + automountServiceAccountToken: true + + ## Additional volumes + ## + extraVolumes: [] + + ## Additional volume mounts + ## + extraVolumeMounts: [] + +## Deploy a Prometheus instance +## +prometheus: + enabled: true + + ## Toggle prometheus into agent mode + ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/designs/prometheus-agent.md + ## + agentMode: false + + ## Annotations for Prometheus + ## + annotations: {} + + ## Configure network policy for the prometheus + networkPolicy: + enabled: false + + ## Flavor of the network policy to use. + # Can be: + # * kubernetes for networking.k8s.io/v1/NetworkPolicy + # * cilium for cilium.io/v2/CiliumNetworkPolicy + flavor: kubernetes + + # cilium: + # endpointSelector: + # egress: + # ingress: + + # egress: + # - {} + # ingress: + # - {} + # podSelector: + # matchLabels: + # app: prometheus + + ## Service account for Prometheuses to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+ annotations: {}
+ automountServiceAccountToken: true
+
+ # Service for Thanos service discovery on the sidecar
+ # Enabling this lets Thanos Query use
+ # `--store=dnssrv+_grpc._tcp.$${kube-prometheus-stack.fullname}-thanos-discovery.$${namespace}.svc.cluster.local` to discover the
+ # Thanos sidecar on prometheus nodes
+ # (Remember to substitute $${kube-prometheus-stack.fullname} and $${namespace}; do not copy and paste verbatim!)
+ thanosService:
+ enabled: false
+ annotations: {}
+ labels: {}
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## Service dual stack
+ ##
+ ipDualStack:
+ enabled: false
+ ipFamilies: ["IPv6", "IPv4"]
+ ipFamilyPolicy: "PreferDualStack"
+
+ ## gRPC port config
+ portName: grpc
+ port: 10901
+ targetPort: "grpc"
+
+ ## HTTP port config (for metrics)
+ httpPortName: http
+ httpPort: 10902
+ targetHttpPort: "http"
+
+ ## ClusterIP to assign
+ # Default is to make this a headless service ("None")
+ clusterIP: "None"
+
+ ## Port to expose on each node, if service type is NodePort
+ ##
+ nodePort: 30901
+ httpNodePort: 30902
+
+ # ServiceMonitor to scrape Sidecar metrics
+ # Needs thanosService to be enabled as well
+ thanosServiceMonitor:
+ enabled: false
+ interval: ""
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+
+ ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+ scheme: ""
+
+ ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+ tlsConfig: {}
+
+ bearerTokenFile:
+
+ ## Metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+
+ ## relabel configs to apply to samples before ingestion.
+ relabelings: []
+
+ # Service for external access to sidecar
+ # Enabling this creates a service to expose thanos-sidecar outside the cluster.
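+ # Reviewer note: the sidecar's Store API has no authentication of its own, so an
+ # externally exposed LoadBalancer should be locked down, for example via the
+ # loadBalancerSourceRanges field below (CIDR purely illustrative):
+ #   loadBalancerSourceRanges:
+ #     - 10.0.0.0/8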
+ thanosServiceExternal: + enabled: false + annotations: {} + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: LoadBalancer + + ## Port to expose on each node + ## + nodePort: 30901 + httpNodePort: 30902 + + ## Configuration for Prometheus service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port for Prometheus Service to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port for Prometheus Reloader to listen on + ## + reloaderWebPort: 8080 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30090 + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Additional ports to open for Prometheus service + ## + additionalPorts: [] + # additionalPorts: + # - name: oauth-proxy + # port: 8081 + # targetPort: 8081 + # - name: oauth-metrics + # port: 8082 + # targetPort: 8082 + + ## Consider that all endpoints are considered "ready" even if the Pods themselves are not + ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + + ## If you want to make sure that connections from a particular client are passed to the same Pod each time + ## Accepts 'ClientIP' or 'None' + ## + sessionAffinity: None + + ## If you want to modify the ClientIP sessionAffinity timeout + ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" + ## + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 + + ## Configuration for creating a separate Service for each statefulset Prometheus replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Prometheus Service per replica to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30091 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Service dual stack + ## + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Configure pod disruption budgets for Prometheus + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + # Ingress exposes thanos 
sidecar outside the cluster + thanosIngress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + servicePort: 10901 + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30901 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - thanos-gateway.domain.com + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Thanos Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: thanos-gateway-tls + # hosts: + # - thanos-gateway.domain.com + # + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Hostnames. + ## Must be provided if Ingress is enabled. 
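+    ## (Reviewer note: when an external ingress controller such as ingress-nginx
+    ## handles routing, this chart-managed Ingress is typically left disabled, as it
+    ## is here.)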
+ ## + # hosts: + # - prometheus.domain.com + hosts: [] + + ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Prometheus Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-general-tls + # hosts: + # - prometheus.example.com + + ## Configuration for creating an Ingress that will map to each Prometheus replica service + ## prometheus.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for Prometheus per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "prometheus" + + ## Configure additional options for default pod security policy for Prometheus + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + allowedCapabilities: [] + allowedHostPaths: [] + volumes: [] + + serviceMonitor: + ## If true, create a serviceMonitor for prometheus + ## + selfMonitor: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## scheme: HTTP scheme to use for scraping. 
Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional Endpoints + ## + additionalEndpoints: [] + # - port: oauth-metrics + # path: /metrics + + ## Settings affecting prometheusSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec + ## + prometheusSpec: + ## Statefulset's persistent volume claim retention policy + ## pvcDeleteOnStsDelete and pvcDeleteOnStsScale determine whether + ## statefulset's PVCs are deleted (true) or retained (false) on scaling down + ## and deleting statefulset, respectively. Requires 1.27.0+. + ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + persistentVolumeClaimRetentionPolicy: {} + # whenDeleted: Retain + # whenScaled: Retain + + ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos + ## + ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod, + ## If the field isn’t set, the operator mounts the service account token by default. + ## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery, + ## It is possible to use strategic merge patch to project the service account token into the ‘prometheus’ container. + automountServiceAccountToken: true + + disableCompaction: false + ## APIServerConfig + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig + ## + apiserverConfig: {} + + ## Allows setting additional arguments for the Prometheus container + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus + additionalArgs: [] + + ## Interval between consecutive scrapes. + ## Defaults to 30s. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 + ## + scrapeInterval: "" + + ## Number of seconds to wait for target to respond before erroring + ## + scrapeTimeout: "" + + ## List of scrape classes to expose to scraping objects such as + ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs. + ## + scrapeClasses: [] + # - name: istio-mtls + # default: false + # tlsConfig: + # caFile: /etc/prometheus/secrets/istio.default/root-cert.pem + # certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem + + ## Interval between consecutive evaluations. + ## + evaluationInterval: "" + + ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. 
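+    ## (Reviewer note: with listenLocal: true, scrapes and probes can no longer reach
+    ## the server directly and are usually routed through a sidecar proxy; left false
+    ## here.)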
+ ## + listenLocal: false + + ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. + ## This is disabled by default. + ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + ## + enableAdminAPI: false + + ## Sets version of Prometheus overriding the Prometheus version as derived + ## from the image tag. Useful in cases where the tag does not follow semver v2. + version: "" + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig + web: {} + + ## Exemplars related settings that are runtime reloadable. + ## It requires to enable the exemplar storage feature to be effective. + exemplars: "" + ## Maximum number of exemplars stored in memory for all series. + ## If not set, Prometheus uses its default value. + ## A value of zero or less than zero disables the storage. + # maxSize: 100000 + + # EnableFeatures API enables access to Prometheus disabled features. + # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ + enableFeatures: [] + # - exemplar-storage + + ## Image of Prometheus. + ## + image: + registry: quay.io + repository: prometheus/prometheus + tag: v2.54.1 + sha: "" + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: prometheus + + ## Alertmanagers to which alerts will be sent + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints + ## + ## Default configuration will connect to the alertmanager deployed as part of this release + ## + alertingEndpoints: [] + # - name: "" + # namespace: "" + # port: http + # scheme: http + # pathPrefix: "" + # tlsConfig: {} + # bearerTokenFile: "" + # apiVersion: v2 + + ## External labels to add to any time series or alerts when communicating with external systems + ## + externalLabels: {} + + ## enable --web.enable-remote-write-receiver flag on prometheus-server + ## + enableRemoteWriteReceiver: false + + ## Name of the external label used to denote replica name + ## + replicaExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote replica name + ## + replicaExternalLabelNameClear: false + + ## Name of the external label used to denote Prometheus instance name + ## + prometheusExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote Prometheus instance name + ## + prometheusExternalLabelNameClear: false + + ## External URL at which Prometheus will be reachable. + ## + externalUrl: "" + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The Secrets are mounted into /etc/prometheus/secrets/. 
Secrets changes after initial creation of a Prometheus object are not + ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated + ## with the new list of secrets. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. + ## + configMaps: [] + + ## QuerySpec defines the query command line flags when starting Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec + ## + query: {} + + ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery. + ruleNamespaceSelector: {} + ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel" + # ruleNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## PrometheusRules to be selected for target discovery. + ## If matchLabels.release: "{{ $.Release.Name }}" the prometheus resource will be created + ## with selectors based on values in the helm deployment, which will also match the scrapeConfigs created + ## To remove matchLabels from the selector condition, explicitly set matchLabels to null. + ## If no other selectors are configured, prometheus-operator will select all scrapeConfigs. + ## To remove the release label from the matchLabels condition, explicit set release to null. + ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD) + ## + ruleSelector: + matchLabels: + release: "{{ $.Release.Name }}" + ## Example which select all PrometheusRules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which select all PrometheusRules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## ServiceMonitors to be selected for target discovery. + ## If matchLabels.release: "{{ $.Release.Name }}" the prometheus resource will be created + ## with selectors based on values in the helm deployment, which will also match the scrapeConfigs created + ## To remove matchLabels from the selector condition, explicitly set matchLabels to null. + ## If no other selectors are configured, prometheus-operator will select all scrapeConfigs. + ## To remove the release label from the matchLabels condition, explicit set release to null. + ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD) + ## + serviceMonitorSelector: + matchLabels: + release: "{{ $.Release.Name }}" + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## + serviceMonitorNamespaceSelector: {} + ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" + # serviceMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## PodMonitors to be selected for target discovery. 
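+    ## (Reviewer note: because of the release-label selectors used throughout this
+    ## block, hand-written ServiceMonitor/PodMonitor objects must carry the Helm
+    ## release label to be discovered; the label value below is illustrative and
+    ## must match the actual release name:
+    # metadata:
+    #   labels:
+    #     release: kube-prometheus-stack
+    ## Alternatively set matchLabels to null, as described in the comments that follow.)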
+ ## PodMonitors to be selected for target discovery.
+ ## If matchLabels.release: "{{ $.Release.Name }}" is set, the Prometheus resource will be created
+ ## with selectors based on values in the Helm deployment, which will also match the PodMonitors created by this chart.
+ ## To remove matchLabels from the selector condition, explicitly set matchLabels to null.
+ ## If no other selectors are configured, prometheus-operator will select all PodMonitors.
+ ## To remove the release label from the matchLabels condition, explicitly set release to null.
+ ## If null, the field is excluded from the prometheusSpec (keeping downward compatibility with older versions of the CRD)
+ ##
+ podMonitorSelector:
+   matchLabels:
+     release: "{{ $.Release.Name }}"
+ ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
+ # podMonitorSelector:
+ #   matchLabels:
+ #     prometheus: somelabel
+
+ ## Namespaces to be selected for PodMonitor discovery. If nil, only the Prometheus object's own namespace is selected.
+ podMonitorNamespaceSelector: {}
+ ## Example which selects PodMonitors in namespaces with label "prometheus" set to "somelabel"
+ # podMonitorNamespaceSelector:
+ #   matchLabels:
+ #     prometheus: somelabel
+
+ ## Probes to be selected for target discovery.
+ ## If matchLabels.release: "{{ $.Release.Name }}" is set, the Prometheus resource will be created
+ ## with selectors based on values in the Helm deployment, which will also match the Probes created by this chart.
+ ## To remove matchLabels from the selector condition, explicitly set matchLabels to null.
+ ## If no other selectors are configured, prometheus-operator will select all Probes.
+ ## To remove the release label from the matchLabels condition, explicitly set release to null.
+ ## If null, the field is excluded from the prometheusSpec (keeping downward compatibility with older versions of the CRD)
+ ##
+ probeSelector:
+   matchLabels:
+     release: "{{ $.Release.Name }}"
+ ## Example which selects Probes with label "prometheus" set to "somelabel"
+ # probeSelector:
+ #   matchLabels:
+ #     prometheus: somelabel
+
+ ## Namespaces to be selected for Probe discovery. If nil, only the Prometheus object's own namespace is selected.
+ probeNamespaceSelector: {}
+ ## Example which selects Probes in namespaces with label "prometheus" set to "somelabel"
+ # probeNamespaceSelector:
+ #   matchLabels:
+ #     prometheus: somelabel
+
+ ## scrapeConfigs to be selected for target discovery.
+ ## If matchLabels.release: "{{ $.Release.Name }}" is set, the Prometheus resource will be created
+ ## with selectors based on values in the Helm deployment, which will also match the scrapeConfigs created by this chart.
+ ## To remove matchLabels from the selector condition, explicitly set matchLabels to null.
+ ## If no other selectors are configured, prometheus-operator will select all scrapeConfigs.
+ ## To remove the release label from the matchLabels condition, explicitly set release to null.
+ ## If null, the field is excluded from the prometheusSpec (keeping downward compatibility with older versions of the CRD)
+ ##
+ scrapeConfigSelector:
+   matchLabels:
+     release: "{{ $.Release.Name }}"
+ ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel" while nulling the release label
+ # scrapeConfigSelector:
+ #   matchLabels:
+ #     release: ~
+ #     prometheus: somelabel
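+ ## A ScrapeConfig resource matching the selector above might look like this
+ ## (sketch; the name and target are hypothetical, and the release label must
+ ## match the actual Helm release name):
+ # apiVersion: monitoring.coreos.com/v1alpha1
+ # kind: ScrapeConfig
+ # metadata:
+ #   name: external-target
+ #   labels:
+ #     release: kube-prometheus-stack
+ # spec:
+ #   staticConfigs:
+ #     - targets:
+ #         - external-host.example.com:9100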
+ ## Namespaces to be selected for scrapeConfig discovery. If nil, only the Prometheus object's own namespace is selected.
+ ## If null, the field is excluded from the prometheusSpec (keeping downward compatibility with older versions of the CRD)
+ scrapeConfigNamespaceSelector: {}
+ ## Example which selects scrapeConfigs in namespaces with label "prometheus" set to "somelabel"
+ # scrapeConfigNamespaceSelector:
+ #   matchLabels:
+ #     prometheus: somelabel
+
+ ## How long to retain metrics
+ ##
+ retention: 10d
+
+ ## Maximum size of metrics
+ ##
+ retentionSize: ""
+
+ ## Allow out-of-order/out-of-bounds samples to be ingested into Prometheus for a specified duration
+ ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
+ tsdb:
+   outOfOrderTimeWindow: 0s
+
+ ## Enable compression of the write-ahead log using Snappy.
+ ##
+ walCompression: true
+
+ ## If true, the Operator won't process any Prometheus configuration changes
+ ##
+ paused: false
+
+ ## Number of replicas of each shard to deploy for a Prometheus deployment.
+ ## The number of replicas multiplied by the number of shards is the total number of Pods created.
+ ##
+ replicas: 1
+
+ ## EXPERIMENTAL: Number of shards to distribute targets onto.
+ ## The number of replicas multiplied by the number of shards is the total number of Pods created.
+ ## Note that scaling down shards will not reshard data onto the remaining instances; it must be moved manually.
+ ## Increasing shards will not reshard data either, but it will continue to be available from the same instances.
+ ## To query globally, use the Thanos sidecar and Thanos querier, or remote-write data to a central location.
+ ## Sharding is done on the content of the `__address__` target meta-label.
+ ##
+ shards: 1
+
+ ## Log level for Prometheus.
+ ##
+ logLevel: info
+
+ ## Log format for Prometheus.
+ ##
+ logFormat: logfmt
+
+ ## Prefix used to register routes, overriding the externalUrl route.
+ ## Useful for proxies that rewrite URLs.
+ ##
+ routePrefix: /
+
+ ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+ ## Metadata labels and annotations get propagated to the Prometheus pods.
+ ##
+ podMetadata: {}
+ # labels:
+ #   app: prometheus
+ #   k8s-app: prometheus
+
+ ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+ ## The value "soft" means that the scheduler should *prefer* not to schedule two replica pods onto the same node, but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* not to schedule two replica pods onto the same node.
+ ## The value "" disables pod anti-affinity, so that no anti-affinity rules will be configured.
+ podAntiAffinity: ""
+
+ ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, topology.kubernetes.io/zone
+ ##
+ podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+ ## Assign custom affinity rules to the Prometheus instance
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ affinity: {}
+ # nodeAffinity:
+ #   requiredDuringSchedulingIgnoredDuringExecution:
+ #     nodeSelectorTerms:
+ #       - matchExpressions:
+ #           - key: kubernetes.io/e2e-az-name
+ #             operator: In
+ #             values:
+ #               - e2e-az1
+ #               - e2e-az2
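+ ## With multiple replicas, one might spread them across zones via the
+ ## anti-affinity options above (illustrative sketch; the values are assumptions):
+ # replicas: 2
+ # podAntiAffinity: "hard"
+ # podAntiAffinityTopologyKey: topology.kubernetes.io/zone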
+ ## The remote_read spec configuration for Prometheus.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
+ remoteRead: []
+ # - url: http://remote1/read
+ ## additionalRemoteRead is appended to remoteRead
+ additionalRemoteRead: []
+
+ ## The remote_write spec configuration for Prometheus.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
+ remoteWrite: []
+ # - url: http://remote1/push
+ ## additionalRemoteWrite is appended to remoteWrite
+ additionalRemoteWrite: []
+
+ ## Enable/disable Grafana dashboard provisioning for the Prometheus remote-write feature
+ remoteWriteDashboards: false
+
+ ## Resource limits & requests
+ ##
+ resources: {}
+ # requests:
+ #   memory: 400Mi
+
+ ## Prometheus StorageSpec for persistent data
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ ##
+ storageSpec: {}
+ ## Using PersistentVolumeClaim
+ ##
+ # volumeClaimTemplate:
+ #   spec:
+ #     storageClassName: gluster
+ #     accessModes: ["ReadWriteOnce"]
+ #     resources:
+ #       requests:
+ #         storage: 50Gi
+ #   selector: {}
+
+ ## Using tmpfs volume
+ ##
+ # emptyDir:
+ #   medium: Memory
+
+ # Additional volumes on the output StatefulSet definition.
+ volumes: []
+
+ # Additional VolumeMounts on the output StatefulSet definition.
+ volumeMounts: []
+
+ ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
+ ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
+ ## specified in the official Prometheus documentation:
+ ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
+ ## appended, the user is responsible for making sure they are valid. Note that using this feature may break
+ ## upgrades of Prometheus. It is advised to review the Prometheus release notes to ensure that no incompatible
+ ## scrape configs will break Prometheus after the upgrade.
+ ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
+ ##
+ ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
+ ## port to 2379, and allow etcd scraping provided it is running on all Kubernetes master nodes
+ ##
+ additionalScrapeConfigs: []
+ # - job_name: kube-etcd
+ #   kubernetes_sd_configs:
+ #     - role: node
+ #   scheme: https
+ #   tls_config:
+ #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+ #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+ #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+ #   relabel_configs:
+ #     - action: labelmap
+ #       regex: __meta_kubernetes_node_label_(.+)
+ #     - source_labels: [__address__]
+ #       action: replace
+ #       target_label: __address__
+ #       regex: ([^:;]+):(\d+)
+ #       replacement: ${1}:2379
+ #     - source_labels: [__meta_kubernetes_node_name]
+ #       action: keep
+ #       regex: .*mst.*
+ #     - source_labels: [__meta_kubernetes_node_name]
+ #       action: replace
+ #       target_label: node
+ #       regex: (.*)
+ #       replacement: ${1}
+ #   metric_relabel_configs:
+ #     - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
+ #       action: labeldrop
+ #
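+ ## Note: the etcd example above reads certificates from /etc/prometheus/secrets/etcd-client-cert/,
+ ## which only exists if a Secret of that (hypothetical) name is listed under `secrets:` earlier in this spec:
+ # secrets:
+ #   - etcd-client-cert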
+ ## If a scrape config contains a repetitive section, you may want to use a template.
+ ## The following example shows how to define `gce_sd_configs` for multiple zones:
+ # additionalScrapeConfigs: |
+ #   - job_name: "node-exporter"
+ #     gce_sd_configs:
+ #     {{range $zone := .Values.gcp_zones}}
+ #       - project: "project1"
+ #         zone: "{{$zone}}"
+ #         port: 9100
+ #     {{end}}
+ #     relabel_configs:
+ #     ...
+
+ ## If additional scrape configurations are already deployed in a single secret file, you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalScrapeConfigs
+ additionalScrapeConfigsSecret: {}
+ # enabled: false
+ # name:
+ # key:
+
+ ## additionalPrometheusSecretsAnnotations allows adding annotations to the Kubernetes secret. This can be useful
+ ## when deploying via Spinnaker to disable versioning on the secret: strategy.spinnaker.io/versioned: 'false'
+ additionalPrometheusSecretsAnnotations: {}
+
+ ## AdditionalAlertManagerConfigs allows for manual configuration of Alertmanager jobs in the form specified
+ ## in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
+ ## Alertmanager configurations specified are appended to the configurations generated by the Prometheus Operator.
+ ## As Alertmanager configs are appended, the user is responsible for making sure they are valid. Note that using this
+ ## feature may break upgrades of Prometheus. It is advised to review the Prometheus release
+ ## notes to ensure that no incompatible Alertmanager configs will break Prometheus after the upgrade.
+ ##
+ additionalAlertManagerConfigs: []
+ # - consul_sd_configs:
+ #     - server: consul.dev.test:8500
+ #       scheme: http
+ #       datacenter: dev
+ #       tag_separator: ','
+ #       services:
+ #         - metrics-prometheus-alertmanager
+
+ ## If additional Alertmanager configurations are already deployed in a single secret, or you want to manage
+ ## them separately from the Helm deployment, you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalAlertManagerConfigs
+ additionalAlertManagerConfigsSecret: {}
+ # name:
+ # key:
+ # optional: false
+
+ ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
+ ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form specified in the
+ ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
+ ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may break
+ ## upgrades of Prometheus. It is advised to review the Prometheus release notes to ensure that no incompatible alert relabel
+ ## configs will break Prometheus after the upgrade.
+ ##
+ additionalAlertRelabelConfigs: []
+ # - separator: ;
+ #   regex: prometheus_replica
+ #   replacement: $1
+ #   action: labeldrop
+
+ ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
+ ## them separately from the Helm deployment, you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalAlertRelabelConfigs
+ additionalAlertRelabelConfigsSecret: {}
+ # name:
+ # key:
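+ ## Example (hypothetical Secret name and key; the Secret must contain valid
+ ## alert_relabel_configs YAML under the given key):
+ # additionalAlertRelabelConfigsSecret:
+ #   name: alert-relabel-configs
+ #   key: relabel.yaml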
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to a non-root user with uid 1000 and gid 2000.
+ ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
+ ##
+ securityContext:
+   runAsGroup: 2000
+   runAsNonRoot: true
+   runAsUser: 1000
+   fsGroup: 2000
+   seccompProfile:
+     type: RuntimeDefault
+
+ ## Priority class assigned to the Pods
+ ##
+ priorityClassName: ""
+
+ ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
+ ## This section is experimental and may change significantly without deprecation notice or backward compatibility in any release.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
+ ##
+ thanos: {}
+ # secretProviderClass:
+ #   provider: gcp
+ #   parameters:
+ #     secrets: |
+ #       - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
+ #         fileName: "objstore.yaml"
+ ## ObjectStorageConfig configures object storage in Thanos.
+ # objectStorageConfig:
+ #   # Use an existing secret; if configured, objectStorageConfig.secret will not be used
+ #   existingSecret: {}
+ #   # name: ""
+ #   # key: ""
+ #   # Will render objectStorageConfig secret data and configure it to be used by the Thanos custom resource;
+ #   # ignored when prometheusSpec.thanos.objectStorageConfig.existingSecret is set
+ #   # https://thanos.io/tip/thanos/storage.md/#s3
+ #   secret: {}
+ #   # type: S3
+ #   # config:
+ #   #   bucket: ""
+ #   #   endpoint: ""
+ #   #   region: ""
+ #   #   access_key: ""
+ #   #   secret_key: ""
+
+ ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
+ ## If using a proxy extraContainer, update targetPort with the proxy container port.
+ containers: []
+ # containers:
+ #   - name: oauth-proxy
+ #     image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
+ #     args:
+ #       - --upstream=http://127.0.0.1:9090
+ #       - --http-address=0.0.0.0:8081
+ #       - --metrics-address=0.0.0.0:8082
+ #       - ...
+ #     ports:
+ #       - containerPort: 8081
+ #         name: oauth-proxy
+ #         protocol: TCP
+ #       - containerPort: 8082
+ #         name: oauth-metrics
+ #         protocol: TCP
+ #     resources: {}
+
+ ## InitContainers allows injecting additional initContainers. This is meant to allow making changes
+ ## (permissions, dir tree) on mounted volumes before starting Prometheus.
+ initContainers: []
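+ ## A minimal initContainer sketch (the volume name "prometheus-data" is an
+ ## assumption and must match an actual volume, e.g. one defined via storageSpec):
+ # initContainers:
+ #   - name: init-create-subdir
+ #     image: busybox:1.36
+ #     command: ["sh", "-c", "mkdir -p /prometheus/snapshots"]
+ #     volumeMounts:
+ #       - name: prometheus-data
+ #         mountPath: /prometheus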
+ ## PortName to use for Prometheus.
+ ##
+ portName: "http-web"
+
+ ## ArbitraryFSAccessThroughSMs configures whether configuration based on a ServiceMonitor can access arbitrary files
+ ## on the file system of the Prometheus container, e.g. bearer token files.
+ arbitraryFSAccessThroughSMs: false
+
+ ## OverrideHonorLabels, if set to true, overrides all user-configured honor_labels. If honor_labels is set to true in a ServiceMonitor
+ ## or PodMonitor, this overrides honor_labels to false.
+ overrideHonorLabels: false
+
+ ## OverrideHonorTimestamps allows globally enforcing honoring of timestamps in all scrape configs.
+ overrideHonorTimestamps: false
+
+ ## When ignoreNamespaceSelectors is set to true, the namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored;
+ ## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor or Probe object,
+ ## and ServiceMonitors will be installed in the default service namespace.
+ ## Defaults to false.
+ ignoreNamespaceSelectors: false
+
+ ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each user-created alert and metric.
+ ## The label value will always be the namespace of the object that is being created.
+ ## Disabled by default
+ enforcedNamespaceLabel: ""
+
+ ## PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from the enforced adding of namespace labels.
+ ## Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are set for each pair.
+ ## Deprecated, use `excludedFromEnforcement` instead
+ prometheusRulesExcludedFromEnforce: []
+
+ ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
+ ## to be excluded from enforcing a namespace label of origin.
+ ## Works only if enforcedNamespaceLabel is set.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
+ excludedFromEnforcement: []
+
+ ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
+ ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
+ ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
+ ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
+ queryLogFile: false
+
+ # Use to set a global sample_limit for Prometheus. This acts as the default SampleLimit for ServiceMonitors and/or PodMonitors.
+ # Set to 'false' to disable the global sample_limit, or set to a number to override the default value.
+ sampleLimit: false
+
+ # EnforcedKeepDroppedTargetsLimit defines a limit on the number of targets dropped by relabeling that will be kept in memory.
+ # The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor or Probe objects unless spec.keepDroppedTargets
+ # is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
+ enforcedKeepDroppedTargets: 0
+
+ ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
+ ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
+ ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
+ enforcedSampleLimit: false
+
+ ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
+ ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
+ ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
+ ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
+ enforcedTargetLimit: false
+
+ ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels are present
+ ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
+ ## 2.27.0 and newer.
+ enforcedLabelLimit: false
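+ ## Example of enforcing global limits (illustrative numbers only; tune to the
+ ## cluster's actual cardinality budget):
+ # enforcedSampleLimit: 500000
+ # enforcedTargetLimit: 1000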
+ ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
+ ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
+ ## 2.27.0 and newer.
+ enforcedLabelNameLengthLimit: false
+
+ ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
+ ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
+ ## versions 2.27.0 and newer.
+ enforcedLabelValueLengthLimit: false
+
+ ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
+ ## in Prometheus, so it may change in any upcoming release.
+ allowOverlappingBlocks: false
+
+ ## Minimum number of seconds for which a newly created pod should be ready, without any of its containers crashing, for it to
+ ## be considered available. Defaults to 0 (the pod will be considered available as soon as it is ready).
+ minReadySeconds: 0
+
+ # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico),
+ # because a control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks do not work.
+ # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
+ # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
+ hostNetwork: false
+
+ # HostAliases holds the mappings between IPs and hostnames that will be injected
+ # as entries in the pod's hosts file.
+ hostAliases: []
+ # - ip: 10.10.0.100
+ #   hostnames:
+ #     - a1.app.local
+ #     - b1.app.local
+
+ ## TracingConfig configures tracing in Prometheus.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheustracingconfig
+ tracingConfig: {}
+
+ ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
+ ## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role.
+ serviceDiscoveryRole: ""
+
+ ## Additional configuration which is not covered by the properties above. (passed through tpl)
+ additionalConfig: {}
+
+ ## Additional configuration which is not covered by the properties above.
+ ## Useful if you need advanced templating inside prometheusSpec.
+ ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
+ additionalConfigString: ""
+
+ ## Defines the maximum time that the `prometheus` container's startup probe
+ ## will wait before being considered failed. The startup probe will return
+ ## success after the WAL replay is complete. If set, the value should be
+ ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
+ ## minutes).
+ maximumStartupDurationSeconds: 0
+
+ additionalRulesForClusterRole: []
+ # - apiGroups: [ "" ]
+ #   resources:
+ #     - nodes/proxy
+ #   verbs: [ "get", "list", "watch" ]
+
+ additionalServiceMonitors: []
+ ## Name of the ServiceMonitor to create
+ ##
+ # - name: ""
+
+ ## Additional labels to set, used for the ServiceMonitorSelector. Together with the standard labels from
+ ## the chart
+ ##
+ # additionalLabels: {}
+
+ ## Service label for use in assembling a job name of the form