diff --git a/.dockerignore b/.dockerignore index 0f04682..f784a1e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,7 @@ # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file # Ignore build and test binaries. +.git/ +.github/ +.husky/ bin/ testbin/ diff --git a/.earthlyignore b/.earthlyignore new file mode 100644 index 0000000..4807936 --- /dev/null +++ b/.earthlyignore @@ -0,0 +1,6 @@ +.git/ +.github/ +.husky/ +.vscode/ +bin/ +kind-logs-* \ No newline at end of file diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml deleted file mode 100644 index 443e501..0000000 --- a/.github/workflows/_docker-build.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: docker build - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} - IMAGE_TAG: ${{ github.sha }} -jobs: - build-and-push: - runs-on: ubuntu-latest - permissions: - packages: write - - steps: - - name: Harden Runner - uses: step-security/harden-runner@9b0655f430fba8c7001d4e38f8d4306db5c6e0ab - with: - egress-policy: audit - - - name: checkout repository - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - - name: log in to ghrc.io - uses: docker/login-action@1edf6180e07d2ffb423fc48a1a552855c0a1f508 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: build and container image - uses: docker/build-push-action@9472e9021074a3cb3279ba431598b8836d40433f - with: - context: . 
- push: true - tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} diff --git a/.github/workflows/_gocilint.yml b/.github/workflows/_gocilint.yml deleted file mode 100644 index 50a5108..0000000 --- a/.github/workflows/_gocilint.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: golangci-lint - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f - - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - name: golangci-lint - uses: golangci/golangci-lint-action@c3ef0c370269e2a25b67c7f8e03d37e6cb106cb9 - with: - version: latest - args: --timeout 300s diff --git a/.github/workflows/_gosecscan.yml b/.github/workflows/_gosecscan.yml deleted file mode 100644 index d5f1d1b..0000000 --- a/.github/workflows/_gosecscan.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: "gosec" - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -jobs: - build: - name: scan - runs-on: ubuntu-latest - env: - GO111MODULE: on - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - name: checkout repo - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - name: run gosec scan - uses: securego/gosec@a64cde55a4499d951566243783f204e94b9197ed - with: - args: "-exclude-dir=bin -exclude-dir=drivers -exclude-generated ./..." 
diff --git a/.github/workflows/_kuttl.yml b/.github/workflows/_kuttl.yml deleted file mode 100644 index 55bb297..0000000 --- a/.github/workflows/_kuttl.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: kuttl test - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -env: - IMG: local/discoblocks:e2e - KUTTL: /usr/local/bin/kubectl-kuttl - KUBECTL_STORAGEOS: /usr/local/bin/kubectl-storageos - PLUGIN_PATH: ./kubectl-storageos - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} - IMAGE_TAG: ${{ github.sha }} -jobs: - test: - name: test - runs-on: ubuntu-latest - steps: - - name: checkout - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - name: Set up Go - uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f - with: - go-version: 1.18 - - name: Determine latest version - run: echo "LATEST_VERSION=$(curl -s https://api.github.com/repos/storageos/kubectl-storageos/releases/latest | grep tag_name | head -1 | cut -d'"' -f4 | tr -d v)" >> $GITHUB_ENV - - name: install storageos plugin - run: | - sudo curl -sSLo kubectl-storageos.tar.gz https://github.com/storageos/kubectl-storageos/releases/download/v${{ env.LATEST_VERSION }}/kubectl-storageos_${{ env.LATEST_VERSION }}_linux_amd64.tar.gz - sudo tar -xf kubectl-storageos.tar.gz - sudo chmod +x kubectl-storageos - sudo mv kubectl-storageos $KUBECTL_STORAGEOS - - name: install kuttl - run: | - sudo curl -Lo $KUTTL https://github.com/kudobuilder/kuttl/releases/download/v0.11.1/kubectl-kuttl_0.11.1_linux_x86_64 - sudo chmod +x $KUTTL - - name: pull and tag image - run: | - docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} ${{ env.IMG }} - - name: run kuttl - run: kubectl-kuttl test --config tests/e2e/kuttl/kuttl-config-1.23.yaml - - uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # v3.1.1 - if: ${{ always() }} - with: - name: kind-logs - path: 
kind-logs-* diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml deleted file mode 100644 index 10239fa..0000000 --- a/.github/workflows/_test.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: "test" - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -jobs: - test: - name: Test - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - name: Set up Go - uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f - with: - go-version: 1.18 - - name: Execute test - run: make manifests generate test diff --git a/.github/workflows/_trivy.yml b/.github/workflows/_trivy.yml deleted file mode 100644 index 5f39c77..0000000 --- a/.github/workflows/_trivy.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Vulnerability Scanning - -on: workflow_call - -permissions: - contents: read - pull-requests: read - -jobs: - build: - name: Build - runs-on: ubuntu-latest - permissions: - security-events: write - steps: - - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@f39d29766a1eb7432c47f6bb7b64ed70b2241524 - with: - image-ref: ghcr.io/ondat/discoblocks:${{ github.sha }} - format: 'template' - template: '@/contrib/sarif.tpl' - output: 'trivy-results.sarif' - severity: 'CRITICAL,HIGH' - - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@1fc1008278d05ba9455caf083444e6c5a1a3cfd8 - with: - sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/e2e-on-pr.yaml b/.github/workflows/e2e-on-pr.yaml new file mode 100644 index 0000000..ad8c448 --- /dev/null +++ b/.github/workflows/e2e-on-pr.yaml @@ -0,0 +1,80 @@ +# in test don't use yet + +name: end-2-end build + +on: + pull_request: + branches: [ main ] + workflow_dispatch: + +permissions: + contents: read + pull-requests: read + actions: read + security-events: write + packages: write + +concurrency: + group: ci-e2e-${{ github.ref }}-1 + cancel-in-progress: true + 
+env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + IMAGE_TAG: ${{ github.sha }} +jobs: + build-and-push: + runs-on: ubuntu-latest + permissions: + security-events: write + packages: write + steps: + - name: harden runner + uses: step-security/harden-runner@9b0655f430fba8c7001d4e38f8d4306db5c6e0ab + with: + egress-policy: audit + - name: checkout repository + uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 + - name: log in to ghrc.io + uses: docker/login-action@1edf6180e07d2ffb423fc48a1a552855c0a1f508 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: restore cache Earthly build + uses: actions/cache/restore@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache + - name: run golangci-lint + run: make lint + - name: run gosec scan + run: make gosec + - name: run test + run: make test + - name: run image scan + run: make scan-image + - name: generate bundle manifest + run: make bundle + - name: run e2e test + run: make e2e-test + - name: upload Kind logs + uses: actions/upload-artifact@v3 + if: always() + with: + name: kind-logs + path: kind-logs-* + - name: upload Trivy scan results to GitHub Security tab + if: always() + uses: github/codeql-action/upload-sarif@1fc1008278d05ba9455caf083444e6c5a1a3cfd8 + with: + sarif_file: 'trivy-results.sarif' + - name: set owner of earthly cache + if: always() + run: sudo chown -R $(whoami) /var/lib/docker/volumes/earthly-cache + - name: save cache Earthly build + if: always() + uses: actions/cache/save@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache \ No newline at end of file diff --git a/.github/workflows/e2e-on-pr.yml b/.github/workflows/e2e-on-pr.yml deleted file mode 100644 index 00b857a..0000000 --- a/.github/workflows/e2e-on-pr.yml +++ /dev/null @@ -1,41 +0,0 @@ -# in test don't use yet - -name: end-2-end build - -on: - pull_request: - branches: [ main ] - 
workflow_dispatch: - -permissions: - contents: read - pull-requests: read - actions: read - security-events: write - packages: write - -concurrency: - group: ci-e2e-${{ github.ref }}-1 - cancel-in-progress: true - -jobs: - test: - uses: ./.github/workflows/_test.yml - - golangci-lint: - uses: ./.github/workflows/_gocilint.yml - - gosec-scanning: - uses: ./.github/workflows/_gosecscan.yml - - image-build: - uses: ./.github/workflows/_docker-build.yml - needs: [test, golangci-lint, gosec-scanning] - - image-vulnerability-scan: - uses: ./.github/workflows/_trivy.yml - needs: image-build - - kuttl: - uses: ./.github/workflows/_kuttl.yml - needs: image-build diff --git a/.github/workflows/go-lint-scan-pull_request.yaml b/.github/workflows/go-lint-scan-pull_request.yaml deleted file mode 100644 index 507c10d..0000000 --- a/.github/workflows/go-lint-scan-pull_request.yaml +++ /dev/null @@ -1,24 +0,0 @@ -name: golangci-lint - -on: - workflow_dispatch: - -permissions: read-all - -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f - - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - name: golangci-lint - uses: golangci/golangci-lint-action@4b237a63e5299c390fb934f06848da715fdde07c - with: - version: latest - args: --issues-exit-code=0 - only-new-issues: true - skip-cache: true - skip-pkg-cache: true - skip-build-cache: true - diff --git a/.github/workflows/gosec-scanner-on-pull_request.yaml b/.github/workflows/gosec-scanner-on-pull_request.yaml deleted file mode 100644 index 3e3a9b2..0000000 --- a/.github/workflows/gosec-scanner-on-pull_request.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: "gosec" - -on: - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -permissions: read-all - -jobs: - build: - runs-on: ubuntu-latest - env: - GO111MODULE: on - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - name: 
checkout repo - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - - name: run gosec scan - uses: securego/gosec@89dfdc0c972655dfaa4eec7a115742a28e0bc216 - with: - args: "./..." diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index ac8049d..32bd711 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -19,29 +19,8 @@ concurrency: env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} - IMAGE_TAG: ${{ github.sha }} + IMAGE_TAG: ${{ github.ref_name }} jobs: - test: - uses: ./.github/workflows/_test.yml - - golangci-lint: - uses: ./.github/workflows/_gocilint.yml - - gosec-scanning: - uses: ./.github/workflows/_gosecscan.yml - - image-build: - uses: ./.github/workflows/_docker-build.yml - needs: [test, golangci-lint, gosec-scanning] - - image-vulnerability-scan: - uses: ./.github/workflows/_trivy.yml - needs: image-build - - kuttl: - uses: ./.github/workflows/_kuttl.yml - needs: image-build - auto-pre-release: if: startsWith(github.ref, 'refs/tags/v') && (contains(github.ref_name, '-alpha.') || contains(github.ref_name, '-beta.')) runs-on: ubuntu-latest @@ -49,7 +28,11 @@ jobs: contents: write packages: write steps: - - name: Checkout + - name: harden runner + uses: step-security/harden-runner@9b0655f430fba8c7001d4e38f8d4306db5c6e0ab + with: + egress-policy: audit + - name: checkout uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 with: fetch-depth: 0 @@ -59,29 +42,57 @@ jobs: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Tag and push image - run: | - docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - - name: Generate bundle manifest + - name: restore cache Earthly build 
+ uses: actions/cache/restore@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache + - name: run golangci-lint + run: make lint + - name: run gosec scan + run: make gosec + - name: run test + run: make test + - name: run image scan + run: make scan-image + - name: generate bundle manifest run: make bundle - env: - IMG: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - - name: Compress Kustomize manifests - run: (cd config ; tar -czvf ../discoblocks-kustomize.tar.gz .) - - name: Set since tag + - name: run e2e test + run: make e2e-test + - name: upload Kind logs + uses: actions/upload-artifact@v3 + if: always() + with: + name: kind-logs + path: kind-logs-* + - name: upload Trivy scan results to GitHub Security tab + if: always() + uses: github/codeql-action/upload-sarif@1fc1008278d05ba9455caf083444e6c5a1a3cfd8 + with: + sarif_file: 'trivy-results.sarif' + - name: set owner of earthly cache + if: always() + run: sudo chown -R $(whoami) /var/lib/docker/volumes/earthly-cache + - name: save cache Earthly build + if: always() + uses: actions/cache/save@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache + - name: push images + run: make docker-push + - name: set since tag run: echo "SINCE_TAG=$(git describe --tags --always $(git rev-list --tags) | grep -e '^v[0-9]+*\.[0-9]+*\.[0-9]+*$' | head -1)" >> $GITHUB_ENV - - name: "Generate release changelog" + - name: generate release changelog uses: heinrichreimer/github-changelog-generator-action@6f5b9494dd265d6fb7243a10c53dc0169c55f247 with: token: ${{ secrets.GITHUB_TOKEN }} base: .github/RELEASE_TEMPLATE.md sinceTag: ${{ env.SINCE_TAG }} simpleList: true - - name: Fix version in changelog + - name: fix version in changelog run: sed -i "s/#VERSION#/${{ github.ref_name }}/g" CHANGELOG.md - - name: Create pre-release + - name: create pre-release uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5 with: prerelease: true @@ -89,7 +100,6 
@@ jobs: files: | discoblocks-bundle.yaml discoblocks-kustomize.tar.gz - needs: [image-vulnerability-scan, kuttl] auto-release: if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref_name, '-alpha.') && !contains(github.ref_name, '-beta.') @@ -98,7 +108,11 @@ jobs: contents: write packages: write steps: - - name: Checkout + - name: harden runner + uses: step-security/harden-runner@9b0655f430fba8c7001d4e38f8d4306db5c6e0ab + with: + egress-policy: audit + - name: checkout uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 with: fetch-depth: 0 @@ -108,34 +122,61 @@ jobs: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Tag and push image - run: | - docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - docker tag ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - - name: Generate bundle manifest + - name: restore cache Earthly build + uses: actions/cache/restore@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache + - name: run golangci-lint + run: make lint + - name: run gosec scan + run: make gosec + - name: run test + run: make test + - name: run image scan + run: make scan-image + - name: generate bundle manifest run: make bundle - env: - IMG: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} - - name: Compress Kustomize manifests - run: (cd config ; tar -czvf ../discoblocks-kustomize.tar.gz .) 
- - name: Set since tag + - name: run e2e test + run: make e2e-test + - name: upload Kind logs + uses: actions/upload-artifact@v3 + if: always() + with: + name: kind-logs + path: kind-logs-* + - name: upload Trivy scan results to GitHub Security tab + if: always() + uses: github/codeql-action/upload-sarif@1fc1008278d05ba9455caf083444e6c5a1a3cfd8 + with: + sarif_file: 'trivy-results.sarif' + - name: set owner of earthly cache + if: always() + run: sudo chown -R $(whoami) /var/lib/docker/volumes/earthly-cache + - name: save cache Earthly build + if: always() + uses: actions/cache/save@v3 + with: + path: /var/lib/docker/volumes/earthly-cache/ + key: earthly-cache + - name: push images + run: make docker-push + - name: set since tag run: echo "SINCE_TAG=$(git describe --tags --always $(git rev-list --tags) | grep -e '^v[0-9]+*\.[0-9]+*\.[0-9]+*$' | head -2 | tail -1)" >> $GITHUB_ENV - - name: "Generate release changelog" + - name: generate release changelog uses: heinrichreimer/github-changelog-generator-action@6f5b9494dd265d6fb7243a10c53dc0169c55f247 with: token: ${{ secrets.GITHUB_TOKEN }} base: .github/RELEASE_TEMPLATE.md sinceTag: ${{ env.SINCE_TAG }} simpleList: true - - name: Fix version in changelog + - name: fix version in changelog run: sed -i "s/#VERSION#/${{ github.ref_name }}/g" CHANGELOG.md - - name: Create pre-release + - name: create pre-release uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5 with: prerelease: false body_path: CHANGELOG.md files: | discoblocks-bundle.yaml - discoblocks-kustomize.tar.gz - needs: [image-vulnerability-scan, kuttl] \ No newline at end of file + discoblocks-kustomize.tar.gz \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6e3fbec..ab2c3d6 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ testbin/* *.test kind-logs-* kubeconfig +trivy-results.sarif # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/.golangci.yaml b/.golangci.yaml 
index 5434010..cb2b7c5 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -19,7 +19,6 @@ linters-settings: require-specific: true linters: - disable-all: true enable: - bodyclose - depguard @@ -53,10 +52,6 @@ linters: - unparam - whitespace - unused - # Deprecated - - varcheck - - structcheck - - deadcode run: issues-exit-code: 0 diff --git a/.husky/hooks/pre-push b/.husky/hooks/pre-push index e0d0e98..35a3fa3 100755 --- a/.husky/hooks/pre-push +++ b/.husky/hooks/pre-push @@ -13,6 +13,7 @@ if git status --short | grep -qv "??"; then trap unstash EXIT fi -make manifests generate test +make earthly manifests generate +./bin/earthly -P +all git diff --exit-code --quiet || (git status && exit 1) diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index e53e86e..0000000 --- a/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -# Build CSI drivers -FROM tinygo/tinygo@sha256:65dc1c3e54f88aabe1efe073c3aadb1393593a56355a6ac03df5f18e6c3855dd as drivers - -COPY drivers/ /go/src - -RUN cd /go/src/csi.storageos.com ; go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go -RUN cd /go/src/ebs.csi.aws.com ; go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go - -# Build the manager binary -FROM golang@sha256:5b75b529da0f2196ee8561a90e5b99aceee56e125c6ef09a3da4e32cf3cc6c20 as builder - -WORKDIR /workspace -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download - -# Copy the go source -COPY main.go main.go -COPY api/ api/ -COPY controllers/ controllers/ -COPY mutators/ mutators/ -COPY pkg/ pkg/ -COPY schedulers/ schedulers/ - -# Build -RUN GOOS=linux GOARCH=amd64 go build -a -o manager main.go - -# Use UBI as minimal base image to package the manager binary -FROM 
redhat/ubi8-micro@sha256:4f6f8db9a6dc949d9779a57c43954b251957bd4d019a37edbbde8ed5228fe90a - -LABEL org.opencontainers.image.title "Discoblocks" -LABEL org.opencontainers.image.vendor "Discoblocks.io" -LABEL org.opencontainers.image.licenses "Apache-2.0 License" -LABEL org.opencontainers.image.source "https://github.com/ondat/discoblocks" -LABEL org.opencontainers.image.description "Discoblocks is an open-source declarative disk configuration system for Kubernetes helping to automate CRUD (Create, Read, Update, Delete) operations for cloud disk device resources attached to Kubernetes cluster nodes." -LABEL org.opencontainers.image.documentation "https://github.com/ondat/discoblocks/wiki" - -WORKDIR / -COPY --from=drivers /go/src /drivers -COPY --from=builder /workspace/manager . -COPY --from=builder /go/pkg/mod/github.com/wasmerio/wasmer-go@v1.0.4/wasmer/packaged/lib/linux-amd64/libwasmer.so /lib64 -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt - -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/Earthfile b/Earthfile new file mode 100644 index 0000000..2f74df3 --- /dev/null +++ b/Earthfile @@ -0,0 +1,202 @@ +VERSION --use-cache-command 0.7 +FROM golang:1.18 +WORKDIR /workdir +ARG --global KUBE_VERSION=1.23 +ARG --global REGISTRY=ghcr.io +ARG --global IMAGE_NAME=discoblocks +ARG --global IMAGE_TAG=latest + +all: + WAIT + BUILD +go-lint + BUILD +go-test + BUILD +bundle + END + WAIT + BUILD +go-sec + END + WAIT + BUILD +scan-image + END + +go-lint: + FROM earthly/dind:alpine + COPY . ./workdir + WITH DOCKER --pull golangci/golangci-lint:v1.51.0 + RUN docker run -w /workdir -v /workdir:/workdir golangci/golangci-lint:v1.51.0 golangci-lint run --timeout 500s + END + +go-sec: + FROM earthly/dind:alpine + COPY . ./workdir + WITH DOCKER --pull securego/gosec:2.15.0 + RUN docker run -w /workdir -v /workdir:/workdir securego/gosec:2.15.0 -exclude-dir=bin -exclude-dir=drivers -exclude-generated ./... 
+ END + +go-test: + FROM +deps-go-build + CACHE $HOME/.cache/go-build + COPY . ./ + WITH DOCKER --pull tinygo/tinygo:0.23.0 + RUN make _test + END + +e2e-test: + FROM earthly/dind:alpine + RUN apk add make bash + WORKDIR /workdir + ENV KUSTOMIZE=/usr/local/bin/kustomize + ARG TIMEOUT=240 + COPY --dir +deps-tooling/* /usr/local + COPY Makefile ./ + COPY --dir config ./ + COPY --dir tests ./ + WITH DOCKER --load local/discoblocks:e2e=+build-image --load local/discoblocks:job-e2e=+build-job-image --load local/discoblocks:proxy-e2e=+build-proxy-image + RUN kubectl-kuttl test --timeout ${TIMEOUT} --config tests/e2e/kuttl/kuttl-config-${KUBE_VERSION}.yaml || touch /failure + END + + IF [ -d kind-logs-* ] + SAVE ARTIFACT --if-exists kind-logs-* AS LOCAL ./ + END + + IF [ -f /failure ] + RUN echo "e2e test run failed" && exit 1 + END + +build-drivers: + FROM tinygo/tinygo:0.23.0 + COPY --dir drivers /go/src + WAIT + RUN cd /go/src/csi.storageos.com ; go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go + END + WAIT + RUN cd /go/src/ebs.csi.aws.com ; go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go + END + + SAVE ARTIFACT /go/src/csi.storageos.com /drivers/csi.storageos.com + SAVE ARTIFACT /go/src/ebs.csi.aws.com /drivers/ebs.csi.aws.com + +build-operator: + FROM +deps-go + CACHE $HOME/.cache/go-build + COPY main.go ./ + COPY --dir api ./ + COPY --dir controllers ./ + COPY --dir mutators ./ + COPY --dir pkg ./ + COPY --dir schedulers ./ + RUN GOOS=linux GOARCH=amd64 go build -a -o manager main.go + + SAVE ARTIFACT manager + SAVE ARTIFACT /go/pkg/mod /go/pkg/mod + SAVE ARTIFACT /etc/ssl/certs /etc/ssl/certs + +build-all-images: + WAIT + BUILD +build-image + END + WAIT + BUILD +build-job-image + END + WAIT + BUILD +build-proxy-image + END + +build-job-image: + FROM DOCKERFILE -f +deps-job-image/Dockerfile . 
+ + SAVE IMAGE --push ${REGISTRY}/${IMAGE_NAME}:job-${IMAGE_TAG} + +build-proxy-image: + FROM DOCKERFILE -f +deps-proxy-image/Dockerfile . + + SAVE IMAGE --push ${REGISTRY}/${IMAGE_NAME}:proxy-${IMAGE_TAG} + +build-image: + FROM redhat/ubi8-micro@sha256:4f6f8db9a6dc949d9779a57c43954b251957bd4d019a37edbbde8ed5228fe90a + WORKDIR / + LABEL org.opencontainers.image.title="Discoblocks" + LABEL org.opencontainers.image.vendor="Discoblocks.io" + LABEL org.opencontainers.image.licenses="Apache-2.0 License" + LABEL org.opencontainers.image.source="https://github.com/ondat/discoblocks" + LABEL org.opencontainers.image.description="Discoblocks is an open-source declarative disk configuration system for Kubernetes helping to automate CRUD (Create, Read, Update, Delete) operations for cloud disk device resources attached to Kubernetes cluster nodes." + LABEL org.opencontainers.image.documentation="https://github.com/ondat/discoblocks/wiki" + COPY +build-drivers/drivers /drivers + COPY +build-operator/manager /manager + COPY +build-operator/go/pkg/mod/github.com/wasmerio/wasmer-go@v1.0.4/wasmer/packaged/lib/linux-amd64/libwasmer.so /lib64 + COPY +build-operator/etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + USER 65532:65532 + ENTRYPOINT ["/manager"] + + SAVE IMAGE --push ${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG} + +scan-image: + FROM earthly/dind:alpine + WITH DOCKER --load local/discoblocks:trivy=+build-image --pull aquasec/trivy:0.38.1 + RUN docker run -w /workdir -v ${PWD}:/workdir -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy:0.38.1 image -f sarif -o trivy-results.sarif -s 'CRITICAL,HIGH' local/discoblocks:trivy + END + + SAVE ARTIFACT --if-exists trivy-results.sarif AS LOCAL trivy-results.sarif + +bundle: + FROM +deps-go-build + ENV KUSTOMIZE=/usr/local/bin/kustomize + COPY --dir +deps-tooling/* /usr/local + COPY Makefile ./ + COPY --dir config ./ + RUN sed -i "s|$(grep jobContainerImage config/manager/controller_manager_config.yaml | awk 
'{print $2}')|${REGISTRY}/${IMAGE_NAME}:job-${IMAGE_TAG}|" config/manager/controller_manager_config.yaml + RUN sed -i "s|$(grep proxyContainerImage config/manager/controller_manager_config.yaml | awk '{print $2}')|${REGISTRY}/${IMAGE_NAME}:proxy-${IMAGE_TAG}|" config/manager/controller_manager_config.yaml + RUN make _bundle + + SAVE ARTIFACT --if-exists discoblocks-bundle.yaml AS LOCAL discoblocks-bundle.yaml + SAVE ARTIFACT --if-exists discoblocks-kustomize.tar.gz AS LOCAL discoblocks-kustomize.tar.gz + +deps-go: + COPY go.mod go.sum ./ + RUN go mod download + +deps-go-build: + FROM +deps-go + COPY Makefile ./ + RUN make controller-gen envtest + +deps-tooling: + SAVE ARTIFACT /usr/local/go / + SAVE ARTIFACT /usr/local/go/bin / + + WAIT + ARG LATEST_VERSION=$(curl -s https://api.github.com/repos/storageos/kubectl-storageos/releases/latest | grep tag_name | head -1 | awk -F'\"' '{ print $4 }' | tr -d v) + RUN curl -sSL https://github.com/storageos/kubectl-storageos/releases/download/v${LATEST_VERSION}/kubectl-storageos_${LATEST_VERSION}_linux_amd64.tar.gz | tar -xz + RUN chmod +x kubectl-storageos + SAVE ARTIFACT kubectl-storageos /bin/kubectl-storageos + END + + WAIT + RUN curl -sLo kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.15.0/kubectl-kuttl_0.15.0_linux_x86_64 + RUN chmod +x kubectl-kuttl + SAVE ARTIFACT kubectl-kuttl /bin/kubectl-kuttl + END + + WAIT + RUN curl -sLO https://dl.k8s.io/release/v${KUBE_VERSION}.0/bin/linux/amd64/kubectl + RUN chmod +x kubectl + SAVE ARTIFACT kubectl /bin/kubectl + END + + WAIT + COPY Makefile ./ + RUN make kustomize + SAVE ARTIFACT bin/kustomize /bin/kustomize + END + +deps-job-image: + COPY config/manager/controller_manager_config.yaml ./ + RUN echo "FROM $(grep "jobContainerImage" controller_manager_config.yaml | awk '{print $2}')" > Dockerfile + + SAVE ARTIFACT Dockerfile + +deps-proxy-image: + COPY config/manager/controller_manager_config.yaml ./ + RUN echo "FROM $(grep "proxyContainerImage" 
controller_manager_config.yaml | awk '{print $2}')" > Dockerfile + + SAVE ARTIFACT Dockerfile \ No newline at end of file diff --git a/Makefile b/Makefile index 01cb50c..51044d8 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,15 @@ # Image URL to use all building/pushing image targets -IMG ?= discoblocks:latest +REGISTRY ?= ghcr.io +IMAGE_NAME ?= discoblocks +IMAGE_TAG ?= latest +IMG = $(REGISTRY)/$(IMAGE_NAME):$(IMAGE_TAG) + +E2E_TIMEOUT ?= 240 + # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. ENVTEST_K8S_VERSION = 1.23 -# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) -ifeq (,$(shell go env GOBIN)) -GOBIN=$(shell go env GOPATH)/bin -else -GOBIN=$(shell go env GOBIN) -endif - LDF_FLAGS = -X github.com/ondat/discoblocks/pkg/drivers.DriversDir=$(PWD)/drivers # Setting SHELL to bash allows bash commands to be executed by recipes. @@ -42,7 +41,7 @@ help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Development - + .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases @@ -59,10 +58,25 @@ fmt: ## Run go fmt against code. vet: ## Run go vet against code. go vet ./... +.PHONY: lint +lint: earthly + $(EARTHLY) -P +go-lint --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) + +.PHONY: gosec +gosec: earthly + $(EARTHLY) -P +go-sec --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) + .PHONY: test -test: manifests generate fmt vet build-drivers envtest ## Run tests. +test: earthly ## Run tests. 
+ $(EARTHLY) -P +go-test --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) + +_test: manifests generate fmt vet build-drivers envtest KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -ldflags "$(LDF_FLAGS)" ./... -coverprofile cover.out +.PHONY: e2e-test +e2e-test: earthly ## Run e2e tests. + $(EARTHLY) -P +e2e-test --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) --TIMEOUT=$(E2E_TIMEOUT) + ##@ Build .PHONY: build @@ -74,18 +88,22 @@ run: manifests generate fmt vet ## Run a controller from your host. go run -ldflags "$(LDF_FLAGS)" ./main.go .PHONY: docker-build -docker-build: test ## Build docker image with the manager. - docker build -t ${IMG} . +docker-build: earthly ## Build docker image with the manager. + $(EARTHLY) -P +build-all-images --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) .PHONY: docker-push docker-push: ## Push docker image with the manager. - docker push ${IMG} + $(EARTHLY) --push -P +build-all-images --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) .PHONY: build-drivers -build-drivers: ## Build CSI driver WASIs +build-drivers: ## Build CSI driver WASIs. docker run -v $(PWD)/drivers:/go/src -w /go/src/csi.storageos.com tinygo/tinygo:0.23.0 bash -c "go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go" docker run -v $(PWD)/drivers:/go/src -w /go/src/ebs.csi.aws.com tinygo/tinygo:0.23.0 bash -c "go mod tidy && tinygo build -o main.wasm -target wasi --no-debug main.go" +.PHONY: scan-image +scan-image: earthly ## Run image scan. + $(EARTHLY) -P +scan-image --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) + ##@ Deployment ifndef ignore-not-found @@ -93,15 +111,21 @@ ifndef ignore-not-found endif .PHONY: install -install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
+install: manifests kustomize _install ## Install CRDs into the K8s cluster specified in ~/.kube/config. + +_install: $(KUSTOMIZE) build config/crd | kubectl apply -f - .PHONY: uninstall -uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. +uninstall: manifests kustomize _uninstall ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + +_uninstall: $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy -deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. +deploy: manifests kustomize _deploy ## Deploy controller to the K8s cluster specified in ~/.kube/config. + +_deploy: cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - cd config/manager && $(KUSTOMIZE) edit set image controller=discoblocks:latest @@ -111,11 +135,14 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: bundle -bundle: manifests kustomize ## Generates Kubernetes manifests +bundle: earthly ## Generates Kubernetes manifests + $(EARTHLY) -P +bundle --REGISTRY=$(REGISTRY) --IMAGE_NAME=$(IMAGE_NAME) --IMAGE_TAG=$(IMAGE_TAG) + +_bundle: rm -rf discoblocks-bundle.yaml cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default > discoblocks-bundle.yaml - cd config/manager && $(KUSTOMIZE) edit set image controller=discoblocks:latest + (cd config ; tar -czvf ../discoblocks-kustomize.tar.gz .) .PHONY: deploy-prometheus deploy-prometheus: ## Deploy prometheus to the K8s cluster specified in ~/.kube/config. 
@@ -137,32 +164,48 @@ deploy-cert-manager: ## Deploy cert manager to the K8s cluster specified in ~/.k undeploy-cert-manager: ## Undeploy cert manager from the K8s cluster specified in ~/.kube/config. kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.yaml -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen +CONTROLLER_GEN ?= $(shell pwd)/bin/controller-gen .PHONY: controller-gen controller-gen: ## Download controller-gen locally if necessary. $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0) -KUSTOMIZE = $(shell pwd)/bin/kustomize +KUSTOMIZE ?= $(shell pwd)/bin/kustomize .PHONY: kustomize kustomize: ## Download kustomize locally if necessary. $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v4@v4.5.7) -ENVTEST = $(shell pwd)/bin/setup-envtest +ENVTEST ?= $(shell pwd)/bin/setup-envtest .PHONY: envtest envtest: ## Download envtest-setup locally if necessary. $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) -KUBEBUILDER = $(shell pwd)/bin/kubebuilder +KUBEBUILDER ?= $(shell pwd)/bin/kubebuilder .PHONY: kubebuilder kubebuilder: ## Download kubebuilder locally if necessary. - curl -L -o $(KUBEBUILDER) https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.3.0/kubebuilder_$(shell uname | tr '[:upper:]' '[:lower:]')_amd64 +ifeq (,$(wildcard $(KUBEBUILDER))) + curl -sL -o $(KUBEBUILDER) https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.3.0/kubebuilder_$(shell uname | tr '[:upper:]' '[:lower:]')_amd64 chmod +x $(KUBEBUILDER) +endif -HUSKY = $(shell pwd)/bin/husky +HUSKY ?= $(shell pwd)/bin/husky .PHONY: husky husky: ## Download husky locally if necessary. 
$(call go-get-tool,$(HUSKY),github.com/automation-co/husky@v0.2.14) +EARTHLY ?= $(shell pwd)/bin/earthly +earthly: +ifeq (,$(wildcard $(EARTHLY))) + curl -sL https://github.com/earthly/earthly/releases/download/v0.7.1/earthly-linux-amd64 -o $(EARTHLY) + chmod +x $(EARTHLY) +endif + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + # go-get-tool will 'go get' any package $2 and install it to $1. PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) define go-get-tool diff --git a/PROJECT b/PROJECT index 2b2ebcd..c1a38c5 100644 --- a/PROJECT +++ b/PROJECT @@ -16,4 +16,11 @@ resources: webhooks: validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + group: config.discoblocks.ondat.io + kind: OperatorConfig + path: github.com/ondat/discoblocks/api/config.discoblocks.ondat.io/v1 + version: v1 version: "3" diff --git a/api/config.discoblocks.ondat.io/v1/groupversion_info.go b/api/config.discoblocks.ondat.io/v1/groupversion_info.go new file mode 100644 index 0000000..97588f1 --- /dev/null +++ b/api/config.discoblocks.ondat.io/v1/groupversion_info.go @@ -0,0 +1,20 @@ +// Package v1 contains API Schema definitions for the config.discoblocks.ondat.io v1 API group +// +kubebuilder:object:generate=true +// +groupName=config.discoblocks.ondat.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "config.discoblocks.ondat.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+	AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/config.discoblocks.ondat.io/v1/operatorconfig_types.go b/api/config.discoblocks.ondat.io/v1/operatorconfig_types.go new file mode 100644 index 0000000..d7d8ef4 --- /dev/null +++ b/api/config.discoblocks.ondat.io/v1/operatorconfig_types.go @@ -0,0 +1,38 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// OperatorConfig is the Schema for the operatorconfigs API +type OperatorConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + cfg.ControllerManagerConfigurationSpec `json:",inline"` + + // SupportedCsiDrivers list of supported CSI driver IDs + SupportedCsiDrivers []string `json:"supportedCsiDrivers,omitempty"` + + // JobContainerImage is the container image for volume management operations + JobContainerImage string `json:"jobContainerImage,omitempty"` + + // ProxyContainerImage is the container image of volume metrics sidecar + ProxyContainerImage string `json:"proxyContainerImage,omitempty"` + + // SchedulerStrictMode defines scheduler's behavior on case of Discoblock errors + SchedulerStrictMode bool `json:"schedulerStrictMode,omitempty"` + + // MutatorStrictMode defines mutator's behavior on case of Discoblock errors + MutatorStrictMode bool `json:"mutatorStrictMode,omitempty"` +} + +func init() { + SchemeBuilder.Register(&OperatorConfig{}) +} diff --git a/api/config.discoblocks.ondat.io/v1/zz_generated.deepcopy.go b/api/config.discoblocks.ondat.io/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7e30f15 --- /dev/null +++ b/api/config.discoblocks.ondat.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,57 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/*
+Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorConfig) DeepCopyInto(out *OperatorConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) + if in.SupportedCsiDrivers != nil { + in, out := &in.SupportedCsiDrivers, &out.SupportedCsiDrivers + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfig. +func (in *OperatorConfig) DeepCopy() *OperatorConfig { + if in == nil { + return nil + } + out := new(OperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go index 603af8f..85a8138 100644 --- a/api/v1/groupversion_info.go +++ b/api/v1/groupversion_info.go @@ -15,8 +15,8 @@ limitations under the License. 
*/ // Package v1 contains API Schema definitions for the discoblocks.ondat.io v1 API group -//+kubebuilder:object:generate=true -//+groupName=discoblocks.ondat.io +// +kubebuilder:object:generate=true +// +groupName=discoblocks.ondat.io package v1 import ( diff --git a/config/crd/bases/config.discoblocks.io_operatorconfigs.yaml b/config/crd/bases/config.discoblocks.io_operatorconfigs.yaml new file mode 100644 index 0000000..7ab84de --- /dev/null +++ b/config/crd/bases/config.discoblocks.io_operatorconfigs.yaml @@ -0,0 +1,203 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: operatorconfigs.config.discoblocks.ondat.io +spec: + group: config.discoblocks.ondat.io + names: + kind: OperatorConfig + listKind: OperatorConfigList + plural: operatorconfigs + singular: operatorconfig + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OperatorConfig is the Schema for the operatorconfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + cacheNamespace: + description: "CacheNamespace if specified restricts the manager's cache + to watch objects in the desired namespace Defaults to all namespaces + \n Note: If a namespace is specified, controllers can still Watch for + a cluster-scoped resource (e.g Node). For namespaced resources the + cache will only hold objects from the desired namespace." + type: string + controller: + description: Controller contains global configuration options for controllers + registered within this manager. 
+ properties: + cacheSyncTimeout: + description: CacheSyncTimeout refers to the time limit set to wait + for syncing caches. Defaults to 2 minutes if not set. + format: int64 + type: integer + groupKindConcurrency: + additionalProperties: + type: integer + description: "GroupKindConcurrency is a map from a Kind to the number + of concurrent reconciliation allowed for that controller. \n When + a controller is registered within this manager using the builder + utilities, users have to specify the type the controller reconciles + in the For(...) call. If the object's kind passed matches one of + the keys in this map, the concurrency for that controller is set + to the number specified. \n The key is expected to be consistent + in form with GroupKind.String(), e.g. ReplicaSet in apps group (regardless + of version) would be `ReplicaSet.apps`." + type: object + type: object + gracefulShutDown: + description: GracefulShutdownTimeout is the duration given to runnable + to stop before the manager actually returns on stop. To disable graceful + shutdown, set to time.Duration(0) To use graceful shutdown without timeout, + set to a negative duration, e.G. time.Duration(-1) The graceful shutdown + is skipped for safety reasons in case the leader election lease is lost. + type: string + health: + description: Health contains the controller health configuration + properties: + healthProbeBindAddress: + description: HealthProbeBindAddress is the TCP address that the controller + should bind to for serving health probes + type: string + livenessEndpointName: + description: LivenessEndpointName, defaults to "healthz" + type: string + readinessEndpointName: + description: ReadinessEndpointName, defaults to "readyz" + type: string + type: object + jobContainerImage: + description: JobContainerImage is the container image for volume management + operations + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + leaderElection: + description: LeaderElection is the LeaderElection config to be used when + configuring the manager.Manager leader election + properties: + leaderElect: + description: leaderElect enables a leader election client to gain + leadership before executing the main loop. Enable this when running + replicated components for high availability. + type: boolean + leaseDuration: + description: leaseDuration is the duration that non-leader candidates + will wait after observing a leadership renewal until attempting + to acquire leadership of a led but unrenewed leader slot. This is + effectively the maximum duration that a leader can be stopped before + it is replaced by another candidate. This is only applicable if + leader election is enabled. + type: string + renewDeadline: + description: renewDeadline is the interval between attempts by the + acting master to renew a leadership slot before it stops leading. + This must be less than or equal to the lease duration. This is only + applicable if leader election is enabled. + type: string + resourceLock: + description: resourceLock indicates the resource object type that + will be used to lock during leader election cycles. + type: string + resourceName: + description: resourceName indicates the name of resource object that + will be used to lock during leader election cycles. + type: string + resourceNamespace: + description: resourceName indicates the namespace of resource object + that will be used to lock during leader election cycles. + type: string + retryPeriod: + description: retryPeriod is the duration the clients should wait between + attempting acquisition and renewal of a leadership. This is only + applicable if leader election is enabled. 
+ type: string + required: + - leaderElect + - leaseDuration + - renewDeadline + - resourceLock + - resourceName + - resourceNamespace + - retryPeriod + type: object + metadata: + type: object + metrics: + description: Metrics contains thw controller metrics configuration + properties: + bindAddress: + description: BindAddress is the TCP address that the controller should + bind to for serving prometheus metrics. It can be set to "0" to + disable the metrics serving. + type: string + type: object + mutatorStrictMode: + description: MutatorStrictMode defines mutator's behavior on case of Discoblock + errors + type: boolean + proxyContainerImage: + description: JobContainerImage is the container image of volume metrics + sidecar + type: string + schedulerStrictMode: + description: SchedulerStrictMode defines scheduler's behavior on case + of Discoblock errors + type: boolean + supportedCsiDrivers: + description: SupportedCsiDrivers list of supported CSI driver IDs + items: + type: string + type: array + syncPeriod: + description: SyncPeriod determines the minimum frequency at which watched + resources are reconciled. A lower period will correct entropy more quickly, + but reduce responsiveness to change if there are many watched resources. + Change this value only if you know what you are doing. Defaults to 10 + hours if unset. there will a 10 percent jitter between the SyncPeriod + of all controllers so that all controllers will not send list requests + simultaneously. + type: string + webhook: + description: Webhook contains the controllers webhook configuration + properties: + certDir: + description: CertDir is the directory that contains the server key + and certificate. if not set, webhook server would look up the server + key and certificate in {TempDir}/k8s-webhook-server/serving-certs. + The server key and certificate must be named tls.key and tls.crt, + respectively. + type: string + host: + description: Host is the hostname that the webhook server binds to. 
+ It is used to set webhook.Server.Host. + type: string + port: + description: Port is the port that the webhook server serves at. It + is used to set webhook.Server.Port. + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/config.discoblocks.ondat.io_operatorconfigs.yaml b/config/crd/bases/config.discoblocks.ondat.io_operatorconfigs.yaml new file mode 100644 index 0000000..7ab84de --- /dev/null +++ b/config/crd/bases/config.discoblocks.ondat.io_operatorconfigs.yaml @@ -0,0 +1,203 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: operatorconfigs.config.discoblocks.ondat.io +spec: + group: config.discoblocks.ondat.io + names: + kind: OperatorConfig + listKind: OperatorConfigList + plural: operatorconfigs + singular: operatorconfig + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OperatorConfig is the Schema for the operatorconfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + cacheNamespace: + description: "CacheNamespace if specified restricts the manager's cache + to watch objects in the desired namespace Defaults to all namespaces + \n Note: If a namespace is specified, controllers can still Watch for + a cluster-scoped resource (e.g Node). For namespaced resources the + cache will only hold objects from the desired namespace." 
+ type: string + controller: + description: Controller contains global configuration options for controllers + registered within this manager. + properties: + cacheSyncTimeout: + description: CacheSyncTimeout refers to the time limit set to wait + for syncing caches. Defaults to 2 minutes if not set. + format: int64 + type: integer + groupKindConcurrency: + additionalProperties: + type: integer + description: "GroupKindConcurrency is a map from a Kind to the number + of concurrent reconciliation allowed for that controller. \n When + a controller is registered within this manager using the builder + utilities, users have to specify the type the controller reconciles + in the For(...) call. If the object's kind passed matches one of + the keys in this map, the concurrency for that controller is set + to the number specified. \n The key is expected to be consistent + in form with GroupKind.String(), e.g. ReplicaSet in apps group (regardless + of version) would be `ReplicaSet.apps`." + type: object + type: object + gracefulShutDown: + description: GracefulShutdownTimeout is the duration given to runnable + to stop before the manager actually returns on stop. To disable graceful + shutdown, set to time.Duration(0) To use graceful shutdown without timeout, + set to a negative duration, e.G. time.Duration(-1) The graceful shutdown + is skipped for safety reasons in case the leader election lease is lost. 
+ type: string + health: + description: Health contains the controller health configuration + properties: + healthProbeBindAddress: + description: HealthProbeBindAddress is the TCP address that the controller + should bind to for serving health probes + type: string + livenessEndpointName: + description: LivenessEndpointName, defaults to "healthz" + type: string + readinessEndpointName: + description: ReadinessEndpointName, defaults to "readyz" + type: string + type: object + jobContainerImage: + description: JobContainerImage is the container image for volume management + operations + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + leaderElection: + description: LeaderElection is the LeaderElection config to be used when + configuring the manager.Manager leader election + properties: + leaderElect: + description: leaderElect enables a leader election client to gain + leadership before executing the main loop. Enable this when running + replicated components for high availability. + type: boolean + leaseDuration: + description: leaseDuration is the duration that non-leader candidates + will wait after observing a leadership renewal until attempting + to acquire leadership of a led but unrenewed leader slot. This is + effectively the maximum duration that a leader can be stopped before + it is replaced by another candidate. This is only applicable if + leader election is enabled. + type: string + renewDeadline: + description: renewDeadline is the interval between attempts by the + acting master to renew a leadership slot before it stops leading. + This must be less than or equal to the lease duration. This is only + applicable if leader election is enabled. 
+ type: string + resourceLock: + description: resourceLock indicates the resource object type that + will be used to lock during leader election cycles. + type: string + resourceName: + description: resourceName indicates the name of resource object that + will be used to lock during leader election cycles. + type: string + resourceNamespace: + description: resourceName indicates the namespace of resource object + that will be used to lock during leader election cycles. + type: string + retryPeriod: + description: retryPeriod is the duration the clients should wait between + attempting acquisition and renewal of a leadership. This is only + applicable if leader election is enabled. + type: string + required: + - leaderElect + - leaseDuration + - renewDeadline + - resourceLock + - resourceName + - resourceNamespace + - retryPeriod + type: object + metadata: + type: object + metrics: + description: Metrics contains thw controller metrics configuration + properties: + bindAddress: + description: BindAddress is the TCP address that the controller should + bind to for serving prometheus metrics. It can be set to "0" to + disable the metrics serving. + type: string + type: object + mutatorStrictMode: + description: MutatorStrictMode defines mutator's behavior on case of Discoblock + errors + type: boolean + proxyContainerImage: + description: JobContainerImage is the container image of volume metrics + sidecar + type: string + schedulerStrictMode: + description: SchedulerStrictMode defines scheduler's behavior on case + of Discoblock errors + type: boolean + supportedCsiDrivers: + description: SupportedCsiDrivers list of supported CSI driver IDs + items: + type: string + type: array + syncPeriod: + description: SyncPeriod determines the minimum frequency at which watched + resources are reconciled. A lower period will correct entropy more quickly, + but reduce responsiveness to change if there are many watched resources. 
+ Change this value only if you know what you are doing. Defaults to 10 + hours if unset. there will a 10 percent jitter between the SyncPeriod + of all controllers so that all controllers will not send list requests + simultaneously. + type: string + webhook: + description: Webhook contains the controllers webhook configuration + properties: + certDir: + description: CertDir is the directory that contains the server key + and certificate. if not set, webhook server would look up the server + key and certificate in {TempDir}/k8s-webhook-server/serving-certs. + The server key and certificate must be named tls.key and tls.crt, + respectively. + type: string + host: + description: Host is the hostname that the webhook server binds to. + It is used to set webhook.Server.Host. + type: string + port: + description: Port is the port that the webhook server serves at. It + is used to set webhook.Server.Port. + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 3f63f10..f6b06e7 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,6 +3,7 @@ # It should be run by config/default resources: - bases/discoblocks.ondat.io_diskconfigs.yaml +# - bases/config.discoblocks.ondat.io_operatorconfigs.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index cfc4df4..73aaed5 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -33,7 +33,7 @@ patchesStrategicMerge: # Mount the controller config file for loading manager configurations # through a ComponentConfig type -#- manager_config_patch.yaml +- manager_config_patch.yaml # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix 
including the one in # crd/kustomization.yaml @@ -73,14 +73,3 @@ vars: kind: Service version: v1 name: webhook-service -patches: - - patch: |- - - op: replace - path: "/spec/template/spec/containers/0/env/0" - value: - name: SUPPORTED_CSI_DRIVERS - value: "ebs.csi.aws.com,csi.storageos.com" - target: - kind: Deployment - namespace: system - name: controller-manager diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index 6c40015..bebbac4 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -9,7 +9,7 @@ spec: containers: - name: manager args: - - "--config=controller_manager_config.yaml" + - "--config=/controller_manager_config.yaml" volumeMounts: - name: manager-config mountPath: /controller_manager_config.yaml diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml index 038bd81..cb48e8c 100644 --- a/config/manager/controller_manager_config.yaml +++ b/config/manager/controller_manager_config.yaml @@ -1,11 +1,16 @@ -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 -kind: ControllerManagerConfig +apiVersion: config.discoblocks.ondat.io/v1 +kind: OperatorConfig health: + bindAddress: :8080 healthProbeBindAddress: :8081 metrics: bindAddress: 127.0.0.1:8080 webhook: port: 9443 -leaderElection: - leaderElect: true - resourceName: 49ccccaf.discoblocks.ondat.io +supportedCsiDrivers: +- ebs.csi.aws.com +- csi.storageos.com +jobContainerImage: nixery.dev/shell/gawk/gnugrep/gnused/coreutils-full/cri-tools/docker-client/nerdctl/nvme-cli +proxyContainerImage: nixery.dev/shell/frp +schedulerStrictMode: true +mutatorStrictMode: true diff --git a/config/manager/frps_container_patch.yaml b/config/manager/frps_container_patch.yaml new file mode 100644 index 0000000..60a35e2 --- /dev/null +++ b/config/manager/frps_container_patch.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + 
namespace: system +spec: + template: + spec: + containers: + - name: metrics-proxy + # Set runAsNonRoot to true once image has replaced + image: nixery.dev/shell/frp + command: + - sh + - -c + - | + cat <<EOF > /tmp/frps.ini + [common] + ; log_level = trace + disable_log_color = true + bind_port = 63535 + proxy_bind_addr = 127.0.0.1 + enable_prometheus = true + user_conn_timeout = 5 + max_ports_per_client = 1 + max_pool_count = 1 + heartbeat_timeout = 10 + detailed_errors_to_client = true + dashboard_addr = 127.0.0.1 + dashboard_port = 8000 + tls_only = true + tls_enable = true + tls_cert_file = /tmp/k8s-webhook-server/metrics-certs/tls.crt + tls_key_file = /tmp/k8s-webhook-server/metrics-certs/tls.key + tls_trusted_ca_file = /tmp/k8s-webhook-server/metrics-certs/ca.crt + EOF + trap exit SIGTERM ; + while true; do frps -c /tmp/frps.ini & c=$!; wait $c; done + securityContext: + privileged: false + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/metrics-certs + name: metrics-cert + readOnly: true + ports: + - containerPort: 63535 + protocol: TCP + resources: + requests: + cpu: 10m + memory: 128Mi \ No newline at end of file diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 4624b32..6043302 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,6 +6,9 @@ resources: generatorOptions: disableNameSuffixHash: true +patchesStrategicMerge: +- frps_container_patch.yaml + configMapGenerator: - files: - controller_manager_config.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index fa7a172..d49f8b1 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -31,17 +31,13 @@ spec: containers: - command: - /manager - args: - - --leader-elect image: controller:latest name: manager env: - - name: SUPPORTED_CSI_DRIVERS - value: "ebs.csi.aws.com" - - name: SCHEDULER_STRICT_MODE - value: "true" - - name: MUTATOR_STRICT_MODE - value: "true" + - name: POD_NAMESPACE
+ valueFrom: + fieldRef: + fieldPath: metadata.namespace securityContext: allowPrivilegeEscalation: false volumeMounts: @@ -66,48 +62,6 @@ spec: requests: cpu: 20m memory: 256Mi - - name: metrics-proxy - # Set runAsNonRoot to true once image has replaced - image: nixery.dev/shell/frp - command: - - sh - - -c - - | - cat < /tmp/frps.ini - [common] - ; log_level = trace - disable_log_color = true - bind_port = 63535 - proxy_bind_addr = 127.0.0.1 - enable_prometheus = true - user_conn_timeout = 5 - max_ports_per_client = 1 - max_pool_count = 1 - heartbeat_timeout = 10 - detailed_errors_to_client = true - dashboard_addr = 127.0.0.1 - dashboard_port = 8000 - tls_only = true - tls_enable = true - tls_cert_file = /tmp/k8s-webhook-server/metrics-certs/tls.crt - tls_key_file = /tmp/k8s-webhook-server/metrics-certs/tls.key - tls_trusted_ca_file = /tmp/k8s-webhook-server/metrics-certs/ca.crt - EOF - trap exit SIGTERM ; - while true; do frps -c /tmp/frps.ini & c=$! wait $c; done - securityContext: - privileged: false - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/metrics-certs - name: metrics-cert - readOnly: true - ports: - - containerPort: 63535 - protocol: TCP - resources: - requests: - cpu: 10m - memory: 128Mi serviceAccountName: controller-manager terminationGracePeriodSeconds: 10 volumes: diff --git a/controllers/pvc_controller.go b/controllers/pvc_controller.go index 0e1ee41..d0d76a4 100644 --- a/controllers/pvc_controller.go +++ b/controllers/pvc_controller.go @@ -58,9 +58,10 @@ type nodeCache interface { // PVCReconciler reconciles a PVC object type PVCReconciler struct { - EventService utils.EventService - NodeCache nodeCache - InProgress sync.Map + OperationImage string + EventService utils.EventService + NodeCache nodeCache + InProgress sync.Map client.Client Scheme *runtime.Scheme } @@ -195,6 +196,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R } // MonitorVolumes monitors volumes periodycally +// //nolint:gocyclo // 
It is complex we know func (r *PVCReconciler) MonitorVolumes() { logger := logf.Log.WithName("VolumeMonitor") @@ -876,7 +878,7 @@ WAIT_CSI: mountpoint := utils.RenderMountPoint(config.Spec.MountPointPattern, config.Name, nextIndex) - mountJob, err := utils.RenderMountJob(pod.Name, pvc.Name, pvc.Spec.VolumeName, pvc.Namespace, nodeName, pv.Spec.CSI.FSType, mountpoint, containerIDs, preMountCmd, volumeMeta, metav1.OwnerReference{ + mountJob, err := utils.RenderMountJob(r.OperationImage, pod.Name, pvc.Name, pvc.Spec.VolumeName, pvc.Namespace, nodeName, pv.Spec.CSI.FSType, mountpoint, containerIDs, preMountCmd, volumeMeta, metav1.OwnerReference{ APIVersion: parentPVC.APIVersion, Kind: parentPVC.Kind, Name: pvc.Name, @@ -1105,7 +1107,7 @@ func (r *PVCReconciler) resizePVC(config *discoblocksondatiov1.DiskConfig, pod * return } - resizeJob, err := utils.RenderResizeJob(pod.Name, pvc.Name, pvc.Spec.VolumeName, pvc.Namespace, nodeName, pv.Spec.CSI.FSType, preResizeCmd, volumeMeta, metav1.OwnerReference{ + resizeJob, err := utils.RenderResizeJob(r.OperationImage, pod.Name, pvc.Name, pvc.Spec.VolumeName, pvc.Namespace, nodeName, pv.Spec.CSI.FSType, preResizeCmd, volumeMeta, metav1.OwnerReference{ APIVersion: pvc.APIVersion, Kind: pvc.Kind, Name: pvc.Name, diff --git a/main.go b/main.go index f54bcb4..3c045ed 100644 --- a/main.go +++ b/main.go @@ -21,8 +21,6 @@ import ( "flag" "net/http" "os" - "strconv" - "strings" "sync" "time" @@ -30,6 +28,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -40,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/webhook" + configdiscoblockv1 "github.com/ondat/discoblocks/api/config.discoblocks.ondat.io/v1" discoblocksondatiov1 "github.com/ondat/discoblocks/api/v1" "github.com/ondat/discoblocks/controllers" "github.com/ondat/discoblocks/mutators" @@ -49,7 +49,8 @@ import ( ) const ( - webhookport = 9443 + // envPodNamespace is the operator's pod namespace environment variable. + envPodNamespace = "POD_NAMESPACE" ) var ( @@ -84,20 +85,23 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(discoblocksondatiov1.AddToScheme(scheme)) + utilruntime.Must(configdiscoblockv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } func main() { http.DefaultClient.Timeout = time.Minute - var metricsAddr string - var enableLeaderElection bool - var probeAddr string - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. "+ - "Enabling this will ensure there is only one active controller manager.") + var configFile string + flag.StringVar(&configFile, "config", "", + "The controller will load its initial configuration from this file. "+ + "Omit this flag to use the default configuration values. 
") + + const five = 5 + + var leaderRenewSeconds uint + flag.UintVar(&leaderRenewSeconds, "leader-renew-seconds", five, "Leader renewal frequency") + opts := zap.Options{ Development: true, } @@ -108,14 +112,42 @@ func main() { ctrl.SetLogger(zapLogger) klog.SetLogger(zapLogger) - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: webhookport, - HealthProbeBindAddress: probeAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: controllerID, - }) + currentNS := "" + if ns, ok := os.LookupEnv(envPodNamespace); ok { + currentNS = ns + } + + const ldm = 1.2 + const two = 2 + + renewDeadline := time.Duration(leaderRenewSeconds) * time.Second + leaseDuration := time.Duration(int(ldm*float64(leaderRenewSeconds))) * time.Second + leaderRetryDuration := renewDeadline / two + + options := ctrl.Options{ + Scheme: scheme, + LeaderElection: true, + LeaderElectionID: controllerID, + LeaderElectionNamespace: currentNS, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + RenewDeadline: &renewDeadline, + LeaseDuration: &leaseDuration, + RetryPeriod: &leaderRetryDuration, + } + + operatorConfig := configdiscoblockv1.OperatorConfig{} + + if configFile != "" { + var err error + cfg := ctrl.ConfigFile() + options, err = options.AndFrom(cfg.AtPath(configFile).OfKind(&operatorConfig)) + if err != nil { + setupLog.Error(err, "unable to load the config file") + os.Exit(1) + } + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) @@ -150,19 +182,18 @@ func main() { } if _, err = (&controllers.PVCReconciler{ - EventService: eventService, - NodeCache: nodeReconciler, - InProgress: sync.Map{}, - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + OperationImage: operatorConfig.JobContainerImage, + EventService: eventService, + NodeCache: nodeReconciler, + InProgress: sync.Map{}, + Client: mgr.GetClient(), + 
Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "PVC") os.Exit(1) } - provisioners := strings.Split(strings.ReplaceAll(os.Getenv("SUPPORTED_CSI_DRIVERS"), " ", ""), ",") - - discoblocksondatiov1.InitDiskConfigWebhookDeps(mgr.GetClient(), provisioners) + discoblocksondatiov1.InitDiskConfigWebhookDeps(mgr.GetClient(), operatorConfig.SupportedCsiDrivers) if err = (&discoblocksondatiov1.DiskConfig{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create validator", "validator", "DiskConfig") @@ -171,13 +202,7 @@ func main() { //+kubebuilder:scaffold:builder - strictMutator, err := parseBoolEnv("MUTATOR_STRICT_MODE") - if err != nil { - setupLog.Error(err, "unable to parse MUTATOR_STRICT_MODE") - os.Exit(1) - } - - podMutator := mutators.NewPodMutator(mgr.GetClient(), strictMutator) + podMutator := mutators.NewPodMutator(mgr.GetClient(), operatorConfig.MutatorStrictMode, operatorConfig.ProxyContainerImage) mgr.GetWebhookServer().Register("/mutate-v1-pod", &webhook.Admission{Handler: podMutator}) if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { @@ -190,13 +215,7 @@ func main() { os.Exit(1) } - strictScheduler, err := parseBoolEnv("SCHEDULER_STRICT_MODE") - if err != nil { - setupLog.Error(err, "unable to parse SCHEDULER_STRICT_MODE") - os.Exit(1) - } - - scheduler := schedulers.NewScheduler(mgr.GetClient(), strictScheduler) + scheduler := schedulers.NewScheduler(mgr.GetClient(), operatorConfig.SchedulerStrictMode) schedulerErrChan := scheduler.Start(context.Background()) go func() { setupLog.Error(<-schedulerErrChan, "there was an error in scheduler") @@ -209,12 +228,3 @@ func main() { os.Exit(1) } } - -func parseBoolEnv(key string) (bool, error) { - raw := os.Getenv(key) - if raw != "" { - return strconv.ParseBool(raw) - } - - return false, nil -} diff --git a/mutators/pod_mutator.go b/mutators/pod_mutator.go index d1d9fa8..d7f9652 100644 --- 
a/mutators/pod_mutator.go +++ b/mutators/pod_mutator.go @@ -51,14 +51,16 @@ var podMutatorLog = logf.Log.WithName("mutators.PodMutator") var _ admission.Handler = &PodMutator{} type PodMutator struct { - Client client.Client - strict bool - decoder *admission.Decoder + proxyImage string + client client.Client + strict bool + decoder *admission.Decoder } //+kubebuilder:webhook:path=/mutate-v1-pod,mutating=true,sideEffects=NoneOnDryRun,failurePolicy=fail,groups="",resources=pods,verbs=create,versions=v1,admissionReviewVersions=v1,name=mpod.kb.io // Handle pod mutation +// //nolint:gocyclo // It is complex we know func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admission.Response { logger := podMutatorLog.WithValues("req_name", req.Name, "namespace", req.Namespace) @@ -84,7 +86,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Fetch DiskConfigs...") diskConfigs := discoblocksondatiov1.DiskConfigList{} - if err := a.Client.List(ctx, &diskConfigs, &client.ListOptions{ + if err := a.client.List(ctx, &diskConfigs, &client.ListOptions{ Namespace: pod.Namespace, }); err != nil { metrics.NewError("DiskConfig", "", pod.Namespace, "Kube API", "list") @@ -171,7 +173,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Fetch StorageClass...") sc := storagev1.StorageClass{} - if err := a.Client.Get(ctx, types.NamespacedName{Name: config.Spec.StorageClassName}, &sc); err != nil { + if err := a.client.Get(ctx, types.NamespacedName{Name: config.Spec.StorageClassName}, &sc); err != nil { metrics.NewError("StorageClass", config.Spec.StorageClassName, "", "Kube API", "get") if apierrors.IsNotFound(err) { @@ -224,7 +226,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Fetch Node...") node := &corev1.Node{} - if err := a.Client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil { + if err := 
a.client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil { metrics.NewError("Node", nodeName, "", "Kube API", "get") return admission.Errored(http.StatusInternalServerError, err) @@ -249,7 +251,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Create StorageClass...") - if err = a.Client.Create(ctx, topologySC); err != nil && !apierrors.IsAlreadyExists(err) { + if err = a.client.Create(ctx, topologySC); err != nil && !apierrors.IsAlreadyExists(err) { metrics.NewError("StorageClass", topologySC.Name, "", "Kube API", "create") return admission.Errored(http.StatusInternalServerError, err) @@ -261,7 +263,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Create PVC...") - if err = a.Client.Create(ctx, pvc); err != nil { + if err = a.client.Create(ctx, pvc); err != nil { if !apierrors.IsAlreadyExists(err) { metrics.NewError("PersistentVolume", pvc.Name, pvc.Namespace, "Kube API", "create") @@ -275,7 +277,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Fetch PVC...") - if err = a.Client.Get(ctx, types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, pvc); err != nil { + if err = a.client.Get(ctx, types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, pvc); err != nil { metrics.NewError("PersistentVolumeClaim", pvc.Name, pvc.Namespace, "Kube API", "get") logger.Error(err, "Unable to fetch PVC", "name", pvc.Name) @@ -287,7 +289,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Update PVC finalizer...", "name", pvc.Name) - if err = a.Client.Update(ctx, pvc); err != nil { + if err = a.client.Update(ctx, pvc); err != nil { metrics.NewError("PersistentVolumeClaim", pvc.Name, pvc.Namespace, "Kube API", "update") logger.Error(err, "Unable to update PVC finalizer", "name", pvc.Name) @@ -307,7 +309,7 @@ func (a *PodMutator) Handle(ctx context.Context, 
req admission.Request) admissio logger.Info("Fetch PVCs...") pvcs := corev1.PersistentVolumeClaimList{} - if err = a.Client.List(ctx, &pvcs, &client.ListOptions{ + if err = a.client.List(ctx, &pvcs, &client.ListOptions{ Namespace: config.Namespace, LabelSelector: pvcSelector, }); err != nil { @@ -331,7 +333,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Update PVC child finalizer...", "name", pvcs.Items[i].Name) - if err = a.Client.Update(ctx, &pvcs.Items[i]); err != nil { + if err = a.client.Update(ctx, &pvcs.Items[i]); err != nil { metrics.NewError("PersistentVolumeClaim", pvcs.Items[i].Name, pvcs.Items[i].Namespace, "Kube API", "update") logger.Error(err, "Unable to update PVC finalizer", "name", pvcs.Items[i].Name) @@ -413,7 +415,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio }) } - metricsProxySideCar, err := utils.RenderMetricsProxySidecar(pod.Name, pod.Namespace) + metricsProxySideCar, err := utils.RenderMetricsProxySidecar(a.proxyImage, pod.Name, pod.Namespace) if err != nil { logger.Error(err, "Metrics Proxy sidecar template invalid") return admission.Allowed("Metrics Proxy sidecar template invalid") @@ -481,7 +483,7 @@ func (a *PodMutator) Handle(ctx context.Context, req admission.Request) admissio logger.Info("Create certificate secret...") - if err := a.Client.Create(ctx, &metricsCert); err != nil { + if err := a.client.Create(ctx, &metricsCert); err != nil { if !apierrors.IsAlreadyExists(err) { metrics.NewError("Secret", metricsCert.Name, metricsCert.Namespace, "Kube API", "create") @@ -508,9 +510,10 @@ func (a *PodMutator) InjectDecoder(d *admission.Decoder) error { } // NewPodMutator creates a new pod mutator -func NewPodMutator(kubeClient client.Client, strict bool) *PodMutator { +func NewPodMutator(kubeClient client.Client, strict bool, proxyImage string) *PodMutator { return &PodMutator{ - Client: kubeClient, - strict: strict, + proxyImage: proxyImage, + 
client: kubeClient, + strict: strict, } } diff --git a/pkg/utils/kube.go b/pkg/utils/kube.go index ed1a491..b10f131 100644 --- a/pkg/utils/kube.go +++ b/pkg/utils/kube.go @@ -38,7 +38,7 @@ securityContext: ` const metricsProxyTeamplate = `name: discoblocks-metrics-proxy -image: nixery.dev/shell/frp +image: "%s" command: - sh - -c @@ -78,40 +78,40 @@ volumeMounts: const hostJobTemplate = `apiVersion: batch/v1 kind: Job metadata: - name: "%s" - namespace: "%s" + name: "%[1]s" + namespace: "%[2]s" labels: app: discoblocks annotations: - discoblocks/operation: "%s" - discoblocks/pod: "%s" - discoblocks/pvc: "%s" + discoblocks/operation: "%[3]s" + discoblocks/pod: "%[4]s" + discoblocks/pvc: "%[5]s" spec: template: spec: hostPID: true - nodeName: "%s" + nodeName: "%[6]s" containers: - name: mount - image: nixery.dev/shell/gawk/gnugrep/gnused/coreutils-full/cri-tools/docker-client/nerdctl/nvme-cli + image: "%[7]s" env: - name: MOUNT_POINT - value: "%s" + value: "%[8]s" - name: CONTAINER_IDS - value: "%s" + value: "%[9]s" - name: PVC_NAME - value: "%s" + value: "%[5]s" - name: PV_NAME - value: "%s" + value: "%[10]s" - name: FS - value: "%s" + value: "%[11]s" - name: VOLUME_ATTACHMENT_META - value: "%s" + value: "%[12]s" command: - bash - -exc - | - %s + %[13]s volumeMounts: - mountPath: /run/containerd/containerd.sock name: containerd-socket @@ -176,9 +176,9 @@ func RenderMetricsSidecar() (*corev1.Container, error) { } // RenderMetricsProxySidecar returns the metrics sidecar -func RenderMetricsProxySidecar(name, namespace string) (*corev1.Container, error) { +func RenderMetricsProxySidecar(image, name, namespace string) (*corev1.Container, error) { sidecar := corev1.Container{} - if err := yaml.Unmarshal([]byte(fmt.Sprintf(metricsProxyTeamplate, namespace, name)), &sidecar); err != nil { + if err := yaml.Unmarshal([]byte(fmt.Sprintf(metricsProxyTeamplate, image, namespace, name)), &sidecar); err != nil { return nil, fmt.Errorf("unable to unmarshal container: %w", err) } @@ 
-186,7 +186,7 @@ func RenderMetricsProxySidecar(name, namespace string) (*corev1.Container, error } // RenderMountJob returns the mount job executed on host -func RenderMountJob(podName, pvcName, pvName, namespace, nodeName, fs, mountPoint string, containerIDs []string, preMountCommand, volumeMeta string, owner metav1.OwnerReference) (*batchv1.Job, error) { +func RenderMountJob(image, podName, pvcName, pvName, namespace, nodeName, fs, mountPoint string, containerIDs []string, preMountCommand, volumeMeta string, owner metav1.OwnerReference) (*batchv1.Job, error) { if preMountCommand != "" { preMountCommand += " && " } @@ -199,7 +199,7 @@ func RenderMountJob(podName, pvcName, pvName, namespace, nodeName, fs, mountPoin return nil, fmt.Errorf("unable to render resource name: %w", err) } - template := fmt.Sprintf(hostJobTemplate, jobName, namespace, "mount", podName, pvcName, nodeName, mountPoint, strings.Join(containerIDs, " "), pvcName, pvName, fs, volumeMeta, mountCommand) + template := fmt.Sprintf(hostJobTemplate, jobName, namespace, "mount", podName, pvcName, nodeName, image, mountPoint, strings.Join(containerIDs, " "), pvName, fs, volumeMeta, mountCommand) job := batchv1.Job{} if err := yaml.Unmarshal([]byte(template), &job); err != nil { @@ -215,7 +215,7 @@ func RenderMountJob(podName, pvcName, pvName, namespace, nodeName, fs, mountPoin } // RenderResizeJob returns the resize job executed on host -func RenderResizeJob(podName, pvcName, pvName, namespace, nodeName, fs, preResizeCommand, volumeMeta string, owner metav1.OwnerReference) (*batchv1.Job, error) { +func RenderResizeJob(image, podName, pvcName, pvName, namespace, nodeName, fs, preResizeCommand, volumeMeta string, owner metav1.OwnerReference) (*batchv1.Job, error) { if preResizeCommand != "" { preResizeCommand += " && " } @@ -228,7 +228,7 @@ func RenderResizeJob(podName, pvcName, pvName, namespace, nodeName, fs, preResiz return nil, fmt.Errorf("unable to render resource name: %w", err) } - template := 
fmt.Sprintf(hostJobTemplate, jobName, namespace, "resize", podName, pvcName, nodeName, "", "", pvcName, pvName, fs, volumeMeta, resizeCommand) + template := fmt.Sprintf(hostJobTemplate, jobName, namespace, "resize", podName, pvcName, nodeName, image, "", "", pvName, fs, volumeMeta, resizeCommand) job := batchv1.Job{} if err := yaml.Unmarshal([]byte(template), &job); err != nil { diff --git a/tests/e2e/kuttl/kuttl-config-1.23.yaml b/tests/e2e/kuttl/kuttl-config-1.23.yaml index dbac729..cd33540 100644 --- a/tests/e2e/kuttl/kuttl-config-1.23.yaml +++ b/tests/e2e/kuttl/kuttl-config-1.23.yaml @@ -1,9 +1,13 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestSuite testDirs: - ./tests/e2e/stable -kindConfig: tests/e2e/kind/kind-config-1.23.yaml startKIND: true +skipClusterDelete: true +kindConfig: tests/e2e/kind/kind-config-1.23.yaml +kindNodeCache: true kindContainers: - local/discoblocks:e2e -timeout: 500 + - local/discoblocks:job-e2e + - local/discoblocks:proxy-e2e +timeout: 240 diff --git a/tests/e2e/stable/storageos/00-assert.yaml b/tests/e2e/stable/storageos/00-assert.yaml index 70fa5cc..8d38cb3 100644 --- a/tests/e2e/stable/storageos/00-assert.yaml +++ b/tests/e2e/stable/storageos/00-assert.yaml @@ -1,3 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 360 +collectors: + - type: events + namespace: storageos + - type: command + command: kubectl get po -A -o wide + - type: command + command: kubectl get stos -n storageos storageoscluster -o yaml + - type: command + command: kubectl logs -n storageos -l app.kubernetes.io/component +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -11,6 +24,11 @@ kind: StorageOSCluster metadata: name: storageoscluster namespace: storageos +spec: + kvBackend: + address: storageos-etcd.storageos-etcd:2379 + secretRefName: storageos-api + storageClassName: storageos status: conditions: - message: Scheduler Ready @@ -29,9 +47,13 @@ status: reason: Ready status: "True" type: CSIReady + - message: 
CLI Ready + reason: Ready + status: "True" + type: CLIReady - message: Cluster Ready reason: Ready status: "True" type: Ready phase: Running - ready: 1/1 + ready: 1/1 \ No newline at end of file diff --git a/tests/e2e/stable/storageos/00-install-dependencies.yaml b/tests/e2e/stable/storageos/00-install-dependencies.yaml index a14228f..c7beaa4 100644 --- a/tests/e2e/stable/storageos/00-install-dependencies.yaml +++ b/tests/e2e/stable/storageos/00-install-dependencies.yaml @@ -1,8 +1,8 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - command: make -C ../../../.. deploy-cert-manager - command: kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v6.0.1/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml - command: kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v6.0.1/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml - command: kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v6.0.1/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml - - command: kubectl storageos install --include-etcd --etcd-replicas 1 --stos-version=v2.9.0 + - command: kubectl storageos install --include-etcd --etcd-replicas 1 --stos-version=v2.10.0 \ No newline at end of file diff --git a/tests/e2e/stable/storageos/01-assert.yaml b/tests/e2e/stable/storageos/01-assert.yaml index fb245a8..8e3b994 100644 --- a/tests/e2e/stable/storageos/01-assert.yaml +++ b/tests/e2e/stable/storageos/01-assert.yaml @@ -1,3 +1,38 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 180 +collectors: + - type: events + namespace: kube-system + - type: command + command: kubectl get po -A -o wide + - type: command + command: kubectl logs -n kube-system -l app=discoblocks +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: discoblocks-manager-config + namespace: kube-system +data: + controller_manager_config.yaml: 
| + apiVersion: config.discoblocks.ondat.io/v1 + kind: OperatorConfig + health: + bindAddress: :8080 + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + supportedCsiDrivers: + - ebs.csi.aws.com + - csi.storageos.com + jobContainerImage: local/discoblocks:job-e2e + proxyContainerImage: local/discoblocks:proxy-e2e + schedulerStrictMode: true + mutatorStrictMode: true +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/tests/e2e/stable/storageos/01-install-discoblocks.yaml b/tests/e2e/stable/storageos/01-install-discoblocks.yaml index f69e9ff..3315c1f 100644 --- a/tests/e2e/stable/storageos/01-install-discoblocks.yaml +++ b/tests/e2e/stable/storageos/01-install-discoblocks.yaml @@ -1,4 +1,6 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - - command: sh -c 'IMG=local/discoblocks:e2e make -C ../../../.. deploy' + - script: sed -i "s|$(grep jobContainerImage ../../../../config/manager/controller_manager_config.yaml | awk '{print $2}')|local/discoblocks:job-e2e|" ../../../../config/manager/controller_manager_config.yaml + - script: sed -i "s|$(grep proxyContainerImage ../../../../config/manager/controller_manager_config.yaml | awk '{print $2}')|local/discoblocks:proxy-e2e|" ../../../../config/manager/controller_manager_config.yaml + - script: REGISTRY=local IMAGE_NAME=discoblocks IMAGE_TAG=e2e make -C ../../../.. 
_deploy diff --git a/tests/e2e/stable/storageos/02-assert.yaml b/tests/e2e/stable/storageos/02-assert.yaml index ef8428e..5fabf44 100644 --- a/tests/e2e/stable/storageos/02-assert.yaml +++ b/tests/e2e/stable/storageos/02-assert.yaml @@ -1,3 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 120 +collectors: + - type: events + namespace: default + - type: events + namespace: kube-system + - type: command + command: kubectl get po -A -o wide + - type: command + command: kubectl logs -n kube-system -l app=discoblocks +--- apiVersion: discoblocks.ondat.io/v1 kind: DiskConfig metadata: diff --git a/tests/e2e/stable/storageos/02-install-workload.yaml b/tests/e2e/stable/storageos/02-install-workload.yaml index 6aa1baa..c5d8b0a 100644 --- a/tests/e2e/stable/storageos/02-install-workload.yaml +++ b/tests/e2e/stable/storageos/02-install-workload.yaml @@ -1,10 +1,10 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - command: kubectl apply -f diskconfig-readwriteonce.yaml - command: kubectl apply -f diskconfig-readwritesame.yaml - command: kubectl create deployment --image=nginx:1.23 nginx - - command: kubectl apply -f diskconfig-readwritedaemon.yaml + - command: kubectl apply -n kube-system -f diskconfig-readwritedaemon.yaml - command: kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml - command: kubectl create ns pod-with-host-pid - command: kubectl apply -n pod-with-host-pid -f diskconfig-readwriteonce.yaml diff --git a/tests/e2e/stable/storageos/03-validate.yaml b/tests/e2e/stable/storageos/03-validate.yaml index ecdd684..6585a5b 100644 --- a/tests/e2e/stable/storageos/03-validate.yaml +++ b/tests/e2e/stable/storageos/03-validate.yaml @@ -1,8 +1,8 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - - command: | - sh -c 'cat 03-assert.tpl.yaml | sed \ + - script: | + cat 03-assert.tpl.yaml | sed \ -e "s/#PVC_ONCE_NAME#/$(kubectl get pvc -l 
discoblocks=diskconfig-sample-storageos-once --no-headers -o custom-columns=":metadata.name")/" \ -e "s/#PV_ONCE_NAME#/$(kubectl get pvc -l discoblocks=diskconfig-sample-storageos-once --no-headers -o custom-columns=":spec.volumeName")/" \ -e "s/#PVC_SAME_NAME#/$(kubectl get pvc -l discoblocks=diskconfig-sample-storageos-same --no-headers -o custom-columns=":metadata.name")/" \ @@ -12,5 +12,5 @@ commands: -e "s/#PVC_HOSTPID_NAME#/$(kubectl get pvc -n pod-with-host-pid -l discoblocks=diskconfig-sample-storageos-once --no-headers -o custom-columns=":metadata.name")/" \ -e "s/#PV_HOSTPID_NAME#/$(kubectl get pvc -n pod-with-host-pid -l discoblocks=diskconfig-sample-storageos-once --no-headers -o custom-columns=":spec.volumeName")/" \ -e "s/#POD_NAME#/$(kubectl get po -l app=nginx --no-headers -o custom-columns=":metadata.name")/" \ - > workload/00-assert.yaml' + > workload/00-assert.yaml - command: kubectl-kuttl assert workload \ No newline at end of file diff --git a/tests/e2e/stable/storageos/04-fill-volume.yaml b/tests/e2e/stable/storageos/04-fill-volume.yaml index 114efe6..7a5567a 100644 --- a/tests/e2e/stable/storageos/04-fill-volume.yaml +++ b/tests/e2e/stable/storageos/04-fill-volume.yaml @@ -1,15 +1,15 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - - command: sh -c "kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-0/data0 count=1M" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-0/data0 count=1M" - - command: sh -c "kubectl exec -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers | tail -1 | awk '{print $1}') -- touch /media/discoblocks/daemon-0/data0" - - command: sleep 40 - - command: sh -c "kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero 
of=/media/discoblocks/same-0/data0 count=3M" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-0/data0 count=3M" + - script: kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-0/data0 count=1M + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-0/data0 count=1M + - script: kubectl exec -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers | tail -1 | awk '{print $1}') -- touch /media/discoblocks/daemon-0/data0 - command: sleep 60 - - command: sh -c "kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-1/data1 count=1M" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-1/data1 count=1M" - - command: sleep 40 - - command: sh -c "kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-1/data1 count=3M" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-1/data1 count=3M" \ No newline at end of file + - script: kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-0/data0 count=3M + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-0/data0 count=3M + - command: sleep 90 + - script: kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- 
dd if=/dev/zero of=/media/discoblocks/same-1/data1 count=1M + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-1/data1 count=1M + - command: sleep 60 + - script: kubectl exec $(kubectl get po --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/same-1/data1 count=3M + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid --no-headers | tail -1 | awk '{print $1}') -- dd if=/dev/zero of=/media/discoblocks/once-1/data1 count=3M \ No newline at end of file diff --git a/tests/e2e/stable/storageos/05-assert.yaml b/tests/e2e/stable/storageos/05-assert.yaml index 9936c53..8963f23 100644 --- a/tests/e2e/stable/storageos/05-assert.yaml +++ b/tests/e2e/stable/storageos/05-assert.yaml @@ -1,3 +1,16 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 120 +collectors: + - type: events + namespace: default + - type: events + namespace: kube-system + - type: command + command: kubectl get po -A -o wide + - type: command + command: kubectl logs -n kube-system -l app=discoblocks +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/tests/e2e/stable/storageos/05-restart-workload.yaml b/tests/e2e/stable/storageos/05-restart-workload.yaml index 62c5eeb..37892d6 100644 --- a/tests/e2e/stable/storageos/05-restart-workload.yaml +++ b/tests/e2e/stable/storageos/05-restart-workload.yaml @@ -1,5 +1,5 @@ -apiVersion: kuttl.dev/v1beta1 +apiVersion: kuttl.dev/v1 kind: TestStep commands: - - command: sh -c "kubectl delete po $(kubectl get po -l app=nginx --no-headers -o custom-columns=":metadata.name")" - - command: sh -c "kubectl delete po -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers -o custom-columns=":metadata.name")" \ No newline at end of file + - script: kubectl delete po $(kubectl get po -l app=nginx --no-headers -o 
custom-columns=":metadata.name") + - script: kubectl delete po -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers -o custom-columns=":metadata.name") \ No newline at end of file diff --git a/tests/e2e/stable/storageos/06-assert.yaml b/tests/e2e/stable/storageos/06-assert.yaml new file mode 100644 index 0000000..190898b --- /dev/null +++ b/tests/e2e/stable/storageos/06-assert.yaml @@ -0,0 +1,19 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 15 +collectors: + - type: events + namespace: default + - type: events + namespace: kube-system + - type: command + command: kubectl get po -A -o wide + - type: command + command: kubectl logs -n kube-system -l app=discoblocks +commands: + - script: kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/once-0 + - script: kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/same-0/data0 + - script: kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/same-1/data1 + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/once-0/data0 + - script: kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/once-1/data1 + - script: kubectl exec -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/daemon-0/data0 \ No newline at end of file diff --git a/tests/e2e/stable/storageos/06-validate-volumes.yaml b/tests/e2e/stable/storageos/06-validate-volumes.yaml deleted file mode 100644 index fa46d42..0000000 --- a/tests/e2e/stable/storageos/06-validate-volumes.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - command: sh -c "kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l
/media/discoblocks/once-0" - - command: sh -c "kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/same-0/data0" - - command: sh -c "kubectl exec $(kubectl get po | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/same-1/data1" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/once-0/data0" - - command: sh -c "kubectl exec -n pod-with-host-pid $(kubectl get po -n pod-with-host-pid | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/once-1/data1" - - command: sh -c "kubectl exec -n kube-system $(kubectl get po -n kube-system -l name=fluentd-elasticsearch --no-headers | tail -1 | awk '{print $1}') -- ls -l /media/discoblocks/daemon-0/data0" \ No newline at end of file diff --git a/tests/e2e/stable/storageos/diskconfig-readwritedaemon.yaml b/tests/e2e/stable/storageos/diskconfig-readwritedaemon.yaml index 1856031..a68e5d1 100644 --- a/tests/e2e/stable/storageos/diskconfig-readwritedaemon.yaml +++ b/tests/e2e/stable/storageos/diskconfig-readwritedaemon.yaml @@ -2,7 +2,6 @@ apiVersion: discoblocks.ondat.io/v1 kind: DiskConfig metadata: name: diskconfig-sample-storageos-daemon - namespace: kube-system labels: discoblocks: ok spec: