diff --git a/.github/scripts/capture_k8s_logs.sh b/.github/scripts/capture_k8s_logs.sh
index 87b9e5df..2b617b98 100755
--- a/.github/scripts/capture_k8s_logs.sh
+++ b/.github/scripts/capture_k8s_logs.sh
@@ -4,7 +4,7 @@ set -e
 # Create a directory to store the logs
 mkdir -p logs/kind_cluster_logs
 LOG_FILE_PATH=".github/e2e_tests/artifacts/logs/kind_cluster_logs"
-mkdir -p "$(dirname "$LOG_FILE_PATH")" # Ensure the log directory exists
+mkdir -p "$LOG_FILE_PATH" # Ensure the log directory exists
 
 # Define namespaces to capture logs from
 namespaces=("default" "container-object-storage-system")
diff --git a/.github/scripts/e2e_tests_brownfield_use_case.sh b/.github/scripts/e2e_tests_brownfield_use_case.sh
index 3d972224..d84b00b3 100755
--- a/.github/scripts/e2e_tests_brownfield_use_case.sh
+++ b/.github/scripts/e2e_tests_brownfield_use_case.sh
@@ -9,7 +9,7 @@ SECRET_NAME="brownfield-bucket-secret"
 IAM_ENDPOINT="http://$HOST_IP:8600"
 S3_ENDPOINT="http://$HOST_IP:8000"
 BUCKET_NAME="brownfield-bucket"
-NAMESPACE="container-object-storage-system"
+NAMESPACE="default"
 REGION="us-west-1"
 
 # Error handling function
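Note on the `capture_k8s_logs.sh` fix above: `dirname` strips the last path segment, so the old line created only the parent directory and never the `kind_cluster_logs` leaf the script presumably writes into. A minimal sketch of the difference (illustrative only, not part of the patch):

```bash
#!/usr/bin/env bash
LOG_FILE_PATH=".github/e2e_tests/artifacts/logs/kind_cluster_logs"

# Old behavior: creates only ".github/e2e_tests/artifacts/logs";
# the "kind_cluster_logs" leaf directory is never created.
mkdir -p "$(dirname "$LOG_FILE_PATH")"

# New behavior: creates the full path, so later writes such as
# "$LOG_FILE_PATH/default.log" no longer fail with "No such file or directory".
mkdir -p "$LOG_FILE_PATH"
```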
diff --git a/.github/workflows/helm-validation.yml b/.github/workflows/helm-validation.yml
index 06a3dfd6..4d683589 100644
--- a/.github/workflows/helm-validation.yml
+++ b/.github/workflows/helm-validation.yml
@@ -18,13 +18,29 @@ on:
         default: 5
 
 jobs:
-  smoke-test-installation-with-helm:
+  e2e-tests-with-helm:
     runs-on: ubuntu-latest
     steps:
       - name: Check out repository
         uses: actions/checkout@v4
 
+      - name: Login to Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: "${{ github.repository_owner }}"
+          password: "${{ github.token }}"
+
+      - name: Restore Cached Docker Images
+        id: cache_docker_images
+        uses: actions/cache@v4
+        with:
+          path: /tmp/.docker_cache
+          key: docker-${{ runner.os }}-${{ hashFiles('.github/s3_and_iam_deployment/.env') }}
+          restore-keys: |
+            docker-${{ runner.os }}-
+
       - name: Set up Helm
         uses: azure/setup-helm@v4.2.0
         with:
@@ -37,6 +53,35 @@ jobs:
           wait: 90s
           cluster_name: helm-test-cluster
 
+      - name: Verify KIND cluster is running
+        run: |
+          kubectl cluster-info
+          kubectl get nodes
+
+      - name: Setup COSI, S3 and IAM environments
+        run: |
+          set -e -o pipefail
+          (
+            echo "=== Setup COSI Controller, CRDs and Driver ==="
+            kubectl create -k github.com/kubernetes-sigs/container-object-storage-interface
+            make container
+            kind load docker-image ghcr.io/scality/cosi-driver:latest --name helm-test-cluster
+          ) &
+          (
+            echo "=== Loading cached S3 and IAM Docker images ==="
+            if [ -d /tmp/.docker_cache ] && [ "$(ls -A /tmp/.docker_cache 2>/dev/null)" ]; then
+              for image in /tmp/.docker_cache/*.tar; do
+                docker load -i "$image" || true # continue on failure
+              done
+            else
+              echo "No cached images found. Skipping load."
+            fi
+          ) &
+
+          # Wait for both background processes
+          wait
+
       - name: "Debug: SSH to runner"
         uses: scality/actions/action-ssh-to-runner@v1
         with:
@@ -49,15 +94,29 @@
       timeout-minutes: 10
       continue-on-error: true
 
-      - name: Build COSI Driver Docker Image
-        run: |
-          make container
+      - name: Setup IAM and S3 Services
+        run: |-
+          set -e -o pipefail;
+          mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb
+          chown -R runner:docker logs data
+          chmod -R ugo+rwx logs data
+          docker compose --profile iam_s3 up -d --quiet-pull
+          bash ../scripts/wait_for_local_port.bash 8600 30
+          bash ../scripts/wait_for_local_port.bash 8000 30
+        working-directory: .github/s3_and_iam_deployment
 
-      - name: Load Docker Image into Kind Cluster
+      - name: Save Images to Cache if not present
+        if: steps.cache_docker_images.outputs.cache-hit != 'true'
         run: |
-          kind load docker-image ghcr.io/scality/cosi-driver:latest --name helm-test-cluster
+          source .github/s3_and_iam_deployment/.env
+          echo "Vault Image: $VAULT_IMAGE"
+          echo "CloudServer Image: $CLOUDSERVER_IMAGE"
+          mkdir -p /tmp/.docker_cache
+          docker save "$VAULT_IMAGE" -o /tmp/.docker_cache/vault_image.tar
+          docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
+        shell: bash
 
-      - name: Install Scality COSI Helm Chart
+      - name: Install Scality COSI Driver using Helm Chart
         run: |
           helm install scality-cosi-driver ./helm/scality-cosi-driver \
             --namespace container-object-storage-system \
@@ -73,6 +132,14 @@
         run: |
           .github/scripts/verify_helm_install.sh
 
+      - name: E2E tests for greenfield use case using kustomize
+        run: |
+          .github/scripts/e2e_tests_greenfield_use_case.sh
+
+      - name: E2E tests for brownfield use case using kustomize
+        run: |
+          .github/scripts/e2e_tests_brownfield_use_case.sh
+
       # the script accepts number of requests for APIs: CREATE_BUCKET, DELETE_BUCKET, GET_INFO
       # GRANT_ACCESS and REVOKE_ACCESS in order
       # Example below we are testing for those API counts:
@@ -83,7 +150,7 @@
       # - 0 REVOKE_ACCESS
       - name: Verify metrics for healthcheck route
         run: |
-          .github/scripts/e2e_tests_metrics.sh 0 0 1 0 0
+          .github/scripts/e2e_tests_metrics.sh 2 1 1 2 2
 
       - name: "Delay completion"
         if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
@@ -92,8 +159,35 @@
           completion_delay_m: ${{ inputs.debug_delay_duration_minutes }}
         continue-on-error: true
 
+      - name: Cleanup IAM and S3 Services
+        run: docker compose --profile iam_s3 down
+        working-directory: .github/s3_and_iam_deployment
+
+      - name: Move S3 and IAM logs and data to artifacts directory
+        if: always()
+        run: |-
+          set -e -o pipefail;
+          mkdir -p .github/e2e_tests/artifacts/logs .github/e2e_tests/artifacts/data
+          cp -r .github/s3_and_iam_deployment/logs/* .github/e2e_tests/artifacts/logs/
+          cp -r .github/s3_and_iam_deployment/data/* .github/e2e_tests/artifacts/data/
+
+      - name: Capture Kubernetes Logs in artifacts directory
+        if: always()
+        run: |
+          .github/scripts/capture_k8s_logs.sh
+
       - name: Cleanup Helm Release and Namespace
         run: |
           helm uninstall scality-cosi-driver -n container-object-storage-system
           kubectl delete namespace container-object-storage-system
         if: always()
+
+      - name: Upload logs and data to Scality artifacts
+        if: always()
+        uses: scality/action-artifacts@v4
+        with:
+          method: upload
+          url: https://artifacts.scality.net
+          user: ${{ secrets.ARTIFACTS_USER }}
+          password: ${{ secrets.ARTIFACTS_PASSWORD }}
+          source: .github/e2e_tests/artifacts
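The caching steps added to this workflow avoid re-pulling the Vault and CloudServer images on every run: `actions/cache` keys the tar archives on the `.env` file that pins the image tags, and the images are round-tripped through `docker save`/`docker load`. A rough local equivalent of that round-trip, with a placeholder image name rather than the pinned tags:

```bash
#!/usr/bin/env bash
set -e -o pipefail

CACHE_DIR=/tmp/.docker_cache
IMAGE="ghcr.io/example/cloudserver:latest"  # placeholder, not the real pinned tag

# On a cache miss, populate the cache (the "Save Images to Cache" step).
mkdir -p "$CACHE_DIR"
docker save "$IMAGE" -o "$CACHE_DIR/cloudserver_image.tar"

# On later runs, restore from the cache instead of pulling (the setup step
# simply skips the load when the directory is missing or empty).
if [ -d "$CACHE_DIR" ] && [ "$(ls -A "$CACHE_DIR" 2>/dev/null)" ]; then
  for tar in "$CACHE_DIR"/*.tar; do
    docker load -i "$tar" || true  # continue on failure, as in the workflow
  done
fi
```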
diff --git a/.github/workflows/e2e-feature-tests.yml b/.github/workflows/kustomize-validation.yml
similarity index 99%
rename from .github/workflows/e2e-feature-tests.yml
rename to .github/workflows/kustomize-validation.yml
index dcd74c3e..24efaad6 100644
--- a/.github/workflows/e2e-feature-tests.yml
+++ b/.github/workflows/kustomize-validation.yml
@@ -1,4 +1,4 @@
-name: End to End Feature Tests
+name: Kustomize Validation
 
 on:
   push:
@@ -18,7 +18,7 @@ on:
         default: 5
 
 jobs:
-  e2e-tests-with-kind:
+  e2e-tests-with-kustomize:
     runs-on: ubuntu-latest
     steps:
diff --git a/cosi-examples/brownfield/bucket.yaml b/cosi-examples/brownfield/bucket.yaml
index 065d9cfb..875653d5 100644
--- a/cosi-examples/brownfield/bucket.yaml
+++ b/cosi-examples/brownfield/bucket.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
 kind: Bucket
 metadata:
   name: brownfield-bucket # should be same as bucket name
-  namespace: container-object-storage-system
 spec:
   bucketClaim: {}
   bucketClassName: brownfield-bucket-class
diff --git a/cosi-examples/brownfield/bucketaccess.yaml b/cosi-examples/brownfield/bucketaccess.yaml
index fad52782..3bd9f659 100644
--- a/cosi-examples/brownfield/bucketaccess.yaml
+++ b/cosi-examples/brownfield/bucketaccess.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
 kind: BucketAccess
 metadata:
   name: brownfield-bucket-access
-  namespace: container-object-storage-system
 spec:
   bucketAccessClassName: brownfield-bucket-access-class
   bucketClaimName: brownfield-bucket-claim
diff --git a/cosi-examples/brownfield/bucketaccessclass.yaml b/cosi-examples/brownfield/bucketaccessclass.yaml
index 795a9305..aa9ab65f 100644
--- a/cosi-examples/brownfield/bucketaccessclass.yaml
+++ b/cosi-examples/brownfield/bucketaccessclass.yaml
@@ -2,7 +2,6 @@ kind: BucketAccessClass
 apiVersion: objectstorage.k8s.io/v1alpha1
 metadata:
   name: brownfield-bucket-access-class
-  namespace: container-object-storage-system
 driverName: cosi.scality.com
 authenticationType: KEY
 parameters:
diff --git a/cosi-examples/brownfield/bucketclaim.yaml b/cosi-examples/brownfield/bucketclaim.yaml
index 42968254..2edf8d2a 100644
--- a/cosi-examples/brownfield/bucketclaim.yaml
+++ b/cosi-examples/brownfield/bucketclaim.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
 kind: BucketClaim
 metadata:
   name: brownfield-bucket-claim
-  namespace: container-object-storage-system
 spec:
   bucketClassName: brownfield-bucket-class
   existingBucketName: brownfield-bucket # name of Bucket object
diff --git a/cosi-examples/brownfield/bucketclass.yaml b/cosi-examples/brownfield/bucketclass.yaml
index 2daaa1cc..301a4215 100644
--- a/cosi-examples/brownfield/bucketclass.yaml
+++ b/cosi-examples/brownfield/bucketclass.yaml
@@ -2,7 +2,6 @@ apiVersion: objectstorage.k8s.io/v1alpha1
 kind: BucketClass
 metadata:
   name: brownfield-bucket-class
-  namespace: container-object-storage-system
 driverName: cosi.scality.com
 deletionPolicy: Delete
 parameters:
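All of the example manifests above drop `metadata.namespace`, consistent with the `NAMESPACE="default"` change in the brownfield test script: on my reading of COSI v1alpha1, the class objects and `Bucket` are cluster-scoped, while the namespaced claims now land in `default` rather than the driver's namespace. The scoping can be sanity-checked against a live cluster (resource names below are assumed from the `objectstorage.k8s.io` group, not confirmed by this diff):

```bash
# The NAMESPACED column shows which COSI kinds accept metadata.namespace at all.
kubectl api-resources --api-group=objectstorage.k8s.io -o wide

# Namespaced kinds created without an explicit namespace end up in "default".
kubectl get bucketclaims,bucketaccesses -n default

# Cluster-scoped kinds take no namespace.
kubectl get buckets,bucketclasses,bucketaccessclasses
```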
diff --git a/docs/Usage.md b/docs/Usage.md
index f9d3f09d..ceb9068c 100644
--- a/docs/Usage.md
+++ b/docs/Usage.md
@@ -77,7 +77,6 @@ In the **Scality COSI Driver**, both **Greenfield** and **Brownfield** provision
 > Note:
 > For **fully working** examples, see the YAMLs in the [cosi-examples/brownfield](../cosi-examples/brownfield/) and [cosi-examples/greenfield](../cosi-examples/greenfield/) directories.
-> For brownfield scenario it is madatory to create COSI CRs in the same namespace as COSI driver and controller.
 
 ### 1.1 Greenfield: Creating a New Bucket
 
@@ -92,7 +91,6 @@ Greenfield provisioning will create a brand-new S3 bucket in your object store,
    kind: BucketClass
    metadata:
      name: greenfield-bucketclass
-     namespace: container-object-storage-system
    driverName: cosi.scality.com
    deletionPolicy: Delete
    parameters:
@@ -115,7 +113,6 @@
    kind: BucketClaim
    metadata:
      name: my-greenfield-bucketclaim
-     namespace: container-object-storage-system
    spec:
      bucketClassName: greenfield-bucketclass
      protocols:
@@ -131,8 +128,6 @@
 Brownfield provisioning allows you to manage an **already-existing** S3 bucket in Kubernetes.
 
-> Note: For brownfield scenario, COSI CRs for Bucket and Access provisioning should be created in the same namespace as COSI driver and controller.
-
 1. **Verify Existing Bucket**
 
    Ensure the bucket already exists in S3 either through Storage Administrator or by running the following AWS CLI command:
@@ -151,7 +146,6 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
    kind: BucketClass
    metadata:
      name: brownfield-bucketclass
-     namespace: container-object-storage-system
    driverName: cosi.scality.com
    deletionPolicy: Delete
    parameters:
@@ -172,7 +166,6 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
    kind: Bucket
    metadata:
      name: ""
-     namespace: container-object-storage-system
    spec:
      bucketClaim: {}
      driverName: cosi.scality.com
@@ -199,9 +192,8 @@ Brownfield provisioning allows you to manage an **already-existing** S3 bucket i
    kind: BucketClaim
    metadata:
      name: my-brownfield-bucketclaim
-     namespace: container-object-storage-system
    spec:
-     bucketClassName: brownfield-bucket-class
+     bucketClassName: brownfield-bucketclass
      existingBucketName: ""
      protocols:
        - S3
@@ -244,17 +236,15 @@
 A `BucketAccessClass` defines how access (IAM policy or S3 keys) is granted:
 
 ```bash
 cat <
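The last hunk above is cut off mid-heredoc. Judging from `cosi-examples/brownfield/bucketaccessclass.yaml` earlier in this diff, the complete example presumably resembles the following sketch; the `parameters` block is not visible in the truncated hunk, so it is left as a comment:

```bash
cat <<EOF | kubectl apply -f -
kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
  name: brownfield-bucket-access-class
driverName: cosi.scality.com
authenticationType: KEY
parameters:
  # driver-specific parameters (not shown in the truncated hunk)
EOF
```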