COSI-33: add helm charts for cosi-driver #29

Workflow file for this run

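# End-to-end CI for the COSI driver: creates a KIND cluster, installs the COSI
# controller, CRDs, and driver, brings up IAM (Vault) and S3 (CloudServer)
# services with docker compose, runs bucket-creation e2e tests, then collects
# logs and uploads them as artifacts.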
name: CI End-to-End Tests
on:
  push:
    branches:
      - '**'
  workflow_dispatch:
    inputs:
      debug_enabled:
        type: boolean
        description: 'Run the build with tmate debugging enabled'
        required: false
        default: false
jobs:
  e2e-tests-with-kind:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
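      # Provision a KIND (Kubernetes-in-Docker) cluster and wait up to 90s for it to be ready.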
      - name: Create k8s KIND Cluster
        uses: helm/[email protected]
        with:
          version: v0.21.0
          wait: 90s
          cluster_name: object-storage-cluster
      - name: Verify KIND cluster is running
        run: |
          kubectl cluster-info
          kubectl get nodes
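      # Optional interactive debugging: only starts when the workflow is triggered
      # manually (workflow_dispatch) with debug_enabled set to true.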
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
        with:
          detached: true
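      # Install the COSI controller, CRDs, and this driver via the repository setup script.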
      - name: Setup COSI Controller, CRDs and Driver
        run: |
          pwd
          chmod +x .github/scripts/setup_cosi_resources.sh
          .github/scripts/setup_cosi_resources.sh
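      # Log in to GitHub Container Registry with the built-in token before pulling images.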
      - name: Login to Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: "${{ github.repository_owner }}"
          password: "${{ github.token }}"
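      # Docker image cache: keyed on the hash of the compose .env file, so the cache
      # is invalidated whenever the pinned Vault/CloudServer image tags change.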
      - name: Restore Cached Docker Images
        id: cache_docker_images
        uses: actions/cache@v4
        with:
          path: /tmp/.docker_cache
          key: docker-${{ runner.os }}-${{ hashFiles('.github/s3_and_iam_deployment/.env') }}
          restore-keys: |
            docker-${{ runner.os }}-
      - name: Load Cached Images
        run: |
          for image in /tmp/.docker_cache/*.tar; do
            docker load -i "$image" || true
          done
        continue-on-error: true
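      # Start the IAM and S3 services with docker compose and wait for their local
      # ports (8600 and 8000) to accept connections before continuing.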
      - name: Setup IAM and S3 Services
        run: |-
          set -e -o pipefail;
          mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb && chown -R runner:docker logs data && chmod -R ugo+rwx logs data
          docker compose --profile iam_s3 up -d --quiet-pull
          bash ../scripts/wait_for_local_port.bash 8600 30
          bash ../scripts/wait_for_local_port.bash 8000 30
        working-directory: .github/s3_and_iam_deployment
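      # Only re-save the image tarballs when the cache lookup above missed.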
      - name: Save Images to Cache if not present
        if: steps.cache_docker_images.outputs.cache-hit != 'true'
        run: |
          source .github/s3_and_iam_deployment/.env
          echo "Vault Image: $VAULT_IMAGE"
          echo "CloudServer Image: $CLOUDSERVER_IMAGE"
          mkdir -p /tmp/.docker_cache
          docker save "$VAULT_IMAGE" -o /tmp/.docker_cache/vault_image.tar
          docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
        shell: bash
      - name: E2E tests for bucket creation via COSI driver
        run: |
          pwd
          chmod +x .github/scripts/e2e_test_bucket_creation.sh
          .github/scripts/e2e_test_bucket_creation.sh
      - name: Cleanup IAM and S3 Services
        run: docker compose --profile iam_s3 down
        working-directory: .github/s3_and_iam_deployment
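      # The remaining log-collection and cleanup steps run even when earlier steps
      # fail (if: always()), so artifacts are preserved for debugging.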
      - name: Move S3 and IAM logs and data to artifacts directory
        run: |-
          set -e -o pipefail;
          mkdir -p .github/e2e_tests/artifacts/logs .github/e2e_tests/artifacts/data
          cp -r .github/s3_and_iam_deployment/logs/* .github/e2e_tests/artifacts/logs/
          cp -r .github/s3_and_iam_deployment/data/* .github/e2e_tests/artifacts/data/
        if: always()
      - name: Capture Kubernetes Logs in artifacts directory
        run: |
          chmod +x .github/scripts/capture_k8s_logs.sh
          .github/scripts/capture_k8s_logs.sh
        if: always()
      - name: Cleanup COSI CRDs, Controller, and Driver
        run: |
          chmod +x .github/scripts/cleanup_cosi_resources.sh
          .github/scripts/cleanup_cosi_resources.sh
        if: always()
      - name: Upload logs and data to Scality artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: .github/e2e_tests/artifacts
        if: always()