Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature/cosi 15 add bucket creation e2e tests #1

Merged
merged 3 commits into from
Nov 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 54 additions & 0 deletions .github/scripts/cleanup_cosi_resources.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
#!/bin/bash
# Tear down COSI resources (driver, namespace, bucket objects, CRDs) after e2e tests.
# pipefail is required: commands below are piped through `tee`, and without it
# a failing command's exit status is masked by tee's (successful) status, so
# `set -e`, the ERR trap, and the `|| fallback` handlers would never fire.
set -eo pipefail

LOG_FILE=".github/e2e_tests/artifacts/logs/kind_cluster_logs/cosi_deployment/cleanup_debug.log"
mkdir -p "$(dirname "$LOG_FILE")" # Ensure the log directory exists

# ERR-trap handler: report the failing command ($BASH_COMMAND, captured by the
# trap) to both the console and the debug log, then abort the cleanup.
error_handler() {
  printf '%s\n' "An error occurred during the COSI cleanup. Check the log file for details." | tee -a "$LOG_FILE"
  printf '%s\n' "Failed command: $BASH_COMMAND" | tee -a "$LOG_FILE"
  exit 1
}

trap 'error_handler' ERR

# Echo a command, run it, and append its output (stdout + stderr) to the log.
# Returns ${PIPESTATUS[0]} — the command's own exit code rather than tee's —
# so the script's `|| fallback` handlers and the ERR trap still fire when the
# wrapped command fails (previously `cmd | tee` always returned tee's 0).
log_and_run() {
  echo "Running: $*" | tee -a "$LOG_FILE"
  "$@" 2>&1 | tee -a "$LOG_FILE"
  return "${PIPESTATUS[0]}"
}

# Remove the driver manifests and namespace; tolerate "already gone" states so
# cleanup is idempotent across partially-failed CI runs.
log_and_run echo "Removing COSI driver manifests and namespace..."
log_and_run kubectl delete -k . || { echo "COSI driver manifests not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete namespace scality-object-storage || { echo "Namespace scality-object-storage not found." | tee -a "$LOG_FILE"; }

log_and_run echo "Verifying namespace deletion..."
if kubectl get namespace scality-object-storage &>/dev/null; then
  echo "Warning: Namespace scality-object-storage was not deleted." | tee -a "$LOG_FILE"
  exit 1
fi

# Finalizers must be stripped before the bucket objects can actually be deleted.
log_and_run echo "Removing Finalizers from Bucket Claim and Bucket"
log_and_run kubectl patch bucketclaim my-bucket-claim -p '{"metadata":{"finalizers":[]}}' --type=merge || { echo "Bucket Claim finalizers not found." | tee -a "$LOG_FILE"; }

# `|| true` keeps cleanup going when the Bucket CRD is already deleted;
# otherwise the failing command substitution would trip `set -e`/the ERR trap
# and abort the rest of the cleanup, contrary to the tolerant style above.
BUCKET_NAMES=$(kubectl get bucket -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || true)

for BUCKET_NAME in $BUCKET_NAMES; do
  log_and_run echo "Removing finalizers from bucket: $BUCKET_NAME"
  log_and_run kubectl patch bucket "$BUCKET_NAME" -p '{"metadata":{"finalizers":[]}}' --type=merge || { echo "Finalizers not found for bucket: $BUCKET_NAME" | tee -a "$LOG_FILE"; }
done

log_and_run echo "Deleting Bucket Class and Bucket Claim..."
log_and_run kubectl delete -f cosi-examples/bucketclass.yaml || { echo "Bucket Class not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/bucketclaim.yaml || { echo "Bucket Claim not found." | tee -a "$LOG_FILE"; }

log_and_run echo "Deleting COSI CRDs..."
log_and_run kubectl delete -k github.com/kubernetes-sigs/container-object-storage-interface-api || { echo "COSI API CRDs not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -k github.com/kubernetes-sigs/container-object-storage-interface-controller || { echo "COSI Controller CRDs not found." | tee -a "$LOG_FILE"; }

log_and_run echo "Verifying COSI CRDs deletion..."
if kubectl get crd | grep 'container-object-storage-interface' &>/dev/null; then
  echo "Warning: Some COSI CRDs were not deleted." | tee -a "$LOG_FILE"
  exit 1
fi

log_and_run echo "COSI cleanup completed successfully."
102 changes: 102 additions & 0 deletions .github/scripts/e2e_test_bucket_creation.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
#!/bin/bash
# E2E test: verify that a COSI BucketClaim results in a real S3 bucket.
# pipefail is required: commands in this script are piped through `tee`, and
# without it a failing command's status is masked by tee's (successful) exit,
# so `set -e` and the ERR trap would never fire.
set -eo pipefail

# Define log file for debugging
LOG_FILE=".github/e2e_tests/artifacts/logs/e2e_tests/bucket_creation_test.log"
mkdir -p "$(dirname "$LOG_FILE")" # Ensure the log directory exists

# ERR-trap handler: log the failing command ($BASH_COMMAND, captured by the
# trap) to console and log file, then abort the test run.
error_handler() {
  printf '%s\n' "An error occurred during bucket creation tests. Check the log file for details." | tee -a "$LOG_FILE"
  printf '%s\n' "Failed command: $BASH_COMMAND" | tee -a "$LOG_FILE"
  exit 1
}

# Trap errors and call the error handler
trap 'error_handler' ERR

# Echo a command, run it, and append its output (stdout + stderr) to the log.
# Returns ${PIPESTATUS[0]} — the command's own exit code rather than tee's —
# so `set -e`/the ERR trap actually see failures (previously `cmd | tee`
# always returned tee's 0, silently swallowing errors).
log_and_run() {
  echo "Running: $*" | tee -a "$LOG_FILE"
  "$@" 2>&1 | tee -a "$LOG_FILE"
  return "${PIPESTATUS[0]}"
}

# Step 1: Create Account in Vault
log_and_run echo "Creating account in Vault container..."
CONTAINER_ID=$(docker ps -qf "name=s3_and_iam_deployment-iam-1")
# Fail fast with a clear message instead of a confusing `docker exec ""` error
# when the IAM container is not running.
if [ -z "$CONTAINER_ID" ]; then
  echo "Error: IAM container matching 's3_and_iam_deployment-iam-1' is not running." | tee -a "$LOG_FILE"
  exit 1
fi
# NOTE: the admin/account keys below are throwaway CI test credentials.
log_and_run docker exec "$CONTAINER_ID" sh -c "ADMIN_ACCESS_KEY_ID=D4IT2AWSB588GO5J9T00 ADMIN_SECRET_ACCESS_KEY=UEEu8tYlsOGGrgf4DAiSZD6apVNPUWqRiPG0nTB6 ./node_modules/vaultclient/bin/vaultclient create-account --name cosi-account --email [email protected]"
log_and_run docker exec "$CONTAINER_ID" sh -c "ADMIN_ACCESS_KEY_ID=D4IT2AWSB588GO5J9T00 ADMIN_SECRET_ACCESS_KEY=UEEu8tYlsOGGrgf4DAiSZD6apVNPUWqRiPG0nTB6 ./node_modules/vaultclient/bin/vaultclient generate-account-access-key --name=cosi-account --accesskey=PBUOB68AVF39EVVAFNFL --secretkey=P+PK+uMB9spUc21huaQoOexqdJoV00tSnl+pc7t7"

# Retrieve the Host IP Address (first address reported by `hostname -I`),
# used so pods inside the KIND cluster can reach CloudServer on the host.
HOST_IP=$(hostname -I | awk '{print $1}')
log_and_run echo "Using Host IP: $HOST_IP"

# Step 2: Configure AWS CLI in Home Directory
log_and_run echo "Configuring AWS CLI in home directory..."
log_and_run mkdir -p ~/.aws # Ensure the ~/.aws directory exists

# Create the AWS credentials file.
# SECURITY FIX: the credentials are intentionally NOT tee'd into "$LOG_FILE" —
# that log is uploaded as a CI artifact, and the previous version wrote the
# secret access key into it.
cat <<EOF > ~/.aws/credentials
[default]
aws_access_key_id = PBUOB68AVF39EVVAFNFL
aws_secret_access_key = P+PK+uMB9spUc21huaQoOexqdJoV00tSnl+pc7t7
EOF
chmod 600 ~/.aws/credentials # Restrict permissions on the secret file

# Create the AWS config file (non-sensitive, safe to log)
cat <<EOF | tee -a "$LOG_FILE" > ~/.aws/config
[default]
region = us-east-1
output = json
EOF

# Step 3: Apply S3 Secret for COSI with Host IP
# The secret embeds the account access key generated in Step 1 and points the
# COSI driver at CloudServer via the host IP (reachable from inside KIND).
# NOTE(review): COSI_S3_REGION is us-west-1 while the aws CLI config above uses
# us-east-1 — confirm this mismatch is intentional.
log_and_run echo "Applying S3 Secret for COSI with updated endpoint..."
cat <<EOF | kubectl apply -f - | tee -a "$LOG_FILE"
apiVersion: v1
kind: Secret
metadata:
  name: s3-secret-for-cosi
  namespace: default
type: Opaque
stringData:
  COSI_S3_ACCESS_KEY_ID: PBUOB68AVF39EVVAFNFL
  COSI_S3_SECRET_ACCESS_KEY: P+PK+uMB9spUc21huaQoOexqdJoV00tSnl+pc7t7
  COSI_S3_ENDPOINT: http://$HOST_IP:8000
  COSI_S3_REGION: us-west-1
EOF

# Step 4: Apply Bucket Class (defines the bucket-name prefix and deletion policy)
log_and_run echo "Applying Bucket Class..."
log_and_run kubectl apply -f cosi-examples/bucketclass.yaml

# Step 5: Apply Bucket Claim (triggers the driver to provision a bucket)
log_and_run echo "Applying Bucket Claim..."
log_and_run kubectl apply -f cosi-examples/bucketclaim.yaml

# Step 6: Verify Bucket Creation with Retry
# Initial listing is for debugging context only; the short sleep gives the
# driver a head start before polling begins.
log_and_run echo "Listing all S3 buckets before verification..."
log_and_run aws s3 ls --endpoint-url "http://localhost:8000"
sleep 5

log_and_run echo "Verifying bucket creation..."
# COSI appends a UUID to this BucketClass-derived prefix for each claim.
BUCKET_NAME_PREFIX="my-bucket-class"

ATTEMPTS=12 # Total attempts (2 minutes / 10 seconds per attempt)
DELAY=10    # Delay between attempts in seconds

# Poll until a bucket whose name starts with $BUCKET_NAME_PREFIX appears.
# `|| true` on the aws calls lets transient API/endpoint failures fall through
# to the retry path instead of aborting via `set -e`/the ERR trap.
for ((i = 1; i <= ATTEMPTS; i++)); do
  log_and_run aws --endpoint-url "http://localhost:8000" s3 ls || true
  # Use $BUCKET_NAME_PREFIX in the JMESPath query; previously the prefix was
  # duplicated as a hard-coded literal, which would silently break if the
  # variable were ever changed.
  BUCKET_FOUND=$(aws --endpoint-url "http://localhost:8000" s3api list-buckets \
    --query "Buckets[?starts_with(Name, '${BUCKET_NAME_PREFIX}')].Name" --output text || true)

  if [ -n "$BUCKET_FOUND" ]; then
    log_and_run echo "Bucket created with prefix '$BUCKET_NAME_PREFIX': $BUCKET_FOUND"
    exit 0
  else
    log_and_run echo "Attempt $i: Bucket with prefix '$BUCKET_NAME_PREFIX' not found. Retrying in $DELAY seconds..."
    sleep "$DELAY"
  fi
done

# If the bucket was not found within the timeout
log_and_run echo "Bucket with prefix '$BUCKET_NAME_PREFIX' was not created within the expected time."
exit 1
26 changes: 19 additions & 7 deletions .github/scripts/setup_cosi_resources.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#!/bin/bash
set -e
set -e # Exit on any command failure

# Define log file for debugging
LOG_FILE=".github/e2e_tests/artifacts/logs/kind_cluster_logs/cosi_deployment/setup_debug.log"
Expand All @@ -18,7 +18,10 @@ trap 'error_handler' ERR
# Log command execution to the log file for debugging
log_and_run() {
echo "Running: $*" | tee -a "$LOG_FILE"
"$@" | tee -a "$LOG_FILE"
if ! "$@" | tee -a "$LOG_FILE"; then
echo "Error: Command failed - $*" | tee -a "$LOG_FILE"
exit 1
fi
}

# Step 1: Install COSI CRDs
Expand All @@ -28,24 +31,33 @@ log_and_run kubectl create -k github.com/kubernetes-sigs/container-object-storag

# Step 2: Verify COSI Controller Pod Status
log_and_run echo "Verifying COSI Controller Pod status..."
log_and_run kubectl wait --namespace default --for=condition=ready pod -l app.kubernetes.io/name=container-object-storage-interface-controller --timeout=10s
if ! kubectl wait --namespace default --for=condition=ready pod -l app.kubernetes.io/name=container-object-storage-interface-controller --timeout=10s; then
echo "Error: COSI Controller pod did not reach ready state." | tee -a "$LOG_FILE"
exit 1
fi
log_and_run kubectl get pods --namespace default

# Step 3: Build COSI driver Docker image
log_and_run echo "Building COSI driver image..."
log_and_run docker build -t ghcr.io/scality/cosi:latest .
log_and_run docker build -t ghcr.io/scality/cosi-driver:latest .

# Step 4: Load COSI driver image into KIND cluster
log_and_run echo "Loading COSI driver image into KIND cluster..."
log_and_run kind load docker-image ghcr.io/scality/cosi:latest --name object-storage-cluster
log_and_run kind load docker-image ghcr.io/scality/cosi-driver:latest --name object-storage-cluster

# Step 5: Run COSI driver
log_and_run echo "Applying COSI driver manifests..."
log_and_run kubectl apply -k .
if ! kubectl apply -k .; then
echo "Error: Failed to apply COSI driver manifests." | tee -a "$LOG_FILE"
exit 1
fi

# Step 6: Verify COSI driver Pod Status
log_and_run echo "Verifying COSI driver Pod status..."
log_and_run kubectl wait --namespace scality-object-storage --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=20s
if ! kubectl wait --namespace scality-object-storage --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=20s; then
echo "Error: COSI driver Pod did not reach ready state." | tee -a "$LOG_FILE"
exit 1
fi
log_and_run kubectl get pods -n scality-object-storage

log_and_run echo "COSI setup completed successfully."
2 changes: 1 addition & 1 deletion .github/workflows/ci-build-and-unit-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,6 @@ jobs:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
with:
context: .
name: cosi
name: cosi-driver
namespace: ${{ github.repository_owner }}
tag: ${{ github.sha }}
12 changes: 12 additions & 0 deletions .github/workflows/ci-e2e-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,12 @@ jobs:
docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
shell: bash

- name: E2E tests for bucket creation via COSI driver
run: |
pwd
chmod +x .github/scripts/e2e_test_bucket_creation.sh
.github/scripts/e2e_test_bucket_creation.sh

- name: Cleanup IAM and S3 Services
run: docker compose --profile iam_s3 down
working-directory: .github/s3_and_iam_deployment
Expand All @@ -104,6 +110,12 @@ jobs:
.github/scripts/capture_k8s_logs.sh
if: always()

- name: Cleanup COSI CRDs, Controller, and Driver
run: |
chmod +x .github/scripts/cleanup_cosi_resources.sh
.github/scripts/cleanup_cosi_resources.sh
if: always()

- name: Upload logs and data to Scality artifacts
uses: scality/action-artifacts@v4
with:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ jobs:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
with:
context: .
name: cosi
name: cosi-driver
namespace: ${{ github.repository_owner }}
tag: ${{ inputs.tag }}
platforms: linux/amd64,linux/arm64
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ BIN_DIR = ./bin

# 'go env' vars aren't always available in make environments, so get defaults for needed ones
GOARCH ?= $(shell go env GOARCH)
IMAGE_NAME ?= ghcr.io/scality/cosi:latest
IMAGE_NAME ?= ghcr.io/scality/cosi-driver:latest

.PHONY: all build test clean

Expand Down
2 changes: 1 addition & 1 deletion cmd/scality-cosi-driver/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (
"flag"
"fmt"

"github.com/scality/cosi/pkg/driver"
"github.com/scality/cosi-driver/pkg/driver"
"k8s.io/klog/v2"

"sigs.k8s.io/container-object-storage-interface-provisioner-sidecar/pkg/provisioner"
Expand Down
4 changes: 2 additions & 2 deletions cosi-examples/bucketclaim.yaml
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
kind: BucketClaim
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: bucket-claim-1
name: my-bucket-claim
spec:
bucketClassName: bucket-1-name-prefix
bucketClassName: my-bucket-class
protocols:
- s3
4 changes: 2 additions & 2 deletions cosi-examples/bucketclass.yaml
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
kind: BucketClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: bucket-1-name-prefix # name of the bucket
name: my-bucket-class # bucket prefix, followed by UUID for every bucket claim
driverName: cosi.scality.com
deletionPolicy: Delete
deletionPolicy: Retain
parameters:
COSI_OBJECT_STORAGE_PROVIDER_SECRET_NAME: s3-secret-for-cosi
COSI_OBJECT_STORAGE_PROVIDER_SECRET_NAMESPACE: default
4 changes: 2 additions & 2 deletions cosi-examples/s3-secret-for-cosi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ metadata:
namespace: default
type: Opaque
stringData:
COSI_S3_ACCESS_KEY_ID: accessKey1 # Plain text access key
COSI_S3_SECRET_ACCESS_KEY: verySecretKey1 # Plain text secret key
COSI_S3_ACCESS_KEY_ID: PBUOB68AVF39EVVAFNFL # Plain text access key, generated in the CI
COSI_S3_SECRET_ACCESS_KEY: P+PK+uMB9spUc21huaQoOexqdJoV00tSnl+pc7t7 # Plain text secret key
COSI_S3_ENDPOINT: http://localhost:8000 # Plain text endpoint
COSI_S3_REGION: us-west-1 # Plain text region
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
module github.com/scality/cosi
module github.com/scality/cosi-driver

go 1.22.6

Expand Down
2 changes: 1 addition & 1 deletion kustomize/base/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ spec:
serviceAccountName: scality-object-storage-provisioner
containers:
- name: scality-cosi-driver
image: ghcr.io/scality/cosi:latest
image: ghcr.io/scality/cosi-driver:latest
imagePullPolicy: IfNotPresent
args:
- "--driver-prefix=cosi"
Expand Down
File renamed without changes.
2 changes: 1 addition & 1 deletion pkg/driver/identity_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/scality/cosi/pkg/driver"
"github.com/scality/cosi-driver/pkg/driver"

cosiapi "sigs.k8s.io/container-object-storage-interface-spec"
)
Expand Down
2 changes: 1 addition & 1 deletion pkg/driver/provisioner.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (

s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
s3client "github.com/scality/cosi/pkg/util/s3client"
s3client "github.com/scality/cosi-driver/pkg/util/s3client"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down
4 changes: 2 additions & 2 deletions pkg/driver/provisioner_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

"github.com/scality/cosi/pkg/driver"
s3client "github.com/scality/cosi/pkg/util/s3client"
"github.com/scality/cosi-driver/pkg/driver"
s3client "github.com/scality/cosi-driver/pkg/util/s3client"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
Expand Down
2 changes: 1 addition & 1 deletion pkg/util/s3client/s3client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/scality/cosi/pkg/util/s3client"
"github.com/scality/cosi-driver/pkg/util/s3client"
)

// MockS3Client implements the S3API interface for testing
Expand Down
Loading