diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml index 1fc7708f0c..af3e274ed3 100644 --- a/.github/actions/deploy/action.yaml +++ b/.github/actions/deploy/action.yaml @@ -7,6 +7,10 @@ inputs: description: "The tag of the Zenko Operator image to use" required: false default: "" + extra_components: + description: "Extra components to add to zenkoversion" + required: false + default: "" runs: using: composite steps: @@ -74,6 +78,8 @@ runs: shell: bash run: bash deploy-zenko.sh end2end default working-directory: ./.github/scripts/end2end + env: + EXTRA_COMPONENTS: ${{ inputs.extra_components }} - name: Add Keycloak user and assign StorageManager role shell: bash run: bash keycloak-helper.sh add-user default diff --git a/.github/scripts/end2end/configs/keycloak_user.json b/.github/scripts/end2end/configs/keycloak_user.json index 22d48e9662..7dfe2e1126 100644 --- a/.github/scripts/end2end/configs/keycloak_user.json +++ b/.github/scripts/end2end/configs/keycloak_user.json @@ -5,7 +5,7 @@ "emailVerified": true, "firstName": "${OIDC_FIRST_NAME}", "lastName": "${OIDC_LAST_NAME}", - "email": "e2e@zenko.local", + "email": "${OIDC_EMAIL}", "attributes": { "instanceIds": [ "${INSTANCE_ID}" diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml index 2f7179e119..68ba2bc344 100644 --- a/.github/scripts/end2end/configs/zenko.yaml +++ b/.github/scripts/end2end/configs/zenko.yaml @@ -24,11 +24,11 @@ spec: logging: logLevel: debug backbeat: - triggerExpirationsOneDayEarlierForTesting: true + triggerExpirationsOneDayEarlierForTesting: ${EXPIRE_ONE_DAY_EARLIER} lifecycleConductor: cronRule: "${BACKBEAT_LCC_CRON_RULE}" lifecycleBucketProcessor: - triggerTransitionsOneDayEarlierForTesting: true + triggerTransitionsOneDayEarlierForTesting: ${TRANSITION_ONE_DAY_EARLIER} logging: logLevel: trace mongodb: @@ -101,6 +101,8 @@ spec: e2e-cold: debug: "true" command-timeout: "60s" + pending-job-poll-after-age: "10s" + pending-job-poll-check-interval: "10s" ingress: workloadPlaneClass: 'nginx' controlPlaneClass: 'nginx' diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml index 32a547203c..5ba2998051 100644 --- a/.github/scripts/end2end/configs/zenkoversion.yaml +++ b/.github/scripts/end2end/configs/zenkoversion.yaml @@ -5,8 +5,6 @@ kind: ZenkoVersion metadata: name: '${ZENKO_VERSION_NAME}' spec: - featureFlags: - backbeatGCVaultAuthorized: true dashboards: backbeat: image: '${BACKBEAT_DASHBOARD}' @@ -129,3 +127,19 @@ spec: image: '${REDIS_EXPORTER_IMAGE}' tag: '${REDIS_EXPORTER_TAG}' kubedb: '${REDIS_KUBEDB_TAG}' + ${EXTRA_COMPONENTS} + defaults: + backbeatConcurrency: + lifecycleBucketProcessor: 30 + lifecycleObjectProcessor: 20 + replicaMultipliers: + cloudserver: 16 + internalCloudserver: 4 + vault: 4 + kafkaResources: + broker: + limitCPU: 3 + cruiseControl: + limitMemory: 3Gi + featureFlags: + backbeatGCVaultAuthorized: true \ No newline at end of file diff --git a/.github/scripts/end2end/configure-e2e.sh b/.github/scripts/end2end/configure-e2e.sh index 9d1ce43098..dcb7f10c79 100755 --- a/.github/scripts/end2end/configure-e2e.sh +++ b/.github/scripts/end2end/configure-e2e.sh @@ -51,7 +51,7 @@ KAFKA_REGISTRY_NAME=$(yq eval ".kafka.sourceRegistry" ../../../solution/deps.yam KAFKA_IMAGE_NAME=$(yq eval ".kafka.image" ../../../solution/deps.yaml) KAFKA_IMAGE_TAG=$(yq eval ".kafka.tag" ../../../solution/deps.yaml) KAFKA_IMAGE=$KAFKA_REGISTRY_NAME/$KAFKA_IMAGE_NAME:$KAFKA_IMAGE_TAG 
-KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \ +KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME} \ -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts) KAFKA_HOST_PORT=${KAFKA_HOST_PORT:1:-1} diff --git a/.github/scripts/end2end/deploy-dr.sh b/.github/scripts/end2end/deploy-dr.sh deleted file mode 100644 index b72e8c7c03..0000000000 --- a/.github/scripts/end2end/deploy-dr.sh +++ /dev/null @@ -1,24 +0,0 @@ -export DR_SINK_NAME=${DR_SINK_NAME:-'end2end-pra-dr-sink'} -export DR_SOURCE_NAME=${DR_SOURCE_NAME:-'end2end-dr-source'} -export NAMESPACE=${NAMESPACE:-'default'} - -cat configs/zenko_dr_sink.yaml | envsubst | kubectl apply -f - - -k_cmd="kubectl -n ${NAMESPACE} get zenkodrsink/${DR_SINK_NAME}" -for i in $(seq 1 120); do - conditions=$($k_cmd -o "jsonpath={.status.conditions}") - if kubectl wait --for condition=Available --timeout 5s --namespace ${NAMESPACE} zenkodrsink/${DR_SINK_NAME}; then - break; - fi -done - - -cat configs/zenko_dr_source.yaml | envsubst | kubectl apply -f - - -k_cmd="kubectl -n ${NAMESPACE} get zenkodrsource/${DR_SOURCE_NAME}" -for i in $(seq 1 120); do - conditions=$($k_cmd -o "jsonpath={.status.conditions}") - if kubectl wait --for condition=Available --timeout 5s --namespace ${NAMESPACE} zenkodrsource/${DR_SOURCE_NAME}; then - break; - fi -done diff --git a/.github/scripts/end2end/deploy-zenko.sh b/.github/scripts/end2end/deploy-zenko.sh index 52dc115e32..c6b1819cbe 100755 --- a/.github/scripts/end2end/deploy-zenko.sh +++ b/.github/scripts/end2end/deploy-zenko.sh @@ -38,7 +38,7 @@ fi # TODO: use kustomize ZENKO_MONGODB_SHARDED=${ZENKO_MONGODB_SHARDED:-'false'} -if [ ${ZENKO_MONGODB_SHARDED} == 'true' ]; then +if [ "${ZENKO_MONGODB_SHARDED}" = 'true' ]; then export ZENKO_ANNOTATIONS="annotations: zenko.io/x-backbeat-oneshard-replicaset: data-db-mongodb-sharded-shard-0 zenko.io/x-backbeat-oneshard-replicaset-hosts: data-db-mongodb-sharded-shard0-data-0.data-db-mongodb-sharded-headless.default.svc.cluster.local:27017" @@ -50,6 +50,11 @@ else fi export ZENKO_MONGODB_DATABASE="${ZENKO_MONGODB_DATABASE:-'datadb'}" +if [ "${TIME_PROGRESSION_FACTOR:-1}" -gt 1 ]; then + export ZENKO_ANNOTATIONS="${ZENKO_ANNOTATIONS:-annotations:} + zenko.io/time-progression-factor: \"${TIME_PROGRESSION_FACTOR}\"" +fi + function dependencies_image_env() { yq eval '.[] | .envsubst + "=" + (.sourceRegistry // "docker.io") + "/" + .image' ${DEPS_PATH} | diff --git a/.github/scripts/end2end/keycloak-helper.sh b/.github/scripts/end2end/keycloak-helper.sh index 6f9d08eb2d..fe943643b7 100755 --- a/.github/scripts/end2end/keycloak-helper.sh +++ b/.github/scripts/end2end/keycloak-helper.sh @@ -6,6 +6,7 @@ DIR=$(dirname "$0") COMMAND=${1:-''} NAMESPACE=${2:-default} +ZENKO_NAME=${3:-end2end} KEYCLOAK_EXEC="kubectl -n ${NAMESPACE} exec -i keycloak-0 --" @@ -28,7 +29,9 @@ case $COMMAND in "add-user") refresh_creds - export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko -o jsonpath='{.items[0].status.instanceID}'` + export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko ${ZENKO_NAME} -o jsonpath='{.status.instanceID}'` + + export OIDC_EMAIL=${OIDC_EMAIL:-"e2e@zenko.local"} envsubst < $DIR/configs/keycloak_user.json | \ ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh create users -r ${OIDC_REALM} -f - diff --git a/.github/scripts/end2end/patch-coredns.sh b/.github/scripts/end2end/patch-coredns.sh index 5e505d6c08..b297d596d6 100755 ---
a/.github/scripts/end2end/patch-coredns.sh +++ b/.github/scripts/end2end/patch-coredns.sh @@ -25,6 +25,13 @@ corefile=" rewrite name exact sts.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local rewrite name exact iam.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local rewrite name exact shell-ui.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact keycloak.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact management.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact s3.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact sts.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact iam.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure fallthrough in-addr.arpa ip6.arpa diff --git a/.github/scripts/end2end/prepare-pra.sh b/.github/scripts/end2end/prepare-pra.sh index 965b21bb5a..215868d752 100644 --- a/.github/scripts/end2end/prepare-pra.sh +++ b/.github/scripts/end2end/prepare-pra.sh @@ -6,11 +6,14 @@ export MONGODB_PRA_DATABASE="${MONGODB_PRA_DATABASE:-'pradb'}" export ZENKO_MONGODB_DATABASE="${MONGODB_PRA_DATABASE}" export ZENKO_MONGODB_SECRET_NAME="mongodb-db-creds-pra" -export ZENKO_IAM_INGRESS="iam.zenko-pra.local" -export ZENKO_STS_INGRESS="sts.zenko-pra.local" -export ZENKO_MANAGEMENT_INGRESS="management.zenko-pra.local" -export ZENKO_S3_INGRESS="s3.zenko-pra.local" -export ZENKO_UI_INGRESS="ui.zenko-pra.local" +echo 'ZENKO_MONGODB_DATABASE="pradb"' >> $GITHUB_ENV +echo 'ZENKO_MONGODB_SECRET_NAME="mongodb-db-creds-pra"' >> $GITHUB_ENV + +echo 'ZENKO_IAM_INGRESS="iam.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_STS_INGRESS="sts.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_MANAGEMENT_INGRESS="management.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_S3_INGRESS="s3.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_UI_INGRESS="ui.dr.zenko.local"' >> $GITHUB_ENV MONGODB_ROOT_USERNAME="${MONGODB_ROOT_USERNAME:-'root'}" MONGODB_ROOT_PASSWORD="${MONGODB_ROOT_PASSWORD:-'rootpass'}" diff --git a/.github/scripts/end2end/run-e2e-ctst.sh b/.github/scripts/end2end/run-e2e-ctst.sh index ed6a2d5c2a..6a0e3cd6dc 100755 --- a/.github/scripts/end2end/run-e2e-ctst.sh +++ b/.github/scripts/end2end/run-e2e-ctst.sh @@ -5,6 +5,9 @@ ZENKO_NAME=${1:-end2end} COMMAND=${2:-"premerge"} PARALLEL_RUNS=${PARALLEL_RUNS:-$(( ( $(nproc) + 1 ) / 2 ))} RETRIES=${4:-3} + +shift 4 + JUNIT_REPORT_PATH=${JUNIT_REPORT_PATH:-"ctst-junit.xml"} # Zenko Version @@ -14,6 +17,8 @@ VERSION=$(cat ../../../VERSION | grep -Po 'VERSION="\K[^"]*') ZENKO_ACCOUNT_NAME="zenko-ctst" ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) +ADMIN_PRA_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) +ADMIN_PRA_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) 
STORAGE_MANAGER_USER_NAME="ctst_storage_manager" STORAGE_ACCOUNT_OWNER_USER_NAME="ctst_storage_account_owner" DATA_CONSUMER_USER_NAME="ctst_data_consumer" @@ -35,6 +40,9 @@ SORBET_FWD_2_ACCESSKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd SORBET_FWD_2_SECRETKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.secretKey}' | base64 -d) SERVICE_USERS_CREDENTIALS=$(echo '{"backbeat-lifecycle-bp-1":'${BACKBEAT_LCBP_1_CREDS}',"backbeat-lifecycle-conductor-1":'${BACKBEAT_LCC_1_CREDS}',"backbeat-lifecycle-op-1":'${BACKBEAT_LCOP_1_CREDS}',"backbeat-qp-1":'${BACKBEAT_QP_1_CREDS}',"sorbet-fwd-2":{"accessKey":"'${SORBET_FWD_2_ACCESSKEY}'","secretKey":"'${SORBET_FWD_2_SECRETKEY}'"}}' | jq -R) +DR_ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d) +DR_ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d) + # Extracting kafka host from bacbeat's config KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \ -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts) @@ -48,6 +56,7 @@ KAFKA_CLEANER_INTERVAL=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.kaf WORLD_PARAMETERS="$(jq -c <> $GITHUB_ENV - KAFKA_IMAGE=$(get_image_from_deps kafka) - KAFKA_TAG=$(yq eval '.kafka.tag' deps.yaml) - KAFKA_CONNECT_IMAGE=$(get_image_from_deps kafka-connect) - KAFKA_CONNECT_TAG=$(yq eval '.kafka-connect.tag' deps.yaml) - JMX_JAVAAGENT_IMAGE=$(get_image_from_deps jmx-javaagent) - JMX_JAVAAGENT_TAG=$(yq eval '.jmx-javaagent.tag' deps.yaml) - MONGODB_CONNECTOR_TAG=$(yq eval '.mongodb-connector.tag' deps.yaml) - KAFKA_CLEANER_IMAGE=$(get_image_from_deps kafka-cleaner) - KAFKA_CLEANER_TAG=$(yq eval '.kafka-cleaner.tag' deps.yaml) - KAFKA_CRUISECONTROL_IMAGE=$(get_image_from_deps kafka-cruise-control) - KAFKA_CRUISECONTROL_TAG=$(yq eval '.kafka-cruise-control.tag' deps.yaml) - KAFKA_LAGEXPORTER_IMAGE=$(get_image_from_deps kafka-lag-exporter) - KAFKA_LAGEXPORTER_TAG=$(yq eval '.kafka-lag-exporter.tag' deps.yaml) - EOF - - name: Deploy zenko dr components - run: bash deploy-dr.sh + OIDC_USERNAME: 'zenko-end2end-pra' + OIDC_EMAIL: 'e2e-pra@zenko.local' + working-directory: ./.github/scripts/end2end + - name: Configure E2E PRA test environment + run: bash configure-e2e.sh end2end-pra ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} default + working-directory: ./.github/scripts/end2end + env: + OIDC_USERNAME: 'zenko-end2end-pra' + - name: Configure E2E CTST test environment + run: bash configure-e2e-ctst.sh + working-directory: ./.github/scripts/end2end + - name: Run CTST end to end tests + run: bash run-e2e-ctst.sh "" "" "" "" --tags @PRA working-directory: ./.github/scripts/end2end - name: Archive artifact logs and data uses: ./.github/actions/archive-artifacts @@ -685,11 +684,14 @@ jobs: GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }} ZENKO_MONGODB_SHARDED: "true" ZENKO_ENABLE_SOSAPI: true + TIME_PROGRESSION_FACTOR: 86400 + TRANSITION_ONE_DAY_EARLIER: false + EXPIRE_ONE_DAY_EARLIER: false - name: Configure E2E test environment run: bash configure-e2e-ctst.sh working-directory: ./.github/scripts/end2end - name: Run CTST end to end tests - run: bash run-e2e-ctst.sh + run: bash run-e2e-ctst.sh "" "" "" "" --tags 'not @PRA' working-directory: ./.github/scripts/end2end - name: Upload results if: "!cancelled() && 
env.TRUNK_TOKEN" diff --git a/solution/deps.yaml b/solution/deps.yaml index b84b8d0dce..24cd72635d 100644 --- a/solution/deps.yaml +++ b/solution/deps.yaml @@ -95,6 +95,11 @@ sorbet: image: sorbet tag: v1.1.10 envsubst: SORBET_TAG +drctl: + sourceRegistry: ghcr.io/scality + image: zenko-drctl + tag: v1.0.2 + envsubst: DRCTL_TAG # To be enabled back when utapi is used in Zenko 2.x # utapi: # sourceRegistry: ghcr.io/scality diff --git a/tests/ctst/Dockerfile b/tests/ctst/Dockerfile index 64b0b43ca2..852cdc4a1d 100644 --- a/tests/ctst/Dockerfile +++ b/tests/ctst/Dockerfile @@ -1,4 +1,7 @@ ARG CTST_TAG +ARG DRCTL_TAG + +FROM ghcr.io/scality/zenko-drctl:$DRCTL_TAG as drctl FROM ghcr.io/scality/cli-testing:$CTST_TAG COPY package.json /tmp/package.json @@ -16,4 +19,6 @@ ENV SDK=true WORKDIR /ctst +COPY --from=drctl /zenko-drctl . + CMD ["./run"] diff --git a/tests/ctst/common/common.ts b/tests/ctst/common/common.ts index e0e07d4421..5c991ab239 100644 --- a/tests/ctst/common/common.ts +++ b/tests/ctst/common/common.ts @@ -1,11 +1,19 @@ import { ListObjectVersionsOutput } from '@aws-sdk/client-s3'; import { Given, setDefaultTimeout, Then, When } from '@cucumber/cucumber'; -import { Constants, S3, Utils } from 'cli-testing'; +import { Constants, Identity, IdentityEnum, S3, Utils } from 'cli-testing'; import Zenko from 'world/Zenko'; import { safeJsonParse } from './utils'; import assert from 'assert'; import { Admin, Kafka } from 'kafkajs'; -import { createBucketWithConfiguration, putObject, runActionAgainstBucket } from 'steps/utils/utils'; +import { + createBucketWithConfiguration, + putObject, + runActionAgainstBucket, + getObjectNameWithBackendFlakiness, + verifyObjectLocation, + restoreObject, + addTransitionWorkflow, +} from 'steps/utils/utils'; import { ActionPermissionsType } from 'steps/bucket-policies/utils'; setDefaultTimeout(Constants.DEFAULT_TIMEOUT); @@ -28,11 +36,10 @@ export async function cleanS3Bucket( const createdObjects = world.getSaved>('createdObjects'); if (createdObjects !== undefined) { const results = await S3.listObjectVersions(world.getCommandParameters()); - const res = safeJsonParse(results.stdout); + const res = safeJsonParse(results.stdout); assert(res.ok); - const parsedResults = res.result as ListObjectVersionsOutput; - const versions = parsedResults.Versions || []; - const deleteMarkers = parsedResults.DeleteMarkers || []; + const versions = res.result!.Versions || []; + const deleteMarkers = res.result!.DeleteMarkers || []; await Promise.all(versions.concat(deleteMarkers).map(obj => { world.addCommandParameter({ key: obj.Key }); world.addCommandParameter({ versionId: obj.VersionId }); @@ -45,35 +52,6 @@ export async function cleanS3Bucket( await S3.deleteBucket(world.getCommandParameters()); } -/** - * @param {Zenko} this world object - * @param {string} objectName object name - * @returns {string} the object name based on the backend flakyness - */ -function getObjectNameWithBackendFlakiness(this: Zenko, objectName: string) { - let objectNameFinal; - const backendFlakinessRetryNumber = this.getSaved('backendFlakinessRetryNumber'); - const backendFlakiness = this.getSaved('backendFlakiness'); - - if (!backendFlakiness || !backendFlakinessRetryNumber || !objectName) { - return objectName; - } - - switch (backendFlakiness) { - case 'command': - objectNameFinal = `${objectName}.scal-retry-command-${backendFlakinessRetryNumber}`; - break; - case 'archive': - case 'restore': - objectNameFinal = 
`${objectName}.scal-retry-${backendFlakiness}-job-${backendFlakinessRetryNumber}`; - break; - default: - this.logger.debug('Unknown backend flakyness', { backendFlakiness }); - return objectName; - } - return objectNameFinal; -} - async function addMultipleObjects(this: Zenko, numberObjects: number, objectName: string, sizeBytes: number, userMD?: string) { let lastResult = null; @@ -154,6 +132,18 @@ Given('{int} objects {string} of size {int} bytes', await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes); }); +Given('{int} objects {string} of size {int} bytes on {string} site', + async function (this: Zenko, numberObjects: number, objectName: string, sizeBytes: number, site: string) { + this.resetCommand(); + + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); + } else { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + } + await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes); + }); + Given('{int} objects {string} of size {int} bytes with user metadata {string}', async function (this: Zenko, numberObjects: number, objectName: string, sizeBytes: number, userMD: string) { await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes, userMD); @@ -188,9 +178,8 @@ Then('object {string} should have the tag {string} with value {string}', this.addCommandParameter({ versionId }); } await S3.getObjectTagging(this.getCommandParameters()).then(res => { - const parsed = safeJsonParse(res.stdout); - const head = parsed.result as { TagSet: [{Key: string, Value: string}] | undefined }; - assert(head.TagSet?.some(tag => tag.Key === tagKey && tag.Value === tagValue)); + const parsed = safeJsonParse<{ TagSet: [{Key: string, Value: string}] | undefined }>(res.stdout); + assert(parsed.result!.TagSet?.some(tag => tag.Key === tagKey && tag.Value === tagValue)); }); }); @@ -206,103 +195,25 @@ Then('object {string} should have the user metadata with key {string} and value const res = await S3.headObject(this.getCommandParameters()); assert.ifError(res.stderr); assert(res.stdout); - const parsed = safeJsonParse(res.stdout); + const parsed = safeJsonParse<{ Metadata: {[key: string]: string} | undefined }>(res.stdout); assert(parsed.ok); - const head = parsed.result as { Metadata: {[key: string]: string} | undefined }; - assert(head.Metadata); - assert(head.Metadata[userMDKey]); - assert(head.Metadata[userMDKey] === userMDValue); + assert(parsed.result!.Metadata); + assert(parsed.result!.Metadata[userMDKey]); + assert(parsed.result!.Metadata[userMDKey] === userMDValue); }); // add a transition workflow to a bucket Given('a transition workflow to {string} location', async function (this: Zenko, location: string) { - let conditionOk = false; - this.resetCommand(); - this.addCommandParameter({ bucket: this.getSaved('bucketName') }); - const lifecycleConfiguration = JSON.stringify({ - Rules: [ - { - Status: 'Enabled', - Prefix: '', - Transitions: [ - { - Days: 20, - StorageClass: location, - }, - ], - }, - ], - }); - this.addCommandParameter({ - lifecycleConfiguration, - }); - const commandParameters = this.getCommandParameters(); - while (!conditionOk) { - const res = await S3.putBucketLifecycleConfiguration(commandParameters); - conditionOk = res.err === null; - // Wait for the transition to be accepted because the deployment of the location's pods can take some time - await Utils.sleep(5000); - } + await addTransitionWorkflow.call(this, location); }); When('i restore 
object {string} for {int} days', async function (this: Zenko, objectName: string, days: number) { - const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName'); - this.resetCommand(); - this.addCommandParameter({ bucket: this.getSaved('bucketName') }); - this.addCommandParameter({ key: objName }); - const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName); - if (versionId) { - this.addCommandParameter({ versionId }); - } - this.addCommandParameter({ restoreRequest: `Days=${days}` }); - const result = await S3.restoreObject(this.getCommandParameters()); - this.setResult(result); + await restoreObject.call(this, objectName, days); }); // wait for object to transition to a location or get restored from it -Then('object {string} should be {string} and have the storage class {string}', { timeout: 130000 }, - async function (this: Zenko, objectName: string, objectTransitionStatus: string, storageClass: string) { - const objName = - getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName'); - this.resetCommand(); - this.addCommandParameter({ bucket: this.getSaved('bucketName') }); - this.addCommandParameter({ key: objName }); - const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName); - if (versionId) { - this.addCommandParameter({ versionId }); - } - let conditionOk = false; - while (!conditionOk) { - const res = await S3.headObject(this.getCommandParameters()); - if (res.err) { - break; - } - assert(res.stdout); - const parsed = safeJsonParse(res.stdout); - assert(parsed.ok); - const head = parsed.result as { - StorageClass: string | undefined, - Restore: string | undefined, - }; - const expectedClass = storageClass !== '' ? storageClass : undefined; - if (head?.StorageClass === expectedClass) { - conditionOk = true; - } - if (objectTransitionStatus == 'restored') { - const isRestored = !!head?.Restore && - head.Restore.includes('ongoing-request="false", expiry-date='); - // if restore didn't get initiated fail immediately - const isPendingRestore = !!head?.Restore && - head.Restore.includes('ongoing-request="true"'); - assert(isRestored || isPendingRestore, 'Restore didn\'t get initiated'); - conditionOk = conditionOk && isRestored; - } else if (objectTransitionStatus == 'cold') { - conditionOk = conditionOk && !head?.Restore; - } - await Utils.sleep(1000); - } - assert(conditionOk); - }); +Then('object {string} should be {string} and have the storage class {string}', + { timeout: 130000 }, verifyObjectLocation); When('i delete object {string}', async function (this: Zenko, objectName: string) { const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName'); diff --git a/tests/ctst/common/hooks.ts b/tests/ctst/common/hooks.ts index 80c8fd4c37..f1b70e75b9 100644 --- a/tests/ctst/common/hooks.ts +++ b/tests/ctst/common/hooks.ts @@ -7,12 +7,13 @@ import { import Zenko from '../world/Zenko'; import { Identity } from 'cli-testing'; import { prepareQuotaScenarios, teardownQuotaScenarios } from 'steps/quotas/quotas'; +import { displayDebuggingInformation, preparePRA } from 'steps/pra'; // HTTPS should not cause any error for CTST process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; const { atMostOnePicklePerTag } = parallelCanAssignHelpers; -const noParallelRun = atMostOnePicklePerTag(['@AfterAll']); +const noParallelRun = atMostOnePicklePerTag(['@AfterAll', '@PRA', '@ColdStorage']); setParallelCanAssign(noParallelRun); @@ -22,6 +23,16 @@ Before(async function (this: Zenko) { 
await Zenko.init(this.parameters); }); +Before({ tags: '@PRA' }, function () { + preparePRA(this as Zenko); +}); + +After({ tags: '@PRA' }, async function (this, results) { + if (results.result?.status === 'FAILED') { + await displayDebuggingInformation(this as Zenko); + } +}); + Before({ tags: '@Quotas', timeout: 1200000 }, async function (scenarioOptions) { await prepareQuotaScenarios(this as Zenko, scenarioOptions); }); diff --git a/tests/ctst/common/utils.ts b/tests/ctst/common/utils.ts index 9fec76eb80..f201bcd3b1 100644 --- a/tests/ctst/common/utils.ts +++ b/tests/ctst/common/utils.ts @@ -87,12 +87,12 @@ export const s3FunctionExtraParams: { [key: string]: Record<string, unknown>[] } }], }; -export function safeJsonParse(jsonString: string): { ok: boolean, result: object } { - let result = {}; +export function safeJsonParse<T extends object = object>(jsonString: string): { ok: boolean, result: T | null } { + let result: T; try { - result = JSON.parse(jsonString) as object; + result = JSON.parse(jsonString) as T; } catch (err) { - return { ok: false, result }; + return { ok: false, result: null }; } return { ok: true, result }; } diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature new file mode 100644 index 0000000000..30f8f52c8b --- /dev/null +++ b/tests/ctst/features/pra.feature @@ -0,0 +1,44 @@ +Feature: PRA operations + + @2.6.0 + @PreMerge + @Dmf + @PRA + @ColdStorage + Scenario Outline: PRA (nominal case) + # Prepare objects in the primary site + Given a "<versioningConfiguration>" bucket + And a transition workflow to "e2e-cold" location + And <objectCount> objects "obj" of size <objectSize> bytes on "Primary" site + Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" + And object "obj-2" should be "transitioned" and have the storage class "e2e-cold" + And dmf volume should contain <objectCount> objects + + # Deploy PRA + Given a DR installed + Then the DR source should be in phase "Running" + And the DR sink should be in phase "Running" + Then the kafka DR volume exists + + # Check that objects are transitioned in the DR site + Given access keys for the replicated account + Then object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site + And object "obj-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site + + # Test again the transition workflow + Given <objectCount> objects "obj2" of size <objectSize> bytes on "Primary" site + Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site + And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site + Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site + And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site + When i restore object "obj-1" for 2 days on "Primary" site + Then object "obj-1" should "" be "restored" and have the storage class "e2e-cold" on "Primary" site + And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site + + # Uninstall DR + #When I uninstall DR + #Then the DR custom resources should be deleted + + Examples: + | versioningConfiguration | objectCount | objectSize | + | Versioned | 2 | 100 | \ No newline at end of file diff --git a/tests/ctst/package.json b/tests/ctst/package.json index 66a014cf6e..cd5da7764a 100644 --- a/tests/ctst/package.json +++ b/tests/ctst/package.json @@ -26,7 +26,7 @@ "@typescript-eslint/eslint-plugin": "^5.45.0", "@typescript-eslint/parser": "^5.45.0", "babel-jest": "^29.3.1", - 
"cli-testing": "github:scality/cli-testing.git#1.0.1", + "cli-testing": "github:scality/cli-testing.git#v1.2.0", "eslint": "^8.28.0" }, "scripts": { diff --git a/tests/ctst/steps/dmf.ts b/tests/ctst/steps/dmf.ts new file mode 100644 index 0000000000..535e26da04 --- /dev/null +++ b/tests/ctst/steps/dmf.ts @@ -0,0 +1,23 @@ +import { Then, After } from '@cucumber/cucumber'; +import assert from 'assert'; +import { execShellCommand } from 'common/utils'; + +async function cleanDmfVolume() { + await execShellCommand('rm -rf /cold-data/*'); +} + +Then('dmf volume should contain {int} objects', async (objectCount: number) => { + let conditionOk = false; + while (!conditionOk) { + // Getting the number of objects inside the volume used + // by the mock dmf to store transitioned objects + const outStr = await execShellCommand('find /cold-data -type f | wc -l'); + // we store two files per object (content and manifest.json) + conditionOk = Number(outStr) === objectCount * 2; + } + assert(conditionOk); +}); + +After({ tags: '@Dmf' }, async () => { + await cleanDmfVolume(); +}); diff --git a/tests/ctst/steps/dr/drctl.ts b/tests/ctst/steps/dr/drctl.ts new file mode 100644 index 0000000000..32a70c90a8 --- /dev/null +++ b/tests/ctst/steps/dr/drctl.ts @@ -0,0 +1,233 @@ +import util from 'util'; +import { exec } from 'child_process'; + +import Zenko from 'world/Zenko'; + +type InstallConfig = { + sourceZenkoDrInstance?: string; + sourceKafkaReplicas?: number; + sourceConnectorReplicas?: number; + sinkZenkoDrInstance?: string; + sinkKafkaReplicas?: number; + sinkConnectorReplicas?: number; + kafkaClusterLocation?: string; + kafkaNodePortStartingPort?: number; + kafkaPersistenceExistingPv?: string; + kafkaPersistenceSize?: string; + kafkaPersistenceStorageClassName?: string; + kafkaPersistenceAnnotations?: string; + kafkaPersistenceSelector?: string; + locations?: string; + s3Bucket?: string; + + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; + + sourceS3Endpoint?: string; + sourceS3UserSecretName?: string; + sourceSs3AccessKeyField?: string; + sourceS3SecretKeyField?: string; + sourceS3Region?: string; + + sinkS3Endpoint?: string; + sinkS3UserSecretName?: string; + sinkSs3AccessKeyField?: string; + sinkS3SecretKeyField?: string; + sinkS3Region?: string; +}; + +type BootstrapDumpConfig = { + createBucketIfNotExists?: boolean; + cleanupBucketBeforeDump?: boolean; + locations?: string[]; + oidcProviders?: string[]; + s3Bucket?: string; + mongodbHosts?: string[]; + mongodbUsername?: string; + mongodbPassword?: string; + mongodbDatabase?: string; + mongodbReplicaset?: string; + mongodbReadPref?: string; + mongodbAuthDatabase?: string; + s3Endpoint?: string; + s3AccessKey?: string; + s3SecretKey?: string; + s3Region?: string; +}; + +type BootstrapLoadConfig = { + mongodbSourceDatabase?: string; + parallel?: number; + dropCollections?: boolean; + s3Bucket?: string; + mongodbHosts?: string[]; + mongodbUsername?: string; + mongodbPassword?: string; + mongodbDatabase?: string; + mongodbReplicaset?: string; + mongodbReadPref?: string; + mongodbAuthDatabase?: string; + s3Endpoint?: string; + s3AccessKey?: string; + s3SecretKey?: string; + s3Region?: string; +}; + +type VolumeGetConfig = { + targetZenkoKubeconfigPath?: string; + targetZenkoKubeconfigData?: string; + targetZenkoInstance?: string; + targetZenkoNamespace?: 
string; + volumeName?: string; + volumeNodeName?: string; + timeout?: string; +}; + +type FailoverConfig = { + wait?: boolean; + timeout?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; +}; + +type UninstallConfig = { + sinkZenkoDrInstance?: string; + sourceZenkoDrInstance?: string; + wait?: boolean; + timeout?: string; + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; +}; + +type StatusConfig = { + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoDrInstance?: string; + sinkZenkoDrInstance?: string; + output?: string; +}; + +type ReplicationPauseConfig = { + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoDrInstance?: string; + sinkZenkoDrInstance?: string; + wait?: boolean; + timeout?: string; +}; + +type ReplicationResumeConfig = { + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoDrInstance?: string; + sinkZenkoDrInstance?: string; + wait?: boolean; + timeout?: string; +}; + +/** + * Helper class to run Drctl tool + */ +export default class ZenkoDrctl { + private world: Zenko; + + constructor(world: Zenko) { + this.world = world; + } + + private async runCommand(action: string, params: string, throwOnError = false) { + const command = `/ctst/zenko-drctl ${action} ${params}`; + try { + this.world.logger.debug('running zenko-drctl command', { command }); + const result = await util.promisify(exec)(command); + this.world.logger.debug('zenko-drctl command result', { result }); + return result.stdout; + } catch (err) { + this.world.logger.debug('zenko-drctl command failed', { err }); + if (throwOnError) { + throw err; + } + return null; + } + } + + async install(config: InstallConfig) { + return this.runCommand('install', this.paramToCli(config), true); + } + + async uninstall(config: UninstallConfig) { + return this.runCommand('uninstall', this.paramToCli(config), true); + } + + async bootstrapDump(config: BootstrapDumpConfig) { + return this.runCommand('bootstrap dump', this.paramToCli(config)); + } + + async bootstrapLoad(config: BootstrapLoadConfig) { + return this.runCommand('bootstrap load', this.paramToCli(config)); + } + + async failover(config: FailoverConfig) { + return this.runCommand('failover', this.paramToCli(config)); + } + + async status(config: StatusConfig) { + return this.runCommand('status', this.paramToCli(config)); + } + + async volumeGet(config: VolumeGetConfig) { + return this.runCommand('volume get', this.paramToCli(config)); + } + + async replicationPause(config: ReplicationPauseConfig) { + return this.runCommand('replication pause', this.paramToCli(config)); + } + + async replicationResume(config: ReplicationResumeConfig) { + return 
this.runCommand('replication resume', this.paramToCli(config)); + } + + paramToCli(params: Record<string, unknown>): string { + const command: string[] = []; + Object.keys(params).forEach(key => { + const value = params[key]; + if (value !== undefined && value !== null) { + command.push(`--${key.replace(/([A-Z])/g, '-$1').toLowerCase()}`); + command.push(String(value)); + } + }); + return command.join(' '); + } +} diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts new file mode 100644 index 0000000000..a6b62b752e --- /dev/null +++ b/tests/ctst/steps/pra.ts @@ -0,0 +1,357 @@ +import { Given, Then, When } from '@cucumber/cucumber'; +import Zenko from 'world/Zenko'; +import ZenkoDrctl from './dr/drctl'; +import { + createSecret, + displayCRStatus, + getDRSink, + getDRSource, + getPVCFromLabel, +} from './utils/kubernetes'; +import { + restoreObject, + verifyObjectLocation, +} from 'steps/utils/utils'; +import { Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing'; +import { safeJsonParse } from 'common/utils'; +import assert from 'assert'; + +enum ZenkoDrSinkPhases { + ZenkoDRSinkPhaseNew = 'New', + ZenkoDRSinkPhaseBootstrapWaiting = 'Bootstrap:Waiting', + ZenkoDRSinkPhaseBootstrapReceiving = 'Bootstrap:Receiving', + ZenkoDRSinkPhaseBootstrapFailed = 'Bootstrap:Failed', + ZenkoDRSinkPhaseRunning = 'Running', + ZenkoDRSinkPhasePaused = 'Paused', + ZenkoDRSinkPhaseFailover = 'Failover', +} + +enum ZenkoDrSourcePhases { + ZenkoDRSourcePhaseNew = 'New', + ZenkoDRSourcePhaseBootstrapWaiting = 'Bootstrap:Waiting', + ZenkoDRSourcePhaseBootstrapSending = 'Bootstrap:Sending', + ZenkoDRSourcePhaseBootstrapFailed = 'Bootstrap:Failed', + ZenkoDRSourcePhaseRunning = 'Running', + ZenkoDRSourcePhasePaused = 'Paused', +} + +interface DrState { + source: { + crStatus: { + phase: ZenkoDrSourcePhases; + }, + }; + sink: { + crStatus: { + phase: ZenkoDrSinkPhases; + }, + }; +} + +async function installPRA(world: Zenko, sinkS3Endpoint = 'http://s3.zenko.local') { + return world.zenkoDrCtl?.install({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + kafkaPersistenceSize: '1Gi', + kafkaPersistenceStorageClassName: 'standard', + locations: 'e2e-cold', // comma-separated list + s3Bucket: 'dump-db', + sinkZenkoInstance: 'end2end-pra', + sinkZenkoNamespace: 'default', + sourceZenkoInstance: 'end2end', + sourceZenkoNamespace: 'default', + sourceS3Endpoint: 'http://s3.zenko.local', + sinkS3Endpoint, + }); +} + +export function preparePRA(world: Zenko) { + // eslint-disable-next-line no-param-reassign + world.zenkoDrCtl = new ZenkoDrctl(world); +} + +export async function displayDebuggingInformation(world: Zenko) { + await displayCRStatus(world); + const drSource = await getDRSource(world); + const drSink = await getDRSink(world); + + world.logger.debug('Zenko DR custom resources', { + drSink, + drSource, + }); +} + +async function waitForPhase( + world: Zenko, + target: 'source' | 'sink', + state: ZenkoDrSinkPhases | ZenkoDrSourcePhases, + timeout = 130000, +): Promise<boolean> { + const start = Date.now(); + + while (Date.now() - start < timeout) { + let phase; + + const currentStatus = await world.zenkoDrCtl?.status({ + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + sinkZenkoDrInstance: 'end2end-pra-sink', + sourceZenkoDrInstance: 'end2end-source', + output: 'json', + }); + + if (!currentStatus) { + world.logger.debug('Failed to get DR status, retrying', { + currentStatus, + }); + await Utils.sleep(1000); + continue; + } + + const lines = 
currentStatus.split('\n'); + let parsedStatus: DrState | null = null; + + for (const line of lines) { + try { + const json = safeJsonParse<DrState>(line); + if (json.ok && json.result?.source && json.result?.sink) { + parsedStatus = json.result; + break; + } + } catch (e) { + continue; + } + } + + if (!parsedStatus) { + world.logger.debug('Failed to parse DR status, retrying', { + parsedStatus, + }); + await Utils.sleep(1000); + continue; + } + + if (target === 'sink') { + phase = parsedStatus.sink.crStatus.phase; + } else { + phase = parsedStatus.source.crStatus.phase; + } + + world.logger.debug('current phase', { + phase, + target, + }); + + if (phase === state) { + return true; + } + await Utils.sleep(1000); + } + + return false; +} + +Given('a DR installed', { timeout: 130000 }, async function (this: Zenko) { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + const credentials = Identity.getCurrentCredentials(); + await createSecret(this, 'drctl-s3-creds', { + accessKey: Buffer.from(credentials.accessKeyId).toString('base64'), + secretAccessKey: Buffer.from(credentials.secretAccessKey).toString('base64'), + }); + await installPRA(this); + return; +}); + +Given('a DR failing to be installed', { timeout: 130000 }, async function (this: Zenko) { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + const credentials = Identity.getCurrentCredentials(); + await createSecret(this, 'drctl-s3-creds', { + accessKey: Buffer.from(credentials.accessKeyId).toString('base64'), + secretAccessKey: Buffer.from(credentials.secretAccessKey).toString('base64'), + }); + await installPRA(this, 'http://s3.dr.zenko.local'); + return; +}); + +Then('the DR sink should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) { + let targetPhase; + switch (state) { + case 'New': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseNew; + break; + case 'Bootstrap:Waiting': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapWaiting; + break; + case 'Bootstrap:Receiving': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapReceiving; + break; + case 'Bootstrap:Failed': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapFailed; + break; + case 'Running': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseRunning; + break; + case 'Paused': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhasePaused; + break; + case 'Failover': + targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseFailover; + break; + default: + throw new Error(`Unknown state ${state}`); + } + + await waitForPhase(this, 'sink', targetPhase); +}); + +Then('the DR source should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) { + let targetPhase; + switch (state) { + case 'New': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseNew; + break; + case 'Bootstrap:Waiting': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapWaiting; + break; + case 'Bootstrap:Sending': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapSending; + break; + case 'Bootstrap:Failed': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapFailed; + break; + case 'Running': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseRunning; + break; + case 'Paused': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhasePaused; + break; + default: + throw new Error(`Unknown state ${state}`); + } + + await waitForPhase(this, 'source', targetPhase); +}); + +Then('object {string} should {string} be {string} and have the 
storage class {string} on {string} site', + { timeout: 360000 }, + async function ( + this: Zenko, + objName: string, + isVerb: string, + objectTransitionStatus: string, + storageClass: string, + site: string) { + this.resetCommand(); + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); + } else { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + } + try { + await verifyObjectLocation.call(this, objName, objectTransitionStatus, storageClass); + if (isVerb === 'not') { + throw new Error(`Object ${objName} should not be ${objectTransitionStatus}`); + } + } catch (err) { + if (isVerb !== 'not') { + throw err; + } + assert(err); + } + }); + +Then('the kafka DR volume exists', { timeout: 60000 }, async function (this: Zenko) { + const volumeClaim = await getPVCFromLabel(this, 'kafka_cr', 'end2end-pra-sink-base-queue'); + this.logger.debug('kafka volume claim', { volumeClaim }); + assert(volumeClaim); + const volume = await this.zenkoDrCtl?.volumeGet({ + volumeName: volumeClaim.spec?.volumeName, + timeout: '60s', + }); + this.logger.debug('kafka volume from drctl', { volume }); + assert(volume); + const volumeParsed = safeJsonParse<{'volume phase': string, 'volume name': string}>(volume); + if (!volumeParsed.ok) { + throw new Error('Failed to parse volume'); + } + assert(volumeParsed.result!['volume phase'] === 'Bound'); +}); + +When('I pause the DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.replicationPause({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + +When('I resume the DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.replicationResume({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + +When('I uninstall DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.uninstall({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + +Then('the DR custom resources should be deleted', { timeout: 360000 }, async function (this: Zenko) { + const drSource = await getDRSource(this); + const drSink = await getDRSink(this); + + assert(!drSource); + assert(!drSink); +}); + +Given('access keys for the replicated account', { timeout: 360000 }, async () => { + Identity.useIdentity(IdentityEnum.ADMIN, Zenko.sites['sink'].adminIdentityName); + // The account is the one from the source cluster: it replaces the sink account + // after the bootstrap phases + const targetAccount = Zenko.sites['source'].accountName; + + let account; + let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES; + account = await SuperAdmin.getAccount({ + accountName: targetAccount, + }); + while (!account && remaining > 0) { + await Utils.sleep(500); + account = await SuperAdmin.getAccount({ + accountName: targetAccount, + }); + remaining--; + } + assert(account); + + const credentials = await SuperAdmin.generateAccountAccessKey({ + accountName: targetAccount, + }); + + Identity.addIdentity(IdentityEnum.ACCOUNT, `${targetAccount}-replicated`, credentials, undefined, true); +}); + +When('i restore object {string} 
for {int} days on {string} site', + async function (this: Zenko, objectName: string, days: number, site: string) { + this.resetCommand(); + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); + } else { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + } + await restoreObject.call(this, objectName, days); + }); diff --git a/tests/ctst/steps/utils/kubernetes.ts b/tests/ctst/steps/utils/kubernetes.ts index 16cea40f3f..6a31359b62 100644 --- a/tests/ctst/steps/utils/kubernetes.ts +++ b/tests/ctst/steps/utils/kubernetes.ts @@ -25,14 +25,14 @@ export function createKubeBatchClient(world: Zenko) { if (!KubernetesHelper.clientBatch) { KubernetesHelper.init(world.parameters); } - return KubernetesHelper.clientBatch; + return KubernetesHelper.clientBatch!; } export function createKubeCoreClient(world: Zenko) { if (!KubernetesHelper.clientBatch) { KubernetesHelper.init(world.parameters); } - return KubernetesHelper.clientCore; + return KubernetesHelper.clientCore!; } export function createKubeWatchClient(world: Zenko) { @@ -67,7 +67,7 @@ export async function createJobAndWaitForCompletion(world: Zenko, jobName: strin const batchClient = createKubeBatchClient(world); const watchClient = createKubeWatchClient(world); try { - const cronJob = await batchClient!.readNamespacedCronJob(jobName, 'default'); + const cronJob = await batchClient.readNamespacedCronJob(jobName, 'default'); const cronJobSpec = cronJob.body.spec?.jobTemplate.spec; const job = new V1Job(); const metadata = new V1ObjectMeta(); @@ -85,7 +85,7 @@ export async function createJobAndWaitForCompletion(world: Zenko, jobName: strin } job.metadata = metadata; - const response = await batchClient!.createNamespacedJob('default', job); + const response = await batchClient.createNamespacedJob('default', job); world.logger.debug('job created', { job: response.body.metadata, }); @@ -293,3 +293,112 @@ export async function waitForDataServicesToStabilize(world: Zenko, timeout = 15 return allRunning; } + +export async function displayCRStatus(world: Zenko, namespace = 'default') { + const zenkoClient = createKubeCustomObjectClient(world); + + const zenkoCR = await zenkoClient.getNamespacedCustomObject( + 'zenko.io', + 'v1alpha2', + namespace, + 'zenkos', + 'end2end', + ).catch(err => { + world.logger.error('Error getting Zenko CR', { + err: err as unknown, + }); + return null; + }); + + if (!zenkoCR) { + return; + } + + world.logger.debug('Checking Zenko CR status', { + zenkoCR, + }); +} + +export async function getDRSource(world: Zenko, namespace = 'default') { + const zenkoClient = createKubeCustomObjectClient(world); + + const zenkoCR = await zenkoClient.getNamespacedCustomObject( + 'zenko.io', + 'v1alpha1', + namespace, + 'zenkodrsources', + 'end2end-source', + ).catch(err => { + world.logger.debug('Error getting Zenko CR', { + err: err as unknown, + }); + }); + + return zenkoCR?.body; +} + +export async function getDRSink(world: Zenko, namespace = 'default') { + const zenkoClient = createKubeCustomObjectClient(world); + + const zenkoCR = await zenkoClient.getNamespacedCustomObject( + 'zenko.io', + 'v1alpha1', + namespace, + 'zenkodrsinks', + 'end2end-pra-sink', + ).catch(err => { + world.logger.debug('Error getting Zenko CR', { + err: err as unknown, + }); + }); + + return zenkoCR?.body; +} + +export async function getPVCFromLabel(world: Zenko, label: string, value: string, namespace = 'default') { + const coreClient = createKubeCoreClient(world); + 
+ + const pvcList = await coreClient.listNamespacedPersistentVolumeClaim(namespace); + const pvc = pvcList.body.items.find(pvc => pvc.metadata?.labels?.[label] === value); + + return pvc; +} + +export async function createSecret( + world: Zenko, + secretName: string, + data: Record<string, string>, + namespace = 'default', +) { + const coreClient = createKubeCoreClient(world); + + const secret = { + apiVersion: 'v1', + kind: 'Secret', + metadata: { + name: secretName, + }, + data, + }; + + try { + await coreClient.deleteNamespacedSecret(secretName, namespace); + } catch (err) { + world.logger.debug('Secret does not exist, creating new', { + secretName, + namespace, + }); + } + + try { + const response = await coreClient.createNamespacedSecret(namespace, secret); + return response; + } catch (err) { + world.logger.error('Error creating secret', { + namespace, + secret, + err, + }); + throw err; + } +} diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts index 9deb3727a1..88f01b4ce2 100644 --- a/tests/ctst/steps/utils/utils.ts +++ b/tests/ctst/steps/utils/utils.ts @@ -9,8 +9,9 @@ import { AWSVersionObject, Command, } from 'cli-testing'; -import { extractPropertyFromResults, s3FunctionExtraParams } from 'common/utils'; +import { extractPropertyFromResults, s3FunctionExtraParams, safeJsonParse } from 'common/utils'; import Zenko from 'world/Zenko'; +import assert from 'assert'; enum AuthorizationType { ALLOW = 'Allow', @@ -252,6 +253,123 @@ async function emptyVersionedBucket(world: Zenko) { })); } +async function addTransitionWorkflow(this: Zenko, location: string, enabled = true) { + let conditionOk = false; + this.resetCommand(); + this.addCommandParameter({ bucket: this.getSaved('bucketName') }); + const enabledStr = enabled ? 'Enabled' : 'Disabled'; + const lifecycleConfiguration = JSON.stringify({ + Rules: [ + { + Status: enabledStr, + Prefix: '', + Transitions: [ + { + Days: 0, + StorageClass: location, + }, + ], + }, + ], + }); + this.addCommandParameter({ + lifecycleConfiguration, + }); + const commandParameters = this.getCommandParameters(); + while (!conditionOk) { + const res = await S3.putBucketLifecycleConfiguration(commandParameters); + conditionOk = res.err === null; + // Wait for the transition to be accepted because the deployment of the location's pods can take some time + await Utils.sleep(5000); + } +} + +async function verifyObjectLocation(this: Zenko, objectName: string, + objectTransitionStatus: string, storageClass: string) { + const objName = + getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName'); + this.resetCommand(); + this.addCommandParameter({ bucket: this.getSaved('bucketName') }); + this.addCommandParameter({ key: objName }); + const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName); + if (versionId) { + this.addCommandParameter({ versionId }); + } + let conditionOk = false; + while (!conditionOk) { + const res = await S3.headObject(this.getCommandParameters()); + if (res.err?.includes('NotFound')) { + await Utils.sleep(1000); + continue; + } else if (res.err) { + break; + } + assert(res.stdout); + const parsed = safeJsonParse<{ + StorageClass: string | undefined, + Restore: string | undefined, + }>(res.stdout); + assert(parsed.ok); + const expectedClass = storageClass !== '' ? 
storageClass : undefined; + if (parsed.result?.StorageClass === expectedClass) { + conditionOk = true; + } + if (objectTransitionStatus == 'restored') { + const isRestored = !!parsed.result?.Restore && + parsed.result.Restore.includes('ongoing-request="false", expiry-date='); + conditionOk = conditionOk && isRestored; + } else if (objectTransitionStatus == 'cold') { + conditionOk = conditionOk && !parsed.result?.Restore; + } + await Utils.sleep(1000); + } + assert(conditionOk); +} + +async function restoreObject(this: Zenko, objectName: string, days: number) { + const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName'); + this.resetCommand(); + this.addCommandParameter({ bucket: this.getSaved('bucketName') }); + this.addCommandParameter({ key: objName }); + const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName); + if (versionId) { + this.addCommandParameter({ versionId }); + } + this.addCommandParameter({ restoreRequest: `Days=${days}` }); + const result = await S3.restoreObject(this.getCommandParameters()); + this.setResult(result); +} + +/** + * @param {Zenko} this world object + * @param {string} objectName object name + * @returns {string} the object name based on the backend flakiness + */ +function getObjectNameWithBackendFlakiness(this: Zenko, objectName: string) { + let objectNameFinal; + const backendFlakinessRetryNumber = this.getSaved('backendFlakinessRetryNumber'); + const backendFlakiness = this.getSaved('backendFlakiness'); + + if (!backendFlakiness || !backendFlakinessRetryNumber || !objectName) { + return objectName; + } + + switch (backendFlakiness) { + case 'command': + objectNameFinal = `${objectName}.scal-retry-command-${backendFlakinessRetryNumber}`; + break; + case 'archive': + case 'restore': + objectNameFinal = `${objectName}.scal-retry-${backendFlakiness}-job-${backendFlakinessRetryNumber}`; + break; + default: + this.logger.debug('Unknown backend flakiness', { backendFlakiness }); + return objectName; + } + return objectNameFinal; +} + + export { AuthorizationType, AuthorizationConfiguration, @@ -261,4 +379,8 @@ export { putObject, emptyNonVersionedBucket, emptyVersionedBucket, + verifyObjectLocation, + getObjectNameWithBackendFlakiness, + restoreObject, + addTransitionWorkflow, }; diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index e4f82701bb..88f239ffc4 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -21,6 +21,8 @@ import { } from 'cli-testing'; import { extractPropertyFromResults } from '../common/utils'; +import ZenkoDrctl from 'steps/dr/drctl'; +import assert from 'assert'; interface ServiceUsersCredentials { @@ -43,6 +45,9 @@ export interface ZenkoWorldParameters extends ClientOptions { AccountName: string; AccountAccessKey: string; AccountSecretKey: string; + DRAdminAccessKey?: string; + DRAdminSecretKey?: string; + DRSubdomain?: string; VaultAuthHost: string; NotificationDestination: string; NotificationDestinationTopic: string; @@ -93,8 +98,20 @@ export default class Zenko extends World<ZenkoWorldParameters> { private saved: Record<string, unknown> = {}; + public zenkoDrCtl: ZenkoDrctl | null = null; + + static sites: { + [key: string]: { + accountName: string; + adminIdentityName: string; + }; + } = {}; + public logger: Werelogs.RequestLogger = new Werelogs.Logger('CTST').newRequestLogger(); + static readonly PRIMARY_SITE_NAME = 'admin'; + static readonly SECONDARY_SITE_NAME = 'dradmin'; + /** * @constructor * @param {Object} options - parameters provided as a CLI parameter when running the 
tests @@ -134,12 +151,39 @@ export default class Zenko extends World { } if (this.parameters.AdminAccessKey && this.parameters.AdminSecretKey && - !Identity.hasIdentity(IdentityEnum.ADMIN, 'admin')) { - Identity.addIdentity(IdentityEnum.ADMIN, 'admin', { + !Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME)) { + Identity.addIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME, { accessKeyId: this.parameters.AdminAccessKey, secretAccessKey: this.parameters.AdminSecretKey, - }); + }, undefined, undefined, undefined, this.parameters.subdomain); + + Zenko.sites['source'] = { + accountName: Identity.defaultAccountName, + adminIdentityName: Zenko.PRIMARY_SITE_NAME, + }; + } + + if (this.needsSecondarySite()) { + if (!Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME)) { + Identity.addIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME, { + accessKeyId: this.parameters.DRAdminAccessKey!, + secretAccessKey: this.parameters.DRAdminSecretKey!, + }, undefined, undefined, undefined, this.parameters.DRSubdomain); + } + + Zenko.sites['sink'] = { + accountName: `dr${this.parameters.AccountName}`, + adminIdentityName: Zenko.SECONDARY_SITE_NAME, + }; } + + this.logger.debug('Zenko sites', { + sites: Zenko.sites, + }); + } + + private needsSecondarySite() { + return this.parameters.DRAdminAccessKey && this.parameters.DRAdminSecretKey && this.parameters.DRSubdomain; } /** @@ -363,15 +407,19 @@ export default class Zenko extends World { } } - async createAccount(name?: string, force?: boolean) { + async createAccount(name?: string, force?: boolean, adminClientName?: string) { Identity.resetIdentity(); - const accountName = this.getSaved('accountName') || - name || `${Constants.ACCOUNT_NAME}${Utils.randomString()}`; + const accountName = name || this.getSaved('accountName') || + `${Constants.ACCOUNT_NAME}${Utils.randomString()}`; if (Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName) && !force) { Identity.useIdentity(IdentityEnum.ACCOUNT, accountName); return; } + if (adminClientName && Identity.hasIdentity(IdentityEnum.ADMIN, adminClientName)) { + Identity.useIdentity(IdentityEnum.ADMIN, adminClientName); + } + await SuperAdmin.createAccount({ accountName }); const credentials = await SuperAdmin.generateAccountAccessKey({ accountName }); Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, credentials, undefined, true, true); @@ -541,59 +589,101 @@ export default class Zenko extends World { * @returns {undefined} */ static async init(parameters: ZenkoWorldParameters) { - const accountName = parameters.AccountName || Constants.ACCOUNT_NAME; CacheHelper.logger.debug('Initializing Zenko', { - accountName, parameters, }); - if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) { - CacheHelper.adminClient = await Utils.getAdminCredentials(parameters); - - let account = null; + // Create the default account for each site configured + // and generate access keys for it + for (const siteKey in Zenko.sites) { + const site = Zenko.sites[siteKey]; + Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName); + const accountName = site.accountName; + assert(accountName, `Account name is not defined for site ${siteKey}`); + CacheHelper.logger.debug('Initializing account for Zenko site', { + siteKey, + accountName, + }); - // Create the account if already exist will not throw any error - try { - await SuperAdmin.createAccount({ accountName }); - /* eslint-disable */ - } catch (err: any) { - if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') { - throw 
@@ -541,59 +589,101 @@ export default class Zenko extends World {
      * @returns {undefined}
      */
     static async init(parameters: ZenkoWorldParameters) {
-        const accountName = parameters.AccountName || Constants.ACCOUNT_NAME;
         CacheHelper.logger.debug('Initializing Zenko', {
-            accountName,
             parameters,
         });
-        if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) {
-            CacheHelper.adminClient = await Utils.getAdminCredentials(parameters);
-
-            let account = null;
+        // Create the default account for each configured site
+        // and generate access keys for it
+        for (const siteKey in Zenko.sites) {
+            const site = Zenko.sites[siteKey];
+            Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName);
+            const accountName = site.accountName;
+            assert(accountName, `Account name is not defined for site ${siteKey}`);
+            CacheHelper.logger.debug('Initializing account for Zenko site', {
+                siteKey,
+                accountName,
+            });
 
-            // Create the account if already exist will not throw any error
-            try {
-                await SuperAdmin.createAccount({ accountName });
-            /* eslint-disable */
-            } catch (err: any) {
-                if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') {
-                    throw err;
+            if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) {
+                Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName);
+
+                let account = null;
+                CacheHelper.logger.debug('Creating account', {
+                    accountName,
+                    adminIdentityName: site.adminIdentityName,
+                    credentials: Identity.getCurrentCredentials(),
+                });
+                // Creating the account will not throw an error if it already exists
+                try {
+                    await SuperAdmin.createAccount({ accountName });
+                /* eslint-disable */
+                } catch (err: any) {
+                    CacheHelper.logger.debug('Error while creating account', {
+                        accountName,
+                        err,
+                    });
+                    if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') {
+                        throw err;
+                    }
                 }
-            }
-            /* eslint-enable */
-            // Waiting until the account exists, in case of parallel mode.
-            let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES;
-            account = await SuperAdmin.getAccount({ accountName });
-            while (!account && remaining > 0) {
-                await Utils.sleep(500);
+                /* eslint-enable */
+                // Wait until the account exists, in case of parallel mode.
+                let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES;
                 account = await SuperAdmin.getAccount({ accountName });
-                remaining--;
-            }
-            if (!account) {
-                throw new Error(`Account ${accountName} not found.`);
-            }
-
-            // Account was found, generate access keys if not provided
-            const accountAccessKeys = Identity.getCredentialsForIdentity(
-                IdentityEnum.ACCOUNT, accountName) || {
-                accessKeyId: '',
-                secretAccessKey: '',
-            };
-
-            if (!parameters.AccountName || !accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
-                const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
-                if (!Utils.isAccessKeys(accessKeys)) {
-                    throw new Error('Failed to generate account access keys');
+                while (!account && remaining > 0) {
+                    await Utils.sleep(500);
+                    account = await SuperAdmin.getAccount({ accountName });
+                    remaining--;
+                }
+                if (!account) {
+                    throw new Error(`Account ${accountName} not found in site ${siteKey}.`);
                 }
-                accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
-                accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
+
+                // Account was found, generate access keys if not provided
+                const accountAccessKeys = Identity.getCredentialsForIdentity(
+                    IdentityEnum.ACCOUNT, accountName) || {
+                    accessKeyId: '',
+                    secretAccessKey: '',
+                };
+
+                if (!accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
+                    const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
+                    if (!Utils.isAccessKeys(accessKeys)) {
+                        throw new Error(`Failed to generate account access keys for site ${siteKey}`);
+                    }
+                    accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
+                    accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
+                }
+
+                CacheHelper.logger.debug('Adding account identity', {
+                    accountName,
+                    accountAccessKeys,
+                });
+                Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
             }
-
+        }
+
+        const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!;
+        const accountAccessKeys = Identity.getCredentialsForIdentity(
+            IdentityEnum.ACCOUNT, this.sites['source']?.accountName
+            || CacheHelper.parameters.AccountName!) || {
+            accessKeyId: '',
+            secretAccessKey: '',
+        };
+
+        if (!accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
+            const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
+            if (!Utils.isAccessKeys(accessKeys)) {
+                throw new Error(`Failed to generate account access keys for account ${accountName}`);
+            }
+            accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
+            accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
             Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
-        } else {
-            Identity.useIdentity(IdentityEnum.ACCOUNT, accountName);
         }
-    }
+
+        // Fall back to the primary site's account at the end of init by default
+        Identity.useIdentity(IdentityEnum.ACCOUNT, accountName);
+    }
 
     /**
      * Creates an IAM user with policy and access keys to be used in the tests.
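Reviewer note: the per-site bootstrap in `init()` is a create-then-poll pattern: creation tolerates `EntityAlreadyExists` so parallel workers can race safely, then the worker polls until the account is visible. Here is a distilled, self-contained sketch of that pattern; `createAccount`/`getAccount` stand in for the `SuperAdmin` calls, and the retry budget is an assumed value, not the real `Constants.MAX_ACCOUNT_CHECK_RETRIES`.

```typescript
// Sketch only: the create-then-poll account bootstrap, distilled.
const MAX_ACCOUNT_CHECK_RETRIES = 10; // assumed value

const sleep = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

async function ensureAccount(
    accountName: string,
    createAccount: (name: string) => Promise<void>,
    getAccount: (name: string) => Promise<object | null>,
): Promise<object> {
    try {
        await createAccount(accountName);
    } catch (err) {
        // Another worker may have created the account first; only
        // propagate errors that are not "already exists".
        if ((err as { code?: string }).code !== 'EntityAlreadyExists') {
            throw err;
        }
    }
    // Poll until the account is visible, covering parallel runs where
    // creation succeeded elsewhere but is not yet readable here.
    let remaining = MAX_ACCOUNT_CHECK_RETRIES;
    let account = await getAccount(accountName);
    while (!account && remaining > 0) {
        await sleep(500);
        account = await getAccount(accountName);
        remaining--;
    }
    if (!account) {
        throw new Error(`Account ${accountName} not found.`);
    }
    return account;
}
```

The 500 ms delay and countdown match the hunk above; only the error check is simplified (the patch also accepts an `EntityAlreadyExists` property on the error object).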
diff --git a/tests/ctst/yarn.lock b/tests/ctst/yarn.lock
index 8721d6c831..5bed88af54 100644
--- a/tests/ctst/yarn.lock
+++ b/tests/ctst/yarn.lock
@@ -4807,9 +4807,9 @@ cli-table3@^0.6.0:
   optionalDependencies:
     "@colors/colors" "1.5.0"
 
-"cli-testing@github:scality/cli-testing.git#1.0.1":
-  version "1.0.1"
-  resolved "git+ssh://git@github.com/scality/cli-testing.git#46c8ec4b9b22928ae806786f1175416949fa30eb"
+"cli-testing@github:scality/cli-testing.git#v1.2.0":
+  version "1.2.0"
+  resolved "git+ssh://git@github.com/scality/cli-testing.git#e8111e42fed3741183885c209e128f44c5a477ee"
   dependencies:
     "@aws-crypto/sha256-universal" "^5.2.0"
     "@aws-sdk/client-iam" "^3.484.0"
diff --git a/tests/zenko_tests/e2e_config/accounts.py b/tests/zenko_tests/e2e_config/accounts.py
index 534ab9b56a..b484a16b0b 100644
--- a/tests/zenko_tests/e2e_config/accounts.py
+++ b/tests/zenko_tests/e2e_config/accounts.py
@@ -53,7 +53,13 @@ def create_account_secret(name, credentials, namespace="default"):
         string_data=credentials,
     )
 
-    res = core.create_namespaced_secret(namespace, body=secret)
+    try:
+        res = core.create_namespaced_secret(namespace, body=secret)
+    except ApiException as e:
+        if e.status == 409:
+            _log.warning("secret already exists")
+        else:
+            raise
 
     _log.info("created account secret")
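Reviewer note: the accounts.py hunk makes secret creation idempotent by treating HTTP 409 (Conflict) as "already exists" and re-raising everything else. The same pattern is sketched generically in TypeScript below, the language of the step code above; `createSecret` and the `status` field on the thrown error are assumptions for illustration, not the Kubernetes client's actual API.

```typescript
// Sketch only: tolerate 409 Conflict on create, fail on anything else.
interface HttpError { status?: number }

async function createSecretIdempotent(
    createSecret: () => Promise<void>,
    log: { warning: (msg: string) => void },
): Promise<void> {
    try {
        await createSecret();
    } catch (err) {
        if ((err as HttpError).status === 409) {
            // 409 Conflict: the secret already exists, safe to continue.
            log.warning('secret already exists');
            return;
        }
        throw err; // any other failure stays fatal
    }
}
```

Swallowing only 409 keeps every other API failure fatal, which is the same trade-off the Python hunk makes.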