From 20fa2c4c977d570e8f6497e7b1a38d1f903f78b5 Mon Sep 17 00:00:00 2001 From: williamlardier Date: Mon, 29 Jul 2024 09:18:49 +0200 Subject: [PATCH 01/25] Pass custom zkop version for CI Issue: ZENKO-4773 --- .github/actions/deploy/action.yaml | 6 +- .github/scripts/end2end/configs/zenko.yaml | 6 +- .../end2end/configs/zenkoversion-pra.yaml | 147 +++++++++++++ .../scripts/end2end/configs/zenkoversion.yaml | 17 +- .github/scripts/end2end/deploy-dr.sh | 24 --- .github/scripts/end2end/deploy-zenko.sh | 7 +- .github/scripts/end2end/patch-coredns.sh | 7 + .github/scripts/end2end/prepare-pra.sh | 15 +- .github/scripts/end2end/run-e2e-ctst.sh | 4 +- .github/scripts/end2end/run-e2e-pra-ctst.sh | 144 +++++++++++++ .github/workflows/end2end.yaml | 64 +++--- solution/deps.yaml | 5 + tests/ctst/Dockerfile | 7 + tests/ctst/package.json | 2 +- tests/ctst/steps/dr/drctl.ts | 195 ++++++++++++++++++ tests/ctst/world/Zenko.ts | 4 + tests/ctst/yarn.lock | 6 +- 17 files changed, 578 insertions(+), 82 deletions(-) create mode 100644 .github/scripts/end2end/configs/zenkoversion-pra.yaml delete mode 100644 .github/scripts/end2end/deploy-dr.sh create mode 100644 .github/scripts/end2end/run-e2e-pra-ctst.sh create mode 100644 tests/ctst/steps/dr/drctl.ts diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml index 1fc7708f0c..5b9ddcba09 100644 --- a/.github/actions/deploy/action.yaml +++ b/.github/actions/deploy/action.yaml @@ -7,6 +7,10 @@ inputs: description: "The tag of the Zenko Operator image to use" required: false default: "" + zenko_version_path: + description: "The path to the ZenkoVersion file" + required: false + default: "./configs/zenkoversion.yaml" runs: using: composite steps: @@ -72,7 +76,7 @@ runs: OPERATOR_IMAGE_TAG: ${{ inputs.zkop_tag }} - name: Deploy Zenko Instance shell: bash - run: bash deploy-zenko.sh end2end default + run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' ${{ inputs.zenko_version_path }} working-directory: ./.github/scripts/end2end - name: Add Keycloak user and assign StorageManager role shell: bash diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml index 2f7179e119..68ba2bc344 100644 --- a/.github/scripts/end2end/configs/zenko.yaml +++ b/.github/scripts/end2end/configs/zenko.yaml @@ -24,11 +24,11 @@ spec: logging: logLevel: debug backbeat: - triggerExpirationsOneDayEarlierForTesting: true + triggerExpirationsOneDayEarlierForTesting: ${EXPIRE_ONE_DAY_EARLIER} lifecycleConductor: cronRule: "${BACKBEAT_LCC_CRON_RULE}" lifecycleBucketProcessor: - triggerTransitionsOneDayEarlierForTesting: true + triggerTransitionsOneDayEarlierForTesting: ${TRANSITION_ONE_DAY_EARLIER} logging: logLevel: trace mongodb: @@ -101,6 +101,8 @@ spec: e2e-cold: debug: "true" command-timeout: "60s" + pending-job-poll-after-age: "10s" + pending-job-poll-check-interval: "10s" ingress: workloadPlaneClass: 'nginx' controlPlaneClass: 'nginx' diff --git a/.github/scripts/end2end/configs/zenkoversion-pra.yaml b/.github/scripts/end2end/configs/zenkoversion-pra.yaml new file mode 100644 index 0000000000..2d2fdf3da7 --- /dev/null +++ b/.github/scripts/end2end/configs/zenkoversion-pra.yaml @@ -0,0 +1,147 @@ +--- +# DO NOT EDIT - autogenerated file +apiVersion: zenko.io/v1alpha1 +kind: ZenkoVersion +metadata: + name: '${ZENKO_VERSION_NAME}' +spec: + dashboards: + backbeat: + image: '${BACKBEAT_DASHBOARD}' + tag: '${BACKBEAT_TAG}' + cloudserver: + image: '${CLOUDSERVER_DASHBOARD}' + tag: '${CLOUDSERVER_TAG}' + s3utils: + image: 
'${S3UTILS_DASHBOARD}' + tag: '${S3UTILS_TAG}' + scuba: + image: '${SCUBA_DASHBOARD}' + tag: '${SCUBA_TAG}' + kafkaCleaner: + image: '${KAFKA_CLEANER_DASHBOARD}' + tag: '${KAFKA_CLEANER_TAG}' + # kafka: + # image: kafka-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # kafkaConnect: + # image: kafka-connect-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # mongodb: + # image: mongodb-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # redis: + # image: redis-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # vault: + # image: '${VAULT_DASHBOARD}' + # tag: '${VAULT_TAG}' + # zookeeper: + # image: zookeeper-dashboard + # tag: '${ZENKO_VERSION_NAME}' + policies: + backbeat: + image: '${BACKBEAT_POLICY}' + tag: '${BACKBEAT_TAG}' + sorbet: + image: '${SORBET_POLICY}' + tag: '${SORBET_TAG}' + vault: + image: '${VAULT_POLICY}' + tag: '${VAULT_TAG}' + versions: + management: + ui: + image: '${ZENKO_UI_IMAGE}' + tag: '${ZENKO_UI_TAG}' + api: + image: '${PENSIEVE_API_IMAGE}' + tag: '${PENSIEVE_API_TAG}' + pushAPI: + image: '${PENSIEVE_API_IMAGE}' + tag: '${PENSIEVE_API_TAG}' + cloudserver: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + backbeat: + image: '${BACKBEAT_IMAGE}' + tag: '${BACKBEAT_TAG}' + utapi: + image: '${UTAPI_IMAGE}' + tag: '${UTAPI_TAG}' + secureChannelProxy: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + localData: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + metrics: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + s3utils: + image: '${S3UTILS_IMAGE}' + tag: '${S3UTILS_TAG}' + sorbet: + image: '${SORBET_IMAGE}' + tag: '${SORBET_TAG}' + drctl: + image: '${DRCTL_IMAGE}' + tag: '${DRCTL_TAG}' + zookeeper: + image: '${ZOOKEEPER_IMAGE}' + tag: '${ZOOKEEPER_TAG}' + kafka: + cleaner: + image: '${KAFKA_CLEANER_IMAGE}' + tag: '${KAFKA_CLEANER_TAG}' + cluster: + image: '${KAFKA_IMAGE}' + tag: '${KAFKA_TAG}' + connect: + image: '${KAFKA_CONNECT_IMAGE}' + tag: '${KAFKA_CONNECT_TAG}' + cruiseControl: + image: '${KAFKA_CRUISECONTROL_IMAGE}' + tag: '${KAFKA_CRUISECONTROL_TAG}' + lagExporter: + image: '${KAFKA_LAGEXPORTER_IMAGE}' + tag: '${KAFKA_LAGEXPORTER_TAG}' + monitoring: + image: '${JMX_JAVAAGENT_IMAGE}' + tag: '${JMX_JAVAAGENT_TAG}' + cleaner: + image: '${KAFKA_CLEANER_IMAGE}' + tag: '${KAFKA_CLEANER_TAG}' + vault: + image: '${VAULT_IMAGE}' + tag: '${VAULT_TAG}' + scuba: + image: '${SCUBA_IMAGE}' + tag: '${SCUBA_TAG}' + shell: + image: '${BUSYBOX_IMAGE}' + tag: '${BUSYBOX_TAG}' + mongodb: '${MONGODB_TAG}' + redis: + db: + image: '${REDIS_IMAGE}' + tag: '${REDIS_TAG}' + exporter: + image: '${REDIS_EXPORTER_IMAGE}' + tag: '${REDIS_EXPORTER_TAG}' + kubedb: '${REDIS_KUBEDB_TAG}' + defaults: + backbeatConcurrency: + lifecycleBucketProcessor: 30 + lifecycleObjectProcessor: 20 + replicaMultipliers: + cloudserver: 16 + internalCloudserver: 4 + vault: 4 + kafkaResources: + broker: + limitCPU: 3 + cruiseControl: + limitMemory: 3Gi + featureFlags: + backbeatGCVaultAuthorized: true \ No newline at end of file diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml index 32a547203c..9d603440dc 100644 --- a/.github/scripts/end2end/configs/zenkoversion.yaml +++ b/.github/scripts/end2end/configs/zenkoversion.yaml @@ -5,8 +5,6 @@ kind: ZenkoVersion metadata: name: '${ZENKO_VERSION_NAME}' spec: - featureFlags: - backbeatGCVaultAuthorized: true dashboards: backbeat: image: '${BACKBEAT_DASHBOARD}' @@ -129,3 +127,18 @@ spec: image: '${REDIS_EXPORTER_IMAGE}' tag: '${REDIS_EXPORTER_TAG}' kubedb: 
'${REDIS_KUBEDB_TAG}' + defaults: + backbeatConcurrency: + lifecycleBucketProcessor: 30 + lifecycleObjectProcessor: 20 + replicaMultipliers: + cloudserver: 16 + internalCloudserver: 4 + vault: 4 + kafkaResources: + broker: + limitCPU: 3 + cruiseControl: + limitMemory: 3Gi + featureFlags: + backbeatGCVaultAuthorized: true \ No newline at end of file diff --git a/.github/scripts/end2end/deploy-dr.sh b/.github/scripts/end2end/deploy-dr.sh deleted file mode 100644 index b72e8c7c03..0000000000 --- a/.github/scripts/end2end/deploy-dr.sh +++ /dev/null @@ -1,24 +0,0 @@ -export DR_SINK_NAME=${DR_SINK_NAME:-'end2end-pra-dr-sink'} -export DR_SOURCE_NAME=${DR_SOURCE_NAME:-'end2end-dr-source'} -export NAMESPACE=${NAMESPACE:-'default'} - -cat configs/zenko_dr_sink.yaml | envsubst | kubectl apply -f - - -k_cmd="kubectl -n ${NAMESPACE} get zenkodrsink/${DR_SINK_NAME}" -for i in $(seq 1 120); do - conditions=$($k_cmd -o "jsonpath={.status.conditions}") - if kubectl wait --for condition=Available --timeout 5s --namespace ${NAMESPACE} zenkodrsink/${DR_SINK_NAME}; then - break; - fi -done - - -cat configs/zenko_dr_source.yaml | envsubst | kubectl apply -f - - -k_cmd="kubectl -n ${NAMESPACE} get zenkodrsource/${DR_SOURCE_NAME}" -for i in $(seq 1 120); do - conditions=$($k_cmd -o "jsonpath={.status.conditions}") - if kubectl wait --for condition=Available --timeout 5s --namespace ${NAMESPACE} zenkodrsource/${DR_SOURCE_NAME}; then - break; - fi -done diff --git a/.github/scripts/end2end/deploy-zenko.sh b/.github/scripts/end2end/deploy-zenko.sh index 52dc115e32..c6b1819cbe 100755 --- a/.github/scripts/end2end/deploy-zenko.sh +++ b/.github/scripts/end2end/deploy-zenko.sh @@ -38,7 +38,7 @@ fi # TODO: use kustomize ZENKO_MONGODB_SHARDED=${ZENKO_MONGODB_SHARDED:-'false'} -if [ ${ZENKO_MONGODB_SHARDED} == 'true' ]; then +if [ "${ZENKO_MONGODB_SHARDED}" = 'true' ]; then export ZENKO_ANNOTATIONS="annotations: zenko.io/x-backbeat-oneshard-replicaset: data-db-mongodb-sharded-shard-0 zenko.io/x-backbeat-oneshard-replicaset-hosts: data-db-mongodb-sharded-shard0-data-0.data-db-mongodb-sharded-headless.default.svc.cluster.local:27017" @@ -50,6 +50,11 @@ else fi export ZENKO_MONGODB_DATABASE="${ZENKO_MONGODB_DATABASE:-'datadb'}" +if [ "${TIME_PROGRESSION_FACTOR}" -gt 1 ]; then + export ZENKO_ANNOTATIONS="${ZENKO_ANNOTATIONS:-annotations:} + zenko.io/time-progression-factor: \"${TIME_PROGRESSION_FACTOR}\"" +fi + function dependencies_image_env() { yq eval '.[] | .envsubst + "=" + (.sourceRegistry // "docker.io") + "/" + .image' ${DEPS_PATH} | diff --git a/.github/scripts/end2end/patch-coredns.sh b/.github/scripts/end2end/patch-coredns.sh index 5e505d6c08..b297d596d6 100755 --- a/.github/scripts/end2end/patch-coredns.sh +++ b/.github/scripts/end2end/patch-coredns.sh @@ -25,6 +25,13 @@ corefile=" rewrite name exact sts.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local rewrite name exact iam.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local rewrite name exact shell-ui.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact keycloak.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact management.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact s3.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact sts.dr.zenko.local 
ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact iam.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local + rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure fallthrough in-addr.arpa ip6.arpa diff --git a/.github/scripts/end2end/prepare-pra.sh b/.github/scripts/end2end/prepare-pra.sh index 965b21bb5a..6098bc67a0 100644 --- a/.github/scripts/end2end/prepare-pra.sh +++ b/.github/scripts/end2end/prepare-pra.sh @@ -6,11 +6,14 @@ export MONGODB_PRA_DATABASE="${MONGODB_PRA_DATABASE:-'pradb'}" export ZENKO_MONGODB_DATABASE="${MONGODB_PRA_DATABASE}" export ZENKO_MONGODB_SECRET_NAME="mongodb-db-creds-pra" -export ZENKO_IAM_INGRESS="iam.zenko-pra.local" -export ZENKO_STS_INGRESS="sts.zenko-pra.local" -export ZENKO_MANAGEMENT_INGRESS="management.zenko-pra.local" -export ZENKO_S3_INGRESS="s3.zenko-pra.local" -export ZENKO_UI_INGRESS="ui.zenko-pra.local" +echo 'ZENKO_MONGODB_DATABASE="pradb"' >> $GITHUB_ENV +echo 'ZENKO_MONGODB_SECRET_NAME="mongodb-db-creds-pra"' >> $GITHUB_ENV + +echo 'ZENKO_IAM_INGRESS="iam.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_STS_INGRESS="sts.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_MANAGEMENT_INGRESS="management.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_S3_INGRESS="s3.dr.zenko.local"' >> $GITHUB_ENV +echo 'ZENKO_UI_INGRESS="ui.dr.zenko.local"' >> $GITHUB_ENV MONGODB_ROOT_USERNAME="${MONGODB_ROOT_USERNAME:-'root'}" MONGODB_ROOT_PASSWORD="${MONGODB_ROOT_PASSWORD:-'rootpass'}" @@ -18,7 +21,7 @@ MONGODB_ROOT_PASSWORD="${MONGODB_ROOT_PASSWORD:-'rootpass'}" kubectl exec -it data-db-mongodb-sharded-mongos-0 -- mongo "admin" \ -u "root" \ -p "rootpass" \ - --eval "db.createUser({user:$MONGODB_PRA_USERNAME,pwd:$MONGODB_PRA_PASSWORD,roles:[{role:'enableSharding',db:$MONGODB_PRA_DATABASE },{role:'readWrite',db:$MONGODB_PRA_DATABASE },{role:'read',db:'local'}]})" + --eval "db.createUser({user:$MONGODB_PRA_USERNAME,pwd:$MONGODB_PRA_PASSWORD,roles:[{role:'enableSharding',db:$MONGODB_PRA_DATABASE },{role:'readWriteAnyDatabase',db:'admin'}]})" kubectl -n ${PRA_NAMESPACE} apply -f - <> $GITHUB_ENV - KAFKA_IMAGE=$(get_image_from_deps kafka) - KAFKA_TAG=$(yq eval '.kafka.tag' deps.yaml) - KAFKA_CONNECT_IMAGE=$(get_image_from_deps kafka-connect) - KAFKA_CONNECT_TAG=$(yq eval '.kafka-connect.tag' deps.yaml) - JMX_JAVAAGENT_IMAGE=$(get_image_from_deps jmx-javaagent) - JMX_JAVAAGENT_TAG=$(yq eval '.jmx-javaagent.tag' deps.yaml) - MONGODB_CONNECTOR_TAG=$(yq eval '.mongodb-connector.tag' deps.yaml) - KAFKA_CLEANER_IMAGE=$(get_image_from_deps kafka-cleaner) - KAFKA_CLEANER_TAG=$(yq eval '.kafka-cleaner.tag' deps.yaml) - KAFKA_CRUISECONTROL_IMAGE=$(get_image_from_deps kafka-cruise-control) - KAFKA_CRUISECONTROL_TAG=$(yq eval '.kafka-cruise-control.tag' deps.yaml) - KAFKA_LAGEXPORTER_IMAGE=$(get_image_from_deps kafka-lag-exporter) - KAFKA_LAGEXPORTER_TAG=$(yq eval '.kafka-lag-exporter.tag' deps.yaml) - EOF - - name: Deploy zenko dr components - run: bash deploy-dr.sh + ZENKO_MONGODB_SHARDED: "true" + ZENKO_MONGODB_DATABASE: "pradb" + working-directory: ./.github/scripts/end2end + - name: Configure E2E test environment + run: bash configure-e2e-ctst.sh + working-directory: ./.github/scripts/end2end + - name: Run CTST end to end tests + run: bash run-e2e-pra-ctst.sh working-directory: ./.github/scripts/end2end - name: Archive artifact logs and data uses: ./.github/actions/archive-artifacts @@ -685,6 +664,9 @@ jobs: GIT_ACCESS_TOKEN: ${{ 
secrets.GIT_ACCESS_TOKEN }} ZENKO_MONGODB_SHARDED: "true" ZENKO_ENABLE_SOSAPI: true + TIME_PROGRESSION_FACTOR: 86400 + TRANSITION_ONE_DAY_EARLIER: false + EXPIRE_ONE_DAY_EARLIER: false - name: Configure E2E test environment run: bash configure-e2e-ctst.sh working-directory: ./.github/scripts/end2end diff --git a/solution/deps.yaml b/solution/deps.yaml index 2a4154e747..b4582051e6 100644 --- a/solution/deps.yaml +++ b/solution/deps.yaml @@ -95,6 +95,11 @@ sorbet: image: sorbet tag: v1.1.10 envsubst: SORBET_TAG +drctl: + sourceRegistry: ghcr.io/scality + image: zenko-drctl + tag: v1.0.1 + envsubst: DRCTL_TAG # To be enabled back when utapi is used in Zenko 2.x # utapi: # sourceRegistry: ghcr.io/scality diff --git a/tests/ctst/Dockerfile b/tests/ctst/Dockerfile index 64b0b43ca2..3afd62b11d 100644 --- a/tests/ctst/Dockerfile +++ b/tests/ctst/Dockerfile @@ -1,6 +1,11 @@ ARG CTST_TAG +ARG DRCTL_TAG +ARG KUBECONFIG_PATH + +FROM ghcr.io/scality/zenko-drctl:$DRCTL_TAG as drctl FROM ghcr.io/scality/cli-testing:$CTST_TAG + COPY package.json /tmp/package.json COPY ./features /ctst/features COPY ./common /ctst/common @@ -16,4 +21,6 @@ ENV SDK=true WORKDIR /ctst +COPY --from=drctl /zenko-drctl . + CMD ["./run"] diff --git a/tests/ctst/package.json b/tests/ctst/package.json index 66a014cf6e..696d32f838 100644 --- a/tests/ctst/package.json +++ b/tests/ctst/package.json @@ -26,7 +26,7 @@ "@typescript-eslint/eslint-plugin": "^5.45.0", "@typescript-eslint/parser": "^5.45.0", "babel-jest": "^29.3.1", - "cli-testing": "github:scality/cli-testing.git#1.0.1", + "cli-testing": "github:scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d", "eslint": "^8.28.0" }, "scripts": { diff --git a/tests/ctst/steps/dr/drctl.ts b/tests/ctst/steps/dr/drctl.ts new file mode 100644 index 0000000000..6405a93a1e --- /dev/null +++ b/tests/ctst/steps/dr/drctl.ts @@ -0,0 +1,195 @@ +import util from 'util'; +import { exec } from 'child_process'; + +import Zenko from 'world/Zenko'; + +type InstallConfig = { + sourceZenkoDrInstance?: string; + sourceKafkaReplicas?: number; + sourceConnectorReplicas?: number; + sinkZenkoDrInstance?: string; + sinkKafkaReplicas?: number; + sinkConnectorReplicas?: number; + kafkaClusterLocation?: string; + kafkaNodePortStartingPort?: number; + kafkaPersistenceExistingPv?: string; + kafkaPersistenceSize?: string; + kafkaPersistenceStorageClassName?: string; + kafkaPersistenceAnnotations?: string; + kafkaPersistenceSelector?: string; + locations?: string; + s3Bucket?: string; + + sourceKubeconfigPath?: string; + sourceKubeconfigData?: string; + sinkKubeconfigPath?: string; + sinkKubeconfigData?: string; + sinkZenkoInstance?: string; + sinkZenkoNamespace?: string; + sourceZenkoInstance?: string; + sourceZenkoNamespace?: string; + + sourceS3Endpoint?: string; + sourceS3UserSecretName?: string; + sourceSs3AccessKeyField?: string; + sourceS3SecretKeyField?: string; + sourceS3Region?: string; + + sinkS3Endpoint?: string; + sinkS3UserSecretName?: string; + sinkSs3AccessKeyField?: string; + sinkS3SecretKeyField?: string; + sinkS3Region?: string; +}; + +type BootstrapDumpConfig = { + createBucketIfNotExists?: boolean; + cleanupBucketBeforeDump?: boolean; + locations?: string[]; + oidcProviders?: string[]; + s3Bucket?: string; + mongodbHosts?: string[]; + mongodbUsername?: string; + mongodbPassword?: string; + mongodbDatabase?: string; + mongodbReplicaset?: string; + mongodbReadPref?: string; + mongodbAuthDatabase?: string; + s3Endpoint?: string; + s3AccessKey?: string; + s3SecretKey?: string; + 
s3Region?: string;
+};
+
+type BootstrapLoadConfig = {
+    mongodbSourceDatabase?: string;
+    parallel?: number;
+    dropCollections?: boolean;
+    s3Bucket?: string;
+    mongodbHosts?: string[];
+    mongodbUsername?: string;
+    mongodbPassword?: string;
+    mongodbDatabase?: string;
+    mongodbReplicaset?: string;
+    mongodbReadPref?: string;
+    mongodbAuthDatabase?: string;
+    s3Endpoint?: string;
+    s3AccessKey?: string;
+    s3SecretKey?: string;
+    s3Region?: string;
+};
+
+type VolumeGetConfig = {
+    targetZenkoKubeconfigPath?: string;
+    targetZenkoKubeconfigData?: string;
+    targetZenkoInstance?: string;
+    targetZenkoNamespace?: string;
+    volumeName?: string;
+    volumeNodeName?: string;
+    timeout?: string;
+};
+
+type FailoverConfig = {
+    wait?: boolean;
+    timeout?: string;
+    sinkKubeconfigPath?: string;
+    sinkKubeconfigData?: string;
+    sinkZenkoInstance?: string;
+    sinkZenkoNamespace?: string;
+};
+
+type UninstallConfig = {
+    sinkZenkoDrInstance?: string;
+    sourceZenkoDrInstance?: string;
+    wait?: boolean;
+    timeout?: string;
+    sourceKubeconfigPath?: string;
+    sourceKubeconfigData?: string;
+    sinkKubeconfigPath?: string;
+    sinkKubeconfigData?: string;
+    sinkZenkoInstance?: string;
+    sinkZenkoNamespace?: string;
+    sourceZenkoInstance?: string;
+    sourceZenkoNamespace?: string;
+};
+
+type StatusConfig = {
+    sourceKubeconfigPath?: string;
+    sourceKubeconfigData?: string;
+    sinkKubeconfigPath?: string;
+    sinkKubeconfigData?: string;
+    sourceZenkoInstance?: string;
+    sourceZenkoNamespace?: string;
+    sinkZenkoInstance?: string;
+    sinkZenkoNamespace?: string;
+    sourceZenkoDrInstance?: string;
+    sinkZenkoDrInstance?: string;
+    output?: string;
+};
+
+/**
+ * Helper class to run Drctl tool
+ */
+export default class ZenkoDrctl {
+    private world: Zenko;
+
+    constructor(world: Zenko) {
+        this.world = world;
+    }
+
+    private async runCommand(action: string, params: string, throwOnError = false) {
+        const command = `/ctst/zenko-drctl ${action} ${params}`;
+        try {
+            this.world.logger.debug('running zenko-drctl command', { command });
+            const result = await util.promisify(exec)(command);
+            this.world.logger.debug('zenko-drctl command result', { result });
+            return result.stdout;
+        } catch (err) {
+            this.world.logger.debug('zenko-drctl command failed', { err });
+            if (throwOnError) {
+                throw err;
+            }
+            return null;
+        }
+    }
+
+    async install(config: InstallConfig) {
+        return this.runCommand('install', this.paramToCli(config), true);
+    }
+
+    async uninstall(config: UninstallConfig) {
+        return this.runCommand('uninstall', this.paramToCli(config), true);
+    }
+
+    async bootstrapDump(config: BootstrapDumpConfig) {
+        return this.runCommand('bootstrap dump', this.paramToCli(config));
+    }
+
+    async bootstrapLoad(config: BootstrapLoadConfig) {
+        return this.runCommand('bootstrap load', this.paramToCli(config));
+    }
+
+    async failover(config: FailoverConfig) {
+        return this.runCommand('failover', this.paramToCli(config));
+    }
+
+    async status(config: StatusConfig) {
+        return this.runCommand('status', this.paramToCli(config));
+    }
+
+    async volumeGet(config: VolumeGetConfig) {
+        return this.runCommand('volume get', this.paramToCli(config));
+    }
+
+    paramToCli(params: Record<string, unknown>): string {
+        const command: string[] = [];
+        Object.keys(params).forEach(key => {
+            const value = params[key];
+            if (value !== undefined && value !== null) {
+                command.push(`--${key.replace(/([A-Z])/g, '-$1').toLowerCase()}`);
+                command.push(String(value));
+            }
+        });
+        return command.join(' ');
+    }
+}
diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index e4f82701bb..e4a34a4dbf 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -21,6 +21,8 @@ import {
 } from 'cli-testing';
 
 import { extractPropertyFromResults } from '../common/utils';
+import ZenkoDrctl from 'steps/dr/drctl';
+import assert from 'assert';
 
 interface ServiceUsersCredentials {
@@ -93,6 +95,8 @@ export default class Zenko extends World<ZenkoWorldParameters> {
 
     private saved: Record<string, unknown> = {};
 
+    public zenkoDrCtl: ZenkoDrctl | null = null;
+
     public logger: Werelogs.RequestLogger = new Werelogs.Logger('CTST').newRequestLogger();
 
     /**
diff --git a/tests/ctst/yarn.lock b/tests/ctst/yarn.lock
index 8721d6c831..4185e60153 100644
--- a/tests/ctst/yarn.lock
+++ b/tests/ctst/yarn.lock
@@ -4807,9 +4807,9 @@ cli-table3@^0.6.0:
   optionalDependencies:
     "@colors/colors" "1.5.0"
 
-"cli-testing@github:scality/cli-testing.git#1.0.1":
-  version "1.0.1"
-  resolved "git+ssh://git@github.com/scality/cli-testing.git#46c8ec4b9b22928ae806786f1175416949fa30eb"
+"cli-testing@github:scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d":
+  version "1.2.0"
+  resolved "git+ssh://git@github.com/scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d"
   dependencies:
     "@aws-crypto/sha256-universal" "^5.2.0"
     "@aws-sdk/client-iam" "^3.484.0"

From 611e3afa543040dc3f03bf9e86416a2771ef9046 Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Mon, 29 Jul 2024 09:27:29 +0200
Subject: [PATCH 02/25] Setup Zenko world to support multiple sites

Issue: ZENKO-4773
---
 tests/ctst/common/common.ts     |  82 ++--------------
 tests/ctst/steps/utils/utils.ts |  78 ++++++++++++++-
 tests/ctst/world/Zenko.ts       | 168 ++++++++++++++++++++++----------
 3 files changed, 204 insertions(+), 124 deletions(-)

diff --git a/tests/ctst/common/common.ts b/tests/ctst/common/common.ts
index e0e07d4421..4b7075d78c 100644
--- a/tests/ctst/common/common.ts
+++ b/tests/ctst/common/common.ts
@@ -5,7 +5,13 @@ import Zenko from 'world/Zenko';
 import { safeJsonParse } from './utils';
 import assert from 'assert';
 import { Admin, Kafka } from 'kafkajs';
-import { createBucketWithConfiguration, putObject, runActionAgainstBucket } from 'steps/utils/utils';
+import {
+    createBucketWithConfiguration,
+    putObject,
+    runActionAgainstBucket,
+    getObjectNameWithBackendFlakiness,
+    verifyObjectLocation,
+} from 'steps/utils/utils';
 import { ActionPermissionsType } from 'steps/bucket-policies/utils';
 
 setDefaultTimeout(Constants.DEFAULT_TIMEOUT);
@@ -45,35 +51,6 @@ export async function cleanS3Bucket(
     await S3.deleteBucket(world.getCommandParameters());
 }
 
-/**
- * @param {Zenko} this world object
- * @param {string} objectName object name
- * @returns {string} the object name based on the backend flakyness
- */
-function getObjectNameWithBackendFlakiness(this: Zenko, objectName: string) {
-    let objectNameFinal;
-    const backendFlakinessRetryNumber = this.getSaved('backendFlakinessRetryNumber');
-    const backendFlakiness = this.getSaved('backendFlakiness');
-
-    if (!backendFlakiness || !backendFlakinessRetryNumber || !objectName) {
-        return objectName;
-    }
-
-    switch (backendFlakiness) {
-    case 'command':
-        objectNameFinal = `${objectName}.scal-retry-command-${backendFlakinessRetryNumber}`;
-        break;
-    case 'archive':
-    case 'restore':
-        objectNameFinal = `${objectName}.scal-retry-${backendFlakiness}-job-${backendFlakinessRetryNumber}`;
-        break;
-    default:
-        this.logger.debug('Unknown backend flakyness', { backendFlakiness });
-        return objectName;
-    }
-    return objectNameFinal;
-}
-
 async function addMultipleObjects(this: Zenko,
numberObjects: number, objectName: string, sizeBytes: number, userMD?: string) {
     let lastResult = null;
@@ -260,49 +237,8 @@ When('i restore object {string} for {int} days', async function (this: Zenko, ob
 });
 
 // wait for object to transition to a location or get restored from it
-Then('object {string} should be {string} and have the storage class {string}', { timeout: 130000 },
-    async function (this: Zenko, objectName: string, objectTransitionStatus: string, storageClass: string) {
-        const objName =
-            getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName');
-        this.resetCommand();
-        this.addCommandParameter({ bucket: this.getSaved('bucketName') });
-        this.addCommandParameter({ key: objName });
-        const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
-        if (versionId) {
-            this.addCommandParameter({ versionId });
-        }
-        let conditionOk = false;
-        while (!conditionOk) {
-            const res = await S3.headObject(this.getCommandParameters());
-            if (res.err) {
-                break;
-            }
-            assert(res.stdout);
-            const parsed = safeJsonParse(res.stdout);
-            assert(parsed.ok);
-            const head = parsed.result as {
-                StorageClass: string | undefined,
-                Restore: string | undefined,
-            };
-            const expectedClass = storageClass !== '' ? storageClass : undefined;
-            if (head?.StorageClass === expectedClass) {
-                conditionOk = true;
-            }
-            if (objectTransitionStatus == 'restored') {
-                const isRestored = !!head?.Restore &&
-                    head.Restore.includes('ongoing-request="false", expiry-date=');
-                // if restore didn't get initiated fail immediately
-                const isPendingRestore = !!head?.Restore &&
-                    head.Restore.includes('ongoing-request="true"');
-                assert(isRestored || isPendingRestore, 'Restore didn\'t get initiated');
-                conditionOk = conditionOk && isRestored;
-            } else if (objectTransitionStatus == 'cold') {
-                conditionOk = conditionOk && !head?.Restore;
-            }
-            await Utils.sleep(1000);
-        }
-        assert(conditionOk);
-    });
+Then('object {string} should be {string} and have the storage class {string}',
+    { timeout: 130000 }, verifyObjectLocation);
 
 When('i delete object {string}', async function (this: Zenko, objectName: string) {
     const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName');
diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts
index 9deb3727a1..4b184c2264 100644
--- a/tests/ctst/steps/utils/utils.ts
+++ b/tests/ctst/steps/utils/utils.ts
@@ -9,8 +9,9 @@ import {
     AWSVersionObject,
     Command,
 } from 'cli-testing';
-import { extractPropertyFromResults, s3FunctionExtraParams } from 'common/utils';
+import { extractPropertyFromResults, s3FunctionExtraParams, safeJsonParse } from 'common/utils';
 import Zenko from 'world/Zenko';
+import assert from 'assert';
 
 enum AuthorizationType {
     ALLOW = 'Allow',
@@ -252,6 +253,79 @@ async function emptyVersionedBucket(world: Zenko) {
     }));
 }
 
+async function verifyObjectLocation(this: Zenko, objectName: string,
+    objectTransitionStatus: string, storageClass: string) {
+    const objName =
+        getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName');
+    this.resetCommand();
+    this.addCommandParameter({ bucket: this.getSaved('bucketName') });
+    this.addCommandParameter({ key: objName });
+    const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
+    if (versionId) {
+        this.addCommandParameter({ versionId });
+    }
+    let conditionOk = false;
+    while (!conditionOk) {
+        const res = await S3.headObject(this.getCommandParameters());
+        if (res.err) {
+            break;
+        }
+        assert(res.stdout);
+        const parsed = safeJsonParse<{
+            StorageClass: string | undefined,
+            Restore: string | undefined,
+        }>(res.stdout);
+        assert(parsed.ok);
+        const expectedClass = storageClass !== '' ? storageClass : undefined;
+        if (parsed.result?.StorageClass === expectedClass) {
+            conditionOk = true;
+        }
+        if (objectTransitionStatus == 'restored') {
+            const isRestored = !!parsed.result?.Restore &&
+                parsed.result.Restore.includes('ongoing-request="false", expiry-date=');
+            // if restore didn't get initiated fail immediately
+            const isPendingRestore = !!parsed.result?.Restore &&
+                parsed.result.Restore.includes('ongoing-request="true"');
+            assert(isRestored || isPendingRestore, 'Restore didn\'t get initiated');
+            conditionOk = conditionOk && isRestored;
+        } else if (objectTransitionStatus == 'cold') {
+            conditionOk = conditionOk && !parsed.result?.Restore;
+        }
+        await Utils.sleep(1000);
+    }
+    assert(conditionOk);
+}
+
+/**
+ * @param {Zenko} this world object
+ * @param {string} objectName object name
+ * @returns {string} the object name based on the backend flakiness
+ */
+function getObjectNameWithBackendFlakiness(this: Zenko, objectName: string) {
+    let objectNameFinal;
+    const backendFlakinessRetryNumber = this.getSaved('backendFlakinessRetryNumber');
+    const backendFlakiness = this.getSaved('backendFlakiness');
+
+    if (!backendFlakiness || !backendFlakinessRetryNumber || !objectName) {
+        return objectName;
+    }
+
+    switch (backendFlakiness) {
+    case 'command':
+        objectNameFinal = `${objectName}.scal-retry-command-${backendFlakinessRetryNumber}`;
+        break;
+    case 'archive':
+    case 'restore':
+        objectNameFinal = `${objectName}.scal-retry-${backendFlakiness}-job-${backendFlakinessRetryNumber}`;
+        break;
+    default:
+        this.logger.debug('Unknown backend flakiness', { backendFlakiness });
+        return objectName;
+    }
+    return objectNameFinal;
+}
+
+
 export {
     AuthorizationType,
     AuthorizationConfiguration,
@@ -261,4 +335,6 @@ export {
     putObject,
     emptyNonVersionedBucket,
     emptyVersionedBucket,
+    verifyObjectLocation,
+    getObjectNameWithBackendFlakiness,
 };
diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index e4a34a4dbf..8aa52b5f81 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -45,6 +45,9 @@ export interface ZenkoWorldParameters extends ClientOptions {
     AccountName: string;
     AccountAccessKey: string;
     AccountSecretKey: string;
+    DRAdminAccessKey?: string;
+    DRAdminSecretKey?: string;
+    DRSubdomain?: string;
     VaultAuthHost: string;
     NotificationDestination: string;
     NotificationDestinationTopic: string;
@@ -97,8 +100,18 @@ export default class Zenko extends World<ZenkoWorldParameters> {
 
     public zenkoDrCtl: ZenkoDrctl | null = null;
 
+    static sites: {
+        [key: string]: {
+            accountName: string;
+            adminIdentityName: string;
+        };
+    } = {};
+
     public logger: Werelogs.RequestLogger = new Werelogs.Logger('CTST').newRequestLogger();
 
+    static readonly PRIMARY_SITE_NAME = 'admin';
+    static readonly SECONDARY_SITE_NAME = 'dradmin';
+
     /**
      * @constructor
      * @param {Object} options - parameters provided as a CLI parameter when running the tests
@@ -138,14 +151,41 @@ export default class Zenko extends World<ZenkoWorldParameters> {
         }
 
         if (this.parameters.AdminAccessKey && this.parameters.AdminSecretKey &&
-            !Identity.hasIdentity(IdentityEnum.ADMIN, 'admin')) {
-            Identity.addIdentity(IdentityEnum.ADMIN, 'admin', {
+            !Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME)) {
+            Identity.addIdentity(IdentityEnum.ADMIN, Zenko.PRIMARY_SITE_NAME, {
                 accessKeyId: this.parameters.AdminAccessKey,
                 secretAccessKey:
this.parameters.AdminSecretKey, - }); + }, undefined, undefined, undefined, this.parameters.subdomain); + + Zenko.sites['source'] = { + accountName: this.parameters.AccountName, + adminIdentityName: Zenko.PRIMARY_SITE_NAME, + }; + } else { + delete Zenko.sites['source']; + } + + if (this.needsSecondarySite()) { + if (!Identity.hasIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME)) { + Identity.addIdentity(IdentityEnum.ADMIN, Zenko.SECONDARY_SITE_NAME, { + accessKeyId: this.parameters.DRAdminAccessKey!, + secretAccessKey: this.parameters.DRAdminSecretKey!, + }, undefined, undefined, undefined, this.parameters.DRSubdomain); + } + + Zenko.sites['sink'] = { + accountName: `dr${this.parameters.AccountName}`, + adminIdentityName: Zenko.SECONDARY_SITE_NAME, + }; + } else { + delete Zenko.sites['sink']; } } + private needsSecondarySite() { + return this.parameters.DRAdminAccessKey && this.parameters.DRAdminSecretKey && this.parameters.DRSubdomain; + } + /** * This function will dynamically determine if the result from the AWS command * is a success or a failure. Based on the fact that AWS either return an empty string @@ -367,15 +407,19 @@ export default class Zenko extends World { } } - async createAccount(name?: string, force?: boolean) { + async createAccount(name?: string, force?: boolean, adminClientName?: string) { Identity.resetIdentity(); - const accountName = this.getSaved('accountName') || - name || `${Constants.ACCOUNT_NAME}${Utils.randomString()}`; + const accountName = name || this.getSaved('accountName') || + `${Constants.ACCOUNT_NAME}${Utils.randomString()}`; if (Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName) && !force) { Identity.useIdentity(IdentityEnum.ACCOUNT, accountName); return; } + if (adminClientName && Identity.hasIdentity(IdentityEnum.ADMIN, adminClientName)) { + Identity.useIdentity(IdentityEnum.ADMIN, adminClientName); + } + await SuperAdmin.createAccount({ accountName }); const credentials = await SuperAdmin.generateAccountAccessKey({ accountName }); Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, credentials, undefined, true, true); @@ -545,59 +589,83 @@ export default class Zenko extends World { * @returns {undefined} */ static async init(parameters: ZenkoWorldParameters) { - const accountName = parameters.AccountName || Constants.ACCOUNT_NAME; CacheHelper.logger.debug('Initializing Zenko', { - accountName, parameters, }); - if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) { - CacheHelper.adminClient = await Utils.getAdminCredentials(parameters); - - let account = null; + // Create the default account for each site configured + // and generate access keys for it + for (const siteKey in Zenko.sites) { + const site = Zenko.sites[siteKey]; + Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName); + const accountName = site.accountName; + assert(accountName, `Account name is not defined for site ${siteKey}`); + CacheHelper.logger.debug('Initializing account for Zenko site', { + siteKey, + accountName, + }); - // Create the account if already exist will not throw any error - try { - await SuperAdmin.createAccount({ accountName }); - /* eslint-disable */ - } catch (err: any) { - if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') { - throw err; + if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) { + Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName); + + let account = null; + CacheHelper.logger.debug('Creating account', { + accountName, + adminIdentityName: site.adminIdentityName, + 
credentials: Identity.getCurrentCredentials(),
+                });
+                // Create the account if already exist will not throw any error
+                try {
+                    await SuperAdmin.createAccount({ accountName });
+                    /* eslint-disable */
+                } catch (err: any) {
+                    CacheHelper.logger.debug('Error while creating account', {
+                        accountName,
+                        err,
+                    });
+                    if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') {
+                        throw err;
+                    }
                 }
-            /* eslint-enable */
-            // Waiting until the account exists, in case of parallel mode.
-            let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES;
-            account = await SuperAdmin.getAccount({ accountName });
-            while (!account && remaining > 0) {
-                await Utils.sleep(500);
+                /* eslint-enable */
+                // Waiting until the account exists, in case of parallel mode.
+                let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES;
                 account = await SuperAdmin.getAccount({ accountName });
-                remaining--;
-            }
-            if (!account) {
-                throw new Error(`Account ${accountName} not found.`);
-            }
-
-            // Account was found, generate access keys if not provided
-            const accountAccessKeys = Identity.getCredentialsForIdentity(
-                IdentityEnum.ACCOUNT, accountName) || {
-                accessKeyId: '',
-                secretAccessKey: '',
-            };
-
-            if (!parameters.AccountName || !accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
-                const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
-                if (!Utils.isAccessKeys(accessKeys)) {
-                    throw new Error('Failed to generate account access keys');
+                while (!account && remaining > 0) {
+                    await Utils.sleep(500);
+                    account = await SuperAdmin.getAccount({ accountName });
+                    remaining--;
                 }
+                if (!account) {
+                    throw new Error(`Account ${accountName} not found in site ${siteKey}.`);
+                }
+
+                // Account was found, generate access keys if not provided
+                const accountAccessKeys = Identity.getCredentialsForIdentity(
+                    IdentityEnum.ACCOUNT, accountName) || {
+                    accessKeyId: '',
+                    secretAccessKey: '',
+                };
+
+                if (!accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
+                    const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
+                    if (!Utils.isAccessKeys(accessKeys)) {
+                        throw new Error(`Failed to generate account access keys for site ${siteKey}`);
                     }
-                accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
-                accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
+                    accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
+                    accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
+                }
+
+                CacheHelper.logger.debug('Adding account identity', {
+                    accountName,
+                    accountAccessKeys,
+                });
+                Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
             }
-
-            Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
-        } else {
-            Identity.useIdentity(IdentityEnum.ACCOUNT, accountName);
         }
-    }
+        // Fallback to the primary site's account at the end of the init by default
+        Identity.useIdentity(IdentityEnum.ACCOUNT, this.sites['source']?.accountName
+            || CacheHelper.parameters.AccountName!);
+    }
 
     /**
      * Creates an IAM user with policy and access keys to be used in the tests.
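
The multi-site identity bookkeeping introduced in this patch can be driven from any step definition. A minimal sketch, assuming a hypothetical site-selection step (only `Zenko.sites`, `Identity.useIdentity`, and `IdentityEnum.ACCOUNT` come from the patch itself):

import { When } from '@cucumber/cucumber';
import { Identity, IdentityEnum } from 'cli-testing';
import Zenko from 'world/Zenko';

// Hypothetical step: point the active credentials at one of the configured
// sites ('source' or 'sink') before running S3 commands against it.
When('the {string} site is selected', function (this: Zenko, site: string) {
    const siteConfig = Zenko.sites[site];
    if (!siteConfig) {
        throw new Error(`site ${site} is not configured (DR parameters missing?)`);
    }
    // Zenko.init() registers each site's account under its account name.
    Identity.useIdentity(IdentityEnum.ACCOUNT, siteConfig.accountName);
});
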
From 3f2979561cd8ae64ca5170172e3da14fb5a1ad0f Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Mon, 29 Jul 2024 09:30:17 +0200
Subject: [PATCH 03/25] Add kube client helpers for DR

Issue: ZENKO-4773
---
 .github/actions/archive-artifacts/action.yaml |  12 +
 solution/deps.yaml                            |   2 +-
 tests/ctst/common/common.ts                   |  23 +-
 tests/ctst/common/hooks.ts                    |  13 +-
 tests/ctst/common/utils.ts                    |   8 +-
 tests/ctst/features/pra.feature               |  28 ++
 tests/ctst/steps/dmf.ts                       |  23 ++
 tests/ctst/steps/pra.ts                       | 290 ++++++++++++++++++
 tests/ctst/steps/utils/kubernetes.ts          | 117 ++++++-
 9 files changed, 493 insertions(+), 23 deletions(-)
 create mode 100644 tests/ctst/features/pra.feature
 create mode 100644 tests/ctst/steps/dmf.ts
 create mode 100644 tests/ctst/steps/pra.ts

diff --git a/.github/actions/archive-artifacts/action.yaml b/.github/actions/archive-artifacts/action.yaml
index 375a56dc08..cc104e67b8 100644
--- a/.github/actions/archive-artifacts/action.yaml
+++ b/.github/actions/archive-artifacts/action.yaml
@@ -41,3 +41,15 @@ runs:
           sh -c "kubectl exec -i -n ${NAMESPACE} kcat -- \
             kcat -L -b ${KAFKA_SERVICE} -t {} -C -o beginning -e -q -J \
             > /tmp/artifacts/data/${STAGE}/kafka-messages-{}.log"
+
+        KAFKA_SERVICE_PRA=$(kubectl get services -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME:-end2end-pra}-sink-base-queue -o jsonpath='{.items[0].metadata.name}')
+        KAFKA_PRA=$(kubectl get pods -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME_PRA:-end2end-pra}-sink-base-queue -o jsonpath='{.items[0].metadata.name}')
+        kubectl exec -in ${NAMESPACE} ${KAFKA_PRA} -c kafka -- \
+            env KAFKA_OPTS= kafka-topics.sh --bootstrap-server :9092 --list \
+            > /tmp/artifacts/data/${STAGE}/kafka-topics-pra.log
+        kubectl run -n ${NAMESPACE} k2cat --image=edenhill/kcat:1.7.1 --restart=Never --command -- sleep 300
+        kubectl wait -n ${NAMESPACE} pod k2cat --for=condition=ready
+        cat /tmp/artifacts/data/${STAGE}/kafka-topics-pra.log | grep -v '^__' | xargs -P 15 -I {} \
+          sh -c "kubectl exec -i -n ${NAMESPACE} k2cat -- \
+            kcat -L -b ${KAFKA_SERVICE_PRA} -t {} -C -o beginning -e -q -J \
+            > /tmp/artifacts/data/${STAGE}/kafka-messages-pra-{}.log"
diff --git a/solution/deps.yaml b/solution/deps.yaml
index b4582051e6..d219b84f20 100644
--- a/solution/deps.yaml
+++ b/solution/deps.yaml
@@ -98,7 +98,7 @@ sorbet:
 drctl:
   sourceRegistry: ghcr.io/scality
   image: zenko-drctl
-  tag: v1.0.1
+  tag: afbb04a57973e391b4cbcaacca444a4d5da0a540
   envsubst: DRCTL_TAG
 # To be enabled back when utapi is used in Zenko 2.x
 # utapi:
diff --git a/tests/ctst/common/common.ts b/tests/ctst/common/common.ts
index 4b7075d78c..77cde0b62d 100644
--- a/tests/ctst/common/common.ts
+++ b/tests/ctst/common/common.ts
@@ -34,11 +34,10 @@ export async function cleanS3Bucket(
     const createdObjects = world.getSaved<Map<string, string>>('createdObjects');
     if (createdObjects !== undefined) {
         const results = await S3.listObjectVersions(world.getCommandParameters());
-        const res = safeJsonParse(results.stdout);
+        const res = safeJsonParse<ListObjectVersionsOutput>(results.stdout);
         assert(res.ok);
-        const parsedResults = res.result as ListObjectVersionsOutput;
-        const versions = parsedResults.Versions || [];
-        const deleteMarkers = parsedResults.DeleteMarkers || [];
+        const versions = res.result!.Versions || [];
+        const deleteMarkers = res.result!.DeleteMarkers || [];
         await Promise.all(versions.concat(deleteMarkers).map(obj => {
             world.addCommandParameter({ key: obj.Key });
             world.addCommandParameter({ versionId: obj.VersionId });
@@ -165,9 +164,8 @@ Then('object {string} should have the tag {string} with value {string}',
             this.addCommandParameter({ versionId });
         }
         await S3.getObjectTagging(this.getCommandParameters()).then(res => {
-            const parsed = safeJsonParse(res.stdout);
-            const head = parsed.result as { TagSet: [{Key: string, Value: string}] | undefined };
-            assert(head.TagSet?.some(tag => tag.Key === tagKey && tag.Value === tagValue));
+            const parsed = safeJsonParse<{ TagSet: [{Key: string, Value: string}] | undefined }>(res.stdout);
+            assert(parsed.result!.TagSet?.some(tag => tag.Key === tagKey && tag.Value === tagValue));
         });
     });
@@ -183,12 +181,11 @@ Then('object {string} should have the user metadata with key {string} and value
         const res = await S3.headObject(this.getCommandParameters());
         assert.ifError(res.stderr);
         assert(res.stdout);
-        const parsed = safeJsonParse(res.stdout);
+        const parsed = safeJsonParse<{ Metadata: {[key: string]: string} | undefined }>(res.stdout);
         assert(parsed.ok);
-        const head = parsed.result as { Metadata: {[key: string]: string} | undefined };
-        assert(head.Metadata);
-        assert(head.Metadata[userMDKey]);
-        assert(head.Metadata[userMDKey] === userMDValue);
+        assert(parsed.result!.Metadata);
+        assert(parsed.result!.Metadata[userMDKey]);
+        assert(parsed.result!.Metadata[userMDKey] === userMDValue);
     });
 
 // add a transition workflow to a bucket
@@ -203,7 +200,7 @@ Given('a transition workflow to {string} location', async function (this: Zenko,
             Prefix: '',
             Transitions: [
                 {
-                    Days: 20,
+                    Days: 0,
                     StorageClass: location,
                 },
             ],
diff --git a/tests/ctst/common/hooks.ts b/tests/ctst/common/hooks.ts
index 80c8fd4c37..f1b70e75b9 100644
--- a/tests/ctst/common/hooks.ts
+++ b/tests/ctst/common/hooks.ts
@@ -7,12 +7,13 @@ import {
 } from '@cucumber/cucumber';
 import Zenko from '../world/Zenko';
 import { Identity } from 'cli-testing';
 import { prepareQuotaScenarios, teardownQuotaScenarios } from 'steps/quotas/quotas';
+import { displayDebuggingInformation, preparePRA } from 'steps/pra';
 
 // HTTPS should not cause any error for CTST
 process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
 
 const { atMostOnePicklePerTag } = parallelCanAssignHelpers;
-const noParallelRun = atMostOnePicklePerTag(['@AfterAll']);
+const noParallelRun = atMostOnePicklePerTag(['@AfterAll', '@PRA', '@ColdStorage']);
 
 setParallelCanAssign(noParallelRun);
 
@@ -22,6 +23,16 @@ Before(async function (this: Zenko) {
     await Zenko.init(this.parameters);
 });
 
+Before({ tags: '@PRA' }, function () {
+    preparePRA(this as Zenko);
+});
+
+After({ tags: '@PRA' }, async function (this, results) {
+    if (results.result?.status === 'FAILED') {
+        await displayDebuggingInformation(this as Zenko);
+    }
+});
+
 Before({ tags: '@Quotas', timeout: 1200000 }, async function (scenarioOptions) {
     await prepareQuotaScenarios(this as Zenko, scenarioOptions);
 });
diff --git a/tests/ctst/common/utils.ts b/tests/ctst/common/utils.ts
index 9fec76eb80..f201bcd3b1 100644
--- a/tests/ctst/common/utils.ts
+++ b/tests/ctst/common/utils.ts
@@ -87,12 +87,12 @@ export const s3FunctionExtraParams: { [key: string]: Record<string, unknown>[] }
     }],
 };
 
-export function safeJsonParse(jsonString: string): { ok: boolean, result: object } {
-    let result = {};
+export function safeJsonParse<T>(jsonString: string): { ok: boolean, result: T | null } {
+    let result: T;
     try {
-        result = JSON.parse(jsonString) as object;
+        result = JSON.parse(jsonString) as T;
     } catch (err) {
-        return { ok: false, result };
+        return { ok: false, result: null };
     }
     return { ok: true, result };
 }
diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature
new file mode 100644
index 0000000000..8d99474626
--- /dev/null
+++ b/tests/ctst/features/pra.feature
@@ -0,0 +1,28 @@
+Feature: PRA operations
+
+  @2.6.0
+  @PreMerge
+  @Dmf
+  @PRA
+  @ColdStorage
+  Scenario Outline: PRA
+    Given a "<versioningConfiguration>" bucket
+    And a transition workflow to "e2e-cold" location
+    And <objectCount> objects "obj" of size <objectSize> bytes
+    Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold"
+    And object "obj-2" should be "transitioned" and have the storage class "e2e-cold"
+    And dmf volume should contain <objectCount> objects
+    Given a DR installed
+    Then the DR source should be in phase "Running"
+    And the DR sink should be in phase "Running"
+    Given access keys for the replicated account
+    Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on DR site
+    And the kafka DR volume exists
+    When I uninstall DR
+    Then the DR custom resources should be deleted
+
+    Examples:
+      | versioningConfiguration | objectCount | objectSize |
+      | Non versioned           | 2           | 100        |
+
+
diff --git a/tests/ctst/steps/dmf.ts b/tests/ctst/steps/dmf.ts
new file mode 100644
index 0000000000..535e26da04
--- /dev/null
+++ b/tests/ctst/steps/dmf.ts
@@ -0,0 +1,23 @@
+import { Then, After } from '@cucumber/cucumber';
+import assert from 'assert';
+import { execShellCommand } from 'common/utils';
+
+async function cleanDmfVolume() {
+    await execShellCommand('rm -rf /cold-data/*');
+}
+
+Then('dmf volume should contain {int} objects', async (objectCount: number) => {
+    let conditionOk = false;
+    while (!conditionOk) {
+        // Getting the number of objects inside the volume used
+        // by the mock dmf to store transitioned objects
+        const outStr = await execShellCommand('find /cold-data -type f | wc -l');
+        // we store two files per object (content and manifest.json)
+        conditionOk = Number(outStr) === objectCount * 2;
+    }
+    assert(conditionOk);
+});
+
+After({ tags: '@Dmf' }, async () => {
+    await cleanDmfVolume();
+});
diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts
new file mode 100644
index 0000000000..26c498e4d1
--- /dev/null
+++ b/tests/ctst/steps/pra.ts
@@ -0,0 +1,290 @@
+import { Given, Then, When } from '@cucumber/cucumber';
+import Zenko from 'world/Zenko';
+import ZenkoDrctl from './dr/drctl';
+import {
+    createSecret,
+    displayCRStatus,
+    getDRSink,
+    getDRSource,
+    getPVCFromLabel,
+} from './utils/kubernetes';
+import {
+    verifyObjectLocation,
+} from 'steps/utils/utils';
+import { Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing';
+import { safeJsonParse } from 'common/utils';
+import assert from 'assert';
+
+enum ZenkoDrSinkPhases {
+    ZenkoDRSinkPhaseNew = 'New',
+    ZenkoDRSinkPhaseBootstrapWaiting = 'Bootstrap:Waiting',
+    ZenkoDRSinkPhaseBootstrapReceiving = 'Bootstrap:Receiving',
+    ZenkoDRSinkPhaseBootstrapFailed = 'Bootstrap:Failed',
+    ZenkoDRSinkPhaseRunning = 'Running',
+    ZenkoDRSinkPhasePaused = 'Paused',
+    ZenkoDRSinkPhaseFailover = 'Failover',
+}
+
+enum ZenkoDrSourcePhases {
+    ZenkoDRSourcePhaseNew = 'New',
+    ZenkoDRSourcePhaseBootstrapWaiting = 'Bootstrap:Waiting',
+    ZenkoDRSourcePhaseBootstrapSending = 'Bootstrap:Sending',
+    ZenkoDRSourcePhaseBootstrapFailed = 'Bootstrap:Failed',
+    ZenkoDRSourcePhaseRunning = 'Running',
+    ZenkoDRSourcePhasePaused = 'Paused',
+}
+
+interface DrState {
+    source: {
+        crStatus: {
+            phase: ZenkoDrSourcePhases;
+        },
+    };
+    sink: {
+        crStatus: {
+            phase: ZenkoDrSinkPhases;
+        },
+    };
+}
+
+export function preparePRA(world: Zenko) {
+    // eslint-disable-next-line no-param-reassign
+    world.zenkoDrCtl = new ZenkoDrctl(world);
+}
+
+export async function displayDebuggingInformation(world: Zenko) {
+    await displayCRStatus(world);
+    const
drSource = await getDRSource(world);
+    const drSink = await getDRSink(world);
+
+    world.logger.debug('Zenko DR custom resources', {
+        drSink,
+        drSource,
+    });
+}
+
+async function waitForPhase(
+    world: Zenko,
+    target: 'source' | 'sink',
+    state: ZenkoDrSinkPhases | ZenkoDrSourcePhases,
+    timeout = 130000,
+): Promise<boolean> {
+    const start = Date.now();
+
+    while (Date.now() - start < timeout) {
+        let phase;
+
+        const currentStatus = await world.zenkoDrCtl?.status({
+            sinkZenkoNamespace: 'default',
+            sourceZenkoNamespace: 'default',
+            sinkZenkoDrInstance: 'end2end-pra-sink',
+            sourceZenkoDrInstance: 'end2end-source',
+            output: 'json',
+        });
+
+        if (!currentStatus) {
+            world.logger.debug('Failed to get DR status, retrying', {
+                currentStatus,
+            });
+            await Utils.sleep(1000);
+            continue;
+        }
+
+        const lines = currentStatus.split('\n');
+        let parsedStatus: DrState | null = null;
+
+        for (const line of lines) {
+            try {
+                const json = safeJsonParse<DrState>(line);
+                if (json.ok && json.result?.source && json.result?.sink) {
+                    parsedStatus = json.result;
+                    break;
+                }
+            } catch (e) {
+                continue;
+            }
+        }
+
+        if (!parsedStatus) {
+            world.logger.debug('Failed to parse DR status, retrying', {
+                parsedStatus,
+            });
+            await Utils.sleep(1000);
+            continue;
+        }
+
+        if (target === 'sink') {
+            phase = parsedStatus.sink.crStatus.phase;
+        } else {
+            phase = parsedStatus.source.crStatus.phase;
+        }
+
+        world.logger.debug('current phase', {
+            phase,
+            target,
+        });
+
+        if (phase === state) {
+            return true;
+        }
+
+        await Utils.sleep(1000);
+    }
+
+    return false;
+}
+
+Given('a DR installed', { timeout: 130000 }, async function (this: Zenko) {
+    Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName);
+    const credentials = Identity.getCurrentCredentials();
+    await createSecret(this, 'drctl-s3-creds', {
+        accessKey: Buffer.from(credentials.accessKeyId).toString('base64'),
+        secretAccessKey: Buffer.from(credentials.secretAccessKey).toString('base64'),
+    });
+    await this.zenkoDrCtl?.install({
+        sourceZenkoDrInstance: 'end2end-source',
+        sinkZenkoDrInstance: 'end2end-pra-sink',
+        kafkaPersistenceSize: '1Gi',
+        kafkaPersistenceStorageClassName: 'standard',
+        locations: 'e2e-cold',
+        s3Bucket: 'dump-db',
+        sinkZenkoInstance: 'end2end-pra',
+        sinkZenkoNamespace: 'default',
+        sourceZenkoInstance: 'end2end',
+        sourceZenkoNamespace: 'default',
+        sourceS3Endpoint: 'http://s3.zenko.local',
+        sinkS3Endpoint: 'http://s3.zenko.local',
+    });
+    return;
+});
+
+Then('the DR sink should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) {
+    let targetPhase;
+    switch (state) {
+    case 'New':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseNew;
+        break;
+    case 'Bootstrap:Waiting':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapWaiting;
+        break;
+    case 'Bootstrap:Receiving':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapReceiving;
+        break;
+    case 'Bootstrap:Failed':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseBootstrapFailed;
+        break;
+    case 'Running':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseRunning;
+        break;
+    case 'Paused':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhasePaused;
+        break;
+    case 'Failover':
+        targetPhase = ZenkoDrSinkPhases.ZenkoDRSinkPhaseFailover;
+        break;
+    default:
+        throw new Error(`Unknown state ${state}`);
+    }
+
+    await waitForPhase(this, 'sink', targetPhase);
+});
+
+Then('the DR source should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) {
+    let targetPhase;
+    switch (state) {
case 'New': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseNew; + break; + case 'Bootstrap:Waiting': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapWaiting; + break; + case 'Bootstrap:Sending': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapSending; + break; + case 'Bootstrap:Failed': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseBootstrapFailed; + break; + case 'Running': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhaseRunning; + break; + case 'Paused': + targetPhase = ZenkoDrSourcePhases.ZenkoDRSourcePhasePaused; + break; + default: + throw new Error(`Unknown state ${state}`); + } + + await waitForPhase(this, 'source', targetPhase); +}); + +Then('object {string} should be {string} and have the storage class {string} on DR site', + async function (this: Zenko, objName: string, objectTransitionStatus: string, storageClass: string) { + this.resetCommand(); + // use source account: it should have been replicated + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + + await verifyObjectLocation.call(this, objName, objectTransitionStatus, storageClass); + }); + +Then('the kafka DR volume exists', { timeout: 60000 }, async function (this: Zenko) { + const volumeClaim = await getPVCFromLabel(this, 'kafka_cr', 'end2end-pra-sink-base-queue'); + this.logger.debug('kafka volume claim', { volumeClaim }); + assert(volumeClaim); + const volume = await this.zenkoDrCtl?.volumeGet({ + volumeName: volumeClaim.spec?.volumeName, + timeout: '60s', + }); + this.logger.debug('kafka volume from drctl', { volume }); + assert(volume); + const volumeParsed = safeJsonParse<{'volume phase': string, 'volume name': string}>(volume); + if (!volumeParsed.ok) { + throw new Error('Failed to parse volume'); + } + assert(volumeParsed.result!['volume phase'] === 'Bound'); +}); + +When('I uninstall DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.uninstall({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + +Then('the DR custom resources should be deleted', { timeout: 360000 }, async function (this: Zenko) { + const drSource = await getDRSource(this); + const drSink = await getDRSink(this); + + assert(!drSource); + assert(!drSink); +}); + +Given('access keys for the replicated account', { timeout: 360000 }, async () => { + Identity.useIdentity(IdentityEnum.ADMIN, Zenko.sites['sink'].adminIdentityName); + // The account is the one from the source cluster: it replaces the sink account + // after the bootstrap phases + const targetAccount = Zenko.sites['source'].accountName; + + let account; + let remaining = Constants.MAX_ACCOUNT_CHECK_RETRIES; + account = await SuperAdmin.getAccount({ + accountName: targetAccount, + }); + while (!account && remaining > 0) { + await Utils.sleep(500); + account = await SuperAdmin.getAccount({ + accountName: targetAccount, + }); + remaining--; + } + assert(account); + + const credentials = await SuperAdmin.generateAccountAccessKey({ + accountName: targetAccount, + }); + + Identity.addIdentity(IdentityEnum.ACCOUNT, `${targetAccount}-replicated`, credentials, undefined, true); +}); diff --git a/tests/ctst/steps/utils/kubernetes.ts b/tests/ctst/steps/utils/kubernetes.ts index 16cea40f3f..6a31359b62 100644 --- a/tests/ctst/steps/utils/kubernetes.ts +++ b/tests/ctst/steps/utils/kubernetes.ts @@ -25,14 +25,14 @@ export function 
createKubeBatchClient(world: Zenko) {
     if (!KubernetesHelper.clientBatch) {
         KubernetesHelper.init(world.parameters);
     }
-    return KubernetesHelper.clientBatch;
+    return KubernetesHelper.clientBatch!;
 }
 
 export function createKubeCoreClient(world: Zenko) {
     if (!KubernetesHelper.clientBatch) {
         KubernetesHelper.init(world.parameters);
     }
-    return KubernetesHelper.clientCore;
+    return KubernetesHelper.clientCore!;
 }
 
 export function createKubeWatchClient(world: Zenko) {
@@ -67,7 +67,7 @@ export async function createJobAndWaitForCompletion(world: Zenko, jobName: strin
     const batchClient = createKubeBatchClient(world);
     const watchClient = createKubeWatchClient(world);
     try {
-        const cronJob = await batchClient!.readNamespacedCronJob(jobName, 'default');
+        const cronJob = await batchClient.readNamespacedCronJob(jobName, 'default');
         const cronJobSpec = cronJob.body.spec?.jobTemplate.spec;
         const job = new V1Job();
         const metadata = new V1ObjectMeta();
@@ -85,7 +85,7 @@ export async function createJobAndWaitForCompletion(world: Zenko, jobName: strin
     }
     job.metadata = metadata;
 
-        const response = await batchClient!.createNamespacedJob('default', job);
+        const response = await batchClient.createNamespacedJob('default', job);
         world.logger.debug('job created', {
             job: response.body.metadata,
         });
@@ -293,3 +293,112 @@ export async function waitForDataServicesToStabilize(world: Zenko, timeout = 15
 
     return allRunning;
 }
+
+export async function displayCRStatus(world: Zenko, namespace = 'default') {
+    const zenkoClient = createKubeCustomObjectClient(world);
+
+    const zenkoCR = await zenkoClient.getNamespacedCustomObject(
+        'zenko.io',
+        'v1alpha2',
+        namespace,
+        'zenkos',
+        'end2end',
+    ).catch(err => {
+        world.logger.error('Error getting Zenko CR', {
+            err: err as unknown,
+        });
+        return null;
+    });
+
+    if (!zenkoCR) {
+        return;
+    }
+
+    world.logger.debug('Checking Zenko CR status', {
+        zenkoCR,
+    });
+}
+
+export async function getDRSource(world: Zenko, namespace = 'default') {
+    const zenkoClient = createKubeCustomObjectClient(world);
+
+    const zenkoCR = await zenkoClient.getNamespacedCustomObject(
+        'zenko.io',
+        'v1alpha1',
+        namespace,
+        'zenkodrsources',
+        'end2end-source',
+    ).catch(err => {
+        world.logger.debug('Error getting Zenko CR', {
+            err: err as unknown,
+        });
+    });
+
+    return zenkoCR?.body;
+}
+
+export async function getDRSink(world: Zenko, namespace = 'default') {
+    const zenkoClient = createKubeCustomObjectClient(world);
+
+    const zenkoCR = await zenkoClient.getNamespacedCustomObject(
+        'zenko.io',
+        'v1alpha1',
+        namespace,
+        'zenkodrsinks',
+        'end2end-pra-sink',
+    ).catch(err => {
+        world.logger.debug('Error getting Zenko CR', {
+            err: err as unknown,
+        });
+    });
+
+    return zenkoCR?.body;
+}
+
+export async function getPVCFromLabel(world: Zenko, label: string, value: string, namespace = 'default') {
+    const coreClient = createKubeCoreClient(world);
+
+    const pvcList = await coreClient.listNamespacedPersistentVolumeClaim(namespace);
+    const pvc = pvcList.body.items.find(pvc => pvc.metadata?.labels?.[label] === value);
+
+    return pvc;
+}
+
+export async function createSecret(
+    world: Zenko,
+    secretName: string,
+    data: Record<string, string>,
+    namespace = 'default',
+) {
+    const coreClient = createKubeCoreClient(world);
+
+    const secret = {
+        apiVersion: 'v1',
+        kind: 'Secret',
+        metadata: {
+            name: secretName,
+        },
+        data,
+    };
+
+    try {
+        await coreClient.deleteNamespacedSecret(secretName, namespace);
+    } catch (err) {
+        world.logger.debug('Secret does not exist, creating new', {
+            secretName,
+            namespace,
}); + } + + try { + const response = await coreClient.createNamespacedSecret(namespace, secret); + return response; + } catch (err) { + world.logger.error('Error creating secret', { + namespace, + secret, + err, + }); + throw err; + } +} From 137e69dd91fc1971c1e137cb5f7a9f1b131be191 Mon Sep 17 00:00:00 2001 From: williamlardier Date: Mon, 29 Jul 2024 10:23:54 +0200 Subject: [PATCH 04/25] Complete PRA logic Issue: ZENKO-4773 --- .github/workflows/end2end.yaml | 15 +++++++++++++-- tests/ctst/common/common.ts | 27 +++++++++++++++------------ tests/ctst/features/pra.feature | 20 +++++++++++++++++--- tests/ctst/steps/pra.ts | 25 ++++++++++++++++++++----- tests/ctst/steps/utils/utils.ts | 24 +++++++++++++++++++----- 5 files changed, 84 insertions(+), 27 deletions(-) diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml index 6c234eb860..7c96ae1a8e 100644 --- a/.github/workflows/end2end.yaml +++ b/.github/workflows/end2end.yaml @@ -387,7 +387,7 @@ jobs: context: ./tests/ctst build-args: | CTST_TAG=${{ env.CTST_TAG }} - DRCTL_TAG=v1.0.1 + DRCTL_TAG=afbb04a57973e391b4cbcaacca444a4d5da0a540 tags: "${{ env.E2E_CTST_IMAGE_NAME }}:${{ env.E2E_IMAGE_TAG }}" cache-from: type=gha,scope=end2end-ctst cache-to: type=gha,mode=max,scope=end2end-ctst @@ -458,7 +458,7 @@ jobs: if: always() end2end-pra: - needs: [build-kafka, build-test-image, check-dashboard-versions, lint-and-build-ctst] + needs: [build-kafka, check-dashboard-versions, lint-and-build-ctst] runs-on: ubuntu-22.04-16core env: GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }} @@ -494,9 +494,20 @@ jobs: - name: Configure E2E test environment run: bash configure-e2e-ctst.sh working-directory: ./.github/scripts/end2end + - name: "Debug: SSH to runner" + uses: scality/actions/action-ssh-to-runner@1.5.0 + with: + tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }} + tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }} + tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }} + tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }} + timeout-minutes: 30 - name: Run CTST end to end tests run: bash run-e2e-pra-ctst.sh working-directory: ./.github/scripts/end2end + - name: debug + run: sleep 7000 + if: always() - name: Archive artifact logs and data uses: ./.github/actions/archive-artifacts env: diff --git a/tests/ctst/common/common.ts b/tests/ctst/common/common.ts index 77cde0b62d..944c7d6fdb 100644 --- a/tests/ctst/common/common.ts +++ b/tests/ctst/common/common.ts @@ -1,6 +1,6 @@ import { ListObjectVersionsOutput } from '@aws-sdk/client-s3'; import { Given, setDefaultTimeout, Then, When } from '@cucumber/cucumber'; -import { Constants, S3, Utils } from 'cli-testing'; +import { Constants, Identity, IdentityEnum, S3, Utils } from 'cli-testing'; import Zenko from 'world/Zenko'; import { safeJsonParse } from './utils'; import assert from 'assert'; @@ -11,6 +11,7 @@ import { runActionAgainstBucket, getObjectNameWithBackendFlakiness, verifyObjectLocation, + restoreObject, } from 'steps/utils/utils'; import { ActionPermissionsType } from 'steps/bucket-policies/utils'; @@ -130,6 +131,18 @@ Given('{int} objects {string} of size {int} bytes', await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes); }); +Given('{int} objects {string} of size {int} bytes on {string} site', + async function (this: Zenko, numberObjects: number, objectName: string, sizeBytes: number, site: string) { + this.resetCommand(); + + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, 
`${Zenko.sites['source'].accountName}-replicated`);
+        } else {
+            Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName);
+        }
+        await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes);
+    });
+
 Given('{int} objects {string} of size {int} bytes with user metadata {string}',
     async function (this: Zenko, numberObjects: number, objectName: string, sizeBytes: number, userMD: string) {
         await addMultipleObjects.call(this, numberObjects, objectName, sizeBytes, userMD);
@@ -220,17 +233,7 @@ Given('a transition workflow to {string} location', async function (this: Zenko,
 });
 
 When('i restore object {string} for {int} days', async function (this: Zenko, objectName: string, days: number) {
-    const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName');
-    this.resetCommand();
-    this.addCommandParameter({ bucket: this.getSaved('bucketName') });
-    this.addCommandParameter({ key: objName });
-    const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
-    if (versionId) {
-        this.addCommandParameter({ versionId });
-    }
-    this.addCommandParameter({ restoreRequest: `Days=${days}` });
-    const result = await S3.restoreObject(this.getCommandParameters());
-    this.setResult(result);
+    await restoreObject.call(this, objectName, days);
 });
 
 // wait for object to transition to a location or get restored from it
diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature
index 8d99474626..8315f2af2e 100644
--- a/tests/ctst/features/pra.feature
+++ b/tests/ctst/features/pra.feature
@@ -8,7 +8,7 @@ Feature: PRA operations
   Scenario Outline: PRA
     Given a "<versioningConfiguration>" bucket
     And a transition workflow to "e2e-cold" location
-    And <objectCount> objects "obj" of size <objectSize> bytes
+    And <objectCount> objects "obj" of size <objectSize> bytes on "Primary" site
     Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold"
     And object "obj-2" should be "transitioned" and have the storage class "e2e-cold"
     And dmf volume should contain objects
@@ -16,13 +16,27 @@ Feature: PRA operations
     Then the DR source should be in phase "Running"
     And the DR sink should be in phase "Running"
     Given access keys for the replicated account
-    Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on DR site
+
+    Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    And object "obj-2" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    Given <objectCount> objects "obj2" of size <objectSize> bytes on "Primary" site
+    Then object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+    And object "obj2-2" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+    Then object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    And object "obj2-2" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    When i restore object "obj-1" for 2 days on "Primary" site
+    Then object "obj-1" should be "restored" and have the storage class "e2e-cold" on "Primary" site
+    And object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    When i restore object "obj2-1" for 2 days on "DR" site
+    Then object "obj2-1" should be "restored" and have the storage class "e2e-cold" on "DR" site
+    And object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+
+    And the kafka DR volume exists
     When I uninstall DR
     Then the DR custom resources should be deleted
 
   Examples:
| versioningConfiguration | objectCount | objectSize | - | Non versioned | 2 | 100 | + | Versioned | 2 | 100 | diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts index 26c498e4d1..df5ed81c67 100644 --- a/tests/ctst/steps/pra.ts +++ b/tests/ctst/steps/pra.ts @@ -9,6 +9,7 @@ import { getPVCFromLabel, } from './utils/kubernetes'; import { + restoreObject, verifyObjectLocation, } from 'steps/utils/utils'; import { Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing'; @@ -217,12 +218,15 @@ Then('the DR source should be in phase {string}', { timeout: 360000 }, async fun await waitForPhase(this, 'source', targetPhase); }); -Then('object {string} should be {string} and have the storage class {string} on DR site', - async function (this: Zenko, objName: string, objectTransitionStatus: string, storageClass: string) { +Then('object {string} should be {string} and have the storage class {string} on {string} site', + { timeout: 360000 }, + async function (this: Zenko, objName: string, objectTransitionStatus: string, storageClass: string, site: string) { this.resetCommand(); - // use source account: it should have been replicated - Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); - + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); + } else { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + } await verifyObjectLocation.call(this, objName, objectTransitionStatus, storageClass); }); @@ -288,3 +292,14 @@ Given('access keys for the replicated account', { timeout: 360000 }, async () => Identity.addIdentity(IdentityEnum.ACCOUNT, `${targetAccount}-replicated`, credentials, undefined, true); }); + +When('i restore object {string} for {int} days on {string} site', + async function (this: Zenko, objectName: string, days: number, site: string) { + this.resetCommand(); + if (site === 'DR') { + Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); + } else { + Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); + } + await restoreObject.call(this, objectName, days); + }); diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts index 4b184c2264..ef0ae59038 100644 --- a/tests/ctst/steps/utils/utils.ts +++ b/tests/ctst/steps/utils/utils.ts @@ -267,7 +267,10 @@ async function verifyObjectLocation(this: Zenko, objectName: string, let conditionOk = false; while (!conditionOk) { const res = await S3.headObject(this.getCommandParameters()); - if (res.err) { + if (res.err?.includes('NotFound')) { + await Utils.sleep(1000); + continue; + } else if (res.err) { break; } assert(res.stdout); @@ -283,10 +286,6 @@ async function verifyObjectLocation(this: Zenko, objectName: string, if (objectTransitionStatus == 'restored') { const isRestored = !!parsed.result?.Restore && parsed.result.Restore.includes('ongoing-request="false", expiry-date='); - // if restore didn't get initiated fail immediately - const isPendingRestore = !!parsed.result?.Restore && - parsed.result.Restore.includes('ongoing-request="true"'); - assert(isRestored || isPendingRestore, 'Restore didn\'t get initiated'); conditionOk = conditionOk && isRestored; } else if (objectTransitionStatus == 'cold') { conditionOk = conditionOk && !parsed.result?.Restore; @@ -296,6 +295,20 @@ async function verifyObjectLocation(this: Zenko, objectName: string, assert(conditionOk); } +async function restoreObject(this: Zenko, 
objectName: string, days: number) {
+    const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved('objectName');
+    this.resetCommand();
+    this.addCommandParameter({ bucket: this.getSaved('bucketName') });
+    this.addCommandParameter({ key: objName });
+    const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
+    if (versionId) {
+        this.addCommandParameter({ versionId });
+    }
+    this.addCommandParameter({ restoreRequest: `Days=${days}` });
+    const result = await S3.restoreObject(this.getCommandParameters());
+    this.setResult(result);
+}
+
 /**
  * @param {Zenko} this world object
  * @param {string} objectName object name
@@ -337,4 +350,5 @@
     emptyVersionedBucket,
     verifyObjectLocation,
     getObjectNameWithBackendFlakiness,
+    restoreObject,
 };

From ec1d4277cfc3cc839fae8832edf688391c238cf9 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Tue, 30 Jul 2024 11:09:40 +0200
Subject: [PATCH 05/25] Configure e2e

---
 .github/actions/archive-artifacts/action.yaml | 12 ---
 .github/scripts/end2end/common.sh | 1 -
 .github/scripts/end2end/configs/keycloak_user.json | 2 +-
 .github/scripts/end2end/configure-e2e-pra.sh | 20 +++++
 .github/scripts/end2end/keycloak-helper.sh | 5 +-
 .github/workflows/end2end.yaml | 28 +++---
 solution/deps.yaml | 2 +-
 tests/ctst/common/common.ts | 27 +++++++++++++++------------
 tests/ctst/features/pra.feature | 49 ++++++----
 tests/ctst/package.json | 2 +-
 tests/ctst/steps/dr/drctl.ts | 38 ++++++++
 tests/ctst/steps/pra.ts | 90 +++++++++++++----
 tests/ctst/steps/utils/utils.ts | 32 +++++++
 tests/ctst/world/Zenko.ts | 8 +-
 tests/ctst/yarn.lock | 4 +-
 15 files changed, 225 insertions(+), 97 deletions(-)
 create mode 100644 .github/scripts/end2end/configure-e2e-pra.sh

diff --git a/.github/actions/archive-artifacts/action.yaml b/.github/actions/archive-artifacts/action.yaml
index cc104e67b8..375a56dc08 100644
--- a/.github/actions/archive-artifacts/action.yaml
+++ b/.github/actions/archive-artifacts/action.yaml
@@ -41,15 +41,3 @@ runs:
           sh -c "kubectl exec -i -n ${NAMESPACE} kcat -- \
             kcat -L -b ${KAFKA_SERVICE} -t {} -C -o beginning -e -q -J \
             > /tmp/artifacts/data/${STAGE}/kafka-messages-{}.log"
-
-        KAFKA_SERVICE_PRA=$(kubectl get services -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME:-end2end-pra}-sink-base-queue -o jsonpath='{.items[0].metadata.name}')
-        KAFKA_PRA=$(kubectl get pods -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME_PRA:-end2end-pra}-sink-base-queue -o jsonpath='{.items[0].metadata.name}')
-        kubectl exec -in ${NAMESPACE} ${KAFKA_PRA} -c kafka -- \
-          env KAFKA_OPTS= kafka-topics.sh --bootstrap-server :9092 --list \
-          > /tmp/artifacts/data/${STAGE}/kafka-topics-pra.log
-        kubectl run -n ${NAMESPACE} k2cat --image=edenhill/kcat:1.7.1 --restart=Never --command -- sleep 300
-        kubectl wait -n ${NAMESPACE} pod k2cat --for=condition=ready
-        cat /tmp/artifacts/data/${STAGE}/kafka-topics-pra.log | grep -v '^__' | xargs -P 15 -I {} \
-          sh -c "kubectl exec -i -n ${NAMESPACE} k2cat -- \
-            kcat -L -b ${KAFKA_SERVICE_PRA} -t {} -C -o beginning -e -q -J \
-            > /tmp/artifacts/data/${STAGE}/kafka-messages-pra-{}.log"
diff --git a/.github/scripts/end2end/common.sh b/.github/scripts/end2end/common.sh
index a8fc9d4246..41d6da2269 100644
--- a/.github/scripts/end2end/common.sh
+++ b/.github/scripts/end2end/common.sh
@@ -8,4 +8,3 @@ get_token() {
         https://localhost/auth/realms/${OIDC_REALM}/protocol/openid-connect/token | \
         jq -cr '.id_token'
 }
-
diff --git a/.github/scripts/end2end/configs/keycloak_user.json b/.github/scripts/end2end/configs/keycloak_user.json
index 22d48e9662..7dfe2e1126 100644
---
a/.github/scripts/end2end/configs/keycloak_user.json +++ b/.github/scripts/end2end/configs/keycloak_user.json @@ -5,7 +5,7 @@ "emailVerified": true, "firstName": "${OIDC_FIRST_NAME}", "lastName": "${OIDC_LAST_NAME}", - "email": "e2e@zenko.local", + "email": "${OIDC_EMAIL}", "attributes": { "instanceIds": [ "${INSTANCE_ID}" diff --git a/.github/scripts/end2end/configure-e2e-pra.sh b/.github/scripts/end2end/configure-e2e-pra.sh new file mode 100644 index 0000000000..ecf15d808d --- /dev/null +++ b/.github/scripts/end2end/configure-e2e-pra.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -exu + +. "$(dirname $0)/common.sh" + +OIDC_USERNAME=${OIDC_USERNAME:-zenko-end2end-pra} +OIDC_EMAIL=${OIDC_EMAIL:-e2e-pra@zenko.local} + +INSTANCE_ID=$(kubectl get zenko end2end-pra -o jsonpath='{.status.instanceID}') +TOKEN=$(get_token) + +LOCATION_PARAMS='{"name":"e2e-cold","locationType":"location-dmf-v1","details":{"endpoint":"ws://mock-sorbet:5001/session","username":"user1","password":"pass1","repoId":["233aead6-1d7b-4647-a7cf-0d3280b5d1d7","81e78de8-df11-4acd-8ad1-577ff05a68db"],"nsId":"65f9fd61-42fe-4a68-9ac0-6ba25311cc85"}}' + +curl -k -X POST \ +-H "Host: management.dr.zenko.local" \ +-H "X-Authentication-Token: $TOKEN" \ +-H "Content-Type: application/json" \ +-d "$LOCATION_PARAMS" \ +"https://localhost/api/v1/config/$INSTANCE_ID/location" \ No newline at end of file diff --git a/.github/scripts/end2end/keycloak-helper.sh b/.github/scripts/end2end/keycloak-helper.sh index 6f9d08eb2d..fe943643b7 100755 --- a/.github/scripts/end2end/keycloak-helper.sh +++ b/.github/scripts/end2end/keycloak-helper.sh @@ -6,6 +6,7 @@ DIR=$(dirname "$0") COMMAND=${1:-''} NAMESPACE=${2:-default} +ZENKO_NAME=${3:-end2end} KEYCLOAK_EXEC="kubectl -n ${NAMESPACE} exec -i keycloak-0 --" @@ -28,7 +29,9 @@ case $COMMAND in "add-user") refresh_creds - export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko -o jsonpath='{.items[0].status.instanceID}'` + export INSTANCE_ID=`kubectl -n ${NAMESPACE} get zenko ${ZENKO_NAME} -o jsonpath='{.status.instanceID}'` + + export OIDC_EMAIL=${OIDC_EMAIL:-"e2e@zenko.local"} envsubst < $DIR/configs/keycloak_user.json | \ ${KEYCLOAK_EXEC} /opt/jboss/keycloak/bin/kcadm.sh create users -r ${OIDC_REALM} -f - diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml index 7c96ae1a8e..cc5b36b251 100644 --- a/.github/workflows/end2end.yaml +++ b/.github/workflows/end2end.yaml @@ -387,7 +387,7 @@ jobs: context: ./tests/ctst build-args: | CTST_TAG=${{ env.CTST_TAG }} - DRCTL_TAG=afbb04a57973e391b4cbcaacca444a4d5da0a540 + DRCTL_TAG=v1.0.2 tags: "${{ env.E2E_CTST_IMAGE_NAME }}:${{ env.E2E_IMAGE_TAG }}" cache-from: type=gha,scope=end2end-ctst cache-to: type=gha,mode=max,scope=end2end-ctst @@ -491,23 +491,25 @@ jobs: ZENKO_MONGODB_SHARDED: "true" ZENKO_MONGODB_DATABASE: "pradb" working-directory: ./.github/scripts/end2end - - name: Configure E2E test environment + - name: Add Keycloak pra user and assign StorageManager role + shell: bash + run: bash keycloak-helper.sh add-user default end2end-pra + env: + OIDC_USERNAME: 'zenko-end2end-pra' + OIDC_EMAIL: 'e2e-pra@zenko.local' + working-directory: ./.github/scripts/end2end + - name: Configure E2E PRA test environment + run: bash configure-e2e-pra.sh + working-directory: ./.github/scripts/end2end + env: + OIDC_USERNAME: 'zenko-end2end-pra' + OIDC_EMAIL: 'e2e-pra@zenko.local' + - name: Configure E2E CTST test environment run: bash configure-e2e-ctst.sh working-directory: ./.github/scripts/end2end - - name: "Debug: SSH to runner" - uses: 
scality/actions/action-ssh-to-runner@1.5.0
-        with:
-          tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
-          tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
-          tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
-          tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
-        timeout-minutes: 30
       - name: Run CTST end to end tests
         run: bash run-e2e-pra-ctst.sh
         working-directory: ./.github/scripts/end2end
-      - name: debug
-        run: sleep 7000
-        if: always()
       - name: Archive artifact logs and data
         uses: ./.github/actions/archive-artifacts
         env:
diff --git a/solution/deps.yaml b/solution/deps.yaml
index d219b84f20..0b96963e9f 100644
--- a/solution/deps.yaml
+++ b/solution/deps.yaml
@@ -98,7 +98,7 @@ sorbet:
 drctl:
   sourceRegistry: ghcr.io/scality
   image: zenko-drctl
-  tag: afbb04a57973e391b4cbcaacca444a4d5da0a540
+  tag: v1.0.2
   envsubst: DRCTL_TAG
 # To be enabled back when utapi is used in Zenko 2.x
 # utapi:
diff --git a/tests/ctst/common/common.ts b/tests/ctst/common/common.ts
index 944c7d6fdb..5c991ab239 100644
--- a/tests/ctst/common/common.ts
+++ b/tests/ctst/common/common.ts
@@ -12,6 +12,7 @@ import {
     getObjectNameWithBackendFlakiness,
     verifyObjectLocation,
     restoreObject,
+    addTransitionWorkflow,
 } from 'steps/utils/utils';
 import { ActionPermissionsType } from 'steps/bucket-policies/utils';
 
@@ -203,33 +204,7 @@ Then('object {string} should have the user metadata with key {string} and value
 
 // add a transition workflow to a bucket
 Given('a transition workflow to {string} location', async function (this: Zenko, location: string) {
-    let conditionOk = false;
-    this.resetCommand();
-    this.addCommandParameter({ bucket: this.getSaved('bucketName') });
-    const lifecycleConfiguration = JSON.stringify({
-        Rules: [
-            {
-                Status: 'Enabled',
-                Prefix: '',
-                Transitions: [
-                    {
-                        Days: 0,
-                        StorageClass: location,
-                    },
-                ],
-            },
-        ],
-    });
-    this.addCommandParameter({
-        lifecycleConfiguration,
-    });
-    const commandParameters = this.getCommandParameters();
-    while (!conditionOk) {
-        const res = await S3.putBucketLifecycleConfiguration(commandParameters);
-        conditionOk = res.err === null;
-        // Wait for the transition to be accepted because the deployment of the location's pods can take some time
-        await Utils.sleep(5000);
-    }
+    await addTransitionWorkflow.call(this, location);
 });
 
 When('i restore object {string} for {int} days', async function (this: Zenko, objectName: string, days: number) {
diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature
index 8315f2af2e..af431f71e6 100644
--- a/tests/ctst/features/pra.feature
+++ b/tests/ctst/features/pra.feature
@@ -5,38 +5,53 @@ Feature: PRA operations
   @Dmf
   @PRA
   @ColdStorage
-  Scenario Outline: PRA
+  Scenario Outline: PRA (nominal case)
+    # Prepare objects in the primary site
     Given a "<versioningConfiguration>" bucket
     And a transition workflow to "e2e-cold" location
     And <objectCount> objects "obj" of size <objectSize> bytes on "Primary" site
     Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold"
     And object "obj-2" should be "transitioned" and have the storage class "e2e-cold"
     And dmf volume should contain objects
+    # Deploy PRA
     Given a DR installed
     Then the DR source should be in phase "Running"
     And the DR sink should be in phase "Running"
+    Then the kafka DR volume exists
+    # Check that objects are transitioned in the DR site
     Given access keys for the replicated account
-    Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
-    And object "obj-2" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    Then object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    And object "obj-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
+
+    # Test again the transition workflow
     Given <objectCount> objects "obj2" of size <objectSize> bytes on "Primary" site
-    Then object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
-    And object "obj2-2" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
-    Then object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
-    And object "obj2-2" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+    And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site
+    Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
+    And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
     When i restore object "obj-1" for 2 days on "Primary" site
-    Then object "obj-1" should be "restored" and have the storage class "e2e-cold" on "Primary" site
-    And object "obj-1" should be "transitioned" and have the storage class "e2e-cold" on "DR" site
-    When i restore object "obj2-1" for 2 days on "DR" site
-    Then object "obj2-1" should be "restored" and have the storage class "e2e-cold" on "DR" site
-    And object "obj2-1" should be "transitioned" and have the storage class "e2e-cold" on "Primary" site
-
-    And the kafka DR volume exists
-    When I uninstall DR
-    Then the DR custom resources should be deleted
+    Then object "obj-1" should "" be "restored" and have the storage class "e2e-cold" on "Primary" site
+    And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
 
   Examples:
     | versioningConfiguration | objectCount | objectSize |
     | Versioned | 2 | 100 |
-
+  @2.6.0
+  @PreMerge
+  @Dmf
+  @PRA
+  @ColdStorage
+  Scenario Outline: PRA (failure case)
+    # Fail to deploy PRA
+    Given a DR failing to be installed
+    Then the DR source should be in phase "Bootstrap:Failed"
+    And the DR sink should be in phase "Bootstrap:Failed"
+    # Expect the operator to perform Reinit and to go back to the Waiting state
+    Then the DR source should be in phase "Bootstrap:Waiting"
+    And the DR sink should be in phase "Bootstrap:Waiting"
+    # Retry to deploy PRA
+    Given a DR installed
+    Then the DR source should be in phase "Running"
+    And the DR sink should be in phase "Running"
+    Then the kafka DR volume exists
\ No newline at end of file
diff --git a/tests/ctst/package.json b/tests/ctst/package.json
index 696d32f838..cd5da7764a 100644
--- a/tests/ctst/package.json
+++ b/tests/ctst/package.json
@@ -26,7 +26,7 @@
     "@typescript-eslint/eslint-plugin": "^5.45.0",
     "@typescript-eslint/parser": "^5.45.0",
     "babel-jest": "^29.3.1",
-    "cli-testing": "github:scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d",
+    "cli-testing": "github:scality/cli-testing.git#v1.2.0",
     "eslint": "^8.28.0"
   },
   "scripts": {
diff --git a/tests/ctst/steps/dr/drctl.ts b/tests/ctst/steps/dr/drctl.ts
index 6405a93a1e..32a70c90a8 100644
--- a/tests/ctst/steps/dr/drctl.ts
+++ b/tests/ctst/steps/dr/drctl.ts
@@ -127,6 +127,36 @@ type StatusConfig = {
     output?: string;
 };
 
+type ReplicationPauseConfig = {
+    sourceKubeconfigPath?: string;
+    sourceKubeconfigData?: string;
+    sinkKubeconfigPath?: string;
+    sinkKubeconfigData?: string;
+    sourceZenkoInstance?: string;
+    sourceZenkoNamespace?: string;
+    sinkZenkoInstance?: string;
+    sinkZenkoNamespace?: string;
+    sourceZenkoDrInstance?: string;
+    sinkZenkoDrInstance?: string;
+    wait?: boolean;
+    timeout?: string;
+};
+
+type ReplicationResumeConfig = {
+    sourceKubeconfigPath?: string;
+    sourceKubeconfigData?: string;
+    sinkKubeconfigPath?: string;
+    sinkKubeconfigData?: string;
+    sourceZenkoInstance?: string;
+    sourceZenkoNamespace?: string;
+    sinkZenkoInstance?: string;
+    sinkZenkoNamespace?: string;
+    sourceZenkoDrInstance?: string;
+    sinkZenkoDrInstance?: string;
+    wait?: boolean;
+    timeout?: string;
+};
+
 /**
  * Helper class to run Drctl tool
  */
@@ -181,6 +211,14 @@ export default class ZenkoDrctl {
         return this.runCommand('volume get', this.paramToCli(config));
     }
 
+    async replicationPause(config: ReplicationPauseConfig) {
+        return this.runCommand('replication pause', this.paramToCli(config));
+    }
+
+    async replicationResume(config: ReplicationResumeConfig) {
+        return this.runCommand('replication resume', this.paramToCli(config));
+    }
+
     paramToCli(params: Record<string, unknown>): string {
         const command: string[] = [];
         Object.keys(params).forEach(key => {
diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts
index df5ed81c67..a6b62b752e 100644
--- a/tests/ctst/steps/pra.ts
+++ b/tests/ctst/steps/pra.ts
@@ -8,7 +8,7 @@ import {
     getDRSource,
     getPVCFromLabel,
 } from './utils/kubernetes';
-import {
+import {
     restoreObject,
     verifyObjectLocation,
 } from 'steps/utils/utils';
 import { Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing';
@@ -48,6 +48,23 @@ interface DrState {
     };
 }
 
+async function installPRA(world: Zenko, sinkS3Endpoint = 'http://s3.zenko.local') {
+    return world.zenkoDrCtl?.install({
+        sourceZenkoDrInstance: 'end2end-source',
+        sinkZenkoDrInstance: 'end2end-pra-sink',
+        kafkaPersistenceSize: '1Gi',
+        kafkaPersistenceStorageClassName: 'standard',
+        locations: 'e2e-cold', // comma-separated list
+        s3Bucket: 'dump-db',
+        sinkZenkoInstance: 'end2end-pra',
+        sinkZenkoNamespace: 'default',
+        sourceZenkoInstance: 'end2end',
+        sourceZenkoNamespace: 'default',
+        sourceS3Endpoint: 'http://s3.zenko.local',
+        sinkS3Endpoint,
+    });
+}
+
 export function preparePRA(world: Zenko) {
     // eslint-disable-next-line no-param-reassign
     world.zenkoDrCtl = new ZenkoDrctl(world);
@@ -128,7 +145,6 @@ async function waitForPhase(
         if (phase === state) {
             return true;
         }
-
         await Utils.sleep(1000);
     }
 
@@ -142,24 +158,22 @@ Given('a DR installed', { timeout: 130000 }, async function (this: Zenko) {
         accessKey: Buffer.from(credentials.accessKeyId).toString('base64'),
         secretAccessKey:
Buffer.from(credentials.secretAccessKey).toString('base64'), }); + await installPRA(this, 'http://s3.dr.zenko.local'); return; }); -Then('the DR sink should be in phase {string}', { timeout: 360000 }, async function (this: Zenko,state: string) { +Then('the DR sink should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) { let targetPhase; switch (state) { case 'New': @@ -218,16 +232,32 @@ Then('the DR source should be in phase {string}', { timeout: 360000 }, async fun await waitForPhase(this, 'source', targetPhase); }); -Then('object {string} should be {string} and have the storage class {string} on {string} site', +Then('object {string} should {string} be {string} and have the storage class {string} on {string} site', { timeout: 360000 }, - async function (this: Zenko, objName: string, objectTransitionStatus: string, storageClass: string, site: string) { + async function ( + this: Zenko, + objName: string, + isVerb: string, + objectTransitionStatus: string, + storageClass: string, + site: string) { this.resetCommand(); if (site === 'DR') { Identity.useIdentity(IdentityEnum.ACCOUNT, `${Zenko.sites['source'].accountName}-replicated`); } else { Identity.useIdentity(IdentityEnum.ACCOUNT, Zenko.sites['source'].accountName); } - await verifyObjectLocation.call(this, objName, objectTransitionStatus, storageClass); + try { + await verifyObjectLocation.call(this, objName, objectTransitionStatus, storageClass); + if (isVerb === 'not') { + throw new Error(`Object ${objName} should not be ${objectTransitionStatus}`); + } + } catch (err) { + if (isVerb !== 'not') { + throw err; + } + assert(err); + } }); Then('the kafka DR volume exists', { timeout: 60000 }, async function (this: Zenko) { @@ -247,6 +277,28 @@ Then('the kafka DR volume exists', { timeout: 60000 }, async function (this: Zen assert(volumeParsed.result!['volume phase'] === 'Bound'); }); +When('I pause the DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.replicationPause({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + +When('I resume the DR', { timeout: 360000 }, async function (this: Zenko) { + await this.zenkoDrCtl?.replicationResume({ + sourceZenkoDrInstance: 'end2end-source', + sinkZenkoDrInstance: 'end2end-pra-sink', + sinkZenkoNamespace: 'default', + sourceZenkoNamespace: 'default', + wait: true, + timeout: '6m', + }); +}); + When('I uninstall DR', { timeout: 360000 }, async function (this: Zenko) { await this.zenkoDrCtl?.uninstall({ sourceZenkoDrInstance: 'end2end-source', diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts index ef0ae59038..88f01b4ce2 100644 --- a/tests/ctst/steps/utils/utils.ts +++ b/tests/ctst/steps/utils/utils.ts @@ -253,6 +253,37 @@ async function emptyVersionedBucket(world: Zenko) { })); } +async function addTransitionWorkflow(this: Zenko, location: string, enabled = true) { + let conditionOk = false; + this.resetCommand(); + this.addCommandParameter({ bucket: this.getSaved('bucketName') }); + const enabledStr = enabled ? 
'Enabled' : 'Disabled'; + const lifecycleConfiguration = JSON.stringify({ + Rules: [ + { + Status: enabledStr, + Prefix: '', + Transitions: [ + { + Days: 0, + StorageClass: location, + }, + ], + }, + ], + }); + this.addCommandParameter({ + lifecycleConfiguration, + }); + const commandParameters = this.getCommandParameters(); + while (!conditionOk) { + const res = await S3.putBucketLifecycleConfiguration(commandParameters); + conditionOk = res.err === null; + // Wait for the transition to be accepted because the deployment of the location's pods can take some time + await Utils.sleep(5000); + } +} + async function verifyObjectLocation(this: Zenko, objectName: string, objectTransitionStatus: string, storageClass: string) { const objName = @@ -351,4 +382,5 @@ export { verifyObjectLocation, getObjectNameWithBackendFlakiness, restoreObject, + addTransitionWorkflow, }; diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index 8aa52b5f81..8f9a415cbf 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -158,7 +158,7 @@ export default class Zenko extends World { }, undefined, undefined, undefined, this.parameters.subdomain); Zenko.sites['source'] = { - accountName: this.parameters.AccountName, + accountName: this.parameters.AccountName.concat(Utils.randomString()), adminIdentityName: Zenko.PRIMARY_SITE_NAME, }; } else { @@ -174,12 +174,16 @@ export default class Zenko extends World { } Zenko.sites['sink'] = { - accountName: `dr${this.parameters.AccountName}`, + accountName: `dr${this.parameters.AccountName}`.concat(Utils.randomString()), adminIdentityName: Zenko.SECONDARY_SITE_NAME, }; } else { delete Zenko.sites['sink']; } + + this.logger.debug('Zenko sites', { + sites: Zenko.sites, + }); } private needsSecondarySite() { diff --git a/tests/ctst/yarn.lock b/tests/ctst/yarn.lock index 4185e60153..5bed88af54 100644 --- a/tests/ctst/yarn.lock +++ b/tests/ctst/yarn.lock @@ -4807,9 +4807,9 @@ cli-table3@^0.6.0: optionalDependencies: "@colors/colors" "1.5.0" -"cli-testing@github:scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d": +"cli-testing@github:scality/cli-testing.git#v1.2.0": version "1.2.0" - resolved "git+ssh://git@github.com/scality/cli-testing.git#86fdf945c59912b03f8ca6d2fe1baa5cfc0f443d" + resolved "git+ssh://git@github.com/scality/cli-testing.git#e8111e42fed3741183885c209e128f44c5a477ee" dependencies: "@aws-crypto/sha256-universal" "^5.2.0" "@aws-sdk/client-iam" "^3.484.0" From 5672e3847ee24027470b68ae89d5a0dc2483786e Mon Sep 17 00:00:00 2001 From: KillianG Date: Thu, 1 Aug 2024 15:25:12 +0200 Subject: [PATCH 06/25] Remove second test until uninstall works --- tests/ctst/features/pra.feature | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature index af431f71e6..80c79201de 100644 --- a/tests/ctst/features/pra.feature +++ b/tests/ctst/features/pra.feature @@ -13,16 +13,18 @@ Feature: PRA operations Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" And object "obj-2" should be "transitioned" and have the storage class "e2e-cold" And dmf volume should contain objects + # Deploy PRA Given a DR installed Then the DR source should be in phase "Running" And the DR sink should be in phase "Running" Then the kafka DR volume exists - # CHeck that objects are transitioned in the DR site - Given access keys for the replicated account + # Check that objects are transitioned in the DR site + Given access keys for the 
replicated account
     Then object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
     And object "obj-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
+
     # Test again the transition workflow
     Given <objectCount> objects "obj2" of size <objectSize> bytes on "Primary" site
     Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site
     And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "Primary" site
     Then object "obj2-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
     And object "obj2-2" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
     When i restore object "obj-1" for 2 days on "Primary" site
     Then object "obj-1" should "" be "restored" and have the storage class "e2e-cold" on "Primary" site
     And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
 
   Examples:
     | versioningConfiguration | objectCount | objectSize |
-    | Versioned | 2 | 100 |
-
-  @2.6.0
-  @PreMerge
-  @Dmf
-  @PRA
-  @ColdStorage
-  Scenario Outline: PRA (failure case)
-    # Fail to deploy PRA
-    Given a DR failing to be installed
-    Then the DR source should be in phase "Bootstrap:Failed"
-    And the DR sink should be in phase "Bootstrap:Failed"
-    # Expect the operator to perform Reinit and to go back to the Waiting state
-    Then the DR source should be in phase "Bootstrap:Waiting"
-    And the DR sink should be in phase "Bootstrap:Waiting"
-    # Retry to deploy PRA
-    Given a DR installed
-    Then the DR source should be in phase "Running"
-    And the DR sink should be in phase "Running"
-    Then the kafka DR volume exists
\ No newline at end of file
+    | Versioned | 2 | 100 |
\ No newline at end of file

From 03ca080cda111822141b08f9b2d411d0dbf53b55 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Thu, 1 Aug 2024 15:36:14 +0200
Subject: [PATCH 07/25] Remove unnecessary code for Zenko sites

---
 tests/ctst/world/Zenko.ts | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index 8f9a415cbf..9e2f2b300a 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -161,8 +161,6 @@ export default class Zenko extends World {
                 accountName: this.parameters.AccountName.concat(Utils.randomString()),
                 adminIdentityName: Zenko.PRIMARY_SITE_NAME,
             };
-        } else {
-            delete Zenko.sites['source'];
         }
 
         if (this.needsSecondarySite()) {
@@ -177,8 +175,6 @@ export default class Zenko extends World {
                 accountName: `dr${this.parameters.AccountName}`.concat(Utils.randomString()),
                 adminIdentityName: Zenko.SECONDARY_SITE_NAME,
             };
-        } else {
-            delete Zenko.sites['sink'];
         }

From d0fb07ada51a98893d09511e5263f12655d15ef1 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Thu, 1 Aug 2024 18:58:55 +0200
Subject: [PATCH 08/25] Fix error with identity

---
 tests/ctst/world/Zenko.ts | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index 9e2f2b300a..b7c3096d95 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -662,9 +662,27 @@ export default class Zenko extends World {
                 Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
             }
         }
+
+        const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!
+        const accountAccessKeys = Identity.getCredentialsForIdentity(
+            IdentityEnum.ACCOUNT, this.sites['source']?.accountName
+            || CacheHelper.parameters.AccountName!) || {
+            accessKeyId: '',
+            secretAccessKey: '',
+        };
+
+        if (!accountAccessKeys.accessKeyId || !accountAccessKeys.secretAccessKey) {
+            const accessKeys = await SuperAdmin.generateAccountAccessKey({ accountName });
+            if (!Utils.isAccessKeys(accessKeys)) {
+                throw new Error(`Failed to generate account access keys for account ${accountName}`);
+            }
+            accountAccessKeys.accessKeyId = accessKeys.accessKeyId;
+            accountAccessKeys.secretAccessKey = accessKeys.secretAccessKey;
+            Identity.addIdentity(IdentityEnum.ACCOUNT, accountName, accountAccessKeys, undefined, true, true);
+        }
+
         // Fallback to the primary site's account at the end of the init by default
-        Identity.useIdentity(IdentityEnum.ACCOUNT, this.sites['source']?.accountName
-            || CacheHelper.parameters.AccountName!);
+        Identity.useIdentity(IdentityEnum.ACCOUNT, accountName);
     }
 
     /**

From 6659c54191f9e7d2b6b65b868a703213608be003 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Thu, 1 Aug 2024 19:23:59 +0200
Subject: [PATCH 09/25] Fix lint issue

---
 tests/ctst/world/Zenko.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index b7c3096d95..2a3c35094c 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -663,7 +663,7 @@ export default class Zenko extends World {
         }
 
-        const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!
+        const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!;
         const accountAccessKeys = Identity.getCredentialsForIdentity(
             IdentityEnum.ACCOUNT, this.sites['source']?.accountName
             || CacheHelper.parameters.AccountName!) || {

From cd07993c4145c2f56a4476f44eda1f1c42d69ed2 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Fri, 2 Aug 2024 10:22:06 +0200
Subject: [PATCH 10/25] Try something

---
 tests/ctst/world/Zenko.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts
index 2a3c35094c..d271897b67 100644
--- a/tests/ctst/world/Zenko.ts
+++ b/tests/ctst/world/Zenko.ts
@@ -158,7 +158,7 @@ export default class Zenko extends World {
         }, undefined, undefined, undefined, this.parameters.subdomain);
 
         Zenko.sites['source'] = {
-            accountName: this.parameters.AccountName.concat(Utils.randomString()),
+            accountName: Identity.defaultAccountName,
             adminIdentityName: Zenko.PRIMARY_SITE_NAME,
         };
     }

From f325fade445234082fc4e3a8c52a162fe7ffae87 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Fri, 2 Aug 2024 11:46:48 +0200
Subject: [PATCH 11/25] Add scenarios for failure in install DR

---
 tests/ctst/features/pra.feature | 32 +++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature
index 80c79201de..35ed9c621c 100644
--- a/tests/ctst/features/pra.feature
+++ b/tests/ctst/features/pra.feature
@@ -35,6 +35,36 @@ Feature: PRA operations
     Then object "obj-1" should "" be "restored" and have the storage class "e2e-cold" on "Primary" site
     And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site
 
+    # Uninstall DR
+    When I uninstall DR
+    Then the DR custom resources should be deleted
+
   Examples:
     | versioningConfiguration | objectCount | objectSize |
-    | Versioned | 2 | 100 |
+    | Versioned | 2 | 100 |
+
+
+  @2.6.0
+  @PreMerge
+  @Dmf
+  @PRA
+  @ColdStorage
+  Scenario Outline: PRA (failure case)
+    # Fail to deploy PRA
+    Given a DR failing to be installed
+    Then the DR source should be
in phase "Bootstrap:Failed" + And the DR sink should be in phase "Bootstrap:Failed" + + # Expect the operator to perform Reinit and to back to Waiting state + Then the DR source should be in phase "Bootstrap:Waiting" + And the DR sink should be in phase "Bootstrap:Waiting" + + # Retry to deploy PRA + Given a DR installed + Then the DR source should be in phase "Running" + And the DR sink should be in phase "Running" + Then the kafka DR volume exists + + # Uninstall PRA + When I uninstall DR + Then the DR custom resources should be deleted \ No newline at end of file From 39d570d148445b99796980a9fc1209ffca8ee3c2 Mon Sep 17 00:00:00 2001 From: KillianG Date: Fri, 2 Aug 2024 13:36:52 +0200 Subject: [PATCH 12/25] Remove uninstall for test while uninstall not working --- tests/ctst/features/pra.feature | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature index 35ed9c621c..a0f14cada9 100644 --- a/tests/ctst/features/pra.feature +++ b/tests/ctst/features/pra.feature @@ -36,8 +36,8 @@ Feature: PRA operations And object "obj-1" should "" be "transitioned" and have the storage class "e2e-cold" on "DR" site # Uninstall DR - When I uninstall DR - Then the DR custom resources should be deleted + #When I uninstall DR + #Then the DR custom resources should be deleted Examples: | versioningConfiguration | objectCount | objectSize | @@ -66,5 +66,5 @@ Feature: PRA operations Then the kafka DR volume exists # Uninstall PRA - When I uninstall DR - Then the DR custom resources should be deleted \ No newline at end of file + #When I uninstall DR + #Then the DR custom resources should be deleted \ No newline at end of file From e58042ac82dd14bf7a61488155959da94dac1988 Mon Sep 17 00:00:00 2001 From: KillianG Date: Fri, 2 Aug 2024 14:52:54 +0200 Subject: [PATCH 13/25] Remove randomness --- tests/ctst/world/Zenko.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index d271897b67..88f239ffc4 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -172,7 +172,7 @@ export default class Zenko extends World { } Zenko.sites['sink'] = { - accountName: `dr${this.parameters.AccountName}`.concat(Utils.randomString()), + accountName: `dr${this.parameters.AccountName}`, adminIdentityName: Zenko.SECONDARY_SITE_NAME, }; } From a513427fe5976b957cc5472c1e6ca2e12b6e8a3c Mon Sep 17 00:00:00 2001 From: KillianG Date: Fri, 2 Aug 2024 15:41:11 +0200 Subject: [PATCH 14/25] Complete PRA logic --- tests/ctst/features/pra.feature | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/tests/ctst/features/pra.feature b/tests/ctst/features/pra.feature index a0f14cada9..30f8f52c8b 100644 --- a/tests/ctst/features/pra.feature +++ b/tests/ctst/features/pra.feature @@ -41,30 +41,4 @@ Feature: PRA operations Examples: | versioningConfiguration | objectCount | objectSize | - | Versioned | 2 | 100 | - - - @2.6.0 - @PreMerge - @Dmf - @PRA - @ColdStorage - Scenario Outline: PRA (failure case) - # Fail to deploy PRA - Given a DR failing to be installed - Then the DR source should be in phase "Bootstrap:Failed" - And the DR sink should be in phase "Bootstrap:Failed" - - # Expect the operator to perform Reinit and to back to Waiting state - Then the DR source should be in phase "Bootstrap:Waiting" - And the DR sink should be in phase "Bootstrap:Waiting" - - # Retry to deploy PRA - Given a DR installed - Then the DR source should be in phase 
"Running" - And the DR sink should be in phase "Running" - Then the kafka DR volume exists - - # Uninstall PRA - #When I uninstall DR - #Then the DR custom resources should be deleted \ No newline at end of file + | Versioned | 2 | 100 | \ No newline at end of file From ad49c861c3bd24103b8143b0b49c26e180080dfa Mon Sep 17 00:00:00 2001 From: KillianG Date: Tue, 6 Aug 2024 08:50:45 +0200 Subject: [PATCH 15/25] Remove useless zenkoversion, and change e2e config script Issue: ZENKO-4773 --- .github/actions/deploy/action.yaml | 6 +- .../end2end/configs/zenkoversion-pra.yaml | 147 ------------------ .../scripts/end2end/configs/zenkoversion.yaml | 3 + .github/scripts/end2end/configure-e2e-pra.sh | 20 --- .github/workflows/end2end.yaml | 6 +- 5 files changed, 6 insertions(+), 176 deletions(-) delete mode 100644 .github/scripts/end2end/configs/zenkoversion-pra.yaml delete mode 100644 .github/scripts/end2end/configure-e2e-pra.sh diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml index 5b9ddcba09..36d15b03ce 100644 --- a/.github/actions/deploy/action.yaml +++ b/.github/actions/deploy/action.yaml @@ -7,10 +7,6 @@ inputs: description: "The tag of the Zenko Operator image to use" required: false default: "" - zenko_version_path: - description: "The path to the ZenkoVersion file" - required: false - default: "./configs/zenkoversion.yaml" runs: using: composite steps: @@ -76,7 +72,7 @@ runs: OPERATOR_IMAGE_TAG: ${{ inputs.zkop_tag }} - name: Deploy Zenko Instance shell: bash - run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' ${{ inputs.zenko_version_path }} + run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' './configs/zenkoversion.yaml' working-directory: ./.github/scripts/end2end - name: Add Keycloak user and assign StorageManager role shell: bash diff --git a/.github/scripts/end2end/configs/zenkoversion-pra.yaml b/.github/scripts/end2end/configs/zenkoversion-pra.yaml deleted file mode 100644 index 2d2fdf3da7..0000000000 --- a/.github/scripts/end2end/configs/zenkoversion-pra.yaml +++ /dev/null @@ -1,147 +0,0 @@ ---- -# DO NOT EDIT - autogenerated file -apiVersion: zenko.io/v1alpha1 -kind: ZenkoVersion -metadata: - name: '${ZENKO_VERSION_NAME}' -spec: - dashboards: - backbeat: - image: '${BACKBEAT_DASHBOARD}' - tag: '${BACKBEAT_TAG}' - cloudserver: - image: '${CLOUDSERVER_DASHBOARD}' - tag: '${CLOUDSERVER_TAG}' - s3utils: - image: '${S3UTILS_DASHBOARD}' - tag: '${S3UTILS_TAG}' - scuba: - image: '${SCUBA_DASHBOARD}' - tag: '${SCUBA_TAG}' - kafkaCleaner: - image: '${KAFKA_CLEANER_DASHBOARD}' - tag: '${KAFKA_CLEANER_TAG}' - # kafka: - # image: kafka-dashboard - # tag: '${ZENKO_VERSION_NAME}' - # kafkaConnect: - # image: kafka-connect-dashboard - # tag: '${ZENKO_VERSION_NAME}' - # mongodb: - # image: mongodb-dashboard - # tag: '${ZENKO_VERSION_NAME}' - # redis: - # image: redis-dashboard - # tag: '${ZENKO_VERSION_NAME}' - # vault: - # image: '${VAULT_DASHBOARD}' - # tag: '${VAULT_TAG}' - # zookeeper: - # image: zookeeper-dashboard - # tag: '${ZENKO_VERSION_NAME}' - policies: - backbeat: - image: '${BACKBEAT_POLICY}' - tag: '${BACKBEAT_TAG}' - sorbet: - image: '${SORBET_POLICY}' - tag: '${SORBET_TAG}' - vault: - image: '${VAULT_POLICY}' - tag: '${VAULT_TAG}' - versions: - management: - ui: - image: '${ZENKO_UI_IMAGE}' - tag: '${ZENKO_UI_TAG}' - api: - image: '${PENSIEVE_API_IMAGE}' - tag: '${PENSIEVE_API_TAG}' - pushAPI: - image: '${PENSIEVE_API_IMAGE}' - tag: '${PENSIEVE_API_TAG}' - cloudserver: - image: '${CLOUDSERVER_IMAGE}' - tag: 
'${CLOUDSERVER_TAG}' - backbeat: - image: '${BACKBEAT_IMAGE}' - tag: '${BACKBEAT_TAG}' - utapi: - image: '${UTAPI_IMAGE}' - tag: '${UTAPI_TAG}' - secureChannelProxy: - image: '${CLOUDSERVER_IMAGE}' - tag: '${CLOUDSERVER_TAG}' - localData: - image: '${CLOUDSERVER_IMAGE}' - tag: '${CLOUDSERVER_TAG}' - metrics: - image: '${CLOUDSERVER_IMAGE}' - tag: '${CLOUDSERVER_TAG}' - s3utils: - image: '${S3UTILS_IMAGE}' - tag: '${S3UTILS_TAG}' - sorbet: - image: '${SORBET_IMAGE}' - tag: '${SORBET_TAG}' - drctl: - image: '${DRCTL_IMAGE}' - tag: '${DRCTL_TAG}' - zookeeper: - image: '${ZOOKEEPER_IMAGE}' - tag: '${ZOOKEEPER_TAG}' - kafka: - cleaner: - image: '${KAFKA_CLEANER_IMAGE}' - tag: '${KAFKA_CLEANER_TAG}' - cluster: - image: '${KAFKA_IMAGE}' - tag: '${KAFKA_TAG}' - connect: - image: '${KAFKA_CONNECT_IMAGE}' - tag: '${KAFKA_CONNECT_TAG}' - cruiseControl: - image: '${KAFKA_CRUISECONTROL_IMAGE}' - tag: '${KAFKA_CRUISECONTROL_TAG}' - lagExporter: - image: '${KAFKA_LAGEXPORTER_IMAGE}' - tag: '${KAFKA_LAGEXPORTER_TAG}' - monitoring: - image: '${JMX_JAVAAGENT_IMAGE}' - tag: '${JMX_JAVAAGENT_TAG}' - cleaner: - image: '${KAFKA_CLEANER_IMAGE}' - tag: '${KAFKA_CLEANER_TAG}' - vault: - image: '${VAULT_IMAGE}' - tag: '${VAULT_TAG}' - scuba: - image: '${SCUBA_IMAGE}' - tag: '${SCUBA_TAG}' - shell: - image: '${BUSYBOX_IMAGE}' - tag: '${BUSYBOX_TAG}' - mongodb: '${MONGODB_TAG}' - redis: - db: - image: '${REDIS_IMAGE}' - tag: '${REDIS_TAG}' - exporter: - image: '${REDIS_EXPORTER_IMAGE}' - tag: '${REDIS_EXPORTER_TAG}' - kubedb: '${REDIS_KUBEDB_TAG}' - defaults: - backbeatConcurrency: - lifecycleBucketProcessor: 30 - lifecycleObjectProcessor: 20 - replicaMultipliers: - cloudserver: 16 - internalCloudserver: 4 - vault: 4 - kafkaResources: - broker: - limitCPU: 3 - cruiseControl: - limitMemory: 3Gi - featureFlags: - backbeatGCVaultAuthorized: true \ No newline at end of file diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml index 9d603440dc..2d2fdf3da7 100644 --- a/.github/scripts/end2end/configs/zenkoversion.yaml +++ b/.github/scripts/end2end/configs/zenkoversion.yaml @@ -84,6 +84,9 @@ spec: sorbet: image: '${SORBET_IMAGE}' tag: '${SORBET_TAG}' + drctl: + image: '${DRCTL_IMAGE}' + tag: '${DRCTL_TAG}' zookeeper: image: '${ZOOKEEPER_IMAGE}' tag: '${ZOOKEEPER_TAG}' diff --git a/.github/scripts/end2end/configure-e2e-pra.sh b/.github/scripts/end2end/configure-e2e-pra.sh deleted file mode 100644 index ecf15d808d..0000000000 --- a/.github/scripts/end2end/configure-e2e-pra.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh - -set -exu - -. 
"$(dirname $0)/common.sh" - -OIDC_USERNAME=${OIDC_USERNAME:-zenko-end2end-pra} -OIDC_EMAIL=${OIDC_EMAIL:-e2e-pra@zenko.local} - -INSTANCE_ID=$(kubectl get zenko end2end-pra -o jsonpath='{.status.instanceID}') -TOKEN=$(get_token) - -LOCATION_PARAMS='{"name":"e2e-cold","locationType":"location-dmf-v1","details":{"endpoint":"ws://mock-sorbet:5001/session","username":"user1","password":"pass1","repoId":["233aead6-1d7b-4647-a7cf-0d3280b5d1d7","81e78de8-df11-4acd-8ad1-577ff05a68db"],"nsId":"65f9fd61-42fe-4a68-9ac0-6ba25311cc85"}}' - -curl -k -X POST \ --H "Host: management.dr.zenko.local" \ --H "X-Authentication-Token: $TOKEN" \ --H "Content-Type: application/json" \ --d "$LOCATION_PARAMS" \ -"https://localhost/api/v1/config/$INSTANCE_ID/location" \ No newline at end of file diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml index cc5b36b251..b17bb769aa 100644 --- a/.github/workflows/end2end.yaml +++ b/.github/workflows/end2end.yaml @@ -481,13 +481,11 @@ jobs: ZENKO_MONGODB_SHARDED: "true" with: zkop_tag: 1.6.0-preview.2 - zenko_version_path: './configs/zenkoversion-pra.yaml' - name: Prepare PRA environment run: bash prepare-pra.sh working-directory: ./.github/scripts/end2end - name: Deploy second Zenko for PRA - run: bash deploy-zenko.sh end2end-pra default './configs/zenko.yaml' './configs/zenkoversion-pra.yaml' - env: + run: bash deploy-zenko.sh end2end-pra ZENKO_MONGODB_SHARDED: "true" ZENKO_MONGODB_DATABASE: "pradb" working-directory: ./.github/scripts/end2end @@ -499,7 +497,7 @@ jobs: OIDC_EMAIL: 'e2e-pra@zenko.local' working-directory: ./.github/scripts/end2end - name: Configure E2E PRA test environment - run: bash configure-e2e-pra.sh + run: bash configure-e2e.sh end2end-pra working-directory: ./.github/scripts/end2end env: OIDC_USERNAME: 'zenko-end2end-pra' From 3ba68b71a914b65099a02bf287e91ad20d487867 Mon Sep 17 00:00:00 2001 From: KillianG Date: Tue, 6 Aug 2024 10:19:43 +0200 Subject: [PATCH 16/25] Update mongodb user, update dockerfile and run script Issue: ZENKO-4773 --- .github/scripts/end2end/prepare-pra.sh | 2 +- .github/scripts/end2end/run-e2e-ctst.sh | 3 +-- .github/scripts/end2end/run-e2e-pra-ctst.sh | 7 +++---- .github/workflows/end2end.yaml | 1 + tests/ctst/Dockerfile | 2 -- 5 files changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/scripts/end2end/prepare-pra.sh b/.github/scripts/end2end/prepare-pra.sh index 6098bc67a0..215868d752 100644 --- a/.github/scripts/end2end/prepare-pra.sh +++ b/.github/scripts/end2end/prepare-pra.sh @@ -21,7 +21,7 @@ MONGODB_ROOT_PASSWORD="${MONGODB_ROOT_PASSWORD:-'rootpass'}" kubectl exec -it data-db-mongodb-sharded-mongos-0 -- mongo "admin" \ -u "root" \ -p "rootpass" \ - --eval "db.createUser({user:$MONGODB_PRA_USERNAME,pwd:$MONGODB_PRA_PASSWORD,roles:[{role:'enableSharding',db:$MONGODB_PRA_DATABASE },{role:'readWriteAnyDatabase',db:'admin'}]})" + --eval "db.createUser({user:$MONGODB_PRA_USERNAME,pwd:$MONGODB_PRA_PASSWORD,roles:[{role:'enableSharding',db:$MONGODB_PRA_DATABASE },{role:'readWrite',db:$MONGODB_PRA_DATABASE },{role:'read',db:'local'}]})" kubectl -n ${PRA_NAMESPACE} apply -f - < Date: Tue, 6 Aug 2024 10:46:36 +0200 Subject: [PATCH 17/25] Use specific zenkoversion for PRA due to drctl field unknown with older version of zkop Issue: ZENKO-4773 --- .github/actions/deploy/action.yaml | 6 +- .../end2end/configs/zenkoversion-pra.yaml | 147 ++++++++++++++++++ .github/workflows/end2end.yaml | 3 +- 3 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 
.github/scripts/end2end/configs/zenkoversion-pra.yaml diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml index 36d15b03ce..5b9ddcba09 100644 --- a/.github/actions/deploy/action.yaml +++ b/.github/actions/deploy/action.yaml @@ -7,6 +7,10 @@ inputs: description: "The tag of the Zenko Operator image to use" required: false default: "" + zenko_version_path: + description: "The path to the ZenkoVersion file" + required: false + default: "./configs/zenkoversion.yaml" runs: using: composite steps: @@ -72,7 +76,7 @@ runs: OPERATOR_IMAGE_TAG: ${{ inputs.zkop_tag }} - name: Deploy Zenko Instance shell: bash - run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' './configs/zenkoversion.yaml' + run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' ${{ inputs.zenko_version_path }} working-directory: ./.github/scripts/end2end - name: Add Keycloak user and assign StorageManager role shell: bash diff --git a/.github/scripts/end2end/configs/zenkoversion-pra.yaml b/.github/scripts/end2end/configs/zenkoversion-pra.yaml new file mode 100644 index 0000000000..2d2fdf3da7 --- /dev/null +++ b/.github/scripts/end2end/configs/zenkoversion-pra.yaml @@ -0,0 +1,147 @@ +--- +# DO NOT EDIT - autogenerated file +apiVersion: zenko.io/v1alpha1 +kind: ZenkoVersion +metadata: + name: '${ZENKO_VERSION_NAME}' +spec: + dashboards: + backbeat: + image: '${BACKBEAT_DASHBOARD}' + tag: '${BACKBEAT_TAG}' + cloudserver: + image: '${CLOUDSERVER_DASHBOARD}' + tag: '${CLOUDSERVER_TAG}' + s3utils: + image: '${S3UTILS_DASHBOARD}' + tag: '${S3UTILS_TAG}' + scuba: + image: '${SCUBA_DASHBOARD}' + tag: '${SCUBA_TAG}' + kafkaCleaner: + image: '${KAFKA_CLEANER_DASHBOARD}' + tag: '${KAFKA_CLEANER_TAG}' + # kafka: + # image: kafka-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # kafkaConnect: + # image: kafka-connect-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # mongodb: + # image: mongodb-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # redis: + # image: redis-dashboard + # tag: '${ZENKO_VERSION_NAME}' + # vault: + # image: '${VAULT_DASHBOARD}' + # tag: '${VAULT_TAG}' + # zookeeper: + # image: zookeeper-dashboard + # tag: '${ZENKO_VERSION_NAME}' + policies: + backbeat: + image: '${BACKBEAT_POLICY}' + tag: '${BACKBEAT_TAG}' + sorbet: + image: '${SORBET_POLICY}' + tag: '${SORBET_TAG}' + vault: + image: '${VAULT_POLICY}' + tag: '${VAULT_TAG}' + versions: + management: + ui: + image: '${ZENKO_UI_IMAGE}' + tag: '${ZENKO_UI_TAG}' + api: + image: '${PENSIEVE_API_IMAGE}' + tag: '${PENSIEVE_API_TAG}' + pushAPI: + image: '${PENSIEVE_API_IMAGE}' + tag: '${PENSIEVE_API_TAG}' + cloudserver: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + backbeat: + image: '${BACKBEAT_IMAGE}' + tag: '${BACKBEAT_TAG}' + utapi: + image: '${UTAPI_IMAGE}' + tag: '${UTAPI_TAG}' + secureChannelProxy: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + localData: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + metrics: + image: '${CLOUDSERVER_IMAGE}' + tag: '${CLOUDSERVER_TAG}' + s3utils: + image: '${S3UTILS_IMAGE}' + tag: '${S3UTILS_TAG}' + sorbet: + image: '${SORBET_IMAGE}' + tag: '${SORBET_TAG}' + drctl: + image: '${DRCTL_IMAGE}' + tag: '${DRCTL_TAG}' + zookeeper: + image: '${ZOOKEEPER_IMAGE}' + tag: '${ZOOKEEPER_TAG}' + kafka: + cleaner: + image: '${KAFKA_CLEANER_IMAGE}' + tag: '${KAFKA_CLEANER_TAG}' + cluster: + image: '${KAFKA_IMAGE}' + tag: '${KAFKA_TAG}' + connect: + image: '${KAFKA_CONNECT_IMAGE}' + tag: '${KAFKA_CONNECT_TAG}' + cruiseControl: + image: '${KAFKA_CRUISECONTROL_IMAGE}' 
+        tag: '${KAFKA_CRUISECONTROL_TAG}'
+      lagExporter:
+        image: '${KAFKA_LAGEXPORTER_IMAGE}'
+        tag: '${KAFKA_LAGEXPORTER_TAG}'
+      monitoring:
+        image: '${JMX_JAVAAGENT_IMAGE}'
+        tag: '${JMX_JAVAAGENT_TAG}'
+    cleaner:
+      image: '${KAFKA_CLEANER_IMAGE}'
+      tag: '${KAFKA_CLEANER_TAG}'
+    vault:
+      image: '${VAULT_IMAGE}'
+      tag: '${VAULT_TAG}'
+    scuba:
+      image: '${SCUBA_IMAGE}'
+      tag: '${SCUBA_TAG}'
+    shell:
+      image: '${BUSYBOX_IMAGE}'
+      tag: '${BUSYBOX_TAG}'
+    mongodb: '${MONGODB_TAG}'
+    redis:
+      db:
+        image: '${REDIS_IMAGE}'
+        tag: '${REDIS_TAG}'
+      exporter:
+        image: '${REDIS_EXPORTER_IMAGE}'
+        tag: '${REDIS_EXPORTER_TAG}'
+      kubedb: '${REDIS_KUBEDB_TAG}'
+  defaults:
+    backbeatConcurrency:
+      lifecycleBucketProcessor: 30
+      lifecycleObjectProcessor: 20
+    replicaMultipliers:
+      cloudserver: 16
+      internalCloudserver: 4
+      vault: 4
+    kafkaResources:
+      broker:
+        limitCPU: 3
+      cruiseControl:
+        limitMemory: 3Gi
+  featureFlags:
+    backbeatGCVaultAuthorized: true
\ No newline at end of file
diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml
index 7b3a037516..190f436bdb 100644
--- a/.github/workflows/end2end.yaml
+++ b/.github/workflows/end2end.yaml
@@ -481,11 +481,12 @@ jobs:
         ZENKO_MONGODB_SHARDED: "true"
       with:
         zkop_tag: 1.6.0-preview.2
+        zenko_version_path: './configs/zenkoversion-pra.yaml'
     - name: Prepare PRA environment
       run: bash prepare-pra.sh
       working-directory: ./.github/scripts/end2end
     - name: Deploy second Zenko for PRA
-      run: bash deploy-zenko.sh end2end-pra
+      run: bash deploy-zenko.sh end2end-pra default './configs/zenko.yaml' './configs/zenkoversion-pra.yaml'
       env:
         ZENKO_MONGODB_SHARDED: "true"
         ZENKO_MONGODB_DATABASE: "pradb"

From 43aa96afb81894eb8205352120490cff51eb163f Mon Sep 17 00:00:00 2001
From: KillianG
Date: Tue, 6 Aug 2024 10:48:57 +0200
Subject: [PATCH 18/25] Remove service user credentials

Issue: ZENKO-4773
---
 .github/scripts/end2end/run-e2e-pra-ctst.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/scripts/end2end/run-e2e-pra-ctst.sh b/.github/scripts/end2end/run-e2e-pra-ctst.sh
index ca9d6495e2..0f55202879 100644
--- a/.github/scripts/end2end/run-e2e-pra-ctst.sh
+++ b/.github/scripts/end2end/run-e2e-pra-ctst.sh
@@ -63,7 +63,6 @@ WORLD_PARAMETERS="$(jq -c <

Date: Tue, 6 Aug 2024 11:02:29 +0200
Subject: [PATCH 19/25] Remove drctl from zenkoversion.yaml

---
 .github/scripts/end2end/configs/zenkoversion.yaml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml
index 2d2fdf3da7..9d603440dc 100644
--- a/.github/scripts/end2end/configs/zenkoversion.yaml
+++ b/.github/scripts/end2end/configs/zenkoversion.yaml
@@ -84,9 +84,6 @@ spec:
     sorbet:
       image: '${SORBET_IMAGE}'
       tag: '${SORBET_TAG}'
-    drctl:
-      image: '${DRCTL_IMAGE}'
-      tag: '${DRCTL_TAG}'
     zookeeper:
       image: '${ZOOKEEPER_IMAGE}'
       tag: '${ZOOKEEPER_TAG}'

From 8493425577985e82b52cab1eef2d7cfb42a56eaa Mon Sep 17 00:00:00 2001
From: KillianG
Date: Tue, 6 Aug 2024 11:56:00 +0200
Subject: [PATCH 20/25] Update kafka topic creation script to use --if-not-exists flag

Issue: ZENKO-4773
---
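Note on the run-e2e-ctst.sh change below: the script now consumes its four
fixed positional arguments and forwards everything that follows, untouched,
to the test runner via "$@". A condensed sketch of the pattern (standalone;
in the real script PARALLEL_RUNS comes from the environment rather than $3):

    #!/usr/bin/env bash
    # The first four arguments are fixed...
    ZENKO_NAME=${1:-end2end}
    COMMAND=${2:-premerge}
    PARALLEL_RUNS=${3:-2}
    RETRIES=${4:-3}
    # ...then dropped, so "$@" holds only the caller's extra options.
    shift 4
    ./run "$COMMAND" --parallel "$PARALLEL_RUNS" --retry "$RETRIES" "$@"

One caveat: bash's "shift 4" returns non-zero without shifting when fewer
than four arguments were passed, so under "set -e" callers must always
supply all four positionals.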
 .github/scripts/end2end/configure-e2e.sh | 8 +-
 .github/scripts/end2end/run-e2e-ctst.sh | 14 +-
 .github/scripts/end2end/run-e2e-pra-ctst.sh | 142 --------------------
 .github/workflows/end2end.yaml | 7 +-
 tests/zenko_tests/e2e_config/accounts.py | 8 +-
 5 files changed, 26 insertions(+), 153 deletions(-)
 delete mode 100644 .github/scripts/end2end/run-e2e-pra-ctst.sh

diff --git a/.github/scripts/end2end/configure-e2e.sh b/.github/scripts/end2end/configure-e2e.sh
index 9d1ce43098..7fb7339081 100755
--- a/.github/scripts/end2end/configure-e2e.sh
+++ b/.github/scripts/end2end/configure-e2e.sh
@@ -63,10 +63,10 @@ kubectl run kafka-topics \
     --restart=Never \
     --attach=True \
     --command -- bash -c \
-    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT"
+    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists"
 
 kubectl run ${POD_NAME} \
     --image ${E2E_IMAGE} \
diff --git a/.github/scripts/end2end/run-e2e-ctst.sh b/.github/scripts/end2end/run-e2e-ctst.sh
index 1aa9f159f6..52daac972a 100755
--- a/.github/scripts/end2end/run-e2e-ctst.sh
+++ b/.github/scripts/end2end/run-e2e-ctst.sh
@@ -5,6 +5,9 @@ ZENKO_NAME=${1:-end2end}
 COMMAND=${2:-"premerge"}
 PARALLEL_RUNS=${PARALLEL_RUNS:-$(( ( $(nproc) + 1 ) / 2 ))}
 RETRIES=${4:-3}
+
+shift 4
+
 JUNIT_REPORT_PATH=${JUNIT_REPORT_PATH:-"ctst-junit.xml"}
 
 # Zenko Version
@@ -37,6 +40,9 @@ SORBET_FWD_2_ACCESSKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fw
 SORBET_FWD_2_SECRETKEY=$(kubectl get secret -l app.kubernetes.io/name=sorbet-fwd-creds,app.kubernetes.io/instance=end2end -o jsonpath='{.items[0].data.secretKey}' | base64 -d)
 SERVICE_USERS_CREDENTIALS=$(echo '{"backbeat-lifecycle-bp-1":'${BACKBEAT_LCBP_1_CREDS}',"backbeat-lifecycle-conductor-1":'${BACKBEAT_LCC_1_CREDS}',"backbeat-lifecycle-op-1":'${BACKBEAT_LCOP_1_CREDS}',"backbeat-qp-1":'${BACKBEAT_QP_1_CREDS}',"sorbet-fwd-2":{"accessKey":"'${SORBET_FWD_2_ACCESSKEY}'","secretKey":"'${SORBET_FWD_2_SECRETKEY}'"}}' | jq -R)
+DR_ADMIN_ACCESS_KEY_ID=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.accessKey}' | base64 -d)
+DR_ADMIN_SECRET_ACCESS_KEY=$(kubectl get secret end2end-pra-management-vault-admin-creds.v1 -o jsonpath='{.data.secretKey}' | base64 -d)
+
 # Extracting kafka host from bacbeat's config
 KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
     -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts)
 KAFKA_HOST_PORT=${KAFKA_HOST_PORT:1:-1}
@@ -50,6 +56,7 @@ KAFKA_CLEANER_INTERVAL=$(kubectl get zenko ${ZENKO_NAME} -o jsonpath='{.spec.kaf
 WORLD_PARAMETERS="$(jq -c <

Date: Wed, 7 Aug 2024 10:56:28 +0200
Subject: [PATCH 21/25] Remove duplicate zenkoversion and add extra component variable

Issue: ZENKO-4773
---
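Note on EXTRA_COMPONENTS: rather than maintaining a second, near-identical
ZenkoVersion file for PRA runs, the shared template now carries a
${EXTRA_COMPONENTS} placeholder that the deploy script expands from the
environment. Assuming deploy-zenko.sh renders the template with envsubst
(consistent with the other ${...} placeholders in these configs), the
mechanism is roughly:

    #!/usr/bin/env bash
    # Hypothetical standalone demo of the substitution, not the real deploy script.
    export EXTRA_COMPONENTS='drctl:
          image: ghcr.io/scality/zenko-drctl
          tag: v1.0.2'
    cat > /tmp/zenkoversion.tpl <<'EOF'
    spec:
      versions:
        ${EXTRA_COMPONENTS}
    EOF
    envsubst < /tmp/zenkoversion.tpl

envsubst pastes the value verbatim: only the first line inherits the
template's indentation, so the continuation lines of EXTRA_COMPONENTS must
be pre-indented to the YAML level they are injected at, which is why the
workflow passes an indented block.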
 .github/actions/deploy/action.yaml | 10 +-
 .../end2end/configs/zenkoversion-pra.yaml | 147 ------------------
 .github/scripts/end2end/configs/zenkoversion.yaml | 1 +
 .github/workflows/end2end.yaml | 11 +-
 4 files changed, 16 insertions(+), 153 deletions(-)
 delete mode 100644 .github/scripts/end2end/configs/zenkoversion-pra.yaml

diff --git a/.github/actions/deploy/action.yaml b/.github/actions/deploy/action.yaml
index 5b9ddcba09..af3e274ed3 100644
--- a/.github/actions/deploy/action.yaml
+++ b/.github/actions/deploy/action.yaml
@@ -7,10 +7,10 @@ inputs:
     description: "The tag of the Zenko Operator image to use"
     required: false
     default: ""
-  zenko_version_path:
-    description: "The path to the ZenkoVersion file"
+  extra_components:
+    description: "Extra components to add to zenkoversion"
     required: false
-    default: "./configs/zenkoversion.yaml"
+    default: ""
 runs:
   using: composite
   steps:
@@ -76,8 +76,10 @@ runs:
         OPERATOR_IMAGE_TAG: ${{ inputs.zkop_tag }}
     - name: Deploy Zenko Instance
       shell: bash
-      run: bash deploy-zenko.sh end2end default './configs/zenko.yaml' ${{ inputs.zenko_version_path }}
+      run: bash deploy-zenko.sh end2end default
       working-directory: ./.github/scripts/end2end
+      env:
+        EXTRA_COMPONENTS: ${{ inputs.extra_components }}
     - name: Add Keycloak user and assign StorageManager role
       shell: bash
       run: bash keycloak-helper.sh add-user default
diff --git a/.github/scripts/end2end/configs/zenkoversion-pra.yaml b/.github/scripts/end2end/configs/zenkoversion-pra.yaml
deleted file mode 100644
index 2d2fdf3da7..0000000000
--- a/.github/scripts/end2end/configs/zenkoversion-pra.yaml
+++ /dev/null
@@ -1,147 +0,0 @@
----
-# DO NOT EDIT - autogenerated file
-apiVersion: zenko.io/v1alpha1
-kind: ZenkoVersion
-metadata:
-  name: '${ZENKO_VERSION_NAME}'
-spec:
-  dashboards:
-    backbeat:
-      image: '${BACKBEAT_DASHBOARD}'
-      tag: '${BACKBEAT_TAG}'
-    cloudserver:
-      image: '${CLOUDSERVER_DASHBOARD}'
-      tag: '${CLOUDSERVER_TAG}'
-    s3utils:
-      image: '${S3UTILS_DASHBOARD}'
-      tag: '${S3UTILS_TAG}'
-    scuba:
-      image: '${SCUBA_DASHBOARD}'
-      tag: '${SCUBA_TAG}'
-    kafkaCleaner:
-      image: '${KAFKA_CLEANER_DASHBOARD}'
-      tag: '${KAFKA_CLEANER_TAG}'
-    # kafka:
-    #   image: kafka-dashboard
-    #   tag: '${ZENKO_VERSION_NAME}'
-    # kafkaConnect:
-    #   image: kafka-connect-dashboard
-    #   tag: '${ZENKO_VERSION_NAME}'
-    # mongodb:
-    #   image: mongodb-dashboard
-    #   tag: '${ZENKO_VERSION_NAME}'
-    # redis:
-    #   image: redis-dashboard
-    #   tag: '${ZENKO_VERSION_NAME}'
-    # vault:
-    #   image: '${VAULT_DASHBOARD}'
-    #   tag: '${VAULT_TAG}'
-    # zookeeper:
-    #   image: zookeeper-dashboard
-    #   tag: '${ZENKO_VERSION_NAME}'
-  policies:
-    backbeat:
-      image: '${BACKBEAT_POLICY}'
-      tag: '${BACKBEAT_TAG}'
-    sorbet:
-      image: '${SORBET_POLICY}'
-      tag: '${SORBET_TAG}'
-    vault:
-      image: '${VAULT_POLICY}'
-      tag: '${VAULT_TAG}'
-  versions:
-    management:
-      ui:
-        image: '${ZENKO_UI_IMAGE}'
-        tag: '${ZENKO_UI_TAG}'
-      api:
-        image: '${PENSIEVE_API_IMAGE}'
-        tag: '${PENSIEVE_API_TAG}'
-      pushAPI:
-        image: '${PENSIEVE_API_IMAGE}'
-        tag: '${PENSIEVE_API_TAG}'
-    cloudserver:
-      image: '${CLOUDSERVER_IMAGE}'
-      tag: '${CLOUDSERVER_TAG}'
-    backbeat:
-      image: '${BACKBEAT_IMAGE}'
-      tag: '${BACKBEAT_TAG}'
-    utapi:
-      image: '${UTAPI_IMAGE}'
-      tag: '${UTAPI_TAG}'
-    secureChannelProxy:
-      image: '${CLOUDSERVER_IMAGE}'
-      tag: '${CLOUDSERVER_TAG}'
-    localData:
-      image: '${CLOUDSERVER_IMAGE}'
-      tag: '${CLOUDSERVER_TAG}'
-    metrics:
-      image: '${CLOUDSERVER_IMAGE}'
-      tag: '${CLOUDSERVER_TAG}'
-    s3utils:
-      image: '${S3UTILS_IMAGE}'
-      tag: '${S3UTILS_TAG}'
-    sorbet:
-      image: '${SORBET_IMAGE}'
-      tag: '${SORBET_TAG}'
-    drctl:
-      image: '${DRCTL_IMAGE}'
-      tag: '${DRCTL_TAG}'
-    zookeeper:
-      image: '${ZOOKEEPER_IMAGE}'
-      tag: '${ZOOKEEPER_TAG}'
-    kafka:
-      cleaner:
-        image: '${KAFKA_CLEANER_IMAGE}'
-        tag: '${KAFKA_CLEANER_TAG}'
-      cluster:
-        image: '${KAFKA_IMAGE}'
-        tag: '${KAFKA_TAG}'
-      connect:
-        image: '${KAFKA_CONNECT_IMAGE}'
-        tag: '${KAFKA_CONNECT_TAG}'
-      cruiseControl:
-        image: '${KAFKA_CRUISECONTROL_IMAGE}'
-        tag: '${KAFKA_CRUISECONTROL_TAG}'
-      lagExporter:
-        image: '${KAFKA_LAGEXPORTER_IMAGE}'
-        tag: '${KAFKA_LAGEXPORTER_TAG}'
-      monitoring:
-        image: '${JMX_JAVAAGENT_IMAGE}'
-        tag: '${JMX_JAVAAGENT_TAG}'
-    cleaner:
-      image: '${KAFKA_CLEANER_IMAGE}'
-      tag: '${KAFKA_CLEANER_TAG}'
-    vault:
-      image: '${VAULT_IMAGE}'
-      tag: '${VAULT_TAG}'
-    scuba:
-      image: '${SCUBA_IMAGE}'
-      tag: '${SCUBA_TAG}'
-    shell:
-      image: '${BUSYBOX_IMAGE}'
-      tag: '${BUSYBOX_TAG}'
-    mongodb: '${MONGODB_TAG}'
-    redis:
-      db:
-        image: '${REDIS_IMAGE}'
-        tag: '${REDIS_TAG}'
-      exporter:
-        image: '${REDIS_EXPORTER_IMAGE}'
-        tag: '${REDIS_EXPORTER_TAG}'
-      kubedb: '${REDIS_KUBEDB_TAG}'
-  defaults:
-    backbeatConcurrency:
-      lifecycleBucketProcessor: 30
-      lifecycleObjectProcessor: 20
-    replicaMultipliers:
-      cloudserver: 16
-      internalCloudserver: 4
-      vault: 4
-    kafkaResources:
-      broker:
-        limitCPU: 3
-      cruiseControl:
-        limitMemory: 3Gi
-  featureFlags:
-    backbeatGCVaultAuthorized: true
\ No newline at end of file
diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml
index 9d603440dc..5ba2998051 100644
--- a/.github/scripts/end2end/configs/zenkoversion.yaml
+++ b/.github/scripts/end2end/configs/zenkoversion.yaml
@@ -127,6 +127,7 @@ spec:
         image: '${REDIS_EXPORTER_IMAGE}'
         tag: '${REDIS_EXPORTER_TAG}'
       kubedb: '${REDIS_KUBEDB_TAG}'
+    ${EXTRA_COMPONENTS}
   defaults:
     backbeatConcurrency:
      lifecycleBucketProcessor: 30
diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml
index 4b639ff279..84b7443d52 100644
--- a/.github/workflows/end2end.yaml
+++ b/.github/workflows/end2end.yaml
@@ -481,15 +481,22 @@ jobs:
         ZENKO_MONGODB_SHARDED: "true"
       with:
         zkop_tag: 1.6.0-preview.2
-        zenko_version_path: './configs/zenkoversion-pra.yaml'
+        extra_components: |
+          drctl:
+            image: ghcr.io/scality/zenko-drctl
+            tag: v1.0.2
     - name: Prepare PRA environment
       run: bash prepare-pra.sh
       working-directory: ./.github/scripts/end2end
     - name: Deploy second Zenko for PRA
-      run: bash deploy-zenko.sh end2end-pra default './configs/zenko.yaml' './configs/zenkoversion-pra.yaml'
+      run: bash deploy-zenko.sh end2end-pra default './configs/zenko.yaml'
       env:
         ZENKO_MONGODB_SHARDED: "true"
         ZENKO_MONGODB_DATABASE: "pradb"
+        EXTRA_COMPONENTS: |
+          drctl:
+            image: 'ghcr.io/scality/zenko-drctl'
+            tag: 'v1.0.2'
       working-directory: ./.github/scripts/end2end
     - name: Add Keycloak pra user and assign StorageManager role
       shell: bash

From ebffb82045a6695fdae97614c7838582469bbb2a Mon Sep 17 00:00:00 2001
From: KillianG
Date: Fri, 9 Aug 2024 08:44:01 +0200
Subject: [PATCH 22/25] Fix workflow

Issue: ZENKO-4773
---
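Note on this fix: the backbeat-config lookup below is keyed on
app.kubernetes.io/instance=${ZENKO_NAME} so the same helper can target
either the end2end or the end2end-pra instance, and the --if-not-exists
flags added in PATCH 20 are reverted here (PATCH 24 reintroduces them).
The secret-reading pattern these scripts rely on, as a self-contained
sketch (label and key names taken from the diffs below):

    #!/usr/bin/env bash
    set -euo pipefail
    ZENKO_NAME=${1:-end2end}
    # Select the instance's backbeat config secret by label, extract one key
    # with jsonpath (the dot in "config.json" must be escaped), then
    # base64-decode it back into plain JSON before querying it with jq.
    kubectl get secret \
        -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME} \
        -o jsonpath='{.items[0].data.config\.json}' | base64 -d | jq .kafka.hosts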
 .github/scripts/end2end/common.sh | 1 +
 .github/scripts/end2end/configure-e2e.sh | 10 +++++-----
 .github/scripts/end2end/run-e2e-ctst.sh | 2 +-
 .github/workflows/end2end.yaml | 7 ++++---
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/.github/scripts/end2end/common.sh b/.github/scripts/end2end/common.sh
index 41d6da2269..a8fc9d4246 100644
--- a/.github/scripts/end2end/common.sh
+++ b/.github/scripts/end2end/common.sh
@@ -8,3 +8,4 @@ get_token() {
     https://localhost/auth/realms/${OIDC_REALM}/protocol/openid-connect/token | \
     jq -cr '.id_token'
 }
+
diff --git a/.github/scripts/end2end/configure-e2e.sh b/.github/scripts/end2end/configure-e2e.sh
index 7fb7339081..dcb7f10c79 100755
--- a/.github/scripts/end2end/configure-e2e.sh
+++ b/.github/scripts/end2end/configure-e2e.sh
@@ -51,7 +51,7 @@ KAFKA_REGISTRY_NAME=$(yq eval ".kafka.sourceRegistry" ../../../solution/deps.yam
 KAFKA_IMAGE_NAME=$(yq eval ".kafka.image" ../../../solution/deps.yaml)
 KAFKA_IMAGE_TAG=$(yq eval ".kafka.tag" ../../../solution/deps.yaml)
 KAFKA_IMAGE=$KAFKA_REGISTRY_NAME/$KAFKA_IMAGE_NAME:$KAFKA_IMAGE_TAG
-KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
+KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME} \
     -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts)
 KAFKA_HOST_PORT=${KAFKA_HOST_PORT:1:-1}
 
@@ -63,10 +63,10 @@ kubectl run kafka-topics \
     --restart=Never \
     --attach=True \
     --command -- bash -c \
-    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
-    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
-    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
-    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists"
+    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
+    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
+    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT ; \
+    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT"
 
 kubectl run ${POD_NAME} \
     --image ${E2E_IMAGE} \
diff --git a/.github/scripts/end2end/run-e2e-ctst.sh b/.github/scripts/end2end/run-e2e-ctst.sh
index 52daac972a..6a0e3cd6dc 100755
--- a/.github/scripts/end2end/run-e2e-ctst.sh
+++ b/.github/scripts/end2end/run-e2e-ctst.sh
@@ -157,4 +157,4 @@ kubectl run $POD_NAME \
     }
   ]
 }
-}' -- ./run "$COMMAND" $WORLD_PARAMETERS --parallel $PARALLEL_RUNS --retry $RETRIES "$@" --retry-tag-filter @Flaky --format junit:$JUNIT_REPORT_PATH
+}' -- ./run "$COMMAND" $WORLD_PARAMETERS --parallel $PARALLEL_RUNS --retry $RETRIES --retry-tag-filter @Flaky --format junit:$JUNIT_REPORT_PATH "$@"
diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml
index 84b7443d52..1cde094fe4 100644
--- a/.github/workflows/end2end.yaml
+++ b/.github/workflows/end2end.yaml
@@ -92,6 +92,7 @@ env:
   KUBECTL_VERSION: "1.30.0"
   TILT_VERSION: "0.23.4"
   KIND_VERSION: "v0.12.0"
+  DRCTL_TAG: "v1.0.2"
   ZENKO_ENABLE_SOSAPI: false
   TIME_PROGRESSION_FACTOR: 1
   EXPIRE_ONE_DAY_EARLIER: true
@@ -387,7 +388,7 @@ jobs:
           context: ./tests/ctst
           build-args: |
             CTST_TAG=${{ env.CTST_TAG }}
-            DRCTL_TAG=v1.0.2
+            DRCTL_TAG=${{ env.DRCTL_TAG}}
           tags: "${{ env.E2E_CTST_IMAGE_NAME }}:${{ env.E2E_IMAGE_TAG }}"
           cache-from: type=gha,scope=end2end-ctst
           cache-to: type=gha,mode=max,scope=end2end-ctst
@@ -484,7 +485,7 @@ jobs:
         extra_components: |
           drctl:
             image: ghcr.io/scality/zenko-drctl
-            tag: v1.0.2
+            tag: ${{ env.DRCTL_TAG }}
     - name: Prepare PRA environment
       run: bash prepare-pra.sh
       working-directory: ./.github/scripts/end2end
@@ -496,7 +497,7 @@ jobs:
         EXTRA_COMPONENTS: |
           drctl:
             image: 'ghcr.io/scality/zenko-drctl'
-            tag: 'v1.0.2'
+            tag: ${{ env.DRCTL_TAG }}
       working-directory: ./.github/scripts/end2end
     - name: Add Keycloak pra user and assign StorageManager role
       shell: bash

From e7850f8fa3b50e52cee875f31ffea3e6993ea515 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Mon, 12 Aug 2024 09:26:16 +0200
Subject: [PATCH 23/25] Update configure-e2e.sh script for end2end testing

---
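Note on the KAFKA_HOST_PORT context lines in the diff below: "jq .kafka.hosts"
emits the value as a JSON string, surrounding double quotes included, and
the "${KAFKA_HOST_PORT:1:-1}" expansion strips the first and last characters
to remove them. A small illustration with a made-up config (the negative
length offset needs bash 4.2 or newer):

    #!/usr/bin/env bash
    CONFIG='{"kafka":{"hosts":"kafka-headless:9092"}}'  # sample value, not the real config
    HOSTS=$(echo "$CONFIG" | jq .kafka.hosts)  # yields "kafka-headless:9092", quotes included
    echo "${HOSTS:1:-1}"                       # kafka-headless:9092
    echo "$CONFIG" | jq -r .kafka.hosts        # same result; -r skips the quotes entirely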
 .github/scripts/end2end/configure-e2e.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/scripts/end2end/configure-e2e.sh b/.github/scripts/end2end/configure-e2e.sh
index 1d7bf58ece..e78a338951 100755
--- a/.github/scripts/end2end/configure-e2e.sh
+++ b/.github/scripts/end2end/configure-e2e.sh
@@ -51,7 +51,7 @@ KAFKA_REGISTRY_NAME=$(yq eval ".kafka.sourceRegistry" ../../../solution/deps.yam
 KAFKA_IMAGE_NAME=$(yq eval ".kafka.image" ../../../solution/deps.yaml)
 KAFKA_IMAGE_TAG=$(yq eval ".kafka.tag" ../../../solution/deps.yaml)
 KAFKA_IMAGE=$KAFKA_REGISTRY_NAME/$KAFKA_IMAGE_NAME:$KAFKA_IMAGE_TAG
-KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=${ZENKO_NAME} \
+KAFKA_HOST_PORT=$(kubectl get secret -l app.kubernetes.io/name=backbeat-config,app.kubernetes.io/instance=end2end \
     -o jsonpath='{.items[0].data.config\.json}' | base64 -di | jq .kafka.hosts)
 KAFKA_HOST_PORT=${KAFKA_HOST_PORT:1:-1}
 

From 3a85e4ae017feaecdf9df4837b11b05c67201a4b Mon Sep 17 00:00:00 2001
From: KillianG
Date: Mon, 12 Aug 2024 10:14:12 +0200
Subject: [PATCH 24/25] Update kafka topic creation script to use --if-not-exists flag

---
 .github/scripts/end2end/configure-e2e-ctst.sh | 4 ++--
 .github/scripts/end2end/configure-e2e.sh | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/scripts/end2end/configure-e2e-ctst.sh b/.github/scripts/end2end/configure-e2e-ctst.sh
index 9084153b9f..a1ab882ade 100755
--- a/.github/scripts/end2end/configure-e2e-ctst.sh
+++ b/.github/scripts/end2end/configure-e2e-ctst.sh
@@ -35,5 +35,5 @@ kubectl run kafka-topics \
     --restart=Never \
     --attach=True \
     --command -- bash -c \
-    "kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT"
+    "kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists "
diff --git a/.github/scripts/end2end/configure-e2e.sh b/.github/scripts/end2end/configure-e2e.sh
index e78a338951..e47496eaa5 100755
--- a/.github/scripts/end2end/configure-e2e.sh
+++ b/.github/scripts/end2end/configure-e2e.sh
@@ -63,10 +63,10 @@ kubectl run kafka-topics \
     --restart=Never \
     --attach=True \
     --command -- bash -c \
-    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT ; \
-    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT"
+    "kafka-topics.sh --create --topic $UUID.backbeat-replication-replay-0 --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $UUID.backbeat-data-mover --partitions 5 --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $NOTIF_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists ; \
+    kafka-topics.sh --create --topic $NOTIF_ALT_DEST_TOPIC --bootstrap-server $KAFKA_HOST_PORT --if-not-exists "
 
 kubectl run ${POD_NAME} \
     --image ${E2E_IMAGE} \
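Note on the two patches above: PATCH 24 restores the --if-not-exists flag
that PATCH 22 had dropped. Without it, "kafka-topics.sh --create" exits
non-zero when the topic already exists, so any re-run of the setup pod
fails; with it, topic creation becomes idempotent. A minimal demo against a
placeholder broker:

    #!/usr/bin/env bash
    set -e
    BOOTSTRAP=localhost:9092  # placeholder broker address
    # Both invocations succeed: the second is a no-op instead of an error.
    kafka-topics.sh --create --topic demo-topic --partitions 5 \
        --bootstrap-server "$BOOTSTRAP" --if-not-exists
    kafka-topics.sh --create --topic demo-topic --partitions 5 \
        --bootstrap-server "$BOOTSTRAP" --if-not-exists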
From 7499ce8eee4888e172587f1638632e4aff723258 Mon Sep 17 00:00:00 2001
From: KillianG
Date: Mon, 12 Aug 2024 10:51:01 +0200
Subject: [PATCH 25/25] Update zenkoversion.yaml to include extra components

---
 .github/scripts/end2end/configs/zenkoversion.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/scripts/end2end/configs/zenkoversion.yaml b/.github/scripts/end2end/configs/zenkoversion.yaml
index 5245596cc4..ecc2fe133c 100644
--- a/.github/scripts/end2end/configs/zenkoversion.yaml
+++ b/.github/scripts/end2end/configs/zenkoversion.yaml
@@ -127,6 +127,7 @@ spec:
         image: '${REDIS_EXPORTER_IMAGE}'
         tag: '${REDIS_EXPORTER_TAG}'
       kubedb: '${REDIS_KUBEDB_TAG}'
+    ${EXTRA_COMPONENTS}
   defaults:
     backbeatConcurrency:
       lifecycleBucketProcessor: 30