#
# A dCache build/deploy/test pipeline file.
#
# The following environment variables are injected by GitLab CI:
#
# DCACHE_ORG_PGP_KEY: GPG key used to sign RPM and DEB packages
# DCACHE_ORG_KEY_NAME: GPG key name
# DCACHE_ORG_PGP_KEY_PASS: GPG key password
#
# PKG_UPLOAD_URL: URL to upload dCache release packages to
# PKG_UPLOAD_USER: user name to use for authorization
# PKG_UPLOAD_PASS: password
#
# DOCKER_HUB_USER: user name on docker hub
# DOCKER_HUB_ACCESS_KEY: access key or password of the docker hub user
#
# KUBECONFIG: env file that contains the kubernetes configuration used to access the cluster
#
# The dCache deployment in kubernetes is managed by the helm chart at
# https://gitlab.desy.de/dcache/dcache-helm
#
# The kubernetes-based jobs don't directly use any job artefacts, so
# artefact fetching is explicitly disabled by default (dependencies: []).
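#
# The sign_* jobs below decode DCACHE_ORG_PGP_KEY with 'base64 -d', so the
# variable is expected to carry a base64-encoded secret key. A sketch of how
# such a value could be produced (assuming the key is already present in the
# local keyring under $DCACHE_ORG_KEY_NAME):
#
#   gpg --export-secret-keys "$DCACHE_ORG_KEY_NAME" | base64 -w0
#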
stages:
  - build
  - sign
  - testenv_pre
  - test_infra
  - test_deploy
  - testing
  - testenv_post
  - upload

variables:
  MAVEN_CLI_OPTS: "--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true -Dmaven.repo.local=.m2/repository"
  K8S_NAMESPACE: dcache-build-$CI_PIPELINE_ID
  CHECK_TIMEOUT: --timeout=300s
  HELM_OPTS: --replace --timeout 10m0s
  AUTOCA_URL: https://ci.dcache.org/ca
  DCACHE_HELM_REPO: https://gitlab.desy.de/api/v4/projects/7648/packages/helm/test
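# Each pipeline gets its own namespace: for a hypothetical pipeline id 12345,
# K8S_NAMESPACE expands to 'dcache-build-12345', which the testenv_pre job
# creates and the testenv_post jobs tear down.
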
# let's debug the node where the job is running
before_script:
  - |
    set +x
    echo "============== GitLab Agent =============="
    uname -a
    echo "Runner : $CI_RUNNER_DESCRIPTION"
    echo "Runner id : $CI_RUNNER_ID"
    echo "Runner version : $CI_RUNNER_VERSION"
    echo "Runner revision : $CI_RUNNER_REVISION"
    echo "Runner tags : $CI_RUNNER_TAGS"
    echo "=========================================="

default:
  retry:
    max: 2
    when:
      - runner_system_failure
      - api_failure

#
# default tags and image for testing stages/kubernetes/helm
#
.kubernetes_tags:
  tags:
    - kubernetes
    - dcache-dev
  dependencies: []

.kubernetes_image:
  extends: .kubernetes_tags
  image: bitnami/kubectl:latest

.helm_image:
  extends: .kubernetes_tags
  image:
    name: dtzar/helm-kubectl:latest
    entrypoint: ['']

#
# default cache configuration for maven build jobs:
# cache downloaded dependencies and plugins between builds.
# To keep the cache across branches add 'key: "$CI_JOB_NAME"'
#
.build_cache:
  cache:
    key:
      files:
        - pom.xml
      prefix: "$CI_JOB_NAME"
    paths:
      - ./.m2/repository

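#
# Jobs opt into this cache with 'extends', as the build jobs below do.
# Minimal sketch (job name is hypothetical):
#
#   some_build_job:
#     extends: .build_cache
#     script:
#       - mvn $MAVEN_CLI_OPTS package
#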
#
# default rules for the upload stage: run only in pipelines created for a tag
#
.upload_rules:
  rules:
    - if: $CI_COMMIT_TAG

rpm:
  stage: build
  image: dcache/maven-java17-rpm-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -Drun.slow.tests -am -pl packages/fhs -P rpm clean package
  artifacts:
    reports:
      junit:
        - "**/target/surefire-reports/TEST-*.xml"
        - "**/target/failsafe-reports/TEST-*.xml"
    paths:
      - "packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm"
    expire_in: 2 days

srm_client_rpm:
  stage: build
  image: dcache/maven-java17-rpm-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl modules/srm-client package -P rpm
  artifacts:
    paths:
      - "modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm"
    expire_in: 2 days

deb:
  stage: build
  image: dcache/maven-java17-deb-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/fhs -P deb clean package
  artifacts:
    paths:
      - "packages/fhs/target/dcache_*.deb"
    expire_in: 2 days

tar:
  stage: build
  image: dcache/maven-java17-tar-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar clean package
  artifacts:
    paths:
      - "packages/tar/target/dcache-*.tar.gz"
    expire_in: 2 days

spotbugs:
  stage: build
  image: dcache/maven-java17-tar-build
  extends: .build_cache
  script:
    - dnf -y -q install jq
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar package com.github.spotbugs:spotbugs-maven-plugin:4.8.3.0:spotbugs verify
    - find . -name gl-code-quality-report.json -print | xargs cat | jq -s "add" > merged-gl-code-quality-report.json
  artifacts:
    reports:
      codequality:
        - merged-gl-code-quality-report.json
    paths:
      - merged-gl-code-quality-report.json
    expire_in: 2 days

container:
  stage: build
  # For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
  # Only debug/*-debug versions of the Kaniko image are known to work within Gitlab CI
  image: gcr.io/kaniko-project/executor:debug
  needs:
    - tar
  dependencies:
    - tar
  script:
    - |-
      tag=$CI_COMMIT_SHORT_SHA
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        tag=$CI_COMMIT_TAG
      fi
    - mkdir maven
    - tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
    - cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
    - mkdir -p /kaniko/.docker
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
    - >
      /kaniko/executor
      --label dcache.build=testing
      --context $CI_PROJECT_DIR
      --dockerfile $CI_PROJECT_DIR/Dockerfile
      --destination $CI_REGISTRY_IMAGE:$tag

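#
# The image is pushed to the GitLab registry and later consumed by the
# 'Deploy dCache Helm Chart' job. A sketch of how it could be checked
# locally ('abc1234' stands in for a short commit SHA):
#
#   docker pull $CI_REGISTRY_IMAGE:abc1234
#   docker inspect --format '{{ index .Config.Labels "dcache.build" }}' $CI_REGISTRY_IMAGE:abc1234
#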
sign_rpm:
  stage: sign
  image: almalinux:9-minimal
  needs: ["rpm"]
  script:
    - microdnf install -y rpm-sign
    - echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
    - gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
    - gpg -a --export "$DCACHE_ORG_KEY_NAME" > RPM-GPG-KEY
    - rpmsign --addsign --define "_signature gpg" --define "_gpg_name $DCACHE_ORG_KEY_NAME" --define "_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase $DCACHE_ORG_PGP_KEY_PASS" packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
    - rpmkeys --import RPM-GPG-KEY
    - rpm --checksig -v packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
  artifacts:
    paths:
      - packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm

sign_srm_client_rpm:
  stage: sign
  image: almalinux:9-minimal
  needs: ["srm_client_rpm"]
  script:
    - microdnf install -y rpm-sign
    - echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
    - gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
    - gpg -a --export "$DCACHE_ORG_KEY_NAME" > RPM-GPG-KEY
    - rpmsign --addsign --define "_signature gpg" --define "_gpg_name $DCACHE_ORG_KEY_NAME" --define "_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase $DCACHE_ORG_PGP_KEY_PASS" modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm
    - rpmkeys --import RPM-GPG-KEY
    - rpm --checksig -v modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm
  artifacts:
    paths:
      - modules/srm-client/target/rpmbuild/RPMS/noarch/dcache-srmclient*.rpm

sign_deb:
  stage: sign
  image: ubuntu:22.04
  needs: ["deb"]
  script:
    - apt-get -qq update
    - apt-get -qq install debsigs gpg
    - echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
    - gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
    - echo $DCACHE_ORG_PGP_KEY_PASS > $HOME/.gnupg/gpg-passphrase
    - echo "passphrase-file $HOME/.gnupg/gpg-passphrase" >> "$HOME/.gnupg/gpg.conf"
    - echo 'allow-loopback-pinentry' >> "$HOME/.gnupg/gpg-agent.conf"
    - echo 'pinentry-mode loopback' >> "$HOME/.gnupg/gpg.conf"
    - echo 'use-agent' >> "$HOME/.gnupg/gpg.conf"
    - echo RELOADAGENT | gpg-connect-agent
    - debsigs --sign=origin --verify --check -v -k "$DCACHE_ORG_KEY_NAME" packages/fhs/target/dcache_*.deb
  artifacts:
    paths:
      - packages/fhs/target/dcache_*.deb

install_rpm:
  stage: test_deploy
  image: centos:7
  script:
    - yum --nogpgcheck install -y packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm

#install_deb:
#  stage: test_deploy
#  image: ubuntu:21.10
#  script:
#    - apt-get update
#    - DEBIAN_FRONTEND=noninteractive apt install -y -f ./packages/fhs/target/dcache_*.deb

upload_rpm:
  stage: upload
  image: almalinux:9-minimal
  dependencies:
    - sign_rpm
  extends: .upload_rules
  script:
    - RPM_NAME=`ls packages/fhs/target/rpmbuild/RPMS/noarch/ | grep dcache`
    - VERSION=`echo $RPM_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
    - curl -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/rpmbuild/RPMS/noarch/$RPM_NAME --ftp-create-dirs "$PKG_UPLOAD_URL/$VERSION/$RPM_NAME"

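# Worked example of the version parsing above: for a hypothetical
# dcache-9.2.22-1.noarch.rpm, "cut -d'-' -f 2" yields '9.2.22' and
# "cut -d'.' -f 1,2" reduces it to '9.2', so the file lands under
# $PKG_UPLOAD_URL/9.2/.
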
upload_srm_client_rpm:
  stage: upload
  image: almalinux:9-minimal
  dependencies:
    - sign_srm_client_rpm
  extends: .upload_rules
  script:
    - RPM_NAME=`ls modules/srm-client/target/rpmbuild/RPMS/noarch/ | grep dcache-srmclient`
    - VERSION=`echo $RPM_NAME | cut -d'-' -f 3 | cut -d'.' -f 1,2`
    - curl -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file modules/srm-client/target/rpmbuild/RPMS/noarch/$RPM_NAME --ftp-create-dirs "$PKG_UPLOAD_URL/$VERSION/$RPM_NAME"

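# Field 3 here (rather than 2) because the package name itself contains a
# hyphen: for a hypothetical dcache-srmclient-9.2.0-1.noarch.rpm, field 3 is
# '9.2.0', which reduces to a VERSION of '9.2'.
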
upload_deb:
  stage: upload
  image: almalinux:9-minimal
  dependencies:
    - sign_deb
  extends: .upload_rules
  script:
    - DEB_NAME=`ls packages/fhs/target/ | grep dcache`
    - VERSION=`echo $DEB_NAME | cut -d'_' -f 2 | cut -d'.' -f 1,2`
    - curl -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/$DEB_NAME --ftp-create-dirs "$PKG_UPLOAD_URL/$VERSION/$DEB_NAME"

upload_tar:
  stage: upload
  image: almalinux:9-minimal
  dependencies:
    - tar
  extends: .upload_rules
  script:
    - TAR_NAME=`ls packages/tar/target/ | grep dcache`
    - VERSION=`echo $TAR_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
    - curl -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/tar/target/$TAR_NAME --ftp-create-dirs "$PKG_UPLOAD_URL/$VERSION/$TAR_NAME"

upload_container:
  stage: upload
  # For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
  # Only debug/*-debug versions of the Kaniko image are known to work within Gitlab CI
  image: gcr.io/kaniko-project/executor:debug
  dependencies:
    - tar
  extends: .upload_rules
  script:
    - |-
      tag=$CI_COMMIT_SHORT_SHA
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        tag=$CI_COMMIT_TAG
      fi
    - mkdir maven
    - tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
    - cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
    - mkdir -p /kaniko/.docker
    - echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_ACCESS_KEY\"}}}" > /kaniko/.docker/config.json
    - >
      /kaniko/executor
      --label dcache.build=GA
      --context $CI_PROJECT_DIR
      --dockerfile $CI_PROJECT_DIR/Dockerfile
      --destination dcache/dcache:$tag

#
# This job needs the number of changes fetched from GitLab when cloning the
# repository to be high enough to generate the changelog.
#
Generate release notes:
  image: almalinux:9-minimal
  stage: upload
  extends: .upload_rules
  dependencies:
    - sign_deb
    - sign_rpm
    - sign_srm_client_rpm
    - tar
  script:
    - microdnf install -y git-core
    - .ci/generate-changelog.sh >> release-$CI_COMMIT_TAG.md
  artifacts:
    paths:
      - release-*.md

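#
# One way to guarantee a sufficient fetch depth (an assumption; the depth is
# normally set in the project's CI settings) would be to disable shallow
# cloning for this job:
#
#   variables:
#     GIT_DEPTH: "0"
#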
#
# prepare the kubernetes env for the build
#
Prepare k8s environment:
  stage: testenv_pre
  extends: .kubernetes_image
  script:
    - kubectl create namespace ${K8S_NAMESPACE}

#
# collect all logs
#
Collect container logs:
  stage: testenv_post
  extends: .kubernetes_image
  when: always
  allow_failure: true
  script:
    - kubectl -n $K8S_NAMESPACE get pods | grep Running | awk '{print $1}' | xargs -n1 kubectl -n $K8S_NAMESPACE logs | tee $K8S_NAMESPACE.log
    - kubectl -n $K8S_NAMESPACE run -ti --rm --image=edenhill/kcat:1.7.1 kcat -- kcat -C -t billing -b billing-kafka:9092 -p 0 -e -q > $K8S_NAMESPACE-billing.json || true
  artifacts:
    name: "logs-$CI_PIPELINE_ID"
    paths:
      - "$K8S_NAMESPACE.log"
      - "$K8S_NAMESPACE-billing.json"

#
# dispose of kubernetes resources
#
Clean k8s environment:
  stage: testenv_post
  extends: .kubernetes_image
  needs:
    - Collect container logs
  when: always
  script:
    - kubectl delete namespace ${K8S_NAMESPACE} --grace-period=1 --ignore-not-found=true

#
# infrastructure required to run dCache
#
Deploy 3rd party infrastructure services:
  stage: test_infra
  extends: .helm_image
  script:
    - helm repo add bitnami https://charts.bitnami.com/bitnami
    - helm repo update
    - helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set auth.username=dcache --set auth.password=let-me-in --set auth.database=chimera chimera bitnami/postgresql
    - helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait cells bitnami/zookeeper
    - helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set externalZookeeper.servers=cells-zookeeper --set kraft.enabled=false billing bitnami/kafka --version 23.0.7

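#
# The releases above provide the in-cluster service names that the dCache
# chart and the log collection rely on, e.g. 'chimera-postgresql' and
# 'billing-kafka:9092'. A connectivity sketch, reusing the kcat image from
# the log-collection job:
#
#   kubectl -n $K8S_NAMESPACE run -ti --rm --image=edenhill/kcat:1.7.1 kcat -- kcat -L -b billing-kafka:9092
#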
Add CVMFS Volume:
  stage: test_infra
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/cvmfs-volume-storageclass-pvc.yaml

Deploy dCache Helm Chart:
  stage: test_deploy
  extends: .helm_image
  script:
    - |-
      tag=$CI_COMMIT_SHORT_SHA
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        tag=$CI_COMMIT_TAG
      fi
    - helm repo add dcache ${DCACHE_HELM_REPO}
    - helm repo update
    - helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --wait --set image.tag=${tag} --set image.repository=${CI_REGISTRY_IMAGE} store dcache/dcache

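#
# '--wait' blocks until the chart's pods report Ready; a manual follow-up
# check could look like this (release name 'store' as installed above):
#
#   helm -n $K8S_NAMESPACE status store
#   kubectl -n $K8S_NAMESPACE get pods
#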
#
# Grid tests expect cluster-wide cvmfs volumes to be present.
#
# The deployment can be done as:
#
#   kubectl create -f .ci/cvmfs-storageclass.yaml
#   helm repo add cern https://registry.cern.ch/chartrepo/cern
#   helm repo update
#   helm -n dcache-dev-infrastructure install -f .ci/values-cvmfs.yaml cvmfs cern/cvmfs-csi
#
# More info: https://github.com/cvmfs-contrib/cvmfs-csi/tree/master/docs
#
Grid EL7 WN tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod grid-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE exec grid-tester -- ls /cvmfs/grid.cern.ch/umd-c7wn-latest/etc/profile.d/setup-c7-wn-example.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/init-grid-ui.sh grid-tester:/init-grid-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-grid-tests.sh grid-tester:/run-grid-tests.sh
    - kubectl -n $K8S_NAMESPACE exec grid-tester -- /bin/sh /run-grid-tests.sh
    - kubectl -n $K8S_NAMESPACE cp grid-tester:/xunit .
  artifacts:
    reports:
      junit:
        - "xunit*.xml"

gsi_xroot_tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs-xroot.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod xroot-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE exec xroot-tester -- ls /cvmfs/grid.cern.ch/umd-c7wn-latest/etc/profile.d/setup-c7-wn-example.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/init-grid-ui.sh xroot-tester:/init-grid-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-xroot-tests.sh xroot-tester:/run-xroot-tests.sh
    - kubectl -n $K8S_NAMESPACE exec xroot-tester -- /bin/sh /run-xroot-tests.sh

webdav_with_x509_tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/webdav-wn-cvmfs.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod webdav-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE exec webdav-tester -- ls /cvmfs/grid.cern.ch/umd-c7wn-latest/etc/profile.d/setup-c7-wn-example.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/init-grid-ui.sh webdav-tester:/init-grid-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-webdav-tests.sh webdav-tester:/run-webdav-tests.sh
    - kubectl -n $K8S_NAMESPACE exec webdav-tester -- /bin/sh /run-webdav-tests.sh

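#
# The pynfs job below runs the NFS v4.0 and v4.1/v4.2 suites while excluding
# the 'no*'-prefixed known-failing tests, then derives the job status from
# the xunit reports: the sed expressions collect the errors="..." and
# failures="..." counters, and the final 'exit' sums them. For example, two
# errors in the v4.0 report plus one failure in the v4.1 report exit with
# status 3, failing the job; a clean run exits with 0.
#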
NFS4.x protocol compliance tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl config set-context --current --namespace=${K8S_NAMESPACE}
    - kubectl run pynfs-tester --image=dcache/pynfs:0.5 --restart=Never --command -- sleep 3600
    - while ! kubectl wait --for=condition=Ready pod pynfs-tester; do sleep 1; done
    - kubectl exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --maketree store-door-svc:/data OPEN5; exit 0"
    - |-
      kubectl exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --xml=/xunit-report-v40.xml \
        --noinit store-door-svc:/data all \
        noACC2a noACC2b noACC2c noACC2d noACC2f noACC2r noACC2s \
        noCID1 noCID2 noCID4a noCID4b noCID4c noCID4d noCID4e \
        noCLOSE10 noCLOSE12 noCLOSE5 noCLOSE6 noCLOSE8 noCLOSE9 \
        noCMT1aa noCMT1b noCMT1c noCMT1d noCMT1e noCMT1f noCMT2a noCMT2b noCMT2c noCMT2d noCMT2f \
        noCMT2s noCMT3 noCMT4 noCR12 noLKT1 noLKT2a noLKT2b noLKT2c noLKT2d noLKT2f noLKT2s noLKT3 \
        noLKT4 noLKT6 noLKT7 noLKT8 noLKT9 noLKU10 noLKU3 noLKU4 noLKU5 noLKU6 noLKU6b noLKU7 noLKU8 \
        noLKU9 noLKUNONE noLOCK12a noLOCK12b noLOCK13 noLOCKRNG noLOCKCHGU noLOCKCHGD noRLOWN3 \
        noOPCF1 noOPCF6 noOPDG2 noOPDG3 noOPDG6 noOPDG7 noOPEN15 noOPEN18 noOPEN2 noOPEN20 noOPEN22 \
        noOPEN23 noOPEN24 noOPEN26 noOPEN27 noOPEN28 noOPEN3 noOPEN30 noOPEN4 noRENEW3 noRD1 noRD10 \
        noRD2 noRD3 noRD5 noRD5a noRD6 noRD7a noRD7b noRD7c noRD7d noRD7f noRD7s noRDDR12 noRDDR11 \
        noRPLY1 noRPLY10 noRPLY12 \
        noRPLY14 noRPLY2 noRPLY3 noRPLY5 noRPLY6 noRPLY7 noRPLY8 noRPLY9 noSATT3d noSATT4 noSATT6d \
        noSATT6r noSATT18 noSEC7 noWRT1 noWRT11 noWRT13 noWRT14 noWRT15 noWRT18 noWRT19 noWRT1b noWRT2 \
        noWRT3 noWRT6a noWRT6b noWRT6c noWRT6d noWRT6f noWRT6s noWRT8 noWRT9; \
        exit 0"
    - |-
      kubectl exec pynfs-tester -- /bin/bash -c "/run-nfs4.1.sh --minorversion=2 --xml=/xunit-report-v41.xml \
        --noinit store-door-svc:/data all xattr \
        noCOUR2 noCSESS25 noCSESS26 noCSESS27 noCSESS28 noCSESS29 noCSID3 noCSID4 noCSID9 noEID5f \
        noEID50 noOPEN31 noSEQ6 noRECC3 noSEQ7 noSEQ10b noSEQ2 noXATT11 noXATT10 noALLOC1 noALLOC2 noALLOC3; \
        exit 0"
    - kubectl cp pynfs-tester:/xunit-report-v40.xml xunit-report-v40.xml
    - kubectl cp pynfs-tester:/xunit-report-v41.xml xunit-report-v41.xml
    - nfs40_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
    - nfs40_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
    - nfs41_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
    - nfs41_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
    - exit $(( $nfs40_errors + $nfs41_errors + $nfs40_failures + $nfs41_failures ))
  environment: testing
  artifacts:
    reports:
      junit:
        - "xunit*.xml"