# build_test_images.yaml
name: Build, publish and test images

on:
  workflow_call:

jobs:
  read_builds:
    runs-on: ubuntu-latest
    outputs:
      builds: ${{ steps.builds-as-json.outputs.builds }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3

      - name: Install script dependencies
        run: pip install -r ./requirements.txt

      - name: Get builds as JSON
        id: builds-as-json
        run: ./bin/builds-as-json
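
  # Illustrative only: the exact JSON emitted by ./bin/builds-as-json is defined by that script,
  # but the matrix 'include' below implies a list of objects with at least these keys:
  #   [{"name": "...", "template": "...", "var-files": "..."}]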
  build_images:
    needs: [read_builds]
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.read_builds.outputs.builds) }}
    name: ${{ matrix.name }}
    permissions:
      contents: read
      packages: write
      id-token: write  # required to get an OIDC token for signing
      security-events: write  # required to upload SARIF files
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Write OpenStack credentials
        run: echo "$OS_CLOUDS" > ./clouds.yaml
        env:
          OS_CLOUDS: ${{ secrets.OS_CLOUDS }}
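
      # The OS_CLOUDS secret is expected to hold a standard OpenStack clouds.yaml document,
      # selected later via OS_CLOUD. A minimal sketch, assuming the usual openstacksdk layout
      # (cloud name and auth values are placeholders, not taken from this repository):
      #   clouds:
      #     my-cloud:
      #       auth:
      #         auth_url: https://keystone.example.com:5000
      #         application_credential_id: "..."
      #         application_credential_secret: "..."
      #       auth_type: v3applicationcredential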

      - name: Set up Packer environment
        run: ./bin/setup
        env:
          PACKER_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Build image
        id: build-image
        run: ./bin/build-image
        env:
          PACKER_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Build the image on the target cloud
          OS_CLOUD: ${{ vars.TARGET_CLOUD }}
          ENVIRONMENT: ${{ vars.TARGET_CLOUD }}
          PACKER_TEMPLATE: ${{ matrix.template }}
          ENV_VAR_FILES: ${{ matrix.var-files }}

      - name: Install cosign
        uses: sigstore/[email protected]

      - name: Publish image
        id: publish-image
        run: ./bin/publish-image
        env:
          IMAGE_NAME: ${{ steps.build-image.outputs.image-name }}
          IMAGE_DISK_FORMAT: ${{ steps.build-image.outputs.image-disk-format }}
          IMAGE_SOURCE_FILE: ${{ steps.build-image.outputs.image-source-file }}
          # Upload images to a fixed S3 host
          S3_HOST: ${{ vars.S3_HOST }}
          S3_BUCKET: ${{ vars.S3_BUCKET }}
          S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          S3_SECRET_KEY: ${{ secrets.S3_SECRET_KEY }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Install libguestfs
        run: sudo apt-get -y install libguestfs-tools

      - name: Create mount point for the image filesystem
        run: sudo mkdir -p './${{ steps.publish-image.outputs.image-name }}'

      - name: Mount the qcow2 image read-only
        run: sudo guestmount -a ${{ steps.publish-image.outputs.image-name }}.qcow2 -i --ro -o allow_other './${{ steps.publish-image.outputs.image-name }}'
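
      # The image's root filesystem is now mounted read-only at ./<image name>; this is the
      # path that the Trivy filesystem scan below inspects.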

      # - name: Run Trivy vulnerability scanner
      #   uses: aquasecurity/[email protected]
      #   with:
      #     scan-type: fs
      #     scan-ref: "./${{ steps.publish-image.outputs.image-name }}"
      #     scanners: "vuln"
      #     format: sarif
      #     output: "${{ steps.publish-image.outputs.image-name }}.sarif"
      #     # limit to high and critical as we get too many results for GH security otherwise
      #     limit-severities-for-sarif: 'true'
      #     severity: 'HIGH,CRITICAL'
      #   env:
      #     TRIVY_DB_REPOSITORY: ghcr.io/azimuth-cloud/trivy-db:2
      # - name: Upload Trivy scan results to GitHub Security tab
      #   uses: github/codeql-action/upload-sarif@v3
      #   with:
      #     sarif_file: "${{ steps.publish-image.outputs.image-name }}.sarif"
      #     category: "${{ matrix.name }}"

      - name: Fail if scan has CRITICAL vulnerabilities
        uses: aquasecurity/[email protected]
        with:
          scan-type: fs
          scan-ref: "./${{ steps.publish-image.outputs.image-name }}"
          scanners: "vuln"
          format: table
          exit-code: '1'
          severity: 'CRITICAL'
          ignore-unfixed: true
        env:
          TRIVY_DB_REPOSITORY: ghcr.io/azimuth-cloud/trivy-db:2

      - name: Write matrix outputs
        uses: cloudposse/[email protected]
        with:
          matrix-step-name: ${{ github.job }}
          matrix-key: ${{ matrix.name }}
          outputs: |-
            name: ${{ steps.publish-image.outputs.image-name }}
            url: ${{ steps.publish-image.outputs.image-url }}
            checksum: ${{ steps.publish-image.outputs.image-checksum }}
            cosign-bundle-url: ${{ steps.publish-image.outputs.cosign-bundle-url }}
            manifest-extra: ${{ steps.build-image.outputs.manifest-extra }}

  publish_manifest:
    # This job should always run, even when some builds fail, but it must wait for the build matrix
    needs: [build_images]
    if: ${{ always() }}
    runs-on: ubuntu-latest
    outputs:
      manifest-url-encoded: ${{ steps.encode-manifest-url.outputs.encoded }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3

      - name: Read matrix outputs
        id: matrix-outputs
        uses: cloudposse/[email protected]
        with:
          matrix-step-name: build_images

      - name: Write outputs
        uses: DamianReeves/write-file-action@0a7fcbe1960c53fc08fe789fa4850d24885f4d84
        with:
          path: build-outputs.json
          write-mode: overwrite
          contents: ${{ steps.matrix-outputs.outputs.result }}
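
      # build-outputs.json aggregates, per matrix name, the outputs written by each build_images
      # job above (name, url, checksum, cosign-bundle-url, manifest-extra); ./bin/generate-manifest
      # consumes it via BUILD_OUTPUTS_FILE below. The exact JSON layout is determined by the
      # matrix-outputs actions.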

      - name: Generate manifest
        run: ./bin/generate-manifest
        env:
          BUILD_OUTPUTS_FILE: ./build-outputs.json
          MANIFEST_FILE: ./manifest.json

      - name: Install s3cmd
        run: |
          sudo apt-get update -y
          sudo apt-get install -y s3cmd

      - name: Publish manifest to S3
        id: publish-manifest
        run: ./bin/publish-manifest
        env:
          MANIFEST_FILE: ./manifest.json
          S3_HOST: ${{ vars.S3_HOST }}
          S3_BUCKET: ${{ vars.S3_BUCKET }}
          S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          S3_SECRET_KEY: ${{ secrets.S3_SECRET_KEY }}

      # The manifest URL produced by publish-manifest is a signed URL, so it embeds the S3
      # access key. That key does not strictly need to be kept secret, but it is supplied via
      # a GitHub secret, and GitHub refuses to pass job outputs containing secrets between jobs.
      # To work around this, we encrypt the manifest URL with GPG and expose the encrypted,
      # base64-encoded value as the output of this job.
      - name: Encrypt manifest URL using GPG
        id: encode-manifest-url
        run: |
          result=$(gpg --symmetric --batch --passphrase "${PASSPHRASE}" --output - <(echo "${INPUT}") | base64 -w0)
          echo "encoded=${result}" >> $GITHUB_OUTPUT
        env:
          PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
          INPUT: ${{ steps.publish-manifest.outputs.manifest-url }}

  run_azimuth_tests:
    needs: [publish_manifest]
    runs-on: ubuntu-latest
    steps:
      # The manifest URL produced by publish-manifest is a signed URL, so it embeds the S3
      # access key. That key does not strictly need to be kept secret, but it is supplied via
      # a GitHub secret, and GitHub refuses to pass job outputs containing secrets between jobs.
      # To work around this, the previous job encrypted the manifest URL with GPG, so it must
      # be decrypted here before it can be used.
      - name: Decrypt manifest URL using GPG
        id: decode-manifest-url
        run: |
          result=$(gpg --decrypt --quiet --batch --passphrase "${PASSPHRASE}" --output - <(echo "${INPUT}" | base64 -d))
          echo "decoded=${result}" >> $GITHUB_OUTPUT
        env:
          PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
          INPUT: ${{ needs.publish_manifest.outputs.manifest-url-encoded }}

      # Check out the configuration repository
      - name: Set up Azimuth environment
        uses: azimuth-cloud/azimuth-config/.github/actions/setup@devel
        with:
          os-clouds: ${{ secrets.OS_CLOUDS }}
          target-cloud: ${{ vars.TARGET_CLOUD }}
          environment-prefix: images-ci
          # Use the manifest that we just built
          # We want to run all the CaaS tests except Slurm
          # We want to run the Kubernetes tests _for all Kubernetes versions_
          # We don't need to run the apps tests
          extra-vars: |
            community_images_azimuth_images_manifest_url: ${{ steps.decode-manifest-url.outputs.decoded }}
            generate_tests_caas_test_case_slurm_enabled: false
            generate_tests_kubernetes_test_cases_latest_only: false
            generate_tests_kubernetes_apps_suite_enabled: false
        # GitHub terminates jobs after 6 hours
        # We don't want jobs to acquire the lock and then get timed out before they can finish,
        # so wait a maximum of 3 hours to acquire the lock, leaving 3 hours for the other tasks in the job
        timeout-minutes: 180

      - name: Provision Azimuth
        uses: azimuth-cloud/azimuth-config/.github/actions/provision@devel

      - name: Run Azimuth tests
        uses: azimuth-cloud/azimuth-config/.github/actions/test@devel

      - name: Destroy Azimuth
        uses: azimuth-cloud/azimuth-config/.github/actions/destroy@devel
        if: ${{ always() }}

  # Purge the images that we just tested from OpenStack
  purge_images:
    needs: [run_azimuth_tests]
    runs-on: ubuntu-latest
    steps:
      - name: Check out the repository
        uses: actions/checkout@v3

      - name: Install s3cmd
        run: |
          sudo apt-get update -y
          sudo apt-get install -y s3cmd

      - name: Install script dependencies
        run: pip install -r ./requirements.txt

      - name: Write OpenStack credentials
        run: echo "$OS_CLOUDS" > ./clouds.yaml
        env:
          OS_CLOUDS: ${{ secrets.OS_CLOUDS }}

      - name: Purge images for manifest
        run: ./bin/purge-images "${GITHUB_SHA}.manifest"
        env:
          REPO_ROOT: ${{ github.workspace }}
          OS_CLOUD: ${{ vars.TARGET_CLOUD }}
          S3_HOST: ${{ vars.S3_HOST }}
          S3_BUCKET: ${{ vars.S3_BUCKET }}
          S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
          S3_SECRET_KEY: ${{ secrets.S3_SECRET_KEY }}