-
Notifications
You must be signed in to change notification settings - Fork 22
2140 lines (1909 loc) · 105 KB
/
yocto-build-deploy.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
name: "Yocto Build-Test-Deploy"

on:
  workflow_call:
    secrets:
      # BALENA_API_DEPLOY_KEY is a secret that should be specific to the runtime environment
      # It requires permissions to deploy hostApp releases, and fetch supervisor release images (via yocto recipes)
      BALENA_API_DEPLOY_KEY:
        description: balena API key for the deploy environment, used for deploying hostApps and fetching supervisor releases
        required: false
      # BALENA_API_TEST_KEY is a secret that should be specific to the runtime environment
      # It requires permissions to manage autokit workers, and create test fleets
      BALENA_API_TEST_KEY:
        description: balena API key for the test environment, used for finding autokit workers and creating test fleets
        required: false
      # Dockerhub secrets are used only for pulling the helper image for "Prepare files for S3" step - if we simplify this to not use the
      # helper image, these secrets can be removed
      DOCKERHUB_USER:
        description: Dockerhub user for pulling private helper images
        required: false
      DOCKERHUB_TOKEN:
        description: Dockerhub token for pulling private helper images
        required: false
      SIGN_KMOD_KEY_APPEND:
        description: Base64-encoded public key of a kernel module signing keypair
        required: false
      # SIGN_API_KEY is a secret that should be specific to the runtime environment
      # It requires permissions to access the image signing server
      SIGN_API_KEY:
        description: balena API key that provides access to the signing server
        required: false
      BALENAOS_CI_APP_PRIVATE_KEY:
        description: "GPG Private Key for GitHub App to generate ephemeral tokens (used with vars.BALENAOS_CI_APP_ID)"
        required: false
      PBDKF2_PASSPHRASE:
        description: "Passphrase used to encrypt/decrypt balenaOS assets at rest in GitHub."
        required: false
      YOCTO_CACHE_SECRET_KEY:
        description: "Self-hosted runner S3 secret key for the yocto-svcacct user."
        required: false
      YOCTO_SSH_PRIVATE_KEY_B64:
        description: "SSH key to access balena-os private repositories."
        required: false
    inputs:
      build-runs-on:
        description: The runner labels to use for the build job(s)
        required: false
        type: string
        default: >
          [
            "self-hosted",
            "X64",
            "yocto"
          ]
      device-repo:
        description: balenaOS device repository (owner/repo)
        required: false
        type: string
        default: ${{ github.repository }}
      device-repo-ref:
        description: balenaOS device repository tag, branch, or commit to build
        required: false
        type: string
        default: ${{ github.ref }}
      meta-balena-ref:
        description: meta-balena ref if not the currently pinned version
        required: false
        type: string
      yocto-scripts-ref:
        description: balena-yocto-scripts ref if not the currently pinned version
        required: false
        type: string
      machine:
        description: yocto board name
        required: true
        type: string
      slug:
        description: Device type slug to deploy to - defaults to machine name
        required: false
        type: string
      deploy-environment:
        description: The balena environment to use for hostApp deployment - includes the related vars and secrets
        required: false
        type: string
        default: balena-cloud.com
      signing-environment:
        description: The signing environment to use for the bitbake build - includes the related vars and secrets
        required: false
        type: string
        default: ''
      source-mirror-environment:
        description: The AWS environment to use for the S3 source mirror - includes related vars and OIDC role(s)
        required: false
        type: string
        # default: balena-production.us-east-1
      # This input exists because we want the option to not auto-finalise for some device types, even if they have tests and those tests pass - for example some custom device types, the customer doesn't want new releases published until they green light it
      finalize-on-push-if-tests-passed:
        description: Whether to finalize a hostApp container image to a balena environment, if tests pass.
        required: false
        type: boolean
        default: true # Default behaviour is auto-finalise if tests pass, unless opted out by customer
      # For use when we need to force deploy a release, for example after manual testing (negates finalize-on-push-if-tests-passed)
      force-finalize:
        description: Force deploy a finalized release
        required: false
        type: boolean
        default: false
      deploy-ami:
        description: Whether to deploy an AMI to AWS
        required: false
        type: boolean
        default: false # This only works currently for generic-amd64, so default to false, and enable only in the caller workflow for that DT
      sign-image:
        description: Whether to sign image for secure boot
        required: false
        type: boolean
        default: false # Always false by default, override on specific device types which this is relevant in the device repo
      build-args:
        description: Extra barys build arguments
        required: false
        type: string
      # Supported fields for the test matrix:
      # - test_suite: (required) The test suite to run. The valid test suites are `os`, `hup`, and `cloud`
      # - environment: (required) The balenaCloud environment to use for testing, e.g. `bm.balena-dev.com` or `balena-cloud.com`
      # - worker_type: The worker type to use for testing. The valid worker types are `qemu` and `testbot`. The default worker type is `testbot`
      # - worker_fleets: The testbot fleets for finding available Leviathan workers. Not used for QEMU workers. Can accept a list of apps separated by commas, no spaces in between
      # - test_org: The organization to use for testing cloud functionality. This default org is `testbot`
      # - runs_on: A JSON array of runner labels to use for the test job(s). For qemu workers use the labels `["self-hosted", "X64", "kvm"]`.
      # - secure_boot: (truthy) Enable secure boot testing flags QEMU_SECUREBOOT=1 and FLASHER_SECUREBOOT=1. Default is false.
      # To use specific settings for each test job, create an include array like this...
      # {"include": [
      #   {
      #     "test_suite": "os",
      #     "environment": "bm.balena-dev.com"
      #   },
      #   {
      #     "test_suite": "cloud",
      #     "environment": "balena-cloud.com",
      #     "test_org": "testbot"
      #   },
      #   {
      #     "test_suite": "hup",
      #     "environment": "balena-cloud.com",
      #     "worker_type": "qemu",
      #     "runs_on": ["self-hosted", "X64", "kvm"]
      #   }
      # ]}
      # Alternatively, you can have the matrix run on a combinatorial match on the provided values where every single permutation of the values will be executed ...
      # {
      #   "test_suite": ["os","cloud","hup"],
      #   "environment": ["bm.balena-dev.com"],
      #   "worker_type": ["qemu","testbot"],
      #   "runs_on": [["self-hosted", "X64", "kvm"]]
      # }
      test_matrix:
        description: "JSON Leviathan test matrix to use for testing. No tests will be run if not provided."
        required: false
        type: string
# https://docs.github.com/en/actions/using-jobs/using-concurrency
# https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/control-the-concurrency-of-workflows-and-jobs
# The following concurrency group cancels in-progress jobs or runs on pull_request events only;
# if github.head_ref is undefined, the concurrency group will fallback to the run ID,
# which is guaranteed to be both unique and defined for the run.
# From: https://github.com/orgs/community/discussions/69704#discussioncomment-7803351
# The available contexts for cancel-in-progress expressions are:
# - github: This context provides access to various GitHub-specific variables,
#   such as github.event_name, github.ref, and github.workflow.
# - inputs: This context allows you to access input parameters defined in the workflow.
#   This is particularly useful for conditional cancellation based on user-specified settings.
# - vars: This context provides access to workflow-defined variables,
#   which can be used to store intermediate values or constants.
# When evaluating expressions for cancel-in-progress, certain parameters may not be available at the time of evaluation.
# For instance, the github.job context is not accessible, as it's specific to the running job and not the concurrency group.
# Note that we do not use github.ref here, as PRs from forks will have a
# ref of 'refs/heads/master' and collide with each other. Instead, we use github.head_ref
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}-${{ inputs.machine }}-${{ inputs.deploy-environment }}
  # Cancel jobs in-progress for open PRs, but not merged or closed PRs, by checking for the merge ref.
  # Note that for pull_request_target events (PRs from forks), the github.ref value is
  # usually 'refs/heads/master' so we can't rely on that to determine if it is a merge event or not.
  # As a result pull_request_target events will never cancel in-progress jobs and will be queued instead.
  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}

env:
  WORKSPACE: ${{ github.workspace }}
  MACHINE: ${{ inputs.machine }}
  SLUG: ${{ inputs.slug || inputs.machine }}
  VERBOSE: verbose
  WORKFLOW_NAME: ${{ github.workflow }}

# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
permissions: {}
jobs:
  approved-commit:
    name: Approved commit
    runs-on: ubuntu-24.04
    permissions:
      pull-requests: write # Write is required to create PR comments for workflow approvals.
      contents: read
    steps:
      # Combining pull_request_target workflow trigger with an explicit checkout of an
      # untrusted PR is a dangerous practice that may lead to repository compromise.
      # https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/
      # This action requires approvals via reactions for each workflow run.
      # https://github.com/product-os/review-commit-action
      - name: Wait for approval on pull_request_target events
        if: github.event_name == 'pull_request_target' && github.event.pull_request.merged != true
        timeout-minutes: 90
        uses: product-os/review-commit-action@5de80f19607c9052fa1c3cce94c1d0571dfc13c7 # v0.2.3
        with:
          poll-interval: "10"
          allow-authors: false
# This job runs first and all other jobs depend on it.
# It is responsible for setting up the device-type and fetching the necessary information
# to build and deploy the device-type.
balena-lib:
name: Device info
runs-on: ubuntu-24.04
# Depend on approved-commit just so we don't run without approvals
needs:
- approved-commit
# This environment requires the following variables:
# - BALENA_HOST
# This environment requires the following secrets:
# - BALENA_API_DEPLOY_KEY - used to authenticate with the balena API
environment: ${{ inputs.deploy-environment || 'balena-cloud.com' }}
env:
automation_dir: "${{ github.workspace }}/balena-yocto-scripts/automation"
BALENARC_BALENA_URL: ${{ vars.BALENA_HOST || vars.BALENARC_BALENA_URL || 'balena-cloud.com' }}
permissions:
actions: read # We are fetching workflow run results of a merge commit when workflow is triggered by new tag, to see if tests pass
pull-requests: write # Read is required to fetch the PR that merged, in order to get the test results.
contents: read
defaults:
run:
working-directory: .
shell: bash --noprofile --norc -eo pipefail -x {0}
outputs:
device_slug: ${{ steps.balena-lib.outputs.device_slug }}
os_version: ${{ steps.balena-lib.outputs.os_version }}
meta_balena_version: ${{ steps.balena-lib.outputs.meta_balena_version }}
device_repo_revision: ${{ steps.balena-lib.outputs.device_repo_revision }}
yocto_scripts_ref: ${{ steps.balena-lib.outputs.yocto_scripts_ref }}
yocto_scripts_version: ${{ steps.balena-lib.outputs.yocto_scripts_version }}
deploy_artifact: ${{ steps.balena-lib.outputs.deploy_artifact }}
dt_arch: ${{ steps.balena-lib.outputs.dt_arch }}
is_private: ${{ steps.is-private.outputs.result }}
should_finalize: ${{ steps.merge-test-result.outputs.finalize == 'true' || inputs.force-finalize }}
is_esr: ${{ (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v20')) || (github.event_name == 'workflow_dispatch' && startsWith(github.ref_name, '20')) }}
deploy_path: ${{ github.workspace }}/deploy/${{ steps.balena-lib.outputs.device_slug }}/${{ steps.balena-lib.outputs.os_version }}
balena_host: ${{ env.BALENARC_BALENA_URL }}
steps:
# Generate an app installation token that has access to
# all repos where the app is installed (usually the whole org)
# Owner input to make token valid for all repositories in the org
# This behvaiour is required for private submodules
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1.11.6
id: app-token
with:
app-id: ${{ vars.BALENAOS_CI_APP_ID }}
private-key: ${{ secrets.BALENAOS_CI_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
# Generate another app token for the balena-io organization
# so we can checkout private contracts
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token (balena-io)
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1.11.6
id: app-token-balena-io
with:
app-id: ${{ vars.BALENAOS_CI_APP_ID }}
private-key: ${{ secrets.BALENAOS_CI_APP_PRIVATE_KEY }}
owner: balena-io
# https://github.com/actions/checkout
- name: Clone device repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: ${{ inputs.device-repo }}
token: ${{ steps.app-token.outputs.token || secrets.GITHUB_TOKEN }}
ref: ${{ inputs.device-repo-ref }} # In the case of a new tagged version, this will be the new tag, claimed from ${{ github.events.push.ref }}
submodules: true
fetch-depth: 0 # DEBUG - this is for testing on a device repo
fetch-tags: true
# Do not persist the app installation token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Checkout the right ref for meta-balena submodule
- name: Update meta-balena submodule to ${{ inputs.meta-balena-ref }}
if: inputs.meta-balena-ref != ''
working-directory: ./layers/meta-balena
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.meta-balena-ref }}"
git submodule update --init --recursive
# Checkout the right ref for balena-yocto-scripts submodule
- name: Update balena-yocto-scripts submodule to ${{ inputs.yocto-scripts-ref }}
if: inputs.yocto-scripts-ref != ''
working-directory: ./balena-yocto-scripts
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.yocto-scripts-ref }}"
git submodule update --init --recursive
# Check if the repository is a yocto device respository
- name: Device repository check
run: |
if [ "$(yq '.type' repo.yml)" != "yocto-based OS image" ]; then
echo "::error::Repository does not appear to be of type 'yocto-based OS image'"
exit 1
fi
# A lot of outputs inferred from here are used everywhere else in the workflow
- name: Set build outputs
id: balena-lib
env:
CURL: "curl --silent --retry 10 --location --compressed"
TRANSLATION: "v6"
BALENAOS_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
API_ENV: ${{ env.BALENARC_BALENA_URL }}
run: |
source "${automation_dir}/include/balena-api.inc"
source "${automation_dir}/include/balena-lib.inc"
./balena-yocto-scripts/build/build-device-type-json.sh
device_slug="$(balena_lib_get_slug "${SLUG}")"
echo "device_slug=${device_slug}" >>"${GITHUB_OUTPUT}"
# As we use this to determine the os version from the device repository - when checking out the repo we need enough fetch depth to get tags
os_version=$(git describe --abbrev=0)
echo "os_version=${os_version#v*}" >>"${GITHUB_OUTPUT}"
meta_balena_version="$(balena_lib_get_meta_balena_base_version)"
echo "meta_balena_version=${meta_balena_version}" >>"${GITHUB_OUTPUT}"
device_repo_revision="$(git rev-parse --short HEAD)"
echo "device_repo_revision=${device_repo_revision}" >>"${GITHUB_OUTPUT}"
yocto_scripts_ref="$(git submodule status balena-yocto-scripts | awk '{print $1}')"
echo "yocto_scripts_ref=${yocto_scripts_ref}" >>"${GITHUB_OUTPUT}"
yocto_scripts_version="$(cd balena-yocto-scripts && head -n1 VERSION)"
echo "yocto_scripts_version=${yocto_scripts_version}" >>"${GITHUB_OUTPUT}"
deploy_artifact="$(balena_lib_get_deploy_artifact "${SLUG}")"
echo "deploy_artifact=${deploy_artifact}" >>"${GITHUB_OUTPUT}"
dt_arch="$(balena_lib_get_dt_arch "${SLUG}")"
echo "dt_arch=${dt_arch}" >>"${GITHUB_OUTPUT}"
# Unrolled balena_api_is_dt_private function - https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L424
# Had to be unrolled due to this: https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-lib.inc#L191 function relying on a jenkins env var to select the balena env - so failed
# is_private=$(${CURL} -XGET -H "Content-type: application/json" -H "Authorization: bearer ${BALENAOS_TOKEN}" --silent --retry 5 "https://api.${API_ENV}/${TRANSLATION}/device_type?\$filter=slug%20eq%20%27${device_slug}%27&\$select=slug,is_private" | jq -r '.d[0].is_private')
# echo "is_private=${is_private}" >>"${GITHUB_OUTPUT}"
- name: Check if device-type is private
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
id: is-private
env:
API_ENV: ${{ env.BALENARC_BALENA_URL }}
TRANSLATION: "v6"
DEVICE_SLUG: ${{ steps.balena-lib.outputs.device_slug }}
with:
result-encoding: json
script: |
const result = await fetch(`https://api.${process.env.API_ENV}/${process.env.TRANSLATION}/device_type?\$filter=slug%20eq%20%27${process.env.DEVICE_SLUG}%27&\$select=slug,is_private`, {
headers: {
'Content-type': 'application/json',
'Authorization': `Bearer ${{ secrets.BALENA_API_DEPLOY_KEY }}`
}
})
const data = await result.json()
console.log(JSON.stringify(data, null, 2))
return data.d[0].is_private
# In the old workflow we had to fetch the merge commit, get the check runs from the PR, and check if a device type passed or failed
# reference: https://github.com/balena-os/github-workflows/blob/master/.github/workflows/build_and_deploy.yml#L89
# NOTE: This will not be necessary if we had a way to deploy artifacts and mark as final like with fleet releases
# We're also checking out the tag in this step, so the subsequent build is done from the tagged version of the device repo
- name: "Fetch merge commit"
id: set-merge-commit
if: ${{ github.event_name == 'push' }} # Only perform on push event - i.e a new version tag
run: |
merge_commit=$(git rev-parse :/"^Merge pull request")
echo "Found merge commit ${merge_commit}"
echo "merge_commit=${merge_commit}" >>"${GITHUB_OUTPUT}"
# This will control the deployment of the hostapp only - it will determine if it is marked as final or not
# The hostapp being finalised is what determines if the API will present this OS version to user
# If the test_matrix is empty - it means there are no tests for the DT - so don't check tests, and don't finalise, unless manually done with "force-finalize" input
- name: Check test results
# https://docs.github.com/en/actions/learn-github-actions/expressions#functions
# this expression checks that the test_matrix input is truthy - there is no test_matrix input provided in the device-repo workflow file, test results won't be checked, and
# the release can't be finlized
if: github.event_name == 'push' && inputs.test_matrix && inputs.finalize-on-push-if-tests-passed
id: merge-test-result
env:
REPO: ${{ inputs.device-repo }}
COMMIT: ${{ steps.set-merge-commit.outputs.merge_commit }}
# environment variables used by gh CLI
# https://cli.github.com/manual/gh_help_environment
GH_DEBUG: "true"
GH_PAGER: "cat"
GH_PROMPT_DISABLED: "true"
GH_REPO: "${{ github.repository }}"
GH_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
run: |
# Gets the PR number of the merge commit
prid=$(gh api -H "Accept: application/vnd.github+json" "/repos/${REPO}/commits/$COMMIT" --jq '.commit.message' | head -n1 | cut -d "#" -f2 | awk '{ print $1}')
# Gets the head commit of the PR - needed to fetch workflows ran on that commit
head=$(gh api -H "Accept: application/vnd.github+json" "/repos/${REPO}/pulls/${prid}" --jq '.head.sha')
# Fetching workflow runs and filtering by the commit of the head of the PR returns the latest attempts of the workflow for that commit
# Selecting for workflows with the same name as the workflow name ("github.workflow")
# There will be "pull_request" and "pull_request_trigger" triggered workflow runs in the response - one will be skipped, one will be success/fail
# So selecting for .conclusion==success will give us a response and evaluate to true in the following "if" statement if either we successful
passed="false"
conclusion="$(gh run list -w "${WORKFLOW_NAME}" -c "${head}" --json conclusion --jq '.[] | select(.conclusion == "success").conclusion')"
if [[ "${conclusion}" = "success" ]]; then
passed="true"
fi
echo "finalize=${passed}" >>"${GITHUB_OUTPUT}"
# This job is used to separate the AWS environment from the build environment,
# but still allow authentication to the AWS environment at build time.
source-mirror-setup:
name: Source mirror IAM role
runs-on: ubuntu-24.04
# Depend on approved-commit just so we don't run without approvals
needs:
- approved-commit
# This environment should contain the following variables:
# - AWS_IAM_ROLE: AWS IAM role to assume
# - SOURCE_MIRROR_S3_SSE_ALGORITHM: AWS S3 server-side encryption algorithm
# - SOURCE_MIRROR_S3_URL: AWS S3 URL of the source mirror
# - SOURCE_MIRROR_URL: HTTPS URL of the source mirror
# - SOURCE_MIRROR_REGION: AWS region of the source mirror
environment: ${{ inputs.source-mirror-environment || inputs.deploy-environment }}
outputs:
# Include a number of similar variable keys to allow for flexibility in the environment and backwards compatibility
aws-iam-role: ${{ vars.AWS_IAM_ROLE }}
aws-region: ${{ vars.SOURCE_MIRROR_REGION || vars.AWS_REGION || vars.AWS_S3_REGION || vars.S3_REGION || 'us-east-1' }}
s3-url: ${{ vars.SOURCE_MIRROR_S3_URL || vars.AWS_S3_URL || vars.S3_URL || 's3://yocto-72c1c258-81bb-11ef-b722-0efcede062c9/shared-downloads' }}
https-url: ${{ vars.SOURCE_MIRROR_URL || vars.AWS_S3_HTTPS_URL || vars.S3_HTTPS_URL || 'https://yocto-72c1c258-81bb-11ef-b722-0efcede062c9.s3.us-east-1.amazonaws.com/shared-downloads/' }}
aws-s3-sse-algorithm: ${{ vars.SOURCE_MIRROR_S3_SSE_ALGORITHM || vars.AWS_S3_SSE_ALGORITHM || vars.S3_SSE_ALGORITHM || vars.SSE_ALGORITHM || vars.SSE || 'AES256' }}
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
permissions:
id-token: write # This is required for requesting the JWT #https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#requesting-the-access-token
steps:
# We don't use this session, it's just to check if the credentials are valid
# https://github.com/aws-actions/configure-aws-credentials
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
continue-on-error: true # Don't fail at this point as there is still value in running builds and tests
with:
role-to-assume: ${{ vars.AWS_IAM_ROLE }}
role-session-name: github-${{ github.job }}-${{ github.run_id }}-${{ github.run_attempt }}
aws-region: ${{ vars.SOURCE_MIRROR_REGION || vars.AWS_REGION || vars.AWS_S3_REGION || vars.S3_REGION || 'us-east-1' }}
# https://github.com/orgs/community/discussions/26636#discussioncomment-3252664
mask-aws-account-id: false
build:
name: Build
runs-on: ${{ fromJSON(inputs.build-runs-on) }}
needs:
- approved-commit
- balena-lib
- source-mirror-setup
# This environment supports the following variables:
# - SIGN_API_URL
# - SIGN_GRUB_KEY_ID
# - SIGN_HAB_PKI_ID
# This environment also supports the following secrets:
# - SIGN_API_KEY
# - SIGN_KMOD_KEY_APPEND
# - YOCTO_SSH_PRIVATE_KEY_B64: used to pull private yocto sources from within the same organization
environment: ${{ inputs.signing-environment }}
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
# https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
permissions:
id-token: write # This is required for requesting the JWT #https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#requesting-the-access-token
packages: read
contents: read
env:
automation_dir: "${{ github.workspace }}/balena-yocto-scripts/automation"
BARYS_ARGUMENTS_VAR: ${{ inputs.build-args || '' }}
# https://docs.yoctoproject.org/3.1.21/overview-manual/overview-manual-concepts.html#user-configuration
# Create an autobuilder configuration file that is loaded before local.conf
AUTO_CONF_FILE: "${{ github.workspace }}/build/conf/auto.conf"
BALENARC_BALENA_URL: ${{ needs.balena-lib.outputs.balena_host }}
DEPLOY_PATH: ${{ github.workspace }}/deploy
defaults:
run:
working-directory: .
shell: bash --noprofile --norc -eo pipefail -x {0}
steps:
# this must be done before putting files in the workspace
# https://github.com/easimon/maximize-build-space
- name: Maximize build space
if: contains(fromJSON(inputs.build-runs-on), 'ubuntu-latest') == true
uses: easimon/maximize-build-space@fc881a613ad2a34aca9c9624518214ebc21dfc0c
with:
root-reserve-mb: "4096"
temp-reserve-mb: "1024"
swap-size-mb: "4096"
remove-dotnet: "true"
remove-android: "true"
remove-haskell: "true"
remove-codeql: "true"
remove-docker-images: "true"
# Generate an app installation token that has access to
# all repos where the app is installed (usually the whole org)
# Owner input to make token valid for all repositories in the org
# This behvaiour is required for private submodules
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1.11.6
id: app-token
with:
app-id: ${{ vars.BALENAOS_CI_APP_ID }}
private-key: ${{ secrets.BALENAOS_CI_APP_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
# Generate another app token for the balena-io organization
# so we can checkout private contracts
# https://github.com/actions/create-github-app-token
- name: Create GitHub App installation token (balena-io)
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1.11.6
id: app-token-balena-io
with:
app-id: ${{ vars.BALENAOS_CI_APP_ID }}
private-key: ${{ secrets.BALENAOS_CI_APP_PRIVATE_KEY }}
owner: balena-io
# https://github.com/actions/checkout
- name: Clone device repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: ${{ inputs.device-repo }}
token: ${{ steps.app-token.outputs.token || secrets.GITHUB_TOKEN }}
ref: ${{ inputs.device-repo-ref }} # In the case of a new tagged version, this will be the new tag, claimed from ${{ github.events.push.ref }}
submodules: true
fetch-depth: 0 # DEBUG - this is for testing on a device repo
fetch-tags: true
# Do not persist the app installation token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Checkout the right ref for meta-balena submodule
- name: Update meta-balena submodule to ${{ inputs.meta-balena-ref }}
if: inputs.meta-balena-ref != ''
working-directory: ./layers/meta-balena
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.meta-balena-ref }}"
git submodule update --init --recursive
# Checkout the right ref for balena-yocto-scripts submodule
- name: Update balena-yocto-scripts submodule to ${{ inputs.yocto-scripts-ref }}
if: inputs.yocto-scripts-ref != ''
working-directory: ./balena-yocto-scripts
run: |
git config --add remote.origin.fetch '+refs/pull/*:refs/remotes/origin/pr/*'
git fetch --all
git checkout --force "${{ inputs.yocto-scripts-ref }}"
git submodule update --init --recursive
- name: Checkout private Contracts
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: needs.balena-lib.outputs.is_private == 'true'
with:
repository: balena-io/private-contracts
token: ${{ steps.app-token-balena-io.outputs.token }}
path: ${{ github.workspace }}/private-contracts
# Do not persist the token credentials,
# and prefer that each step provide credentials where required
persist-credentials: false
# Unrolled balena_api_is_dt_private function - https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-api.inc#L424
# Had to be unrolled due to this: https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-lib.inc#L191 function relying on a jenkins env var to select the balena env - so failed
- name: Build OS contract
env:
CONTRACTS_BUILD_DIR: "${{ github.workspace }}/balena-yocto-scripts/build/contracts"
NODE: node
DEVICE_TYPE_SLUG: ${{ needs.balena-lib.outputs.device_slug }}
CONTRACTS_OUTPUT_DIR: "${{ github.workspace }}/build/contracts"
run: |
npm --prefix="${CONTRACTS_BUILD_DIR}" ci > /dev/null || (>&2 echo "[balena_lib_build_contracts]: npm failed installing dependencies" && return 1)
NODE_PATH="${CONTRACTS_BUILD_DIR}/node_modules" ${NODE} "${CONTRACTS_BUILD_DIR}/generate-oscontracts.js" > /dev/null
if [ -f "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml" ]; then
echo "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml"
else
>&2 echo "[balena_lib_build_contracts]: Failed to build OS contract for ${DEVICE_TYPE_SLUG}. Ensure a hw.deviceType contract is in the appropriate repo"
return 1
fi
# Move newly generated OS contract to location expected later on in the workflow
cp "${CONTRACTS_OUTPUT_DIR}/${DEVICE_TYPE_SLUG}/balena-os/balena.yml" "${WORKSPACE}/balena.yml"
# Causes tarballs of the source control repositories (e.g. Git repositories), including metadata, to be placed in the DL_DIR directory.
# https://docs.yoctoproject.org/4.0.5/ref-manual/variables.html?highlight=compress#term-BB_GENERATE_MIRROR_TARBALLS
# The github-script action is a safer method of writing to outputs and variables, vs a shell step.
# https://github.com/actions/github-script
- name: Enable mirror tarballs
  uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
  with:
    script: |
      // Append the BitBake flag to any existing barys arguments and export the
      // result so subsequent steps in this job see the updated variable.
      const existing = process.env.BARYS_ARGUMENTS_VAR || '';
      core.exportVariable('BARYS_ARGUMENTS_VAR', `${existing} -a BB_GENERATE_MIRROR_TARBALLS=1`);
- name: Enable signed images
  if: inputs.sign-image == true
  env:
    SIGN_API: "${{ vars.SIGN_API_URL || 'https://sign.balena-cloud.com' }}"
    SIGN_API_KEY: "${{ secrets.SIGN_API_KEY }}"
    SIGN_GRUB_KEY_ID: "${{ vars.SIGN_GRUB_KEY_ID || '2EB29B4CE0132F6337897F5FB8A88D1C62FCC729' }}"
    SIGN_KMOD_KEY_APPEND: "${{ secrets.SIGN_KMOD_KEY_APPEND }}"
    SIGN_HAB_PKI_ID: "${{ vars.SIGN_HAB_PKI_ID || '6d74b15cbc5df27fdc8d470a7c71edb3' }}"
  run: |
    # Forward every signing setting to barys as an extra `-a NAME=VALUE`
    # argument, then persist the accumulated argument string for later steps.
    BARYS_ARGUMENTS_VAR="${BARYS_ARGUMENTS_VAR} -a SIGN_API=${SIGN_API} -a SIGN_API_KEY=${SIGN_API_KEY} -a SIGN_GRUB_KEY_ID=${SIGN_GRUB_KEY_ID} -a SIGN_KMOD_KEY_APPEND=${SIGN_KMOD_KEY_APPEND} -a SIGN_HAB_PKI_ID=${SIGN_HAB_PKI_ID}"
    printf 'BARYS_ARGUMENTS_VAR=%s\n' "${BARYS_ARGUMENTS_VAR}" >>"${GITHUB_ENV}"
# https://docs.yoctoproject.org/4.0.10/ref-manual/classes.html?highlight=source_mirror#own-mirrors-bbclass
# https://github.com/openembedded/openembedded/blob/master/classes/own-mirrors.bbclass
# The own-mirrors class makes it easier to set up your own PREMIRRORS from which to first fetch source before
# attempting to fetch it from the upstream specified in SRC_URI within each recipe.
- name: Add S3 shared-downloads to MIRRORS
  # Only when the source-mirror-setup job produced a mirror URL.
  if: needs.source-mirror-setup.outputs.s3-url
  env:
    SOURCE_MIRROR_URL: ${{ needs.source-mirror-setup.outputs.s3-url }}
  run: |
    # NOTE(review): AUTO_CONF_FILE is assumed to come from the job-level env
    # (not visible in this chunk) -- confirm.
    mkdir -p "$(dirname "${AUTO_CONF_FILE}")"
    # Append fetcher MIRRORS entries pointing each scheme at the shared S3
    # mirror. The heredoc delimiter is unquoted, so ${SOURCE_MIRROR_URL} is
    # expanded at write time and each trailing `\\` collapses to a single `\`
    # (a bitbake line continuation) in the written conf fragment.
    # NOTE(review): `https?$://` mirrors the historical own-mirrors.bbclass
    # pattern, but the embedded `$` looks suspicious -- confirm it matches
    # https URLs on the bitbake version in use.
    cat <<EOF >> "${AUTO_CONF_FILE}"
    MIRRORS:append = "\\
    cvs://.*/.* ${SOURCE_MIRROR_URL} \\
    svn://.*/.* ${SOURCE_MIRROR_URL} \\
    git://.*/.* ${SOURCE_MIRROR_URL} \\
    hg://.*/.* ${SOURCE_MIRROR_URL} \\
    bzr://.*/.* ${SOURCE_MIRROR_URL} \\
    https?$://.*/.* ${SOURCE_MIRROR_URL} \\
    ftp://.*/.* ${SOURCE_MIRROR_URL} \\
    "
    EOF
    # Show the resulting conf fragment for debugging.
    cat "${AUTO_CONF_FILE}"
# Use local S3 cache on self-hosted runners
# https://github.com/tespkg/actions-cache
# https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
- name: Restore sstate cache
  id: sstate-restore
  uses: tespkg/actions-cache/restore@91b54a6e03abb8fcec12d3743633d23a1cfcd269 # v1.7.2
  # Unset AWS credentials so they don't override the minio credentials
  env:
    AWS_ACCESS_KEY_ID: yocto-svcacct
    AWS_SECRET_ACCESS_KEY: ${{ secrets.YOCTO_CACHE_SECRET_KEY }}
    AWS_SESSION_TOKEN: ''
    AWS_DEFAULT_REGION: local
    AWS_REGION: local
  with:
    # Local minio S3-compatible endpoint on the self-hosted runner network.
    endpoint: minio
    port: 9000
    insecure: "true"
    accessKey: yocto-svcacct
    secretKey: ${{ secrets.YOCTO_CACHE_SECRET_KEY }}
    bucket: yocto-cache
    region: local
    # Fall back to the GitHub-hosted cache backend only for public repos.
    use-fallback: ${{ github.event.repository.private != true }}
    # Exact key includes the commit SHA; restore-keys allows a prefix match so
    # any earlier sstate for this machine can seed the build.
    key: ${{ inputs.machine }}-sstate-${{ github.sha }}
    restore-keys: |
      ${{ inputs.machine }}-sstate-
    path: |
      ${{ github.workspace }}/shared/${{ inputs.machine }}/sstate
# Install openssh-client to use the ssh-agent
- name: Install openssh-client package
  run: |
    # ssh-agent/ssh-add (used by the build step) are provided by openssh-client.
    sudo apt-get update && sudo apt-get install -y openssh-client
# All preparation complete before this step
# Start building balenaOS
- name: Build
  id: build
  env:
    HELPER_IMAGE_REPO: ghcr.io/balena-os/balena-yocto-scripts
    SHARED_BUILD_DIR: ${{ github.workspace }}/shared
    YOCTO_SSH_PRIVATE_KEY_B64: ${{ secrets.YOCTO_SSH_PRIVATE_KEY_B64 }}
  run: |
    # When building for non-x86 device types, meson, after building binaries must try to run them via qemu if possible, maybe as some sanity check or test?
    # Therefore qemu must be used - and our runner mmap_min_addr is set to 4096 (default, set here: https://github.com/product-os/github-runner-kernel/blob/ef5a66951599dc64bf2920d896c36c6d9eda8df6/config/5.10/microvm-kernel-x86_64-5.10.config#L858
    # Using a value of 4096 leads to issues https://gitlab.com/qemu-project/qemu/-/issues/447 so we must set it to 65536
    # We do this in the workflow instead of the runner kernel as it makes this portable across runners
    sysctl vm.mmap_min_addr
    sudo sysctl -w vm.mmap_min_addr=65536
    sysctl vm.mmap_min_addr
    mkdir -p "${SHARED_BUILD_DIR}"
    # NOTE(review): AUTO_CONF_FILE and MACHINE are assumed to be set in the
    # job-level env (not visible in this chunk) -- confirm.
    cat "${AUTO_CONF_FILE}"
    >&2 eval "$(ssh-agent)"
    # Read the secret through the step env mapping declared above rather than
    # expanding the secrets expression inline in the script text: inline
    # expansion into run scripts is a quoting/script-injection hazard (see
    # GitHub's "Security hardening for GitHub Actions" guidance).
    echo "${YOCTO_SSH_PRIVATE_KEY_B64}" | base64 -d | ssh-add - >&2
    ./balena-yocto-scripts/build/balena-build.sh \
      -d "${MACHINE}" \
      -s "${SHARED_BUILD_DIR}" \
      -g "${BARYS_ARGUMENTS_VAR}" | tee balena-build.log
    # Fail if any bitbake task log recorded an error (the tee pipeline above
    # would otherwise mask the build script's exit status).
    if grep -R "ERROR: " build/tmp/log/*; then
      exit 1
    fi
    # NOTE(review): "suceeded" (sic) appears to match the misspelled success
    # message printed by the build scripts -- confirm against balena-build
    # output before correcting the spelling here.
    if ! grep -q "Build for ${{ inputs.machine }} suceeded" balena-build.log; then
      exit 1
    fi
# We don't need to encrypt these as they are not sensitive if the repo is public anyway.
# https://github.com/actions/upload-artifact
- name: Upload build logs
  uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
  # always upload build logs, even if the build fails
  if: always()
  with:
    name: build-logs
    if-no-files-found: error
    retention-days: 7
    compression-level: 7
    # Top-level build log plus per-recipe bitbake task logs and run scripts.
    path: |
      balena-build.log
      build/tmp/log/**/*.log
      build/tmp/work/**/run.*
      build/tmp/work/**/log.*
# If there was a cache miss for this key, save a new cache.
# Use local S3 cache on self-hosted runners.
# https://github.com/tespkg/actions-cache
# https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
- name: Save sstate cache
  uses: tespkg/actions-cache/save@91b54a6e03abb8fcec12d3743633d23a1cfcd269 # v1.7.2
  # Do not save cache for pull_request_target events
  # as they run in the context of the main branch and would be vulnerable to cache poisoning.
  # https://0xn3va.gitbook.io/cheat-sheets/ci-cd/github/actions#cache-poisoning
  # https://adnanthekhan.com/2024/05/06/the-monsters-in-your-build-cache-github-actions-cache-poisoning/
  # cache-hit is a step OUTPUT and step outputs are always strings, so compare
  # against the string 'true'. Comparing to the boolean `true` coerces both
  # operands to numbers ('true' -> NaN), making the condition always truthy and
  # re-uploading the cache even after an exact restore hit.
  if: steps.sstate-restore.outputs.cache-hit != 'true' && github.event_name != 'pull_request_target'
  # Unset AWS credentials so they don't override the minio credentials
  env:
    AWS_ACCESS_KEY_ID: yocto-svcacct
    AWS_SECRET_ACCESS_KEY: ${{ secrets.YOCTO_CACHE_SECRET_KEY }}
    AWS_SESSION_TOKEN: ''
    AWS_DEFAULT_REGION: local
    AWS_REGION: local
  with:
    endpoint: minio
    port: 9000
    insecure: "true"
    accessKey: yocto-svcacct
    secretKey: ${{ secrets.YOCTO_CACHE_SECRET_KEY }}
    bucket: yocto-cache
    region: local
    use-fallback: ${{ github.event.repository.private != true }}
    key: ${{ inputs.machine }}-sstate-${{ github.sha }}
    path: |
      ${{ github.workspace }}/shared/${{ inputs.machine }}/sstate
# https://github.com/unfor19/install-aws-cli-action
# https://github.com/aws/aws-cli/tags
# Install a pinned awscli version (version kept current by renovate).
- name: Setup awscli
  uses: unfor19/install-aws-cli-action@e8b481e524a99f37fbd39fdc1dcb3341ab091367 # v1
  env:
    # renovate: datasource=github-tags depName=aws/aws-cli
    AWSCLI_VERSION: 2.24.21
  with:
    version: "${{ env.AWSCLI_VERSION }}"
# https://github.com/aws-actions/configure-aws-credentials
- name: Configure AWS credentials
  uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
  with:
    # Assume the IAM role provided by the source-mirror-setup job
    # (presumably via OIDC, since no static access keys are supplied here -- confirm).
    role-to-assume: ${{ needs.source-mirror-setup.outputs.aws-iam-role }}
    # Unique session name so this workflow run can be traced in CloudTrail.
    role-session-name: github-${{ github.job }}-${{ github.run_id }}-${{ github.run_attempt }}
    aws-region: ${{ needs.source-mirror-setup.outputs.aws-region }}
    # https://github.com/orgs/community/discussions/26636#discussioncomment-3252664
    mask-aws-account-id: false
# Sync shared downloads to S3 to use as a sources mirror in case original sources are not available.
# Exclude all directories and temp files as we only want the content and the .done files.
# https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/sync.html
- name: Sync shared downloads to S3
  # Do not publish shared downloads for pull_request_target events to prevent cache poisoning
  # Do not publish shared downloads for private device-types as the mirror is public-read
  if: github.event_name != 'pull_request_target' && needs.balena-lib.outputs.is_private == 'false' && needs.source-mirror-setup.outputs.s3-url
  # Ignore errors for now, as we may have upload conflicts with other jobs
  continue-on-error: true
  env:
    SHARED_DOWNLOADS_DIR: ${{ github.workspace }}/shared/shared-downloads
    S3_SSE: ${{ needs.source-mirror-setup.outputs.aws-s3-sse-algorithm }}
    S3_URL: ${{ needs.source-mirror-setup.outputs.s3-url }}
    S3_REGION: ${{ needs.source-mirror-setup.outputs.aws-region }}
  # Create a symlink from the relative container path to the workspace in order
  # to resolve symlinks created in the build container runtime.
  run: |
    sudo ln -sf "${{ github.workspace }}" /work
    # Keep the glob OUTSIDE the quotes: quoting it passed a literal "*" path to
    # du, which failed -- and with continue-on-error set, the failure silently
    # skipped the aws s3 sync below.
    du -cksh "${SHARED_DOWNLOADS_DIR}"/*
    # Sync only top-level content and .done stamps; exclude subdirectories and temp files.
    aws s3 sync --sse="${S3_SSE}" "${SHARED_DOWNLOADS_DIR}/" "${S3_URL}/" \
      --exclude "*/*" --exclude "*.tmp" --size-only --follow-symlinks --no-progress
# TODO: Unroll balena_deploy_artifacts into the workflow shell directly
# and only package up what is needed for s3 deploy, hostapp deploy, and leviathan tests.
# Note that the option to remove compressed files is set to true, as we want to avoid duplicate image files in the upload,
# and they can be uncompressed in the s3 deploy step.
- name: Prepare artifacts
  env:
    ARTIFACTS_TAR: "${{ runner.temp }}/artifacts.tar.zst"
  run: |
    # Ensure `zip` is available -- presumably required by balena_deploy_artifacts
    # (sourced below); confirm against balena-deploy.inc.
    if ! command -v zip; then
      sudo apt-get update
      sudo apt-get install -y zip
    fi
    # NOTE(review): automation_dir, SLUG, DEPLOY_PATH and WORKSPACE are assumed
    # to come from the job-level env (not visible in this chunk) -- confirm.
    source "${automation_dir}/include/balena-deploy.inc"
    # https://github.com/balena-os/balena-yocto-scripts/blob/master/automation/include/balena-deploy.inc#L23
    # Third argument `true`: remove compressed files to avoid duplicate image
    # files in the upload; they are re-uncompressed in the s3 deploy step.
    balena_deploy_artifacts "${SLUG}" "${DEPLOY_PATH}" true
    # Include the OS contract generated earlier in this job.
    cp -v "${WORKSPACE}/balena.yml" "${DEPLOY_PATH}/balena.yml"
    # Report artifact sizes for debugging, then pack everything into a single
    # zstd-compressed tarball for upload.
    du -cksh "${DEPLOY_PATH}"
    find "${DEPLOY_PATH}" -type f -exec du -h {} \;
    tar -I zstd -cf "${ARTIFACTS_TAR}" -C "${DEPLOY_PATH}" .
    du -h "${ARTIFACTS_TAR}"
# Encrypt artifacts and remove the original tarball so it doesn't get uploaded
- name: Encrypt artifacts
  if: inputs.sign-image || needs.balena-lib.outputs.is_private == 'true'
  env:
    ARTIFACTS_TAR: "${{ runner.temp }}/artifacts.tar.zst"
    ARTIFACTS_ENC: "${{ runner.temp }}/artifacts.tar.zst.enc"
    # NOTE(review): the secret name spells "PBDKF2" (sic); kept as-is to match
    # the configured repository secret. The algorithm used below is PBKDF2.
    PBDKF2_PASSPHRASE: ${{ secrets.PBDKF2_PASSPHRASE }}
  run: |
    # Symmetric AES-256-CBC encryption with a salted, PBKDF2-derived key
    # (310k iterations, SHA-256). Downstream jobs decrypt with the same parameters.
    openssl enc -v -e -aes-256-cbc -k "${PBDKF2_PASSPHRASE}" -pbkdf2 -iter 310000 -md sha256 -salt -in "${ARTIFACTS_TAR}" -out "${ARTIFACTS_ENC}"
    # Remove the plaintext tarball so only the encrypted copy gets uploaded.
    rm "${ARTIFACTS_TAR}"
# Upload either the encrypted or the unencrypted artifacts, whichever is present
# https://github.com/actions/upload-artifact
- name: Upload artifacts
  uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
  env:
    ARTIFACTS_TAR: "${{ runner.temp }}/artifacts.tar.zst"
    ARTIFACTS_ENC: "${{ runner.temp }}/artifacts.tar.zst.enc"
  with:
    name: build-artifacts
    # Fail loudly if neither the plain nor the encrypted tarball exists.
    if-no-files-found: error
    retention-days: 3
    compression-level: 7
    # Only one of these two paths exists at a time: the encrypt step (signed
    # images / private device types) removes the plain tarball and leaves .enc.
    path: |
      ${{ env.ARTIFACTS_TAR }}
      ${{ env.ARTIFACTS_ENC }}
##############################
# hostapp Deploy
##############################
hostapp-deploy:
name: Deploy hostApp
runs-on: ${{ fromJSON(inputs.build-runs-on) }}
# We want to push a hostapp on push events (PR merge) or dispatch
# These conditions should match s3-deploy
# Force finalize will finalize no matter what - so we want to make sure there is something to finalize - so it will always trigger this job if true
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' || inputs.force-finalize
needs:
- approved-commit
- build
- balena-lib
# This environment should contain the following variables:
# - BALENA_HOST
# - HOSTAPP_ORG
# This environment should contain the following secrets:
# - BALENA_API_DEPLOY_KEY
environment: ${{ inputs.deploy-environment }}
env:
BALENARC_BALENA_URL: ${{ vars.BALENA_HOST || vars.BALENARC_BALENA_URL || 'balena-cloud.com' }}
DEPLOY_PATH: ${{ github.workspace }}/deploy
HOSTAPP_ORG: ${{ vars.HOSTAPP_ORG || 'balena_os' }}
steps:
# https://github.com/actions/download-artifact
- name: Fetch build artifacts
  uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4
  with:
    # Artifact uploaded by the build job: either artifacts.tar.zst or its
    # encrypted .enc counterpart, downloaded into the runner temp directory.
    name: build-artifacts
    path: ${{ runner.temp }}
- name: Decrypt artifacts
  # Mirrors the build job's encrypt condition: artifacts are encrypted only for
  # signed images and private device types.
  if: inputs.sign-image || needs.balena-lib.outputs.is_private == 'true'
  env:
    ARTIFACTS_TAR: "${{ runner.temp }}/artifacts.tar.zst"
    ARTIFACTS_ENC: "${{ runner.temp }}/artifacts.tar.zst.enc"
    PBDKF2_PASSPHRASE: ${{ secrets.PBDKF2_PASSPHRASE }}
  run: |
    # Parameters must match the encrypt step exactly
    # (AES-256-CBC, PBKDF2, 310k iterations, SHA-256, salted).
    openssl enc -v -d -aes-256-cbc -k "${PBDKF2_PASSPHRASE}" -pbkdf2 -iter 310000 -md sha256 -salt -in "${ARTIFACTS_ENC}" -out "${ARTIFACTS_TAR}"
# Only decompress the balena-image.docker and balena.yml files for hostapp deployment
# List the contents of the tar file to make sure we're decompressing the right files
- name: Decompress artifacts
  env:
    ARTIFACTS_TAR: "${{ runner.temp }}/artifacts.tar.zst"
  run: |
    set -x
    mkdir -p "${DEPLOY_PATH}"
    # List the archive contents first for traceability.
    tar -tf "${ARTIFACTS_TAR}"
    # Extract only the files needed for the hostapp deploy.
    tar -I zstd -xvf "${ARTIFACTS_TAR}" -C "${DEPLOY_PATH}" ./balena-image.docker ./balena.yml
    # NOTE(review): WORKSPACE is assumed to be set at job level (not visible in
    # this chunk) -- confirm.
    cp -v "${DEPLOY_PATH}/balena.yml" "${WORKSPACE}/balena.yml"
# Install a pinned balena CLI version and authenticate against the target API.
- name: Setup balena CLI
  uses: balena-io-examples/setup-balena-action@41338eb4bb2b2e8b239d8ca5b8523d1a707333bf # v0.0.6
  env:
    # renovate: datasource=github-releases depName=balena-io/balena-cli
    BALENA_CLI_VERSION: v20.2.10
  with:
    # balena CLI version to install
    cli-version: ${{ env.BALENA_CLI_VERSION }}
    # balenaCloud API token to login automatically
    balena-token: ${{ secrets.BALENA_API_DEPLOY_KEY }}
# TODO: replace this with balena-io/deploy-to-balena-action when it supports deploy-only
# https://github.com/balena-io/deploy-to-balena-action/issues/286
- name: Deploy to balena
id: deploy-hostapp
env:
# BALENA_API_DEPLOY_KEY is a secret that should be specific to the runtime environment
# It requires permissions to deploy hostApp releases, and fetch supervisor release images (via yocto recipes)
# This step should never run untrusted user code, as we have a secret in the environment
BALENAOS_TOKEN: ${{ secrets.BALENA_API_DEPLOY_KEY }}
BALENAOS_ACCOUNT: ${{ env.HOSTAPP_ORG }}
SLUG: "${{ needs.balena-lib.outputs.device_slug }}"
APPNAME: "${{ needs.balena-lib.outputs.device_slug }}"
DEVICE_REPO_REV: "${{ needs.balena-lib.outputs.device_repo_revision }}"
META_BALENA_VERSION: "${{ needs.balena-lib.outputs.meta_balena_version }}"
RELEASE_VERSION: "${{ needs.balena-lib.outputs.os_version }}"
BOOTABLE: 1
TRANSLATION: "v6"
FINAL: ${{ needs.balena-lib.outputs.should_finalize }}
ESR: "${{ needs.balena-lib.outputs.is_esr }}"
balenaCloudEmail: # TODO: currently trying to use named API key only, its possible email/pw auth no longer has the additional privileges that it used to
balenaCloudPassword: # TODO: currently trying to use named API key only, its possible email/pw auth no longer has the additional privileges that it used to
CURL: "curl --silent --retry 10 --location --compressed"
VERSION: ${{ needs.balena-lib.outputs.os_version }}
# Used when creating a new hostapp APP - to give the relevant access to the relevant team
HOSTAPP_ACCESS_TEAM: OS%20Devs
HOSTAPP_ACCESS_ROLE: developer
API_ENV: ${{ env.BALENARC_BALENA_URL }}
run: |