name: android-perf

on:
  schedule:
    - cron: 0 0 * * *
  # Note: GitHub has an upper limit of 10 inputs
  workflow_dispatch:
    inputs:
      models:
        description: Models to be benchmarked
        required: false
        type: string
        default: stories110M
      devices:
        description: Target devices to run benchmark
        required: false
        type: string
        default: samsung_galaxy_s22
      delegates:
        description: Backend delegates
        required: false
        type: string
        default: xnnpack
      threadpool:
        description: Run with threadpool?
        required: false
        type: boolean
        default: false
      benchmark_configs:
        description: The list of configs used in the benchmark
        required: false
        type: string
      test_spec:
        description: The test spec to drive the test on AWS devices
        required: false
        type: string
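  # For reference, one way to trigger an on-demand run with these inputs is the GitHub CLI,
  # e.g. (illustrative values only):
  #   gh workflow run android-perf.yml -f models=stories110M,mv3 -f devices=samsung_galaxy_s22 -f delegates=xnnpack,qnn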
  workflow_call:
    inputs:
      models:
        description: Models to be benchmarked
        required: false
        type: string
        default: stories110M
      devices:
        description: Target devices to run benchmark
        required: false
        type: string
        default: samsung_galaxy_s22
      delegates:
        description: Backend delegates
        required: false
        type: string
        default: xnnpack
      threadpool:
        description: Run with threadpool?
        required: false
        type: boolean
        default: false
      benchmark_configs:
        description: The list of configs used in the benchmark
        required: false
        type: string
      test_spec:
        description: The test spec to drive the test on AWS devices
        required: false
        type: string
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true
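# Job graph: set-parameters computes the benchmark matrices; export-models and build-benchmark-app
# produce the model and app artifacts; benchmark-on-device runs them on AWS Device Farm; and
# upload-benchmark-results collects and publishes the measurements.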
jobs:
  set-parameters:
    runs-on: linux.2xlarge
    outputs:
      models: ${{ steps.set-parameters.outputs.models }}
      devices: ${{ steps.set-parameters.outputs.devices }}
      delegates: ${{ steps.set-parameters.outputs.delegates }}
    steps:
      - name: Set parameters
        id: set-parameters
        shell: bash
        env:
          # Keep default values separate from the workflow_dispatch inputs so that defaults are
          # still available during scheduled runs, and so that on-demand and periodic benchmarking
          # can use different defaults.
          CRON_DEFAULT_MODELS: "stories110M,dl3,mv3,mv2,ic4,ic3,vit"
          CRON_DEFAULT_DEVICES: "samsung_galaxy_s22"
          CRON_DEFAULT_DELEGATES: "xnnpack,qnn"
        run: |
          set -ex
          MODELS="${{ inputs.models }}"
          if [ -z "$MODELS" ]; then
            MODELS="$CRON_DEFAULT_MODELS"
          fi
          DEVICES="${{ inputs.devices }}"
          if [ -z "$DEVICES" ]; then
            DEVICES="$CRON_DEFAULT_DEVICES"
          fi
          DELEGATES="${{ inputs.delegates }}"
          if [ -z "$DELEGATES" ]; then
            DELEGATES="$CRON_DEFAULT_DELEGATES"
          fi

          # Map device names to their corresponding device-pool-arn
          declare -A DEVICE_POOL_ARNS
          DEVICE_POOL_ARNS[samsung_galaxy_s22]="arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/e59f866a-30aa-4aa1-87b7-4510e5820dfa"
          DEVICE_POOL_ARNS[samsung_galaxy_s24]="arn:aws:devicefarm:us-west-2:308535385114:devicepool:02a2cf0f-6d9b-45ee-ba1a-a086587469e6/98f8788c-2e25-4a3c-8bb2-0d1e8897c0db"

          # Resolve device names to their corresponding ARNs. If the input is not already a JSON
          # array (jq fails to parse it), split the comma-separated list into one.
          if ! echo "$DEVICES" | jq empty 2>/dev/null; then
            DEVICES=$(echo "$DEVICES" | jq -Rc 'split(",")')
          fi
          declare -a MAPPED_ARNS=()
          for DEVICE in $(echo "$DEVICES" | jq -r '.[]'); do
            if [[ -z "${DEVICE_POOL_ARNS[$DEVICE]}" ]]; then
              echo "Error: No ARN found for device '$DEVICE'. Abort." >&2
              exit 1
            fi
            MAPPED_ARNS+=("${DEVICE_POOL_ARNS[$DEVICE]}")
          done

          echo "models=$(echo $MODELS | jq -Rc 'split(",")')" >> $GITHUB_OUTPUT
          MAPPED_ARNS_JSON=$(printf '%s\n' "${MAPPED_ARNS[@]}" | jq -R . | jq -s .)
          echo "devices=$(echo "$MAPPED_ARNS_JSON" | jq -c .)" >> $GITHUB_OUTPUT
          echo "delegates=$(echo $DELEGATES | jq -Rc 'split(",")')" >> $GITHUB_OUTPUT
  export-models:
    name: export-models
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    needs: set-parameters
    strategy:
      matrix:
        model: ${{ fromJson(needs.set-parameters.outputs.models) }}
        delegate: ${{ fromJson(needs.set-parameters.outputs.delegates) }}
      fail-fast: false
    with:
      runner: linux.4xlarge
      docker-image: executorch-ubuntu-22.04-qnn-sdk
      submodules: 'true'
      timeout: 60
      upload-artifact: android-models
      upload-artifact-to-s3: true
      script: |
        # The generic Linux job chooses to use the base env, not the one set up by the image
        echo "::group::Setting up dev environment"
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"
        if [[ ${{ matrix.delegate }} == "qnn" ]]; then
          PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
          PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
        fi
        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
        ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded/${{ matrix.model }}_${{ matrix.delegate }}
        echo "::endgroup::"

        echo "::group::Exporting ${{ matrix.delegate }} model: ${{ matrix.model }}"
        BUILD_MODE="cmake"
        DTYPE="fp32"

        if [[ ${{ matrix.model }} =~ ^stories* ]]; then
          # Install requirements for export_llama
          PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
          # Test llama2
          if [[ ${{ matrix.delegate }} == "xnnpack" ]]; then
            DELEGATE_CONFIG="xnnpack+custom+qe"
          elif [[ ${{ matrix.delegate }} == "qnn" ]]; then
            DELEGATE_CONFIG="qnn"
          else
            echo "Unsupported delegate ${{ matrix.delegate }}"
            exit 1
          fi
          PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh "${{ matrix.model }}" "${BUILD_MODE}" "${DTYPE}" "${DELEGATE_CONFIG}" "${ARTIFACTS_DIR_NAME}"
        else
          PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${{ matrix.model }}" "${BUILD_MODE}" "${{ matrix.delegate }}" "${ARTIFACTS_DIR_NAME}"
        fi
        echo "::endgroup::"
  build-benchmark-app:
    name: build-benchmark-app
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    needs: set-parameters
    with:
      runner: linux.2xlarge
      docker-image: executorch-ubuntu-22.04-clang12-android
      submodules: 'true'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 90
      upload-artifact: android-apps
      upload-artifact-to-s3: true
      script: |
        set -eux

        # The generic Linux job chooses to use the base env, not the one set up by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"

        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake
        export ARTIFACTS_DIR_NAME=artifacts-to-be-uploaded

        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh

        export ANDROID_ABIS="arm64-v8a"
        PYTHON_EXECUTABLE=python EXECUTORCH_BUILD_QNN=ON QNN_SDK_ROOT=/tmp/qnn/2.25.0.240728 bash build/build_android_llm_demo.sh ${ARTIFACTS_DIR_NAME}
  # Let's see how expensive this job is; we might want to tone it down by running it periodically
  benchmark-on-device:
    if: always()
    permissions:
      id-token: write
      contents: read
    uses: pytorch/test-infra/.github/workflows/mobile_job.yml@main
    needs:
      - set-parameters
      - build-benchmark-app
      - export-models
    strategy:
      matrix:
        model: ${{ fromJson(needs.set-parameters.outputs.models) }}
        delegate: ${{ fromJson(needs.set-parameters.outputs.delegates) }}
        device: ${{ fromJson(needs.set-parameters.outputs.devices) }}
      fail-fast: false
    with:
      # Due to scheduling, a job may be pushed beyond the default 60m threshold
      timeout: 120
      device-type: android
      runner: linux.2xlarge
      test-infra-ref: ''
      # This is the ARN of the ExecuTorch project on AWS
      project-arn: arn:aws:devicefarm:us-west-2:308535385114:project:02a2cf0f-6d9b-45ee-ba1a-a086587469e6
      device-pool-arn: ${{ matrix.device }}
      android-app-archive: https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/minibench/app-debug.apk
      android-test-archive: https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/minibench/app-debug-androidTest.apk
      # NB: Need to set the default spec here so that it works for periodic runs too
      test-spec: ${{ inputs.test_spec || 'https://ossci-android.s3.amazonaws.com/executorch/android-llm-device-farm-test-spec.yml' }}
      # Uploaded to S3 from the previous job
      extra-data: https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.delegate }}/model.zip
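  # Pull the Device Farm job outputs from S3, extract the benchmark results JSON per job, and
  # publish them through the shared test-infra upload action.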
  upload-benchmark-results:
    needs:
      - benchmark-on-device
    if: always()
    runs-on: linux.2xlarge
    environment: upload-benchmark-results
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: false

      - name: Authenticate with AWS
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-benchmark-results
          # The max duration enforced by the server side
          role-duration-seconds: 18000
          aws-region: us-east-1

      - name: Setup conda
        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
        with:
          python-version: '3.10'

      - name: Download the list of artifacts from S3
        env:
          ARTIFACTS_S3_DIR: s3://gha-artifacts/device_farm/${{ github.run_id }}/${{ github.run_attempt }}/artifacts/
        shell: bash
        run: |
          set -eux
          ${CONDA_RUN} python -mpip install awscli==1.32.18

          mkdir -p artifacts
          pushd artifacts
          ${CONDA_RUN} aws s3 sync "${ARTIFACTS_S3_DIR}" .
          popd

          ls -lah artifacts

      - name: Extract the benchmark results JSON
        shell: bash
        run: |
          set -eux

          mkdir -p benchmark-results

          for ARTIFACTS_BY_JOB in artifacts/*.json; do
            [ -f "${ARTIFACTS_BY_JOB}" ] || break
            echo "${ARTIFACTS_BY_JOB}"
            ${CONDA_RUN} python .github/scripts/extract_benchmark_results.py \
              --artifacts "${ARTIFACTS_BY_JOB}" \
              --output-dir benchmark-results \
              --repo ${{ github.repository }} \
              --head-branch ${{ github.head_ref || github.ref_name }} \
              --workflow-name "${{ github.workflow }}" \
              --workflow-run-id ${{ github.run_id }} \
              --workflow-run-attempt ${{ github.run_attempt }}
          done

          ls -lah benchmark-results

          for BENCHMARK_RESULTS in benchmark-results/*.json; do
            cat "${BENCHMARK_RESULTS}"
            echo
          done

      - name: Upload the benchmark results
        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
        with:
          benchmark-results-dir: 'benchmark-results'
          dry-run: false