Track Benchmarks #898

Workflow file for this run

on:
  workflow_run:
    workflows: [Run and Cache Benchmarks]
    types:
      - completed
name: Track Benchmarks
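
# Benchmarks for pull requests run in the separate "Run and Cache Benchmarks"
# workflow and are uploaded there as artifacts. This workflow_run workflow has
# access to repository secrets, so it only downloads those cached results and
# reports them to Bencher; it never checks out or executes PR code.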
jobs:
  track_benchmarks:
    if: github.event.workflow_run.conclusion == 'success'
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        backend: ["postgres", "sqlite", "mysql"]
    env:
      BENCHER_PROJECT: diesel
      BENCHER_ADAPTER: rust_criterion
      BENCHER_TESTBED: ubuntu-latest-${{ matrix.backend }}
      PR_BENCHMARK_RESULTS: pr_${{ matrix.backend }}.txt
      BASE_BENCHMARK_RESULTS: base_${{ matrix.backend }}.txt
      GITHUB_EVENT: event_${{ matrix.backend }}.json
      # Upper boundary (confidence level) for the t-test Threshold.
      # Lower this value to make the test more sensitive to changes;
      # raise it to make the test less sensitive.
      # https://bencher.dev/docs/explanation/thresholds/#t-test-threshold-upper-boundary
      UPPER_BOUNDARY: 0.98
    steps:
      - name: Download Benchmark Results
        uses: actions/github-script@v6
        with:
          script: |
            async function downloadArtifact(artifactName) {
              let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
                owner: context.repo.owner,
                repo: context.repo.repo,
                run_id: context.payload.workflow_run.id,
              });
              let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
                return artifact.name == artifactName
              })[0];
              if (!matchArtifact) {
                core.setFailed(`Failed to find artifact: ${artifactName}`);
                return; // setFailed marks the step as failed but does not stop execution on its own
              }
              let download = await github.rest.actions.downloadArtifact({
                owner: context.repo.owner,
                repo: context.repo.repo,
                artifact_id: matchArtifact.id,
                archive_format: 'zip',
              });
              let fs = require('fs');
              fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/${artifactName}.zip`, Buffer.from(download.data));
            }
            await downloadArtifact(process.env.PR_BENCHMARK_RESULTS);
            await downloadArtifact(process.env.BASE_BENCHMARK_RESULTS);
            await downloadArtifact(process.env.GITHUB_EVENT);
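      # Each artifact was saved above as "<name>.zip"; unzipping restores the
      # original files. The event artifact is expected to contain event.json,
      # which the next step reads.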
      - name: Unzip Benchmark Results
        run: |
          unzip $PR_BENCHMARK_RESULTS.zip
          unzip $BASE_BENCHMARK_RESULTS.zip
          unzip $GITHUB_EVENT.zip
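      # Pull the branch name, head SHA, and PR number out of the original
      # pull_request event payload so the Bencher steps below can name the
      # branch and comment on the right PR.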
      - name: Export GitHub Event Data
        uses: actions/github-script@v6
        with:
          script: |
            let fs = require('fs');
            let githubEvent = JSON.parse(fs.readFileSync("event.json", {encoding: 'utf8'}));
            console.log(githubEvent);
            core.exportVariable("PR_HEAD", `${githubEvent.pull_request.head.ref}-${githubEvent.pull_request.head.sha.slice(0, 8)}`);
            core.exportVariable("PR_ID", `${githubEvent.pull_request.head.ref}/${process.env.BENCHER_TESTBED}/${process.env.BENCHER_ADAPTER}`);
            core.exportVariable("PR_NUMBER", githubEvent.number);
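      # Install the Bencher CLI.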
      - uses: bencherdev/bencher@main
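      # Record the base branch results under the PR head branch so the PR run
      # below has a baseline to compare against. The --else-branch flag is
      # there so the branch is created if --if-branch does not find an
      # existing one.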
      - name: Track base Benchmarks
        run: |
          bencher run \
            --if-branch '${{ env.PR_HEAD }}' \
            --else-branch \
            --token "${{ secrets.BENCHER_API_TOKEN }}" \
            --file "$BASE_BENCHMARK_RESULTS"
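      # Create a t-test Threshold for the PR branch. With an upper boundary of
      # 0.98, an Alert is generated when a new latency value falls outside the
      # 98% confidence interval computed from the baseline.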
      - name: Create PR threshold
        run: |
          bencher threshold create \
            --project "$BENCHER_PROJECT" \
            --branch '${{ env.PR_HEAD }}' \
            --testbed "$BENCHER_TESTBED" \
            --measure latency \
            --test t \
            --upper-boundary ${{ env.UPPER_BOUNDARY }} \
            --token "${{ secrets.BENCHER_API_TOKEN }}"
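      # Report the PR results: --github-actions posts the comparison as a PR
      # comment (--ci-number selects the PR and --ci-id keeps the comment for
      # each backend distinct), and --err fails this step if the Threshold
      # above generates an Alert.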
      - name: Track PR Benchmarks
        run: |
          bencher run \
            --branch '${{ env.PR_HEAD }}' \
            --token "${{ secrets.BENCHER_API_TOKEN }}" \
            --ci-id '${{ env.PR_ID }}' \
            --ci-number '${{ env.PR_NUMBER }}' \
            --github-actions "${{ secrets.GITHUB_TOKEN }}" \
            --err \
            --file "$PR_BENCHMARK_RESULTS"