chore(deps): update tinybench to 3.x.x #7100

Draft · wants to merge 6 commits into main
1 change: 1 addition & 0 deletions package.json
@@ -25,6 +25,7 @@
"release": "tsx scripts/release.ts",
"test": "pnpm --filter test-core test:threads",
"test:ci": "CI=true pnpm -r --reporter-hide-prefix --stream --sequential --filter '@vitest/test-*' --filter !test-browser run test",
"test:benchmark": "CI=true pnpm -r --reporter-hide-prefix --stream --sequential --filter '@vitest/test-benchmark' --filter !test-browser run test",
"test:examples": "CI=true pnpm -r --reporter-hide-prefix --stream --filter '@vitest/example-*' run test",
"test:ecosystem-ci": "ECOSYSTEM_CI=true pnpm test:ci",
"typecheck": "tsc -p tsconfig.check.json --noEmit",
2 changes: 1 addition & 1 deletion packages/vitest/package.json
@@ -163,7 +163,7 @@
"magic-string": "^0.30.17",
"pathe": "^1.1.2",
"std-env": "^3.8.0",
"tinybench": "^2.9.0",
"tinybench": "^3.0.7",
"tinyexec": "^0.3.1",
"tinypool": "^1.0.2",
"tinyrainbow": "^1.2.0",
2 changes: 1 addition & 1 deletion packages/vitest/src/node/reporters/base.ts
@@ -455,7 +455,7 @@ export abstract class BaseReporter implements Reporter {
.sort((a, b) => a.result!.benchmark!.rank - b.result!.benchmark!.rank)

for (const sibling of siblings) {
- const number = (sibling.result!.benchmark!.mean / bench.result!.benchmark!.mean).toFixed(2)
+ const number = (sibling.result!.benchmark!.latency.mean / bench.result!.benchmark!.latency.mean).toFixed(2)
this.log(c.green(` ${number}x `) + c.gray('faster than ') + sibling.name)
}

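In tinybench 3, per-task statistics move from flat fields on the task result (`result.mean`) into a nested `result.latency` object, which is why the comparison line now divides `latency.mean` values. A minimal sketch of that computation, using an illustrative `BenchEntry` shape rather than Vitest's real types:

```ts
// Hypothetical pared-down shapes; only latency.mean matters for the ratio.
interface BenchEntry {
  name: string
  result: { benchmark: { latency: { mean: number } } }
}

// Mirrors the reporter logic above: a sibling whose mean latency is N times
// larger than the winner's prints as "N.00x faster than <sibling>".
function formatComparison(fastest: BenchEntry, sibling: BenchEntry): string {
  const ratio = sibling.result.benchmark.latency.mean
    / fastest.result.benchmark.latency.mean
  return `${ratio.toFixed(2)}x faster than ${sibling.name}`
}

// formatComparison(winner /* mean: 1 */, loser /* mean: 2 */)
// -> "2.00x faster than loser"
```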
30 changes: 15 additions & 15 deletions packages/vitest/src/node/reporters/benchmark/table/tableRender.ts
@@ -45,10 +45,10 @@ function formatNumber(number: number) {

const tableHead = [
'name',
- 'hz',
+ 'mean',
'min',
'max',
- 'mean',
+ 'p50/median',
'p75',
'p99',
'p995',
@@ -60,16 +60,16 @@ const tableHead = [
function renderBenchmarkItems(result: BenchmarkResult) {
return [
result.name,
- formatNumber(result.hz || 0),
- formatNumber(result.min || 0),
- formatNumber(result.max || 0),
- formatNumber(result.mean || 0),
- formatNumber(result.p75 || 0),
- formatNumber(result.p99 || 0),
- formatNumber(result.p995 || 0),
- formatNumber(result.p999 || 0),
- `±${(result.rme || 0).toFixed(2)}%`,
- (result.sampleCount || 0).toString(),
+ formatNumber(result.latency.mean || 0),
+ formatNumber(result.latency.min || 0),
+ formatNumber(result.latency.max || 0),
+ formatNumber(result.latency.p50 || 0),
+ formatNumber(result.latency.p75 || 0),
+ formatNumber(result.latency.p99 || 0),
+ formatNumber(result.latency.p995 || 0),
+ formatNumber(result.latency.p999 || 0),
+ `±${(result.latency.rme || 0).toFixed(2)}%`,
+ (result.numberOfSamples || 0).toString(),
]
}

@@ -93,16 +93,16 @@ function renderBenchmark(result: BenchmarkResult, widths: number[]) {
const padded = padRow(renderBenchmarkItems(result), widths)
return [
padded[0], // name
- c.blue(padded[1]), // hz
+ c.blue(padded[1]), // mean
c.cyan(padded[2]), // min
c.cyan(padded[3]), // max
- c.cyan(padded[4]), // mean
+ c.cyan(padded[4]), // p50/median
c.cyan(padded[5]), // p75
c.cyan(padded[6]), // p99
c.cyan(padded[7]), // p995
c.cyan(padded[8]), // p999
c.dim(padded[9]), // rme
- c.dim(padded[10]), // sample
+ c.dim(padded[10]), // samples
].join(' ')
}

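The table now reads every column from the nested `latency` statistics, dropping the `hz` column in favor of `mean` and adding `p50/median`. A sketch of the new row construction, with an assumed `LatencyStats` shape reduced to the fields the table consumes and a stand-in for `formatNumber`:

```ts
// Assumed shape of tinybench 3's latency statistics, reduced to the
// fields the table consumes.
interface LatencyStats {
  mean: number; min: number; max: number
  p50: number; p75: number; p99: number; p995: number; p999: number
  rme: number; samples: number[]
}

function renderRow(name: string, latency: LatencyStats): string[] {
  const fmt = (n: number) => (n || 0).toFixed(4) // stand-in for formatNumber
  return [
    name,
    fmt(latency.mean), fmt(latency.min), fmt(latency.max),
    fmt(latency.p50), fmt(latency.p75), fmt(latency.p99),
    fmt(latency.p995), fmt(latency.p999),
    `±${(latency.rme || 0).toFixed(2)}%`, // relative margin of error
    String(latency.samples.length), // numberOfSamples
  ]
}
```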
6 changes: 3 additions & 3 deletions packages/vitest/src/public/index.ts
@@ -130,14 +130,14 @@ export type {
} from '../runtime/config'

export type {
- BenchFactory,
+ Bench as BenchFactory,
BenchFunction,
Benchmark,
BenchmarkAPI,
BenchmarkResult,
BenchOptions,
- BenchTask,
- BenchTaskResult,
+ TinybenchTask as BenchTask,
+ TinybenchTaskResult as BenchTaskResult,
} from '../runtime/types/benchmark'
export { assertType } from '../typecheck/assertType'

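The `as` aliases keep Vitest's public type names stable while the internal module adopts tinybench's own names. Hypothetical downstream code, assuming the v3 `TaskResult` shape used elsewhere in this diff, keeps compiling unchanged:

```ts
// Hypothetical downstream usage: the alias means `BenchTaskResult` still
// resolves, but it is now tinybench 3's TaskResult, so statistics live
// under the nested `latency` object.
import type { BenchTaskResult } from 'vitest'

function logMean(result: BenchTaskResult | undefined): void {
  if (result) {
    console.log(`mean latency: ${result.latency.mean.toFixed(4)}ms`)
  }
}
```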
8 changes: 4 additions & 4 deletions packages/vitest/src/runtime/benchmark.ts
@@ -6,18 +6,18 @@ import { noop } from '@vitest/utils'
import { getWorkerState } from './utils'

const benchFns = new WeakMap<Test, BenchFunction>()
- const benchOptsMap = new WeakMap()
+ const benchOptsMap = new WeakMap<Test, BenchOptions>()

export function getBenchOptions(key: Test): BenchOptions {
- return benchOptsMap.get(key)
+ return benchOptsMap.get(key)!
}

export function getBenchFn(key: Test): BenchFunction {
return benchFns.get(key)!
}

export const bench = createBenchmark(function (
- name,
+ name: string | Function,
fn: BenchFunction = noop,
options: BenchOptions = {},
) {
@@ -32,7 +32,7 @@ export const bench = createBenchmark(function (
},
})
benchFns.set(task, fn)
- benchOptsMap.set(task, options)
+ benchOptsMap.set(task, { ...options, name: formatName(name) })
})

function createBenchmark(
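The options map gains a type parameter, and the formatted task name is folded into the stored options so the runner can construct a `Bench` directly from `getBenchOptions(benchmark)` (tinybench 3 accepts a `name` option). A sketch of the registry pattern with stand-in types for `Test` and `BenchOptions`:

```ts
// Stand-in types; the real ones come from @vitest/runner and tinybench.
type Test = { id: string }
interface BenchOptions { name?: string; time?: number }

// A WeakMap keyed by the Test object itself: entries are garbage-collected
// together with the test, so no explicit cleanup is needed.
const benchOptsMap = new WeakMap<Test, BenchOptions>()

function register(test: Test, options: BenchOptions, name: string): void {
  // Fold the formatted task name into the stored options, as the diff does.
  benchOptsMap.set(test, { ...options, name })
}

function getBenchOptions(test: Test): BenchOptions {
  // The non-null assertion mirrors the change above: registration always
  // precedes lookup in the benchmark runner.
  return benchOptsMap.get(test)!
}
```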
86 changes: 36 additions & 50 deletions packages/vitest/src/runtime/runners/benchmark.ts
@@ -9,7 +9,7 @@ import type { VitestExecutor } from '../execute'
import type {
Benchmark,
BenchmarkResult,
- BenchTask,
+ TinybenchTask,
} from '../types/benchmark'
import { updateTask as updateRunnerTask } from '@vitest/runner'
import { createDefer, getSafeTimers } from '@vitest/utils'
@@ -20,20 +20,15 @@ function createBenchmarkResult(name: string): BenchmarkResult {
return {
name,
rank: 0,
- rme: 0,
- samples: [] as number[],
+ numberOfSamples: 0,
} as BenchmarkResult
}

- const benchmarkTasks = new WeakMap<Benchmark, import('tinybench').Task>()

async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
- const { Task, Bench } = await runner.importTinybench()

const start = performance.now()

const benchmarkGroup: Benchmark[] = []
- const benchmarkSuiteGroup = []
+ const benchmarkSuiteGroup: Suite[] = []
for (const task of suite.tasks) {
if (task.mode !== 'run' && task.mode !== 'queued') {
continue
@@ -47,12 +42,15 @@
}
}

- // run sub suites sequentially
- for (const subSuite of benchmarkSuiteGroup) {
- await runBenchmarkSuite(subSuite, runner)
+ if (benchmarkSuiteGroup.length) {
+ for (const subSuite of benchmarkSuiteGroup) {
+ await runBenchmarkSuite(subSuite, runner)
+ }
}

if (benchmarkGroup.length) {
+ const { Bench } = await runner.importTinybench()

const defer = createDefer()
suite.result = {
state: 'run',
@@ -62,24 +60,25 @@
updateTask(suite)

const addBenchTaskListener = (
- task: InstanceType<typeof Task>,
+ task: InstanceType<typeof TinybenchTask>,
benchmark: Benchmark,
) => {
+ task.addEventListener('start', () => {
+ benchmark.result!.state = 'run'
+ updateTask(benchmark)
+ })
task.addEventListener(
'complete',
(e) => {
- const task = e.task
- const taskRes = task.result!
+ benchmark.result!.state = 'pass'
const result = benchmark.result!.benchmark!
- Object.assign(result, taskRes)
- // compute extra stats and free raw samples as early as possible
- const samples = result.samples
- result.sampleCount = samples.length
- result.median = samples.length % 2
- ? samples[Math.floor(samples.length / 2)]
- : (samples[samples.length / 2] + samples[samples.length / 2 - 1]) / 2
+ Object.assign(result, task!.result!)
+ result.numberOfSamples = result.latency.samples.length
if (!runner.config.benchmark?.includeSamples) {
- result.samples.length = 0
+ result.latency.samples.length = 0
+ result.throughput.samples.length = 0
}
updateTask(benchmark)
},
@@ -90,58 +89,45 @@
task.addEventListener(
'error',
(e) => {
- const task = e.task
- defer.reject(benchmark ? task.result!.error : e)
+ benchmark.result!.state = 'fail'
+ updateTask(benchmark)
+ defer.reject(e.error)
},
{
once: true,
},
)
}

- benchmarkGroup.forEach((benchmark) => {
- const options = getBenchOptions(benchmark)
- const benchmarkInstance = new Bench(options)

- const benchmarkFn = getBenchFn(benchmark)

+ const { setTimeout } = getSafeTimers()
+ const tasks: [TinybenchTask, Benchmark][] = []
+ for (const benchmark of benchmarkGroup) {
+ const bench = new Bench(getBenchOptions(benchmark))
+ bench.add(benchmark.name, getBenchFn(benchmark))
benchmark.result = {
state: 'run',
startTime: start,
benchmark: createBenchmarkResult(benchmark.name),
}

- const task = new Task(benchmarkInstance, benchmark.name, benchmarkFn)
- benchmarkTasks.set(benchmark, task)
- addBenchTaskListener(task, benchmark)
+ addBenchTaskListener(bench.getTask(benchmark.name)!, benchmark)
updateTask(benchmark)
- })

- const { setTimeout } = getSafeTimers()
- const tasks: [BenchTask, Benchmark][] = []
- for (const benchmark of benchmarkGroup) {
- const task = benchmarkTasks.get(benchmark)!
- await task.warmup()
- tasks.push([
- await new Promise<BenchTask>(resolve =>
- setTimeout(async () => {
- resolve(await task.run())
- }),
- ),
- benchmark,
- ])
+ tasks.push([await new Promise<TinybenchTask>(resolve =>
+ setTimeout(async () => {
+ resolve((await bench.run())[0])
+ }),
+ ), benchmark])
}

suite.result!.duration = performance.now() - start
- suite.result!.state = 'pass'
+ suite.result!.state = benchmarkGroup.every(benchmark => benchmark.result!.state === 'pass') ? 'pass' : 'fail'

tasks
- .sort(([taskA], [taskB]) => taskA.result!.mean - taskB.result!.mean)
+ .sort(([taskA], [taskB]) => taskA.result!.latency.mean - taskB.result!.latency.mean)
.forEach(([, benchmark], idx) => {
- benchmark.result!.state = 'pass'
if (benchmark) {
const result = benchmark.result!.benchmark!
- result.rank = Number(idx) + 1
+ result.rank = idx + 1
updateTask(benchmark)
}
})
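The rewrite follows tinybench 3's API: the standalone `Task` constructor and explicit `task.warmup()` call are gone, tasks are registered with `bench.add()` and fetched with `bench.getTask(name)`, warmup happens inside `bench.run()`, and the error event exposes the failure as `e.error`. A condensed sketch of that flow (one task per `Bench`, as the runner does), assuming `tinybench@^3` is installed:

```ts
import { Bench } from 'tinybench'

async function runOne(name: string, fn: () => void | Promise<void>): Promise<void> {
  const bench = new Bench({ name, time: 100 })
  bench.add(name, fn)

  // Tasks are owned by the Bench; listeners attach to the task itself.
  const task = bench.getTask(name)!
  task.addEventListener('complete', () => {
    const result = task.result!
    // v3 nests statistics under `latency` (time per run) and `throughput`.
    console.log(`${name}: mean ${result.latency.mean.toFixed(4)}ms, `
      + `${result.latency.samples.length} samples, ±${result.latency.rme.toFixed(2)}%`)
  })
  task.addEventListener('error', (e) => {
    console.error(`${name} failed:`, e.error)
  })

  // run() performs warmup internally and resolves to the bench's tasks.
  await bench.run()
}

// e.g. await runOne('spread', () => { [...Array(100).keys()] })
```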
20 changes: 9 additions & 11 deletions packages/vitest/src/runtime/types/benchmark.ts
@@ -1,28 +1,26 @@
import type { Test } from '@vitest/runner'
import type { ChainableFunction } from '@vitest/runner/utils'
import type {
- Bench as BenchFactory,
- Options as BenchOptions,
- Task as BenchTask,
- TaskResult as BenchTaskResult,
- TaskResult as TinybenchResult,
+ Bench,
+ Fn as BenchFunction,
+ BenchOptions,
+ Task as TinybenchTask,
+ TaskResult as TinybenchTaskResult,
} from 'tinybench'

export interface Benchmark extends Test {
meta: {
benchmark: true
- result?: BenchTaskResult
+ result?: TinybenchTaskResult
}
}

- export interface BenchmarkResult extends TinybenchResult {
+ export interface BenchmarkResult extends TinybenchTaskResult {
name: string
rank: number
- sampleCount: number
- median: number
+ numberOfSamples: number
}

- export type BenchFunction = (this: BenchFactory) => Promise<void> | void
type ChainableBenchmarkAPI = ChainableFunction<
'skip' | 'only' | 'todo',
(name: string | Function, fn?: BenchFunction, options?: BenchOptions) => void
@@ -32,4 +30,4 @@ export type BenchmarkAPI = ChainableBenchmarkAPI & {
runIf: (condition: any) => ChainableBenchmarkAPI
}

- export { BenchFactory, BenchOptions, BenchTask, BenchTaskResult }
+ export { Bench, BenchFunction, BenchOptions, TinybenchTask, TinybenchTaskResult }
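`BenchmarkResult` now extends tinybench's `TaskResult` directly: the hand-computed `sampleCount` and `median` are replaced by `numberOfSamples` and the `p50` percentile v3 already reports. A sketch of the resulting shape, with `TaskResult` reduced to the fields this PR touches:

```ts
// Reduced stand-ins for tinybench 3's types, not the full definitions.
interface Statistics {
  mean: number
  p50: number
  rme: number
  samples: number[]
}
interface TaskResult {
  latency: Statistics // time per iteration
  throughput: Statistics // iterations per unit of time
}

interface BenchmarkResult extends TaskResult {
  name: string
  rank: number
  numberOfSamples: number
}

// The dropped `median` field is covered by the built-in p50 percentile:
const median = (r: BenchmarkResult): number => r.latency.p50
```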
11 changes: 6 additions & 5 deletions pnpm-lock.yaml

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion test/benchmark/test/reporter.test.ts
@@ -41,7 +41,7 @@ it.for([true, false])('includeSamples %s', async (includeSamples) => {
assert(result.ctx)
const allSamples = [...result.ctx.state.idMap.values()]
.filter(t => t.meta.benchmark)
- .map(t => t.result?.benchmark?.samples)
+ .map(t => t.result?.benchmark?.latency.samples)
if (includeSamples) {
expect(allSamples[0]).not.toEqual([])
}
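The test now reads raw samples from `latency.samples`, gated by the `benchmark.includeSamples` option that the runner checks before clearing sample arrays. A hypothetical config enabling sample retention (option name taken from the diff above):

```ts
// Hypothetical vitest config retaining raw samples; `includeSamples` is the
// flag the runner checks before truncating latency/throughput sample arrays.
import { defineConfig } from 'vitest/config'

export default defineConfig({
  test: {
    benchmark: {
      includeSamples: true,
    },
  },
})
```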