diff --git a/benchmark/.gitignore b/benchmark/.gitignore
index 496420140..d7f132308 100644
--- a/benchmark/.gitignore
+++ b/benchmark/.gitignore
@@ -1,3 +1,4 @@
 Manifest.toml
 *.json
-*.o
\ No newline at end of file
+*.o
+jl_*
\ No newline at end of file
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
index 493ea907e..4584513ac 100644
--- a/benchmark/Project.toml
+++ b/benchmark/Project.toml
@@ -1,4 +1,5 @@
 [deps]
+ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 Finch = "9177782c-1635-4eb9-9bfb-d9dfa25e6bce"
 MatrixDepot = "b51810bb-c9f3-55da-ae3c-350fc1fbce05"
@@ -6,4 +7,4 @@ MatrixMarket = "4d4711f2-db25-561a-b6b3-d35e7d4047d3"
 PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
 
 [compat]
-BenchmarkTools = "1.5"
\ No newline at end of file
+BenchmarkTools = "1.5"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
index 63d60f1e1..7935d5308 100644
--- a/benchmark/benchmarks.jl
+++ b/benchmark/benchmarks.jl
@@ -10,6 +10,29 @@ using BenchmarkTools
 using MatrixDepot
 using SparseArrays
 using Random
+using ArgParse
+
+s = ArgParseSettings("Run Finch.jl benchmarks. By default, all benchmarks are run unless --include or --exclude options are provided.
+If the environment variable FINCH_BENCHMARK_ARGS is set, it will override the given arguments.")
+
+@add_arg_table! s begin
+    "--include", "-i"
+    nargs = '*'
+    default = []
+    help = "list of benchmark suites to include, e.g., --include high-level structure"
+
+    "--exclude", "-e"
+    nargs = '*'
+    default = []
+    help = "list of benchmark suites to exclude, e.g., --exclude compile graphs"
+end
+
+if "FINCH_BENCHMARK_ARGS" in keys(ENV)
+    ARGS = split(ENV["FINCH_BENCHMARK_ARGS"], " ")
+end
+
+parsed_args = parse_args(ARGS, s)
+
 include(joinpath(@__DIR__, "../docs/examples/bfs.jl"))
 include(joinpath(@__DIR__, "../docs/examples/pagerank.jl"))
 include(joinpath(@__DIR__, "../docs/examples/shortest_paths.jl"))
@@ -20,73 +43,97 @@
 SUITE = BenchmarkGroup()
 
 SUITE["high-level"] = BenchmarkGroup()
 
-let
-    k = Ref(0.0)
-    A = Tensor(Dense(Sparse(Element(0.0))), fsprand(10000, 10000, 0.01))
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["permutedims(Dense(Sparse()))"] = @benchmarkable(permutedims($A, (2, 1)))
-end
+for (scheduler_name, scheduler) in [
+    "default_scheduler" => Finch.default_scheduler(),
+    "galley_scheduler" => Finch.galley_scheduler(),
+]
+    Finch.with_scheduler(scheduler) do
+        let
+            A = Tensor(Dense(Sparse(Element(0.0))), fsprand(10000, 10000, 0.01))
+            SUITE["high-level"]["permutedims(Dense(Sparse()))"][scheduler_name] = @benchmarkable(permutedims($A, (2, 1)))
+        end
 
-let
-    k = Ref(0.0)
-    A = Tensor(Dense(Dense(Element(0.0))), rand(10000, 10000))
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["permutedims(Dense(Dense()))"] = @benchmarkable(permutedims($A, (2, 1)))
-end
+        let
+            A = Tensor(Dense(Dense(Element(0.0))), rand(10000, 10000))
+            SUITE["high-level"]["permutedims(Dense(Dense()))"][scheduler_name] = @benchmarkable(permutedims($A, (2, 1)))
+        end
 
-let
-    k = Ref(0.0)
-    x = rand(1)
-    y = rand(1)
-    SUITE["high-level"]["einsum_spmv_compile_overhead"] = @benchmarkable(
-        begin
-            A, x, y = (A, $x, $y)
-            @einsum y[i] += A[i, j] * x[j]
-        end,
-        setup = (A = Tensor(Dense(SparseList(Element($k[] += 1))), fsprand(1, 1, 1)))
-    )
-end
+        let
+            k = Ref(0.0)
+            x = rand(1)
+            y = rand(1)
+            SUITE["high-level"]["einsum_spmv_compile_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    A, x, y = (A, $x, $y)
+                    @einsum y[i] += A[i, j] * x[j]
+                end,
+                setup = (A = Tensor(Dense(SparseList(Element($k[] += 1))), fsprand(1, 1, 1)))
+            )
+        end
 
-let
-    A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
-    x = rand(1)
-    SUITE["high-level"]["einsum_spmv_call_overhead"] = @benchmarkable(
-        begin
-            A, x = ($A, $x)
-            @einsum y[i] += A[i, j] * x[j]
-        end,
-    )
-end
+        let
+            N = 10
+            P = 0.0001
+            C = 16.0
+            SUITE["high-level"]["einsum_matmul_adaptive_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    @einsum C[i, j] += A[i, k] * B[k, j]
+                end,
+                setup = begin
+                    (N, P, C) = ($N, $P, $C)
+                    n = floor(Int, N * C^(rand()))
+                    m = floor(Int, N * C^(rand()))
+                    l = floor(Int, N * C^(rand()))
+                    p = floor(Int, P * C^(rand()))
+                    q = floor(Int, P * C^(rand()))
+                    A = fsprand(n, l, p)
+                    B = fsprand(l, m, q)
+                end,
+                evals = 1
+            )
+        end
 
-let
-    N = 1_000
-    K = 1_000
-    p = 0.001
-    A = Tensor(Dense(Dense(Element(0.0))), rand(N, K))
-    B = Tensor(Dense(Dense(Element(0.0))), rand(K, N))
-    M = Tensor(Dense(SparseList(Element(0.0))), fsprand(N, N, p))
-
-    SUITE["high-level"]["sddmm_fused"] = @benchmarkable(
-        begin
-            M = lazy($M)
-            A = lazy($A)
-            B = lazy($B)
-            compute(M .* (A * B))
-        end,
-    )
+        let
+            A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
+            x = rand(1)
+            SUITE["high-level"]["einsum_spmv_call_overhead"][scheduler_name] = @benchmarkable(
+                begin
+                    A, x = ($A, $x)
+                    @einsum y[i] += A[i, j] * x[j]
+                end,
+            )
+        end
 
-    SUITE["high-level"]["sddmm_unfused"] = @benchmarkable(
-        begin
-            M = $M
-            A = $A
-            B = $B
-            M .* (A * B)
-        end,
-    )
+        let
+            N = 1_000
+            K = 1_000
+            p = 0.001
+            A = Tensor(Dense(Dense(Element(0.0))), rand(N, K))
+            B = Tensor(Dense(Dense(Element(0.0))), rand(K, N))
+            M = Tensor(Dense(SparseList(Element(0.0))), fsprand(N, N, p))
+
+            SUITE["high-level"]["sddmm_fused"][scheduler_name] = @benchmarkable(
+                begin
+                    M = lazy($M)
+                    A = lazy($A)
+                    B = lazy($B)
+                    compute(M .* (A * B))
+                end,
+            )
+
+            SUITE["high-level"]["sddmm_unfused"][scheduler_name] = @benchmarkable(
+                begin
+                    M = $M
+                    A = $A
+                    B = $B
+                    M .* (A * B)
+                end,
+            )
+        end
+    end
 end
+
 eval(let
     A = Tensor(Dense(SparseList(Element(0.0))), fsprand(1, 1, 1))
     x = rand(1)
@@ -348,4 +395,13 @@ x = rand(N)
 
 SUITE["structure"]["banded"]["SparseList"] = @benchmarkable spmv_serial($A_ref, $x)
 SUITE["structure"]["banded"]["SparseBand"] = @benchmarkable spmv_serial($A, $x)
-SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
\ No newline at end of file
+SUITE["structure"]["banded"]["SparseInterval"] = @benchmarkable spmv_serial($A2, $x)
+
+if !isempty(parsed_args["include"])
+    inc = reduce((a, b) -> :($a || $b), parsed_args["include"])
+    SUITE = eval(:(SUITE[@tagged $inc]))
+end
+if !isempty(parsed_args["exclude"])
+    exc = reduce((a, b) -> :($a || $b), parsed_args["exclude"])
+    SUITE = eval(:(SUITE[@tagged !$exc]))
+end
\ No newline at end of file
diff --git a/benchmark/runbenchmarks.jl b/benchmark/runbenchmarks.jl
index 5741f752c..a1a50f30e 100755
--- a/benchmark/runbenchmarks.jl
+++ b/benchmark/runbenchmarks.jl
@@ -11,7 +11,7 @@ end
 using PkgBenchmark
 benchmarkpkg(
     dirname(@__DIR__),
-    BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "8")),
+    BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "8", "FINCH_BENCHMARK_ARGS" => get(ENV, "FINCH_BENCHMARK_ARGS", join(ARGS, " ")))),
     resultfile = joinpath(@__DIR__, "result.json"),
 )
 
diff --git a/benchmark/runjudge.jl b/benchmark/runjudge.jl
index 273a304cc..e306adb85 100755
--- a/benchmark/runjudge.jl
+++ b/benchmark/runjudge.jl
@@ -11,19 +11,25 @@ end
 using PkgBenchmark
 
 function mkconfig(; kwargs...)
-    return BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "1"); kwargs...)
+    return BenchmarkConfig(env = Dict("JULIA_NUM_THREADS" => "1", "FINCH_BENCHMARK_ARGS" => get(ENV, "FINCH_BENCHMARK_ARGS", join(ARGS, " "))); kwargs...)
 end
 
+script = tempname(joinpath(@__DIR__))
+
+cp(joinpath(@__DIR__, "benchmarks.jl"), script)
+
 group_target = benchmarkpkg(
     dirname(@__DIR__),
     mkconfig(),
     resultfile = joinpath(@__DIR__, "result-target.json"),
+    script = script,
 )
 
 group_baseline = benchmarkpkg(
     dirname(@__DIR__),
     mkconfig(id = "main"),
     resultfile = joinpath(@__DIR__, "result-baseline.json"),
+    script = script,
 )
 
 judgement = judge(group_target, group_baseline)
diff --git a/test/runtests.jl b/test/runtests.jl
index b3e0205c5..66f481012 100755
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -9,21 +9,29 @@ end
 using Test
 using ArgParse
-s = ArgParseSettings("Run Finch.jl tests. All tests are run by default. Specific
-test suites may be specified as positional arguments. Finch compares to
-reference output which depends on the system word size (currently
-$(Sys.WORD_SIZE)-bit). To overwrite $(Sys.WORD_SIZE==32 ? 64 : 32)-bit output,
-run this with a $(Sys.WORD_SIZE==32 ? 64 : 32)-bit julia executable.")
+s = ArgParseSettings("Run Finch.jl tests. By default, all tests are run unless --include or --exclude options are provided.
+Finch compares to reference output which depends on the system word size (currently $(Sys.WORD_SIZE)-bit). To overwrite $(Sys.WORD_SIZE==32 ? 64 : 32)-bit output, run this with a $(Sys.WORD_SIZE==32 ? 64 : 32)-bit julia executable.
+If the environment variable FINCH_TEST_ARGS is set, it will override the given arguments.")
 
 @add_arg_table! s begin
     "--overwrite", "-w"
     action = :store_true
     help = "overwrite reference output for $(Sys.WORD_SIZE)-bit systems"
 
-    "suites"
+    "--include", "-i"
     nargs = '*'
-    default = ["all"]
-    help = "names of test suites to run, from: print, constructors, representation, merges, index, typical, kernels, issues, interface, galley, continuous, continuousexamples, simple, examples, fileio, docs, parallel, algebra."
+    default = []
+    help = "list of test suites to include, e.g., --include constructors merges"
+
+    "--exclude", "-e"
+    nargs = '*'
+    default = []
+    help = "list of test suites to exclude, e.g., --exclude parallel algebra"
 end
+
+if "FINCH_TEST_ARGS" in keys(ENV)
+    ARGS = split(ENV["FINCH_TEST_ARGS"], " ")
+end
+
 parsed_args = parse_args(ARGS, s)
 
 """
@@ -61,7 +69,9 @@ end
 
 function should_run(name)
     global parsed_args
-    return ("all" in parsed_args["suites"] || name in parsed_args["suites"])
+    inc = parsed_args["include"]
+    exc = parsed_args["exclude"]
+    return (isempty(inc) || name in inc) && !(name in exc)
 end
 
 macro repl(io, ex, quiet = false)
@@ -109,7 +119,6 @@ include("utils.jl")
         end
     end
     if should_run("parallel") include("test_parallel.jl") end
-    #if should_run("continuous") include("test_continuous.jl") end
     #algebra goes at the end since it calls refresh()
     if should_run("algebra") include("test_algebra.jl") end
 end