diff --git a/.gitignore b/.gitignore
index 2028a0d..9ae5cf6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,9 @@ deps/src/
 docs/build/
 docs/site/
 
+# Benchmark artifacts
+tune.json
+
 # File generated by Pkg, the package manager, based on a corresponding Project.toml
 # It records a fixed state of all packages used by the project. As such, it should not be
 # committed for packages, but should be committed for applications that require a static
diff --git a/Project.toml b/Project.toml
index c09bb45..f8a5672 100644
--- a/Project.toml
+++ b/Project.toml
@@ -6,6 +6,7 @@ authors = ["SmalRat <30555080+SmalRat@users.noreply.github.com>",
 version = "0.1.0"
 
 [deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 Cairo = "159f3aea-2a34-519c-b102-8c37f9878175"
 Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
 Dagger = "d58978e5-989f-55fb-8d15-ea34adc7bf54"
@@ -20,6 +21,7 @@ Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
 LightGraphs = "093fc24a-ae57-5d10-9952-331d41423f4d"
 MetaGraphs = "626554b9-1ddb-594c-aa3c-2596fe9399a5"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 TimespanLogging = "a526e669-04d3-4846-9525-c66122c55f63"
 
 [compat]
@@ -29,4 +31,4 @@ Dagger = "0.18.11"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Test"]
\ No newline at end of file
+test = ["Test"]
diff --git a/benchmark/README.md b/benchmark/README.md
new file mode 100644
index 0000000..cc3621e
--- /dev/null
+++ b/benchmark/README.md
@@ -0,0 +1,55 @@
+# FrameworkDemo benchmarks
+
+Run the benchmarks from the project's main directory.
+
+## Usage
+
+Run the benchmark script:
+
+```
+julia --project benchmark/benchmarks.jl
+```
+
+or benchmark with [PkgBenchmark](https://github.com/JuliaCI/PkgBenchmark.jl):
+
+```julia
+using PkgBenchmark
+import FrameworkDemo
+
+benchmarkpkg(FrameworkDemo)
+```
+
+
+## Developing benchmarks
+
+The benchmarks are based on [BenchmarkTools](https://github.com/JuliaCI/BenchmarkTools.jl) and follow the standard benchmark suite structure built from `BenchmarkTools.BenchmarkGroup` objects.
+
+Add new benchmarks:
+
+```julia
+SUITE["new_benchmark"] = BenchmarkGroup(["tag1", "tag2", "etc"])
+SUITE["new_benchmark"]["foo"] = @benchmarkable foo($bar)
+```
+
+Add a result processing function (e.g. for visualization):
+
+```julia
+function plot_foo(results::BenchmarkGroup)
+    foo_results = results["new_benchmark"]["foo"]
+    do_something(foo_results)
+end
+
+push!(result_processors, plot_foo) # register function
+```
+
+The functions added to `result_processors` will be called automatically when executing the `benchmark/benchmarks.jl` script. Alternatively, the functions can be passed via the `postprocess` argument of `PkgBenchmark.benchmarkpkg`.
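+
+For example, a minimal sketch (the inline callback here is just an illustration, not part of this suite):
+
+```julia
+using PkgBenchmark
+import FrameworkDemo
+
+# illustrative callback: display the minimum-time estimates before PkgBenchmark saves the results
+benchmarkpkg(FrameworkDemo; postprocess = results -> (display(minimum(results)); results))
+```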
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
new file mode 100644
index 0000000..151753d
--- /dev/null
+++ b/benchmark/benchmarks.jl
@@ -0,0 +1,19 @@
+using FrameworkDemo
+using BenchmarkTools
+using Plots
+
+SUITE = BenchmarkGroup()
+result_processors = Function[]
+
+include("suite/cpu_crunching.jl")
+
+if abspath(PROGRAM_FILE) == @__FILE__
+    @info "tuning benchmark suite"
+    tune!(SUITE, verbose=true)
+    @info "running benchmark suite"
+    results = run(SUITE, verbose=true)
+    @info "benchmark results" results
+    for processor in result_processors
+        processor(results)
+    end
+end
diff --git a/benchmark/suite/cpu_crunching.jl b/benchmark/suite/cpu_crunching.jl
new file mode 100644
index 0000000..45b40cf
--- /dev/null
+++ b/benchmark/suite/cpu_crunching.jl
@@ -0,0 +1,54 @@
+import Printf
+
+SUITE["cpu_crunching"] = BenchmarkGroup(["cpu_crunching"])
+
+SUITE["cpu_crunching"]["find_primes"] = BenchmarkGroup(["find_primes"])
+for i in exp10.(range(0, stop=6, length=10))
+    n = ceil(Int, i)
+    SUITE["cpu_crunching"]["find_primes"][n] = @benchmarkable FrameworkDemo.find_primes($n) evals = 1 samples = 1
+end
+
+SUITE["cpu_crunching"]["crunch_for_seconds"] = BenchmarkGroup(["crunch_for_seconds"])
+coef = FrameworkDemo.calculate_coefficients()
+for i in exp10.(range(-6, stop=1.5, length=10))
+    SUITE["cpu_crunching"]["crunch_for_seconds"][i] = @benchmarkable FrameworkDemo.crunch_for_seconds($i, $coef) evals = 1 samples = 1
+end
+
+function log_ticks(range)
+    start = floor(log10(minimum(range)))
+    stop = ceil(log10(maximum(range)))
+    return 10.0 .^ (start:stop)
+end
+
+function plot_find_primes(results::BenchmarkGroup)
+    primes_r = sort(collect(results["cpu_crunching"]["find_primes"]), by=first)
+    x = first.(primes_r)
+    y = primes_r .|> last .|> minimum .|> time |> x -> x * 1e-9
+    p = plot(x, y, xaxis=:log10, yaxis=:log10, xlabel="n", ylabel="time [s]",
+             title="find_primes(n)", label="find_primes",
+             marker=(:circle, 5), linewidth=3,
+             xticks=log_ticks(x), yticks=log_ticks(y),
+             xguidefonthalign=:right, yguidefontvalign=:top, legend=:topleft)
+    filename = "bench_find_primes.png"
+    savefig(p, filename)
+    @info "Results of benchmark cpu_crunching/find_primes written to $filename"
+end
+
+push!(result_processors, plot_find_primes)
+
+function plot_crunch_for_seconds(results::BenchmarkGroup)
+    crunch_r = sort(collect(results["cpu_crunching"]["crunch_for_seconds"]), by=first)
+    x = first.(crunch_r)
+    y = crunch_r .|> last .|> minimum .|> time |> x -> x * 1e-9
+    p = plot(x, (y - x) ./ x, xaxis=:log10, xlabel="t [s]", ylabel="Time relative error",
+             yformatter=x -> Printf.@sprintf("%.1f%%", 100 * x),
+             xticks=log_ticks(x),
+             title="crunch_for_seconds(t)", label="crunch_for_seconds",
+             marker=(:circle, 5), linewidth=3,
+             xguidefonthalign=:right, yguidefontvalign=:top, legend=:bottomright)
+    filename = "bench_crunch_for_seconds.png"
+    savefig(p, filename)
+    @info "Results of benchmark cpu_crunching/crunch_for_seconds written to $filename"
+end
+
+push!(result_processors, plot_crunch_for_seconds)