Mercurial > repos > public > sbplib_julia
comparison benchmark/benchmark_utils.jl @ 1192:339cb6eacb0c tooling/benchmarks
Rename the script file
| author | Jonatan Werpers <jonatan@werpers.com> |
|---|---|
| date | Fri, 27 Jan 2023 22:02:43 +0100 |
| parents | benchmark/run_and_view.jl@12b525cd3da6 |
| children | 5e20c0815e07 |
comparison
equal
deleted
inserted
replaced
| 1191:12b525cd3da6 | 1192:339cb6eacb0c |
|---|---|
| 1 import PkgBenchmark | |
| 2 import Markdown | |
| 3 import Mustache | |
| 4 import Dates | |
| 5 | |
| 6 import Sbplib | |
| 7 | |
# Absolute path to the root of the Sbplib package: `pathof` returns
# `<root>/src/Sbplib.jl`, so dropping the last two path components yields the root.
const sbplib_root = splitpath(pathof(Sbplib))[1:end-2] |> joinpath
# Directory where rendered benchmark result HTML files are written (created if missing).
const results_dir = mkpath(joinpath(sbplib_root, "benchmark/results"))
# Mustache template used to wrap the markdown-rendered results into an HTML page.
const template_path = joinpath(sbplib_root, "benchmark/result.tmpl")
| 11 | |
"""
    main(args...; kwargs...)

Calls `run_benchmark(args...; kwargs...)` and writes the results as an HTML file in `benchmark/results`.
See [`run_benchmark`](@ref) for possible arguments.
"""
function main(args...; kwargs...)
    result = run_benchmark(args...; kwargs...)
    # Render the result to disk, then show it to the user.
    report_path = write_result_html(result)
    return open_in_default_browser(report_path)
end
| 23 | |
| 24 | |
"""
    run_benchmark()

Runs the benchmark suite for the current working directory and returns a `PkgBenchmark.BenchmarkResult`
"""
function run_benchmark()
    result = PkgBenchmark.benchmarkpkg(Sbplib)
    # PkgBenchmark records a git commit; patch in the Mercurial id instead.
    return add_rev_info(result, hg_id())
end
| 37 | |
"""
    run_benchmark(rev)

Updates the repository to the given revision and runs the benchmark suite. When done, updates the repository to the original state.
`rev` can be any identifier compatible with `hg update`.

Returns a `PkgBenchmark.BenchmarkResult`
"""
function run_benchmark(rev)
    rev_before = hg_rev()
    hg_update(rev)
    try
        return run_benchmark()
    finally
        # Restore the working copy even if benchmarking throws, so a failed
        # run does not leave the repository checked out at `rev`.
        hg_update(rev_before)
    end
end
| 54 | |
"""
    run_benchmark(target, baseline, f=minimum; judgekwargs=Dict())

Runs the benchmark at revisions `target` and `baseline` and compares them using `PkgBenchmark.judge`.
`f` is the function used to compare. `judgekwargs` are keyword arguments passed to `judge`.

`target` and `baseline` can be any identifier compatible with `hg update`.

Returns a `PkgBenchmark.BenchmarkJudgement`
"""
function run_benchmark(target, baseline, f=minimum; judgekwargs=Dict())
    rev_before = hg_rev()
    local t, b
    try
        hg_update(target)
        t = run_benchmark()
        hg_update(baseline)
        b = run_benchmark()
    finally
        # Restore the working copy even if either benchmark run throws, so a
        # failure does not leave the repository at `target` or `baseline`.
        hg_update(rev_before)
    end

    return PkgBenchmark.judge(t, b, f; judgekwargs...)
end
| 75 | |
| 76 | |
# Rebuild a `PkgBenchmark.BenchmarkResults` with its commit field replaced by `rev`.
# Presumably needed because PkgBenchmark's own commit detection assumes git,
# which yields nothing useful for this Mercurial repository — TODO confirm.
# NOTE(review): the positional argument order must match the BenchmarkResults
# constructor exactly; `rev` replaces the second (commit) field.
function add_rev_info(benchmarkresult, rev)
    return PkgBenchmark.BenchmarkResults(
        benchmarkresult.name,
        rev,
        benchmarkresult.benchmarkgroup,
        benchmarkresult.date,
        benchmarkresult.julia_commit,
        benchmarkresult.vinfo,
        benchmarkresult.benchmarkconfig,
    )
end
| 88 | |
| 89 | |
"""
    write_result_html(io, r)

Render the benchmark result `r` as an HTML page and write it to `io`.
The markdown report produced by PkgBenchmark is converted to HTML and
inserted into the Mustache template at `template_path`, with the benchmark
date as the page title.
"""
function write_result_html(io, r)
    # Export the result as markdown into an in-memory buffer.
    md_buffer = IOBuffer()
    PkgBenchmark.export_markdown(md_buffer, r)
    content = Markdown.html(Markdown.parse(String(take!(md_buffer))))

    title = Dates.format(PkgBenchmark.date(r), "yyyy-mm-dd HH:MM:SS")
    Mustache.render(io, Mustache.load(template_path), Dict("title"=>title, "content"=>content))
end
| 102 | |
"""
    write_result_html(r)

Write the benchmark result `r` as an HTML file in `results_dir`, named after
the benchmark timestamp, and return the path of the written file.
"""
function write_result_html(r)
    # Colon-free timestamp so the string is a valid filename on all platforms.
    timestamp = Dates.format(PkgBenchmark.date(r), "yyyy-mm-dd HHMMSS")
    file_path = joinpath(results_dir, string(timestamp, ".html"))

    open(io -> write_result_html(io, r), file_path, "w")

    return file_path
end
| 113 | |
| 114 | |
| 115 PkgBenchmark.date(j::PkgBenchmark.BenchmarkJudgement) = PkgBenchmark.date(PkgBenchmark.target_result(j)) | |
| 116 | |
| 117 | |
# Return the output of `hg id` (revision id plus any tags/bookmarks) for the
# package repository. HGPLAIN disables user aliases/config so output is stable.
function hg_id()
    id_cmd = addenv(Cmd(`hg id`, dir=sbplib_root), "HGPLAIN"=>"")
    return readchomp(id_cmd)
end
| 122 | |
# Return just the current revision hash (`hg id -i`) of the package repository.
# HGPLAIN disables user aliases/config so output is stable.
function hg_rev()
    rev_cmd = addenv(Cmd(`hg id -i`, dir=sbplib_root), "HGPLAIN"=>"")
    return readchomp(rev_cmd)
end
| 127 | |
# Update the package repository's working copy to revision `rev`.
# `--check` makes hg refuse to update when there are uncommitted changes.
function hg_update(rev)
    update_cmd = addenv(Cmd(`hg update --check -r $rev`, dir=sbplib_root), "HGPLAIN"=>"")
    run(update_cmd)
end
| 132 | |
| 133 | |
# From Pluto.jl/src/webserver/WebServer.jl (2023-01-24)

# Heuristic WSL detection: WSL kernels report "microsoft"/"wsl" in their
# release string. Pluto defines this helper next to open_in_default_browser,
# but it was not copied along with it — without it, the `detectwsl()` call
# below threw UndefVarError on Linux, the blanket catch swallowed it, and the
# function returned false without ever reaching the `xdg-open` branch.
function detectwsl()
    return Sys.islinux() &&
        isfile("/proc/sys/kernel/osrelease") &&
        occursin(r"microsoft|wsl"i, read("/proc/sys/kernel/osrelease", String))
end

"""
    open_in_default_browser(url::AbstractString)::Bool

Open `url` in the OS default browser. Returns `true` when a launcher was
invoked, `false` when the platform is unsupported or launching throws.
"""
function open_in_default_browser(url::AbstractString)::Bool
    try
        if Sys.isapple()
            Base.run(`open $url`)
            true
        elseif Sys.iswindows() || detectwsl()
            Base.run(`powershell.exe Start "'$url'"`)
            true
        elseif Sys.islinux()
            Base.run(`xdg-open $url`)
            true
        else
            false
        end
    catch
        # Best effort: a failure to open a browser should never abort main().
        false
    end
end
| 153 | |
| 154 | |
| 155 main | |
| 156 | |
| 157 # TODO: Better logging of what is happening | |
| 158 # TODO: Improve the workflow? How? | |
| 159 | |
| 160 # TODO: Clean up the HTML output? | |
| 161 # TODO: Make the codeblocks in the table look nicer | |
| 162 # TODO: Change width of tables and code blocks so everything is visible | |
| 163 # TODO: Fix the commit id, it chops off all the important info | |
| 164 # TODO: Make title less verbose | |
| 165 # TBD: Do we have to replace export_markdown? Could use a template instead. | |
| 166 | |
| 167 | |
| 168 # TBD: How to compare against current working directory? Possible to create a temporary commit? | |
| 169 # TBD: What parts are PkgBenchmark contributing? Can it be stripped out? |
