diff --git a/benchmarks/math/expression_gen.py b/benchmarks/math/expression_gen.py index a857efa..fa3c91e 100755 --- a/benchmarks/math/expression_gen.py +++ b/benchmarks/math/expression_gen.py @@ -1,60 +1,66 @@ #!/usr/bin/env python3 + import json import random NUM_EXPRESSIONS = 20 NUM_VARS = 3000 # Should be kept in sync with NUM_VARS in expression.gd + def _var_names(): - rv = [] - for i in range(NUM_VARS): - rv.append("x" + str(i)) - return rv + rv = [] + for i in range(NUM_VARS): + rv.append("x" + str(i)) + return rv + def _var_values(): - rv = [] - for i in range (NUM_VARS): - rv.append((i+1)*10) - return rv + rv = [] + for i in range(NUM_VARS): + rv.append((i + 1) * 10) + return rv + def _combine(nodes, ia, ib, op): - na = nodes[ia] - nb = nodes[ib] - del nodes[ib] - del nodes[ia] - nodes.append("(" + str(na) + " " + op + " " + str(nb) + ")") + na = nodes[ia] + nb = nodes[ib] + del nodes[ib] + del nodes[ia] + nodes.append("(" + str(na) + " " + op + " " + str(nb) + ")") + def _generate_string(): - nodes = [] - operators = ["+", "-", "*", "/"] + nodes = [] + operators = ["+", "-", "*", "/"] - nodes += _var_names() + nodes += _var_names() - while len(nodes) > 1: - ia = random.randrange(0, len(nodes)) - ib = random.randrange(0, len(nodes)) - io = random.randrange(0, len(operators)) - op = operators[io] + while len(nodes) > 1: + ia = random.randrange(0, len(nodes)) + ib = random.randrange(0, len(nodes)) + io = random.randrange(0, len(operators)) + op = operators[io] - if ia == ib: - ib = ia+1 - if ib == len(nodes): - ib = 0 + if ia == ib: + ib = ia + 1 + if ib == len(nodes): + ib = 0 - if ia < ib: - _combine(nodes, ia, ib, op) - elif ib < ia: - _combine(nodes, ib, ia, op) + if ia < ib: + _combine(nodes, ia, ib, op) + elif ib < ia: + _combine(nodes, ib, ia, op) + + return nodes[0] - return nodes[0] def _generate_strings(): - random.seed(234) - rv = [] - for i in range(NUM_EXPRESSIONS): - rv.append(_generate_string()) - return rv + random.seed(234) + rv = [] + for i in range(NUM_EXPRESSIONS): + rv.append(_generate_string()) + return rv strings = _generate_strings() -print("const EXPRESSIONS = ", json.dumps(strings, indent=4).replace(' ', '\t')) +print("const EXPRESSIONS = ", json.dumps(strings, indent=4).replace(" ", "\t")) diff --git a/web/.gitignore b/web/.gitignore index 6d870b9..466d74f 100644 --- a/web/.gitignore +++ b/web/.gitignore @@ -4,7 +4,11 @@ public/ # Temporary lock file while building .hugo_build.lock -# Result JSON files -# (should be committed to https://github.com/godotengine/godot-benchmarks-results instead) -content/*.md -!content/_index.md +# Generated files +content/benchmark/*.md +content/graph/*.md +data/data.json + +# Untracked source files +src-data/benchmarks/*.md +src-data/benchmarks/*.json diff --git a/web/generate-content.py b/web/generate-content.py new file mode 100755 index 0000000..df47723 --- /dev/null +++ b/web/generate-content.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# +# Usage: +# generate-content.py [benchmarks-folder] +# +# This script generates the content and data files for Hugo. +# It should be run before building the site. +# +# It takes as input the "graphs.json" file in the ./src-data folder +# and the results of our Godot benchmarks (produced by ../run-benchmarks.sh). +# By default, benchmark result files (.json or .md, but all should contain JSON ¯\_(ツ)_/¯) +# are taken from the "./src-data/benchmarks" folder. You can optionally pass a +# different folder as an argument.
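+# For example (hypothetical filename): a results file named 2024-01-31_0123abcd.json
+# is read as date "2024-01-31" and commit "0123abcd", and ends up published as the
+# content/benchmark/2024-01-31_0123abcd.md page, with its data merged into data/data.json.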
+ +import json +import sys +from os import listdir +from os.path import isdir, isfile, join + +# Source data paths. +graphs_path = "./src-data/graphs.json" +if len(sys.argv) == 1: + benchmarks_path = "./src-data/benchmarks" +elif len(sys.argv) == 2: + benchmarks_path = sys.argv[1] + if not isdir(benchmarks_path): + raise ValueError(benchmarks_path + " is not a valid folder") +else: + raise ValueError("Invalid number of arguments") + + +# Base data.json dictionary. +data_output_json = { + "benchmarks": [], + "graphs": [], +} + +### BENCHMARKS ### + +# Fetch the list of benchmark files. +benchmark_input_filename_test = lambda f: (f.endswith(".json") or f.endswith(".md")) +benchmarks_files = [ + f for f in listdir(benchmarks_path) if (isfile(join(benchmarks_path, f)) and benchmark_input_filename_test(f)) +] + +# Add the list of benchmarks. +for f in benchmarks_files: + json_file = open(join(benchmarks_path, f)) + + # Extract the date and commit hash from the filename. + key = f.removesuffix(".json").removesuffix(".md") + date = key.split("_")[0] + commit = key.split("_")[1] + + # Load and modify the benchmark file. + output_dict = json.load(json_file) + output_dict["date"] = date + output_dict["commit"] = commit + + # Merge category and name into a single "path" field. + output_benchmark_list = [] + for benchmark in output_dict["benchmarks"]: + output_benchmark_list.append( + { + "path": [el.strip() for el in benchmark["category"].split(">")] + [benchmark["name"]], + "results": benchmark["results"], + } + ) + output_dict["benchmarks"] = output_benchmark_list + + # Add it to the list. + data_output_json["benchmarks"].append(output_dict) + json_file.close() + +### GRAPHS ### + +# Add the graphs. +json_file = open(graphs_path) +data_output_json["graphs"] = json.load(json_file) +json_file.close() + +### DUMPING data.json ### + +# Create a single JSON file with all of the data. +data_filename = "./data/data.json" +data_file = open(data_filename, "w") +json.dump(data_output_json, data_file, indent=4) +data_file.close() + +### CREATE .md FILES (for the pages) ### + +# Create a .md file for each benchmark. +benchmarks_content_path = "./content/benchmark" +for benchmark in data_output_json["benchmarks"]: + filename = benchmark["date"] + "_" + benchmark["commit"] + ".md" + open(join(benchmarks_content_path, filename), "a").close() + +# Create a .md file for each graph. +graphs_content_path = "./content/graph" +for graph in data_output_json["graphs"]: + filename = graph["id"] + ".md" + open(join(graphs_content_path, filename), "a").close() diff --git a/web/layouts/_default/baseof.html b/web/layouts/_default/baseof.html index ed850e2..47a1c21 100644 --- a/web/layouts/_default/baseof.html +++ b/web/layouts/_default/baseof.html @@ -30,6 +30,9 @@ window.location.href = window.location.href.replace('://godotengine.github.io/godot-benchmarks-results/','://benchmarks.godotengine.org/'); } + {{ block "javascript" . }}{{end}} diff --git a/web/layouts/_default/single.html b/web/layouts/benchmark/single.html similarity index 66% rename from web/layouts/_default/single.html rename to web/layouts/benchmark/single.html index 2065996..b8673ca 100644 --- a/web/layouts/_default/single.html +++ b/web/layouts/benchmark/single.html @@ -1,20 +1,31 @@ {{ define "main" }} +{{ $date := index (split (path.BaseName .Permalink) "_") 0 }} +{{ $commit := index (split (path.BaseName .Permalink) "_") 1 }} + +{{ $benchmark := where .Site.Data.data.benchmarks "commit" "eq" $commit }} +{{ $benchmark := where $benchmark "date" "eq" $date }} +{{ $benchmark := index $benchmark 0 }} +

{{ index (split (path.BaseName .Permalink) "_") 0 }} - {{ slicestr .Params.engine.version_hash 0 9 }}

+ {{ slicestr $benchmark.engine.version_hash 0 9 }} +
{{/* Order is inverted for this site. */}}
{{with .Site.RegularPages.Next . }} « Previous: {{ index (split (path.BaseName .Permalink) "_") 0 }} - {{ slicestr .Params.engine.version_hash 0 9 }} + {{ slicestr $benchmark.engine.version_hash 0 9 }} + {{end}} +
{{with .Site.RegularPages.Prev . }} Next: {{ index (split (path.BaseName .Permalink) "_") 0 }} - {{ slicestr .Params.engine.version_hash 0 9 }} » + {{ slicestr $benchmark.engine.version_hash 0 9 }} » + {{end}}
@@ -50,36 +61,36 @@ Time to build - Debug {{ mul .Params.build_time.debug 0.001 | lang.FormatNumber 0 }} seconds
- Release {{ mul .Params.build_time.release 0.001 | lang.FormatNumber 0 }} seconds + Debug {{ mul $benchmark.build_time.debug 0.001 | lang.FormatNumber 0 }} seconds
+ Release {{ mul $benchmark.build_time.release 0.001 | lang.FormatNumber 0 }} seconds Build peak memory usage - Debug {{ mul .Params.build_peak_memory_usage.debug 0.001 | lang.FormatNumber 2 }} MB
- Release {{ mul .Params.build_peak_memory_usage.release 0.001 | lang.FormatNumber 2 }} MB + Debug {{ mul $benchmark.build_peak_memory_usage.debug 0.001 | lang.FormatNumber 2 }} MB
+ Release {{ mul $benchmark.build_peak_memory_usage.release 0.001 | lang.FormatNumber 2 }} MB Startup + shutdown time - Debug {{ .Params.empty_project_startup_shutdown_time.debug | lang.FormatNumber 0 }} ms
- Release {{ .Params.empty_project_startup_shutdown_time.release | lang.FormatNumber 0 }} ms + Debug {{ $benchmark.empty_project_startup_shutdown_time.debug | lang.FormatNumber 0 }} ms
+ Release {{ $benchmark.empty_project_startup_shutdown_time.release | lang.FormatNumber 0 }} ms Startup + shutdown peak memory usage - Debug {{ mul .Params.empty_project_startup_shutdown_peak_memory_usage.debug 0.001 | lang.FormatNumber 2 }} MB
- Release {{ mul .Params.empty_project_startup_shutdown_peak_memory_usage.release 0.001 | lang.FormatNumber 2 }} MB + Debug {{ mul $benchmark.empty_project_startup_shutdown_peak_memory_usage.debug 0.001 | lang.FormatNumber 2 }} MB
+ Release {{ mul $benchmark.empty_project_startup_shutdown_peak_memory_usage.release 0.001 | lang.FormatNumber 2 }} MB Binary size - Debug {{ mul .Params.binary_size.debug 0.001 | lang.FormatNumber 0 }} KB
- Release {{ mul .Params.binary_size.release 0.001 | lang.FormatNumber 0 }} KB + Debug {{ mul $benchmark.binary_size.debug 0.001 | lang.FormatNumber 0 }} KB
+ Release {{ mul $benchmark.binary_size.release 0.001 | lang.FormatNumber 0 }} KB @@ -100,10 +111,16 @@ {{/* Check CPU debug data only, but also get data from release CPU runs. */}} {{/* These runs are expected to have the same number of results available. */}} - {{ range .Params.benchmarks }} + {{ range $benchmark.benchmarks }} {{ if gt .results.cpu_debug.time 0 }} - {{ .category }}
{{ .name }} + + + {{ delimit (first (sub (len .path) 1) .path) " > "}} + +
+ {{ index (last 1 .path) 0 }} + {{ if gt .results.cpu_debug.idle 0 }} Debug {{ .results.cpu_debug.idle }} mspf
@@ -113,7 +130,7 @@ {{ if gt .results.cpu_debug.physics 0 }} Debug {{ .results.cpu_debug.physics }} mspf
- Release {{ .results.cpu_debug.physics }} mspf + Release {{ .results.cpu_release.physics }} mspf {{ end }} @@ -142,10 +159,16 @@ {{/* Check GPU AMD data only, but also get data from Intel and NVIDIA GPU runs. */}} {{/* These runs are expected to have the same number of results available. */}} - {{ range .Params.benchmarks }} + {{ range $benchmark.benchmarks }} {{ if gt .results.amd.render_cpu 0 }} - {{ .category }}
{{ .name }} + + + {{ delimit (first (sub (len .path) 1) .path) " > "}} + +
+ {{ index (last 1 .path) 0 }} + {{ if gt .results.amd.render_cpu 0 }} diff --git a/web/layouts/graph/single.html b/web/layouts/graph/single.html new file mode 100644 index 0000000..4a7545e --- /dev/null +++ b/web/layouts/graph/single.html @@ -0,0 +1,29 @@ +{{ define "main" }} + +{{ $graphID := path.BaseName .Permalink }} +{{ $graph := index (where .Site.Data.data.graphs "id" $graphID) 0 }} + +

+ {{ $graph.title }} (lower is better) +

+ +
+ +
+
+
+ +{{end}} + +{{ define "javascript" }} + + + + +{{ end }} diff --git a/web/layouts/index.html b/web/layouts/index.html index d8bfde7..373c6d7 100644 --- a/web/layouts/index.html +++ b/web/layouts/index.html @@ -1,6 +1,9 @@ {{ define "main" }} -
+{{ $benchmarks := .Site.Data.data.benchmarks}} +{{ $graphs := .Site.Data.data.graphs}} + +

This page tracks Godot Engine performance running on a benchmark suite. @@ -8,14 +11,32 @@ regressions over time.

+

Graphs

+
+
+ Normalized (percentage of the average time), lower is better. +
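+ {{/* With hypothetical figures: a compact value of 100 means a run matched the benchmark's historical average time, 90 means roughly 10% faster, 110 roughly 10% slower. */}}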
+
+
+ {{ range $graphs }} +
+
+ {{ .title }} +
+
+
+
+ {{ end }} +
+

Latest benchmark runs

-

Benchmarking machine

@@ -73,6 +94,15 @@ {{ end }} {{ define "javascript" }} + {{ end }} diff --git a/web/src-data/graphs.json b/web/src-data/graphs.json new file mode 100644 index 0000000..fa4afec --- /dev/null +++ b/web/src-data/graphs.json @@ -0,0 +1,77 @@ +[ + { + "id": "core-callables", + "title": "Callables", + "benchmark-path-prefix": "Core/Callable" + }, + { + "id": "core-crypto", + "title": "Crypto", + "benchmark-path-prefix": "Core/Crypto" + }, + { + "id": "core-rng", + "title": "Random Number Generator", + "benchmark-path-prefix": "Core/Random Number Generator" + }, + { + "id": "core-signal", + "title": "Signals", + "benchmark-path-prefix": "Core/Signal" + }, + { + "id": "gdscript-allocations", + "title": "GDScript allocations", + "benchmark-path-prefix": "Gdscript/Alloc" + }, + { + "id": "gdscript-arrays", + "title": "GDScript arrays", + "benchmark-path-prefix": "Gdscript/Array" + }, + { + "id": "gdscript-string-checksum", + "title": "GDScript String Checksums", + "benchmark-path-prefix": "Gdscript/String Checksum" + }, + { + "id": "gdscript-string-format", + "title": "GDScript String Format", + "benchmark-path-prefix": "Gdscript/String Format" + }, + { + "id": "gdscript-string-manipulations", + "title": "GDScript String Manipulations", + "benchmark-path-prefix": "Gdscript/String Manipulation" + }, + { + "id": "physics", + "title": "Rigid Body 3D", + "benchmark-path-prefix": "Physics/Rigid Body 3d" + }, + { + "id": "rendering-culling", + "title": "Culling", + "benchmark-path-prefix": "Rendering/Culling" + }, + { + "id": "rendering-hlod", + "title": "HLOD", + "benchmark-path-prefix": "Rendering/Hlod" + }, + { + "id": "rendering-labels", + "title": "Rendering labels", + "benchmark-path-prefix": "Rendering/Hlod" + }, + { + "id": "rendering-lights-and-meshes", + "title": "Lights and Meshes", + "benchmark-path-prefix": "Rendering/Lights And Meshes" + }, + { + "id": "rendering-polygon-sprite-2d", + "title": "Polygon Sprite 2D", + "benchmark-path-prefix": "Rendering/Polygon Sprite 2d" + } +] diff --git a/web/static/graphs.js b/web/static/graphs.js new file mode 100644 index 0000000..99f38a2 --- /dev/null +++ b/web/static/graphs.js @@ -0,0 +1,209 @@ +function getAllowedMetrics() { + const allowedMetrics = new Set(); + Database.benchmarks.forEach((benchmark) => { + benchmark.benchmarks.forEach((instance) => { + Object.entries(instance.results).forEach(([key, value]) => { + allowedMetrics.add(key); + }); + }); + }); + return allowedMetrics; +} + +function displayGraph(targetDivID, graphID, type = "full", filter = "") { + if (!["full", "compact"].includes(type)) { + throw new Error("Unknown chart type"); + } + + // Sort benchmark runs chronologically by date, then commit. + const allBenchmarks = Database.benchmarks.sort( + (a, b) => `${a.date}.${a.commit}`.localeCompare(`${b.date}.${b.commit}`), + ); + const graph = Database.graphs.find((g) => g.id == graphID); + if (!graph) { + throw new Error("Invalid graph ID"); + } + // Group by series. + const xAxis = []; + const series = new Map(); + const processResult = (path, data, process) => { + Object.entries(data).forEach(([key, value]) => { + if (typeof value === "object") { + processResult(path + "/" + key, value, process); + } else { + // Number + process(path + "/" + key, value); + } + }); + }; + + // List all the series and fill them in. + allBenchmarks.forEach((benchmark, count) => { + // Process one date/commit entry. + xAxis.push(benchmark.date + "." + benchmark.commit); + + // Get all series.
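+ // For example (hypothetical data): a benchmark with path ["Core", "Callable", "Bind"] and
+ // results { "cpu_debug": { "time": 1.2 } }, matched against the prefix "Core/Callable",
+ // contributes a point to the series keyed "Bind/cpu_debug/time".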
+ benchmark.benchmarks.forEach((instance) => { + let instanceKey = instance.path.join("/"); + if (!instanceKey.startsWith(graph["benchmark-path-prefix"])) { + return; + } + instanceKey = instanceKey.slice( + graph["benchmark-path-prefix"].length + 1, + ); + + processResult(instanceKey, instance.results, (path, value) => { + // Filter out paths that do not match the filter. + if (filter && !path.includes(filter)) { + return; + } + if (!series.has(path)) { + series.set(path, Array(count).fill(null)); + } + series.get(path).push(value); + }); + }); + }); + + let customColor = undefined; + + if (type === "compact") { + // Normalize each series by dividing it by its own average. + series.forEach((serie, key) => { + let count = 0; + let mean = 0.0; + serie.forEach((el) => { + if (el != null) { + mean += el; + count += 1; + } + }); + mean = mean / count; + + //const std = Math.sqrt(input.map(x => Math.pow(x - mean, 2)).reduce((a, b) => a + b) / n) + series.set( + key, + serie.map((v) => { + if (v != null) { + return v / mean; // Divide by the mean. + } + return null; + }), + ); + }); + // Combine all series into a single, averaged series. + const outputSerie = []; + for (let i = 0; i < allBenchmarks.length; i++) { + let count = 0; + let sum = 0; + series.forEach((serie, key) => { + if (serie[i] != null) { + count += 1; + sum += serie[i]; + } + }); + let point = null; + if (count >= 1) { + point = Math.round((sum * 1000) / count) / 10; // Mean as a percentage, rounded to 1 decimal place. + } + outputSerie.push(point); + } + series.clear(); + series.set("Average", outputSerie); + + // Detect the recent trend: compare the average of the last 3 points against the 7 points before them. + const lastElementsCount = 3; + const totalConsideredCount = 10; + const lastElements = outputSerie.slice(-lastElementsCount); + const comparedTo = outputSerie.slice( + -totalConsideredCount, + -lastElementsCount, + ); + const avgLast = lastElements.reduce((a, b) => a + b) / lastElements.length; + const avgComparedTo = + comparedTo.reduce((a, b) => a + b) / comparedTo.length; + const trend = avgLast - avgComparedTo; + + if (trend > 10) { + customColor = "#E20000"; + } else if (trend < -10) { + customColor = "#00E200"; + } + } + + const options = { + series: Array.from(series.entries()).map(([key, value]) => ({ + name: key, + data: value, + })), + chart: { + foreColor: "var(--text-bright)", + background: "var(--background)", + height: type === "compact" ? 200 : 600, + type: "line", + zoom: { + enabled: false, + }, + toolbar: { + show: false, + }, + animations: { + enabled: false, + }, + }, + tooltip: { + theme: "dark", + y: { + formatter: (value, opts) => (type === "compact" ? value + "%" : value), + }, + }, + dataLabels: { + enabled: false, + }, + stroke: { + curve: "straight", + width: 2, + }, + theme: { + palette: "palette4", + }, + fill: + type === "compact" + ? { + type: "gradient", + gradient: { + shade: "dark", + gradientToColors: ["#4ecdc4"], + shadeIntensity: 1, + type: "horizontal", + opacityFrom: 1, + opacityTo: 1, + stops: [0, 100], + }, + } + : {}, + colors: + type === "compact" + ? customColor + ? [customColor] + : ["#4ecdc4"] + : undefined, + xaxis: { + categories: xAxis, + labels: { + show: type !== "compact", + }, + }, + yaxis: { + tickAmount: 4, + min: type === "compact" ? 0 : undefined, + max: type === "compact" ? 200 : undefined, + }, + legend: { + show: type !== "compact", + }, + }; + + const chart = new ApexCharts(document.querySelector(targetDivID), options); + chart.render(); +}
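+
+// Example usage, as a rough sketch: the element IDs below are hypothetical, since the
+// actual calls are made from the Hugo templates rather than from this file.
+// displayGraph("#graph", "core-callables", "full"); // full chart on a graph page
+// displayGraph("#graph-core-callables", "core-callables", "compact"); // compact preview on the index page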