Add command line arguments to include/exclude specific benchmarks

- Automatically quit once benchmarks finish when using the `--run-benchmarks` CLI argument.
- Improve command line printing to be more helpful and descriptive.
- Add documentation on running benchmarks in the README.
Author: Hugo Locurcio
Date:   2022-07-27 20:17:43 +02:00
parent d26efe3afc
commit fb514f8e22
5 changed files with 134 additions and 50 deletions

README.md

@@ -5,3 +5,67 @@ used to test performance of different areas of [Godot](https://godotengine.org)
such as rendering and scripting.

**Interested in adding new benchmarks?** See [CONTRIBUTING.md](CONTRIBUTING.md).

## Running benchmarks

### Using a graphical interface

Open the project in the editor, then run it from the editor or from an export
template binary. Select the benchmarks you want to run, then click the **Run**
button in the bottom-right corner.

Once benchmarks have finished running, you can copy the results JSON using the
**Copy JSON to Clipboard** button at the bottom. The results JSON is also printed
to standard output, which you can see if you're running the project from a terminal.

### Using the command line

After opening the project in the editor (required so that resources can be imported),
you can run benchmarks from an editor or export template binary. The project will
automatically quit after running benchmarks.

The results JSON is printed to standard output once all benchmarks have run.

> **Note**
>
> To import the project in the editor from the command line, use `godot --editor --quit`.
> If this doesn't work, use `timeout 30 godot --editor` instead.

> **Note**
>
> `godot` is assumed to be in your `PATH` environment variable here. If this is
> not the case, replace `godot` with the absolute path to your Godot editor or
> export template binary.
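
For example, you could point a shell alias at the binary for the current session
(the path below is hypothetical; adjust it to your install location):

```bash
# Hypothetical location of a Godot 4 binary; adjust as needed.
alias godot="$HOME/Applications/Godot_v4.0-stable_linux.x86_64"
```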

#### Run all benchmarks

```bash
# The first `--` is important.
# Otherwise, Godot won't pass the CLI arguments to the project.
godot -- --run-benchmarks
```

#### Run a single benchmark

Use the `--include-benchmarks` CLI argument to specify the name of the benchmark
to run. The project will print a message to acknowledge that your argument was
taken into account for filtering benchmarks.

Benchmark names follow the `category/some_name` convention, with `category` being
the name of the *last* path component (folder) and `some_name` being the name of
the benchmark's scene file.
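For example, a `static_cull.tscn` scene stored in a `culling` folder is addressed
as `culling/static_cull`: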

```bash
godot -- --run-benchmarks --include-benchmarks="culling/static_cull"
```

#### Run a category of benchmarks

Use glob syntax (with `*` acting as a wildcard) to run a whole category of benchmarks:

```bash
godot -- --run-benchmarks --include-benchmarks="culling/*"
```

You can exclude specific benchmarks using the `--exclude-benchmarks` command line argument.
This argument also supports globbing and can be used at the same time as `--include-benchmarks`.
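
For instance (mirroring the example usage message printed by `main.gd`), the
following runs every `culling` benchmark except `static_cull`:

```bash
godot -- --run-benchmarks --include-benchmarks="culling/*" --exclude-benchmarks="culling/static_cull"
```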

main.gd

@@ -1,6 +1,8 @@
extends Panel

var items := []
+var include_benchmarks_glob := ""
+var exclude_benchmarks_glob := ""

@onready var tree := $Tree as Tree
@@ -20,12 +22,12 @@ func _ready() -> void:
    var root := tree.create_item()
    var categories := {}

    for i in Manager.get_test_count():
        var test_name := Manager.get_test_name(i)
        var category := Manager.get_test_category(i)
        var results := Manager.get_test_result(i)

        if category not in categories:
            var c := tree.create_item(root)
            c.set_text(0, category)
@@ -35,7 +37,7 @@ func _ready() -> void:
        item.set_cell_mode(0, TreeItem.CELL_MODE_CHECK)
        item.set_text(0, test_name)
        item.set_editable(0, true)

        if results:
            if results.render_cpu:
                item.set_text(1, "%s ms" % str(results.render_cpu).pad_decimals(2))
@@ -51,11 +53,27 @@ func _ready() -> void:
                item.set_text(5, "%d ms" % results.time)
        items.append(item)

    # Select all benchmarks since the user most likely wants to run all of them by default.
    _on_SelectAll_pressed()

-    if "--run-benchmarks" in OS.get_cmdline_args():
+    # Parse valid command-line arguments of the form `--key=value`.
+    for argument in OS.get_cmdline_user_args():
+        if argument.begins_with("--include-benchmarks="):
+            var key_value := argument.split("=")
+            # Remove quotes around the argument's value, so that "culling/*" becomes culling/* for globbing.
+            include_benchmarks_glob = key_value[1].trim_prefix('"').trim_suffix('"').trim_prefix("'").trim_suffix("'")
+            print("Using benchmark include glob specified on command line: %s" % include_benchmarks_glob)
+        if argument.begins_with("--exclude-benchmarks="):
+            var key_value := argument.split("=")
+            # Remove quotes around the argument's value, so that "culling/*" becomes culling/* for globbing.
+            exclude_benchmarks_glob = key_value[1].trim_prefix('"').trim_suffix('"').trim_prefix("'").trim_suffix("'")
+            print("Using benchmark exclude glob specified on command line: %s" % exclude_benchmarks_glob)
+
+    if "--run-benchmarks" in OS.get_cmdline_user_args():
+        Manager.run_from_cli = true
+        print("Running benchmarks as specified on command line.\n")
        _on_Run_pressed()
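
A note on the parsing above: `OS.get_cmdline_user_args()` only returns arguments
placed after the first `--` separator, and depending on your shell and platform,
quotes around a glob may or may not reach the engine, which is why both quote
styles are trimmed. A minimal standalone sketch of that trimming (the sample
value is hypothetical):

```gdscript
# Strip one pair of surrounding double or single quotes, as the code above does.
var value := '"culling/*"'
value = value.trim_prefix('"').trim_suffix('"').trim_prefix("'").trim_suffix("'")
print(value)  # Prints: culling/*
```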
@@ -79,16 +97,32 @@ func _on_CopyJSON_pressed() -> void:
func _on_Run_pressed() -> void:
    var queue := []
    var index := 0
+    var paths := []

    for item in items:
-        if item.is_checked(0):
-            queue.append(index)
-        index += 1
-    if index == 0:
-        return
-    print("Running %d benchmarks..." % items.size())
-    Manager.benchmark(queue, $TestTime.value, "res://main.tscn")
+        var path := str(item.get_parent().get_text(0) + "/" + item.get_text(0)).to_lower().replace(" ", "_")
+        if not include_benchmarks_glob.is_empty():
+            if not path.match(include_benchmarks_glob):
+                continue
+        if not exclude_benchmarks_glob.is_empty():
+            if path.match(exclude_benchmarks_glob):
+                continue
+        if item.is_checked(0):
+            queue.push_back(index)
+            paths.push_back(path)
+        index += 1
+
+    if index >= 1:
+        print_rich("[b]Running %d benchmarks:[/b] %s" % [queue.size(), ", ".join(paths)])
+        Manager.benchmark(queue, $TestTime.value, "res://main.tscn")
+    else:
+        print_rich("[color=red][b]ERROR:[/b] No benchmarks to run.[/color]")
+        if Manager.run_from_cli:
+            print("Double-check the syntax of the benchmarks include/exclude glob (quotes are required).")
+            print_rich('Example usage: [code]godot -- --run-benchmarks --include-benchmarks="culling/*" --exclude-benchmarks="culling/static_cull"[/code]')
+            get_tree().quit(1)

func _on_Tree_item_edited() -> void:
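
The filtering above relies on `String.match()`, which does glob-style matching
against the whole string, with `*` matching any sequence of characters. A small
standalone illustration:

```gdscript
var path := "culling/static_cull"
print(path.match("culling/*"))      # true: the wildcard covers the rest of the path.
print(path.match("culling/static")) # false: the glob must match the entire string.
print(path.match("*/static_cull"))  # true: wildcards can appear anywhere in the glob.
```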

main.tscn

@@ -5,10 +5,7 @@
[node name="Main" type="Panel"]
anchor_right = 1.0
anchor_bottom = 1.0
-script = ExtResource( "1" )
-__meta__ = {
-"_edit_use_anchors_": false
-}
+script = ExtResource("1")
[node name="Label" type="Label" parent="."]
offset_left = 24.0
@@ -17,9 +14,6 @@ offset_right = 197.0
offset_bottom = 52.0
theme_override_font_sizes/font_size = 16
text = "Available Benchmarks:"
-__meta__ = {
-"_edit_use_anchors_": false
-}
[node name="Tree" type="Tree" parent="."]
anchor_right = 1.0
@@ -29,9 +23,6 @@ offset_top = 40.0
offset_right = -18.0
offset_bottom = -45.0
hide_root = true
-__meta__ = {
-"_edit_use_anchors_": false
-}
[node name="SelectAll" type="Button" parent="."]
anchor_left = 1.0
@@ -42,7 +33,6 @@ offset_left = -592.0
offset_top = -38.0
offset_right = -510.0
offset_bottom = -9.0
-rect_pivot_offset = Vector2(41, 20)
text = "Select All"
[node name="SelectNone" type="Button" parent="."]
@@ -54,7 +44,6 @@ offset_left = -499.0
offset_top = -38.0
offset_right = -396.0
offset_bottom = -9.0
-rect_pivot_offset = Vector2(41, 20)
text = "Select None"
[node name="CopyJSON" type="Button" parent="."]
@@ -66,7 +55,6 @@ offset_left = -344.0
offset_top = -38.0
offset_right = -153.0
offset_bottom = -9.0
-rect_pivot_offset = Vector2(41, 20)
text = "Copy JSON to clipboard"
[node name="Run" type="Button" parent="."]
@@ -78,14 +66,10 @@ offset_left = -109.0
offset_top = -41.0
offset_right = -27.0
offset_bottom = -7.0
-rect_pivot_offset = Vector2(41, 20)
theme_override_font_sizes/font_size = 20
disabled = true
text = "Run
"
-__meta__ = {
-"_edit_use_anchors_": false
-}
[node name="Label2" type="Label" parent="."]
offset_left = 35.0
@@ -93,9 +77,6 @@ offset_top = 563.0
offset_right = 155.0
offset_bottom = 603.0
text = "Test Time (sec)"
-__meta__ = {
-"_edit_use_anchors_": false
-}
[node name="TestTime" type="SpinBox" parent="."]
offset_left = 160.0
@@ -104,9 +85,6 @@ offset_right = 342.0
offset_bottom = 593.0
min_value = 1.0
value = 5.0
-__meta__ = {
-"_edit_use_anchors_": false
-}
[connection signal="item_edited" from="Tree" to="." method="_on_Tree_item_edited"]
[connection signal="pressed" from="SelectAll" to="." method="_on_SelectAll_pressed"]

Manager.gd

@@ -42,6 +42,7 @@ var tests_queue_initial_size := 0
var test_time := 5.0
var return_to_scene := ""
var skip_first := false
+var run_from_cli := false

var record_render_gpu := false
var record_render_cpu := false
@@ -58,7 +59,7 @@ func _ready():
func _process(delta: float) -> void:
    if not recording:
        return

    if skip_first:
        skip_first = false
        return
@@ -112,8 +113,12 @@ func benchmark(queue: Array, time: float, return_path: String) -> void:
func begin_test() -> void:
    DisplayServer.window_set_title("%d/%d - Running - Godot Benchmarks" % [tests_queue_initial_size - tests_queue.size() + 1, tests_queue_initial_size])
-    print("Running benchmark %d of %d..." % [tests_queue_initial_size - tests_queue.size() + 1, tests_queue_initial_size])
+    print("Running benchmark %d of %d: %s" % [
+        tests_queue_initial_size - tests_queue.size() + 1,
+        tests_queue_initial_size,
+        tests[tests_queue[0]].path.trim_prefix("res://").trim_suffix(".tscn")]
+    )

-    results = Results.new()
    recording = true
+    results = Results.new()
@@ -121,9 +126,9 @@ func begin_test() -> void:
    remaining_time = test_time
    set_process(true)
    get_tree().change_scene(tests[tests_queue[0]].path)

    var benchmark_group := get_tree().get_nodes_in_group("benchmark_config")
    if benchmark_group.size() >= 1:
        var benchmark: Node = benchmark_group[0]
        record_render_cpu = benchmark.test_render_cpu
@@ -153,7 +158,7 @@ func end_test() -> void:
    tests[tests_queue[0]].results = results
    results = null
    tests_queue.pop_front()

    # If more tests are still pending, go to the next test.
    if tests_queue.size() > 0:
        begin_test()
@@ -161,8 +166,11 @@ func end_test() -> void:
        get_tree().change_scene(return_to_scene)
        return_to_scene = ""
        DisplayServer.window_set_title("[DONE] %d benchmarks - Godot Benchmarks" % tests_queue_initial_size)
-        print("Done running %d benchmarks. Results JSON:\n" % tests_queue_initial_size)
+        print_rich("[color=green][b]Done running %d benchmarks.[/b] Results JSON:[/color]\n" % tests_queue_initial_size)
        print(get_results_dict())
+
+        if run_from_cli:
+            # Automatically exit after running benchmarks for automation purposes.
+            get_tree().quit()

func get_results_dict() -> Dictionary:
@@ -172,7 +180,7 @@ func get_results_dict() -> Dictionary:
        version_string = "v%d.%d.%d.%s.%s" % [version_info.major, version_info.minor, version_info.patch, version_info.status, version_info.build]
    else:
        version_string = "v%d.%d.%s.%s" % [version_info.major, version_info.minor, version_info.status, version_info.build]

    var dict := {
        engine = {
            version = version_string,
@@ -192,14 +200,14 @@ func get_results_dict() -> Dictionary:
            gpu_vendor = RenderingServer.get_video_adapter_vendor(),
        }
    }

    var benchmarks := []
    for i in Manager.get_test_count():
        var test := {
            category = Manager.get_test_category(i),
            name = Manager.get_test_name(i),
        }

        var result: Manager.Results = Manager.get_test_result(i)
        if result:
            test.results = {
@@ -211,9 +219,9 @@ func get_results_dict() -> Dictionary:
            }
        else:
            test.results = {}

        benchmarks.push_back(test)

    dict.benchmarks = benchmarks
    return dict
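
Since the project now exits on its own after a CLI run, and `main.gd` calls
`get_tree().quit(1)` when no benchmarks match the globs, the process exit code
can drive automation. A hedged sketch of a CI step (the log file name is
illustrative):

```bash
# Run the benchmarks and fail the step on a bad glob or empty selection.
if ! godot -- --run-benchmarks --include-benchmarks="culling/*" > benchmark_output.log; then
    echo "Benchmark run failed; check the include/exclude globs." >&2
    exit 1
fi
```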

project.godot

@@ -22,8 +22,8 @@ _global_script_class_icons={
config/name="Godot Benchmarks"
run/main_scene="res://main.tscn"
-config/icon="res://icon.png"
config/features=PackedStringArray("4.0")
+config/icon="res://icon.png"
[autoload]