Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,19 @@
## Unreleased

### Configurable benchmark timeout per runner

Runners can now set a `timeout_minutes` field in `bench_runner.toml` to
override the maximum runtime for benchmark jobs. If unset, the per-OS default
in the workflow template applies (Linux: 1440, others: GitHub default).

```toml
[runners.example]
os = "linux"
arch = "x86_64"
hostname = "example"
timeout_minutes = 210
```

### Bugfixes

#### Use PGO on weekly builds
Expand Down
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,12 @@ You may limit the number of cores used to build Python with the `use_cores` opti
use_cores = 2
```

You may set a maximum runtime for benchmark jobs on a runner with `timeout_minutes`. If unset, the per-OS default in the workflow template applies (Linux: 1440, others: the GitHub default).

```toml
timeout_minutes = 210
```

### Try a benchmarking run

There are instructions for running a benchmarking action already in the `README.md` of your repo. Look there and give it a try!
Expand Down
4 changes: 4 additions & 0 deletions bench_runner/runners.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,10 @@ class Runner:
# The number of cores to use to compile CPython. If not provided, `make -j`
# will be used.
use_cores: int | None = None
# Maximum runtime for benchmark jobs on this runner, in minutes. If unset,
# the per-OS default in the workflow template applies (Linux: 1440, others:
# GitHub default).
timeout_minutes: int | None = None

def __post_init__(self):
if self.github_runner_name is None:
Expand Down
8 changes: 7 additions & 1 deletion bench_runner/scripts/install.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
from bench_runner import runners
from bench_runner.util import PathLike


ROOT_PATH = Path()
TEMPLATE_PATH = Path(__file__).parents[1] / "templates"
WORKFLOW_PATH = Path() / ".github" / "workflows"
Expand Down Expand Up @@ -178,6 +177,13 @@ def generate__benchmark(src: Any) -> Any:

runner_template["runs-on"].append(runner.github_runner_name)

if runner.timeout_minutes is not None:
# Insert right after runs-on for readable YAML, regardless of
# whether the per-OS skeleton already had a timeout-minutes key.
runner_template.pop("timeout-minutes", None)
idx = list(runner_template.keys()).index("runs-on") + 1
runner_template.insert(idx, "timeout-minutes", runner.timeout_minutes)

machine_clauses = [
f"inputs.machine == '{runner.name}'",
"inputs.machine == '__really_all'",
Expand Down
1 change: 1 addition & 0 deletions tests/data/bench_runner.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ os = "linux"
arch = "x86_64"
hostname = "pyperf"
plot = { name = "Linux", color = "C0" }
timeout_minutes = 210

[longitudinal_plot]
subplots = [
Expand Down
7 changes: 6 additions & 1 deletion tests/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

from bench_runner import runners


DATA_PATH = Path(__file__).parent / "data"


Expand All @@ -16,3 +15,9 @@ def test_get_runner_for_hostname(monkeypatch):
assert runner.os == "linux"
assert runner.arch == "x86_64"
assert runner.hostname == "pyperf"
assert runner.timeout_minutes == 210


def test_runner_timeout_minutes_default():
    """A Runner built without an explicit timeout_minutes leaves it unset (None)."""
    r = runners.Runner(nickname="x", os="linux", arch="x86_64", hostname="x")
    assert r.timeout_minutes is None
Loading