-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexample_config.toml
More file actions
129 lines (108 loc) · 5.15 KB
/
example_config.toml
File metadata and controls
129 lines (108 loc) · 5.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
# Example .gitperfconfig file for git-perf
# This file should be placed in your repository root
#
# For detailed explanations of all options, see:
# https://github.com/kaihowl/git-perf/blob/master/README.md#configuration
#
# Quick links:
# - Configuration Guide: ../README.md#configuration
# - Integration Tutorial: ./INTEGRATION_TUTORIAL.md
# - Audit System: ../README.md#audit-system
# Parent table: default settings inherited by every measurement
[measurement]
# Ignore a measurement when its relative deviation (as a percentage) falls
# below this threshold, even if it exceeds the sigma threshold.
# Relative deviation = |(head_value / tail_median - 1.0) * 100%|, where
# tail_median is the median of the historical measurements (HEAD excluded).
min_relative_deviation = 5.0

# Ignore a measurement when its absolute deviation (in measurement units)
# falls below this threshold, even if it exceeds the sigma threshold.
# Useful when values sit near zero (relative deviation is meaningless there)
# or when a known absolute tolerance is acceptable.
# Absolute deviation = |head_value - tail_median|
min_absolute_deviation = 0.5

# Statistical dispersion method used for outlier detection.
# Options: "stddev" (standard deviation) or "mad" (median absolute deviation).
# MAD is more robust to outliers; stddev is more sensitive.
dispersion_method = "mad"

# Sigma threshold for statistical outlier detection: the number of standard
# deviations (or MADs) away from the median. Higher values are more lenient,
# lower values are stricter.
# Default: 4.0 (if not specified)
sigma = 3.5

# Minimum number of measurements needed before statistical analysis runs;
# with fewer measurements, audits may skip the statistical checks.
# Default: 2 (if not specified)
min_measurements = 3

# How multiple measurements for the same commit are reduced to one value.
# Options: "min", "max", "median", "mean"
# Default: "min" (if not specified)
aggregate_by = "median"

# Default epoch for accepting performance changes (hex format)
epoch = "00000000"

# Optional default unit, shown in reports, audit output, and CSV exports for
# every measurement that has no specific unit of its own.
# Common units: "ms", "seconds", "bytes", "MB", "requests/sec", etc.
unit = "ms"
# Measurement-specific settings (override the parent defaults)

# Build time measurements can have higher variance, so use a higher threshold.
# Use MAD for build times as they can have occasional outliers from system load.
[measurement.build_time]
min_relative_deviation = 10.0
min_absolute_deviation = 50.0 # e.g., ignore changes below 50ms
dispersion_method = "mad"
epoch = "12345678"
unit = "ms" # Display build time in milliseconds
# Memory usage should be more stable, so use a lower threshold.
# Use stddev for memory as it's typically more consistent.
[measurement.memory_usage]
min_relative_deviation = 2.0
dispersion_method = "stddev"
epoch = "abcdef12"
unit = "bytes" # Display memory usage in bytes
# Test runtime can vary significantly, so use a moderate threshold.
# Use MAD for test times as they can have outliers from system performance.
[measurement.test_runtime]
min_relative_deviation = 7.5
dispersion_method = "mad"
unit = "seconds" # Display test runtime in seconds
# CPU usage measurements - use stddev for more sensitive detection
[measurement.cpu_usage]
min_relative_deviation = 3.0
dispersion_method = "stddev"
unit = "%" # Display CPU usage as percentage
# Network latency can have spikes, so use MAD for robustness
[measurement.network_latency]
min_relative_deviation = 8.0
dispersion_method = "mad"
unit = "ms" # Display network latency in milliseconds
# Change point detection: default configuration for all measurements
[change_point]
# Globally enable or disable change point detection (default: true)
enabled = true

# Detection requires at least this many data points (default: 10)
min_data_points = 10

# Changes smaller than this percentage are not considered significant
# (default: 5.0)
min_magnitude_pct = 5.0

# Confidence required before a change point is reported (0.0-1.0,
# default: 0.75). Higher values demand stronger statistical evidence.
confidence_threshold = 0.75

# Penalty factor for the PELT algorithm (default: 0.5).
# Raising it detects fewer change points (less sensitive);
# lowering it detects more (more sensitive).
penalty = 0.5
# Measurement-specific change point settings (override the defaults)

# Less sensitive for this measurement (avoid detecting minor fluctuations)
[change_point.build_time]
penalty = 1.0
# More sensitive for detecting subtle memory changes
[change_point.memory_usage]
penalty = 0.3
# Retry-logic backoff settings
[backoff]
# Upper bound, in seconds, on the total elapsed time spent backing off
max_elapsed_seconds = 60