-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpyqual.yaml
More file actions
144 lines (129 loc) · 4.73 KB
/
pyqual.yaml
File metadata and controls
144 lines (129 loc) · 4.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
---
# pyqual pipeline: regression-detection loop for the regix project.
# NOTE(review): the source had its indentation stripped (web-scrape artifact);
# nesting below is reconstructed from the key semantics — verify against pyqual's schema.
pipeline:
  name: regix-regression-loop

  # Quality gates — DO NOT lower these thresholds to pass!
  metrics:
    cc_max: 10          # cyclomatic complexity per function
    vallm_pass_min: 50  # vallm validation pass rate (%)
    coverage_min: 80    # test coverage (%)

  # Pipeline stages with regression detection and fixing
  stages:
    - name: baseline
      tool: code2llm
      when: first_iteration
      timeout: 0  # No timeout for large repos

    - name: validate
      run: vallm batch regix tests --recursive --format toon --output project
      when: always
      timeout: 600

    - name: test
      run: pytest --cov=regix --cov-report=json:.pyqual/coverage.json -q
      optional: true
      timeout: 300

    - name: prefact
      tool: prefact
      when: metrics_fail
      timeout: 900

    - name: fix_regression
      run: |
        # Use llx to fix detected issues (scope to regix/ only, protect config files)
        llx fix regix/ --apply --verbose --model ${LLM_MODEL:-openrouter/openai/gpt-5-mini}
      when: metrics_fail
      timeout: 1800

    - name: verify_fix
      run: vallm batch regix tests --recursive --format toon --output .pyqual/after_fix
      when: after_fix
      timeout: 600

    - name: benchmark
      run: |
        mkdir -p .pyqual
        python3 -m regix.benchmark --json > .pyqual/benchmark.json
      when: always
      optional: true
      timeout: 300

    - name: regression_report
      run: |
        echo "=== Regression Report ==="
        if [ -f .pyqual/validation.toon.yaml ]; then
          echo "Initial validation results:"
          cat .pyqual/validation.toon.yaml | grep -E "passed:|errors:|warnings:" || echo "Results available in .pyqual/validation.toon.yaml"
        fi
        if [ -d .pyqual/after_fix ]; then
          echo "After fix validation results:"
          cat .pyqual/after_fix/validation.toon.yaml | grep -E "passed:|errors:|warnings:" || echo "Results available in .pyqual/after_fix/validation.toon.yaml"
        fi
        if [ -f .pyqual/benchmark.json ]; then
          echo "Benchmark:"
          python3 -c "import json; d=json.load(open('.pyqual/benchmark.json')); print(f' probes: {len(d)}, failed: {sum(1 for r in d if r[\"status\"] in (\"FAIL\",\"ERROR\"))}')"
        fi
        echo "========================"
      when: after_verify_fix
      timeout: 60

    # Push & publish (run only when all gates pass)
    - name: push
      run: |
        git add -A
        if ! git diff --cached --quiet; then
          git commit -m "chore: pyqual auto-fix iteration"
          git push origin HEAD
        else
          echo "No changes to push."
        fi
      when: metrics_pass
      optional: true
      timeout: 120

    - name: bump_version
      run: |
        # Bump patch version in all version files
        CURRENT=$(cat VERSION)
        echo "Current version: $CURRENT"
        # Parse version: major.minor.patch using cut (POSIX compatible)
        MAJOR=$(echo "$CURRENT" | cut -d. -f1)
        MINOR=$(echo "$CURRENT" | cut -d. -f2)
        PATCH=$(echo "$CURRENT" | cut -d. -f3)
        NEW_PATCH=$((PATCH + 1))
        NEW_VERSION="${MAJOR}.${MINOR}.${NEW_PATCH}"
        echo "New version: $NEW_VERSION"
        # Update VERSION file
        echo "$NEW_VERSION" > VERSION
        # Update __init__.py
        sed -i "s/__version__ = \"[0-9]\\+\\.[0-9]\\+\\.[0-9]\\+\"/__version__ = \"$NEW_VERSION\"/" regix/__init__.py
        # Update pyproject.toml
        sed -i "s/^version = \"[0-9]\\+\\.[0-9]\\+\\.[0-9]\\+\"/version = \"$NEW_VERSION\"/" pyproject.toml
        # Commit version bump
        git add VERSION regix/__init__.py pyproject.toml
        git commit -m "chore: bump version to $NEW_VERSION [skip ci]" || echo "No changes to commit"
        echo "Bumped to $NEW_VERSION"
      when: metrics_pass
      optional: true
      timeout: 60

    - name: build
      run: python3 -m build
      # NOTE(review): original read "after bump_version" (with a space) — every other
      # condition token uses underscores (after_fix, after_verify_fix); normalized.
      when: after_bump_version
      optional: true
      timeout: 120

    - name: publish
      run: |
        # Publish to PyPI (packages built in previous steps)
        VERSION=$(cat VERSION)
        echo "Publishing version: $VERSION"
        if command -v twine &>/dev/null; then
          twine upload dist/regix-${VERSION}*
        else
          echo "twine not installed — skipping publish"
        fi
      when: metrics_pass
      optional: true
      timeout: 300

# Loop behavior - continue until regression is fixed
# NOTE(review): loop/env emitted at top level; the flat scrape does not show whether
# pyqual expects them nested under `pipeline:` — confirm against the tool's schema.
loop:
  max_iterations: 5
  on_fail: create_ticket

# Environment
env:
  LLX_DEFAULT_TIER: balanced
  LLX_VERBOSE: "true"  # quoted: env values are strings; bare `true` parses as a YAML boolean
  OPENAI_API_KEY: "${OPENROUTER_API_KEY}"
  OPENAI_BASE_URL: https://openrouter.ai/api/v1
  OPENROUTER_API_KEY: "${OPENROUTER_API_KEY}"
  TWINE_USERNAME: __token__
  TWINE_PASSWORD: "${PYPI_API_TOKEN}"