-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcheck_quality.py
More file actions
311 lines (254 loc) · 9.83 KB
/
check_quality.py
File metadata and controls
311 lines (254 loc) · 9.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
#!/usr/bin/env python3
"""
# Houdinis Framework - Quantum Cryptography Testing Platform
# Author: Mauro Risonho de Paula Assumpção aka firebitsbr
# Developed by: Human Logic & Coding with AI Assistance (Claude Sonnet 4.5)
# License: MIT
Code Quality Validation Script
================================
Validates and improves the code quality of the Houdinis project.
Target: 9.5/10 on Pylance.
Features:
- Type hints validation
- Import cleanup
- Docstring validation
- Code style checking
- Security scanning
"""
import ast
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import List, Dict, Tuple, Optional
from dataclasses import dataclass
@dataclass
class QualityMetrics:
    """Aggregated quality counters collected during code analysis."""

    total_functions: int = 0            # public functions examined
    functions_with_types: int = 0       # fully type-hinted functions
    functions_with_docstrings: int = 0  # functions carrying a docstring
    unused_imports: int = 0             # pyflakes "imported but unused" hits
    missing_type_hints: int = 0         # functions lacking annotations
    undocumented_functions: int = 0     # functions lacking a docstring

    @property
    def type_coverage(self) -> float:
        """Percentage of functions that carry complete type hints."""
        if not self.total_functions:
            return 100.0  # vacuously complete when nothing was analyzed
        return (self.functions_with_types / self.total_functions) * 100

    @property
    def docstring_coverage(self) -> float:
        """Percentage of functions that have a docstring."""
        if not self.total_functions:
            return 100.0  # vacuously complete when nothing was analyzed
        return (self.functions_with_docstrings / self.total_functions) * 100

    @property
    def overall_score(self) -> float:
        """Overall quality score on a 0-10 scale.

        Type and docstring coverage each contribute half of the score;
        every unused import subtracts 0.1 point, capped at 2.0 points.
        The result is clamped to the [0, 10] range.
        """
        type_score = self.type_coverage / 10
        doc_score = self.docstring_coverage / 10
        import_penalty = min(self.unused_imports * 0.1, 2.0)
        raw = (type_score * 0.5 + doc_score * 0.5) - import_penalty
        return max(0.0, min(10.0, raw))
class CodeQualityAnalyzer:
    """Analyze Python source files for type-hint and docstring coverage.

    Files are parsed with ``ast``; per-function findings are accumulated
    in a ``QualityMetrics`` instance and as human-readable strings in
    ``self.issues``.  Unused imports are detected by shelling out to
    ``pyflakes`` when it is installed (best effort, silently skipped
    otherwise).
    """

    def __init__(self, root_dir: str = ".") -> None:
        self.root_dir = Path(root_dir)   # base directory for analysis
        self.metrics = QualityMetrics()  # accumulated counters
        self.issues: List[str] = []      # "file:line - description" entries

    def analyze_file(self, filepath: Path) -> None:
        """Analyze a single Python file and update metrics/issues."""
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                content = f.read()

            tree = ast.parse(content, filename=str(filepath))

            # Visit every (sync or async) function definition in the file.
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    self._analyze_function(node, filepath)

            self._check_unused_imports(filepath, content)
        except Exception as e:
            # Broad on purpose: one unreadable or unparsable file must not
            # abort the whole scan; record it as an issue instead.
            self.issues.append(f"Error analyzing {filepath}: {e}")

    def _analyze_function(self, node: ast.FunctionDef, filepath: Path) -> None:
        """Record type-hint and docstring presence for one function."""
        # Private helpers and test functions are exempt from the checks.
        if node.name.startswith("_") or node.name.startswith("test_"):
            return

        self.metrics.total_functions += 1

        # Type hints: all parameters (minus self/cls) plus the return type.
        if self._has_type_hints(node):
            self.metrics.functions_with_types += 1
        else:
            self.metrics.missing_type_hints += 1
            self.issues.append(
                f"{filepath}:{node.lineno} - Function '{node.name}' missing type hints"
            )

        # Docstring presence.
        if ast.get_docstring(node) is not None:
            self.metrics.functions_with_docstrings += 1
        else:
            self.metrics.undocumented_functions += 1
            self.issues.append(
                f"{filepath}:{node.lineno} - Function '{node.name}' missing docstring"
            )

    def _has_type_hints(self, node: ast.FunctionDef) -> bool:
        """Return True if the function annotates its return and all params."""
        has_return_type = node.returns is not None

        # 'self' and 'cls' conventionally carry no annotation.
        params = [arg for arg in node.args.args if arg.arg not in ("self", "cls")]
        if not params:
            return has_return_type

        has_param_types = all(arg.annotation is not None for arg in params)
        return has_return_type and has_param_types

    def _check_unused_imports(self, filepath: Path, content: str) -> None:
        """Count unused imports reported by pyflakes (best effort).

        ``content`` is currently unused but kept for interface stability.
        """
        try:
            # FIX: use the current interpreter instead of a literal "python"
            # (which may not exist on PATH, or resolve to a different
            # environment without pyflakes installed).
            result = subprocess.run(
                [sys.executable, "-m", "pyflakes", str(filepath)],
                capture_output=True,
                text=True,
                timeout=5,
            )
            for line in result.stdout.splitlines():
                if "imported but unused" in line.lower():
                    self.metrics.unused_imports += 1
                    self.issues.append(f"{filepath} - {line.strip()}")
        except (subprocess.TimeoutExpired, FileNotFoundError):
            # pyflakes missing or too slow: silently skip this check.
            pass

    def analyze_directory(self, directory: Path) -> None:
        """Analyze all Python files below *directory*.

        Test modules (``test_*``) and ``__pycache__`` contents are skipped.
        """
        python_files = [
            f
            for f in directory.rglob("*.py")
            if "__pycache__" not in str(f) and "test_" not in f.name
        ]
        print(f"Analyzing {len(python_files)} Python files...")
        for filepath in python_files:
            self.analyze_file(filepath)

    def print_report(self) -> None:
        """Print a human-readable quality report to stdout."""
        print("\n" + "=" * 70)
        print("CODE QUALITY REPORT")
        print("=" * 70)
        print("\n Metrics:")
        print(f" Total Functions: {self.metrics.total_functions}")
        print(
            f" With Type Hints: {self.metrics.functions_with_types} "
            f"({self.metrics.type_coverage:.1f}%)"
        )
        print(
            f" With Docstrings: {self.metrics.functions_with_docstrings} "
            f"({self.metrics.docstring_coverage:.1f}%)"
        )
        print(f" Unused Imports: {self.metrics.unused_imports}")
        print(f" Missing Type Hints: {self.metrics.missing_type_hints}")
        print(f" Undocumented: {self.metrics.undocumented_functions}")
        print(f"\n Overall Quality Score: {self.metrics.overall_score:.1f}/10.0")

        # Map the numeric score to a coarse status band.
        score = self.metrics.overall_score
        if score >= 9.5:
            status = " EXCELLENT"
            message = "Code quality is outstanding!"
        elif score >= 8.5:
            status = " GOOD"
            message = "Code quality is good, minor improvements needed."
        elif score >= 7.0:
            status = " FAIR"
            message = "Code quality needs improvement."
        else:
            status = " POOR"
            message = "Code quality needs significant improvement."
        print(f"\nStatus: {status}")
        print(message)

        # Show at most 20 individual issues to keep the report readable.
        # (Removed an unreachable "and N more" branch that lived inside the
        # len <= 20 arm and therefore could never fire.)
        if self.issues and len(self.issues) <= 20:
            print(f"\n Issues Found ({len(self.issues)}):")
            for issue in self.issues:
                print(f" - {issue}")
        elif self.issues:
            print(
                f"\n {len(self.issues)} issues found. Run with --verbose for details."
            )
        print("\n" + "=" * 70)
def run_additional_checks() -> Dict[str, Optional[bool]]:
    """Run optional external linters and report a pass/fail per tool.

    Returns:
        Mapping of tool name ("black", "flake8", "mypy") to True (pass),
        False (fail), or None when the tool is unavailable or timed out.
    """
    # (command, timeout in seconds) per tool.  flake8 is restricted to the
    # fatal-error classes (E9/F63/F7/F82) so style nits don't fail the gate.
    commands: Dict[str, Tuple[List[str], int]] = {
        "black": (["black", "--check", "--quiet", "."], 30),
        "flake8": (["flake8", ".", "--count", "--select=E9,F63,F7,F82"], 30),
        "mypy": (["mypy", ".", "--ignore-missing-imports"], 60),
    }

    checks: Dict[str, Optional[bool]] = {}
    for tool, (cmd, timeout) in commands.items():
        try:
            result = subprocess.run(cmd, capture_output=True, timeout=timeout)
            checks[tool] = result.returncode == 0
        # FIX: the original used bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit.  Catch only launch/timeout failures.
        except (subprocess.TimeoutExpired, OSError):
            checks[tool] = None
    return checks
def main() -> int:
    """Entry point: analyze the project's packages and report a score.

    Returns:
        0 when the 9.5/10 quality target is met, 1 otherwise (the value
        is passed to ``sys.exit`` by the ``__main__`` guard).
    """
    print(" Houdinis Code Quality Analyzer")
    print("=" * 70)

    # Only these project packages are scored; missing ones are skipped so
    # the script also works on partial checkouts.
    analyzer = CodeQualityAnalyzer()
    directories = ["quantum", "exploits", "core", "scanners", "utils", "security"]
    for dir_name in directories:
        dir_path = Path(dir_name)
        if dir_path.exists():
            print(f" Analyzing {dir_name}/...")
            analyzer.analyze_directory(dir_path)

    analyzer.print_report()

    # External linters are advisory only; they do not affect the exit code.
    print("\n Additional Checks:")
    checks = run_additional_checks()
    for tool, status in checks.items():
        if status is True:
            print(f" {tool}: PASS")
        elif status is False:
            print(f" {tool}: FAIL")
        else:
            print(f" {tool}: NOT AVAILABLE")

    # Exit code communicates whether the quality gate passed.
    if analyzer.metrics.overall_score >= 9.5:
        print("\n Code quality target achieved (9.5/10)!")
        return 0

    needed = 9.5 - analyzer.metrics.overall_score
    print(f"\n Need {needed:.1f} more points to reach 9.5/10")
    print("\nRecommendations:")
    if analyzer.metrics.type_coverage < 90:
        print(" 1. Add type hints to more functions")
    if analyzer.metrics.docstring_coverage < 90:
        print(" 2. Add docstrings to public functions")
    if analyzer.metrics.unused_imports > 0:
        print(" 3. Remove unused imports")
    return 1
if __name__ == "__main__":
    # Propagate main()'s exit status to the shell (0 = quality gate passed).
    sys.exit(main())