Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions ci/jobs/functional_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,8 @@ def start():
)
res = results[-1].is_ok()

runner_options += f" --known-fails-file-path tests/broken_tests.yaml"

test_result = None
if res and JobStages.TEST in stages:
stop_watch_ = Utils.Stopwatch()
Expand Down Expand Up @@ -447,6 +449,10 @@ def collect_logs():
)
force_ok_exit = True

broken_tests_handler_log = os.path.join(temp_dir, "broken_tests_handler.log")
if os.path.exists(broken_tests_handler_log):
debug_files.append(broken_tests_handler_log)

Result.create_from(
results=results,
stopwatch=stop_watch,
Expand Down
133 changes: 12 additions & 121 deletions ci/jobs/scripts/functional_tests_results.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
import dataclasses
import json
import os
import traceback
from typing import List
import re

import yaml

from praktika.result import Result

Expand All @@ -14,6 +9,7 @@
TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
BROKEN_SIGN = "[ BROKEN "
HUNG_SIGN = "Found hung queries in processlist"
SERVER_DIED_SIGN = "Server died, terminating all processes"
SERVER_DIED_SIGN2 = "Server does not respond to health check"
Expand All @@ -33,101 +29,6 @@
# out.writerow(status)


def get_broken_tests_rules(
    broken_tests_file_path: str = "tests/broken_tests.yaml",
) -> dict:
    """Load known-broken-test rules from YAML and pre-compile them for matching.

    Each YAML entry has a mandatory ``name`` and ``reason`` and optional
    ``message``, ``not_message``, ``check_types`` and ``regex`` keys.  When a
    rule sets ``regex: true``, its ``name``/``message``/``not_message`` are
    compiled with ``re.compile``; otherwise they are kept as plain substrings.

    :param broken_tests_file_path: path to the rules file; the default matches
        the repository layout, so existing callers are unaffected.
    :return: dict with two buckets: ``"exact"`` (test name -> rule) and
        ``"pattern"`` (compiled name regex -> rule).
    :raises ValueError: when the file is missing or empty.
    """
    if (
        not os.path.isfile(broken_tests_file_path)
        or os.path.getsize(broken_tests_file_path) == 0
    ):
        raise ValueError(
            "There is something wrong with getting broken tests rules: "
            f"file '{broken_tests_file_path}' is empty or does not exist."
        )

    with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
        broken_tests = yaml.safe_load(broken_tests_file)

    compiled_rules = {"exact": {}, "pattern": {}}

    for test in broken_tests:
        regex = test.get("regex") is True
        rule = {
            "reason": test["reason"],
        }

        # "message" must appear in the test log for the rule to apply.
        if test.get("message"):
            rule["message"] = re.compile(test["message"]) if regex else test["message"]

        # "not_message" vetoes the rule when it appears in the test log.
        if test.get("not_message"):
            rule["not_message"] = (
                re.compile(test["not_message"]) if regex else test["not_message"]
            )

        # Restrict the rule to particular check types (e.g. sanitizer builds).
        if test.get("check_types"):
            rule["check_types"] = test["check_types"]

        if regex:
            rule["regex"] = True
            compiled_rules["pattern"][re.compile(test["name"])] = rule
        else:
            compiled_rules["exact"][test["name"]] = rule

    print(
        f"INFO: Compiled {len(compiled_rules['exact'])} exact rules and {len(compiled_rules['pattern'])} pattern rules"
    )

    return compiled_rules


def test_is_known_fail(test_name, test_logs, known_broken_tests, test_options_string):
matching_rules = []

print(f"Checking known broken tests for failed test: {test_name}")
print("Potential matching rules:")
exact_rule = known_broken_tests["exact"].get(test_name)
if exact_rule:
print(f"{test_name} - {exact_rule}")
matching_rules.append(exact_rule)

for name_re, data in known_broken_tests["pattern"].items():
if name_re.fullmatch(test_name):
print(f"{name_re} - {data}")
matching_rules.append(data)

if not matching_rules:
return False

def matches_substring(substring, log, is_regex):
if log is None:
return False
if is_regex:
return bool(substring.search(log))
return substring in log

for rule_data in matching_rules:
if rule_data.get("check_types") and not any(
ct in test_options_string for ct in rule_data["check_types"]
):
print(
f"Check types didn't match: '{rule_data['check_types']}' not in '{test_options_string}'"
)
continue # check_types didn't match → skip rule

is_regex = rule_data.get("regex", False)
not_message = rule_data.get("not_message")
if not_message and matches_substring(not_message, test_logs, is_regex):
print(f"Skip rule: Not message matched: '{rule_data['not_message']}'")
continue # not_message matched → skip rule
message = rule_data.get("message")
if message and not matches_substring(message, test_logs, is_regex):
print(f"Skip rule: Message didn't match: '{rule_data['message']}'")
continue

print(f"Test {test_name} matched rule: {rule_data}")
return rule_data["reason"]

return False


class FTResultsProcessor:
@dataclasses.dataclass
class Summary:
Expand Down Expand Up @@ -163,8 +64,6 @@ def _process_test_output(self):
test_results = []
test_end = True

known_broken_tests = get_broken_tests_rules()

with open(self.tests_output_file, "r", encoding="utf-8") as test_file:
for line in test_file:
original_line = line
Expand All @@ -183,7 +82,13 @@ def _process_test_output(self):
retries = True
if any(
sign in line
for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)
for sign in (
OK_SIGN,
FAIL_SIGN,
UNKNOWN_SIGN,
SKIPPED_SIGN,
BROKEN_SIGN,
)
):
test_name = line.split(" ")[2].split(":")[0]

Expand Down Expand Up @@ -216,13 +121,16 @@ def _process_test_output(self):
elif SKIPPED_SIGN in line:
skipped += 1
test_results.append((test_name, "SKIPPED", test_time, []))
elif BROKEN_SIGN in line:
broken += 1
test_results.append((test_name, "BROKEN", test_time, []))
else:
success += int(OK_SIGN in line)
test_results.append((test_name, "OK", test_time, []))
test_end = False
elif (
len(test_results) > 0
and test_results[-1][1] in ("FAIL", "SKIPPED")
and test_results[-1][1] in ("FAIL", "SKIPPED", "BROKEN")
and not test_end
):
test_results[-1][3].append(original_line)
Expand All @@ -233,8 +141,6 @@ def _process_test_output(self):
if DATABASE_SIGN in line:
test_end = True

test_options_string = ", ".join(self.test_options)

test_results_ = []
for test in test_results:
try:
Expand All @@ -248,21 +154,6 @@ def _process_test_output(self):
)
)

if test[1] == "FAIL":
broken_message = test_is_known_fail(
test[0],
test_results_[-1].info,
known_broken_tests,
test_options_string,
)

if broken_message:
broken += 1
failed -= 1
test_results_[-1].set_status(Result.StatusExtended.BROKEN)
test_results_[-1].set_label(Result.Label.BROKEN)
test_results_[-1].info += "\nMarked as broken: " + broken_message

except Exception as e:
print(f"ERROR: Failed to parse test results: {test}")
traceback.print_exc()
Expand Down
20 changes: 19 additions & 1 deletion tests/broken_tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,27 @@
message: result differs with reference
check_types:
- msan
- name: 02313_filesystem_cache_seeks
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 02286_drop_filesystem_cache
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 02242_system_filesystem_cache_log_table
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 02241_filesystem_cache_on_write_operations
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 02240_system_filesystem_cache_table
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 02226_filesystem_cache_profile_events
reason: fails when azure storage is not set up
message: 'DB::Exception: Unknown storage policy `azure_cache`'
- name: 00024_random_counters
  reason: INVESTIGATE - random timeout
  regex: true
  message: 'Timeout! (Killing process group|Processes left in process group)'
- name: test_storage_s3_queue/test_5.py::test_migration[1-s3queue_]
reason: KNOWN - Sometimes fails due to test order
message: 'Failed: Timeout >900.0s'
Expand Down
Loading
Loading