5 changes: 4 additions & 1 deletion src/dvsim/flow/sim.py
@@ -26,7 +26,7 @@
from dvsim.modes import BuildMode, Mode, RunMode, find_mode
from dvsim.regression import Regression
from dvsim.report.data import FlowResults, IPMeta, Testpoint, TestResult, TestStage, ToolMeta
from dvsim.sim_results import SimResults
from dvsim.sim_results import BucketedFailures, SimResults
from dvsim.test import Test
from dvsim.testplan import Testplan
from dvsim.tool.utils import get_sim_tool_plugin
@@ -697,13 +697,16 @@ def make_test_result(tr) -> TestResult | None:
raw_metrics=coverage,
)

failures = BucketedFailures.from_job_status(results=run_results)

# --- Final result ---
return FlowResults(
block=block,
tool=tool,
timestamp=timestamp,
stages=stages,
coverage=coverage_model,
failed_jobs=failures,
passed=total_passed,
total=total_runs,
percent=100.0 * total_passed / total_runs if total_runs else 0.0,
5 changes: 5 additions & 0 deletions src/dvsim/report/data.py
@@ -10,6 +10,8 @@

from pydantic import BaseModel, ConfigDict

from dvsim.sim_results import BucketedFailures

__all__ = (
"IPMeta",
"ResultsSummary",
@@ -181,6 +183,9 @@ class FlowResults(BaseModel):
coverage: CoverageMetrics | None
"""Coverage metrics."""

failed_jobs: BucketedFailures
"""Bucketed failed job overview."""

passed: int
"""Number of tests passed."""
total: int
117 changes: 87 additions & 30 deletions src/dvsim/sim_results.py
@@ -4,13 +4,19 @@

"""Class describing simulation results."""

import collections
import re
from collections.abc import Sequence
from collections.abc import Mapping, Sequence
from typing import TYPE_CHECKING

from pydantic import BaseModel, ConfigDict

from dvsim.job.data import CompletedJobStatus
from dvsim.testplan import Result

if TYPE_CHECKING:
from dvsim.job.data import CompletedJobStatus

__all__ = ()

_REGEX_REMOVE = [
# Remove UVM time.
re.compile(r"@\s+[\d.]+\s+[np]s: "),
@@ -66,6 +72,78 @@
]


def _bucketize(fail_msg: str) -> str:
"""Generalise error messages to create common error buckets."""
bucket = fail_msg
# Remove stuff.
for regex in _REGEX_REMOVE:
bucket = regex.sub("", bucket)
# Strip stuff.
for regex in _REGEX_STRIP:
bucket = regex.sub(r"\g<1>", bucket)
# Replace with '*'.
for regex in _REGEX_STAR:
bucket = regex.sub("*", bucket)

return bucket


class JobFailureOverview(BaseModel):
"""Overview of the Job failure."""

model_config = ConfigDict(frozen=True, extra="forbid")

name: str
"""Name of the job."""

seed: int | None
"""Test seed."""

line: int | None
"""Line number within the log if there is one."""

log_context: Sequence[str]
"""Context within the log."""


class BucketedFailures(BaseModel):
"""Bucketed failed runs.

The runs are grouped into failure buckets based on the error messages they
reported. This makes it easier to see the classes of errors.
"""

model_config = ConfigDict(frozen=True, extra="forbid")

buckets: Mapping[str, Sequence["JobFailureOverview"]]
"""Mapping of common error message strings to the full job failure summary."""

@staticmethod
def from_job_status(results: Sequence["CompletedJobStatus"]) -> "BucketedFailures":
Inline review comment (PR author):
Return type needs quoting because the type is the class itself; that reference doesn't exist until the class has been fully defined, at least as far as the class attributes and method prototypes are concerned.
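
For illustration, a minimal generic sketch of the same situation (the `Point`/`origin` names are made up, not dvsim code):

```python
class Point:
    """Minimal example of a forward reference to the enclosing class."""

    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y

    @staticmethod
    def origin() -> "Point":
        # "Point" must be quoted (or deferred via
        # `from __future__ import annotations`) because the annotation is
        # evaluated while the class body is still executing, before the
        # name `Point` is bound.
        return Point(0, 0)
```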

"""Construct from CompletedJobStatus objects."""
buckets = {}

for job_status in results:
if job_status.status in ["F", "K"]:
bucket = _bucketize(job_status.fail_msg.message)

if bucket not in buckets:
buckets[bucket] = []

buckets[bucket].append(
JobFailureOverview(
name=job_status.full_name,
seed=job_status.seed,
line=job_status.fail_msg.line_number,
log_context=job_status.fail_msg.context,
),
)

return BucketedFailures(
buckets=buckets,
)


class SimResults:
"""An object wrapping up a table of results for some tests.

@@ -76,30 +154,22 @@ class SimResults:
holding all failing tests with the same signature.
"""

def __init__(self, results: Sequence[CompletedJobStatus]) -> None:
def __init__(self, results: Sequence["CompletedJobStatus"]) -> None:
Inline review comment (PR author):
Quoted type required here to prevent circular imports.

Once we have a few more data classes, it might be worth refactoring the file structure a bit so that we can have types without quotes. Circular imports due to typing are generally a good sign that things that should be together are not.
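
A minimal sketch of the pattern in question (module and attribute names here are generic placeholders, not the actual dvsim layout):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only while type checking, so the module is never loaded at
    # runtime and the runtime import cycle is broken.
    from mypackage.job.data import CompletedJobStatus


def summarise(job: "CompletedJobStatus") -> str:
    # The annotation must be quoted: the `if TYPE_CHECKING:` block never
    # runs, so the name is not bound at runtime.
    return f"{job.full_name}: {job.status}"
```

With `from __future__ import annotations` the quotes become unnecessary, but the `TYPE_CHECKING` guard is still what prevents the runtime import cycle.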

self.table = []
self.buckets = collections.defaultdict(list)
self.buckets: Mapping[str, JobFailureOverview] = {}

self._name_to_row = {}

for job_status in results:
self._add_item(job_status=job_status)

def _add_item(self, job_status: CompletedJobStatus) -> None:
def _add_item(self, job_status: "CompletedJobStatus") -> None:
"""Recursively add a single item to the table of results."""
if job_status.status in ["F", "K"]:
bucket = self._bucketize(job_status.fail_msg.message)
self.buckets[bucket].append(
(
job_status,
job_status.fail_msg.line_number,
job_status.fail_msg.context,
),
)

# Runs get added to the table directly
if job_status.target == "run":
self._add_run(job_status)

def _add_run(self, job_status: CompletedJobStatus) -> None:
def _add_run(self, job_status: "CompletedJobStatus") -> None:
"""Add an entry to table for item."""
row = self._name_to_row.get(job_status.name)
if row is None:
Expand All @@ -119,16 +189,3 @@ def _add_run(self, job_status: CompletedJobStatus) -> None:
if job_status.status == "P":
row.passing += 1
row.total += 1

def _bucketize(self, fail_msg):
bucket = fail_msg
# Remove stuff.
for regex in _REGEX_REMOVE:
bucket = regex.sub("", bucket)
# Strip stuff.
for regex in _REGEX_STRIP:
bucket = regex.sub(r"\g<1>", bucket)
# Replace with '*'.
for regex in _REGEX_STAR:
bucket = regex.sub("*", bucket)
return bucket
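
To illustrate how the bucketing normalisation above collapses distinct failure messages into a shared bucket, here is a reduced, self-contained sketch; the two pattern lists are trimmed, hypothetical stand-ins for the fuller `_REGEX_REMOVE`/`_REGEX_STAR` lists in `sim_results.py`:

```python
import re
from collections import defaultdict

# Hypothetical, trimmed stand-ins for the real pattern lists.
REGEX_REMOVE = [re.compile(r"@\s+[\d.]+\s+[np]s: ")]  # e.g. drop UVM timestamps
REGEX_STAR = [re.compile(r"0x[0-9a-fA-F]+")]          # e.g. generalise hex values


def bucketize(fail_msg: str) -> str:
    """Normalise a failure message into a bucket key."""
    bucket = fail_msg
    for regex in REGEX_REMOVE:
        bucket = regex.sub("", bucket)
    for regex in REGEX_STAR:
        bucket = regex.sub("*", bucket)
    return bucket


messages = [
    "@ 123.4 ns: scoreboard mismatch at addr 0xdead",
    "@ 987.6 ns: scoreboard mismatch at addr 0xbeef",
]

buckets: dict[str, list[str]] = defaultdict(list)
for msg in messages:
    buckets[bucketize(msg)].append(msg)

# Both messages land in the single bucket "scoreboard mismatch at addr *".
print(dict(buckets))
```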