Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: Copyright (c) 2023 - 2026 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: Copyright (c) 2023 - 2025 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
Expand All @@ -23,7 +23,7 @@
import os
import re
import argparse
from typing import List, Dict
from typing import List, Dict, Tuple, Optional

import numpy as np
import pandas as pd
Expand Down Expand Up @@ -141,6 +141,75 @@ def load_averaged_series(
return df


def discover_runs(parent_dir: str) -> List[str]:
    """
    Find all run/sample directories beneath ``parent_dir``.

    A "run" is any directory (at any depth) that directly contains at least
    one ``.vtp`` file. Used to enumerate sample subdirectories for aggregation.

    Parameters
    ----------
    parent_dir : str
        Root directory to search. May be empty/None-like or non-existent.

    Returns
    -------
    List[str]
        Sorted, de-duplicated list of matching directory paths; empty list
        when ``parent_dir`` is falsy or not a directory.
    """
    if not parent_dir or not os.path.isdir(parent_dir):
        return []
    found = set()
    for dirpath, _subdirs, filenames in os.walk(parent_dir):
        # Case-insensitive extension match so ".VTP" files are also picked up.
        if any(name.lower().endswith(".vtp") for name in filenames):
            found.add(dirpath)
    return sorted(found)


def compute_squared_error_series(
    gt_df: pd.DataFrame, pred_df: pd.DataFrame
) -> Optional[pd.DataFrame]:
    """
    Compute the squared L2 error at each shared time step for position,
    velocity, and acceleration.

    Parameters
    ----------
    gt_df : pd.DataFrame
        Ground-truth series with a "Time (s)" column plus Position_*,
        Velocity_*, and Acceleration_* component columns (e.g. Position_X).
    pred_df : pd.DataFrame
        Predicted series with the same column layout as ``gt_df``.

    Returns
    -------
    Optional[pd.DataFrame]
        DataFrame with columns "Time (s)", "Pos_sqerr", "Vel_sqerr",
        "Acc_sqerr" (one scalar squared error per time step), or ``None``
        when the inputs share no time values or the expected component
        columns are absent.

    Notes
    -----
    The return annotation uses ``Optional[...]`` (not ``pd.DataFrame | None``)
    for consistency with the module's other signatures and because PEP 604
    union syntax is evaluated at ``def`` time and fails on Python < 3.10.
    """
    # Align on exactly-matching time stamps; exact float equality is intended
    # here since both series are generated on the same fixed-dt grid.
    common_time = np.intersect1d(gt_df["Time (s)"].values, pred_df["Time (s)"].values)
    if common_time.size == 0:
        return None
    # np.intersect1d returns sorted values, so both frames end up aligned
    # row-for-row; sort_index is a defensive no-op in the common case.
    gt_aligned = gt_df.set_index("Time (s)").loc[common_time].sort_index()
    pred_aligned = pred_df.set_index("Time (s)").loc[common_time].sort_index()
    pos_cols = [c for c in gt_aligned.columns if c.startswith("Position_")]
    vel_cols = [c for c in gt_aligned.columns if c.startswith("Velocity_")]
    acc_cols = [c for c in gt_aligned.columns if c.startswith("Acceleration_")]
    if not (pos_cols and vel_cols and acc_cols):
        return None

    def _sqerr(cols: List[str]) -> np.ndarray:
        # Sum of squared component differences == squared L2 norm per time step.
        return np.sum(
            (pred_aligned[cols].values - gt_aligned[cols].values) ** 2, axis=1
        )

    return pd.DataFrame(
        {
            "Time (s)": common_time,
            "Pos_sqerr": _sqerr(pos_cols),
            "Vel_sqerr": _sqerr(vel_cols),
            "Acc_sqerr": _sqerr(acc_cols),
        }
    )


def compute_mse_over_samples(
    list_of_sqerr: List[pd.DataFrame],
) -> Optional[Tuple[float, float, float]]:
    """
    Aggregate per-sample squared-error series into overall MSEs.

    Parameters
    ----------
    list_of_sqerr : List[pd.DataFrame]
        Per-sample frames containing "Pos_sqerr", "Vel_sqerr", and
        "Acc_sqerr" columns (one squared error per time step).

    Returns
    -------
    Optional[Tuple[float, float, float]]
        ``(mse_pos, mse_vel, mse_acc)`` averaged over every sample and every
        time step, or ``None`` when the list is empty.
    """
    if not list_of_sqerr:
        return None

    def _mean_over_all(column: str) -> float:
        # Concatenating first weights every time step equally across samples.
        stacked = np.concatenate([frame[column].values for frame in list_of_sqerr])
        return float(stacked.mean())

    return (
        _mean_over_all("Pos_sqerr"),
        _mean_over_all("Vel_sqerr"),
        _mean_over_all("Acc_sqerr"),
    )


def plot_kinematics(
driver_gt: pd.DataFrame,
driver_pred: pd.DataFrame,
Expand Down Expand Up @@ -249,7 +318,7 @@ def get_limits(dfs: List[pd.DataFrame], col: str):
plt.show()


def main():
def compute_probe_kinematics_single_sample():
parser = argparse.ArgumentParser(
description="Plot derived kinematics (Driver & Passenger | GT vs Pred) from averaged point positions.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
Expand Down Expand Up @@ -324,6 +393,72 @@ def main():
if passenger_pred is None:
return

# Compute normalized squared error for driver
common = driver_gt.columns.intersection(driver_pred.columns)
cols = common[
~common.str.contains("Time", case=False)
] # drop any col containing 'time'

# normalized error: (gt - pred) / mean(|gt|)
sqerr = (driver_gt[cols] - driver_pred[cols]).abs()
denom = driver_gt[cols].abs().mean(axis=0).replace(0, np.nan) # avoid div-by-zero
norm_sqerr = sqerr.div(denom, axis=1)

# rename: e.g., Position_X -> Position_error_X
norm_sqerr.columns = [
f"{a}_error_{b}" for a, b in (c.split("_", 1) for c in norm_sqerr.columns)
]

# print mean for each normalized error column (driver)
driver_norm_means = norm_sqerr.mean()
print("\nDriver normalized error means:")
print(
f" Position_error: {(driver_norm_means['Position_error_X'] + driver_norm_means['Position_error_Y'] + driver_norm_means['Position_error_Z']) / 3}"
)
print(
f" Velocity_error: {(driver_norm_means['Velocity_error_X'] + driver_norm_means['Velocity_error_Y'] + driver_norm_means['Velocity_error_Z']) / 3}"
)
print(
f" Acceleration_error: {(driver_norm_means['Acceleration_error_X'] + driver_norm_means['Acceleration_error_Y'] + driver_norm_means['Acceleration_error_Z']) / 3}"
)

# append to driver_gt (optional: fill NaNs if any denom was 0)
driver_gt = pd.concat([driver_gt, norm_sqerr.fillna(0)], axis=1)

# Compute normalized squared error for passenger
common = passenger_gt.columns.intersection(passenger_pred.columns)
cols = common[
~common.str.contains("Time", case=False)
] # drop any col containing 'time'

# normalized error: (gt - pred) / mean(|gt|)
sqerr = (passenger_gt[cols] - passenger_pred[cols]).abs()
denom = (
passenger_gt[cols].abs().mean(axis=0).replace(0, np.nan)
) # avoid div-by-zero
norm_sqerr = sqerr.div(denom, axis=1)

# rename: e.g., Position_X -> Position_error_X
norm_sqerr.columns = [
f"{a}_error_{b}" for a, b in (c.split("_", 1) for c in norm_sqerr.columns)
]

# print mean for each normalized error column (passenger)
passenger_norm_means = norm_sqerr.mean()
print("\nPassenger normalized error means:")
print(
f" Position_error: {(passenger_norm_means['Position_error_X'] + passenger_norm_means['Position_error_Y'] + passenger_norm_means['Position_error_Z']) / 3}"
)
print(
f" Velocity_error: {(passenger_norm_means['Velocity_error_X'] + passenger_norm_means['Velocity_error_Y'] + passenger_norm_means['Velocity_error_Z']) / 3}"
)
print(
f" Acceleration_error: {(passenger_norm_means['Acceleration_error_X'] + passenger_norm_means['Acceleration_error_Y'] + passenger_norm_means['Acceleration_error_Z']) / 3}"
)

# append to driver_gt (optional: fill NaNs if any denom was 0)
passenger_gt = pd.concat([passenger_gt, norm_sqerr.fillna(0)], axis=1)

if args.save_csv:
driver_gt.to_csv(
"driver_ground_truth_kinematics.csv", index=False, float_format="%.6e"
Expand All @@ -344,5 +479,182 @@ def main():
)


def compute_probe_kinematics_all_samples():
    """
    CLI entry point: compute MSE (position, velocity, acceleration) at probe
    locations over all samples and all time steps, plotting GT vs Pred for
    each sample.

    Input modes:
      * ``--predicted_parent`` / ``--exact_parent``: every subdirectory
        containing .vtp files is one sample; predicted and exact runs are
        paired by identical relative path under their respective parents.
      * ``--pred_dir`` / ``--exact_dir``: a single predicted/exact pair.

    Side effects: creates ``--output_path``, writes one kinematics plot per
    sample, prints aggregate MSEs, and optionally writes ``probe_mse.csv``.
    """
    parser = argparse.ArgumentParser(
        description="Compute MSE (position, velocity, acceleration) at probe locations over all samples and time; "
        "optionally plot GT vs Pred per sample.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--predicted_parent",
        type=str,
        help="Parent directory whose subdirs contain predicted VTP files (one subdir per sample).",
        default=None,
    )
    parser.add_argument(
        "--exact_parent",
        type=str,
        help="Parent directory whose subdirs contain ground truth VTP files (same relative paths as predicted_parent).",
        default=None,
    )
    parser.add_argument(
        "--pred_dir",
        type=str,
        default=None,
        help='Single-sample: directory with predicted VTP files ("prediction" array). Ignored if predicted_parent is set.',
    )
    parser.add_argument(
        "--exact_dir",
        type=str,
        default=None,
        help='Single-sample: directory with exact VTP files ("exact" array). Ignored if exact_parent is set.',
    )
    parser.add_argument(
        "--driver_points",
        type=str,
        required=True,
        help='Driver point IDs/ranges (e.g., "70658-70659,70664,70676-70679").',
    )
    parser.add_argument(
        "--passenger_points",
        type=str,
        required=True,
        help="Passenger point IDs/ranges.",
    )
    parser.add_argument(
        "--dt", type=float, default=5e-3, help="Time step size Δt in seconds."
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default=None,
        help="Directory where all plots are stored (GT vs Pred per sample + mean ± 2*std summary).",
    )
    parser.add_argument(
        "--save_csv",
        action="store_true",
        help="Save probe MSE (position, velocity, acceleration) to CSV.",
    )
    args = parser.parse_args()

    # --output_path defaults to None; os.makedirs(None) and the later
    # os.path.join(None, ...) calls would raise a cryptic TypeError, so fail
    # early with a clear CLI error instead.
    if not args.output_path:
        parser.error("--output_path is required (directory for plots and CSV output).")

    driver_ids = parse_point_set(args.driver_points)
    passenger_ids = parse_point_set(args.passenger_points)

    os.makedirs(args.output_path, exist_ok=True)

    # Build (pred_dir, exact_dir) pairs: matched by relative path when parent
    # directories are supplied, otherwise the single explicit pair.
    run_pairs: List[Tuple[str, str]] = []
    if args.predicted_parent and args.exact_parent:
        pred_runs = discover_runs(args.predicted_parent)
        exact_by_rel = {
            os.path.relpath(d, args.exact_parent): d
            for d in discover_runs(args.exact_parent)
        }
        for pred_run in pred_runs:
            rel = os.path.relpath(pred_run, args.predicted_parent)
            exact_run = exact_by_rel.get(rel)
            if exact_run is not None:
                run_pairs.append((pred_run, exact_run))
    elif args.pred_dir and args.exact_dir:
        if os.path.isdir(args.pred_dir) and os.path.isdir(args.exact_dir):
            run_pairs = [(args.pred_dir, args.exact_dir)]
    if not run_pairs:
        print(
            "❌ No sample directories found. Set --predicted_parent and --exact_parent, or --pred_dir and --exact_dir."
        )
        return

    print(
        f"Found {len(run_pairs)} sample(s). Computing squared errors and GT vs Pred plots..."
    )
    driver_sqerrs: List[pd.DataFrame] = []
    passenger_sqerrs: List[pd.DataFrame] = []
    predicted_parent = args.predicted_parent or ""

    for idx, (pred_dir, exact_dir) in enumerate(run_pairs):
        driver_gt = load_averaged_series(
            exact_dir, driver_ids, args.dt, position_array="exact"
        )
        driver_pred = load_averaged_series(
            pred_dir, driver_ids, args.dt, position_array="prediction"
        )
        passenger_gt = load_averaged_series(
            exact_dir, passenger_ids, args.dt, position_array="exact"
        )
        passenger_pred = load_averaged_series(
            pred_dir, passenger_ids, args.dt, position_array="prediction"
        )
        # Skip samples with missing/unreadable data rather than aborting the run.
        if driver_gt is None or driver_pred is None:
            continue
        if passenger_gt is None or passenger_pred is None:
            continue
        dr_sq = compute_squared_error_series(driver_gt, driver_pred)
        pass_sq = compute_squared_error_series(passenger_gt, passenger_pred)
        if dr_sq is not None:
            driver_sqerrs.append(dr_sq)
        if pass_sq is not None:
            passenger_sqerrs.append(pass_sq)

        # Unique name for this sample: relative path from parent, sanitized for filename
        if predicted_parent:
            sample_rel = os.path.relpath(pred_dir, predicted_parent)
        else:
            sample_rel = str(idx)
        sample_name = sample_rel.replace(os.sep, "_").replace(" ", "_")
        if not sample_name.strip():
            sample_name = f"sample_{idx}"
        sample_plot_path = os.path.join(
            args.output_path, f"kinematics_gt_pred_{sample_name}.png"
        )
        plot_kinematics(
            driver_gt, driver_pred, passenger_gt, passenger_pred, sample_plot_path
        )

    if not driver_sqerrs or not passenger_sqerrs:
        print(
            "❌ Could not compute squared errors for any sample (missing or misaligned data)."
        )
        return

    # Aggregate squared errors over every sample and every time step.
    mse_driver = compute_mse_over_samples(driver_sqerrs)
    mse_passenger = compute_mse_over_samples(passenger_sqerrs)
    if mse_driver is None or mse_passenger is None:
        return

    print("\n--- MSE at probe locations (over all samples and all time) ---")
    print("Driver probe:")
    print(f" Position MSE: {mse_driver[0]:.6e}")
    print(f" Velocity MSE: {mse_driver[1]:.6e}")
    print(f" Acceleration MSE: {mse_driver[2]:.6e}")
    print("Passenger probe:")
    print(f" Position MSE: {mse_passenger[0]:.6e}")
    print(f" Velocity MSE: {mse_passenger[1]:.6e}")
    print(f" Acceleration MSE: {mse_passenger[2]:.6e}")

    if args.save_csv:
        mse_df = pd.DataFrame(
            [
                {
                    "probe": "driver",
                    "position_mse": mse_driver[0],
                    "velocity_mse": mse_driver[1],
                    "acceleration_mse": mse_driver[2],
                },
                {
                    "probe": "passenger",
                    "position_mse": mse_passenger[0],
                    "velocity_mse": mse_passenger[1],
                    "acceleration_mse": mse_passenger[2],
                },
            ]
        )
        mse_df.to_csv(
            os.path.join(args.output_path, "probe_mse.csv"),
            index=False,
            float_format="%.6e",
        )
        print("\n💾 Saved probe MSE to probe_mse.csv.")


if __name__ == "__main__":
    # `main` was renamed to compute_probe_kinematics_single_sample(); calling
    # the old name would raise NameError. The aggregate entry point is the
    # default; switch the comment to run the single-sample variant instead.
    # compute_probe_kinematics_single_sample()
    compute_probe_kinematics_all_samples()
13 changes: 8 additions & 5 deletions examples/structural_mechanics/crash/post_processing/run_post_processing.sh
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,16 @@

mkdir -p results

# No space after '=': `dir_path= value` would run `value` as a command with
# dir_path temporarily set to the empty string, leaving the variable unset.
dir_path=/path/to/output/directory
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Space after = causes bash syntax error - variable won't be set

Suggested change
dir_path= /path/to/output/directory
dir_path=/path/to/output/directory


python compute_probe_kinematics.py \
--pred_dir ../predicted_vtps/Run51/test_000 \
--exact_dir ../exact_vtps/Run51/test_000 \
--predicted_parent $dir_path/predicted_vtps \
--exact_parent $dir_path/exact_vtps \
--driver_points "70658-70659,70654-70655,70664,70656,70657,70660,70661-70663,70665,70676-70679,70680-70684,70695-70759,70775-70777,70760,70757,70666-70675,70644,70646,70651,70650" \
--passenger_points "78575-78583,78577,78579,78706-78733,78738-78763,78765-78767,78776-78778,78734-78737,78768-78775" \
--dt 5e-3 \
--output_plot results/probe_kinematics.png
python compute_l2_error.py --predicted_parent ../predicted_vtps --exact_parent ../exact_vtps --output_plot results/l2_error.png
python plot_cross_section.py --pred_dir ../predicted_vtps/Run51/test_000/ --exact_dir ../exact_vtps/Run51/test_000/ --output_file results/cross_section.png
--output_path $dir_path/post_processing_results \
# --save_csv
python compute_l2_error.py --predicted_parent $dir_path/predicted_vtps --exact_parent $dir_path/exact_vtps --output_plot $dir_path/post_processing_results/l2_error.png --output_csv $dir_path/post_processing_results/l2_error.csv
# $pred_path/$exact_path were never defined in this script; derive both
# locations from $dir_path like the other post-processing steps above.
python plot_cross_section.py --pred_dir $dir_path/predicted_vtps --exact_dir $dir_path/exact_vtps --output_file $dir_path/post_processing_results/cross_section.png
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

$pred_path and $exact_path are undefined - this line will fail

Suggested change
python plot_cross_section.py --pred_dir $pred_path --exact_dir $exact_path --output_file $dir_path/post_processing_results/cross_section.png
python plot_cross_section.py --pred_dir $dir_path/predicted_vtps --exact_dir $dir_path/exact_vtps --output_file $dir_path/post_processing_results/cross_section.png