Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions bluemath_tk/interpolation/rbf.py
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,9 @@ def __init__(
self._rbf_coeffs: pd.DataFrame = pd.DataFrame()
self._opt_sigmas: dict = {}

# Exclude attributes to .save_model() method
self._exclude_attributes = []

@property
def sigma_min(self) -> float:
    """Return the stored ``_sigma_min`` value.

    NOTE(review): presumably the lower bound of the sigma search range used
    by the RBF optimizer — confirm against the setter / optimization code.
    """
    return self._sigma_min
Expand Down
14 changes: 5 additions & 9 deletions bluemath_tk/wrappers/_base_wrappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,13 +424,11 @@ def run_cases(
cases_to_run : List[int], optional
The list with the cases to run. Default is None.
num_workers : int, optional
The number of parallel workers. Default is 1.
The number of parallel workers. Default is None.
"""

if num_workers is None:
num_workers = self.num_workers
else:
self.num_workers = num_workers

# Get launcher command from the available launchers
launcher = self.list_available_launchers().get(launcher, launcher)
Expand Down Expand Up @@ -500,7 +498,7 @@ def run_cases_in_background(
self,
launcher: str,
cases_to_run: List[int] = None,
num_workers: int = 1,
num_workers: int = None,
) -> None:
"""
Run the cases in the background based on the launcher specified.
Expand All @@ -514,13 +512,11 @@ def run_cases_in_background(
cases_to_run : List[int], optional
The list with the cases to run. Default is None.
num_workers : int, optional
The number of parallel workers. Default is 1.
The number of parallel workers. Default is None.
"""

if num_workers is None:
num_workers = self.num_workers
else:
self.num_workers = num_workers

self.status_queue = Queue()
self.thread = threading.Thread(
Expand Down Expand Up @@ -600,7 +596,7 @@ def join_postprocessed_files(
def postprocess_cases(
self,
cases_to_postprocess: List[int] = None,
num_workers: int = 1,
num_workers: int = None,
write_output_nc: bool = True,
clean_after: bool = False,
force: bool = False,
Expand All @@ -613,7 +609,7 @@ def postprocess_cases(
cases_to_postprocess : List[int], optional
The list with the cases to postprocess. Default is None.
num_workers : int, optional
The number of parallel workers. Default is 1.
The number of parallel workers. Default is None.
write_output_nc : bool, optional
Write the output postprocessed file. Default is True.
clean_after : bool, optional
Expand Down
52 changes: 52 additions & 0 deletions bluemath_tk/wrappers/swan/swan_wrapper.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import os
import re
from typing import List

import pandas as pd
import scipy.io as sio
import xarray as xr

Expand Down Expand Up @@ -30,6 +32,7 @@ class SwanModelWrapper(BaseModelWrapper):
# Shell commands available to launch a case, keyed by launcher name.
available_launchers = {
    "bash": "swanrun -input input",
    "docker": "docker run --rm -v .:/case_dir -w /case_dir tausiaj/swan-geoocean:41.51 swanrun -input input",
    # NOTE(review): this entry executes `swash_serial.exe` from a generic
    # `geoocean/rocky8` image inside the SWAN wrapper — looks like a
    # copy-paste from a SWASH wrapper; confirm the image/binary are correct.
    "docker_serial": "docker run --rm -v .:/case_dir -w /case_dir geoocean/rocky8 swash_serial.exe",
}

output_variables = {
Expand Down Expand Up @@ -134,6 +137,55 @@ def _convert_case_output_files_to_nc(

return ds

def get_case_percentage_from_file(self, output_log_file: str) -> str:
    """
    Get the case percentage from the output log file.

    Scans the log from the last line backwards and returns the most
    recent progress figure reported by the model.

    Parameters
    ----------
    output_log_file : str
        The output log file.

    Returns
    -------
    str
        The case percentage, e.g. ``"55.25 %"``; ``"0 %"`` when the file
        is missing or contains no progress line.
    """

    # A missing log means the case has not produced any output yet.
    if not os.path.exists(output_log_file):
        return "0 %"

    progress_regex = re.compile(r"OK in\s+(\d+\.\d+)\s*%")
    with open(output_log_file, "r") as log:
        log_lines = log.read().splitlines()

    # Walk backwards so the newest progress report wins.
    for entry in reversed(log_lines):
        found = progress_regex.search(entry)
        if found:
            return f"{found.group(1)} %"

    return "0 %"  # if no progress is found

def monitor_cases(self) -> pd.DataFrame:
    """
    Monitor the cases and log relevant information.

    Builds one row per case directory with the latest progress
    percentage parsed from that case's log file.

    Returns
    -------
    pd.DataFrame
        The cases percentage, with columns ``Case`` and ``Percentage``.
    """

    # TODO: Try to generalise the output log file name with launcher??
    progress_by_case = {
        os.path.basename(case_dir): self.get_case_percentage_from_file(
            output_log_file=os.path.join(case_dir, "wrapper_out.log")
        )
        for case_dir in self.cases_dirs
    }

    return pd.DataFrame(progress_by_case.items(), columns=["Case", "Percentage"])

def postprocess_case(
self,
case_num: int,
Expand Down
4 changes: 3 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -43,15 +43,17 @@ dependencies = [
"minisom",
"statsmodels",
"regionmask",
"wavespectra",
"cdsapi",
"ipykernel",
"ipywidgets",
]

[project.optional-dependencies]

docs = ["sphinx", "sphinx-rtd-theme", "jupyter-sphinx"]
deep = ["tensorflow", "keras"]
num-models = ["hydromt-sfincs"]
waves = ["wavespectra"]

[project.urls]

Expand Down