2 changes: 0 additions & 2 deletions src/MaCh3PythonUtils/machine_learning/file_ml_interface.py
@@ -213,8 +213,6 @@ def test_model(self):

print("Training Results!")
train_prediction = self.model_predict(self._training_data)
print(self._training_data)
print(train_prediction)
train_as_numpy = self.scale_labels(self._training_labels).T[0]
self.evaluate_model(train_prediction, train_as_numpy, "train_qq_plot.pdf")

Expand Down
@@ -5,7 +5,7 @@
 import numpy as np
 import tensorflow.keras as tfk
 import tensorflow_probability as tfp
-
+from typing import Iterable


 class TfInterface(FileMLInterface):
@@ -36,7 +36,7 @@ def add_layer(self, layer_id: str, layer_args: dict):

         self._layers.append(self.__TF_LAYER_IMPLEMENTATIONS[layer_id.lower()](**layer_args))

-    def build_model(self, _: dict):
+    def build_model(self, **kwargs):
         return None

@@ -93,11 +93,14 @@ def model_predict(self, test_data: pd.DataFrame):
         if self._model is None:
             return np.zeros(len(test_data))

-        pred = self._model.predict(scaled_data, verbose=False)
-        print(f"PREDICTION: {pred}")
-        # return self._model.predict(scaled_data, verbose=False).T[0]
-        return pred
+        return self._model.predict(scaled_data, verbose=False).T[0]
+
+    def model_predict_no_scale(self, test_data):
+        # Same as above but specifically for TF, optimised to avoid an if statement...
+        return self._model(test_data, training=False)
+
+    def evaluate_model(self, predicted_values: Iterable, true_values: Iterable, outfile: str = ""):
+
+        # CODE TO DO TF SPECIFIC PLOTS GOES HERE
+
+        return super().evaluate_model(predicted_values, true_values, outfile)
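
Note on the new model_predict_no_scale path: calling a Keras model directly returns a tensor and skips predict()'s batching machinery, which matters in tight loops where the caller already knows the model exists and the data is scaled. A minimal standalone sketch of that design choice (the toy model and shapes below are illustrative, not code from this PR):

import numpy as np
import tensorflow.keras as tfk

model = tfk.Sequential([tfk.Input(shape=(4,)), tfk.layers.Dense(1)])  # toy stand-in for self._model
x = np.random.rand(8, 4).astype(np.float32)

slow = model.predict(x, verbose=False).T[0]  # numpy pipeline, as in model_predict
fast = model(x, training=False)              # direct call, returns a tf.Tensor
assert np.allclose(slow, fast.numpy().T[0])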
@@ -54,10 +54,10 @@ def __call__(self):


 class TfNormalizingFlowModel(TfManualInterface):
-    def build_model(self, model_args):
+    def build_model(self, **kwargs):
         input_dim = self.chain.ndim-1
-        self._model = NormalizingFlow(model_args.get("hidden_units", [100]), model_args.get("n_bijectors", 1), input_dim)()
-        self._optimizer = tfk.optimizers.Adam(model_args.get("learning_rate", 1e-3))
+        self._model = NormalizingFlow(kwargs.get("hidden_units", [100]), kwargs.get("n_bijectors", 1), input_dim)()
+        self._optimizer = tfk.optimizers.Adam(kwargs.get("learning_rate", 1e-3))

     def nll_loss(self, features):
         return -tf.reduce_mean(self._model.log_prob(features))
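
With build_model taking **kwargs, the flow's hyperparameters become plain keyword arguments. A hedged usage sketch, assuming a constructed TfNormalizingFlowModel instance named model (the values are the defaults visible above, not recommendations):

model.build_model(hidden_units=[100], n_bijectors=1, learning_rate=1e-3)

# an existing config dict still works via unpacking:
model_args = {"hidden_units": [100], "n_bijectors": 1, "learning_rate": 1e-3}
model.build_model(**model_args)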
@@ -3,7 +3,7 @@


 class TfResidualModel(TfManualLayeredInterface):
-    def build_model(self, model_args: dict):
+    def build_model(self, **kwargs):
         input_shape = self.training_data.shape[1:]  # Assuming shape is (batch_size, features)
         network_input = tfk.layers.Input(shape=input_shape)

@@ -23,9 +23,9 @@ def build_model(self, model_args: dict):

         # Define and compile the model
         self._model = tfk.Model(inputs=network_input, outputs=x)
-        optimizer = tfk.optimizers.AdamW(learning_rate=model_args.get("learning_rate", 1e-5),
+        optimizer = tfk.optimizers.AdamW(learning_rate=kwargs.get("learning_rate", 1e-5),
                                          weight_decay=1e-4, clipnorm=1.0)

-        _ = model_args.pop("learning_rate", None)
+        _ = kwargs.pop("learning_rate", None)

-        self._model.compile(optimizer=optimizer, **model_args)
+        self._model.compile(optimizer=optimizer, **kwargs)
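
Minor style point: the get-then-pop pair here could be collapsed, since dict.pop with a default both reads the learning rate and strips it, leaving only compile() arguments in kwargs. A self-contained sketch of the same pattern (the kwargs values are illustrative, not from this PR):

import tensorflow.keras as tfk

kwargs = {"learning_rate": 1e-4, "loss": "mse", "metrics": ["mae"]}
optimizer = tfk.optimizers.AdamW(learning_rate=kwargs.pop("learning_rate", 1e-5),
                                 weight_decay=1e-4, clipnorm=1.0)

model = tfk.Sequential([tfk.Input(shape=(4,)), tfk.layers.Dense(1)])  # toy model
model.compile(optimizer=optimizer, **kwargs)  # only compile() kwargs remain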
@@ -4,11 +4,11 @@

 class TfSequentialModel(TfManualLayeredInterface):

-    def build_model(self, model_args: dict):
+    def build_model(self, **kwargs):
         """Build and compile TF model

-        :param model_args: Model arguments as dictionary
-        :type model_args: dict
+        :param kwargs: Model arguments, passed as keyword arguments
+        :type kwargs: dict
         :raises ValueError: Model not set up yet
         """
         self._model = tfk.Sequential()
@@ -20,10 +20,10 @@ def build_model(self, model_args: dict):
             self._model.add(layer)

         self._model.build()
-        optimizer = tfk.optimizers.AdamW(learning_rate=model_args.get("learning_rate", 1e-5),
+        optimizer = tfk.optimizers.AdamW(learning_rate=kwargs.get("learning_rate", 1e-5),
                                          weight_decay=1e-4, clipnorm=1.0)

-        model_args.pop("learning_rate", None)
+        kwargs.pop("learning_rate", None)


-        self._model.compile(**model_args, optimizer=optimizer)
+        self._model.compile(**kwargs, optimizer=optimizer)
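
Putting the signature change together, a hypothetical call site (the loss and metrics values are illustrative): learning_rate feeds the optimizer and is popped, and everything else is forwarded to compile().

# before: model.build_model({"learning_rate": 1e-5, "loss": "mse", "metrics": ["mae"]})
# after:
model.build_model(learning_rate=1e-5, loss="mse", metrics=["mae"])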