1 change: 1 addition & 0 deletions CHANGES.md
@@ -73,6 +73,7 @@
 ## New Features / Improvements
 
 * Support configuring Firestore database on ReadFn transforms (Java) ([#36904](https://github.com/apache/beam/issues/36904)).
+* (Python) Inference args are now allowed in most model handlers, except where they are explicitly/intentionally disallowed ([#37093](https://github.com/apache/beam/issues/37093)).
 
 ## Breaking Changes
 
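To illustrate the changelog entry: extra predict-time keywords can now ride along on RunInference for any handler that accepts them. A minimal sketch, assuming a pickled scikit-learn model at a hypothetical path; `return_std` only works for estimators such as GaussianProcessRegressor whose predict() accepts it:

import numpy
import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create([numpy.array([0.5]), numpy.array([1.5])])
      # inference_args is forwarded into the model's predict() call.
      | RunInference(
          SklearnModelHandlerNumpy(model_uri='gs://my-bucket/model.pkl'),
          inference_args={'return_std': True}))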
13 changes: 5 additions & 8 deletions sdks/python/apache_beam/ml/inference/base.py
@@ -213,15 +213,12 @@ def batch_elements_kwargs(self) -> Mapping[str, Any]:
     return {}
 
   def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
     """Validates inference_args passed in the inference call.
 
-    Because most frameworks do not need extra arguments in their predict() call,
-    the default behavior is to error out if inference_args are present.
-    """
-    if inference_args:
-      raise ValueError(
-          'inference_args were provided, but should be None because this '
-          'framework does not expect extra arguments on inferences.')
+    Allows model handlers to provide some validation to make sure passed in
+    inference args are valid. Some ModelHandlers throw here to disallow
+    inference args altogether.
+    """
+    pass
 
   def update_model_path(self, model_path: Optional[str] = None):
     """
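Under the new contract the base check is a no-op, so handlers that cannot forward extra arguments must opt out explicitly by overriding it. A minimal sketch of a hypothetical handler (load_model/run_inference omitted for brevity); the unit test below exercises exactly this pattern:

from typing import Any, Optional

from apache_beam.ml.inference.base import ModelHandler


class NoExtraArgsModelHandler(ModelHandler):
  # Hypothetical handler for a framework with a fixed predict() signature.
  def validate_inference_args(
      self, inference_args: Optional[dict[str, Any]]) -> None:
    # Reject extra arguments up front, before any inference runs.
    if inference_args:
      raise ValueError('This framework takes no extra inference arguments.')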
6 changes: 6 additions & 0 deletions sdks/python/apache_beam/ml/inference/base_test.py
@@ -293,6 +293,12 @@ def run_inference(self, batch, unused_model, inference_args=None):
         'run_inference should not be called because error should already be '
         'thrown from the validate_inference_args check.')
+
+  def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
+    if inference_args:
+      raise ValueError(
+          'inference_args were provided, but should be None because this '
+          'framework does not expect extra arguments on inferences.')
 
 
 class FakeModelHandlerExpectedInferenceArgs(FakeModelHandler):
   def run_inference(self, batch, unused_model, inference_args=None):
6 changes: 0 additions & 6 deletions sdks/python/apache_beam/ml/inference/pytorch_inference.py
@@ -342,9 +342,6 @@ def get_metrics_namespace(self) -> str:
"""
return 'BeamML_PyTorch'

def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
pass

def batch_elements_kwargs(self):
return self._batching_kwargs

@@ -590,9 +587,6 @@ def get_metrics_namespace(self) -> str:
"""
return 'BeamML_PyTorch'

def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
pass

def batch_elements_kwargs(self):
return self._batching_kwargs

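Both removed PyTorch overrides (like the TensorFlow and Vertex AI ones below) were pass-through stubs, so deleting them changes no behavior now that the base default is equally permissive. The PyTorch handlers forward inference_args into the model call; a usage sketch in which the model class, state-dict path, and apply_softmax keyword are all placeholders:

import torch
from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerTensor


class MyTorchModel(torch.nn.Module):
  # Placeholder model whose forward() accepts an extra keyword.
  def __init__(self, hidden_dim: int = 128):
    super().__init__()
    self.linear = torch.nn.Linear(hidden_dim, 2)

  def forward(self, batch, apply_softmax: bool = False):
    out = self.linear(batch)
    return torch.nn.functional.softmax(out, dim=-1) if apply_softmax else out


model_handler = PytorchModelHandlerTensor(
    state_dict_path='gs://my-bucket/model.pt',  # placeholder path
    model_class=MyTorchModel,
    model_params={'hidden_dim': 128})
# Each key is expanded into forward(), i.e. model(batch, apply_softmax=True):
# ... | RunInference(model_handler, inference_args={'apply_softmax': True})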
3 changes: 2 additions & 1 deletion sdks/python/apache_beam/ml/inference/sklearn_inference.py
@@ -73,9 +73,10 @@ def _default_numpy_inference_fn(
     model: BaseEstimator,
     batch: Sequence[numpy.ndarray],
     inference_args: Optional[dict[str, Any]] = None) -> Any:
+  inference_args = {} if not inference_args else inference_args
   # vectorize data for better performance
   vectorized_batch = numpy.stack(batch, axis=0)
-  return model.predict(vectorized_batch)
+  return model.predict(vectorized_batch, **inference_args)


class SklearnModelHandlerNumpy(ModelHandler[numpy.ndarray,
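What the forwarded kwargs mean at the model level — exercising the changed helper directly on toy data (the helper is module-private; calling it here is purely illustrative):

import numpy
from sklearn.gaussian_process import GaussianProcessRegressor

from apache_beam.ml.inference.sklearn_inference import _default_numpy_inference_fn

X = numpy.array([[0.0], [1.0], [2.0]])
model = GaussianProcessRegressor().fit(X, numpy.array([0.0, 0.8, 0.9]))

# inference_args expands into model.predict(...); with return_std=True this
# estimator returns (means, standard_deviations) instead of just means.
means, stds = _default_numpy_inference_fn(
    model, [numpy.array([0.5]), numpy.array([1.5])],
    inference_args={'return_std': True})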
6 changes: 0 additions & 6 deletions sdks/python/apache_beam/ml/inference/tensorflow_inference.py
@@ -219,9 +219,6 @@ def get_metrics_namespace(self) -> str:
"""
return 'BeamML_TF_Numpy'

def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
pass

def batch_elements_kwargs(self):
return self._batching_kwargs

@@ -360,9 +357,6 @@ def get_metrics_namespace(self) -> str:
"""
return 'BeamML_TF_Tensor'

def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
pass

def batch_elements_kwargs(self):
return self._batching_kwargs

10 changes: 10 additions & 0 deletions sdks/python/apache_beam/ml/inference/tensorrt_inference.py
@@ -341,3 +341,13 @@ def share_model_across_processes(self) -> bool:

   def model_copies(self) -> int:
     return self._model_copies
+
+  def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
+    """
+    Currently, this model handler does not support inference args. Given that,
+    we will throw if any are passed in.
+    """
+    if inference_args:
+      raise ValueError(
+          'inference_args were provided, but should be None because this '
+          'framework does not expect extra arguments on inferences.')
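This override preserves the old behavior for TensorRT now that the base default no longer raises: passing inference_args still fails fast at validation time. A sketch (constructor arguments abbreviated; the engine path is hypothetical):

from apache_beam.ml.inference.tensorrt_inference import TensorRTEngineHandlerNumPy

handler = TensorRTEngineHandlerNumPy(
    min_batch_size=1,
    max_batch_size=4,
    engine_path='gs://my-bucket/model.trt')  # hypothetical engine path

# Raises ValueError before any pipeline work is scheduled:
handler.validate_inference_args({'some_arg': 1})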
3 changes: 0 additions & 3 deletions sdks/python/apache_beam/ml/inference/vertex_ai_inference.py
@@ -207,8 +207,5 @@ def request(
     return utils._convert_to_result(
         batch, prediction.predictions, prediction.deployed_model_id)
 
-  def validate_inference_args(self, inference_args: Optional[dict[str, Any]]):
-    pass
-
   def batch_elements_kwargs(self) -> Mapping[str, Any]:
     return self._batching_kwargs