Skip to content

Commit

Permalink
[Serving] Fix function serialization for workflows (mlrun#7206)
Browse files Browse the repository at this point in the history
  • Loading branch information
davesh0812 authored Feb 3, 2025
1 parent 983443f commit 8427057
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 0 deletions.
8 changes: 8 additions & 0 deletions mlrun/serving/states.py
Original file line number Diff line number Diff line change
Expand Up @@ -652,6 +652,14 @@ def to_dict(
if isinstance(self.endpoint_type, schemas.EndpointType)
else self.endpoint_type
)
self.model_endpoint_creation_strategy = (
self.model_endpoint_creation_strategy.value
if isinstance(
self.model_endpoint_creation_strategy,
schemas.ModelEndpointCreationStrategy,
)
else self.model_endpoint_creation_strategy
)
return super().to_dict(fields, exclude, strip)


Expand Down
11 changes: 11 additions & 0 deletions tests/serving/test_serving.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@

import mlrun
from mlrun.runtimes import nuclio_init_hook
from mlrun.runtimes.funcdoc import py_eval
from mlrun.runtimes.nuclio.serving import serving_subkind
from mlrun.serving import V2ModelServer
from mlrun.serving.server import (
Expand Down Expand Up @@ -842,3 +843,13 @@ def test_add_route_exceeds_max_models():
assert (
len(server.graph.routes) == max_models
), f"expected to have {max_models} models"


def test_serialize():
    """Regression test: a serving function's dict form survives py_eval.

    Mimics the round-trip performed by mlrun/__main__.py, where the
    function spec is stringified, re-evaluated with py_eval, and
    normalized back into a dict. Any serialization defect (e.g. an
    enum left unconverted in to_dict) raises here.
    """
    serving_fn = mlrun.new_function("tests", kind="serving")
    serving_fn.set_topology("router")
    serving_fn.add_model("my", ".", class_name=ModelTestingClass(multiplier=100))

    # Round-trip exactly as the CLI does: dict -> str -> py_eval -> as_dict
    spec_str = str(serving_fn.to_dict())
    evaluated = py_eval(spec_str)
    mlrun.utils.helpers.as_dict(evaluated)

0 comments on commit 8427057

Please sign in to comment.