diff --git a/mask-detection/1-training-and-evaluation.ipynb b/mask-detection/1-training-and-evaluation.ipynb
index ea982e91..00f51560 100644
--- a/mask-detection/1-training-and-evaluation.ipynb
+++ b/mask-detection/1-training-and-evaluation.ipynb
@@ -134,7 +134,7 @@
 " base_image='mlrun/mlrun-gpu' if use_gpu else 'mlrun/mlrun',\n",
 " commands=commands,\n",
 " builder_env=builder_env,\n",
-" skip_deployed=True)"
+" overwrite_build_params=True)"
 ]
 },
 {
diff --git a/mask-detection/3-automatic-pipeline.ipynb b/mask-detection/3-automatic-pipeline.ipynb
index 6a957696..17703f83 100644
--- a/mask-detection/3-automatic-pipeline.ipynb
+++ b/mask-detection/3-automatic-pipeline.ipynb
@@ -216,11 +216,11 @@
 "# Import the `onnx_utils` function to optimize the model:\n",
 "# Import the `onnx_utils` function to optimize the model:\n",
 "onnx_func = project.set_function(\"hub://onnx_utils\", name=\"onnx-utils\",requirements=['tensorflow==2.9.0',\n",
-"                                                                                      'onnx~=1.10.1',\n",
+"                                                                                      'onnx~=1.15.0',\n",
 "                                                                                      'onnxruntime~=1.8.1',\n",
 "                                                                                      'onnxoptimizer~=0.2.0',\n",
 "                                                                                      'onnxmltools~=1.9.0',\n",
-"                                                                                      'tf2onnx~=1.9.0'] if framework=='tf-keras' else ['torch'])\n",
+"                                                                                      'tf2onnx~=1.16.0'] if framework=='tf-keras' else ['torch'])\n",
 "\n",
 "onnx_func.deploy()\n",
 "\n",
diff --git a/stocks-prediction/src/train_stocks.py b/stocks-prediction/src/train_stocks.py
index d1af4758..0113f1af 100644
--- a/stocks-prediction/src/train_stocks.py
+++ b/stocks-prediction/src/train_stocks.py
@@ -72,7 +72,7 @@ def __len__(self):
 
 
 class Model(torch.nn.Module):
-    def __init__(self, input_size=16, output_size=1, hidden_dim=2, n_layers=1, batch_size=1, seq_size=5):
+    def __init__(self, input_size=11, output_size=1, hidden_dim=2, n_layers=1, batch_size=1, seq_size=5):
         super(Model, self).__init__()
         # Defining some parameters
         self.input_size = input_size
@@ -120,6 +120,7 @@ def handler(vector_name='stocks',
     dataset = StocksDataset(vector_name, seq_size, start_time, end_time)
     training_set = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True)
     input_size = dataset.data[0][0].shape[0]
+    context.logger.info("input size {}".format(input_size))
     output_size = 1
     # creating the model
     model = Model(input_size=input_size, output_size=output_size, hidden_dim=hidden_dim, n_layers=n_layers,
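
The first hunk swaps `skip_deployed=True` for `overwrite_build_params=True`. With `skip_deployed=True`, MLRun reuses an already-built image, so edits to `commands` or `builder_env` are silently ignored; `overwrite_build_params=True` lets the new build configuration take effect. A minimal sketch of the surrounding call, assuming these kwargs belong to MLRun's `project.build_function` and using "trainer" as a placeholder function name:

    import mlrun

    project = mlrun.get_or_create_project("mask-detection", context="./")

    use_gpu = False
    commands = []      # extra build commands, e.g. pip installs
    builder_env = {}   # build-time env vars, e.g. {"GIT_TOKEN": token}

    # Replace any previously stored build parameters instead of skipping
    # the build because an image for this function already exists.
    project.build_function(
        "trainer",  # placeholder function name
        base_image="mlrun/mlrun-gpu" if use_gpu else "mlrun/mlrun",
        commands=commands,
        builder_env=builder_env,
        overwrite_build_params=True,
    )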
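
The `train_stocks.py` hunks align the constructor default with the actual feature-vector width (16 -> 11) and log the value that `handler()` derives at runtime. Since `handler()` always passes `input_size=dataset.data[0][0].shape[0]`, the default only matters when `Model` is constructed directly. A minimal sketch of that derive-then-construct pattern; the stand-in `Model` and the random tensors are simplifications, not the repo's actual architecture or data:

    import torch
    from torch.utils.data import TensorDataset

    class Model(torch.nn.Module):
        # Simplified stand-in; only the input_size plumbing mirrors train_stocks.py.
        def __init__(self, input_size=11, output_size=1, hidden_dim=2, n_layers=1):
            super(Model, self).__init__()
            self.rnn = torch.nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
            self.fc = torch.nn.Linear(hidden_dim, output_size)

        def forward(self, x):
            out, _ = self.rnn(x)        # out: (batch, seq, hidden_dim)
            return self.fc(out[:, -1])  # predict from the last time step

    # Placeholder data: 100 windows of 5 time steps x 11 features.
    dataset = TensorDataset(torch.randn(100, 5, 11), torch.randn(100, 1))

    # Derive input_size from the data rather than trusting the default,
    # mirroring `input_size = dataset.data[0][0].shape[0]` and the new log line.
    input_size = dataset[0][0].shape[-1]
    print("input size {}".format(input_size))  # -> 11

    model = Model(input_size=input_size)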