From f34c190130dbce66cea1ffa9e588dca2863d0e70 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 9 Dec 2024 22:15:35 +0000
Subject: [PATCH] Adding "jump" install option, organizing imports

---
 pyproject.toml                             |  6 ++---
 src/omlt/__init__.py                       |  2 +-
 src/omlt/base/__init__.py                  | 31 +++++++++++-----------
 src/omlt/base/constraint.py                |  1 -
 src/omlt/base/julia.py                     |  8 +++---
 src/omlt/base/pyomo.py                     |  6 ++---
 src/omlt/io/__init__.py                    |  6 ++---
 src/omlt/neuralnet/__init__.py             |  2 +-
 src/omlt/neuralnet/activations/__init__.py |  8 +++---
 src/omlt/neuralnet/activations/linear.py   |  2 +-
 src/omlt/neuralnet/activations/smooth.py   |  2 +-
 src/omlt/neuralnet/layers/full_space.py    |  8 +++---
 src/omlt/neuralnet/layers/reduced_space.py |  2 +-
 tests/conftest.py                          |  7 ++---
 tests/gbt/test_gbt_formulation.py          |  1 +
 tests/io/test_keras_reader.py              |  1 +
 tests/io/test_onnx_parser.py               |  1 +
 tests/io/test_torch_geometric.py           | 12 +++++----
 tests/linear_tree/test_lt_formulation.py   |  4 ++-
 tests/neuralnet/test_keras.py              |  4 ++-
 tests/neuralnet/test_layer.py              |  1 +
 tests/neuralnet/test_network_definition.py |  1 +
 tests/neuralnet/test_nn_formulation.py     |  3 ++-
 tests/neuralnet/test_onnx.py               |  7 +++--
 tests/neuralnet/test_relu.py               |  1 +
 tests/neuralnet/train_keras_models.py      |  3 ++-
 tests/notebooks/test_run_notebooks.py      |  5 ++--
 27 files changed, 75 insertions(+), 60 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 2b215f71..e41f3234 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ linear-tree = ["linear-tree"]
 keras = ["tensorflow", "keras"]
 keras-gpu = ["tensorflow[and-cuda]", "keras"]
 torch = ["torch", "torch-geometric"]
+jump = ["juliacall"]
 dev-tools = [
   "ruff",
   "mypy",
@@ -52,10 +53,10 @@ docs = [
   "linear-tree",
 ]
 
 dev = [
-  "omlt[dev-tools,keras,torch,linear-tree,docs]",
+  "omlt[dev-tools,jump,keras,torch,linear-tree,docs]",
 ]
 
 dev-gpu = [
-  "omlt[dev-tools,keras-gpu,torch,linear-tree,docs]",
+  "omlt[dev-tools,jump,keras-gpu,torch,linear-tree,docs]",
 ]
@@ -74,7 +75,6 @@ extend-exclude = ["src/omlt/_version.py"]
 [tool.ruff.lint]
 select = ["ALL"]
 ignore = [
-  "ANN101",
   "ANN401",
   "COM812",
   "ISC001",
diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py
index dfd36f37..733484ba 100644
--- a/src/omlt/__init__.py
+++ b/src/omlt/__init__.py
@@ -15,7 +15,7 @@
 from omlt.scaling import OffsetScaling
 
 __all__ = [
-    "OmltBlock",
     "OffsetScaling",
+    "OmltBlock",
     "__version__",
 ]
diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py
index 08ed411c..7f41372e 100644
--- a/src/omlt/base/__init__.py
+++ b/src/omlt/base/__init__.py
@@ -16,30 +16,29 @@
 )
 from omlt.base.var import OmltIndexed, OmltScalar, OmltVar, OmltVarFactory
 
-
 __all__ = [
     "DEFAULT_MODELING_LANGUAGE",
-    "julia_available",
     "Jl",
-    "jump",
-    "OmltExpr",
-    "OmltExprFactory",
-    "OmltScalar",
-    "OmltIndexed",
-    "OmltVar",
-    "OmltVarFactory",
-    "OmltConstraintIndexed",
-    "OmltConstraintScalar",
     "OmltConstraint",
     "OmltConstraintFactory",
-    "OmltConstraintIndexedPyomo",
-    "OmltConstraintScalarPyomo",
-    "OmltExprScalarPyomo",
-    "OmltIndexedPyomo",
-    "OmltScalarPyomo",
+    "OmltConstraintIndexed",
     "OmltConstraintIndexedJuMP",
+    "OmltConstraintIndexedPyomo",
+    "OmltConstraintScalar",
     "OmltConstraintScalarJuMP",
+    "OmltConstraintScalarPyomo",
+    "OmltExpr",
+    "OmltExprFactory",
     "OmltExprJuMP",
+    "OmltExprScalarPyomo",
+    "OmltIndexed",
     "OmltIndexedJuMP",
+    "OmltIndexedPyomo",
+    "OmltScalar",
     "OmltScalarJuMP",
+    "OmltScalarPyomo",
+    "OmltVar",
+    "OmltVarFactory",
+    "julia_available",
+    "jump",
 ]
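Note on the new extra: with `jump = ["juliacall"]` added to pyproject.toml and `julia_available`, `Jl`, and `jump` now exported from `omlt.base`, downstream code can guard Julia-backed features the same way this patch does. A minimal sketch, assuming the package was installed with the new extra (the install command and the `println` call are illustrative, not part of the patch):

```python
# Assumes installation with the new extra:  pip install "omlt[jump]"
from omlt.dependencies import julia_available

if julia_available:
    # Jl is juliacall's Julia Main module, re-exported by omlt.base in this patch.
    from omlt.base import Jl

    Jl.seval('println("Julia runtime reachable from OMLT")')
else:
    print("juliacall not installed; JuMP-backed OMLT objects are unavailable.")
```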
diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py
index f977094b..a66f749f 100644
--- a/src/omlt/base/constraint.py
+++ b/src/omlt/base/constraint.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-from abc import abstractmethod
 from typing import Any
 
 import pyomo.environ as pyo
diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py
index 618cfe39..23e5c810 100644
--- a/src/omlt/base/julia.py
+++ b/src/omlt/base/julia.py
@@ -1,19 +1,17 @@
 from typing import Any
 
-from pyomo.core.base import ParamData
-
 from numpy import float32
+from pyomo.core.base import ParamData
 
-from omlt.block import OmltBlockCore
 from omlt.base.constraint import OmltConstraintIndexed, OmltConstraintScalar
 from omlt.base.expression import OmltExpr
 from omlt.base.var import OmltElement, OmltIndexed, OmltScalar
+from omlt.block import OmltBlockCore
 from omlt.dependencies import julia_available
 
 if julia_available:
-    from juliacall import Base
+    from juliacall import Base, convert
     from juliacall import Main as Jl
-    from juliacall import convert
 
     jl_err = Base.error
     Jl.seval("import Pkg")
diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py
index 5d950287..e04c8e03 100644
--- a/src/omlt/base/pyomo.py
+++ b/src/omlt/base/pyomo.py
@@ -8,7 +8,7 @@
 
 import pyomo.environ as pyo
 from numpy import float32
-from pyomo.core.base.var import _GeneralVarData
+from pyomo.core.base.var import VarData
 
 from omlt.base.constraint import OmltConstraintIndexed, OmltConstraintScalar
 from omlt.base.expression import OmltExpr, OmltExprFactory
@@ -335,7 +335,7 @@ def _parse_expression_tuple_term(self, term):
         if isinstance(term, OmltScalarPyomo):
             return term._pyovar
         if isinstance(
-            term, (pyo.Expression, pyo.Var, _GeneralVarData, int, float, float32)
+            term, (pyo.Expression, pyo.Var, VarData, int, float, float32)
         ):
             return term
         msg = ("Term of expression %s is an unsupported type. %s", term, type(term))
@@ -411,7 +411,7 @@ def __sub__(self, other):
     def __mul__(self, other):
         if isinstance(other, OmltExprScalarPyomo):
             expr = self._expression * other._expression
-        elif isinstance(other, (int, float, pyo.Expression)):
+        elif isinstance(other, (int, float, float32, pyo.Expression)):
            expr = self._expression * other
 
         return self.expr_factory.new_expression(lang=self._format, expr=expr)
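The two substantive changes in pyomo.py above are the switch from the private `_GeneralVarData` name to the public `VarData` alias and the extra `float32` case in `__mul__`. The latter matters because weights loaded through numpy arrive as `numpy.float32` scalars. A rough sketch of the behaviour it enables; the expression construction here is an assumed usage of `OmltExprFactory` for illustration only (the diff only shows `new_expression(lang=..., expr=...)` being called internally):

```python
# Sketch: multiplying an OMLT Pyomo expression by a numpy float32 weight,
# which now falls into the (int, float, float32, pyo.Expression) branch above.
import numpy as np
import pyomo.environ as pyo

from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltExprFactory

m = pyo.ConcreteModel()
m.x = pyo.Var()

# Assumed factory call, mirroring how the diff uses the factory internally.
e = OmltExprFactory().new_expression(lang=DEFAULT_MODELING_LANGUAGE, expr=m.x + 1)

w = np.float32(0.5)  # typical dtype of weights loaded from ONNX/Keras
scaled = e * w       # dispatches to OmltExprScalarPyomo.__mul__
```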
diff --git a/src/omlt/io/__init__.py b/src/omlt/io/__init__.py
index 64fa72e1..9a1fe7ee 100644
--- a/src/omlt/io/__init__.py
+++ b/src/omlt/io/__init__.py
@@ -17,11 +17,11 @@
 
 __all__ = [
     "keras_available",
+    "load_keras_sequential",
+    "load_onnx_neural_network",
+    "load_onnx_neural_network_with_bounds",
     "onnx_available",
     "torch_available",
     "torch_geometric_available",
-    "load_onnx_neural_network",
-    "load_onnx_neural_network_with_bounds",
     "write_onnx_model_with_bounds",
-    "load_keras_sequential",
 ]
diff --git a/src/omlt/neuralnet/__init__.py b/src/omlt/neuralnet/__init__.py
index 014de739..e4072c30 100644
--- a/src/omlt/neuralnet/__init__.py
+++ b/src/omlt/neuralnet/__init__.py
@@ -32,9 +32,9 @@
 )
 
 __all__ = [
-    "NetworkDefinition",
     "FullSpaceNNFormulation",
     "FullSpaceSmoothNNFormulation",
+    "NetworkDefinition",
     "ReducedSpaceNNFormulation",
     "ReducedSpaceSmoothNNFormulation",
     "ReluBigMFormulation",
diff --git a/src/omlt/neuralnet/activations/__init__.py b/src/omlt/neuralnet/activations/__init__.py
index 740022ad..4b4f32e9 100644
--- a/src/omlt/neuralnet/activations/__init__.py
+++ b/src/omlt/neuralnet/activations/__init__.py
@@ -29,16 +29,16 @@
 NON_INCREASING_ACTIVATIONS: list[Any] = []
 
 __all__ = [
-    "linear_activation_constraint",
-    "linear_activation_function",
+    "ACTIVATION_FUNCTION_MAP",
+    "NON_INCREASING_ACTIVATIONS",
     "ComplementarityReLUActivation",
     "bigm_relu_activation_constraint",
+    "linear_activation_constraint",
+    "linear_activation_function",
     "sigmoid_activation_constraint",
     "sigmoid_activation_function",
     "softplus_activation_constraint",
     "softplus_activation_function",
     "tanh_activation_constraint",
     "tanh_activation_function",
-    "ACTIVATION_FUNCTION_MAP",
-    "NON_INCREASING_ACTIVATIONS",
 ]
diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py
index c4661223..2e48457a 100644
--- a/src/omlt/neuralnet/activations/linear.py
+++ b/src/omlt/neuralnet/activations/linear.py
@@ -1,4 +1,4 @@
-from omlt.base import OmltConstraintFactory, DEFAULT_MODELING_LANGUAGE
+from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltConstraintFactory
 
 
 def linear_activation_function(zhat, lang=DEFAULT_MODELING_LANGUAGE):
diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py
index 692800e6..77d28bf8 100644
--- a/src/omlt/neuralnet/activations/smooth.py
+++ b/src/omlt/neuralnet/activations/smooth.py
@@ -1,6 +1,6 @@
 from pyomo.environ import exp, log, tanh
 
-from omlt.base import OmltConstraintFactory, DEFAULT_MODELING_LANGUAGE
+from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltConstraintFactory
 
 
 def softplus_activation_function(x, lang=DEFAULT_MODELING_LANGUAGE):
diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py
index 45b56df2..0766b584 100644
--- a/src/omlt/neuralnet/layers/full_space.py
+++ b/src/omlt/neuralnet/layers/full_space.py
@@ -35,20 +35,20 @@ def full_space_dense_layer(net_block, net, layer_block, layer):
                 if input_layer_block.z[input_index].lb is not None:
                     lb += input_layer_block.z[input_index].lb * w
                 else:
-                    lb += -float('inf')
+                    lb += -float("inf")
                 if input_layer_block.z[input_index].ub is not None:
                     ub += input_layer_block.z[input_index].ub * w
                 else:
-                    ub += float('inf')
+                    ub += float("inf")
             else:
                 if input_layer_block.z[input_index].ub is not None:
                     lb += input_layer_block.z[input_index].ub * w
                 else:
-                    lb += -float('inf')
+                    lb += -float("inf")
                 if input_layer_block.z[input_index].lb is not None:
                     ub += input_layer_block.z[input_index].lb * w
                 else:
-                    ub += float('inf')
+                    ub += float("inf")
 
         # move this at the end to avoid numpy/pyomo var bug
         expr += layer.biases[output_index[-1]]
diff --git a/src/omlt/neuralnet/layers/reduced_space.py b/src/omlt/neuralnet/layers/reduced_space.py
index dee3f1b4..89be752e 100644
--- a/src/omlt/neuralnet/layers/reduced_space.py
+++ b/src/omlt/neuralnet/layers/reduced_space.py
@@ -1,4 +1,4 @@
-from omlt.base import OmltVarFactory, OmltConstraintFactory
+from omlt.base import OmltConstraintFactory, OmltVarFactory
 
 
 def reduced_space_dense_layer(net_block, net, layer_block, layer, activation):
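The quoting fixes in full_space.py touch the interval arithmetic that propagates bounds through a dense layer. Spelled out as standalone arithmetic, this is a sketch of the logic those hunks encode, not OMLT's actual function:

```python
# Preactivation bounds for one dense-layer output, mirroring the branches above:
# a missing input bound (None) pushes the corresponding partial sum to +/- inf.
def dense_preactivation_bounds(weights, lbs, ubs, bias):
    lb = ub = bias
    for w, xl, xu in zip(weights, lbs, ubs):
        if w >= 0:
            lb += w * xl if xl is not None else -float("inf")
            ub += w * xu if xu is not None else float("inf")
        else:
            lb += w * xu if xu is not None else -float("inf")
            ub += w * xl if xl is not None else float("inf")
    return lb, ub


# Example: y = 2*x1 - x2 + 0.5 with x1 in [0, 1] and x2 unbounded above
print(dense_preactivation_bounds([2.0, -1.0], [0.0, 0.0], [1.0, None], 0.5))
# -> (-inf, 2.5)
```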
diff --git a/tests/conftest.py b/tests/conftest.py
index bcea6cff..6460461f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,9 +2,10 @@
 
 import numpy as np
 import pytest
+from pyomo.common.fileutils import this_file_dir
+
 from omlt.neuralnet.layer import DenseLayer, InputLayer
 from omlt.neuralnet.network_definition import NetworkDefinition
-from pyomo.common.fileutils import this_file_dir
 
 
 def get_neural_network_data(desc):
@@ -47,13 +48,13 @@
     def file(self, filename):
         return str(self._basedir / filename)
 
 
-@pytest.fixture()
+@pytest.fixture
 def datadir():
     basedir = Path(this_file_dir()) / "models"
     return _Datadir(basedir)
 
 
-@pytest.fixture()
+@pytest.fixture
 def two_node_network_relu():
     """Two node network with ReLU activation.
diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py
index 8973ca1a..a68dbf08 100644
--- a/tests/gbt/test_gbt_formulation.py
+++ b/tests/gbt/test_gbt_formulation.py
@@ -2,6 +2,7 @@
 
 import pyomo.environ as pe
 import pytest
+
 from omlt import OmltBlock
 from omlt.base import OmltVarFactory
 from omlt.dependencies import onnx, onnx_available
diff --git a/tests/io/test_keras_reader.py b/tests/io/test_keras_reader.py
index 1982063b..5f738aca 100644
--- a/tests/io/test_keras_reader.py
+++ b/tests/io/test_keras_reader.py
@@ -1,4 +1,5 @@
 import pytest
+
 from omlt.dependencies import keras, keras_available
 
 NUM_LAYERS_131 = 3
diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py
index fc74b34c..4db71e03 100644
--- a/tests/io/test_onnx_parser.py
+++ b/tests/io/test_onnx_parser.py
@@ -1,4 +1,5 @@
 import pytest
+
 from omlt.dependencies import onnx, onnx_available
 
 NUM_LAYERS_131 = 3
diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py
index 53d9db58..43703d97 100644
--- a/tests/io/test_torch_geometric.py
+++ b/tests/io/test_torch_geometric.py
@@ -1,6 +1,7 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
+
 from omlt import OmltBlock
 from omlt.dependencies import (
     torch_available,
@@ -8,11 +9,6 @@
 )
 
 if torch_available and torch_geometric_available:
-    from omlt.io.torch_geometric import (
-        gnn_with_fixed_graph,
-        gnn_with_non_fixed_graph,
-        load_torch_geometric_sequential,
-    )
     from torch.nn import Linear, ReLU, Sigmoid, Tanh
     from torch_geometric.nn import (
         GCNConv,
@@ -23,6 +19,12 @@
         global_mean_pool,
     )
 
+    from omlt.io.torch_geometric import (
+        gnn_with_fixed_graph,
+        gnn_with_non_fixed_graph,
+        load_torch_geometric_sequential,
+    )
+
 
 @pytest.mark.skipif(
     not (torch_available and torch_geometric_available),
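The test-side churn above is mostly import grouping: third-party imports (torch, torch_geometric) now precede first-party omlt imports inside the availability guard. The pattern the suite relies on, reduced to a sketch (the test name and reason string are illustrative, not copied from the suite):

```python
# Dependency-gated test module layout used throughout these test files.
import pytest

from omlt.dependencies import torch_available, torch_geometric_available

if torch_available and torch_geometric_available:
    from omlt.io.torch_geometric import load_torch_geometric_sequential


@pytest.mark.skipif(
    not (torch_available and torch_geometric_available),
    reason="torch and torch_geometric are needed for this test",
)
def test_example_gnn():
    # Safe to touch the guarded import here; the skipif keeps this body
    # from running when the optional dependencies are missing.
    assert callable(load_torch_geometric_sequential)
```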
diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py
index a3082c01..db9121dd 100644
--- a/tests/linear_tree/test_lt_formulation.py
+++ b/tests/linear_tree/test_lt_formulation.py
@@ -1,16 +1,18 @@
 import numpy as np
 import pyomo.environ as pe
 import pytest
+
 from omlt.dependencies import lineartree_available
 
 if lineartree_available:
     from lineartree import LinearTreeRegressor
+    from sklearn.linear_model import LinearRegression
+
     from omlt.linear_tree import (
         LinearTreeDefinition,
         LinearTreeGDPFormulation,
         LinearTreeHybridBigMFormulation,
     )
-    from sklearn.linear_model import LinearRegression
 
 import omlt
 from omlt import OmltBlock
diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py
index f83c07d1..9eda9772 100644
--- a/tests/neuralnet/test_keras.py
+++ b/tests/neuralnet/test_keras.py
@@ -1,13 +1,15 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-from omlt.dependencies import keras, keras_available
 from pyomo.common.dependencies import DeferredImportError
 
+from omlt.dependencies import keras, keras_available
+
 if keras_available:
     from omlt.io import load_keras_sequential
 
 from conftest import get_neural_network_data
+
 from omlt import OmltBlock
 from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation
 from omlt.neuralnet.activations import ComplementarityReLUActivation
diff --git a/tests/neuralnet/test_layer.py b/tests/neuralnet/test_layer.py
index 6cf2b6de..2f42fc59 100644
--- a/tests/neuralnet/test_layer.py
+++ b/tests/neuralnet/test_layer.py
@@ -1,5 +1,6 @@
 import numpy as np
 import pytest
+
 from omlt.neuralnet.layer import (
     ConvLayer2D,
     DenseLayer,
diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py
index a18d7eea..609e12ec 100644
--- a/tests/neuralnet/test_network_definition.py
+++ b/tests/neuralnet/test_network_definition.py
@@ -3,6 +3,7 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
+
 from omlt import OmltBlock
 from omlt.neuralnet.layer import DenseLayer, InputLayer
 from omlt.neuralnet.network_definition import NetworkDefinition
diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py
index e7b5bc02..fd9027d7 100644
--- a/tests/neuralnet/test_nn_formulation.py
+++ b/tests/neuralnet/test_nn_formulation.py
@@ -5,6 +5,8 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
+from pyomo.contrib.fbbt import interval
+
 from omlt import OmltBlock
 from omlt.formulation import _PyomoFormulation
 from omlt.neuralnet import (
@@ -32,7 +34,6 @@
     partition_based_dense_relu_layer,
 )
 from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer
-from pyomo.contrib.fbbt import interval
 
 if TYPE_CHECKING:
     from omlt.formulation import _PyomoFormulation
diff --git a/tests/neuralnet/test_onnx.py b/tests/neuralnet/test_onnx.py
index 7d33675f..7c8014e3 100644
--- a/tests/neuralnet/test_onnx.py
+++ b/tests/neuralnet/test_onnx.py
@@ -2,20 +2,23 @@
 
 import numpy as np
 import pytest
-from omlt.dependencies import onnx, onnx_available
 from pyomo.common.dependencies import DeferredImportError
 
+from omlt.dependencies import onnx, onnx_available
+
 if onnx_available:
     import onnxruntime as ort
+
     from omlt.io.onnx import (
         load_onnx_neural_network,
         load_onnx_neural_network_with_bounds,
         write_onnx_model_with_bounds,
     )
 
+from pyomo.environ import ConcreteModel, SolverFactory, value
+
 from omlt import OffsetScaling, OmltBlock
 from omlt.neuralnet import FullSpaceNNFormulation
-from pyomo.environ import ConcreteModel, SolverFactory, value
 
 
 @pytest.mark.skipif(onnx_available, reason="Test only valid when onnx not available")
diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py
index 97af8af0..0a11682c 100644
--- a/tests/neuralnet/test_relu.py
+++ b/tests/neuralnet/test_relu.py
@@ -1,6 +1,7 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
+
 from omlt import OmltBlock
 from omlt.dependencies import onnx_available
 from omlt.neuralnet import (
diff --git a/tests/neuralnet/train_keras_models.py b/tests/neuralnet/train_keras_models.py
index a2a14d94..e68e1b1a 100644
--- a/tests/neuralnet/train_keras_models.py
+++ b/tests/neuralnet/train_keras_models.py
@@ -3,9 +3,10 @@
 from keras.layers import Conv2D, Dense
 from keras.models import Sequential
 from keras.optimizers import Adamax
-from omlt.io import write_onnx_model_with_bounds
 from pyomo.common.fileutils import this_file_dir
 
+from omlt.io import write_onnx_model_with_bounds
+
 
 def train_models():  # noqa: PLR0915
     x, y, x_test = get_neural_network_data("131")
diff --git a/tests/notebooks/test_run_notebooks.py b/tests/notebooks/test_run_notebooks.py
index 62d70d57..4ca85405 100644
--- a/tests/notebooks/test_run_notebooks.py
+++ b/tests/notebooks/test_run_notebooks.py
@@ -2,14 +2,15 @@
 from pathlib import Path
 
 import pytest
+from pyomo.common.fileutils import this_file_dir
+from testbook import testbook
+
 from omlt.dependencies import (
     keras_available,
     onnx_available,
     torch_available,
     torch_geometric_available,
 )
-from pyomo.common.fileutils import this_file_dir
-from testbook import testbook
 
 # TODO @cog-imperial: These will be replaced with stronger tests using testbook soon
 # https://github.com/cog-imperial/OMLT/issues/159