From 28b3fd96735839f05a7b3fd21fbaa15fbbb716ea Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue, 19 Mar 2024 02:05:50 +0000 Subject: [PATCH 01/75] OmltVar wrapper class --- src/omlt/base.py | 319 +++++++++++++++++++ src/omlt/block.py | 6 +- src/omlt/formulation.py | 8 +- src/omlt/gbt/gbt_formulation.py | 5 +- src/omlt/linear_tree/lt_formulation.py | 5 +- src/omlt/neuralnet/activations/relu.py | 3 +- src/omlt/neuralnet/layers/full_space.py | 5 +- src/omlt/neuralnet/layers/partition_based.py | 12 +- src/omlt/neuralnet/nn_formulation.py | 9 +- tests/neuralnet/test_nn_formulation.py | 9 +- 10 files changed, 359 insertions(+), 22 deletions(-) create mode 100644 src/omlt/base.py diff --git a/src/omlt/base.py b/src/omlt/base.py new file mode 100644 index 00000000..223c76b5 --- /dev/null +++ b/src/omlt/base.py @@ -0,0 +1,319 @@ +""" +Abstraction layer of classes used by OMLT. Underneath these are +objects in a choice of modeling languages: Pyomo (default), +MathOptInterface, or Smoke (not yet implemented). + + +""" + +from abc import ABC, abstractmethod +import pyomo.environ as pyo + + +class OmltVar(ABC): + def __new__(cls, *indexes, **kwargs): + + if not indexes: + instance = OmltScalar.__new__(OmltScalar, **kwargs) + else: + instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) + return instance + + +class OmltScalar(OmltVar): + def __new__(cls, *args, format="pyomo", **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + f"Variable format %s not recognized. Supported formats " + "are 'pyomo' or 'moi'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltVar, subclass).__new__(subclass) + + instance.__init__(*args, **kwargs) + return instance + + def __init__(self, *args, **kwargs): + pass + + @abstractmethod + def construct(self, data): + pass + + @abstractmethod + def fix(self, value, skip_validation): + pass + + @property + @abstractmethod + def bounds(self): + pass + + @bounds.setter + @abstractmethod + def bounds(self, val): + pass + + @property + @abstractmethod + def lb(self): + pass + + @lb.setter + @abstractmethod + def lb(self, val): + pass + + @property + @abstractmethod + def ub(self): + pass + + @ub.setter + @abstractmethod + def ub(self, val): + pass + + # @abstractmethod + # def __mul__(self, other): + # pass + + # @abstractmethod + # def __rmul__(self, other): + # pass + + +class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): + format = "pyomo" + + def __init__(self, *args, **kwargs): + pyo.ScalarVar.__init__(self, *args, **kwargs) + + def construct(self, data): + super().construct(data) + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + super().fix(skip_validation) + else: + super().fix(value, skip_validation) + + @property + def bounds(self): + return super().bounds + + @bounds.setter + def bounds(self, val): + super().bounds = val + + @property + def ub(self): + return super().ub + + @ub.setter + def ub(self, val): + super().ub = val + + @property + def lb(self): + return super().__get__(self.lb) + + @lb.setter + def lb(self, val): + super().__setattr__(self.lb, val) + + def __lt__(self, other): + return pyo.NumericValue.__lt__(self, other) + + def __gt__(self, other): + return pyo.NumericValue.__gt__(self, other) + + def __le__(self, other): + return pyo.NumericValue.__le__(self, other) + + def __ge__(self, other): + return pyo.NumericValue.__ge__(self, other) + 
+ def __eq__(self, other): + return pyo.NumericValue.__eq__(self, other) + + def __add__(self, other): + return pyo.NumericValue.__add__(self, other) + + def __sub__(self, other): + return pyo.NumericValue.__sub__(self, other) + + # def __mul__(self,other): + # return pyo.NumericValue.__mul__(self,other) + + def __div__(self, other): + return pyo.NumericValue.__div__(self, other) + + def __truediv__(self, other): + return pyo.NumericValue.__truediv__(self, other) + + def __pow__(self, other): + return pyo.NumericValue.__pow__(self, other) + + def __radd__(self, other): + return pyo.NumericValue.__radd__(self, other) + + def __rsub__(self, other): + return pyo.NumericValue.__rsub__(self, other) + + # def __rmul__(self,other): + # return self._ComponentDataClass.__rmul__(self,other) + + def __rdiv__(self, other): + return pyo.NumericValue.__rdiv__(self, other) + + def __rtruediv__(self, other): + return pyo.NumericValue.__rtruediv__(self, other) + + def __rpow__(self, other): + return pyo.NumericValue.__rpow__(self, other) + + def __iadd__(self, other): + return pyo.NumericValue.__iadd__(self, other) + + def __isub__(self, other): + return pyo.NumericValue.__isub__(self, other) + + def __imul__(self, other): + return pyo.NumericValue.__imul__(self, other) + + def __idiv__(self, other): + return pyo.NumericValue.__idiv__(self, other) + + def __itruediv__(self, other): + return pyo.NumericValue.__itruediv__(self, other) + + def __ipow__(self, other): + return pyo.NumericValue.__ipow__(self, other) + + def __neg__(self): + return pyo.NumericValue.__neg__(self) + + def __pos__(self): + return pyo.NumericValue.__pos__(self) + + def __abs__(self): + return pyo.NumericValue.__abs__(self) + + +""" +Future formats to implement. +""" + + +class OmltScalarMOI(OmltScalar): + format = "moi" + + +class OmltScalarSmoke(OmltScalar): + format = "smoke" + + def __init__(self, *args, **kwargs): + raise ValueError( + "Storing variables in Smoke format is not currently implemented." + ) + + +class OmltScalarGurobi(OmltScalar): + format = "gurobi" + + def __init__(self, *args, **kwargs): + raise ValueError( + "Storing variables in Gurobi format is not currently implemented." + ) + + +class OmltIndexed(OmltVar): + def __new__(cls, *indexes, format="pyomo", **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + f"Variable format %s not recognized. Supported formats are 'pyomo'" + " or 'moi'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltVar, subclass).__new__(subclass) + instance.__init__(*indexes, **kwargs) + return instance + + @abstractmethod + def fix(self, value=None, skip_validation=False): + pass + + @abstractmethod + def setub(self, value): + pass + + @abstractmethod + def setlb(self, value): + pass + + +class OmltIndexedPyomo(pyo.Var, OmltIndexed): + format = "pyomo" + + def __init__(self, *indexes, **kwargs): + super().__init__(*indexes, **kwargs) + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def setub(self, value): + for vardata in self.values(): + vardata.ub = value + + def setlb(self, value): + for vardata in self.values(): + vardata.lb = value + + +""" +Future formats to implement. 
+""" + + +class OmltIndexedMOI(OmltIndexed): + format = "moi" + + +class OmltIndexedSmoke(OmltIndexed): + format = "smoke" + + def __init__(self, *args, **kwargs): + raise ValueError( + "Storing variables in Smoke format is not currently implemented." + ) + + +class OmltIndexedGurobi(OmltIndexed): + format = "gurobi" + + def __init__(self, *args, **kwargs): + raise ValueError( + "Storing variables in Gurobi format is not currently implemented." + ) + + +class OmltSet: + def __init__(self): + pass + + +class OmltExpression: + def __init__(self): + pass diff --git a/src/omlt/block.py b/src/omlt/block.py index a6c7bbf2..04932e41 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -25,6 +25,8 @@ class is used in combination with a formulation object to construct the import warnings +from omlt.base import OmltVar + import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -59,9 +61,9 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): ) self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = pyo.Var(self.inputs_set, initialize=0) + self.inputs = OmltVar(self.inputs_set, initialize=0) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = pyo.Var(self.outputs_set, initialize=0) + self.outputs = OmltVar(self.outputs_set, initialize=0) def build_formulation(self, formulation): """ diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index fd83ae86..0d054ca9 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -2,7 +2,7 @@ import weakref import pyomo.environ as pyo - +from omlt.base import OmltVar class _PyomoFormulationInterface(abc.ABC): """ @@ -82,11 +82,11 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): k: (float(scaled_input_bounds[k][0]), float(scaled_input_bounds[k][1])) for k in block.inputs_set } - block.scaled_inputs = pyo.Var(block.inputs_set, initialize=0, bounds=bnds) + block.scaled_inputs = OmltVar(block.inputs_set, initialize=0, bounds=bnds) else: - block.scaled_inputs = pyo.Var(block.inputs_set, initialize=0) + block.scaled_inputs = OmltVar(block.inputs_set, initialize=0) - block.scaled_outputs = pyo.Var(block.outputs_set, initialize=0) + block.scaled_outputs = OmltVar(block.outputs_set, initialize=0) if scaled_input_bounds is not None and scaler is None: # set the bounds on the inputs to be the same as the scaled inputs diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index f2d01296..58c133fd 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -3,6 +3,7 @@ import numpy as np import pyomo.environ as pe +from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.gbt.model import GradientBoostedTreeModel @@ -148,7 +149,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): var = input_vars[var_idx] continuous_vars[var_idx] = var - block.z_l = pe.Var( + block.z_l = OmltVar( list(zip(nodes_tree_ids[nodes_leaf_mask], nodes_node_ids[nodes_leaf_mask])), bounds=(0, None), domain=pe.Reals, @@ -167,7 +168,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): for f in continuous_vars.keys() for bi, _ in enumerate(branch_value_by_feature_id[f]) ] - block.y = pe.Var(y_index, domain=pe.Binary) + block.y = OmltVar(y_index, domain=pe.Binary) @block.Constraint(tree_ids) def single_leaf(b, tree_id): diff --git a/src/omlt/linear_tree/lt_formulation.py 
b/src/omlt/linear_tree/lt_formulation.py index 4f83e7f3..76312278 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -2,6 +2,7 @@ import pyomo.environ as pe from pyomo.gdp import Disjunct +from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -250,7 +251,7 @@ def _add_gdp_formulation_to_block( block.scaled_outputs.setub(output_bounds[1]) block.scaled_outputs.setlb(output_bounds[0]) - block.intermediate_output = pe.Var( + block.intermediate_output = OmltVar( tree_ids, bounds=(output_bounds[0], output_bounds[1]) ) @@ -329,7 +330,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output # Create the intermeditate variables. z is binary that indicates which leaf # in tree t is returned. intermediate_output is the output of tree t and # the total output of the model is the sum of the intermediate_output vars - block.z = pe.Var(t_l, within=pe.Binary) + block.z = OmltVar(t_l, within=pe.Binary) block.intermediate_output = pe.Var(tree_ids) @block.Constraint(features, tree_ids) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 427be19a..8ac42aa0 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,6 +1,7 @@ import pyomo.environ as pyo import pyomo.mpec as mpec +from omlt.base import OmltVar def bigm_relu_activation_constraint(net_block, net, layer_block, layer): r""" @@ -38,7 +39,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): The lower bound of :math:`y` is :math:`\max(0,l)`, and the upper bound of :math:`y` is :math:`\max(0,u)`. """ - layer_block.q_relu = pyo.Var(layer.output_indexes, within=pyo.Binary) + layer_block.q_relu = OmltVar(layer.output_indexes, within=pyo.Binary) layer_block._z_lower_bound_relu = pyo.Constraint(layer.output_indexes) layer_block._z_lower_bound_zhat_relu = pyo.Constraint(layer.output_indexes) diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 8970bc69..c8ac1bf5 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -2,6 +2,7 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from omlt.base import OmltVar from omlt.neuralnet.activations import NON_INCREASING_ACTIVATIONS from omlt.neuralnet.layer import ConvLayer2D, IndexMapper, PoolingLayer2D @@ -86,7 +87,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - input_layer_block.zbar = pyo.Var( + input_layer_block.zbar = OmltVar( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), initialize=0, @@ -276,7 +277,7 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): for kernel_index, _ in layer.kernel_index_with_input_indexes(0, 0, 0) ) ) - layer_block.q_maxpool = pyo.Var( + layer_block.q_maxpool = OmltVar( layer.output_indexes, layer_block._kernel_indexes, within=pyo.Binary ) layer_block._q_sum_maxpool = pyo.Constraint(layer.output_indexes) diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index f29cadd2..b67c796b 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -2,6 +2,8 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from omlt.base import 
OmltVar + def default_partition_split_func(w, n): r""" @@ -81,8 +83,8 @@ def output_node_block(b, *output_index): splits = split_func(weights) num_splits = len(splits) - b.sig = pyo.Var(domain=pyo.Binary) - b.z2 = pyo.Var(range(num_splits)) + b.sig = OmltVar(domain=pyo.Binary) + b.z2 = OmltVar(range(num_splits)) mapper = layer.input_index_mapper @@ -109,6 +111,12 @@ def output_node_block(b, *output_index): expr += prev_layer_block.z[input_index] * w lb, ub = compute_bounds_on_expr(expr) + print("inside function") + print(expr) + print(w) + print(prev_layer_block.z[input_index]) + print(prev_layer_block.z[input_index].lb) + print(prev_layer_block.z[input_index].ub) if lb is None: raise ValueError("Expression is unbounded below.") if ub is None: diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index 042b14fe..1ff4d4fb 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,6 +1,7 @@ import numpy as np import pyomo.environ as pyo +from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.neuralnet.activations import ( ACTIVATION_FUNCTION_MAP as _DEFAULT_ACTIVATION_FUNCTIONS, @@ -162,7 +163,7 @@ def _build_neural_network_formulation( @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = pyo.Var(net_layer.output_indexes, initialize=0) + b.z = OmltVar(net_layer.output_indexes, initialize=0) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -171,7 +172,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = pyo.Var(net_layer.output_indexes, initialize=0) + b.zhat = OmltVar(net_layer.output_indexes, initialize=0) return b @@ -488,7 +489,7 @@ def _build_formulation(self): @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = pyo.Var(net_layer.output_indexes, initialize=0) + b.z = OmltVar(net_layer.output_indexes, initialize=0) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -497,7 +498,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = pyo.Var(net_layer.output_indexes, initialize=0) + b.zhat = OmltVar(net_layer.output_indexes, initialize=0) return b diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index ad3b2b0f..2e1d6cb5 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -537,18 +537,19 @@ def test_partition_based_unbounded_below(): m.neural_net_block = OmltBlock() net, y = two_node_network(None, -2.0) test_layer = list(net.layers)[2] + test_layer_id = id(test_layer) prev_layer_id = id(list(net.layers)[1]) formulation = ReluPartitionFormulation(net) m.neural_net_block.build_formulation(formulation) prev_layer_block = m.neural_net_block.layer[prev_layer_id] prev_layer_block.z.setlb(-interval.inf) - + split_func = lambda w: default_partition_split_func(w, 2) with pytest.raises(ValueError) as excinfo: partition_based_dense_relu_layer( - m.neural_net_block, net, m.neural_net_block, test_layer, split_func + m.neural_net_block, net, m.neural_net_block.layer[test_layer_id], test_layer, split_func ) expected_msg = "Expression is unbounded below." 
assert str(excinfo.value) == expected_msg @@ -559,6 +560,7 @@ def test_partition_based_unbounded_above(): m.neural_net_block = OmltBlock() net, y = two_node_network(None, -2.0) test_layer = list(net.layers)[2] + test_layer_id = id(test_layer) prev_layer_id = id(list(net.layers)[1]) formulation = ReluPartitionFormulation(net) @@ -566,11 +568,12 @@ def test_partition_based_unbounded_above(): prev_layer_block = m.neural_net_block.layer[prev_layer_id] prev_layer_block.z.setub(interval.inf) + split_func = lambda w: default_partition_split_func(w, 2) with pytest.raises(ValueError) as excinfo: partition_based_dense_relu_layer( - m.neural_net_block, net, m.neural_net_block, test_layer, split_func + m.neural_net_block, net, m.neural_net_block.layer[test_layer_id], test_layer, split_func ) expected_msg = "Expression is unbounded above." assert str(excinfo.value) == expected_msg From 45ac09582b658d690c3c0f252527b39504527879 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue, 19 Mar 2024 02:05:50 +0000 Subject: [PATCH 02/75] OmltVar wrapper class --- src/omlt/neuralnet/layers/partition_based.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index b67c796b..5f99e706 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -111,12 +111,7 @@ def output_node_block(b, *output_index): expr += prev_layer_block.z[input_index] * w lb, ub = compute_bounds_on_expr(expr) - print("inside function") - print(expr) - print(w) - print(prev_layer_block.z[input_index]) - print(prev_layer_block.z[input_index].lb) - print(prev_layer_block.z[input_index].ub) + if lb is None: raise ValueError("Expression is unbounded below.") if ub is None: From 13b5265aab5290a727774df0fc3957e00d09a009 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 18 Mar 2024 22:16:04 -0700 Subject: [PATCH 03/75] Create main.yml copying CI workflow over --- .github/workflows/main.yml | 58 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000..55870dbc --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,58 @@ +--- +name: CI + +on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + +jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + # python-version: ["3.7", "3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] + + steps: + - uses: "actions/checkout@v2" + - uses: "actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions + conda install -c conda-forge ipopt + conda install -c conda-forge pyscipopt + + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" + run: "tox -re leanenv" + + - name: "Run tox targets for ${{ matrix.python-version }}" + run: "tox" + + # - name: "Run tox notebooks targets for ${{ 
matrix.python-version }}" + # run: | + # shopt -s globstar + # tox -e notebooks docs/**/*.ipynb + + - name: "Convert coverage" + run: "python -m coverage xml" + + - name: "Upload coverage to Codecov" + uses: "codecov/codecov-action@v2" + with: + fail_ci_if_error: true From 3b19e4c131e20622edf5d1972e68d719bacbb32c Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 18 Mar 2024 22:41:10 -0700 Subject: [PATCH 04/75] Create python-package.yml --- .github/workflows/python-package.yml | 58 ++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/python-package.yml diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml new file mode 100644 index 00000000..55870dbc --- /dev/null +++ b/.github/workflows/python-package.yml @@ -0,0 +1,58 @@ +--- +name: CI + +on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + +jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + # python-version: ["3.7", "3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] + + steps: + - uses: "actions/checkout@v2" + - uses: "actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions + conda install -c conda-forge ipopt + conda install -c conda-forge pyscipopt + + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" + run: "tox -re leanenv" + + - name: "Run tox targets for ${{ matrix.python-version }}" + run: "tox" + + # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" + # run: | + # shopt -s globstar + # tox -e notebooks docs/**/*.ipynb + + - name: "Convert coverage" + run: "python -m coverage xml" + + - name: "Upload coverage to Codecov" + uses: "codecov/codecov-action@v2" + with: + fail_ci_if_error: true From ccd3c89a97a624afbf4d62e232d298970a82a999 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue, 19 Mar 2024 00:58:12 -0700 Subject: [PATCH 05/75] Removing ipopt from CI workflow --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 55870dbc..3e302f03 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -35,7 +35,7 @@ jobs: python -m site python -m pip install --upgrade pip setuptools wheel python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt + # conda install -c conda-forge ipopt conda install -c conda-forge pyscipopt - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" From ff4dbcb430b11413d8176d2d11d632e79d9de1a6 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 8 Apr 2024 18:28:49 +0000 Subject: [PATCH 06/75] Implementing JuMP format scalar and indexed variables. 
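A rough sketch of the intended interface added below (names follow the diff;
the model setup is illustrative only, and the "jump" path assumes a working
juliacall installation with JuMP and MathOptInterface importable):

    from omlt.base import OmltVar

    # No indexes: OmltVar.__new__ routes to OmltScalar, and the default
    # format="pyomo" selects OmltScalarPyomo (a pyo.ScalarVar subclass).
    x = OmltVar(bounds=(0, 1), initialize=0)

    # With an index set: routes to OmltIndexed, and format="jump" selects
    # OmltIndexedJuMP via the subclass map keyed on each class's format
    # attribute.
    y = OmltVar([0, 1, 2], format="jump")
    y.construct()          # builds the JuMP-side variables from VariableInfo
    y_jump = y.to_jump()   # DenseAxisArray of the underlying JuMP variables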
--- src/omlt/base.py | 279 +++++++++++++++++++++++++++++++++++++-- src/omlt/block.py | 19 ++- src/omlt/dependencies.py | 10 ++ src/omlt/formulation.py | 7 +- tests/test_block.py | 13 +- 5 files changed, 312 insertions(+), 16 deletions(-) diff --git a/src/omlt/base.py b/src/omlt/base.py index 223c76b5..ab735a6d 100644 --- a/src/omlt/base.py +++ b/src/omlt/base.py @@ -9,6 +9,17 @@ from abc import ABC, abstractmethod import pyomo.environ as pyo +from omlt.dependencies import julia_available, moi_available + +if julia_available and moi_available: + from juliacall import Main as jl + from juliacall import Base + + jl.seval("import MathOptInterface") + moi = jl.MathOptInterface + jl.seval("import JuMP") + jump = jl.JuMP + class OmltVar(ABC): def __new__(cls, *indexes, **kwargs): @@ -26,7 +37,7 @@ def __new__(cls, *args, format="pyomo", **kwargs): if format not in subclass_map: raise ValueError( f"Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'moi'.", + "are 'pyomo' or 'jump'.", format, ) subclass = subclass_map[format] @@ -76,6 +87,12 @@ def ub(self): def ub(self, val): pass + def is_component_type(self): + return True + + def is_indexed(self): + return False + # @abstractmethod # def __mul__(self, other): # pass @@ -204,15 +221,128 @@ def __abs__(self): return pyo.NumericValue.__abs__(self) +class OmltScalarJuMP(OmltScalar): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.ScalarVar + + def __init__(self, *args, **kwargs): + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = self._bounds[0] + _has_lb = _lb is not None + _ub = self._bounds[1] + _has_ub = _ub is not None + elif self._bounds is None: + _has_lb = False + _lb = None + _has_ub = False + _ub = None + else: + raise ValueError("Bounds must be given as a tuple") + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + raise ValueError( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." 
+ ) + elif _domain: + self.domain = _domain + elif _within: + self.domain = _within + else: + self.domain = None + + if self.domain == pyo.Binary: + self.binary = True + else: + self.binary = False + if self.domain == pyo.Integers: + self.integer = True + else: + self.integer = False + + _initialize = kwargs.pop("initialize", None) + + if _initialize: + self._value = _initialize + else: + self._value = None + + self._jumpvarinfo = jump.VariableInfo( + _has_lb, + _lb, + _has_ub, + _ub, + False, # is fixed + None, # fixed value + _initialize is not None, + self._value, + self.binary, + self.integer, + ) + self._constructed = False + self._parent = None + self._ctype = pyo.ScalarVar + + def construct(self, data): + if self._block: + self._jumpvar = jump.add_variable(self._block, self._jumpvarinfo) + else: + self._jumpvar = jump.build_variable(Base.error, self._jumpvarinfo) + self._constructed = True + + def fix(self, value, skip_validation): + self.fixed = True + self._value = value + + @property + def bounds(self): + pass + + @bounds.setter + def bounds(self, val): + pass + + @property + def lb(self): + return self._jumpvar.info.lower_bound + + @lb.setter + def lb(self, val): + jump.set_upper_bound(self._jumpvar, val) + + @property + def ub(self): + return self._jumpvar.info.upper_bound + + @ub.setter + def ub(self, val): + jump.set_upper_bound(self._jumpvar, val) + + def to_jump(self): + if self._constructed: + return self._jumpvar + + """ Future formats to implement. """ -class OmltScalarMOI(OmltScalar): - format = "moi" - - class OmltScalarSmoke(OmltScalar): format = "smoke" @@ -257,11 +387,16 @@ def setub(self, value): def setlb(self, value): pass + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + class OmltIndexedPyomo(pyo.Var, OmltIndexed): format = "pyomo" def __init__(self, *indexes, **kwargs): + kwargs.pop("format", None) super().__init__(*indexes, **kwargs) def fix(self, value=None, skip_validation=False): @@ -282,15 +417,141 @@ def setlb(self, value): vardata.lb = value +class OmltIndexedJuMP(OmltIndexed): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. 
+ @property + def __class__(self): + return pyo.Var + + def __init__(self, *indexes, **kwargs): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + else: + raise ValueError("Currently index cross-products are unsupported.") + self._varinfo = {} + for idx in self._index_set: + self._varinfo[idx] = jump.VariableInfo( + False, # _has_lb, + None, # _lb, + False, # _has_ub, + None, # _ub, + False, # is fixed + None, # fix value + False, # _initialize is not None, + None, # self._value, + False, # self.binary, + False, # self.integer + ) + self._vars = {} + self._constructed = False + self._ctype = pyo.Var + self._parent = None + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._vars[item[0]] + else: + return self._vars[item] + + def __setitem__(self, item, value): + self._vars[item] = value + + def keys(self): + return self._vars.keys() + + def values(self): + return self._vars.values() + + def items(self): + return self._vars.items() + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def __len__(self): + """ + Return the number of component data objects stored by this + component. + """ + return len(self._vars) + + def __contains__(self, idx): + """Return true if the index is in the dictionary""" + return idx in self._vars + + # The default implementation is for keys() and __iter__ to be + # synonyms. The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys""" + return self._vars.__iter__() + + def construct(self, data=None): + for idx in self._index_set: + self._vars[idx] = jump.build_variable(Base.error, self._varinfo[idx]) + self._constructed = True + + def setub(self, value): + if self._constructed: + for idx in self.index_set(): + self._varinfo[idx].has_ub = True + self._varinfo[idx].upper_bound = value + self._vars[idx].info.has_ub = True + self._vars[idx].info.upper_bound = value + else: + for idx in self.index_set(): + self._varinfo[idx].has_ub = True + self._varinfo[idx].upper_bound = value + + def setlb(self, value): + if self._constructed: + for idx in self.index_set(): + self._varinfo[idx].has_lb = True + self._varinfo[idx].lower_bound = value + self._vars[idx].info.has_lb = True + self._vars[idx].info.lower_bound = value + else: + for idx in self.index_set(): + self._varinfo[idx].has_lb = True + self._varinfo[idx].lower_bound = value + + @property + def ctype(self): + return self._ctype + + def index_set(self): + return self._index_set + + @property + def name(self): + return self._name + + def to_jump(self): + if self._constructed: + return jump.Containers.DenseAxisArray( + list(self._vars.values()), self.index_set() + ) + + """ Future formats to implement. 
""" -class OmltIndexedMOI(OmltIndexed): - format = "moi" - - class OmltIndexedSmoke(OmltIndexed): format = "smoke" diff --git a/src/omlt/block.py b/src/omlt/block.py index 04932e41..26ef423d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -38,6 +38,7 @@ def __init__(self, component): self.__formulation = None self.__input_indexes = None self.__output_indexes = None + self.__format = "pyomo" def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ @@ -54,18 +55,20 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ self.__input_indexes = input_indexes self.__output_indexes = output_indexes + if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide more contextual error msg + # TODO: implement this check higher up in the class hierarchy to + # provide more contextual error msg raise ValueError( "OmltBlock must have at least one input and at least one output." ) self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0) + self.inputs = OmltVar(self.inputs_set, initialize=0, format=self.__format) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0) + self.outputs = OmltVar(self.outputs_set, initialize=0, format=self.__format) - def build_formulation(self, formulation): + def build_formulation(self, formulation, format=None): """ Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network @@ -76,7 +79,15 @@ def build_formulation(self, formulation): ---------- formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation + format : str + Which modelling language to build the formulation in. + Currently supported are "pyomo" (default) and "jump". + """ + + if format is not None: + self.__format = format + self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/src/omlt/dependencies.py b/src/omlt/dependencies.py index 6330c38f..595e2274 100644 --- a/src/omlt/dependencies.py +++ b/src/omlt/dependencies.py @@ -8,3 +8,13 @@ torch_geometric, torch_geometric_available = attempt_import("torch_geometric") lineartree, lineartree_available = attempt_import("lineartree") + +julia, julia_available = attempt_import("juliacall") + +if julia_available: + from juliacall import Main as jl + try: + jl.seval("import MathOptInterface") + moi_available = True + except jl.ArgumentError: + moi_available = False diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 0d054ca9..3149d7ef 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -4,6 +4,7 @@ import pyomo.environ as pyo from omlt.base import OmltVar + class _PyomoFormulationInterface(abc.ABC): """ Base class interface for a Pyomo formulation object. This class @@ -54,7 +55,8 @@ def _build_formulation(self): class _PyomoFormulation(_PyomoFormulationInterface): """ This is a base class for different Pyomo formulations. To create a new - formulation, inherit from this class and implement the abstract methods and properties. + formulation, inherit from this class and implement the abstract methods + and properties. 
""" def __init__(self): @@ -66,7 +68,8 @@ def _set_block(self, block): @property def block(self): - """The underlying block containing the constraints / variables for this formulation.""" + """The underlying block containing the constraints / variables for this + formulation.""" return self.__block() diff --git a/tests/test_block.py b/tests/test_block.py index 6c6311f5..03f025b7 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -35,7 +35,18 @@ def test_block(): m.b = OmltBlock() formulation = dummy_formulation() m.b.build_formulation(formulation) - print(dir(m.b)) + assert m.b._OmltBlockData__formulation is formulation + assert [k for k in m.b.inputs] == ["A", "C", "D"] + assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + + +def test_jump_block(): + m = pyo.ConcreteModel() + m.b = OmltBlock() + formulation = dummy_formulation() + + m.b.build_formulation(formulation, format="jump") + assert m.b._OmltBlockData__formulation is formulation assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] From be74ed29bced30ad3f0e19c6157fc8612c0994c0 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:19:29 +0000 Subject: [PATCH 07/75] Cleaning up variables --- src/omlt/base.py | 580 ---------------------------- src/omlt/base/__init__.py | 7 + src/omlt/base/julia.py | 114 ++++++ src/omlt/base/var.py | 771 ++++++++++++++++++++++++++++++++++++++ src/omlt/block.py | 5 +- tests/test_block.py | 6 + 6 files changed, 901 insertions(+), 582 deletions(-) delete mode 100644 src/omlt/base.py create mode 100644 src/omlt/base/__init__.py create mode 100644 src/omlt/base/julia.py create mode 100644 src/omlt/base/var.py diff --git a/src/omlt/base.py b/src/omlt/base.py deleted file mode 100644 index ab735a6d..00000000 --- a/src/omlt/base.py +++ /dev/null @@ -1,580 +0,0 @@ -""" -Abstraction layer of classes used by OMLT. Underneath these are -objects in a choice of modeling languages: Pyomo (default), -MathOptInterface, or Smoke (not yet implemented). - - -""" - -from abc import ABC, abstractmethod -import pyomo.environ as pyo - -from omlt.dependencies import julia_available, moi_available - -if julia_available and moi_available: - from juliacall import Main as jl - from juliacall import Base - - jl.seval("import MathOptInterface") - moi = jl.MathOptInterface - jl.seval("import JuMP") - jump = jl.JuMP - - -class OmltVar(ABC): - def __new__(cls, *indexes, **kwargs): - - if not indexes: - instance = OmltScalar.__new__(OmltScalar, **kwargs) - else: - instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) - return instance - - -class OmltScalar(OmltVar): - def __new__(cls, *args, format="pyomo", **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - f"Variable format %s not recognized. 
Supported formats " - "are 'pyomo' or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - - instance.__init__(*args, **kwargs) - return instance - - def __init__(self, *args, **kwargs): - pass - - @abstractmethod - def construct(self, data): - pass - - @abstractmethod - def fix(self, value, skip_validation): - pass - - @property - @abstractmethod - def bounds(self): - pass - - @bounds.setter - @abstractmethod - def bounds(self, val): - pass - - @property - @abstractmethod - def lb(self): - pass - - @lb.setter - @abstractmethod - def lb(self, val): - pass - - @property - @abstractmethod - def ub(self): - pass - - @ub.setter - @abstractmethod - def ub(self, val): - pass - - def is_component_type(self): - return True - - def is_indexed(self): - return False - - # @abstractmethod - # def __mul__(self, other): - # pass - - # @abstractmethod - # def __rmul__(self, other): - # pass - - -class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): - format = "pyomo" - - def __init__(self, *args, **kwargs): - pyo.ScalarVar.__init__(self, *args, **kwargs) - - def construct(self, data): - super().construct(data) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - super().fix(skip_validation) - else: - super().fix(value, skip_validation) - - @property - def bounds(self): - return super().bounds - - @bounds.setter - def bounds(self, val): - super().bounds = val - - @property - def ub(self): - return super().ub - - @ub.setter - def ub(self, val): - super().ub = val - - @property - def lb(self): - return super().__get__(self.lb) - - @lb.setter - def lb(self, val): - super().__setattr__(self.lb, val) - - def __lt__(self, other): - return pyo.NumericValue.__lt__(self, other) - - def __gt__(self, other): - return pyo.NumericValue.__gt__(self, other) - - def __le__(self, other): - return pyo.NumericValue.__le__(self, other) - - def __ge__(self, other): - return pyo.NumericValue.__ge__(self, other) - - def __eq__(self, other): - return pyo.NumericValue.__eq__(self, other) - - def __add__(self, other): - return pyo.NumericValue.__add__(self, other) - - def __sub__(self, other): - return pyo.NumericValue.__sub__(self, other) - - # def __mul__(self,other): - # return pyo.NumericValue.__mul__(self,other) - - def __div__(self, other): - return pyo.NumericValue.__div__(self, other) - - def __truediv__(self, other): - return pyo.NumericValue.__truediv__(self, other) - - def __pow__(self, other): - return pyo.NumericValue.__pow__(self, other) - - def __radd__(self, other): - return pyo.NumericValue.__radd__(self, other) - - def __rsub__(self, other): - return pyo.NumericValue.__rsub__(self, other) - - # def __rmul__(self,other): - # return self._ComponentDataClass.__rmul__(self,other) - - def __rdiv__(self, other): - return pyo.NumericValue.__rdiv__(self, other) - - def __rtruediv__(self, other): - return pyo.NumericValue.__rtruediv__(self, other) - - def __rpow__(self, other): - return pyo.NumericValue.__rpow__(self, other) - - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - 
return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) - - -class OmltScalarJuMP(OmltScalar): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.ScalarVar - - def __init__(self, *args, **kwargs): - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = self._bounds[0] - _has_lb = _lb is not None - _ub = self._bounds[1] - _has_ub = _ub is not None - elif self._bounds is None: - _has_lb = False - _lb = None - _has_ub = False - _ub = None - else: - raise ValueError("Bounds must be given as a tuple") - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - raise ValueError( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - elif _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - self._value = _initialize - else: - self._value = None - - self._jumpvarinfo = jump.VariableInfo( - _has_lb, - _lb, - _has_ub, - _ub, - False, # is fixed - None, # fixed value - _initialize is not None, - self._value, - self.binary, - self.integer, - ) - self._constructed = False - self._parent = None - self._ctype = pyo.ScalarVar - - def construct(self, data): - if self._block: - self._jumpvar = jump.add_variable(self._block, self._jumpvarinfo) - else: - self._jumpvar = jump.build_variable(Base.error, self._jumpvarinfo) - self._constructed = True - - def fix(self, value, skip_validation): - self.fixed = True - self._value = value - - @property - def bounds(self): - pass - - @bounds.setter - def bounds(self, val): - pass - - @property - def lb(self): - return self._jumpvar.info.lower_bound - - @lb.setter - def lb(self, val): - jump.set_upper_bound(self._jumpvar, val) - - @property - def ub(self): - return self._jumpvar.info.upper_bound - - @ub.setter - def ub(self, val): - jump.set_upper_bound(self._jumpvar, val) - - def to_jump(self): - if self._constructed: - return self._jumpvar - - -""" -Future formats to implement. -""" - - -class OmltScalarSmoke(OmltScalar): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltScalarGurobi(OmltScalar): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) - - -class OmltIndexed(OmltVar): - def __new__(cls, *indexes, format="pyomo", **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - f"Variable format %s not recognized. 
Supported formats are 'pyomo'" - " or 'moi'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - return instance - - @abstractmethod - def fix(self, value=None, skip_validation=False): - pass - - @abstractmethod - def setub(self, value): - pass - - @abstractmethod - def setlb(self, value): - pass - - def valid_model_component(self): - """Return True if this can be used as a model component.""" - return True - - -class OmltIndexedPyomo(pyo.Var, OmltIndexed): - format = "pyomo" - - def __init__(self, *indexes, **kwargs): - kwargs.pop("format", None) - super().__init__(*indexes, **kwargs) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in self.values(): - vardata.fix(value, skip_validation) - - def setub(self, value): - for vardata in self.values(): - vardata.ub = value - - def setlb(self, value): - for vardata in self.values(): - vardata.lb = value - - -class OmltIndexedJuMP(OmltIndexed): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.Var - - def __init__(self, *indexes, **kwargs): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - else: - raise ValueError("Currently index cross-products are unsupported.") - self._varinfo = {} - for idx in self._index_set: - self._varinfo[idx] = jump.VariableInfo( - False, # _has_lb, - None, # _lb, - False, # _has_ub, - None, # _ub, - False, # is fixed - None, # fix value - False, # _initialize is not None, - None, # self._value, - False, # self.binary, - False, # self.integer - ) - self._vars = {} - self._constructed = False - self._ctype = pyo.Var - self._parent = None - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._vars[item[0]] - else: - return self._vars[item] - - def __setitem__(self, item, value): - self._vars[item] = value - - def keys(self): - return self._vars.keys() - - def values(self): - return self._vars.values() - - def items(self): - return self._vars.items() - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in self.values(): - vardata.fix(value, skip_validation) - - def __len__(self): - """ - Return the number of component data objects stored by this - component. - """ - return len(self._vars) - - def __contains__(self, idx): - """Return true if the index is in the dictionary""" - return idx in self._vars - - # The default implementation is for keys() and __iter__ to be - # synonyms. 
The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys""" - return self._vars.__iter__() - - def construct(self, data=None): - for idx in self._index_set: - self._vars[idx] = jump.build_variable(Base.error, self._varinfo[idx]) - self._constructed = True - - def setub(self, value): - if self._constructed: - for idx in self.index_set(): - self._varinfo[idx].has_ub = True - self._varinfo[idx].upper_bound = value - self._vars[idx].info.has_ub = True - self._vars[idx].info.upper_bound = value - else: - for idx in self.index_set(): - self._varinfo[idx].has_ub = True - self._varinfo[idx].upper_bound = value - - def setlb(self, value): - if self._constructed: - for idx in self.index_set(): - self._varinfo[idx].has_lb = True - self._varinfo[idx].lower_bound = value - self._vars[idx].info.has_lb = True - self._vars[idx].info.lower_bound = value - else: - for idx in self.index_set(): - self._varinfo[idx].has_lb = True - self._varinfo[idx].lower_bound = value - - @property - def ctype(self): - return self._ctype - - def index_set(self): - return self._index_set - - @property - def name(self): - return self._name - - def to_jump(self): - if self._constructed: - return jump.Containers.DenseAxisArray( - list(self._vars.values()), self.index_set() - ) - - -""" -Future formats to implement. -""" - - -class OmltIndexedSmoke(OmltIndexed): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltIndexedGurobi(OmltIndexed): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." 
- ) - - -class OmltSet: - def __init__(self): - pass - - -class OmltExpression: - def __init__(self): - pass diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py new file mode 100644 index 00000000..328ea98a --- /dev/null +++ b/src/omlt/base/__init__.py @@ -0,0 +1,7 @@ +DEFAULT_MODELING_LANGUAGE = "pyomo" + +from omlt.base.julia import jump +from omlt.base.var import OmltVar + +# from omlt.base.expression import OmltExpression +# from omlt.base.constraint import OmltConstraint diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py new file mode 100644 index 00000000..1e6fb413 --- /dev/null +++ b/src/omlt/base/julia.py @@ -0,0 +1,114 @@ +from omlt.dependencies import julia_available, moi_available +from omlt.base.expression import OmltExpression + +if julia_available and moi_available: + from juliacall import Main as jl + from juliacall import Base + + jl_err = Base.error + jl.seval("import MathOptInterface") + moi = jl.MathOptInterface + jl.seval("import JuMP") + jump = jl.JuMP + + +class JuMPVarInfo: + def __init__( + self, + lower_bound=None, + upper_bound=None, + fixed_value=None, + start_value=None, + binary=False, + integer=False, + ): + self.has_lb = lower_bound is not None + self.lb = lower_bound + self.has_ub = upper_bound is not None + self.ub = upper_bound + self.has_fix = fixed_value is not None + self.fixed_value = fixed_value + self.has_start = start_value is not None + self.start_value = start_value + self.binary = binary + self.integer = integer + + @property + def lower_bound(self): + return self.lb + + @lower_bound.setter + def lower_bound(self, value=None): + self.lb = value + self.has_lb = value is not None + + def setlb(self, value): + self.lower_bound = value + + @property + def upper_bound(self): + return self.ub + + @upper_bound.setter + def upper_bound(self, value=None): + self.ub = value + self.has_ub = value is not None + + def setub(self, value): + self.upper_bound = value + + def to_jump(self): + return jump.VariableInfo( + self.has_lb, + self.lower_bound, + self.has_ub, + self.upper_bound, + self.has_fix, + self.fixed_value, + self.has_start, + self.start_value, + self.binary, + self.integer, + ) + + +class JumpVar: + def __init__(self, varinfo: JuMPVarInfo, name): + self.info = varinfo + self.name = name + self.construct() + + def __str__(self): + return self.name + + def setlb(self, value): + self.info.setlb(value) + self.construct() + + def setub(self, value): + self.info.setlb(value) + self.construct() + + def construct(self): + self.var = jump.build_variable(Base.error, self.info.to_jump()) + + @property + def value(self): + return self.var.info.start + + def add_to_model(self, model, name=None): + if name is None: + name = self._name + jump.add_variable(model, self.var, name) + + def to_jump(self): + return self.var + + def __sub__(self, other): + return OmltExpression(expr=(self, "-", other), format="jump") + + def __mul__(self, other): + return OmltExpression(expr=(self, "*", other), format="jump") + + def __eq__(self, other): + return OmltExpression(expr=(self, "==", other), format="jump") diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py new file mode 100644 index 00000000..eea523c6 --- /dev/null +++ b/src/omlt/base/var.py @@ -0,0 +1,771 @@ +""" +Abstraction layer of classes used by OMLT. Underneath these are +objects in a choice of modeling languages: Pyomo (default), +JuMP, or others (not yet implemented - e.g. Smoke, Gurobi). 
+ + +""" + +from abc import ABC, abstractmethod +import pyomo.environ as pyo + +from omlt.base import jump, DEFAULT_MODELING_LANGUAGE +from omlt.base.julia import JuMPVarInfo, JumpVar + + +class OmltVar(ABC): + def __new__(cls, *indexes, **kwargs): + + if not indexes: + instance = OmltScalar.__new__(OmltScalar, **kwargs) + else: + instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) + return instance + + @abstractmethod + def construct(self, data): + pass + + @abstractmethod + def fix(self, value, skip_validation): + pass + + @property + @abstractmethod + def ctype(self): + pass + + @property + @abstractmethod + def name(self): + pass + + # Some methods to tell OMLT (and Pyomo components) that this + # is a variable. + def is_component_type(self): + return True + + @abstractmethod + def is_indexed(self): + pass + + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + + +class OmltScalar(OmltVar): + def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + f"Variable format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltVar, subclass).__new__(subclass) + + instance.__init__(*args, **kwargs) + instance._format = format + return instance + + def is_indexed(self): + return False + + # Bound-setting interface for scalar variables: + @property + @abstractmethod + def bounds(self): + pass + + @bounds.setter + @abstractmethod + def bounds(self, val): + pass + + @property + @abstractmethod + def lb(self): + pass + + @lb.setter + @abstractmethod + def lb(self, val): + pass + + @property + @abstractmethod + def ub(self): + pass + + @ub.setter + @abstractmethod + def ub(self, val): + pass + + # Interface for getting/setting value + @property + @abstractmethod + def value(self): + pass + + @value.setter + @abstractmethod + def value(self, val): + pass + + # Interface governing how variables behave in expressions. 
+ + # def __lt__(self, other): + # return pyo.NumericValue.__lt__(self, other) + + # def __gt__(self, other): + # return pyo.NumericValue.__gt__(self, other) + + # def __le__(self, other): + # return pyo.NumericValue.__le__(self, other) + + # def __ge__(self, other): + # return pyo.NumericValue.__ge__(self, other) + + # def __eq__(self, other): + # return pyo.NumericValue.__eq__(self, other) + + # def __add__(self, other): + # return pyo.NumericValue.__add__(self, other) + + # def __sub__(self, other): + # return pyo.NumericValue.__sub__(self, other) + + # # def __mul__(self,other): + # # return pyo.NumericValue.__mul__(self,other) + + # def __div__(self, other): + # return pyo.NumericValue.__div__(self, other) + + # def __truediv__(self, other): + # return pyo.NumericValue.__truediv__(self, other) + + # def __pow__(self, other): + # return pyo.NumericValue.__pow__(self, other) + + # def __radd__(self, other): + # return pyo.NumericValue.__radd__(self, other) + + # def __rsub__(self, other): + # return pyo.NumericValue.__rsub__(self, other) + + # # def __rmul__(self,other): + # # return self._ComponentDataClass.__rmul__(self,other) + + # def __rdiv__(self, other): + # return pyo.NumericValue.__rdiv__(self, other) + + # def __rtruediv__(self, other): + # return pyo.NumericValue.__rtruediv__(self, other) + + # def __rpow__(self, other): + # return pyo.NumericValue.__rpow__(self, other) + + # def __iadd__(self, other): + # return pyo.NumericValue.__iadd__(self, other) + + # def __isub__(self, other): + # return pyo.NumericValue.__isub__(self, other) + + # def __imul__(self, other): + # return pyo.NumericValue.__imul__(self, other) + + # def __idiv__(self, other): + # return pyo.NumericValue.__idiv__(self, other) + + # def __itruediv__(self, other): + # return pyo.NumericValue.__itruediv__(self, other) + + # def __ipow__(self, other): + # return pyo.NumericValue.__ipow__(self, other) + + # def __neg__(self): + # return pyo.NumericValue.__neg__(self) + + # def __pos__(self): + # return pyo.NumericValue.__pos__(self) + + # def __abs__(self): + # return pyo.NumericValue.__abs__(self) + + +class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): + format = "pyomo" + + def __init__(self, *args, **kwargs): + kwargs.pop("format", None) + pyo.ScalarVar.__init__(self, *args, **kwargs) + + +class OmltScalarJuMP(OmltScalar): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.ScalarVar + + def __init__(self, *args, **kwargs): + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = self._bounds[0] + _ub = self._bounds[1] + elif self._bounds is None: + _lb = None + _ub = None + else: + raise ValueError("Bounds must be given as a tuple") + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + raise ValueError( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." 
+ )
+ elif _domain:
+ self.domain = _domain
+ elif _within:
+ self.domain = _within
+ else:
+ self.domain = None
+
+ if self.domain == pyo.Binary:
+ self.binary = True
+ else:
+ self.binary = False
+ if self.domain == pyo.Integers:
+ self.integer = True
+ else:
+ self.integer = False
+
+ _initialize = kwargs.pop("initialize", None)
+
+ if _initialize:
+ self._value = _initialize
+ else:
+ self._value = None
+
+ self._varinfo = JuMPVarInfo(
+ _lb,
+ _ub,
+ None, # fix value
+ self._value,
+ self.binary,
+ self.integer,
+ )
+ self._constructed = False
+ self._parent = None
+ self._ctype = pyo.ScalarVar
+ self._name = None
+
+ def construct(self, data=None):
+ self._var = JumpVar(self._varinfo, self._name)
+ self._constructed = True
+ if self._block:
+ self._blockvar = jump.add_variable(self._block, self._var)
+
+ def fix(self, value, skip_validation):
+ self.fixed = True
+ self._value = value
+ self._varinfo.fixed_value = value
+ self._varinfo.has_fix = value is not None
+ if self._constructed:
+ self.construct()
+
+ @property
+ def bounds(self):
+ return (self.lb, self.ub)
+
+ @bounds.setter
+ def bounds(self, val):
+ if val is None:
+ self.lb = None
+ self.ub = None
+ elif len(val) == 2:
+ self.lb = val[0]
+ self.ub = val[1]
+
+ @property
+ def lb(self):
+ return self._varinfo.lower_bound
+
+ @lb.setter
+ def lb(self, val):
+ self._varinfo.lower_bound = val
+ if self._constructed:
+ self.construct()
+
+ @property
+ def ub(self):
+ return self._varinfo.upper_bound
+
+ @ub.setter
+ def ub(self, val):
+ self._varinfo.upper_bound = val
+ if self._constructed:
+ self.construct()
+
+ @property
+ def value(self):
+ if self._constructed:
+ return self._var.value
+ else:
+ return self._varinfo.start_value
+
+ @value.setter
+ def value(self, val):
+ if self._constructed:
+ self._var.value = val
+ else:
+ self._varinfo.start_value = val
+
+ @property
+ def ctype(self):
+ return self._ctype
+
+ @property
+ def name(self):
+ return self._name
+
+ @name.setter
+ def name(self, value):
+ self._name = value
+
+ def to_jump(self):
+ if self._constructed:
+ return self._var.to_jump()
+ else:
+ return self._varinfo.to_jump()
+
+
+"""
+Future formats to implement.
+"""
+
+
+class OmltScalarSmoke(OmltScalar):
+ format = "smoke"
+
+ def __init__(self, *args, **kwargs):
+ raise ValueError(
+ "Storing variables in Smoke format is not currently implemented."
+ )
+
+
+class OmltScalarGurobi(OmltScalar):
+ format = "gurobi"
+
+ def __init__(self, *args, **kwargs):
+ raise ValueError(
+ "Storing variables in Gurobi format is not currently implemented."
+ )
+
+
+class OmltIndexed(OmltVar):
+ def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs):
+ subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()}
+ if format not in subclass_map:
+ raise ValueError(
+ f"Variable format {format} not recognized. Supported formats are 'pyomo'"
+ " or 'jump'."
+ )
+ subclass = subclass_map[format]
+ instance = super(OmltVar, subclass).__new__(subclass)
+ instance.__init__(*indexes, **kwargs)
+ instance._format = format
+ return instance
+
+ def is_indexed(self):
+ return True
+
+ @property
+ @abstractmethod
+ def index_set(self):
+ pass
+
+ # Bound-setting interface for indexed variables:
+ @abstractmethod
+ def setub(self, value):
+ pass
+
+ @abstractmethod
+ def setlb(self, value):
+ pass
+
+ # Interface: act as a dict for the sub-variables.
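Both JuMP-backed classes (OmltScalarJuMP above, and OmltIndexedJuMP below) also override __class__ as a property so that Pyomo's isinstance-based component registration accepts them as native Var objects when they are attached to a block. A minimal sketch of why that trick works, using toy names for illustration only:

    class Disguise:
        # isinstance() consults obj.__class__, which this property
        # intercepts; type() looks at the real type and is not fooled.
        @property
        def __class__(self):
            return dict

    d = Disguise()
    assert isinstance(d, dict)
    assert type(d) is Disguise

The cost of the disguise is that anything relying on type() or on real inheritance still sees the wrapper class, so the wrapper has to reimplement whatever Var behavior it needs.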
+ @abstractmethod + def __getitem__(self, item): + pass + + @abstractmethod + def __setitem__(self, item, value): + pass + + @abstractmethod + def keys(self): + pass + + @abstractmethod + def values(self): + pass + + @abstractmethod + def items(self): + pass + + @abstractmethod + def __len__(self): + pass + + @abstractmethod + def __contains__(self, idx): + pass + + @abstractmethod + def __iter__(self): + pass + + +# Interface governing how variables behave in expressions. + +# def __lt__(self, other): +# return pyo.NumericValue.__lt__(self, other) + +# def __gt__(self, other): +# return pyo.NumericValue.__gt__(self, other) + +# def __le__(self, other): +# return pyo.NumericValue.__le__(self, other) + +# def __ge__(self, other): +# return pyo.NumericValue.__ge__(self, other) + +# def __eq__(self, other): +# return pyo.NumericValue.__eq__(self, other) + +# def __add__(self, other): +# return pyo.NumericValue.__add__(self, other) + +# def __sub__(self, other): +# return pyo.NumericValue.__sub__(self, other) + +# # def __mul__(self,other): +# # return pyo.NumericValue.__mul__(self,other) + +# def __div__(self, other): +# return pyo.NumericValue.__div__(self, other) + +# def __truediv__(self, other): +# return pyo.NumericValue.__truediv__(self, other) + +# def __pow__(self, other): +# return pyo.NumericValue.__pow__(self, other) + +# def __radd__(self, other): +# return pyo.NumericValue.__radd__(self, other) + +# def __rsub__(self, other): +# return pyo.NumericValue.__rsub__(self, other) + +# # def __rmul__(self,other): +# # return self._ComponentDataClass.__rmul__(self,other) + +# def __rdiv__(self, other): +# return pyo.NumericValue.__rdiv__(self, other) + +# def __rtruediv__(self, other): +# return pyo.NumericValue.__rtruediv__(self, other) + +# def __rpow__(self, other): +# return pyo.NumericValue.__rpow__(self, other) + +# def __iadd__(self, other): +# return pyo.NumericValue.__iadd__(self, other) + +# def __isub__(self, other): +# return pyo.NumericValue.__isub__(self, other) + +# def __imul__(self, other): +# return pyo.NumericValue.__imul__(self, other) + +# def __idiv__(self, other): +# return pyo.NumericValue.__idiv__(self, other) + +# def __itruediv__(self, other): +# return pyo.NumericValue.__itruediv__(self, other) + +# def __ipow__(self, other): +# return pyo.NumericValue.__ipow__(self, other) + +# def __neg__(self): +# return pyo.NumericValue.__neg__(self) + +# def __pos__(self): +# return pyo.NumericValue.__pos__(self) + +# def __abs__(self): +# return pyo.NumericValue.__abs__(self) + + +class OmltIndexedPyomo(pyo.Var, OmltIndexed): + format = "pyomo" + + def __init__(self, *indexes, **kwargs): + kwargs.pop("format", None) + super().__init__(*indexes, **kwargs) + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def setub(self, value): + for vardata in self.values(): + vardata.ub = value + + def setlb(self, value): + for vardata in self.values(): + vardata.lb = value + + +class OmltIndexedJuMP(OmltIndexed): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. 
+ @property
+ def __class__(self):
+ return pyo.Var
+
+ def __init__(self, *indexes, **kwargs):
+ if len(indexes) == 1:
+ index_set = indexes[0]
+ i_dict = {}
+ for i, val in enumerate(index_set):
+ i_dict[i] = val
+ self._index_set = tuple(i_dict[i] for i in range(len(index_set)))
+ else:
+ raise ValueError("Currently index cross-products are unsupported.")
+
+ self._block = kwargs.pop("block", None)
+
+ self._bounds = kwargs.pop("bounds", None)
+
+ if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set):
+ _lb = {k: v[0] for k, v in self._bounds.items()}
+ _ub = {k: v[1] for k, v in self._bounds.items()}
+ elif isinstance(self._bounds, tuple) and len(self._bounds) == 2:
+ _lb = {i: self._bounds[0] for i in self._index_set}
+ _ub = {i: self._bounds[1] for i in self._index_set}
+ elif self._bounds is None:
+ _lb = {i: None for i in self._index_set}
+ _ub = {i: None for i in self._index_set}
+ else:
+ raise ValueError(
+ f"Bounds must be given as a tuple, but {self._bounds} was given."
+ )
+
+ _domain = kwargs.pop("domain", None)
+ _within = kwargs.pop("within", None)
+
+ if _domain and _within and _domain != _within:
+ raise ValueError(
+ "'domain' and 'within' keywords have both "
+ "been supplied and do not agree. Please try "
+ "with a single keyword for the domain of this "
+ "variable."
+ )
+ elif _domain:
+ self.domain = _domain
+ elif _within:
+ self.domain = _within
+ else:
+ self.domain = None
+
+ if self.domain == pyo.Binary:
+ self.binary = True
+ else:
+ self.binary = False
+ if self.domain == pyo.Integers:
+ self.integer = True
+ else:
+ self.integer = False
+
+ _initialize = kwargs.pop("initialize", None)
+
+ if _initialize:
+ # If starting values have same length as index set,
+ # take one for each variable in index.
+ if len(self._index_set) == len(_initialize):
+ self._value = _initialize
+ # If there's a single starting value, use it for all
+ # variables in index.
+ elif len(_initialize) == 1:
+ self._value = {i: _initialize[0] for i in self._index_set}
+ else:
+ raise ValueError(
+ f"Index set has length {len(self._index_set)}, but"
+ f" initializer has length {len(_initialize)}."
+ )
+ else:
+ self._value = {i: None for i in self._index_set}
+
+ self._varinfo = {}
+ for idx in self._index_set:
+ self._varinfo[idx] = JuMPVarInfo(
+ _lb[idx],
+ _ub[idx],
+ None, # fix value
+ self._value[idx],
+ self.binary,
+ self.integer,
+ )
+ self._vars = {}
+ self._constructed = False
+ self._ctype = pyo.Var
+ self._parent = None
+ self._name = None
+
+ def __getitem__(self, item):
+ if isinstance(item, tuple) and len(item) == 1:
+ return self._vars[item[0]]
+ else:
+ return self._vars[item]
+
+ def __setitem__(self, item, value):
+ self._varinfo[item] = value
+ if self._constructed:
+ self.construct()
+
+ def keys(self):
+ return self._vars.keys()
+
+ def values(self):
+ return self._vars.values()
+
+ def items(self):
+ return self._vars.items()
+
+ def fix(self, value=None):
+ self.fixed = True
+ if value is not None:
+ for vardata in self._varinfo.values():
+ vardata.has_fix = True
+ vardata.fixed_value = value
+ else:
+ for vardata in self._varinfo.values():
+ vardata.has_fix = True
+
+ def __len__(self):
+ """
+ Return the number of component data objects stored by this
+ component.
+ """
+ return len(self._vars)
+
+ def __contains__(self, idx):
+ """Return True if the index is in the dictionary."""
+ return idx in self._vars
+
+ # The default implementation is for keys() and __iter__ to be
+ # synonyms. The logic is implemented in keys() so that
+ # keys/values/items continue to work for components that implement
+ # other definitions for __iter__ (e.g., Set)
+ def __iter__(self):
+ """Return an iterator of the component data keys."""
+ return self._vars.__iter__()
+
+ def construct(self, data=None):
+ for idx in self._index_set:
+ if isinstance(idx, int):
+ name = str(self.name) + "[" + str(idx) + "]"
+ else:
+ name = str(self.name) + str(list(idx)).replace(" ", "")
+ self._vars[idx] = JumpVar(self._varinfo[idx], name)
+ self._constructed = True
+
+ def setub(self, value):
+ for idx in self.index_set():
+ self._varinfo[idx].upper_bound = value
+ if self._constructed:
+ self.construct()
+
+ def setlb(self, value):
+ for idx in self.index_set():
+ self._varinfo[idx].lower_bound = value
+ if self._constructed:
+ self.construct()
+
+ @property
+ def ctype(self):
+ return self._ctype
+
+ def index_set(self):
+ return self._index_set
+
+ @property
+ def name(self):
+ return self._name
+
+ def to_jump(self):
+ if self._constructed:
+ return jump.Containers.DenseAxisArray(
+ list(self._vars.values()), self.index_set()
+ )
+
+
+"""
+Future formats to implement.
+"""
+
+
+class OmltIndexedSmoke(OmltIndexed):
+ format = "smoke"
+
+ def __init__(self, *args, **kwargs):
+ raise ValueError(
+ "Storing variables in Smoke format is not currently implemented."
+ )
+
+
+class OmltIndexedGurobi(OmltIndexed):
+ format = "gurobi"
+
+ def __init__(self, *args, **kwargs):
+ raise ValueError(
+ "Storing variables in Gurobi format is not currently implemented."
+ )
+
+
+class OmltSet:
+ def __init__(self):
+ pass
+
+
+class OmltExpression:
+ def __init__(self):
+ pass
diff --git a/src/omlt/block.py b/src/omlt/block.py
index 26ef423d..11956f48 100644
--- a/src/omlt/block.py
+++ b/src/omlt/block.py
@@ -25,7 +25,8 @@ class is used in combination with a formulation object to construct the
 import warnings
-from omlt.base import OmltVar
+from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE
+
 import pyomo.environ as pyo
 from pyomo.core.base.block import _BlockData, declare_custom_block
@@ -38,7 +39,7 @@ def __init__(self, component):
 self.__formulation = None
 self.__input_indexes = None
 self.__output_indexes = None
- self.__format = "pyomo"
+ self.__format = DEFAULT_MODELING_LANGUAGE
 def _setup_inputs_outputs(self, *, input_indexes, output_indexes):
 """
diff --git a/tests/test_block.py b/tests/test_block.py
index 03f025b7..c58da133 100644
--- a/tests/test_block.py
+++ b/tests/test_block.py
@@ -2,6 +2,7 @@
 import pytest
 from omlt import OmltBlock
+from omlt.base import OmltVar
 class dummy_formulation(object):
@@ -43,6 +44,11 @@ def test_block():
 def test_jump_block():
 m = pyo.ConcreteModel()
 m.b = OmltBlock()
+
+ m.b.x = OmltVar(initialize=(2, 7), format="jump")
+
+ assert m.b.x.value == (2, 7)
+
 formulation = dummy_formulation()
 m.b.build_formulation(formulation, format="jump")
From 820f5f6db3aac333ee80188d50c4aa5ae8d53d0c Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 19 Apr 2024 19:19:29 +0000
Subject: [PATCH 08/75] Cleaning up variables - MOI dependency
---
 src/omlt/base/julia.py | 6 ++----
 src/omlt/dependencies.py | 8 --------
 2 files changed, 2 insertions(+), 12 deletions(-)
diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py
index 1e6fb413..8dbb23df 100644
--- a/src/omlt/base/julia.py
+++ b/src/omlt/base/julia.py
@@ -1,13 +1,11 @@
-from omlt.dependencies import julia_available, moi_available
+from
omlt.dependencies import julia_available from omlt.base.expression import OmltExpression -if julia_available and moi_available: +if julia_available: from juliacall import Main as jl from juliacall import Base jl_err = Base.error - jl.seval("import MathOptInterface") - moi = jl.MathOptInterface jl.seval("import JuMP") jump = jl.JuMP diff --git a/src/omlt/dependencies.py b/src/omlt/dependencies.py index 595e2274..3b882da2 100644 --- a/src/omlt/dependencies.py +++ b/src/omlt/dependencies.py @@ -10,11 +10,3 @@ lineartree, lineartree_available = attempt_import("lineartree") julia, julia_available = attempt_import("juliacall") - -if julia_available: - from juliacall import Main as jl - try: - jl.seval("import MathOptInterface") - moi_available = True - except jl.ArgumentError: - moi_available = False From 55e338b9d7a686b550e14feaee13b06ee51187b7 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:19:29 +0000 Subject: [PATCH 09/75] Cleaning up variables - MOI dependency --- src/omlt/base/julia.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index 8dbb23df..1a4bcf0c 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,6 +1,7 @@ from omlt.dependencies import julia_available -from omlt.base.expression import OmltExpression +# from omlt.base.expression import OmltExpression +if julia_available: if julia_available: from juliacall import Main as jl from juliacall import Base @@ -102,11 +103,11 @@ def add_to_model(self, model, name=None): def to_jump(self): return self.var - def __sub__(self, other): - return OmltExpression(expr=(self, "-", other), format="jump") + # def __sub__(self, other): + # return OmltExpression(expr=(self, "-", other), format="jump") - def __mul__(self, other): - return OmltExpression(expr=(self, "*", other), format="jump") + # def __mul__(self, other): + # return OmltExpression(expr=(self, "*", other), format="jump") - def __eq__(self, other): - return OmltExpression(expr=(self, "==", other), format="jump") + # def __eq__(self, other): + # return OmltExpression(expr=(self, "==", other), format="jump") From fa7a859a26260aeb4de057ee589d3e579fa05735 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:52:08 -0700 Subject: [PATCH 10/75] Removing duplicate line --- src/omlt/base/julia.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index 1a4bcf0c..e2e771d6 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,7 +1,6 @@ from omlt.dependencies import julia_available # from omlt.base.expression import OmltExpression -if julia_available: if julia_available: from juliacall import Main as jl from juliacall import Base From 5790bad956ae13963e112a57a2bbead512b178be Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:58:01 -0700 Subject: [PATCH 11/75] Getting dependencies lined up correctly --- src/omlt/base/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 328ea98a..292c5eb2 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,6 +1,8 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" +from omlt.dependencies import julia_available -from omlt.base.julia import jump +if julia_available: + from omlt.base.julia import jump from 
omlt.base.var import OmltVar # from omlt.base.expression import OmltExpression From 8cc7bf649922ffa83f48cfebe083ba0836805c9b Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:29:08 -0700 Subject: [PATCH 12/75] Update var.py --- src/omlt/base/var.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index eea523c6..176fe372 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -9,7 +9,10 @@ from abc import ABC, abstractmethod import pyomo.environ as pyo -from omlt.base import jump, DEFAULT_MODELING_LANGUAGE +from omlt.dependencies import julia_available + +if julia_available: + from omlt.base import jump, DEFAULT_MODELING_LANGUAGE from omlt.base.julia import JuMPVarInfo, JumpVar From cad7ba28236f1cec5a13ba87f3df75a068023ee6 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:35:36 -0700 Subject: [PATCH 13/75] Update var.py --- src/omlt/base/var.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 176fe372..f22d8183 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -11,8 +11,9 @@ from omlt.dependencies import julia_available +from omlt.base import DEFAULT_MODELING_LANGUAGE if julia_available: - from omlt.base import jump, DEFAULT_MODELING_LANGUAGE + from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar From 3abb7ffb085f4b99fd32722740e3417fbc867fed Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:43:43 -0700 Subject: [PATCH 14/75] Make test for JuMP variables conditional on presence of JuMP --- tests/test_block.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_block.py b/tests/test_block.py index c58da133..74fb9290 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -3,6 +3,8 @@ from omlt import OmltBlock from omlt.base import OmltVar +from omlt.dependencies import julia_available + class dummy_formulation(object): @@ -40,7 +42,9 @@ def test_block(): assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] - +@pytest.mark.skipif( + not julia_available, reason="Test only valid when Julia is available" +) def test_jump_block(): m = pyo.ConcreteModel() m.b = OmltBlock() From 9621f3d25ae7049e3fe0dc2af5957617de16fbab Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:01:19 -0700 Subject: [PATCH 15/75] Use tensorflow-cpu for testing to save space --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 7b4233e8..90a56457 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,7 +72,7 @@ testing = nbmake tox flake8 - tensorflow + tensorflow-cpu ipywidgets jupyter lightgbm From 725348beb392956a85a982d84108568c38ac397e Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:21:18 -0700 Subject: [PATCH 16/75] Fix Keras version at 2.9 Keras 3 requires models to have the .keras file format. Going forward we should probably update the test models to use this format, but to unblock I'm holding back the Keras version. 
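For reference, the migration the message above defers is small: Keras 3 can load a legacy HDF5 model and re-save it in its native format. A hedged sketch of that one-off conversion; the file path is illustrative, not an actual file in the repository:

    import keras

    # Load a test model saved in the legacy HDF5 format and re-save it
    # in the native ".keras" format that Keras 3 expects.
    model = keras.models.load_model("tests/models/legacy_model.h5")
    model.save("tests/models/legacy_model.keras")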
--- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 90a56457..7ea8599e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -79,7 +79,7 @@ testing = linear-tree matplotlib pandas - keras + keras==2.9.0 onnx onnxruntime onnxmltools From 19a7128eb4b7d397a2eb776966747d066a15e539 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Apr 2024 00:16:13 +0000 Subject: [PATCH 17/75] removing tweaked action file --- .github/workflows/main.yml | 58 -------------------------------------- 1 file changed, 58 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 3e302f03..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - # python-version: ["3.7", "3.8", "3.9"] - python-version: ["3.8", "3.9", "3.10"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - # conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v2" - with: - fail_ci_if_error: true From 015324f1a331f5ba7b16867457c7b90c8f3ffe7f Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Apr 2024 00:20:48 +0000 Subject: [PATCH 18/75] restoring action workflow file --- .github/workflows/main.yml | 57 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000..528fdaf0 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,57 @@ +--- + name: CI + + on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + + jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + # python-version: ["3.7", "3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] + + steps: + - uses: "actions/checkout@v2" + - uses: "actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade 
coverage[toml] virtualenv tox tox-gh-actions + conda install -c conda-forge ipopt + conda install -c conda-forge pyscipopt + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" + run: "tox -re leanenv" + + - name: "Run tox targets for ${{ matrix.python-version }}" + run: "tox" + + # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" + # run: | + # shopt -s globstar + # tox -e notebooks docs/**/*.ipynb + + - name: "Convert coverage" + run: "python -m coverage xml" + + - name: "Upload coverage to Codecov" + uses: "codecov/codecov-action@v2" + with: + fail_ci_if_error: true \ No newline at end of file From da6c316eea84e9679dec0d8784b60da0a5c32e53 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Apr 2024 00:35:06 +0000 Subject: [PATCH 19/75] Fixing some whitespace linting --- src/omlt/neuralnet/layer.py | 3 ++- tests/neuralnet/test_nn_formulation.py | 15 +++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py index fd1b3234..16e068a3 100644 --- a/src/omlt/neuralnet/layer.py +++ b/src/omlt/neuralnet/layer.py @@ -16,6 +16,7 @@ \end{align*} """ + import itertools import numpy as np @@ -254,7 +255,7 @@ class GNNLayer(DenseLayer): .. math:: \begin{align*} - y_j = \sigma \left(\sum\limits_{i=0}^{F_{in}-1}A_{u,v}w_{ij}x_i+b_j\right), && \forall 0\le j Date: Sun, 21 Apr 2024 18:05:47 -0700 Subject: [PATCH 20/75] Update setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7b4233e8..7ea8599e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,14 +72,14 @@ testing = nbmake tox flake8 - tensorflow + tensorflow-cpu ipywidgets jupyter lightgbm linear-tree matplotlib pandas - keras + keras==2.9.0 onnx onnxruntime onnxmltools From b0c5ff1ec1b8bf02cff9fea1e2291a3f37aca989 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:33:14 -0700 Subject: [PATCH 21/75] Update setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7ea8599e..cb8d5e2e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,7 +48,7 @@ python_requires = >=3.7 install_requires = importlib-metadata; python_version<"3.8" networkx - pyomo + pyomo==6.6.2 numpy protobuf==3.20.3 @@ -79,7 +79,7 @@ testing = linear-tree matplotlib pandas - keras==2.9.0 + keras>=3.0 onnx onnxruntime onnxmltools From e151c6c85fe9da63127a2a74ebe5389f09069578 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:38:30 -0700 Subject: [PATCH 22/75] Update Python versions in main.yml --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 528fdaf0..afde145f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -16,7 +16,7 @@ strategy: matrix: # python-version: ["3.7", "3.8", "3.9"] - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - uses: "actions/checkout@v2" @@ -54,4 +54,4 @@ - name: "Upload coverage to Codecov" uses: "codecov/codecov-action@v2" with: - fail_ci_if_error: true \ No newline at end of file + fail_ci_if_error: true From f7e1e7f759f3f66fa69aa4c88366aa9377ad99fa Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: 
Fri, 17 May 2024 11:39:03 -0700 Subject: [PATCH 23/75] Update setup.cfg for Keras version --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7ea8599e..cb8d5e2e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,7 +48,7 @@ python_requires = >=3.7 install_requires = importlib-metadata; python_version<"3.8" networkx - pyomo + pyomo==6.6.2 numpy protobuf==3.20.3 @@ -79,7 +79,7 @@ testing = linear-tree matplotlib pandas - keras==2.9.0 + keras>=3.0 onnx onnxruntime onnxmltools From 63798c322f313490a771d6182ddffa11e4618b67 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:40:35 -0700 Subject: [PATCH 24/75] Update main.yml --- .github/workflows/main.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index afde145f..3f9a8f8d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,7 +15,6 @@ strategy: matrix: - # python-version: ["3.7", "3.8", "3.9"] python-version: ["3.9", "3.10", "3.11", "3.12"] steps: @@ -52,6 +51,6 @@ run: "python -m coverage xml" - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v2" + uses: "codecov/codecov-action@v4" with: fail_ci_if_error: true From 175f61353fa183420ff0196d8aff45e5fe1ea510 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:42:42 -0700 Subject: [PATCH 25/75] Update main.yml --- .github/workflows/main.yml | 111 +++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3f9a8f8d..34aef0e0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,56 +1,57 @@ --- - name: CI - - on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - - jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v4" - with: - fail_ci_if_error: true +name: CI + +on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + +jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: "actions/checkout@v2" + - uses: 
"actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions + conda install -c conda-forge ipopt + conda install -c conda-forge pyscipopt + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" + run: "tox -re leanenv" + + - name: "Run tox targets for ${{ matrix.python-version }}" + run: "tox" + + # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" + # run: | + # shopt -s globstar + # tox -e notebooks docs/**/*.ipynb + + - name: "Convert coverage" + run: "python -m coverage xml" + + - name: "Upload coverage to Codecov" + uses: "codecov/codecov-action@v4" + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true From 40ef6b1c81da3c9e884b1d3d2558583b48ac6966 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:44:36 -0700 Subject: [PATCH 26/75] Update main.yml --- .github/workflows/main.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 34aef0e0..bd222b2f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,17 +36,13 @@ jobs: python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions conda install -c conda-forge ipopt conda install -c conda-forge pyscipopt + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" run: "tox -re leanenv" - name: "Run tox targets for ${{ matrix.python-version }}" run: "tox" - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - name: "Convert coverage" run: "python -m coverage xml" From e2224242898f0a4958e606a6400286ff082c6283 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:45:42 -0700 Subject: [PATCH 27/75] Update main.yml --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index bd222b2f..2c92b089 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,7 +36,7 @@ jobs: python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions conda install -c conda-forge ipopt conda install -c conda-forge pyscipopt - + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" run: "tox -re leanenv" @@ -49,5 +49,5 @@ jobs: - name: "Upload coverage to Codecov" uses: "codecov/codecov-action@v4" with: - token: ${{ secrets.CODECOV_TOKEN }} - fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true From 0babe628f5e8f317092f589fe17d280d23e0eed9 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:04:57 +0100 Subject: [PATCH 28/75] wip --- Makefile | 10 --- justfile | 58 +++++++++++++++++ pyproject.toml | 107 +++++++++++++++++++++++++++++-- setup.cfg | 171 ------------------------------------------------- setup.py | 21 ------ tox.ini | 117 --------------------------------- 6 files changed, 161 insertions(+), 323 deletions(-) delete mode 100644 Makefile create 
mode 100644 justfile delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 tox.ini diff --git a/Makefile b/Makefile deleted file mode 100644 index cba83db0..00000000 --- a/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: develop docs test - -develop: - python -m pip install -e .[testing] - -docs: - python -m tox -e docs - -test: - python -m tox \ No newline at end of file diff --git a/justfile b/justfile new file mode 100644 index 00000000..bfea2cb3 --- /dev/null +++ b/justfile @@ -0,0 +1,58 @@ +# List all commands. +default: + @just --list + +# Build docs. +docs: + rm -rf docs/build docs/source/_autosummary + make -C docs html + echo Docs are in $PWD/docs/build/html/index.html + +conda-deps := " \ + conda-forge::ipopt \ + conda-forge::pyscipopt \ + conda-forge::coin-or-cbc \ +" + +# Do a dev install. +dev: + pip install -e '.[dev]' + conda install {{conda-deps}} + +# Do a dev install with GPU support. +dev-gpu: + pip install -e '.[dev-gpu]' + conda install {{conda-deps}} + +# Run code checks. +check: + #!/usr/bin/env bash + + error=0 + trap error=1 ERR + + echo + (set -x; ruff check src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; ruff format --check src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; mypy src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; pytest ) + + echo + ( set -x; make -C docs doctest ) + + test $error = 0 + +# Auto-fix code issues. +fix: + ruff format src/ tests/ docs/source/ examples/ + ruff check --fix src/ tests/ docs/source/ examples/ + +# Build a release. +build: + python -m build diff --git a/pyproject.toml b/pyproject.toml index 2c63dbb2..e44fbb28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,107 @@ [build-system] -# AVOID CHANGING REQUIRES: IT WILL BE UPDATED BY PYSCAFFOLD! -requires = ["setuptools>=46.1.0", "setuptools_scm[toml]>=5", "wheel"] +requires = ["setuptools", "setuptools_scm"] build-backend = "setuptools.build_meta" +[project] +name = "omlt" +authors = [ + { name = "The OMLT Developers", email = "omlt@googlegroups.com" }, +] + +dependencies = [ + "networkx", + "numpy", + "pyomo", + "onnx", + "onnxruntime", +] +requires-python = ">=3.7" +dynamic = ["version"] +readme = "README.rst" +license = { file = "LICENSE.rst" } +description = "OMLT is a Python package for representing machine learning models (such as neural networks) within the Pyomo optimization environment." 
+ +[project.optional-dependencies] +linear-tree = ["linear-tree"] +keras = ["tensorflow", "keras"] +keras-gpu = ["tensorflow[and-cuda]", "keras"] +torch = ["torch", "torch-geometric"] +dev-tools = [ + "ruff", + "mypy", + "pytest", + "pytest-cov", + "sphinx", + "sphinx-copybutton", + "build", + "twine", + "furo", + "testbook", + "notebook", + "pandas", + "matplotlib", + "gurobipy", + "torchvision", + "tf2onnx", +] +dev = [ + "omlt[dev-tools,keras,torch,linear-tree]", +] +dev-gpu = [ + "omlt[dev-tools,keras-gpu,torch,linear-tree]", +] + + +[project.urls] +github = "https://github.com/cog-imperial/OMLT" +x = "https://x.com/cogimperial" +documentation = "https://omlt.readthedocs.io" + [tool.setuptools_scm] -# See configuration details in https://github.com/pypa/setuptools_scm -version_scheme = "no-guess-dev" + +[tool.ruff] +line-length = 88 + +[tool.ruff.lint] +select = ["ALL"] +ignore = ["ANN101", "ANN401", "COM812", "ISC001"] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.per-file-ignores] +"tests/*" = [ + "D100", + "D101", + "D102", + "D103", + "D104", + "D105", + "D106", + "D107", + "S101", + "INP001", +] +"examples/*" = [ + "INP001", +] +"docs/source/conf.py" = ["D100", "INP001"] + +[tool.mypy] +show_error_codes = true +implicit_optional = false +warn_no_return = true +strict_optional = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +warn_unreachable = true +disallow_any_generics = true + +[[tool.mypy.overrides]] +module = [] +ignore_missing_imports = true + +[tool.pytest.ini_options] +addopts = "--cov omlt --cov-report term-missing --verbose" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index cb8d5e2e..00000000 --- a/setup.cfg +++ /dev/null @@ -1,171 +0,0 @@ -# This file is used to configure your project. -# Read more about the various options under: -# http://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files - -[metadata] -name = omlt -description = OMLT is a Python package for representing machine learning models (such as neural networks) within the Pyomo optimization environment. -author = The OMLT Developers -author_email = omlt@googlegroups.com -license = BSD 3-Clause -long_description = file: README.rst -long_description_content_type = text/x-rst; charset=UTF-8 -url = https://github.com/cog-imperial/OMLT/ -# Add here related links, for example: -project_urls = - Source = https://github.com/cog-imperial/OMLT/ - Twitter = https://twitter.com/cogimperial -# Changelog = https://pyscaffold.org/en/latest/changelog.html -# Tracker = https://github.com/pyscaffold/pyscaffold/issues -# Conda-Forge = https://anaconda.org/conda-forge/pyscaffold -# Download = https://pypi.org/project/PyScaffold/#files - -# Change if running only on Windows, Mac or Linux (comma-separated) -platforms = any - -# Add here all kinds of additional classifiers as defined under -# https://pypi.python.org/pypi?%3Aaction=list_classifiers -classifiers = - Development Status :: 4 - Beta - Programming Language :: Python - - -[options] -zip_safe = False -packages = find_namespace: -include_package_data = True -package_dir = - =src - -# Require a min/specific Python version (comma-separated conditions) -# OMLT currently supports Python 3.7 and above -python_requires = >=3.7 - -# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. -# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in -# new major versions. 
This works if the required packages follow Semantic Versioning. -# For more information, check out https://semver.org/. -install_requires = - importlib-metadata; python_version<"3.8" - networkx - pyomo==6.6.2 - numpy - protobuf==3.20.3 - - -[options.packages.find] -where = src -exclude = - tests - -[options.extras_require] -# Add here additional requirements for extra features, to install with: -# `pip install omlt[PDF]` like: -# PDF = ReportLab; RXP - -# Add here test requirements (semicolon/line-separated) -testing = - setuptools - pytest - pytest-cov - testbook - nbmake - tox - flake8 - tensorflow-cpu - ipywidgets - jupyter - lightgbm - linear-tree - matplotlib - pandas - keras>=3.0 - onnx - onnxruntime - onnxmltools - tf2onnx>=1.12 - torch - torchvision - tqdm - protobuf==3.20.3 - torch_geometric - -testing_lean = - setuptools - pytest - pytest-cov - testbook - nbmake - tox - flake8 - ipywidgets - jupyter - lightgbm - matplotlib - pandas - torch - torchvision - tqdm - -[options.entry_points] -# Add here console scripts like: -# console_scripts = -# script_name = omlt.module:function -# For example: -# console_scripts = -# fibonacci = omlt.skeleton:run -# And any other entry points, for example: -# pyscaffold.cli = -# awesome = pyscaffoldext.awesome.extension:AwesomeExtension - -[tool:pytest] -# Specify command line options as you would do when invoking pytest directly. -# e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml -# in order to write a coverage file that can be read by Jenkins. -# CAUTION: --cov flags may prohibit setting breakpoints while debugging. -# Comment those flags to avoid this py.test issue. -addopts = - --cov omlt --cov-report term-missing - --verbose -norecursedirs = - dist - build - .tox -testpaths = tests -# Use pytest markers to select/deselect specific tests -# markers = -# slow: mark tests as slow (deselect with '-m "not slow"') -# system: mark end-to-end system tests - -[bdist_wheel] -# Use this option if your package is pure-python -universal = 1 - -[devpi:upload] -# Options for the devpi: PyPI server and packaging tool -# VCS export must be deactivated since we are using setuptools-scm -no_vcs = 1 -formats = bdist_wheel - -[flake8] -# Some sane defaults for the code style checker flake8 -max_line_length = 88 -extend_ignore = E203, W503 -# ^ Black-compatible -# E203 and W503 have edge cases handled by black -exclude = - .tox - build - dist - .eggs - docs/conf.py -per_file_ignores = - # ignore docstrings in tests - tests/*:D100,D101,D102,D103,D104,D105,D106,D107 - -[pyscaffold] -# PyScaffold's parameters when the project was created. -# This will be used when updating. Do not change! -version = 4.0.2 -package = omlt -extensions = diff --git a/setup.py b/setup.py deleted file mode 100644 index 57314fee..00000000 --- a/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -""" - Setup file for omlt. - Use setup.cfg to configure your project. - - This file was generated with PyScaffold 4.0.2. - PyScaffold helps you to put up the scaffold of your new Python project. 
- Learn more under: https://pyscaffold.org/ -""" -from setuptools import setup - -if __name__ == "__main__": - try: - setup(use_scm_version={"version_scheme": "no-guess-dev"}) - except: # noqa - print( - "\n\nAn error occurred while building the project, " - "please ensure you have the most updated version of setuptools, " - "setuptools_scm and wheel with:\n" - " pip install -U setuptools setuptools_scm wheel\n\n" - ) - raise diff --git a/tox.ini b/tox.ini deleted file mode 100644 index e64ab1d8..00000000 --- a/tox.ini +++ /dev/null @@ -1,117 +0,0 @@ -# Tox configuration file -# Read more under https://tox.readthedocs.org/ -# THIS SCRIPT IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS! - -[tox] -minversion = 3.15 -envlist = py36, py37, py38, py39, py310, py311, py312, lint - -[gh-actions] -python = - 3.6: py36 - 3.7: py37 - 3.8: py38 - 3.9: lint, py39 - 3.10: py310 - 3.11: py311 - 3.12: py312 - -[testenv] -deps = pytest -extras = testing -commands = pytest {posargs} - -#[testenv:fullenv] -#description = Testing with full dependencies -#deps = pytest -#extras = testing -#commands = pytest {posargs} - -[testenv:leanenv] -description = Testing with fewer dependencies -deps = pytest -extras = testing_lean -commands = pytest {posargs} - -[testenv:notebooks] -deps = pytest -extras = testing -commands = pytest --nbmake --cov-append {posargs} - -[testenv:{clean,build}] -description = - Build (or clean) the package in isolation according to instructions in: - https://setuptools.readthedocs.io/en/latest/build_meta.html#how-to-use-it - https://github.com/pypa/pep517/issues/91 - https://github.com/pypa/build -# NOTE: build is still experimental, please refer to the links for updates/issues -skip_install = True -changedir = {toxinidir} -deps = - build: build[virtualenv] -commands = - clean: python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)' - build: python -m build . -# By default `build` produces wheels, you can also explicitly use the flags `--sdist` and `--wheel` - - -[testenv:{docs,doctests}] -description = invoke sphinx-build to build the docs/run doctests -setenv = - DOCSDIR = {toxinidir}/docs - BUILDDIR = {toxinidir}/docs/_build - docs: BUILD = html - doctests: BUILD = doctest -deps = - -r {toxinidir}/docs/requirements.txt - # ^ requirements.txt shared with Read The Docs -commands = - sphinx-build -b {env:BUILD} -d "{env:BUILDDIR}/doctrees" "{env:DOCSDIR}" "{env:BUILDDIR}/{env:BUILD}" {posargs} - - -[testenv:publish] -description = - Publish the package you have been developing to a package index server. - By default, it uses testpypi. If you really want to publish your package - to be publicly accessible in PyPI, use the `-- --repository pypi` option. 
-skip_install = True -changedir = {toxinidir} -passenv = - TWINE_USERNAME - TWINE_PASSWORD - TWINE_REPOSITORY -deps = twine -commands = - python -m twine check dist/* - python -m twine upload {posargs:--repository testpypi} dist/* - -[flake8] -extend-ignore = D, E, F, N -per-file-ignores = __init__.py:F401 - -[testenv:lint] -description = Lint files using isort, black, and flake8 -skip_install = True -changedir = {toxinidir} -deps = - black - flake8 - flake8-bugbear - flake8-docstrings - isort - pep8-naming -commands = - flake8 --config=tox.ini src/omlt tests/ - black --check --diff src/omlt tests/ - -[testenv:format] -description = Format Python files using isort and black -skip_install = true -changedir = {toxinidir} -deps = - black - typing-extensions - isort -commands = - isort src/omlt tests - black src/omlt tests From 30a36d12dbbe5d9cb170f6c9b5ab9c8b079273b4 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:20:10 +0100 Subject: [PATCH 29/75] Add workflows --- .github/workflows/publish_release.yml | 23 +++++++++ .github/workflows/tests.yml | 73 +++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 .github/workflows/publish_release.yml create mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/publish_release.yml b/.github/workflows/publish_release.yml new file mode 100644 index 00000000..20a9e104 --- /dev/null +++ b/.github/workflows/publish_release.yml @@ -0,0 +1,23 @@ +name: Publish release +on: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' +jobs: + publish-release: + runs-on: ubuntu-22.04 + env: + VERSION: ${{ github.ref_name }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + cache: "pip" + - run: pip install -e '.[dev]' + - run: python -m build + - run: + twine upload + -u __token__ + -p ${{ secrets.PYPI_API_TOKEN }} + dist/* diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..155713d8 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,73 @@ +name: Tests +on: + push: + branches: + - main + pull_request: + workflow_dispatch: +jobs: + ruff: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: ruff check src/ tests/ docs/source/ examples/ + mypy: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: mypy src/ tests/ docs/source/ examples/ + ruff-format: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: ruff format --check src/ tests/ docs/source/ examples/ + pytest: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: pytest --cov=src --cov-report term-missing + doctest: + strategy: + matrix: + 
python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: make -C docs doctest From 027703f62fe4b805a7a95ec2701485e8c889f7d6 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:41:59 +0100 Subject: [PATCH 30/75] wip --- .github/workflows/tests.yml | 6 +++--- justfile | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 155713d8..8b25e5a3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: ruff check src/ tests/ docs/source/ examples/ + - run: ruff check src/ tests/ docs/ mypy: strategy: matrix: @@ -31,7 +31,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: mypy src/ tests/ docs/source/ examples/ + - run: mypy src/ tests/ docs/ ruff-format: strategy: matrix: @@ -44,7 +44,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: ruff format --check src/ tests/ docs/source/ examples/ + - run: ruff format --check src/ tests/ docs/ pytest: strategy: matrix: diff --git a/justfile b/justfile index bfea2cb3..fcf1dc4d 100644 --- a/justfile +++ b/justfile @@ -32,13 +32,13 @@ check: trap error=1 ERR echo - (set -x; ruff check src/ tests/ docs/source/ examples/ ) + (set -x; ruff check src/ tests/ docs/ ) echo - ( set -x; ruff format --check src/ tests/ docs/source/ examples/ ) + ( set -x; ruff format --check src/ tests/ docs/ ) echo - ( set -x; mypy src/ tests/ docs/source/ examples/ ) + ( set -x; mypy src/ tests/ docs/ ) echo ( set -x; pytest ) @@ -50,8 +50,8 @@ check: # Auto-fix code issues. fix: - ruff format src/ tests/ docs/source/ examples/ - ruff check --fix src/ tests/ docs/source/ examples/ + ruff format src/ tests/ docs/ + ruff check --fix src/ tests/ docs/ # Build a release. build: From 3959e6a85a2363ca35e61fe8ecb08ae05deb7fee Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 15:43:30 +0100 Subject: [PATCH 31/75] Add stuff --- .gitignore | 2 ++ .readthedocs.yml | 9 +++++---- docs/requirements.txt | 10 ---------- environment.yml | 6 ++++++ justfile | 10 ++-------- pyproject.toml | 9 ++++++++- 6 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 docs/requirements.txt create mode 100644 environment.yml diff --git a/.gitignore b/.gitignore index 1b77d315..4fbb7dfd 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,8 @@ coverage.xml *.py,cover .hypothesis/ .pytest_cache/ +docs/notebooks/data/MNIST +docs/notebooks/neuralnet/*.keras # Translations *.mo diff --git a/.readthedocs.yml b/.readthedocs.yml index 6e41af22..2cb5498c 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -16,7 +16,7 @@ build: tools: - python: "3.8" + python: "3.12" # You can also specify other tool versions: @@ -58,7 +58,8 @@ sphinx: # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: - install: - - - requirements: docs/requirements.txt \ No newline at end of file + - method: pip + path: . 
+ extra_requirements: + - docs diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 6305e50b..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# Required dependencies for Sphinx documentation -sphinx -sphinx-rtd-theme -numpy -pyomo -networkx -onnx -tensorflow -linear-tree -importlib-metadata \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..69566368 --- /dev/null +++ b/environment.yml @@ -0,0 +1,6 @@ +channels: + - conda-forge +dependencies: + - ipopt + - pyscipopt + - coin-or-cbc diff --git a/justfile b/justfile index fcf1dc4d..dccccbfc 100644 --- a/justfile +++ b/justfile @@ -8,21 +8,15 @@ docs: make -C docs html echo Docs are in $PWD/docs/build/html/index.html -conda-deps := " \ - conda-forge::ipopt \ - conda-forge::pyscipopt \ - conda-forge::coin-or-cbc \ -" - # Do a dev install. dev: pip install -e '.[dev]' - conda install {{conda-deps}} + conda env update --file environment.yml # Do a dev install with GPU support. dev-gpu: pip install -e '.[dev-gpu]' - conda install {{conda-deps}} + conda env update --file environment.yml # Run code checks. check: diff --git a/pyproject.toml b/pyproject.toml index e44fbb28..2db39b62 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,8 @@ authors = [ dependencies = [ "networkx", "numpy", - "pyomo", + # TODO: Remove constraint when fix to https://github.com/Pyomo/pyomo/issues/3262 is released + "pyomo==6.6.2", "onnx", "onnxruntime", ] @@ -50,6 +51,12 @@ dev = [ dev-gpu = [ "omlt[dev-tools,keras-gpu,torch,linear-tree]", ] +docs = [ + "sphinx", + "sphinx-rtd-theme", + "tensorflow", + "linear-tree", +] [project.urls] From c3b619b462c5f7b04f6a6c4060b063235559bf0c Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 15:45:45 +0100 Subject: [PATCH 32/75] Fix formatting --- src/omlt/neuralnet/activations/relu.py | 4 +--- src/omlt/neuralnet/layers/full_space.py | 8 ++------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 427be19a..e14718d7 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -77,9 +77,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): output_index ] <= layer_block.zhat[output_index] - layer_block._big_m_lb_relu[ output_index - ] * ( - 1.0 - layer_block.q_relu[output_index] - ) + ] * (1.0 - layer_block.q_relu[output_index]) class ComplementarityReLUActivation: diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 8970bc69..3e0e1e2a 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -131,9 +131,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): local_index, output_node_index ] = input_layer_block.zbar[ local_index, output_node_index - ] >= input_layer_block.z[ - input_index - ] - ub * ( + ] >= input_layer_block.z[input_index] - ub * ( 1.0 - net_block.A[input_node_index, output_node_index] ) @@ -141,9 +139,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): local_index, output_node_index ] = input_layer_block.zbar[ local_index, output_node_index - ] <= input_layer_block.z[ - input_index - ] - lb * ( + ] <= input_layer_block.z[input_index] - lb * ( 1.0 - net_block.A[input_node_index, output_node_index] ) From 691d3a6179b7ed0ab3701ac1aa3d42cbc765c03f Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 
May 2024 16:16:38 +0100 Subject: [PATCH 33/75] wip --- {docs => .github}/pull_request_template.md | 0 .github/workflows/tests.yml | 2 +- .gitignore | 1 + docs/Makefile | 15 +- docs/conf.py | 176 +-------------------- justfile | 4 +- pyproject.toml | 2 +- src/omlt/py.typed | 0 8 files changed, 13 insertions(+), 187 deletions(-) rename {docs => .github}/pull_request_template.md (100%) create mode 100644 src/omlt/py.typed diff --git a/docs/pull_request_template.md b/.github/pull_request_template.md similarity index 100% rename from docs/pull_request_template.md rename to .github/pull_request_template.md diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8b25e5a3..4201663f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -57,7 +57,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: pytest --cov=src --cov-report term-missing + - run: pytest doctest: strategy: matrix: diff --git a/.gitignore b/.gitignore index 4fbb7dfd..d6b1a635 100644 --- a/.gitignore +++ b/.gitignore @@ -72,6 +72,7 @@ instance/ # Sphinx documentation docs/_build/ +docs/_autosummary # PyBuilder target/ diff --git a/docs/Makefile b/docs/Makefile index 95d96808..5117fbf5 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,27 +1,18 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. -SPHINXOPTS ?= +SPHINXOPTS ?= -W --keep-going SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build -#AUTODOCDIR = api - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $?), 1) -$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/") -endif - -.PHONY: help clean Makefile # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -clean: - rm -rf $(BUILDDIR)/* #$(AUTODOCDIR) +.PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/docs/conf.py b/docs/conf.py index a85d176d..c0a58377 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,70 +1,7 @@ -# This file is execfile()d with the current directory set to its containing dir. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import inspect -import os -import shutil import sys -# -- Path setup -------------------------------------------------------------- - -__location__ = os.path.join( - os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())) -) - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
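The block being deleted in this hunk is the classic pre-installation Sphinx workaround: conf.py pushes the package's source tree onto sys.path so that autodoc can import it without an install. A minimal sketch of that legacy pattern (the relative path form here is illustrative, not the exact code being removed):

    # Legacy conf.py pattern: make an uninstalled package importable for
    # autodoc by prepending its source directory to sys.path.
    import os
    import sys

    sys.path.insert(0, os.path.abspath("../src"))  # docs/ sits next to src/

The series drops this in favour of actually installing the package (via the new docs extra added to pyproject.toml), after which conf.py can simply import omlt.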
-sys.path.insert(0, os.path.join(__location__, "../src")) - -# -- Run sphinx-apidoc ------------------------------------------------------- -# This hack is necessary since RTD does not issue `sphinx-apidoc` before running -# `sphinx-build -b html . _build/html`. See Issue: -# https://github.com/rtfd/readthedocs.org/issues/1139 -# DON'T FORGET: Check the box "Install your project inside a virtualenv using -# setup.py install" in the RTD Advanced Settings. -# Additionally it helps us to avoid running apidoc manually - -try: # for Sphinx >= 1.7 - from sphinx.ext import apidoc -except ImportError: - from sphinx import apidoc - -# output_dir = os.path.join(__location__, "api") -# module_dir = os.path.join(__location__, "../src/omlt") -# try: -# shutil.rmtree(output_dir) -# except FileNotFoundError: -# pass - -# try: -# import sphinx - -# cmd_line_template = ( -# "sphinx-apidoc --implicit-namespaces -f -o {outputdir} {moduledir}" -# ) -# cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) - -# args = cmd_line.split(" ") -# if tuple(sphinx.__version__.split(".")) >= ("1", "7"): -# # This is a rudimentary parse_version to avoid external dependencies -# args = args[1:] - -# apidoc.main(args) -# except Exception as e: -# print("Running `sphinx-apidoc` failed!\n{}".format(e)) - # -- General configuration --------------------------------------------------- -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ @@ -86,15 +23,12 @@ # The suffix of source filenames. source_suffix = ".rst" -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - # General information about the project. project = "omlt" -copyright = "2022, Carnegie Mellon University, Imperial College London, Sandia National Laboratories" +project_copyright = ( + "2022, Carnegie Mellon University, " + "Imperial College London, Sandia National Laboratories" +) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -105,49 +39,20 @@ # The full version, including alpha/beta/rc tags. release = "" # Is set by calling `setup.py docs` -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv", "notebooks"] -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -# html_theme = "furo" html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme @@ -164,8 +69,6 @@ }, } -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". @@ -176,64 +79,16 @@ else: release = version -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/omlt_logo.png" -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - # Output file base name for HTML help builder. htmlhelp_basename = "omlt-doc" @@ -255,31 +110,10 @@ ("index", "user_guide.tex", "OMLT Documentation", "The OMLT Developers", "manual") ] -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = "" - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. 
-# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - # -- External mapping -------------------------------------------------------- -python_version = ".".join(map(str, sys.version_info[0:2])) intersphinx_mapping = { "sphinx": ("http://www.sphinx-doc.org/en/stable", None), - "python": ("https://docs.python.org/" + python_version, None), + "python": ("https://docs.python.org/3", None), "matplotlib": ("https://matplotlib.org", None), "numpy": ("https://docs.scipy.org/doc/numpy", None), "sklearn": ("https://scikit-learn.org/stable", None), diff --git a/justfile b/justfile index dccccbfc..30eddd46 100644 --- a/justfile +++ b/justfile @@ -4,9 +4,9 @@ default: # Build docs. docs: - rm -rf docs/build docs/source/_autosummary + rm -rf docs/_build docs/_autosummary make -C docs html - echo Docs are in $PWD/docs/build/html/index.html + echo Docs are in $PWD/docs/_build/html/index.html # Do a dev install. dev: diff --git a/pyproject.toml b/pyproject.toml index 2db39b62..aa773d6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ convention = "google" "examples/*" = [ "INP001", ] -"docs/source/conf.py" = ["D100", "INP001"] +"docs/conf.py" = ["D100", "INP001"] [tool.mypy] show_error_codes = true diff --git a/src/omlt/py.typed b/src/omlt/py.typed new file mode 100644 index 00000000..e69de29b From 8a896baf6ebc7582b8ff86ac28c7d4e1542c8146 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 16:25:46 +0100 Subject: [PATCH 34/75] wip --- docs/conf.py | 47 ++++------------------------------------------- 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index c0a58377..575f2b79 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,4 @@ -import sys +import omlt # -- General configuration --------------------------------------------------- @@ -30,19 +30,10 @@ "Imperial College London, Sandia National Laboratories" ) -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "" # Is set by calling `setup.py docs` -# The full version, including alpha/beta/rc tags. -release = "" # Is set by calling `setup.py docs` - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv", "notebooks"] +exclude_patterns = ["_build"] # The name of the Pygments (syntax highlighting) style to use. @@ -55,29 +46,8 @@ # a list of builtin themes. html_theme = "sphinx_rtd_theme" -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "light_css_variables": { - "color-brand-primary": "#003E74", - "color-brand-content": "#002147", - }, - "dark_css_variables": { - "color-brand-primary": "#0091D4", - "color-brand-content": "#D4EFFC", - }, -} - - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -try: - from omlt import __version__ as version -except ImportError: - pass -else: - release = version +version = omlt.__version__ +release = omlt.__version__ # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
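Between this conf.py change and the pyproject.toml change in the following patch, the version string becomes single-sourced: setuptools_scm writes src/omlt/_version.py at build time, the package re-exports it, and Sphinx reads it back from the installed package. Condensed into one sketch (the pieces live in separate files, exactly as shown in the surrounding diffs):

    # pyproject.toml tells setuptools_scm where to write the version file:
    #   [tool.setuptools_scm]
    #   write_to = "src/omlt/_version.py"

    # src/omlt/__init__.py re-exports the generated value:
    from omlt._version import __version__

    # docs/conf.py then derives both Sphinx version fields from the package:
    import omlt

    version = omlt.__version__
    release = omlt.__version__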
@@ -95,15 +65,6 @@ # -- Options for LaTeX output ------------------------------------------------ -latex_elements = { - # The paper size ("letterpaper" or "a4paper"). - # "papersize": "letterpaper", - # The font size ("10pt", "11pt" or "12pt"). - # "pointsize": "10pt", - # Additional stuff for the LaTeX preamble. - # "preamble": "", -} - # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ From 25d31fec40af8225b90d348bd3657cba6c5d5583 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 16:55:36 +0100 Subject: [PATCH 35/75] docs work --- .gitignore | 1 + docs/installation.rst | 4 ++-- pyproject.toml | 15 ++++++++------- src/omlt/__init__.py | 27 +++++++-------------------- 4 files changed, 18 insertions(+), 29 deletions(-) diff --git a/.gitignore b/.gitignore index d6b1a635..243e16e1 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST +src/omlt/_version.py # PyInstaller # Usually these files are written by a python script from a template diff --git a/docs/installation.rst b/docs/installation.rst index 15b116ae..2c586bb0 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -1,5 +1,5 @@ Installation -============== +============ OMLT requires Python >= 3.6. The most stable OMLT version can be installed using the PyPI package index. This will also install the required dependencies. Simply run: :: @@ -13,7 +13,7 @@ If using the latest unreleased version, install from the GitHub repository and Optional Requirements -------------- +--------------------- OMLT can import sequential Keras models which requires a working installation of tensorflow: :: diff --git a/pyproject.toml b/pyproject.toml index aa773d6c..1e85484e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "onnx", "onnxruntime", ] -requires-python = ">=3.7" +requires-python = ">=3.9" dynamic = ["version"] readme = "README.rst" license = { file = "LICENSE.rst" } @@ -45,18 +45,18 @@ dev-tools = [ "torchvision", "tf2onnx", ] -dev = [ - "omlt[dev-tools,keras,torch,linear-tree]", -] -dev-gpu = [ - "omlt[dev-tools,keras-gpu,torch,linear-tree]", -] docs = [ "sphinx", "sphinx-rtd-theme", "tensorflow", "linear-tree", ] +dev = [ + "omlt[dev-tools,keras,torch,linear-tree,docs]", +] +dev-gpu = [ + "omlt[dev-tools,keras-gpu,torch,linear-tree,docs]", +] [project.urls] @@ -65,6 +65,7 @@ x = "https://x.com/cogimperial" documentation = "https://omlt.readthedocs.io" [tool.setuptools_scm] +write_to = "src/omlt/_version.py" [tool.ruff] line-length = 88 diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 12aafdd5..8e702f0b 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -9,25 +9,12 @@ """ -import sys - -if sys.version_info[:2] >= (3, 8): - # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` - from importlib.metadata import PackageNotFoundError # pragma: no cover - from importlib.metadata import version -else: - from importlib_metadata import PackageNotFoundError # pragma: no cover - from importlib_metadata import version - -try: - # Change here if project is renamed and does not equal the package name - dist_name = __name__ - __version__ = version(dist_name) -except PackageNotFoundError: # pragma: no cover - __version__ = "unknown" -finally: - del version, PackageNotFoundError - -# Top level exports +from omlt._version import __version__ from omlt.block import OmltBlock from
omlt.scaling import OffsetScaling + +__all__ = [ + "OmltBlock", + "OffsetScaling", + "__version__", +] From c02be6771def4183dc3de9017718995e7fe33ce2 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 17:06:23 +0100 Subject: [PATCH 36/75] wip --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1e85484e..ec31ee06 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,7 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] -ignore = ["ANN101", "ANN401", "COM812", "ISC001"] +ignore = ["ANN101", "ANN401"] [tool.ruff.lint.pydocstyle] convention = "google" From d8390716e05d5724bf0fadb1a7f572eb60d209fb Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:06:33 +0100 Subject: [PATCH 37/75] Update checks --- pyproject.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ec31ee06..dffd39e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,16 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] -ignore = ["ANN101", "ANN401"] +ignore = [ + "ANN101", + "ANN401", + # Remove these eventually + "ANN001", + "ANN002", + "ANN201", + "ANN202", + "ANN204", +] [tool.ruff.lint.pydocstyle] convention = "google" From 77c05880f0a7b25a19f8dd67f6d0bc36fc960b46 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:10:14 +0100 Subject: [PATCH 38/75] update checks --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index dffd39e8..ad26567a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,12 +69,15 @@ write_to = "src/omlt/_version.py" [tool.ruff] line-length = 88 +extend-exclude = ["src/omlt/_version.py"] [tool.ruff.lint] select = ["ALL"] ignore = [ "ANN101", "ANN401", + "COM812", + "ISC001", # Remove these eventually "ANN001", "ANN002", From a61fb827cbddd0bf8c6d7a55826143f5c3a52749 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:21:39 +0100 Subject: [PATCH 39/75] update checks --- pyproject.toml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ad26567a..4ad1ca44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,7 @@ ignore = [ "ANN401", "COM812", "ISC001", - # Remove these eventually + # TODO: Remove these eventually "ANN001", "ANN002", "ANN201", @@ -112,7 +112,8 @@ show_error_codes = true implicit_optional = false warn_no_return = true strict_optional = true -disallow_untyped_defs = true +# TODO: Enable eventually +# disallow_untyped_defs = true disallow_incomplete_defs = true check_untyped_defs = true disallow_untyped_decorators = true @@ -120,7 +121,19 @@ warn_unreachable = true disallow_any_generics = true [[tool.mypy.overrides]] -module = [] +module = [ + "pandas.*", + "networkx.*", + "tf2onnx.*", + "onnxruntime.*", + "lineartree.*", + "sklearn.*", + "testbook.*", + "pyomo.*", + "keras.*", + "tensorflow.*", + "torch_geometric.*", +] ignore_missing_imports = true [tool.pytest.ini_options] From e0b35b21725024bdbeb5045c3c99024c7f789d66 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:34:35 +0100 Subject: [PATCH 40/75] Add conda --- .github/workflows/tests.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4201663f..419eadc9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -56,6 +56,15 @@ jobs: with: python-version: ${{ 
matrix.python-version }} cache: "pip" + - uses: actions/cache@v3 + with: + path: ~/conda_pkgs_dir + key: ${{ runner.os }}-conda-${{ hashFiles('environment.yml') }} + - uses: conda-incubator/setup-miniconda@v3 + with: + channel-priority: strict + environment-file: environment.yml + use-only-tar-bz2: true - run: pip install -e '.[dev]' - run: pytest doctest: From 1a134310423e487db344509a3f4bff28b18a3e17 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:37:53 +0100 Subject: [PATCH 41/75] Thing --- .github/workflows/main.yml | 53 ------------------------------------- .github/workflows/tests.yml | 5 ++++ 2 files changed, 5 insertions(+), 53 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 2c92b089..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v4" - with: - token: ${{ secrets.CODECOV_TOKEN }} - fail_ci_if_error: true diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 419eadc9..7f662cb9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -67,6 +67,11 @@ jobs: use-only-tar-bz2: true - run: pip install -e '.[dev]' - run: pytest + - run: python -m coverage xml + - uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true doctest: strategy: matrix: From e8c20f6b00645e278f6e9562b6aa1af8a410fd13 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 12:57:50 +0100 Subject: [PATCH 42/75] Add thing --- .github/workflows/tests.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7f662cb9..e1b1541b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,7 +17,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: strategy: @@ -30,7 +30,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ ruff-format: strategy: @@ -43,7 +43,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip 
install '.[dev]' - run: ruff format --check src/ tests/ docs/ pytest: strategy: @@ -65,7 +65,7 @@ jobs: channel-priority: strict environment-file: environment.yml use-only-tar-bz2: true - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: pytest - run: python -m coverage xml - uses: codecov/codecov-action@v4 @@ -83,5 +83,5 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: make -C docs doctest From 3c6148a99d92b535225970250d16b88256bee7c4 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:03:26 +0100 Subject: [PATCH 43/75] wip --- .github/workflows/tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e1b1541b..d8af318e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -66,7 +66,8 @@ jobs: environment-file: environment.yml use-only-tar-bz2: true - run: pip install '.[dev]' - - run: pytest + - shell: bash -el {0} + run: pytest - run: python -m coverage xml - uses: codecov/codecov-action@v4 with: From 5c8be2ba5cf98466e8e104965aaca73f47cb6e14 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:17:32 +0100 Subject: [PATCH 44/75] update docs --- README.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.rst b/README.rst index e99fe927..78fab6f4 100644 --- a/README.rst +++ b/README.rst @@ -142,14 +142,14 @@ Example Development =========== -OMLT uses `tox` to manage development tasks: +OMLT uses ``just`` to manage development tasks: -* `tox -av` to list available tasks -* `tox` to run tests -* `tox -e lint` to check formatting and code styles -* `tox -e format` to automatically format files -* `tox -e docs` to build the documentation -* `tox -e publish` to publish the package to PyPi +* ``just`` to list available tasks +* ``just check`` to run all checks +* ``just fix`` to apply any auto-fixes +* ``just dev`` to install development dependencies +* ``just dev-gpu`` to install development dependencies but with GPU support +* ``just docs`` to build the documentation Contributors ============ @@ -224,4 +224,4 @@ Contributors .. _zshiqiang: https://github.com/zshiqiang .. 
|zshiqiang| image:: https://avatars.githubusercontent.com/u/91337036?v=4 - :width: 80px + :width: 80px From b51ba7dc1b7b4c1998203f6592a186741d09baaa Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:18:08 +0100 Subject: [PATCH 45/75] add link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 78fab6f4..4dab06c7 100644 --- a/README.rst +++ b/README.rst @@ -142,7 +142,7 @@ Example Development =========== -OMLT uses ``just`` to manage development tasks: +OMLT uses [just](https://github.com/casey/just) to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks From cd892bcd1afa6f814d1cb956608bcfee55870bc5 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:20:00 +0100 Subject: [PATCH 46/75] wip --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 4dab06c7..3820ebe9 100644 --- a/README.rst +++ b/README.rst @@ -142,7 +142,7 @@ Example Development =========== -OMLT uses [just](https://github.com/casey/just) to manage development tasks: +OMLT uses `just `_ to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks From fbac1ba6ff277fe2c0e2ee37e738eefc72649cea Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:20:48 +0100 Subject: [PATCH 47/75] thing --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 3820ebe9..be307348 100644 --- a/README.rst +++ b/README.rst @@ -147,8 +147,8 @@ OMLT uses `just `_ to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks * ``just fix`` to apply any auto-fixes -* ``just dev`` to install development dependencies -* ``just dev-gpu`` to install development dependencies but with GPU support +* ``just dev`` to install development dependencies in your current Python environment +* ``just dev-gpu`` to install development dependencies in your current Python environment but with GPU support * ``just docs`` to build the documentation Contributors From 0e8bded5ad0bde90662eea30052378eac5853dff Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:21:20 +0100 Subject: [PATCH 48/75] wip --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index be307348..058c0fc6 100644 --- a/README.rst +++ b/README.rst @@ -148,7 +148,7 @@ OMLT uses `just `_ to manage development tasks: * ``just check`` to run all checks * ``just fix`` to apply any auto-fixes * ``just dev`` to install development dependencies in your current Python environment -* ``just dev-gpu`` to install development dependencies in your current Python environment but with GPU support +* ``just dev-gpu`` same as ``dev`` but with GPU support * ``just docs`` to build the documentation Contributors From 1cdf89c24e3f8ae473b0958806c15d0957d3df7a Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:31:55 +0100 Subject: [PATCH 49/75] remove unnecessary things --- .github/workflows/tests.yml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d8af318e..bbaaf8ca 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,41 +7,32 @@ on: workflow_dispatch: jobs: ruff: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: 
actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ ruff-format: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: ruff format --check src/ tests/ docs/ From 909f86e4276f98e68e5bb2d44b77c6dd33165dbe Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:32:52 +0100 Subject: [PATCH 50/75] Add back for mypy --- .github/workflows/tests.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bbaaf8ca..507f64f5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,12 +17,15 @@ jobs: - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.12" + python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ From 7ae13bed517d93ccdcf15ad344c480736a2a4daa Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed, 5 Jun 2024 20:01:17 +0000 Subject: [PATCH 51/75] Including OmltExpr expressions for the OmltVars --- src/omlt/base/__init__.py | 7 +- src/omlt/base/expression.py | 554 +++++++++++++++++++ src/omlt/base/julia.py | 23 +- src/omlt/base/var.py | 365 +++++++----- src/omlt/block.py | 24 +- src/omlt/neuralnet/layers/partition_based.py | 14 +- tests/test_block.py | 16 +- tests/test_var.py | 32 ++ 8 files changed, 874 insertions(+), 161 deletions(-) create mode 100644 src/omlt/base/expression.py create mode 100644 tests/test_var.py diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 292c5eb2..bd526fc3 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,9 +1,10 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" -from omlt.dependencies import julia_available +from omlt.dependencies import julia_available if julia_available: - from omlt.base.julia import jump + from omlt.base.julia import jl, jump + from omlt.base.var import OmltVar +from omlt.base.expression import OmltExpr -# from omlt.base.expression import OmltExpression # from omlt.base.constraint import OmltConstraint diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py new file mode 100644 index 00000000..d8dda339 --- /dev/null +++ b/src/omlt/base/expression.py @@ -0,0 +1,554 @@ +from abc import ABC +import pyomo.environ as pyo + +# from pyomo.core.expr import RelationalExpression + +from omlt.base import DEFAULT_MODELING_LANGUAGE +import omlt.base.var as var +from omlt.dependencies import julia_available + +if julia_available: + from omlt.base.julia import jl, jump, JumpVar + from juliacall import AnyValue +relations = {"==", ">=", "<=", ">", "<"} + +formats = { + "pyomo": { + 
"scalar": pyo.Expression, + "indexed": pyo.Expression, + }, + "jump": { + "scalar": jump.AffExpr, + "indexed": jl.Vector, + }, +} + + +class OmltExpr(ABC): + # Claim to be a Pyomo Expression so blocks will register + # properly. + @property + def __class__(self): + return pyo.Expression + + def __new__(cls, *indexes, **kwargs): + if not indexes: + instance = super(OmltExpr, cls).__new__(OmltExprScalar) + instance.__init__(**kwargs) + else: + instance = super(OmltExpr, cls).__new__(OmltExprIndexed) + instance.__init__(*indexes, **kwargs) + return instance + + @property + def ctype(self): + return pyo.Expression + + def is_component_type(self): + return True + + def is_expression_type(self): + return True + + def is_indexed(self): + pass + + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + + +class OmltExprScalar(OmltExpr): + def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + "Expression format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltExpr, cls).__new__(subclass) + # instance.__init__(*args, **kwargs) + instance._format = format + return instance + + def __mul__(self, other): + pass + + +class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): + format = "pyomo" + + def __init__(self, *args, expr=None, **kwargs): + self._index_set = {} + if isinstance(expr, (pyo.Expression, pyo.NumericValue)): + self._expression = expr + elif isinstance(expr, OmltExprScalarPyomo): + self._expression = expr._expression + elif isinstance(expr, tuple): + self._expression = self._parse_expression_tuple(expr) + else: + print("expression not recognized", expr, type(expr)) + + self._parent = None + self.name = None + + def _parse_expression_tuple_term(self, term): + if isinstance(term, tuple): + return self._parse_expression_tuple(term) + elif isinstance(term, OmltExprScalarPyomo): + return term._expression + elif isinstance(term, var.OmltVar): + return term._pyovar + elif isinstance(term, ( + pyo.Expression, pyo.Var, int, float + )): + return term + else: + raise TypeError("Term of expression is an unsupported type. 
" + "Write a better error message.") + + def _parse_expression_tuple(self, expr): + lhs = self._parse_expression_tuple_term(expr[0]) + rhs = self._parse_expression_tuple_term(expr[2]) + + if expr[1] == "+": + return lhs + rhs + + elif expr[1] == "-": + return lhs - rhs + + elif expr[1] == "*": + return lhs * rhs + + elif expr[1] == "/": + return lhs / rhs + + else: + raise ValueError("Expression middle term was {%s}.", expr[1]) + + def __repr__(self): + return repr(self._expression.arg(0)) + + def is_indexed(self): + return False + + def as_numeric(self): + return self._expression._apply_operation(self._expression.args) + + def construct(self, data=None): + return self._expression.construct(data) + + @property + def _constructed(self): + return self._expression.expr._constructed + + @property + def const(self): + return self._expression.const + + @property + def args(self): + return self._expression.args + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __call__(self): + return self._expression() + + def __add__(self, other): + if isinstance(other, OmltExpr): + expr = self._expression + other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression + other + return OmltExpr(format=self._format, expr=expr) + + # def __sub__(self, other): + # expr = (self, "-", other) + # return OmltExpression(format=self._format, expr=expr) + + def __mul__(self, other): + if isinstance(other, OmltExpr): + expr = self._expression * other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression * other + return OmltExprScalar(format=self._format, expr=expr) + + def __div__(self, other): + expr = (self, "/", other) + return OmltExpr(format=self._format, expr=expr) + + def __truediv__(self, other): + expr = (self, "//", other) + return OmltExpr(format=self._format, expr=expr) + + def __radd__(self, other): + if isinstance(other, OmltExpr): + expr = other._expression + self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other + self._expression + return OmltExpr(format=self._format, expr=expr) + + def __rsub__(self, other): + if isinstance(other, OmltExpr): + expr = other._expression - self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other - self._expression + return OmltExpr(format=self._format, expr=expr) + + def __rmul__(self, other): + expr = (other, "*", self) + return OmltExpr(format=self._format, expr=expr) + + def __ge__(self, other): + expr = self._expression >= other + return expr + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + def __le__(self, other): + expr = (self._expression <= other) + return expr + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + def __eq__(self, other): + expr = self._expression == other + return pyo.Expression(expr=expr) + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + +class OmltExprIndexed(OmltExpr): + def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + "Variable format %s not recognized. 
Supported formats are 'pyomo'" + " or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltExpr, subclass).__new__(subclass) + instance.__init__(*indexes, **kwargs) + instance._format = format + return instance + + +class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): + format = "pyomo" + + def __init__(self, *indexes, expr=None, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + elif len(indexes) > 1: + raise ValueError("Currently index cross-products are unsupported.") + else: + self._index_set = {} + self._format = format + self._expression = pyo.Expression(self._index_set, expr=expr) + + # self.pyo.construct() + + def is_indexed(self): + return True + + def expression_as_dict(self): + if len(self._index_set) == 1: + return {self._index_set[0]: self._expression} + else: + return {k: self._expression[k] for k in self._index_set} + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._expression[item[0]] + else: + return self._expression[item] + + def __setitem__(self, item, value): + self._expression[item] = value + + def keys(self): + return self._expression.keys() + + def values(self): + return self._expression.values() + + def items(self): + return self._expression.items() + + def __len__(self): + """ + Return the number of component data objects stored by this + component. + """ + return len(self._expression) + + def __contains__(self, idx): + """Return true if the index is in the dictionary""" + return idx in self._expression + + # The default implementation is for keys() and __iter__ to be + # synonyms. 
The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys""" + return self._expression.__iter__() + + @property + def args(self): + return self._expression.args() + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __call__(self): + return self._expression() + + # # def __str__(self): + # # return parse_expression(self.expr, "").rstrip() + + # def __repr__(self): + # if self._expr is not None: + # return parse_expression(self._expr, "").rstrip() + # else: + # return "empty expression" + + # def set_value(self, value): + # print("setting value:", value) + # self.value = value + + # @property + # def rule(self): + # return self._expr + + def __add__(self, other): + expr = (self, "+", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + # def __sub__(self, other): + # expr = (self, "-", other) + # return OmltExpression(format=self._format, expr=expr) + + # def __mul__(self, other): + # expr = (self, "*", other) + # return OmltExpression(format=self._format, expr=expr) + + def __div__(self, other): + expr = (self, "/", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + def __truediv__(self, other): + expr = (self, "//", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + def __eq__(self, other): + expr = (self, "==", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + def __le__(self, other): + expr = (self, "<=", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + def __ge__(self, other): + expr = (self, ">=", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + +# def parse_expression(expr, string): +# if expr is not None: +# for t in expr: +# if str(t).count(" ") == 2: +# string += "(" + str(t) + ") " +# else: +# string += str(t) + " " +# else: +# string = expr +# return string + + +# def parse_jump_affine(expr_tuple): +# if expr_tuple is not None: +# if isinstance(expr_tuple, JumpVar): +# return jump.AffExpr(0, {expr_tuple.to_jump(): 1}) +# elif isinstance(expr_tuple, (int, float)): +# return jump.AffExpr(expr_tuple, {}) +# elif isinstance(expr_tuple, OmltExprScalar): +# print("found a scalar expression") +# print(expr_tuple) +# print(expr_tuple._expression) +# return expr_tuple._expression +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], JumpVar): +# return jump.AffExpr(0, {expr_tuple[0].to_jump(): 1}) +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): +# return jump.AffExpr(expr_tuple[0], {}) +# elif len(expr_tuple) == 2: +# print("don't know how to deal with 2-element expressions") +# print("expr_tuple") +# elif len(expr_tuple) == 3: +# print("triplet") +# if expr_tuple[1] == "+": +# return parse_jump_affine(expr_tuple[0]) + parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "-": +# return parse_jump_affine(expr_tuple[0]) - parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "*": +# return parse_jump_affine(expr_tuple[0]) * parse_jump_affine( +# expr_tuple[2] +# ) +# elif 
expr_tuple[1] == "/": +# return parse_jump_affine(expr_tuple[0]) / parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "//": +# return parse_jump_affine(expr_tuple[0]) // parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "**": +# return parse_jump_affine(expr_tuple[0]) ** parse_jump_affine( +# expr_tuple[2] +# ) + + +# def dictplus(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): +# c[k] = a[k] + b[k] +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dictminus(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): +# c[k] = a[k] - b[k] +# print("dictminus gives:", c) +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dicttimes(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): + +# c[k] = a[k] * b[k] +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dictover(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): + +# c[k] = jump_divide(a[k], b[k]) +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def jump_divide(a, b): +# assert isinstance(a, AnyValue) +# print(b.terms) +# assert (isinstance(b, AnyValue) and len(b.terms) == 0) or isinstance( +# b, (int, float) +# ) +# if isinstance(b, AnyValue): +# div_by = b.constant +# else: +# div_by = b +# return jump.AffExpr(a.constant / div_by, {}) + + +# def parse_jump_indexed(expr_tuple, index): +# print("parsing:", expr_tuple) +# if expr_tuple is not None: +# if isinstance(expr_tuple, OmltExpr): +# print("here") +# return expr_tuple.expression_as_dict() +# elif isinstance(expr_tuple, var.OmltVar): +# return expr_tuple.to_jumpexpr() +# elif isinstance(expr_tuple, (int, float)): +# return {k: jump.AffExpr(expr_tuple, {}) for k in index} +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], OmltExpr): +# return expr_tuple[0]._expression +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], var.OmltVar): +# indexed = { +# k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) +# for k, v in expr_tuple[0].items() +# } +# return indexed +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): +# return {k: jump.AffExpr(expr_tuple[0], {}) for k in index} +# elif len(expr_tuple) == 2: +# print("don't know how to deal with 2-element expressions") +# print(expr_tuple) +# elif len(expr_tuple) == 3: +# if expr_tuple[1] == "+": +# return dictplus( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "-": +# return dictminus( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "*": +# return dicttimes( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "/": +# return dictover( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "//": +# return dictover( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "**": +# return parse_jump_indexed(expr_tuple[0], index) ** parse_jump_indexed( +# expr_tuple[2], index +# ) +# elif expr_tuple[1] in relations: +# cnstrnt = constraint.OmltRelation( +# index, +# model=None, +# lhs=parse_jump_indexed(expr_tuple[0], index), +# sense=expr_tuple[1], +# rhs=parse_jump_indexed(expr_tuple[2], index), +# format="jump", +# ) +# indexed = {k: 
cnstrnt.lhs[k] - cnstrnt.rhs[k] for k in index} +# return indexed diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index e2e771d6..b3c9109f 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,5 +1,4 @@ from omlt.dependencies import julia_available -# from omlt.base.expression import OmltExpression if julia_available: from juliacall import Main as jl @@ -74,6 +73,8 @@ class JumpVar: def __init__(self, varinfo: JuMPVarInfo, name): self.info = varinfo self.name = name + self.omltvar = None + self.index = None self.construct() def __str__(self): @@ -96,17 +97,21 @@ def value(self): def add_to_model(self, model, name=None): if name is None: - name = self._name - jump.add_variable(model, self.var, name) + name = self.name + variable_ref = jump.add_variable(model, self.var, name) + return variable_ref def to_jump(self): return self.var - # def __sub__(self, other): - # return OmltExpression(expr=(self, "-", other), format="jump") + def __add__(self, other): + return (self.omltvar + other)[self.index] - # def __mul__(self, other): - # return OmltExpression(expr=(self, "*", other), format="jump") + def __sub__(self, other): + return (self.omltvar - other)[self.index] - # def __eq__(self, other): - # return OmltExpression(expr=(self, "==", other), format="jump") + def __mul__(self, other): + return (self.omltvar * other)[self.index] + + def __eq__(self, other): + return (self.omltvar == other)[self.index] diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index f22d8183..c119e8bf 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -12,9 +12,12 @@ from omlt.dependencies import julia_available from omlt.base import DEFAULT_MODELING_LANGUAGE + if julia_available: from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar +from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar +from omlt.base.constraint import OmltRelation, OmltRelScalar class OmltVar(ABC): @@ -63,7 +66,7 @@ def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} if format not in subclass_map: raise ValueError( - f"Variable format %s not recognized. Supported formats " + "Variable format %s not recognized. Supported formats " "are 'pyomo' or 'jump'.", format, ) @@ -108,6 +111,16 @@ def ub(self): def ub(self, val): pass + @property + @abstractmethod + def domain(self): + pass + + @domain.setter + @abstractmethod + def domain(self, val): + pass + # Interface for getting/setting value @property @abstractmethod @@ -121,91 +134,149 @@ def value(self, val): # Interface governing how variables behave in expressions. 
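The hunk below activates the arithmetic interface that the earlier patches left commented out: rather than falling back to pyo.NumericValue, each operator on an OmltVar now returns an OmltExprScalar (or an OmltRelScalar for comparisons) built from an (lhs, op, rhs) tuple, which the chosen backend lowers to a Pyomo or JuMP expression. A rough sketch of the resulting behaviour with the default Pyomo backend (a standalone sketch, not part of the patch; construction details such as attaching the variable to a block are elided):

    # Expression building via the overloaded operators on a scalar OmltVar.
    from omlt.base import OmltVar

    x = OmltVar()          # scalar variable, format defaults to "pyomo"
    prod = 2.0 * x         # __rmul__ passes (2.0, "*", x) to OmltExprScalar,
                           # which parses the tuple into a Pyomo expression
    expr = prod + 1.0      # OmltExprScalar.__add__ extends the wrapped expression
    con = expr >= 0        # comparisons yield a relational expression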
- # def __lt__(self, other): - # return pyo.NumericValue.__lt__(self, other) + def __lt__(self, other): + return OmltRelScalar(expr=(self, "<", other)) - # def __gt__(self, other): - # return pyo.NumericValue.__gt__(self, other) + def __gt__(self, other): + return OmltRelScalar(expr=(self, ">", other)) - # def __le__(self, other): - # return pyo.NumericValue.__le__(self, other) + def __le__(self, other): + return OmltRelScalar(expr=(self, "<=", other)) - # def __ge__(self, other): - # return pyo.NumericValue.__ge__(self, other) + def __ge__(self, other): + return OmltRelScalar(expr=(self, ">=", other)) - # def __eq__(self, other): - # return pyo.NumericValue.__eq__(self, other) + def __eq__(self, other): + return OmltRelScalar(expr=(self, "==", other)) - # def __add__(self, other): - # return pyo.NumericValue.__add__(self, other) + def __add__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "+", other)) - # def __sub__(self, other): - # return pyo.NumericValue.__sub__(self, other) + def __sub__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "-", other)) - # # def __mul__(self,other): - # # return pyo.NumericValue.__mul__(self,other) + def __mul__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "*", other)) - # def __div__(self, other): - # return pyo.NumericValue.__div__(self, other) + def __div__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "//", other)) - # def __truediv__(self, other): - # return pyo.NumericValue.__truediv__(self, other) + def __truediv__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "/", other)) - # def __pow__(self, other): - # return pyo.NumericValue.__pow__(self, other) + def __pow__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "**", other)) - # def __radd__(self, other): - # return pyo.NumericValue.__radd__(self, other) + def __radd__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "+", self)) - # def __rsub__(self, other): - # return pyo.NumericValue.__rsub__(self, other) + def __rsub__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "-", self)) - # # def __rmul__(self,other): - # # return self._ComponentDataClass.__rmul__(self,other) + def __rmul__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "*", self)) - # def __rdiv__(self, other): - # return pyo.NumericValue.__rdiv__(self, other) + def __rdiv__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "//", self)) - # def __rtruediv__(self, other): - # return pyo.NumericValue.__rtruediv__(self, other) + def __rtruediv__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "/", self)) - # def __rpow__(self, other): - # return pyo.NumericValue.__rpow__(self, other) + def __rpow__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "**", self)) - # def __iadd__(self, other): - # return pyo.NumericValue.__iadd__(self, other) + def __iadd__(self, other): + return pyo.NumericValue.__iadd__(self, other) - # def __isub__(self, other): - # return pyo.NumericValue.__isub__(self, other) + def __isub__(self, other): + return pyo.NumericValue.__isub__(self, other) - # def __imul__(self, other): - # return pyo.NumericValue.__imul__(self, other) + def __imul__(self, other): + return pyo.NumericValue.__imul__(self, other) - # def __idiv__(self, other): - # return pyo.NumericValue.__idiv__(self, other) + def __idiv__(self, other): + return 
pyo.NumericValue.__idiv__(self, other) - # def __itruediv__(self, other): - # return pyo.NumericValue.__itruediv__(self, other) + def __itruediv__(self, other): + return pyo.NumericValue.__itruediv__(self, other) - # def __ipow__(self, other): - # return pyo.NumericValue.__ipow__(self, other) + def __ipow__(self, other): + return pyo.NumericValue.__ipow__(self, other) - # def __neg__(self): - # return pyo.NumericValue.__neg__(self) + def __neg__(self): + return pyo.NumericValue.__neg__(self) - # def __pos__(self): - # return pyo.NumericValue.__pos__(self) + def __pos__(self): + return pyo.NumericValue.__pos__(self) - # def __abs__(self): - # return pyo.NumericValue.__abs__(self) + def __abs__(self): + return pyo.NumericValue.__abs__(self) -class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): +class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): format = "pyomo" def __init__(self, *args, **kwargs): kwargs.pop("format", None) - pyo.ScalarVar.__init__(self, *args, **kwargs) + # pyo.ScalarVar.__init__(self, *args, **kwargs) + self._pyovar = pyo.ScalarVar(*args, **kwargs) + + def construct(self, data=None): + return self._pyovar.construct(data) + + def fix(self, value, skip_validation): + return self._pyovar.fix(value, skip_validation) + + @property + def ctype(self): + return pyo.ScalarVar + + @property + def name(self): + self._pyovar._name = self._name + return self._pyovar._name + + @property + def bounds(self): + return (self._pyovar._lb, self._pyovar._ub) + + @bounds.setter + def bounds(self, val): + self._pyovar.lb = val[0] + self._pyovar.ub = val[1] + + @property + def lb(self): + return self._pyovar._lb + + @lb.setter + def lb(self, val): + self._pyovar.setlb(val) + + @property + def ub(self): + return self._pyovar._ub + + @ub.setter + def ub(self, val): + self._pyovar.setub(val) + + @property + def domain(self): + return self._pyovar._domain + + @domain.setter + def domain(self, val): + self._pyovar._domain = val + + # Interface for getting/setting value + @property + def value(self): + return self._pyovar.value + + @value.setter + def value(self, val): + self._pyovar.value = val class OmltScalarJuMP(OmltScalar): @@ -261,7 +332,18 @@ def __init__(self, *args, **kwargs): _initialize = kwargs.pop("initialize", None) if _initialize: - self._value = _initialize + if isinstance(_initialize, (int, float)): + self._value = _initialize + elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): + self._value = _initialize[0] + else: + # Pyomo's "scalar" variables can be multidimensional, they're + # just not indexed. JuMP scalar variables can only be a single + # dimension. Rewrite this error to be more helpful. + raise ValueError( + "Initial value for JuMP variables must be an int" + f" or float, but {type(_initialize)} was provided." 
+ ) else: self._value = None @@ -280,9 +362,12 @@ def __init__(self, *args, **kwargs): def construct(self, data=None): self._var = JumpVar(self._varinfo, self._name) + self._var.omltvar = self self._constructed = True - if self._block: - self._blockvar = jump.add_variable(self._block, self._var) + if self._parent: + self._blockvar = jump.add_variable( + self._parent()._jumpmodel, self.to_jumpvar() + ) def fix(self, value, skip_validation): self.fixed = True @@ -311,7 +396,7 @@ def lb(self): @lb.setter def lb(self, val): - self._varinfo.lower_bound = val + self._varinfo.setlb(val) if self._constructed: self.construct() @@ -321,7 +406,7 @@ def ub(self): @ub.setter def ub(self, val): - self._varinfo.upper_bound = val + self._varinfo.setub(val) if self._constructed: self.construct() @@ -352,12 +437,15 @@ def name(self): def name(self, value): self._name = value - def to_jump(self): + def to_jumpvar(self): if self._constructed: return self._var.to_jump() else: return self._varinfo.to_jump() + def to_jumpexpr(self): + return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) + """ Future formats to implement. @@ -387,7 +475,7 @@ def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} if format not in subclass_map: raise ValueError( - f"Variable format %s not recognized. Supported formats are 'pyomo'" + "Variable format %s not recognized. Supported formats are 'pyomo'" " or 'jump'.", format, ) @@ -447,86 +535,85 @@ def __contains__(self, idx): def __iter__(self): pass + # Interface governing how variables behave in expressions. -# Interface governing how variables behave in expressions. - -# def __lt__(self, other): -# return pyo.NumericValue.__lt__(self, other) + def __lt__(self, other): + return OmltRelation(self.index_set(), expr=(self, "<", other)) -# def __gt__(self, other): -# return pyo.NumericValue.__gt__(self, other) + def __gt__(self, other): + return OmltRelation(self.index_set(), expr=(self, ">", other)) -# def __le__(self, other): -# return pyo.NumericValue.__le__(self, other) + def __le__(self, other): + return OmltRelation(self.index_set(), expr=(self, "<=", other)) -# def __ge__(self, other): -# return pyo.NumericValue.__ge__(self, other) + def __ge__(self, other): + return OmltRelation(self.index_set(), expr=(self, ">=", other)) -# def __eq__(self, other): -# return pyo.NumericValue.__eq__(self, other) + def __eq__(self, other): + return OmltRelation(self.index_set(), expr=(self, "==", other)) -# def __add__(self, other): -# return pyo.NumericValue.__add__(self, other) + def __add__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) -# def __sub__(self, other): -# return pyo.NumericValue.__sub__(self, other) + def __sub__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "-", other)) -# # def __mul__(self,other): -# # return pyo.NumericValue.__mul__(self,other) + def __mul__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "*", other)) -# def __div__(self, other): -# return pyo.NumericValue.__div__(self, other) + def __div__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "//", other)) -# def __truediv__(self, other): -# return pyo.NumericValue.__truediv__(self, other) + def __truediv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "/", other)) -# def __pow__(self, other): -# return pyo.NumericValue.__pow__(self, other) + def __pow__(self, other): + return 
OmltExprIndexed(self.index_set(), expr=(self, "**", other)) -# def __radd__(self, other): -# return pyo.NumericValue.__radd__(self, other) + def __radd__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "+", self)) -# def __rsub__(self, other): -# return pyo.NumericValue.__rsub__(self, other) + def __rsub__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "-", self)) -# # def __rmul__(self,other): -# # return self._ComponentDataClass.__rmul__(self,other) + def __rmul__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "*", self)) -# def __rdiv__(self, other): -# return pyo.NumericValue.__rdiv__(self, other) + def __rdiv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "//", self)) -# def __rtruediv__(self, other): -# return pyo.NumericValue.__rtruediv__(self, other) + def __rtruediv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "/", self)) -# def __rpow__(self, other): -# return pyo.NumericValue.__rpow__(self, other) + def __rpow__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "**", self)) -# def __iadd__(self, other): -# return pyo.NumericValue.__iadd__(self, other) + def __iadd__(self, other): + return pyo.NumericValue.__iadd__(self, other) -# def __isub__(self, other): -# return pyo.NumericValue.__isub__(self, other) + def __isub__(self, other): + return pyo.NumericValue.__isub__(self, other) -# def __imul__(self, other): -# return pyo.NumericValue.__imul__(self, other) + def __imul__(self, other): + return pyo.NumericValue.__imul__(self, other) -# def __idiv__(self, other): -# return pyo.NumericValue.__idiv__(self, other) + def __idiv__(self, other): + return pyo.NumericValue.__idiv__(self, other) -# def __itruediv__(self, other): -# return pyo.NumericValue.__itruediv__(self, other) + def __itruediv__(self, other): + return pyo.NumericValue.__itruediv__(self, other) -# def __ipow__(self, other): -# return pyo.NumericValue.__ipow__(self, other) + def __ipow__(self, other): + return pyo.NumericValue.__ipow__(self, other) -# def __neg__(self): -# return pyo.NumericValue.__neg__(self) + def __neg__(self): + return pyo.NumericValue.__neg__(self) -# def __pos__(self): -# return pyo.NumericValue.__pos__(self) + def __pos__(self): + return pyo.NumericValue.__pos__(self) -# def __abs__(self): -# return pyo.NumericValue.__abs__(self) + def __abs__(self): + return pyo.NumericValue.__abs__(self) class OmltIndexedPyomo(pyo.Var, OmltIndexed): @@ -588,7 +675,7 @@ def __init__(self, *indexes, **kwargs): _ub = {i: None for i in self._index_set} else: raise ValueError( - f"Bounds must be given as a tuple," " but %s was given.", self._bounds + "Bounds must be given as a tuple," " but %s was given.", self._bounds ) _domain = kwargs.pop("domain", None) @@ -630,7 +717,7 @@ def __init__(self, *indexes, **kwargs): self._value = {i: _initialize[0] for i in self._index_set} else: raise ValueError( - f"Index set has length %s, but" " initializer has length %s.", + "Index set has length %s, but initializer has length %s.", len(self._index_set), len(_initialize), ) @@ -648,6 +735,7 @@ def __init__(self, *indexes, **kwargs): self.integer, ) self._vars = {} + self._varrefs = {} self._constructed = False self._ctype = pyo.Var self._parent = None @@ -664,13 +752,22 @@ def __setitem__(self, item, value): self.construct() def keys(self): - return self._vars.keys() + if self._parent is not None: + return self._varrefs.keys() + else: + return self._vars.keys() def values(self): - return 
self._vars.values() + if self._parent is not None: + return self._varrefs.values() + else: + return self._vars.values() def items(self): - return self._vars.items() + if self._parent is not None: + return self._varrefs.items() + else: + return self._vars.items() def fix(self, value=None): self.fixed = True @@ -708,6 +805,13 @@ def construct(self, data=None): else: name = str(self.name) + str(list(idx)).replace(" ", "") self._vars[idx] = JumpVar(self._varinfo[idx], name) + self._vars[idx].omltvar = self + self._vars[idx].index = idx + if self._parent is not None: + block = self._parent() + if block._format == "jump" and block._jumpmodel is not None: + self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) + self._constructed = True def setub(self, value): @@ -735,11 +839,12 @@ def index_set(self): def name(self): return self._name - def to_jump(self): + def to_jumpvar(self): if self._constructed: - return jump.Containers.DenseAxisArray( - list(self._vars.values()), self.index_set() - ) + return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) + + def to_jumpexpr(self): + return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} """ @@ -763,13 +868,3 @@ def __init__(self, *args, **kwargs): raise ValueError( "Storing variables in Gurobi format is not currently implemented." ) - - -class OmltSet: - def __init__(self): - pass - - -class OmltExpression: - def __init__(self): - pass diff --git a/src/omlt/block.py b/src/omlt/block.py index 11956f48..f97b9c5c 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -26,7 +26,9 @@ class is used in combination with a formulation object to construct the import warnings from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE - +from omlt.dependencies import julia_available +if julia_available: + from omlt.base import jump import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -39,7 +41,16 @@ def __init__(self, component): self.__formulation = None self.__input_indexes = None self.__output_indexes = None - self.__format = DEFAULT_MODELING_LANGUAGE + self._format = DEFAULT_MODELING_LANGUAGE + if self._format == "jump": + self._jumpmodel = jump.Model() + else: + self._jumpmodel = None + + def set_format(self, format): + self._format = format + if self._format == "jump" and self._jumpmodel is None: + self._jumpmodel = jump.Model() def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ @@ -65,9 +76,9 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): ) self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0, format=self.__format) + self.inputs = OmltVar(self.inputs_set, initialize=0, format=self._format) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0, format=self.__format) + self.outputs = OmltVar(self.outputs_set, initialize=0, format=self._format) def build_formulation(self, formulation, format=None): """ @@ -87,7 +98,10 @@ def build_formulation(self, formulation, format=None): """ if format is not None: - self.__format = format + self._format = format + + if self._format == "jump": + self._jumpmodel = jump.Model() self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 5f99e706..3d2ebff7 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ 
b/src/omlt/neuralnet/layers/partition_based.py @@ -121,10 +121,12 @@ def output_node_block(b, *output_index): z2.setlb(min(0, lb)) z2.setub(max(0, ub)) - b.eq_16_lb.add(expr - z2 >= b.sig * lb) - b.eq_16_ub.add(expr - z2 <= b.sig * ub) - b.eq_17_lb.add(z2 >= (1 - b.sig) * lb) - b.eq_17_ub.add(z2 <= (1 - b.sig) * ub) + b.eq_16_lb.add(b.sig * lb <= expr - z2) + b.eq_16_ub.add(b.sig * ub >= expr - z2) + + minus_sig = 1 - b.sig + b.eq_17_lb.add(minus_sig * lb <= z2) + b.eq_17_ub.add(minus_sig * ub >= z2) # compute dense layer expression to compute bounds expr = 0.0 @@ -159,9 +161,9 @@ def output_node_block(b, *output_index): b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0) b.eq_14 = pyo.Constraint( - expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig) >= 0 + expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0 ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig) + == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression ) diff --git a/tests/test_block.py b/tests/test_block.py index 74fb9290..35d58a6d 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -6,7 +6,6 @@ from omlt.dependencies import julia_available - class dummy_formulation(object): def __init__(self): self.input_indexes = ["A", "C", "D"] @@ -48,10 +47,21 @@ def test_block(): def test_jump_block(): m = pyo.ConcreteModel() m.b = OmltBlock() + m.b.set_format("jump") + + with pytest.raises(ValueError) as excinfo: + m.b.x = OmltVar(initialize=(2, 7), format="jump") + expected_msg = "Initial value for JuMP variables must be an int or float, but <class 'tuple'> was provided." - m.b.x = OmltVar(initialize=(2, 7), format="jump") + assert str(excinfo.value) == expected_msg - assert m.b.x.value == (2, 7) + m.b.y = OmltVar(initialize=2, format="jump") + assert m.b.y.value == 2 + assert m.b.y.name == 'y' + m.b.y.lb = 0 + m.b.y.ub = 5 + assert m.b.y.lb == 0 + assert m.b.y.ub == 5 formulation = dummy_formulation() diff --git a/tests/test_var.py b/tests/test_var.py new file mode 100644 index 00000000..1639c480 --- /dev/null +++ b/tests/test_var.py @@ -0,0 +1,32 @@ +import pytest + +import pyomo.environ as pyo +from omlt.base import OmltVar +from omlt.dependencies import julia_available + + +def _test_scalar_var(format): + v = OmltVar(format=format, initialize=2, domain=pyo.Integers) + assert v.is_indexed() is False + assert v.ctype == pyo.ScalarVar + + v.construct() + + v.value = 3 + assert v.value == 3 + + v.bounds = (0, 5) + assert v.lb == 0 + assert v.ub == 5 + assert v.bounds == (0, 5) + + +def test_scalar_pyomo(): + _test_scalar_var("pyomo") + + +@pytest.mark.skipif( + not julia_available, reason="Test only valid when Julia is available" +) +def test_scalar_jump(): + _test_scalar_var("jump") From 3483455ca049f7dd4ffc82c204a87389758e0fbf Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:22:30 +0000 Subject: [PATCH 52/75] cleanup in expression.py --- src/omlt/base/expression.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index d8dda339..c6086cf5 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -5,23 +5,12 @@ from omlt.base import DEFAULT_MODELING_LANGUAGE import omlt.base.var as var -from omlt.dependencies import julia_available - -if julia_available: - from omlt.base.julia import jl, jump, JumpVar - from juliacall import
AnyValue -relations = {"==", ">=", "<=", ">", "<"} - -formats = { - "pyomo": { - "scalar": pyo.Expression, - "indexed": pyo.Expression, - }, - "jump": { - "scalar": jump.AffExpr, - "indexed": jl.Vector, - }, -} +# from omlt.dependencies import julia_available + +# if julia_available: +# from omlt.base.julia import jl, jump, JumpVar +# from juliacall import AnyValue +# relations = {"==", ">=", "<=", ">", "<"} class OmltExpr(ABC): From 21a63ea4b6f4a91f85bc4df998b32ae2d709ec25 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:47:01 +0000 Subject: [PATCH 53/75] tidying var.py --- src/omlt/base/var.py | 42 +++++++++++++------------- src/omlt/neuralnet/activations/relu.py | 1 + 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index c119e8bf..bc68df39 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -17,7 +17,7 @@ from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar -from omlt.base.constraint import OmltRelation, OmltRelScalar +# from omlt.base.constraint import OmltRelation, OmltRelScalar class OmltVar(ABC): @@ -134,20 +134,20 @@ def value(self, val): # Interface governing how variables behave in expressions. - def __lt__(self, other): - return OmltRelScalar(expr=(self, "<", other)) + # def __lt__(self, other): + # return OmltRelScalar(expr=(self, "<", other)) - def __gt__(self, other): - return OmltRelScalar(expr=(self, ">", other)) + # def __gt__(self, other): + # return OmltRelScalar(expr=(self, ">", other)) - def __le__(self, other): - return OmltRelScalar(expr=(self, "<=", other)) + # def __le__(self, other): + # return OmltRelScalar(expr=(self, "<=", other)) - def __ge__(self, other): - return OmltRelScalar(expr=(self, ">=", other)) + # def __ge__(self, other): + # return OmltRelScalar(expr=(self, ">=", other)) - def __eq__(self, other): - return OmltRelScalar(expr=(self, "==", other)) + # def __eq__(self, other): + # return OmltRelScalar(expr=(self, "==", other)) def __add__(self, other): return OmltExprScalar(format=self._format, expr=(self, "+", other)) @@ -537,20 +537,20 @@ def __iter__(self): # Interface governing how variables behave in expressions. 
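A note on the pattern these var.py hunks are toggling: every arithmetic dunder hands off to an expression wrapper built from a (lhs, op, rhs) tuple, while the relational dunders stay commented out until the OmltRelation/constraint classes land. A minimal standalone sketch of that dispatch idea, using a hypothetical `Expr` class rather than OMLT's actual API:

```python
class Expr:
    """Toy stand-in for OmltExprScalar: records expressions as (lhs, op, rhs)."""

    def __init__(self, expr):
        self.expr = expr

    def __add__(self, other):
        # Each arithmetic operation returns a new wrapper, growing a small tree.
        return Expr((self, "+", other))

    def __radd__(self, other):
        # Handles `2 + x`, where the left operand is a plain number.
        return Expr((other, "+", self))
```

The relational half is the riskier piece to enable: in Python, a class that defines `__eq__` without also defining `__hash__` becomes unhashable, which matters for objects stored in dicts or sets.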
- def __lt__(self, other): - return OmltRelation(self.index_set(), expr=(self, "<", other)) + # def __lt__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "<", other)) - def __gt__(self, other): - return OmltRelation(self.index_set(), expr=(self, ">", other)) + # def __gt__(self, other): + # return OmltRelation(self.index_set(), expr=(self, ">", other)) - def __le__(self, other): - return OmltRelation(self.index_set(), expr=(self, "<=", other)) + # def __le__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "<=", other)) - def __ge__(self, other): - return OmltRelation(self.index_set(), expr=(self, ">=", other)) + # def __ge__(self, other): + # return OmltRelation(self.index_set(), expr=(self, ">=", other)) - def __eq__(self, other): - return OmltRelation(self.index_set(), expr=(self, "==", other)) + # def __eq__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "==", other)) def __add__(self, other): return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 8ac42aa0..995bf31e 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -3,6 +3,7 @@ from omlt.base import OmltVar + def bigm_relu_activation_constraint(net_block, net, layer_block, layer): r""" Big-M ReLU activation formulation. From 4ae0715710134d8cf467926a07307abc4d0767f5 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:58:51 +0000 Subject: [PATCH 54/75] fixing variable initialization --- src/omlt/base/var.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index bc68df39..4ecfc7ef 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -220,6 +220,7 @@ def __init__(self, *args, **kwargs): kwargs.pop("format", None) # pyo.ScalarVar.__init__(self, *args, **kwargs) self._pyovar = pyo.ScalarVar(*args, **kwargs) + self._parent = None def construct(self, data=None): return self._pyovar.construct(data) From b17482059a4ffd04efbefa46360945f581654aea Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:06:17 +0000 Subject: [PATCH 55/75] further fixing --- src/omlt/base/var.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 4ecfc7ef..73065289 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -221,6 +221,7 @@ def __init__(self, *args, **kwargs): # pyo.ScalarVar.__init__(self, *args, **kwargs) self._pyovar = pyo.ScalarVar(*args, **kwargs) self._parent = None + self._constructed = None def construct(self, data=None): return self._pyovar.construct(data) From e2191147019df7d98892aa1ef0d374a75490571f Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:22:56 +0000 Subject: [PATCH 56/75] adding abstract methods to expression interface --- src/omlt/base/expression.py | 16 +++++++++++++++- src/omlt/base/var.py | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index c6086cf5..d49b95a7 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -1,4 +1,4 @@ -from abc import ABC +from abc import ABC, abstractmethod import pyomo.environ as pyo # from pyomo.core.expr import RelationalExpression @@ -39,6 +39,7 @@ def is_component_type(self): def 
is_expression_type(self): return True + @abstractmethod def is_indexed(self): pass @@ -46,6 +47,19 @@ def valid_model_component(self): """Return True if this can be used as a model component.""" return True + @property + @abstractmethod + def args(self): + pass + + @abstractmethod + def arg(self, index): + pass + + @abstractmethod + def nargs(self): + pass + class OmltExprScalar(OmltExpr): def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 73065289..5927695f 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -16,7 +16,7 @@ if julia_available: from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar -from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar +from omlt.base.expression import OmltExprIndexed, OmltExprScalar # from omlt.base.constraint import OmltRelation, OmltRelScalar From 7c0dcb4ff59bc5ef33f0d509d415152fdcfe20ed Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 13:33:16 -0700 Subject: [PATCH 57/75] Delete .github/workflows/python-package.yml --- .github/workflows/python-package.yml | 58 ---------------------------- 1 file changed, 58 deletions(-) delete mode 100644 .github/workflows/python-package.yml diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml deleted file mode 100644 index 55870dbc..00000000 --- a/.github/workflows/python-package.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - # python-version: ["3.7", "3.8", "3.9"] - python-version: ["3.8", "3.9", "3.10"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v2" - with: - fail_ci_if_error: true From b6fed2a09d6e6580b4863ec6e1deaa6cd46c853d Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:42:30 +0000 Subject: [PATCH 58/75] linting (1) --- src/omlt/base/__init__.py | 1 + src/omlt/base/expression.py | 13 +++++++------ src/omlt/base/var.py | 1 + src/omlt/neuralnet/layers/partition_based.py | 8 ++++++-- tests/test_block.py | 3 ++- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index bd526fc3..3d881472 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,6 
+1,7 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" from omlt.dependencies import julia_available + if julia_available: from omlt.base.julia import jl, jump diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index d49b95a7..80229d99 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -5,6 +5,7 @@ from omlt.base import DEFAULT_MODELING_LANGUAGE import omlt.base.var as var + # from omlt.dependencies import julia_available # if julia_available: @@ -104,13 +105,13 @@ def _parse_expression_tuple_term(self, term): return term._expression elif isinstance(term, var.OmltVar): return term._pyovar - elif isinstance(term, ( - pyo.Expression, pyo.Var, int, float - )): + elif isinstance(term, (pyo.Expression, pyo.Var, int, float)): return term else: - raise TypeError("Term of expression is an unsupported type. " - "Write a better error message.") + raise TypeError( + "Term of expression is an unsupported type. " + "Write a better error message." + ) def _parse_expression_tuple(self, expr): lhs = self._parse_expression_tuple_term(expr[0]) @@ -214,7 +215,7 @@ def __ge__(self, other): # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) def __le__(self, other): - expr = (self._expression <= other) + expr = self._expression <= other return expr # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 5927695f..a7e5a9b8 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -17,6 +17,7 @@ from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar from omlt.base.expression import OmltExprIndexed, OmltExprScalar + # from omlt.base.constraint import OmltRelation, OmltRelScalar diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 3d2ebff7..2ec724a6 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -161,9 +161,13 @@ def output_node_block(b, *output_index): b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0) b.eq_14 = pyo.Constraint( - expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0 + expr=sum(b.z2[s] for s in range(num_splits)) + + bias * (1 - b.sig)._expression + >= 0 ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + + bias + * (1 - b.sig)._expression ) diff --git a/tests/test_block.py b/tests/test_block.py index 35d58a6d..88b1e49b 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -41,6 +41,7 @@ def test_block(): assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + @pytest.mark.skipif( not julia_available, reason="Test only valid when Julia is available" ) @@ -57,7 +58,7 @@ def test_jump_block(): m.b.y = OmltVar(initialize=2, format="jump") assert m.b.y.value == 2 - assert m.b.y.name == 'y' + assert m.b.y.name == "y" m.b.y.lb = 0 m.b.y.ub = 5 assert m.b.y.lb == 0 From bea9863bf90fa2d34a8581f84e40502242c68a3c Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:56:23 +0000 Subject: [PATCH 59/75] linting (2) --- src/omlt/block.py | 1 + src/omlt/neuralnet/layers/partition_based.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/omlt/block.py b/src/omlt/block.py index f97b9c5c..971547b1 100644 --- 
a/src/omlt/block.py +++ b/src/omlt/block.py @@ -27,6 +27,7 @@ class is used in combination with a formulation object to construct the from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE from omlt.dependencies import julia_available + if julia_available: from omlt.base import jump diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 2ec724a6..b43f2178 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -167,7 +167,5 @@ def output_node_block(b, *output_index): ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) - + bias - * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression ) From a967418ca055c925d5e881503b036970c7eec1c7 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 13 Jun 2024 22:08:18 +0000 Subject: [PATCH 60/75] Fixing initial batch of ruff errors --- .../notebooks/data/build_sin_quadratic_csv.py | 14 +- src/omlt/__init__.py | 7 +- src/omlt/block.py | 20 +- src/omlt/formulation.py | 33 +- src/omlt/gbt/__init__.py | 5 +- src/omlt/gbt/gbt_formulation.py | 83 +++-- src/omlt/gbt/model.py | 37 +- src/omlt/io/__init__.py | 6 + src/omlt/io/input_bounds.py | 24 +- src/omlt/io/keras/__init__.py | 2 + src/omlt/io/keras/keras_reader.py | 29 +- src/omlt/io/onnx.py | 16 +- src/omlt/io/onnx_parser.py | 348 +++++++++++------- src/omlt/io/torch_geometric/__init__.py | 6 + .../torch_geometric/build_gnn_formulation.py | 36 +- .../torch_geometric/torch_geometric_reader.py | 48 +-- src/omlt/linear_tree/__init__.py | 9 +- src/omlt/linear_tree/lt_definition.py | 94 ++--- src/omlt/linear_tree/lt_formulation.py | 74 ++-- src/omlt/neuralnet/__init__.py | 19 +- src/omlt/neuralnet/activations/__init__.py | 22 +- src/omlt/neuralnet/activations/linear.py | 5 +- src/omlt/neuralnet/activations/relu.py | 11 +- src/omlt/neuralnet/activations/smooth.py | 30 +- src/omlt/neuralnet/layer.py | 182 ++++----- src/omlt/neuralnet/layers/__init__.py | 12 +- src/omlt/neuralnet/layers/full_space.py | 107 +++--- src/omlt/neuralnet/layers/partition_based.py | 53 +-- src/omlt/neuralnet/layers/reduced_space.py | 13 +- src/omlt/neuralnet/network_definition.py | 58 +-- src/omlt/neuralnet/nn_formulation.py | 92 ++--- src/omlt/scaling.py | 112 +++--- tests/conftest.py | 32 +- tests/gbt/test_gbt_formulation.py | 23 +- tests/io/test_input_bounds.py | 2 +- tests/io/test_keras_reader.py | 12 +- tests/io/test_onnx_parser.py | 141 ++++--- tests/io/test_torch_geometric.py | 16 +- tests/linear_tree/test_lt_formulation.py | 94 ++--- tests/neuralnet/test_keras.py | 42 ++- tests/neuralnet/test_layer.py | 21 +- tests/neuralnet/test_network_definition.py | 36 +- tests/neuralnet/test_nn_formulation.py | 269 +++++++------- tests/neuralnet/test_onnx.py | 20 +- tests/neuralnet/test_relu.py | 69 ++-- tests/neuralnet/train_keras_models.py | 27 +- tests/notebooks/test_run_notebooks.py | 10 +- tests/test_block.py | 33 +- tests/test_formulation.py | 7 +- tests/test_scaling.py | 61 ++- 50 files changed, 1385 insertions(+), 1137 deletions(-) diff --git a/docs/notebooks/data/build_sin_quadratic_csv.py b/docs/notebooks/data/build_sin_quadratic_csv.py index 6506022a..72e6c554 100644 --- a/docs/notebooks/data/build_sin_quadratic_csv.py +++ b/docs/notebooks/data/build_sin_quadratic_csv.py @@ -1,5 +1,4 @@ -from random import random - +import matplotlib.pyplot as plt import numpy as np import 
pandas as pd @@ -7,14 +6,15 @@ w = 5 x = np.linspace(-2, 2, n_samples) -df = pd.DataFrame(x, columns=["x"]) -df["y"] = ( +rng = np.random.default_rng() +sin_quads = pd.DataFrame(x, columns=["x"]) +sin_quads["y"] = ( np.sin(w * x) + x**2 - + np.array([np.random.uniform() * 0.1 for _ in range(n_samples)]) + + np.array([rng.uniform() * 0.1 for _ in range(n_samples)]) ) -plt.plot(df["x"], df["y"]) +plt.plot(sin_quads["x"], sin_quads["y"]) plt.show() -df.to_csv("sin_quadratic.csv") +sin_quads.to_csv("sin_quadratic.csv") diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 8e702f0b..3bf95df2 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -1,8 +1,9 @@ -""" -OMLT +"""OMLT. + ==== -OMLT is a Python package for representing machine learning models (neural networks and gradient-boosted trees) within the Pyomo optimization environment. +OMLT is a Python package for representing machine learning models (neural networks +and gradient-boosted trees) within the Pyomo optimization environment. The package provides various optimization formulations for machine learning models (such as full-space, reduced-space, and MILP) as well as an interface to import sequential Keras and general ONNX models. diff --git a/src/omlt/block.py b/src/omlt/block.py index a6c7bbf2..0a03838d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -1,4 +1,5 @@ -""" +"""OmltBlock. + The omlt.block module contains the implementation of the OmltBlock class. This class is used in combination with a formulation object to construct the necessary constraints and variables to represent ML models. @@ -23,7 +24,6 @@ class is used in combination with a formulation object to construct the pyo.assert_optimal_termination(status) """ -import warnings import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -32,13 +32,14 @@ class is used in combination with a formulation object to construct the @declare_custom_block(name="OmltBlock") class OmltBlockData(_BlockData): def __init__(self, component): - super(OmltBlockData, self).__init__(component) + super().__init__(component) self.__formulation = None self.__input_indexes = None self.__output_indexes = None def _setup_inputs_outputs(self, *, input_indexes, output_indexes): - """ + """Setup inputs and outputs. + This function should be called by the derived class to create the inputs and outputs on the block @@ -53,10 +54,10 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.__input_indexes = input_indexes self.__output_indexes = output_indexes if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide more contextual error msg - raise ValueError( - "OmltBlock must have at least one input and at least one output." - ) + # TODO: implement this check higher up in the class hierarchy to provide + # more contextual error msg + msg = "OmltBlock must have at least one input and at least one output." + raise ValueError(msg) self.inputs_set = pyo.Set(initialize=input_indexes) self.inputs = pyo.Var(self.inputs_set, initialize=0) @@ -64,7 +65,8 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.outputs = pyo.Var(self.outputs_set, initialize=0) def build_formulation(self, formulation): - """ + """Build formulation. + Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network formulation. 
The formulation object can be accessed later through the diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index fd83ae86..7097fbf1 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -5,7 +5,8 @@ class _PyomoFormulationInterface(abc.ABC): - """ + """Pyomo Formulation Interface. + Base class interface for a Pyomo formulation object. This class is largely internal, and developers of new formulations should derive from _PyomoFormulation. @@ -23,42 +24,46 @@ def _set_block(self, block): @abc.abstractmethod def block(self): """Return the block associated with this formulation.""" - pass @property @abc.abstractmethod def input_indexes(self): - """Return the indices corresponding to the inputs of the + """Input indexes. + + Return the indices corresponding to the inputs of the ML model. This is a list of entries (which may be tuples for higher dimensional inputs). """ - pass @property @abc.abstractmethod def output_indexes(self): - """Return the indices corresponding to the outputs of the + """Output indexes. + + Return the indices corresponding to the outputs of the ML model. This is a list of entries (which may be tuples for higher dimensional outputs). """ - pass @abc.abstractmethod def _build_formulation(self): - """This method is called by the OmltBlock object to build the + """Build formulation. + + This method is called by the OmltBlock object to build the corresponding mathematical formulation of the model. """ - pass class _PyomoFormulation(_PyomoFormulationInterface): - """ + """Pyomo Formulation. + This is a base class for different Pyomo formulations. To create a new - formulation, inherit from this class and implement the abstract methods and properties. + formulation, inherit from this class and implement the abstract methods + and properties. """ def __init__(self): - super(_PyomoFormulation, self).__init__() + super().__init__() self.__block = None def _set_block(self, block): @@ -66,7 +71,11 @@ def _set_block(self, block): @property def block(self): - """The underlying block containing the constraints / variables for this formulation.""" + """Block. + + The underlying block containing the constraints / variables for this + formulation. + """ return self.__block() diff --git a/src/omlt/gbt/__init__.py b/src/omlt/gbt/__init__.py index f62ed421..ebf2bb1c 100644 --- a/src/omlt/gbt/__init__.py +++ b/src/omlt/gbt/__init__.py @@ -1,4 +1,5 @@ -r""" +r"""Gradient-Boosted Trees formulation. + We use the following notation to describe the gradient-boosted trees formulation: .. math:: @@ -25,3 +26,5 @@ from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel + +__all__ = ["GBTBigMFormulation", "GradientBoostedTreeModel"] diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index f2d01296..a51bec98 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -8,17 +8,17 @@ class GBTBigMFormulation(_PyomoFormulation): - """ - This class is the entry-point to build gradient-boosted trees formulations. + """This class is the entry-point to build gradient-boosted trees formulations. This class iterates over all trees in the ensemble and generates constraints to enforce splitting rules according to: - References + References: ---------- * Misic, V. "Optimization of tree ensembles." Operations Research 68.5 (2020): 1605-1624. - * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with gradient-boosted trees embedded." + * Mistry, M., et al. 
"Mixed-integer convex nonlinear optimization with + gradient-boosted trees embedded." INFORMS Journal on Computing (2020). Parameters @@ -28,6 +28,7 @@ class GBTBigMFormulation(_PyomoFormulation): """ def __init__(self, gbt_model): + """Constructor.""" super().__init__() self.model_definition = gbt_model @@ -42,7 +43,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -60,8 +63,7 @@ def _build_formulation(self): def add_formulation_to_block(block, model_definition, input_vars, output_vars): - r""" - Adds the gradient-boosted trees formulation to the given Pyomo block. + r"""Adds the gradient-boosted trees formulation to the given Pyomo block. .. math:: \begin{align*} @@ -73,7 +75,8 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): \sum\limits_{l \in \text{Right}_{t,s}} z_{t,l} &\leq 1 - y_{i(s),j(s)}, && \forall t \in T, \forall s \in V_t, \\ y_{i,j} &\leq y_{i,j+1}, - && \forall i \in \left [ n \right ], \forall j \in \left [ m_i - 1 \right ], \\ + && \forall i \in \left [ n \right ], \\ + \forall j \in \left [ m_i - 1 \right ], \\ x_{i} &\geq v_{i,0} + \sum\limits_{j=1}^{m_i} \left (v_{i,j} - v_{i,j-1} \right ) \left ( 1 - y_{i,j} \right ), @@ -84,11 +87,12 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): \end{align*} - References + References: ---------- * Misic, V. "Optimization of tree ensembles." Operations Research 68.5 (2020): 1605-1624. - * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with gradient-boosted trees embedded." + * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with + gradient-boosted trees embedded." INFORMS Journal on Computing (2020). Parameters @@ -142,7 +146,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): tree_ids = set(nodes_tree_ids) feature_ids = set(nodes_feature_ids) - continuous_vars = dict() + continuous_vars = {} for var_idx in input_vars: var = input_vars[var_idx] @@ -154,7 +158,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): domain=pe.Reals, ) - branch_value_by_feature_id = dict() + branch_value_by_feature_id = {} branch_value_by_feature_id = collections.defaultdict(list) for f in feature_ids: @@ -164,15 +168,17 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): y_index = [ (f, bi) - for f in continuous_vars.keys() + for f in continuous_vars for bi, _ in enumerate(branch_value_by_feature_id[f]) ] block.y = pe.Var(y_index, domain=pe.Binary) @block.Constraint(tree_ids) def single_leaf(b, tree_id): - r""" - Add constraint to ensure that only one leaf per tree is active, Mistry et al. Equ. (3b). + r"""Single leaf constraint. + + Add constraint to ensure that only one leaf per tree is active, + Mistry et al. Equ. (3b). .. math:: \begin{align*} \sum\limits_{l \in L_t} z_{t,l} &= 1, && \forall t \in T @@ -198,22 +204,28 @@ def _branching_y(tree_id, branch_node_id): feature_id = nodes_feature_ids[node_mask] branch_value = nodes_values[node_mask] if len(branch_value) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a branch value." 
+ msg = ( + "The given tree_id and branch_node_id do not uniquely identify a" + " branch value." ) + raise ValueError(msg) if len(feature_id) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a feature." + msg = ( + "The given tree_id and branch_node_id do not uniquely identify a" + " feature." ) + raise ValueError(msg) feature_id = feature_id[0] branch_value = branch_value[0] (branch_y_idx,) = np.where( branch_value_by_feature_id[feature_id] == branch_value ) if len(branch_y_idx) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a branch index." + msg = ( + "The given tree_id and branch_node_id do not uniquely identify a branch" + " index." ) + raise ValueError(msg) return block.y[feature_id, branch_y_idx[0]] def _sum_of_z_l(tree_id, start_node_id): @@ -235,7 +247,8 @@ def _sum_of_z_l(tree_id, start_node_id): @block.Constraint(nodes_tree_branch_ids) def left_split(b, tree_id, branch_node_id): - r""" + r"""Left split. + Add constraint to activate all left splits leading to an active leaf, Mistry et al. Equ. (3c). .. math:: @@ -252,7 +265,8 @@ def left_split(b, tree_id, branch_node_id): @block.Constraint(nodes_tree_branch_ids) def right_split(b, tree_id, branch_node_id): - r""" + r"""Right split. + Add constraint to activate all right splits leading to an active leaf, Mistry et al. Equ. (3d). .. math:: @@ -269,8 +283,8 @@ def right_split(b, tree_id, branch_node_id): @block.Constraint(y_index) def order_y(b, feature_id, branch_y_idx): - r""" - Add constraint to activate splits in the correct order. + r"""Add constraint to activate splits in the correct order. + Mistry et al. Equ. (3e). .. math:: \begin{align*} @@ -285,8 +299,11 @@ def order_y(b, feature_id, branch_y_idx): @block.Constraint(y_index) def var_lower(b, feature_id, branch_y_idx): - r""" - Add constraint to link discrete tree splits to lower bound of continuous variables. + r"""Lower bound constraint. + + Add constraint to link discrete tree splits to lower bound of continuous + variables. + Mistry et al. Equ. (4a). .. math:: \begin{align*} @@ -304,8 +321,10 @@ def var_lower(b, feature_id, branch_y_idx): @block.Constraint(y_index) def var_upper(b, feature_id, branch_y_idx): - r""" - Add constraint to link discrete tree splits to upper bound of continuous variables. + r"""Upper bound constraint. + + Add constraint to link discrete tree splits to upper bound of continuous + variables. Mistry et al. Equ. (4b). .. math:: \begin{align*} @@ -322,8 +341,8 @@ def var_upper(b, feature_id, branch_y_idx): @block.Constraint() def tree_mean_value(b): - r""" - Add constraint to link block output tree model mean. + r"""Add constraint to link block output tree model mean. + Mistry et al. Equ. (3a). .. math:: \begin{align*} @@ -344,7 +363,7 @@ def tree_mean_value(b): def _node_attributes(node): - attr = dict() + attr = {} for at in node.attribute: attr[at.name] = at return attr diff --git a/src/omlt/gbt/model.py b/src/omlt/gbt/model.py index 9bac2590..0fbc3f7e 100644 --- a/src/omlt/gbt/model.py +++ b/src/omlt/gbt/model.py @@ -1,6 +1,7 @@ class GradientBoostedTreeModel: def __init__(self, onnx_model, scaling_object=None, scaled_input_bounds=None): - """ + """Constructor. 
+ Create a network definition object used to create the gradient-boosted trees formulation in Pyomo @@ -25,27 +26,27 @@ def __init__(self, onnx_model, scaling_object=None, scaled_input_bounds=None): @property def onnx_model(self): - """Returns underlying onnx model of the tree model being used""" + """Returns underlying onnx model of the tree model being used.""" return self.__model @property def n_inputs(self): - """Returns the number of input variables""" + """Returns the number of input variables.""" return self.__n_inputs @property def n_outputs(self): - """Returns the number of output variables""" + """Returns the number of output variables.""" return self.__n_outputs @property def scaling_object(self): - """Return an instance of the scaling object that supports the ScalingInterface""" + """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Return a list of tuples containing lower and upper bounds of tree ensemble inputs""" + """Return a list of tuples of lower and upper bounds of tree ensemble inputs.""" return self.__scaled_input_bounds @scaling_object.setter @@ -54,27 +55,27 @@ def scaling_object(self, scaling_object): def _model_num_inputs(model): - """Returns the number of input variables""" + """Returns the number of input variables.""" graph = model.graph if len(graph.input) != 1: - raise ValueError( - f"Model graph input field is multi-valued {graph.input}. A single value is required." - ) + msg = (f"Model graph input field is multi-valued {graph.input}." + " A single value is required.") + raise ValueError(msg) return _tensor_size(graph.input[0]) def _model_num_outputs(model): - """Returns the number of output variables""" + """Returns the number of output variables.""" graph = model.graph if len(graph.output) != 1: - raise ValueError( - f"Model graph output field is multi-valued {graph.output}. A single value is required." - ) + msg = (f"Model graph output field is multi-valued {graph.output}." + " A single value is required.") + raise ValueError(msg) return _tensor_size(graph.output[0]) def _tensor_size(tensor): - """Returns the size of an input tensor""" + """Returns the size of an input tensor.""" tensor_type = tensor.type.tensor_type size = None dim_values = [ @@ -85,7 +86,9 @@ def _tensor_size(tensor): if len(dim_values) == 1: size = dim_values[0] elif dim_values == []: - raise ValueError(f"Tensor {tensor} has no positive dimensions.") + msg = f"Tensor {tensor} has no positive dimensions." + raise ValueError(msg) else: - raise ValueError(f"Tensor {tensor} has multiple positive dimensions.") + msg = f"Tensor {tensor} has multiple positive dimensions."
+ raise ValueError(msg) return size diff --git a/src/omlt/io/__init__.py b/src/omlt/io/__init__.py index 6933e312..b568fb90 100644 --- a/src/omlt/io/__init__.py +++ b/src/omlt/io/__init__.py @@ -14,3 +14,9 @@ if keras_available: from omlt.io.keras import load_keras_sequential + +__all__ = [ + "keras_available", "onnx_available", "torch_available", "torch_geometric_available", + "load_onnx_neural_network", "load_onnx_neural_network_with_bounds", + "write_onnx_model_with_bounds", "load_keras_sequential" +] diff --git a/src/omlt/io/input_bounds.py b/src/omlt/io/input_bounds.py index 7bdb8ea8..9826d498 100644 --- a/src/omlt/io/input_bounds.py +++ b/src/omlt/io/input_bounds.py @@ -1,20 +1,17 @@ import json +from pathlib import Path def write_input_bounds(input_bounds_filename, input_bounds): - """ - Write the specified input bounds to the given file. - """ + """Write the specified input bounds to the given file.""" input_bounds = _prepare_input_bounds(input_bounds) - with open(input_bounds_filename, "w") as f: + with Path(input_bounds_filename).open("w") as f: json.dump(input_bounds, f) def load_input_bounds(input_bounds_filename): - """ - Read the input bounds from the given file. - """ - with open(input_bounds_filename, "r") as f: + """Read the input bounds from the given file.""" + with Path(input_bounds_filename).open() as f: raw_input_bounds = json.load(f) return dict(_parse_raw_input_bounds(d) for d in raw_input_bounds) @@ -26,12 +23,11 @@ def _prepare_input_bounds(input_bounds): {"key": i, "lower_bound": lb, "upper_bound": ub} for i, (lb, ub) in enumerate(input_bounds) ] - else: - # users should have passed a dict-like - return [ - {"key": key, "lower_bound": lb, "upper_bound": ub} - for key, (lb, ub) in input_bounds.items() - ] + # users should have passed a dict-like + return [ + {"key": key, "lower_bound": lb, "upper_bound": ub} + for key, (lb, ub) in input_bounds.items() + ] def _parse_raw_input_bounds(raw): diff --git a/src/omlt/io/keras/__init__.py b/src/omlt/io/keras/__init__.py index 72f6931a..bd9bbc3e 100644 --- a/src/omlt/io/keras/__init__.py +++ b/src/omlt/io/keras/__init__.py @@ -1 +1,3 @@ from omlt.io.keras.keras_reader import load_keras_sequential + +__all__ = ["load_keras_sequential"] diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index daccf68b..3ec0aaaa 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -1,4 +1,4 @@ -import tensorflow.keras as keras +from tensorflow import keras from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition @@ -7,7 +8,9 @@ def load_keras_sequential( nn, scaling_object=None, scaled_input_bounds=None, unscaled_input_bounds=None ): - """ + """Load Keras sequential network. + Load a keras neural network model (built with Sequential) into an OMLT network definition object. This network definition object can be used in different formulations. @@ -17,8 +18,9 @@ def load_keras_sequential( nn : keras.model A keras model that was built with Sequential scaling_object : instance of ScalingInterface or None - Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + Provide an instance of a scaling object to use to scale inputs --> scaled_inputs + and scaled_outputs --> outputs. If None, no scaling is performed. + See scaling.py.
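As a usage aside for the input-bounds helpers revised above, a small round-trip sketch (the file name is illustrative):

```python
from omlt.io.input_bounds import load_input_bounds, write_input_bounds

# Bounds may be given as a list of (lb, ub) pairs or a dict keyed by input index.
bounds = {0: (-1.0, 1.0), 1: (0.0, 2.0)}
write_input_bounds("input_bounds.json", bounds)
reloaded = load_input_bounds("input_bounds.json")
```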
scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). If None, then no bounds @@ -29,7+31,7 @@ def load_keras_sequential( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- NetworkDefinition """ @@ -45,15 +47,16 @@ def load_keras_sequential( prev_layer = InputLayer([n_inputs]) net.add_layer(prev_layer) - for l in nn.layers: - cfg = l.get_config() - if not isinstance(l, keras.layers.Dense): - raise ValueError( - "Layer type {} encountered. The function load_keras_sequential " - "only supports dense layers at this time. Consider using " - "ONNX and the ONNX parser".format(type(l)) + for layer in nn.layers: + cfg = layer.get_config() + if not isinstance(layer, keras.layers.Dense): + msg = ( + f"Layer type {type(layer)} encountered. The load_keras_sequential " + "function only supports dense layers at this time. Consider using " + "ONNX and the ONNX parser." ) - weights, biases = l.get_weights() + raise TypeError(msg) + weights, biases = layer.get_weights() n_layer_inputs, n_layer_nodes = weights.shape dense_layer = DenseLayer( diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index d41983c2..9676ea31 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -1,4 +1,3 @@ -import json from pathlib import Path import onnx @@ -8,8 +7,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): - """ - Write the ONNX model to the given file. + """Write the ONNX model to the given file. If `input_bounds` is not None, write it alongside the ONNX model. @@ -23,7 +21,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): bounds on the input variables """ if onnx_model is not None: - with open(filename, "wb") as f: + with Path(filename).open("wb") as f: f.write(onnx_model.SerializeToString()) if input_bounds is not None: @@ -31,15 +29,14 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): def load_onnx_neural_network_with_bounds(filename): - """ - Load a NetworkDefinition with input bounds from an onnx object. + """Load a NetworkDefinition with input bounds from an onnx object. Parameters ---------- filename : str the path where the ONNX model and input bounds file are written - Returns + Returns: ------- NetworkDefinition """ @@ -53,8 +50,7 @@ def load_onnx_neural_network_with_bounds(filename): def load_onnx_neural_network(onnx, scaling_object=None, input_bounds=None): - """ - Load a NetworkDefinition from an onnx object. + """Load a NetworkDefinition from an onnx object.
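For the onnx.py helpers in this hunk, a minimal write/load sketch; the file names are illustrative, and `trained_net.onnx` is assumed to hold an ONNX ModelProto whose flattened input has three entries:

```python
import onnx

from omlt.io.onnx import (
    load_onnx_neural_network_with_bounds,
    write_onnx_model_with_bounds,
)

onnx_model = onnx.load("trained_net.onnx")
input_bounds = [(-1.0, 1.0)] * 3  # one (lb, ub) pair per network input
write_onnx_model_with_bounds("net_with_bounds.onnx", onnx_model, input_bounds)
net = load_onnx_neural_network_with_bounds("net_with_bounds.onnx")
```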
Parameters ---------- @@ -63,7 +59,7 @@ def load_onnx_neural_network(onnx, scaling_object=None, input_bounds=None): scaling_object : instance of object supporting ScalingInterface input_bounds : list of tuples - Returns + Returns: ------- NetworkDefinition """ diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py index 511261c0..979b437c 100644 --- a/src/omlt/io/onnx_parser.py +++ b/src/omlt/io/onnx_parser.py @@ -1,4 +1,5 @@ import math +from typing import Any import numpy as np from onnx import numpy_helper @@ -14,11 +15,23 @@ _ACTIVATION_OP_TYPES = ["Relu", "Sigmoid", "LogSoftmax", "Tanh", "Softplus"] _POOLING_OP_TYPES = ["MaxPool"] - +DENSE_INPUT_DIMENSIONS = 2 +GEMM_INPUT_DIMENSIONS = 3 +CONV_INPUT_DIMENSIONS = [2, 3] +TWO_D_IMAGE_W_CHANNELS = 3 +RESHAPE_INPUT_DIMENSIONS = 2 +MAXPOOL_INPUT_DIMENSIONS = 1 +MAXPOOL_INPUT_OUTPUT_W_BATCHES = 4 +# Attribute types enum: +ATTR_FLOAT = 1 +ATTR_INT = 2 +ATTR_TENSOR = 4 +ATTR_INTS = 7 class NetworkParser: - """ - References + """Network Parser. + + References: ---------- * https://github.com/onnx/onnx/blob/master/docs/Operators.md """ @@ -42,48 +55,50 @@ def parse_network(self, graph, scaling_object, input_bounds): self._graph = graph # initializers contain constant data - initializers = dict() + initializers = {} for initializer in self._graph.initializer: initializers[initializer.name] = numpy_helper.to_array(initializer) self._initializers = initializers # Build graph - nodes = dict() - nodes_by_output = dict() + nodes = {} + nodes_by_output = {} inputs = set() outputs = set() - self._node_map = dict() + self._node_map = {} network = NetworkDefinition( scaling_object=scaling_object, scaled_input_bounds=input_bounds ) network_input = None - for input in self._graph.input: - nodes[input.name] = ("input", input.type, []) - nodes_by_output[input.name] = input.name - inputs.add(input.name) + for input_node in self._graph.input: + nodes[input_node.name] = ("input", input_node.type, []) + nodes_by_output[input_node.name] = input_node.name + inputs.add(input_node.name) # onnx inputs are tensors. Flatten tensors to a vector. dim_value = None size = [] - for dim in input.type.tensor_type.shape.dim: + for dim in input_node.type.tensor_type.shape.dim: if dim.dim_value > 0: if dim_value is None: dim_value = 1 size.append(dim.dim_value) dim_value *= dim.dim_value if dim_value is None: - raise ValueError( + msg = ( f'All dimensions in graph "{graph.name}" input tensor have 0 value.' ) + raise ValueError(msg) assert network_input is None network_input = InputLayer(size) - self._node_map[input.name] = network_input + self._node_map[input_node.name] = network_input network.add_layer(network_input) if network_input is None: - raise ValueError(f'No valid input layer found in graph "{graph.name}".') + msg = f'No valid input layer found in graph "{graph.name}".' 
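The input-handling loop above flattens each ONNX input tensor by keeping only its positive dimensions (a zero dim, e.g. a dynamic batch axis, is skipped) and rejects inputs with none. A standalone restatement of that rule as a hypothetical helper, not the parser's own code:

```python
def input_size(dims: list[int]) -> list[int]:
    """Keep positive dims; a tensor with none of them has no usable size."""
    size = [d for d in dims if d > 0]
    if not size:
        raise ValueError("All dimensions in input tensor have 0 value.")
    return size

assert input_size([0, 1, 28, 28]) == [1, 28, 28]  # batch axis of 0 is dropped
```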
+ raise ValueError(msg) self._nodes = nodes self._nodes_by_output = nodes_by_output @@ -97,37 +112,39 @@ def parse_network(self, graph, scaling_object, input_bounds): for output in node.output: nodes_by_output[output] = node.name - self._constants = dict() + self._constants = {} for node in self._graph.node: # add node not connected to anything self._nodes[node.name] = ("node", node, []) # Map inputs by their output name node_inputs = [ - nodes_by_output[input] - for input in node.input - if input not in initializers + nodes_by_output[input_node] + for input_node in node.input + if input_node not in initializers ] if node_inputs: # Now connect inputs to the current node - for input in node_inputs: - self._nodes[input][2].append(node.name) + for input_node in node_inputs: + self._nodes[input_node][2].append(node.name) elif node.op_type == "Constant": for output in node.output: value = _parse_constant_value(node) self._constants[output] = value else: - raise ValueError( - f'Nodes must have inputs or have op_type "Constant". Node "{node.name}" has no inputs and op_type "{node.op_type}".' + msg = ( + 'Nodes must have inputs or have op_type "Constant". Node' + f' "{node.name}" has no inputs and op_type "{node.op_type}".' ) + raise ValueError(msg) # traverse graph self._node_stack = list(inputs) - self._weights = dict() - self._biases = dict() - self._activations = dict() + self._weights = {} + self._biases = {} + self._activations = {} while self._node_stack: node_name = self._node_stack.pop() @@ -141,8 +158,8 @@ def parse_network(self, graph, scaling_object, input_bounds): for layer_input in new_layer_inputs: network.add_edge(layer_input, new_layer) else: - for next in next_nodes: - self._node_stack.append(next) + for next_node in next_nodes: + self._node_stack.append(next_node) return network @@ -167,41 +184,55 @@ def _visit_node(self, node, next_nodes): node, next_nodes ) else: - raise Exception(f"Unhandled node type {node.op_type}") + msg = f"Unhandled node type {node.op_type}" + raise ValueError(msg) - for next in next_nodes: - self._node_stack.append(next) + for next_node in next_nodes: + self._node_stack.append(next_node) return new_layer, new_layer_inputs - def _consume_dense_nodes(self, node, next_nodes): + def _consume_dense_nodes( + self, node: Any, next_nodes: Any + ) -> tuple[Any, Any, list[Any]]: """Starting from a MatMul node, consume nodes to form a dense Ax + b node.""" if node.op_type != "MatMul": - raise ValueError( - f"{node.name} is a {node.op_type} node, only MatMul nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " MatMul nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) != 2: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + + if len(node.input) != DENSE_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with 2" + " input dimensions can be used as starting points for parsing." 
) + raise ValueError(msg) [in_0, in_1] = list(node.input) input_layer, transformer = self._node_input_and_transformer(in_0) node_weights = self._initializers[in_1] if len(next_nodes) != 1: - raise ValueError( - f"Next nodes must have length 1, {next_nodes} has length {len(next_nodes)}" + msg = ( + f"Next nodes must have length 1, {next_nodes} has length" + f" {len(next_nodes)}" ) + raise ValueError(msg) # expect 'Add' node ahead type_, node, maybe_next_nodes = self._nodes[next_nodes[0]] if type_ != "node": - raise TypeError(f"Expected a node next, got a {type_} instead.") + msg = f"Expected a node next, got a {type_} instead." + raise TypeError(msg) if node.op_type != "Add": - raise ValueError( - f"The first node to be consumed, {node.name}, is a {node.op_type} node. Only Add nodes are supported." + msg = ( + f"The first node to be consumed, {node.name}, is a {node.op_type} node." + " Only Add nodes are supported." ) + raise ValueError(msg) # extract biases next_nodes = maybe_next_nodes @@ -212,18 +243,20 @@ def _consume_dense_nodes(self, node, next_nodes): elif in_1 in self._initializers: node_biases = self._initializers[in_1] else: - raise ValueError(f"Node inputs were not found in graph initializers.") - - if len(node_weights.shape) != 2: - raise ValueError(f"Node weights must be a 2-dimensional matrix.") + msg = "Node inputs were not found in graph initializers." + raise ValueError(msg) + if len(node_weights.shape) != DENSE_INPUT_DIMENSIONS: + msg = "Node weights must be a 2-dimensional matrix." + raise ValueError(msg) if node_weights.shape[1] != node_biases.shape[0]: - raise ValueError( - f"Node weights has {node_weights.shape[1]} columns; node biases has {node_biases.shape[0]} rows. These must be equal." + msg = ( + f"Node weights has {node_weights.shape[1]} columns; node biases has " + f"{node_biases.shape[0]} rows. These must be equal." ) + raise ValueError(msg) if len(node.output) != 1: - raise ValueError( - f"Node output is {node.output} but should be a single value." - ) + msg = f"Node output is {node.output} but should be a single value." + raise ValueError(msg) input_output_size = _get_input_output_size(input_layer, transformer) @@ -254,13 +287,18 @@ def _consume_dense_nodes(self, node, next_nodes): def _consume_gemm_dense_nodes(self, node, next_nodes): """Starting from a Gemm node, consume nodes to form a dense aAB + bC node.""" if node.op_type != "Gemm": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Gemm nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Gemm nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) != 3: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 3 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) != GEMM_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 3 input dimensions can be used as starting points for parsing." ) + raise ValueError(msg) attr = _collect_attributes(node) alpha = attr["alpha"] @@ -304,20 +342,26 @@ def _consume_gemm_dense_nodes(self, node, next_nodes): return next_nodes, dense_layer, [input_layer] def _consume_conv_nodes(self, node, next_nodes): - """ + """Consume Conv nodes. + Starting from a Conv node, consume nodes to form a convolution node with (optional) activation function. 
""" if node.op_type != "Conv": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Conv nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Conv nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) not in [2, 3]: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) not in CONV_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 2 or 3 input dimensions can be used as starting points for parsing." ) + raise ValueError(msg) - if len(node.input) == 2: + if len(node.input) == CONV_INPUT_DIMENSIONS[0]: [in_0, in_1] = list(node.input) in_2 = None else: @@ -327,51 +371,59 @@ def _consume_conv_nodes(self, node, next_nodes): weights = self._initializers[in_1] [out_channels, in_channels, *kernel_shape] = weights.shape - if in_2 is None: - biases = np.zeros(out_channels) - else: - biases = self._initializers[in_2] + biases = np.zeros(out_channels) if in_2 is None else self._initializers[in_2] attr = _collect_attributes(node) strides = attr["strides"] # check only kernel shape and stride are set if attr["kernel_shape"] != kernel_shape: - raise ValueError( - f"Kernel shape attribute {attr['kernel_shape']} does not match initialized kernel shape {kernel_shape}." + msg = ( + f"Kernel shape attribute {attr['kernel_shape']} does not match" + f" initialized kernel shape {kernel_shape}." ) + raise ValueError(msg) if len(kernel_shape) != len(strides): - raise ValueError( - f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal." + msg = ( + f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} " + f"dimensions. Strides attribute has {len(strides)} dimensions. " + "These must be equal." ) + raise ValueError(msg) if len(input_output_size) != len(kernel_shape) + 1: - raise ValueError( - f"Input/output size ({input_output_size}) must have one more dimension than initialized kernel shape ({kernel_shape})." + msg = ( + f"Input/output size ({input_output_size}) must have one more dimension " + f"than initialized kernel shape ({kernel_shape})." ) + raise ValueError(msg) # Check input, output have correct dimensions if biases.shape != (out_channels,): - raise ValueError( - f"Biases shape {biases.shape} must match output weights channels {(out_channels,)}." + msg = ( + f"Biases shape {biases.shape} must match output weights channels" + f" {(out_channels,)}." ) + raise ValueError(msg) if in_channels != input_output_size[0]: - raise ValueError( - f"Input/output size ({input_output_size}) first dimension must match input weights channels ({in_channels})." + msg = ( + f"Input/output size ({input_output_size}) first dimension must match " + f"input weights channels ({in_channels})." ) + raise ValueError(msg) # Other attributes are not supported if "dilations" in attr and attr["dilations"] != [1, 1]: - raise ValueError( - f"{node} has non-identity dilations ({attr['dilations']}). This is not supported." + msg = ( + f"{node} has non-identity dilations ({attr['dilations']}). This is not" + " supported." ) + raise ValueError(msg) if attr["group"] != 1: - raise ValueError( - f"{node} has multiple groups ({attr['group']}). This is not supported." 
- ) + msg = f"{node} has multiple groups ({attr['group']}). This is unsupported." + raise ValueError(msg) if "pads" in attr and np.any(attr["pads"]): - raise ValueError( - f"{node} has non-zero pads ({attr['pads']}). This is not supported." - ) + msg = f"{node} has non-zero pads ({attr['pads']}). This is not supported." + raise ValueError(msg) # generate new nodes for the node output padding = 0 @@ -391,10 +443,9 @@ def _consume_conv_nodes(self, node, next_nodes): # convolute image one channel at the time # expect 2d image with channels - if len(input_output_size) != 3: - raise ValueError( - f"Expected a 2D image with channels, got {input_output_size}." - ) + if len(input_output_size) != TWO_D_IMAGE_W_CHANNELS: + msg = f"Expected a 2D image with channels, got {input_output_size}." + raise ValueError(msg) conv_layer = ConvLayer2D( input_output_size, @@ -412,13 +463,18 @@ def _consume_conv_nodes(self, node, next_nodes): def _consume_reshape_nodes(self, node, next_nodes): """Parse a Reshape node.""" if node.op_type != "Reshape": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Reshape nodes can be used as starting points for consumption." - ) - if len(node.input) != 2: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." - ) + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Reshape nodes was called. This could indicate changes in the" + " network being parsed." + ) + raise ValueError(msg) + if len(node.input) != RESHAPE_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 2 input dimensions can be used as starting points for parsing." + ) + raise ValueError(msg) [in_0, in_1] = list(node.input) input_layer = self._node_map[in_0] new_shape = self._constants[in_1] @@ -428,37 +484,48 @@ def _consume_reshape_nodes(self, node, next_nodes): return next_nodes def _consume_pool_nodes(self, node, next_nodes): - """ + """Consume MaxPool nodes. + Starting from a MaxPool node, consume nodes to form a pooling node with (optional) activation function. """ if node.op_type not in _POOLING_OP_TYPES: - raise ValueError( - f"{node.name} is a {node.op_type} node, only MaxPool nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " MaxPool nodes was called. This could indicate changes in the" + " network being parsed." ) + raise ValueError(msg) pool_func_name = "max" - # ONNX network should not contain indices output from MaxPool - not supported by OMLT + # ONNX network should not contain indices output from MaxPool - + # not supported by OMLT if len(node.output) != 1: - raise ValueError( - f"The ONNX contains indices output from MaxPool. This is not supported by OMLT." + msg = ( + "The ONNX network contains indices output from MaxPool. This is not" + " supported by OMLT." ) - if len(node.input) != 1: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 1 input dimension can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) != MAXPOOL_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with " + "1 input dimension can be used as starting points for parsing." 
            )
-
+        raise ValueError(msg)
         input_layer, transformer = self._node_input_and_transformer(node.input[0])
         input_output_size = _get_input_output_size(input_layer, transformer)
 
         # currently only support 2D image with channels.
-        if len(input_output_size) == 4:
+        if len(input_output_size) == MAXPOOL_INPUT_OUTPUT_W_BATCHES:
             # this means there is an extra dimension for number of batches
-            # batches not supported, so only accept if they're not there or there is only 1 batch
+            # batches not supported, so only accept if they're not there or there is
+            # only 1 batch
             if input_output_size[0] != 1:
-                raise ValueError(
-                    f"{node.name} has {input_output_size[0]} batches, only a single batch is supported."
+                msg = (
+                    f"{node.name} has {input_output_size[0]} batches, only a single"
+                    " batch is supported."
                 )
+                raise ValueError(msg)
             input_output_size = input_output_size[1:]
 
         in_channels = input_output_size[0]
@@ -471,37 +538,46 @@ def _consume_pool_nodes(self, node, next_nodes):
         # check only kernel shape, stride, storage order are set
         # everything else is not supported
         if "dilations" in attr and attr["dilations"] != [1, 1]:
-            raise ValueError(
-                f"{node.name} has non-identity dilations ({attr['dilations']}). This is not supported."
+            msg = (
+                f"{node.name} has non-identity dilations ({attr['dilations']})."
+                " This is not supported."
             )
+            raise ValueError(msg)
         if "pads" in attr and np.any(attr["pads"]):
-            raise ValueError(
-                f"{node.name} has non-zero pads ({attr['pads']}). This is not supported."
+            msg = (
+                f"{node.name} has non-zero pads ({attr['pads']})."
+                " This is not supported."
             )
+            raise ValueError(msg)
         if ("auto_pad" in attr) and (attr["auto_pad"] != "NOTSET"):
-            raise ValueError(
-                f"{node.name} has autopad set ({attr['auto_pad']}). This is not supported."
+            msg = (
+                f"{node.name} has autopad set ({attr['auto_pad']})."
+                " This is not supported."
            )
+            raise ValueError(msg)
         if len(kernel_shape) != len(strides):
-            raise ValueError(
-                f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal."
+            msg = (
+                f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. "
+                f"Strides attribute has {len(strides)} dimensions. These must be equal."
            )
+            raise ValueError(msg)
         if len(input_output_size) != len(kernel_shape) + 1:
-            raise ValueError(
-                f"Input/output size ({input_output_size}) must have one more dimension than kernel shape ({kernel_shape})."
+            msg = (
+                f"Input/output size ({input_output_size}) must have one more dimension"
+                f" than kernel shape ({kernel_shape})."
) + raise ValueError(msg) output_shape_wrapper = math.floor if "ceil_mode" in attr and attr["ceil_mode"] == 1: output_shape_wrapper = math.ceil - output_size = [in_channels] - for i in range(1, len(input_output_size)): - output_size.append( - output_shape_wrapper( - (input_output_size[i] - kernel_shape[i - 1]) / strides[i - 1] + 1 - ) + output_size = [in_channels] + [ + output_shape_wrapper( + (input_output_size[i] - kernel_shape[i - 1]) / strides[i - 1] + 1 ) + for i in range(1, len(input_output_size)) + ] activation = "linear" if len(next_nodes) == 1: @@ -532,31 +608,29 @@ def _node_input_and_transformer(self, node_name): if isinstance(maybe_layer, tuple): transformer, input_layer = maybe_layer return input_layer, transformer - else: - return maybe_layer, None + return maybe_layer, None def _collect_attributes(node): - r = dict() + r = {} for attr in node.attribute: - if attr.type == 1: # FLOAT + if attr.type == ATTR_FLOAT: # FLOAT r[attr.name] = attr.f - elif attr.type == 2: # INT + elif attr.type == ATTR_INT: # INT r[attr.name] = int(attr.i) - elif attr.type == 4: # TENSOR + elif attr.type == ATTR_TENSOR: # TENSOR r[attr.name] = numpy_helper.to_array(attr.t) - pass - elif attr.type == 7: # INTS + elif attr.type == ATTR_INTS: # INTS r[attr.name] = list(attr.ints) else: - raise RuntimeError(f"unhandled attribute type {attr.type}") + msg = f"unhandled attribute type {attr.type}" + raise RuntimeError(msg) return r def _parse_constant_value(node): attr = _collect_attributes(node) - value = attr["value"] - return value + return attr["value"] def _get_input_output_size(input_layer, transformer): diff --git a/src/omlt/io/torch_geometric/__init__.py b/src/omlt/io/torch_geometric/__init__.py index ae94d147..4b908c7a 100644 --- a/src/omlt/io/torch_geometric/__init__.py +++ b/src/omlt/io/torch_geometric/__init__.py @@ -5,3 +5,9 @@ from omlt.io.torch_geometric.torch_geometric_reader import ( load_torch_geometric_sequential, ) + +__all__ = [ + "gnn_with_fixed_graph", + "gnn_with_non_fixed_graph", + "load_torch_geometric_sequential", +] diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index 6e2e04ee..66e48775 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -15,9 +15,11 @@ def gnn_with_non_fixed_graph( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """ - Build formulation for a torch_geometric graph neural network model (built with Sequential). - Since the input graph is not fixed, the elements in adjacency matrix are decision variables. + """Graph neural network with non-fixed graph. + + Build formulation for a torch_geometric graph neural network model (built with + Sequential). Since the input graph is not fixed, the elements in adjacency matrix + are decision variables. Parameters ---------- @@ -29,7 +31,8 @@ def gnn_with_non_fixed_graph( The number of nodes of input graph scaling_object : instance of ScalingInterface or None Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + and scaled_outputs --> outputs. If None, no scaling is performed. See + scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). 
        If None, then no bounds
@@ -40,11 +43,10 @@ def gnn_with_non_fixed_graph(
         dictionary will be generated using the provided scaling object.
         If None, then no bounds are specified.
 
-    Returns
+    Returns:
     -------
         OmltBlock (formulated)
     """
-
     # build NetworkDefinition for nn
     net = load_torch_geometric_sequential(
         nn=nn,
@@ -68,7 +70,7 @@ def gnn_with_non_fixed_graph(
     block.symmetric_adjacency = pyo.ConstraintList()
     for u in range(N):
         for v in range(u + 1, N):
-            block.symmetric_adjacency.add((block.A[u, v] == block.A[v, u]))
+            block.symmetric_adjacency.add(block.A[u, v] == block.A[v, u])
 
     # build formulation for GNN
     block.build_formulation(FullSpaceNNFormulation(net))
@@ -85,9 +87,10 @@ def gnn_with_fixed_graph(
     scaled_input_bounds=None,
     unscaled_input_bounds=None,
 ):
-    """
-    Build formulation for a torch_geometric graph neural network model (built with Sequential).
-    Given the adjacency matrix, the input graph structure is fixed.
+    """Graph neural network with fixed graph.
+
+    Build formulation for a torch_geometric graph neural network model (built with
+    Sequential). Given the adjacency matrix, the input graph structure is fixed.
 
     Parameters
     ----------
@@ -101,7 +104,8 @@ def gnn_with_fixed_graph(
         The adjacency matrix of input graph
     scaling_object : instance of ScalingInterface or None
         Provide an instance of a scaling object to use to scale iputs --> scaled_inputs
-        and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py.
+        and scaled_outputs --> outputs. If None, no scaling is performed. See
+        scaling.py.
     scaled_input_bounds : dict or None
         A dict that contains the bounds on the scaled variables (the direct
         inputs to the neural network). If None, then no bounds
@@ -112,13 +116,17 @@ def gnn_with_fixed_graph(
         dictionary will be generated using the provided scaling object.
         If None, then no bounds are specified.
 
-    Returns
+    Returns:
     -------
         OmltBlock (formulated)
     """
-
     # assume the adjacency matrix is always symmetric
-    assert np.array_equal(A, np.transpose(A))
+    if not np.array_equal(A, np.transpose(A)):
+        msg = (
+            f"Adjacency matrix A of the input graph must be symmetrical. {A} was"
+            " provided."
+        )
+        raise ValueError(msg)
 
     # build NetworkDefinition for nn
     net = load_torch_geometric_sequential(
diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py
index 72d594cc..090d9b5a 100644
--- a/src/omlt/io/torch_geometric/torch_geometric_reader.py
+++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py
@@ -7,8 +7,7 @@
 
 def _compute_gcn_norm(A):
-    """
-    Calculate the norm for a GCN layer
+    """Calculate the norm for a GCN layer.
 
     Parameters
     ----------
@@ -26,8 +25,7 @@ def _compute_gcn_norm(A):
 
 def _compute_sage_norm(A, aggr):
-    """
-    Calculate the norm for a SAGE layer
+    """Calculate the norm for a SAGE layer.
 
     Parameters
     ----------
@@ -50,8 +48,7 @@ def _compute_sage_norm(A, aggr):
 
 def _process_gnn_parameters(gnn_weights_uv, gnn_weights_vv, gnn_biases, gnn_norm):
-    """
-    Construct the weights and biases for the GNNLayer class
+    """Construct the weights and biases for the GNNLayer class.
Parameters ---------- @@ -64,7 +61,7 @@ def _process_gnn_parameters(gnn_weights_uv, gnn_weights_vv, gnn_biases, gnn_norm gnn_norm : matrix-like the norm for the GNN layer, shape: (N, N) - Returns + Returns: ------- weights : matrix-like the weights for the GNNLayer class, shape: (N * in_channels, N * out_channels) @@ -113,8 +110,9 @@ def load_torch_geometric_sequential( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """ - Load a torch_geometric graph neural network model (built with Sequential) into + """Load a torch_geometric graph neural network model. + + Load a torch_geometric graph neural network model (built with Sequential) into an OMLT network definition object. This network definition object can be used in different formulations. @@ -128,7 +126,8 @@ def load_torch_geometric_sequential( The adjacency matrix of input graph scaling_object : instance of ScalingInterface or None Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + and scaled_outputs --> outputs. If None, no scaling is performed. See + scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). If None, then no bounds @@ -139,7 +138,7 @@ def load_torch_geometric_sequential( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- NetworkDefinition """ @@ -163,14 +162,16 @@ def load_torch_geometric_sequential( op_name = l.__class__.__name__ if op_name not in _OP_TYPES: - raise ValueError("this operation is not supported") + msg = f"Operation {op_name} is not supported." + raise ValueError(msg) operations.append(op_name) if A is None: # If A is None, then the graph is not fixed. # Only layers in _LAYER_OP_TYPES_NON_FIXED_GRAPH are supported. # Only "sum" aggregation is supported. - # Since all weights and biases are possibly needed, A is set to correspond to a complete graph. + # Since all weights and biases are possibly needed, A is set to correspond to a + # complete graph. for index, l in enumerate(nn): if ( operations[index] @@ -181,16 +182,15 @@ def load_torch_geometric_sequential( warnings.warn( "nonlinear activation results in a MINLP", stacklevel=2 ) - # Linear layers, all activation functions, and all pooling functions are still supported. + # Linear layers, all activation functions, and all pooling functions are + # still supported. continue if operations[index] not in _LAYER_OP_TYPES_NON_FIXED_GRAPH: - raise ValueError( - "this layer is not supported when the graph is not fixed" - ) - elif l.aggr != "sum": - raise ValueError( - "this aggregation is not supported when the graph is not fixed" - ) + msg = "this layer is not supported when the graph is not fixed." 
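# Editor's sketch: when A is None, the reader below falls back to a complete
# graph on N nodes, A = np.ones((N, N)) - np.eye(N). Illustrated here for a
# hypothetical 4-node graph (numpy only; not part of the patch):
import numpy as np

N = 4
A = np.ones((N, N)) - np.eye(N)  # complete graph, no self-loops
assert np.array_equal(A, A.T)    # symmetric, as gnn_with_fixed_graph requires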
+ raise ValueError(msg) + if l.aggr != "sum": + msg = "this aggregation is not supported when the graph is not fixed" + raise ValueError(msg) A = np.ones((N, N)) - np.eye(N) @@ -207,8 +207,10 @@ def load_torch_geometric_sequential( if operations[index] == "Linear": gnn_weights = l.weight.detach().numpy() gnn_biases = l.bias.detach().numpy() - # A linear layer is either applied on each node's features (i.e., prev_layer.output_size[-1] = N * gnn_weights.shape[1]) - # or the features after pooling (i.e., prev_layer.output_size[-1] = gnn_weights.shape[1]) + # A linear layer is either applied on each node's features (i.e., + # prev_layer.output_size[-1] = N * gnn_weights.shape[1]) + # or the features after pooling (i.e., + # prev_layer.output_size[-1] = gnn_weights.shape[1]) gnn_norm = np.eye(prev_layer.output_size[-1] // gnn_weights.shape[1]) weights, biases = _process_gnn_parameters( gnn_weights, gnn_weights, gnn_biases, gnn_norm diff --git a/src/omlt/linear_tree/__init__.py b/src/omlt/linear_tree/__init__.py index 2f89a669..2099e44e 100644 --- a/src/omlt/linear_tree/__init__.py +++ b/src/omlt/linear_tree/__init__.py @@ -1,5 +1,4 @@ -r""" -There are multiple formulations for representing linear model decision trees. +r"""There are multiple formulations for representing linear model decision trees. Please see the following reference: * Ammari et al. (2023) Linear Model Decision Trees as Surrogates in Optimization @@ -23,3 +22,9 @@ LinearTreeGDPFormulation, LinearTreeHybridBigMFormulation, ) + +__all__ = [ + "LinearTreeDefinition", + "LinearTreeGDPFormulation", + "LinearTreeHybridBigMFormulation", +] diff --git a/src/omlt/linear_tree/lt_definition.py b/src/omlt/linear_tree/lt_definition.py index 6bd26c8f..8f944a4a 100644 --- a/src/omlt/linear_tree/lt_definition.py +++ b/src/omlt/linear_tree/lt_definition.py @@ -3,8 +3,7 @@ class LinearTreeDefinition: - """ - Class to represent a linear tree model trained in the linear-tree package + """Class to represent a linear tree model trained in the linear-tree package. Attributes: __model (linear-tree model) : Linear Tree Model trained in linear-tree @@ -27,22 +26,24 @@ def __init__( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """Create a LinearTreeDefinition object and define attributes based on the + """Initialize LinearTreeDefinition. + + Create a LinearTreeDefinition object and define attributes based on the trained linear model decision tree. Arguments: - lt_regressor -- A LinearTreeRegressor model that is trained by the + lt_regressor: A LinearTreeRegressor model that is trained by the linear-tree package Keyword Arguments: - scaling_object -- A scaling object to specify the scaling parameters + scaling_object: A scaling object to specify the scaling parameters for the linear model tree inputs and outputs. If None, then no scaling is performed. (default: {None}) - scaled_input_bounds -- A dict that contains the bounds on the scaled + scaled_input_bounds: A dict that contains the bounds on the scaled variables (the direct inputs to the tree). If None, then the user must specify the bounds via the input_bounds argument. (default: {None}) - unscaled_input_bounds -- A dict that contains the bounds on the + unscaled_input_bounds: A dict that contains the bounds on the variables (the direct inputs to the tree). If None, then the user must specify the scaled bounds via the scaled_input_bounds argument. 
(default: {None}) @@ -65,7 +66,7 @@ def __init__( ) scaled_input_bounds = { - k: (lbs[k], ubs[k]) for k in unscaled_input_bounds.keys() + k: (lbs[k], ubs[k]) for k in unscaled_input_bounds } # If unscaled input bounds provided and no scaler provided, scaled @@ -73,9 +74,8 @@ def __init__( elif unscaled_input_bounds is not None and scaling_object is None: scaled_input_bounds = unscaled_input_bounds elif unscaled_input_bounds is None: - raise ValueError( - "Input Bounds needed to represent linear trees as MIPs" - ) + msg = "Input Bounds needed to represent linear trees as MIPs" + raise ValueError(msg) self.__unscaled_input_bounds = unscaled_input_bounds self.__scaled_input_bounds = scaled_input_bounds @@ -89,48 +89,49 @@ def __init__( @property def scaling_object(self): - """Returns scaling object""" + """Returns scaling object.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Returns dict containing scaled input bounds""" + """Returns dict containing scaled input bounds.""" return self.__scaled_input_bounds @property def splits(self): - """Returns dict containing split information""" + """Returns dict containing split information.""" return self.__splits @property def leaves(self): - """Returns dict containing leaf information""" + """Returns dict containing leaf information.""" return self.__leaves @property def thresholds(self): - """Returns dict containing threshold information""" + """Returns dict containing threshold information.""" return self.__thresholds @property def n_inputs(self): - """Returns number of inputs to the linear tree""" + """Returns number of inputs to the linear tree.""" return self.__n_inputs @property def n_outputs(self): - """Returns number of outputs to the linear tree""" + """Returns number of outputs to the linear tree.""" return self.__n_outputs def _find_all_children_splits(split, splits_dict): - """ + """Find all children splits. + This helper function finds all multigeneration children splits for an argument split. Arguments: - split --The split for which you are trying to find children splits - splits_dict -- A dictionary of all the splits in the tree + split: The split for which you are trying to find children splits + splits_dict: A dictionary of all the splits in the tree Returns: A list containing the Node IDs of all children splits @@ -154,20 +155,19 @@ def _find_all_children_splits(split, splits_dict): def _find_all_children_leaves(split, splits_dict, leaves_dict): - """ + """Find all children leaves. + This helper function finds all multigeneration children leaves for an argument split. Arguments: - split -- The split for which you are trying to find children leaves - splits_dict -- A dictionary of all the split info in the tree - leaves_dict -- A dictionary of all the leaf info in the tree + split: The split for which you are trying to find children leaves + splits_dict: A dictionary of all the split info in the tree + leaves_dict: A dictionary of all the leaf info in the tree Returns: A list containing all the Node IDs of all children leaves """ - all_leaves = [] - # Find all the splits that are children of the relevant split all_splits = _find_all_children_splits(split, splits_dict) @@ -177,20 +177,20 @@ def _find_all_children_leaves(split, splits_dict, leaves_dict): # For each leaf, check if the parents appear in the list of children # splits (all_splits). 
If so, it must be a leaf of the argument split - for leaf in leaves_dict: - if leaves_dict[leaf]["parent"] in all_splits: - all_leaves.append(leaf) - return all_leaves + return [ + leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits + ] def _find_n_inputs(leaves): - """ + """Find n inputs. + Finds the number of inputs using the length of the slope vector in the first leaf Arguments: - leaves -- Dictionary of leaf information + leaves: Dictionary of leaf information Returns: Number of inputs @@ -199,19 +199,19 @@ def _find_n_inputs(leaves): leaf_indices = np.array(list(leaves[tree_indices[0]].keys())) tree_one = tree_indices[0] leaf_one = leaf_indices[0] - n_inputs = len(np.arange(0, len(leaves[tree_one][leaf_one]["slope"]))) - return n_inputs + return len(np.arange(0, len(leaves[tree_one][leaf_one]["slope"]))) def _reassign_none_bounds(leaves, input_bounds): - """ + """Reassign None bounds. + This helper function reassigns bounds that are None to the bounds input by the user Arguments: - leaves -- The dictionary of leaf information. Attribute of the + leaves: The dictionary of leaf information. Attribute of the LinearTreeDefinition object - input_bounds -- The nested dictionary + input_bounds: The nested dictionary Returns: The modified leaves dict without any bounds that are listed as None @@ -231,15 +231,17 @@ def _reassign_none_bounds(leaves, input_bounds): def _parse_tree_data(model, input_bounds): - """ + """Parse tree data. + This function creates the data structures with the information required for creation of the variables, sets, and constraints in the pyomo reformulation of the linear model decision trees. Note that these data structures are attributes of the LinearTreeDefinition Class. Arguments: - model -- Trained linear-tree model or dic containing linear-tree model + model: Trained linear-tree model or dic containing linear-tree model summary (e.g. dict = model.summary()) + input_bounds: Returns: leaves - Dict containing the following information for each leaf: @@ -277,21 +279,23 @@ def _parse_tree_data(model, input_bounds): # Checks to ensure that the input nested dictionary contains the # correct information for entry in model: - if "children" not in model[entry].keys(): + if "children" not in model[entry]: leaves[entry] = model[entry] else: left_child = model[entry]["children"][0] right_child = model[entry]["children"][1] num_splits_in_model += 1 - if left_child not in model.keys() or right_child not in model.keys(): + if left_child not in model or right_child not in model: count += 1 if count > 0 or num_splits_in_model == 0: - raise ValueError( + msg = ( "Input dict must be the summary of the linear-tree model" - + " e.g. dict = model.summary()" + " e.g. dict = model.summary()" ) + raise ValueError(msg) else: - raise TypeError("Model entry must be dict or linear-tree instance") + msg = "Model entry must be dict or linear-tree instance" + raise TypeError(msg) # This loop adds keys for the slopes and intercept and removes the leaf # keys in the splits dictionary diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index 4f83e7f3..5960a442 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -6,7 +6,8 @@ class LinearTreeGDPFormulation(_PyomoFormulation): - r""" + r"""Linear Tree GDP Formulation. + Class to add a Linear Tree GDP formulation to OmltBlock. 
We use Pyomo.GDP to create the disjuncts and disjunctions and then apply a transformation to convert to a mixed-integer programming representation. @@ -45,18 +46,17 @@ class LinearTreeGDPFormulation(_PyomoFormulation): * Ammari et al. (2023) Linear Model Decision Trees as Surrogates in Optimization of Engineering Applications. Computers & Chemical Engineering * Chen et al. (2022) Pyomo.GDP: An ecosystem for logic based modeling and - optimization development. Optimization and Engineering, 23:607–642 + optimization development. Optimization and Engineering, 23:607-642 """ def __init__(self, lt_definition, transformation="bigm"): - """ - Create a LinearTreeGDPFormulation object + """Create a LinearTreeGDPFormulation object. Arguments: - lt_definition -- LinearTreeDefintion Object + lt_definition: LinearTreeDefintion Object Keyword Arguments: - transformation -- choose which Pyomo.GDP formulation to apply. + transformation: choose which Pyomo.GDP formulation to apply. Supported transformations are bigm, hull, mbigm, and custom (default: {'bigm'}) @@ -70,9 +70,8 @@ def __init__(self, lt_definition, transformation="bigm"): # Ensure that the GDP transformation given is supported supported_transformations = ["bigm", "hull", "mbigm", "custom"] if transformation not in supported_transformations: - raise NotImplementedError( - "Supported transformations are: bigm, mbigm, hull, and custom" - ) + msg = "Supported transformations are: bigm, mbigm, hull, and custom" + raise NotImplementedError(msg) @property def input_indexes(self): @@ -85,7 +84,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -104,8 +105,7 @@ def _build_formulation(self): class LinearTreeHybridBigMFormulation(_PyomoFormulation): - r""" - Class to add a Linear Tree Hybrid Big-M formulation to OmltBlock. + r"""Class to add a Linear Tree Hybrid Big-M formulation to OmltBlock. .. math:: \begin{align*} @@ -134,11 +134,10 @@ class LinearTreeHybridBigMFormulation(_PyomoFormulation): """ def __init__(self, lt_definition): - """ - Create a LinearTreeHybridBigMFormulation object + """Create a LinearTreeHybridBigMFormulation object. Arguments: - lt_definition -- LinearTreeDefinition Object + lt_definition: LinearTreeDefinition Object """ super().__init__() self.model_definition = lt_definition @@ -154,7 +153,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -172,13 +173,14 @@ def _build_formulation(self): def _build_output_bounds(model_def, input_bounds): - """ + """Build output bounds. 
+ This helper function develops bounds of the output variable based on the values of the input_bounds and the signs of the slope Arguments: - model_def -- Model definition - input_bounds -- Dict of input bounds + model_def: Model definition + input_bounds: Dict of input bounds Returns: List that contains the conservative lower and upper bounds of the @@ -217,15 +219,14 @@ def _build_output_bounds(model_def, input_bounds): def _add_gdp_formulation_to_block( block, model_definition, input_vars, output_vars, transformation ): - """ - This function adds the GDP representation to the OmltBlock using Pyomo.GDP + """This function adds the GDP representation to the OmltBlock using Pyomo.GDP. Arguments: - block -- OmltBlock - model_definition -- LinearTreeDefinition Object - input_vars -- input variables to the linear tree model - output_vars -- output variable of the linear tree model - transformation -- Transformation to apply + block: OmltBlock + model_definition: LinearTreeDefinition Object + input_vars: input variables to the linear tree model + output_vars: output variable of the linear tree model + transformation: Transformation to apply """ leaves = model_definition.leaves @@ -234,10 +235,7 @@ def _add_gdp_formulation_to_block( # The set of leaves and the set of features tree_ids = list(leaves.keys()) - t_l = [] - for tree in tree_ids: - for leaf in leaves[tree].keys(): - t_l.append((tree, leaf)) + t_l = [(tree, leaf) for tree in tree_ids for leaf in leaves[tree]] features = np.arange(0, n_inputs) # Use the input_bounds and the linear models in the leaves to calculate @@ -292,14 +290,13 @@ def disjunction_rule(b, tree): def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output_vars): - """ - This function adds the Hybrid BigM representation to the OmltBlock + """This function adds the Hybrid BigM representation to the OmltBlock. Arguments: - block -- OmltBlock - model_definition -- LinearTreeDefinition Object - input_vars -- input variables to the linear tree model - output_vars -- output variable of the linear tree model + block: OmltBlock + model_definition: LinearTreeDefinition Object + input_vars: input variables to the linear tree model + output_vars: output variable of the linear tree model """ leaves = model_definition.leaves input_bounds = model_definition.scaled_input_bounds @@ -309,10 +306,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output tree_ids = list(leaves.keys()) # Create a list of tuples that contains the tree and leaf indices. Note that # the leaf indices depend on the tree in the ensemble. - t_l = [] - for tree in tree_ids: - for leaf in leaves[tree].keys(): - t_l.append((tree, leaf)) + t_l = [(tree, leaf) for tree in tree_ids for leaf in leaves[tree]] features = np.arange(0, n_inputs) diff --git a/src/omlt/neuralnet/__init__.py b/src/omlt/neuralnet/__init__.py index 2b66fc97..ef90caf3 100644 --- a/src/omlt/neuralnet/__init__.py +++ b/src/omlt/neuralnet/__init__.py @@ -1,4 +1,5 @@ -r""" +r"""omlt.neuralnet. + The basic pipeline in source code of OMLT is: .. math:: @@ -12,7 +13,10 @@ \xrightarrow[\text{Constraints}]{\text{Layer 3}}\cdots \end{align*} -where :math:`\mathbf z^{(0)}` is the output of `InputLayer`, :math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, :math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. 
+where +:math:`\mathbf z^{(0)}` is the output of `InputLayer`, +:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, +:math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. """ @@ -26,3 +30,14 @@ ReluComplementarityFormulation, ReluPartitionFormulation, ) + +__all__ = [ + "NetworkDefinition", + "FullSpaceNNFormulation", + "FullSpaceSmoothNNFormulation", + "ReducedSpaceNNFormulation", + "ReducedSpaceSmoothNNFormulation", + "ReluBigMFormulation", + "ReluComplementarityFormulation", + "ReluPartitionFormulation", +] diff --git a/src/omlt/neuralnet/activations/__init__.py b/src/omlt/neuralnet/activations/__init__.py index 7918d9f1..038a4dbd 100644 --- a/src/omlt/neuralnet/activations/__init__.py +++ b/src/omlt/neuralnet/activations/__init__.py @@ -1,5 +1,8 @@ -r""" -Since all activation functions are element-wised, we only consider how to formulate activation functions for a single neuron, where :math:`x` denotes pre-activation variable, and :math:`y` denotes post-activation variable. +r"""Activation functions. + +Since all activation functions are element-wised, we only consider how to formulate +activation functions for a single neuron, where :math:`x` denotes pre-activation +variable, and :math:`y` denotes post-activation variable. """ @@ -23,3 +26,18 @@ } NON_INCREASING_ACTIVATIONS = [] + +__all__ = [ + "linear_activation_constraint", + "linear_activation_function", + "ComplementarityReLUActivation", + "bigm_relu_activation_constraint", + "sigmoid_activation_constraint", + "sigmoid_activation_function", + "softplus_activation_constraint", + "softplus_activation_function", + "tanh_activation_constraint", + "tanh_activation_function", + "ACTIVATION_FUNCTION_MAP", + "NON_INCREASING_ACTIVATIONS", +] diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py index 712049c1..4538401a 100644 --- a/src/omlt/neuralnet/activations/linear.py +++ b/src/omlt/neuralnet/activations/linear.py @@ -3,10 +3,9 @@ def linear_activation_function(zhat): def linear_activation_constraint( - net_block, net, layer_block, layer, add_constraint=True + net_block, net, layer_block, layer, *, add_constraint=True ): - r""" - Linear activation constraint generator + r"""Linear activation constraint generator. Generates the constraints for the linear activation function: diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index e14718d7..733abb91 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,10 +1,9 @@ import pyomo.environ as pyo -import pyomo.mpec as mpec +from pyomo import mpec def bigm_relu_activation_constraint(net_block, net, layer_block, layer): - r""" - Big-M ReLU activation formulation. + r"""Big-M ReLU activation formulation. Generates the constraints for the ReLU activation function: @@ -35,7 +34,8 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): y&\le \sigma u \end{align*} - The lower bound of :math:`y` is :math:`\max(0,l)`, and the upper bound of :math:`y` is :math:`\max(0,u)`. + The lower bound of :math:`y` is :math:`\max(0,l)`, and the upper bound of :math:`y` + is :math:`\max(0,u)`. """ layer_block.q_relu = pyo.Var(layer.output_indexes, within=pyo.Binary) @@ -81,8 +81,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): class ComplementarityReLUActivation: - r""" - Complementarity-based ReLU activation formulation. + r"""Complementarity-based ReLU activation formulation. 
Generates the constraints for the ReLU activation function: diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py index b37ac6c7..7f5bd10d 100644 --- a/src/omlt/neuralnet/activations/smooth.py +++ b/src/omlt/neuralnet/activations/smooth.py @@ -2,8 +2,7 @@ def softplus_activation_function(x): - r""" - Applies the softplus function: + r"""Applies the softplus function. .. math:: @@ -16,8 +15,7 @@ def softplus_activation_function(x): def sigmoid_activation_function(x): - r""" - Applies the sigmoid function: + r"""Applies the sigmoid function. .. math:: @@ -30,8 +28,7 @@ def sigmoid_activation_function(x): def tanh_activation_function(x): - r""" - Applies the tanh function: + r"""Applies the tanh function. .. math:: @@ -44,40 +41,31 @@ def tanh_activation_function(x): def softplus_activation_constraint(net_block, net, layer_block, layer): - r""" - Softplus activation constraint generator. - - """ + r"""Softplus activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, softplus_activation_function ) def sigmoid_activation_constraint(net_block, net, layer_block, layer): - r""" - Sigmoid activation constraint generator. - - """ + r"""Sigmoid activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, sigmoid_activation_function ) def tanh_activation_constraint(net_block, net, layer_block, layer): - r""" - tanh activation constraint generator. - - """ + r"""Tanh activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, tanh_activation_function ) def smooth_monotonic_activation_constraint(net_block, net, layer_block, layer, fcn): - r""" - Activation constraint generator for a smooth monotonic function. + r"""Activation constraint generator for a smooth monotonic function. - Generates the constraints for the activation function :math:`f` if it is smooth and monotonic: + Generates the constraints for the activation function :math:`f` if it is smooth and + monotonic: .. math:: diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py index 16e068a3..d7a52750 100644 --- a/src/omlt/neuralnet/layer.py +++ b/src/omlt/neuralnet/layer.py @@ -1,5 +1,4 @@ -r""" -Neural network layer classes. +r"""Neural network layer classes. We use the following notations to define a layer: @@ -21,10 +20,10 @@ import numpy as np +OUTPUT_DIMENSIONS = 3 class Layer: - """ - Base layer class. + """Base layer class. Parameters ---------- @@ -42,13 +41,11 @@ def __init__( self, input_size, output_size, *, activation=None, input_index_mapper=None ): if not isinstance(input_size, (list, tuple)): - raise TypeError( - f"input_size must be a list or tuple, {type(input_size)} was provided." - ) + msg = f"input_size must be a list or tuple, {type(input_size)} provided." + raise TypeError(msg) if not isinstance(output_size, (list, tuple)): - raise TypeError( - f"output_size must be a list or tuple, {type(output_size)} was provided." - ) + msg = f"output_size must be a list or tuple, {type(output_size)} provided." 
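# Editor's sketch: numeric forms of the smooth activations documented above.
# The module itself builds Pyomo expressions; these numpy versions are an
# illustrative assumption that follows the standard definitions:
import numpy as np

def softplus(x):
    return np.log(np.exp(x) + 1.0)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def tanh(x):
    return np.tanh(x)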
+ raise TypeError(msg) self.__input_size = list(input_size) self.__output_size = list(output_size) self.activation = activation @@ -58,35 +55,34 @@ def __init__( @property def input_size(self): - """Return the size of the input tensor""" + """Return the size of the input tensor.""" return self.__input_size @property def output_size(self): - """Return the size of the output tensor""" + """Return the size of the output tensor.""" return self.__output_size @property def activation(self): - """Return the activation function""" + """Return the activation function.""" return self.__activation @activation.setter def activation(self, new_activation): - """Change the activation function""" + """Change the activation function.""" if new_activation is None: new_activation = "linear" self.__activation = new_activation @property def input_index_mapper(self): - """Return the index mapper""" + """Return the index mapper.""" return self.__input_index_mapper @property def input_indexes_with_input_layer_indexes(self): - """ - Return an iterator generating a tuple of local and input indexes. + """Return an iterator generating a tuple of local and input indexes. Local indexes are indexes over the elements of the current layer. Input indexes are indexes over the elements of the previous layer. @@ -101,17 +97,16 @@ def input_indexes_with_input_layer_indexes(self): @property def input_indexes(self): - """Return a list of the input indexes""" + """Return a list of the input indexes.""" return list(itertools.product(*[range(v) for v in self.__input_size])) @property def output_indexes(self): - """Return a list of the output indexes""" + """Return a list of the output indexes.""" return list(itertools.product(*[range(v) for v in self.__output_size])) def eval_single_layer(self, x): - """ - Evaluate the layer at x. + """Evaluate the layer at x. Parameters ---------- @@ -124,34 +119,35 @@ def eval_single_layer(self, x): else x[:] ) if x_reshaped.shape != tuple(self.input_size): - raise ValueError( - f"Layer requires an input size {self.input_size}, but the input tensor had size {x_reshaped.shape}." + msg = ( + f"Layer requires an input size {self.input_size}, but the input tensor" + f" has size {x_reshaped.shape}." ) + raise ValueError(msg) y = self._eval(x_reshaped) return self._apply_activation(y) def __repr__(self): - return f"<{str(self)} at {hex(id(self))}>" + return f"<{self!s} at {hex(id(self))}>" def _eval(self, x): - raise NotImplementedError() + raise NotImplementedError def _apply_activation(self, x): if self.__activation == "linear" or self.__activation is None: return x - elif self.__activation == "relu": + if self.__activation == "relu": return np.maximum(x, 0) - elif self.__activation == "sigmoid": + if self.__activation == "sigmoid": return 1.0 / (1.0 + np.exp(-x)) - elif self.__activation == "tanh": + if self.__activation == "tanh": return np.tanh(x) - else: - raise ValueError(f"Unknown activation function {self.__activation}") + msg = f"Unknown activation function {self.__activation}" + raise ValueError(msg) class InputLayer(Layer): - """ - The first layer in any network. + """The first layer in any network. Parameters ---------- @@ -172,13 +168,15 @@ def _eval(self, x): class DenseLayer(Layer): - r""" + r"""Dense layer. + The dense layer is defined by: .. 
math:: \begin{align*} - y_j = \sigma\left(\sum\limits_{i=0}^{F_{in}-1}w_{ij}x_i+b_j\right), && \forall 0\le j= input_layer_block.z[input_index] + layer_block.zhat[output_index] + >= input_layer_block.z[mapped_input_index] ) @@ -337,7 +355,8 @@ def _calculate_n_plus(out_index, l, k, layer, input_layer_block): def _input_layer_and_block(net_block, net, layer): input_layers = list(net.predecessors(layer)) if len(input_layers) != 1: - raise ValueError("Multiple input layers are not currently supported.") + msg = "Multiple input layers are not currently supported." + raise ValueError(msg) input_layer = input_layers[0] input_layer_block = net_block.layer[id(input_layer)] return input_layer, input_layer_block diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index f29cadd2..1430332a 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -4,8 +4,7 @@ def default_partition_split_func(w, n): - r""" - Default function for partitioning weights in :math:`w` into :math:`n` partitions. + r"""Default function to partition weights in :math:`w` into :math:`n` partitions. Weights in :math:`w` are sorted and partitioned evenly. @@ -16,15 +15,15 @@ def default_partition_split_func(w, n): def partition_based_dense_relu_layer(net_block, net, layer_block, layer, split_func): - r""" - Partition-based ReLU activation formulation. + r"""Partition-based ReLU activation formulation. Generates the constraints for the ReLU activation function: .. math:: \begin{align*} - y_j = \max\left(0,\sum\limits_{i=0}^{F_{in}-1}w_{ij}x_i+b_j\right), && \forall 0\le j 1: - raise ValueError(f"Layer {layer} has multiple predecessors.") + msg = f"Layer {layer} is not an input layer, but has no predecessors." + raise ValueError(msg) + if len(prev_layers) > 1: + msg = f"Layer {layer} has multiple predecessors." + raise ValueError(msg) prev_layer = prev_layers[0] prev_layer_block = net_block.layer[id(prev_layer)] @@ -100,19 +103,18 @@ def output_node_block(b, *output_index): for split_local_index in splits[split_index]: _, local_index = input_layer_indexes[split_local_index] - if mapper: - input_index = mapper(local_index) - else: - input_index = local_index + input_index = mapper(local_index) if mapper else local_index w = weights[local_index[-1]] expr += prev_layer_block.z[input_index] * w lb, ub = compute_bounds_on_expr(expr) if lb is None: - raise ValueError("Expression is unbounded below.") + msg = "Expression is unbounded below." + raise ValueError(msg) if ub is None: - raise ValueError("Expression is unbounded above.") + msg = "Expression is unbounded above." + raise ValueError(msg) z2 = b.z2[split_index] z2.setlb(min(0, lb)) @@ -133,9 +135,11 @@ def output_node_block(b, *output_index): lb, ub = compute_bounds_on_expr(expr) if lb is None: - raise ValueError("Expression is unbounded below.") + msg = "Expression is unbounded below." + raise ValueError(msg) if ub is None: - raise ValueError("Expression is unbounded above.") + msg = "Expression is unbounded above." 
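# Editor's sketch: default_partition_split_func's docstring says the weights
# are sorted and partitioned evenly. One way to do that (an assumption for
# illustration; the patch does not show the function body):
import numpy as np

def split_sorted_evenly(w, n):
    # indexes of w in ascending weight order, divided into n near-even chunks
    return np.array_split(np.argsort(w), n)

split_sorted_evenly(np.array([0.3, -1.2, 0.7, 0.1]), 2)
# -> [array([1, 3]), array([0, 2])]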
+ raise ValueError(msg) layer_block.z[output_index].setlb(0) layer_block.z[output_index].setub(max(0, ub)) @@ -144,10 +148,7 @@ def output_node_block(b, *output_index): for split_index in range(num_splits): for split_local_index in splits[split_index]: _, local_index = input_layer_indexes[split_local_index] - if mapper: - input_index = mapper(local_index) - else: - input_index = local_index + input_index = mapper(local_index) if mapper else local_index w = weights[local_index[-1]] eq_13_expr += prev_layer_block.z[input_index] * w diff --git a/src/omlt/neuralnet/layers/reduced_space.py b/src/omlt/neuralnet/layers/reduced_space.py index 95d1f97f..9160f87e 100644 --- a/src/omlt/neuralnet/layers/reduced_space.py +++ b/src/omlt/neuralnet/layers/reduced_space.py @@ -1,6 +1,5 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation): - r""" - Add reduced-space formulation of the dense layer to the block + r"""Add reduced-space formulation of the dense layer to the block. .. math:: @@ -12,11 +11,11 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation): # not an input layer, process the expressions prev_layers = list(net.predecessors(layer)) if len(prev_layers) == 0: - raise ValueError( - f"Layer {layer} is not an input layer, but has no predecessors." - ) - elif len(prev_layers) > 1: - raise ValueError(f"Layer {layer} has multiple predecessors.") + msg = f"Layer {layer} is not an input layer, but has no predecessors." + raise ValueError(msg) + if len(prev_layers) > 1: + msg = f"Layer {layer} has multiple predecessors." + raise ValueError(msg) prev_layer = prev_layers[0] prev_layer_block = net_block.layer[id(prev_layer)] diff --git a/src/omlt/neuralnet/network_definition.py b/src/omlt/neuralnet/network_definition.py index aeef22eb..783f0c76 100644 --- a/src/omlt/neuralnet/network_definition.py +++ b/src/omlt/neuralnet/network_definition.py @@ -7,7 +7,8 @@ class NetworkDefinition: def __init__( self, scaling_object=None, scaled_input_bounds=None, unscaled_input_bounds=None ): - """ + """Network Definition. + Create a network definition object used to create the neural network formulation in Pyomo @@ -26,7 +27,7 @@ def __init__( parameter will be generated using the scaling object. If None, then no bounds are specified. """ - self.__layers_by_id = dict() + self.__layers_by_id = {} self.__graph = nx.DiGraph() self.__scaling_object = scaling_object @@ -41,10 +42,11 @@ def __init__( ) scaled_input_bounds = { - k: (lbs[k], ubs[k]) for k in unscaled_input_bounds.keys() + k: (lbs[k], ubs[k]) for k in unscaled_input_bounds } - # If unscaled input bounds provided and no scaler provided, scaled input bounds = unscaled input bounds + # If unscaled input bounds provided and no scaler provided, + # scaled input bounds = unscaled input bounds elif unscaled_input_bounds is not None and scaling_object is None: scaled_input_bounds = unscaled_input_bounds @@ -52,8 +54,7 @@ def __init__( self.__scaled_input_bounds = scaled_input_bounds def add_layer(self, layer): - """ - Add a layer to the network. + """Add a layer to the network. Parameters ---------- @@ -65,8 +66,7 @@ def add_layer(self, layer): self.__graph.add_node(layer_id) def add_edge(self, from_layer, to_layer): - """ - Add an edge between two layers. + """Add an edge between two layers. 
Parameters ---------- @@ -78,69 +78,85 @@ def add_edge(self, from_layer, to_layer): id_to = id(to_layer) id_from = id(from_layer) if id_to not in self.__layers_by_id: - raise ValueError(f"Inbound layer {to_layer} not found in network.") + msg = f"Inbound layer {to_layer} not found in network." + raise ValueError(msg) if id_from not in self.__layers_by_id: - raise ValueError(f"Outbound layer {from_layer} not found in network.") + msg = f"Outbound layer {from_layer} not found in network." + raise ValueError(msg) self.__graph.add_edge(id_from, id_to) @property def scaling_object(self): - """Return an instance of the scaling object that supports the ScalingInterface""" + """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Return a dict of tuples containing lower and upper bounds of neural network inputs""" + """Scaled Input Bounds. + + Return a dict of tuples containing lower and upper bounds of neural network + inputs. + """ return self.__scaled_input_bounds @property def unscaled_input_bounds(self): - """Return a dict of tuples containing lower and upper bounds of unscaled neural network inputs""" + """Unscaled Input Bounds. + + Return a dict of tuples containing lower and upper bounds of unscaled neural + network inputs. + """ return self.__unscaled_input_bounds @property def input_layers(self): - """Return an iterator over the input layers""" + """Return an iterator over the input layers.""" for layer_id, in_degree in self.__graph.in_degree(): if in_degree == 0: yield self.__layers_by_id[layer_id] @property def input_nodes(self): - """An alias for input_layers""" + """An alias for input_layers.""" return self.input_layers @property def output_layers(self): - """Return an iterator over the output layer""" + """Return an iterator over the output layer.""" for layer_id, out_degree in self.__graph.out_degree(): if out_degree == 0: yield self.__layers_by_id[layer_id] @property def output_nodes(self): - """An alias for output_layers""" + """An alias for output_layers.""" return self.output_layers def layer(self, layer_id): - """Return the layer with the given id""" + """Return the layer with the given id.""" return self.__layers_by_id[layer_id] @property def layers(self): - """Return an iterator over all the layers""" + """Return an iterator over all the layers.""" for layer_id in nx.topological_sort(self.__graph): yield self.__layers_by_id[layer_id] def predecessors(self, layer): - """Return an iterator over the layers with outbound connections into the layer""" + """Predecessors. + + Return an iterator over the layers with outbound connections into the layer. + """ if isinstance(layer, Layer): layer = id(layer) for node_id in self.__graph.predecessors(layer): yield self.__layers_by_id[node_id] def successors(self, layer): - """Return an iterator over the layers with an inbound connection from the layer""" + """Successors. + + Return an iterator over the layers with an inbound connection from the layer. 
+ """ if isinstance(layer, Layer): layer = id(layer) for node_id in self.__graph.successors(layer): diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index b0461aa2..d8eb5b37 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,4 +1,3 @@ -import numpy as np import pyomo.environ as pyo from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -57,10 +56,11 @@ def _ignore_input_layer(): "tanh": tanh_activation_constraint, } +MULTI_INPUTS_UNSUPPORTED = "Multiple input layers are not currently supported." +MULTI_OUTPUTS_UNSUPPORTED = "Multiple output layers are not currently supported." class FullSpaceNNFormulation(_PyomoFormulation): - """ - This class is the entry-point to build neural network formulations. + """This class is the entry-point to build neural network formulations. This class iterates over all nodes in the neural network and for each one them, generates the constraints to represent the layer @@ -96,10 +96,10 @@ def __init__( network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) def _supported_default_layer_constraints(self): return _DEFAULT_LAYER_CONSTRAINTS @@ -124,7 +124,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -132,15 +132,14 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes def _build_neural_network_formulation( block, network_structure, layer_constraints, activation_constraints ): - """ - Adds the neural network formulation to the given Pyomo block. + """Adds the neural network formulation to the given Pyomo block. Parameters ---------- @@ -183,27 +182,21 @@ def layer(b, layer_id): layer_constraints_func = layer_constraints.get(type(layer), None) if layer_constraints_func is None: - raise ValueError( - "Layer type {} is not supported by this formulation.".format( - type(layer) - ) - ) + msg = f"Layer type {type(layer)} is not supported by this formulation." + raise ValueError(msg) layer_constraints_func(block, net, layer_block, layer) activation_constraints_func = activation_constraints.get(layer.activation, None) if activation_constraints_func is None: - raise ValueError( - "Activation {} is not supported by this formulation.".format( - layer.activation - ) - ) + msg = f"Activation {layer.activation} is not supported by this formulation." 
+            raise ValueError(msg)
         activation_constraints_func(block, net, layer_block, layer)

     # setup input variables constraints
     # currently only support a single input layer
     input_layers = list(net.input_layers)
     if len(input_layers) != 1:
-        raise ValueError("Multiple input layers are not currently supported.")
+        raise ValueError(MULTI_INPUTS_UNSUPPORTED)
     input_layer = input_layers[0]

     @block.Constraint(input_layer.output_indexes)
@@ -214,7 +207,7 @@ def input_assignment(b, *output_index):
     # currently only support a single output layer
     output_layers = list(net.output_layers)
     if len(output_layers) != 1:
-        raise ValueError("Multiple output layers are not currently supported.")
+        raise ValueError(MULTI_OUTPUTS_UNSUPPORTED)
     output_layer = output_layers[0]

     @block.Constraint(output_layer.output_indexes)
@@ -226,7 +219,8 @@ def output_assignment(b, *output_index):

 class FullSpaceSmoothNNFormulation(FullSpaceNNFormulation):
     def __init__(self, network_structure):
-        """
+        """Full Space Smooth Neural Network Formulation.
+
         This class is used for building "full-space" formulations
         of neural network models composed of smooth activations
         (e.g., tanh, sigmoid, etc.)
@@ -249,7 +243,8 @@ def _supported_default_activation_constraints(self):

 class ReluBigMFormulation(FullSpaceNNFormulation):
     def __init__(self, network_structure):
-        """
+        """Relu Big-M Formulation.
+
         This class is used for building "full-space" formulations
         of neural network models composed of relu activations using
         a big-M formulation
@@ -270,7 +265,8 @@ def _supported_default_activation_constraints(self):

 class ReluComplementarityFormulation(FullSpaceNNFormulation):
     def __init__(self, network_structure):
-        """
+        """Relu Complementarity Formulation.
+
         This class is used for building "full-space" formulations
         of neural network models composed of relu activations using
         a complementarity formulation (smooth representation)
@@ -290,7 +286,8 @@ def _supported_default_activation_constraints(self):

 class ReducedSpaceNNFormulation(_PyomoFormulation):
-    """
+    """Reduced Space Neural Network Formulation.
+
     This class is used to build reduced-space formulations
     of neural networks.
@@ -322,10 +319,10 @@ def __init__(self, network_structure, activation_functions=None):
         #
         # network_inputs = list(self.__network_definition.input_nodes)
         # if len(network_inputs) != 1:
-        #     raise ValueError("Multiple input layers are not currently supported.")
+        #     raise ValueError(MULTI_INPUTS_UNSUPPORTED)
         # network_outputs = list(self.__network_definition.output_nodes)
         # if len(network_outputs) != 1:
-        #     raise ValueError("Multiple output layers are not currently supported.")
+        #     raise ValueError(MULTI_OUTPUTS_UNSUPPORTED)

     def _supported_default_activation_functions(self):
         return dict(_DEFAULT_ACTIVATION_FUNCTIONS)
@@ -346,10 +343,11 @@ def _build_formulation(self):
         # currently only support a single input layer
         input_layers = list(net.input_layers)
         if len(input_layers) != 1:
-            raise ValueError(
+            msg = (
                 "build_formulation called with a network that has more than"
                 " one input layer. Only single input layers are supported."
) + raise ValueError(msg) input_layer = input_layers[0] input_layer_id = id(input_layer) input_layer_block = block.layer[input_layer_id] @@ -374,11 +372,11 @@ def z(b, *output_index): layer_func = reduced_space_dense_layer # layer_constraints[type(layer)] activation_func = self._activation_functions.get(layer.activation, None) if activation_func is None: - raise ValueError( - "Activation {} is not supported by this formulation.".format( - layer.activation - ) + msg = ( + f"Activation {layer.activation} is not supported by this" + " formulation." ) + raise ValueError(msg) layer_func(block, net, layer_block, layer, activation_func) @@ -386,10 +384,11 @@ def z(b, *output_index): # currently only support a single output layer output_layers = list(net.output_layers) if len(output_layers) != 1: - raise ValueError( + msg = ( "build_formulation called with a network that has more than" " one output layer. Only single output layers are supported." ) + raise ValueError(msg) output_layer = output_layers[0] @block.Constraint(output_layer.output_indexes) @@ -413,7 +412,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -421,12 +420,13 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes class ReducedSpaceSmoothNNFormulation(ReducedSpaceNNFormulation): - """ + """Reduced Space Smooth Neural Network Formulation. + This class is used to build reduced-space formulations of neural networks with smooth activation functions. @@ -449,7 +449,8 @@ def _supported_default_activation_functions(self): class ReluPartitionFormulation(_PyomoFormulation): - """ + """ReLU Partition Formulation. + This class is used to build partition-based formulations of neural networks. @@ -515,11 +516,14 @@ def layer(b, layer_id): full_space_dense_layer(block, net, layer_block, layer) linear_activation_constraint(block, net, layer_block, layer) else: - raise ValueError( - "ReluPartitionFormulation supports Dense layers with relu or linear activation" + msg = ( + "ReluPartitionFormulation supports Dense layers with relu or" + " linear activation" ) + raise ValueError(msg) else: - raise ValueError("ReluPartitionFormulation supports only Dense layers") + msg = "ReluPartitionFormulation supports only Dense layers" + raise TypeError(msg) # This check is never hit. 
The formulation._build_formulation() function is # only ever called by an OmltBlock.build_formulation(), and that runs the @@ -530,7 +534,7 @@ def layer(b, layer_id): # currently only support a single input layer input_layers = list(net.input_layers) if len(input_layers) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] @block.Constraint(input_layer.output_indexes) @@ -544,7 +548,7 @@ def input_assignment(b, *output_index): # currently only support a single output layer output_layers = list(net.output_layers) if len(output_layers) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] @block.Constraint(output_layer.output_indexes) @@ -559,7 +563,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -567,5 +571,5 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes diff --git a/src/omlt/scaling.py b/src/omlt/scaling.py index ea7416ba..9bf3bd3f 100644 --- a/src/omlt/scaling.py +++ b/src/omlt/scaling.py @@ -1,7 +1,8 @@ -""" +"""Scaling. + The omlt.scaling module describes the interface for providing different scaling -expressions to the Pyomo model for the inputs and outputs of an ML model. An implementation of a common scaling approach is -included with `OffsetScaling`. +expressions to the Pyomo model for the inputs and outputs of an ML model. An +implementation of a common scaling approach is included with `OffsetScaling`. """ import abc @@ -10,25 +11,32 @@ class ScalingInterface(abc.ABC): @abc.abstractmethod def get_scaled_input_expressions(self, input_vars): - """This method returns a list of expressions for the scaled inputs from - the unscaled inputs""" - pass # pragma: no cover + """Get scaled inputs. + + This method returns a list of expressions for the scaled inputs from + the unscaled inputs + """ + # pragma: no cover @abc.abstractmethod def get_unscaled_output_expressions(self, scaled_output_vars): - """This method returns a list of expressions for the unscaled outputs from - the scaled outputs""" - pass # pragma: no cover + """Get unscaled outputs. + + This method returns a list of expressions for the unscaled outputs from + the scaled outputs + """ + # pragma: no cover def convert_to_dict(x): - if type(x) is dict: + if isinstance(x, dict): return dict(x) - return {i: v for i, v in enumerate(x)} + return dict(enumerate(x)) class OffsetScaling(ScalingInterface): - r""" + r"""OffsetScaling interface. 
+ This scaling object represents the following scaling equations for inputs (x) and outputs (y) @@ -51,7 +59,7 @@ class OffsetScaling(ScalingInterface): """ def __init__(self, offset_inputs, factor_inputs, offset_outputs, factor_outputs): - super(OffsetScaling, self).__init__() + super().__init__() self.__x_offset = convert_to_dict(offset_inputs) self.__x_factor = convert_to_dict(factor_inputs) self.__y_offset = convert_to_dict(offset_outputs) @@ -59,112 +67,102 @@ def __init__(self, offset_inputs, factor_inputs, offset_outputs, factor_outputs) for k, v in self.__x_factor.items(): if v <= 0: - raise ValueError( + msg = ( "OffsetScaling only accepts positive values" " for factor_inputs. Negative value found at" - " index {}.".format(k) + f" index {k}." ) + raise ValueError(msg) for k, v in self.__y_factor.items(): if v <= 0: - raise ValueError( + msg = ( "OffsetScaling only accepts positive values" " for factor_outputs. Negative value found at" - " index {}.".format(k) + f" index {k}." ) + raise ValueError(msg) def get_scaled_input_expressions(self, input_vars): - """ - Get the scaled input expressions of the input variables. - """ + """Get the scaled input expressions of the input variables.""" sorted_keys = sorted(input_vars.keys()) if ( sorted(self.__x_offset) != sorted_keys or sorted(self.__x_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_input_expressions called with input_vars" " that do not have the same indices as offset_inputs" " or factor_inputs.\n" - "Keys in input_vars: {}.\n" - "Keys in offset_inputs: {}.\n" - "Keys in offset_factor: {}.".format( - sorted_keys, sorted(self.__x_offset), sorted(self.__x_factor) - ) + f"Keys in input_vars: {sorted_keys}.\n" + f"Keys in offset_inputs: {sorted(self.__x_offset)}.\n" + f"Keys in offset_factor: {sorted(self.__x_factor)}." ) + raise ValueError(msg) x = input_vars - return {k: (x[k] - self.__x_offset[k]) / self.__x_factor[k] for k in x.keys()} + return {k: (x[k] - self.__x_offset[k]) / self.__x_factor[k] for k in x} def get_unscaled_input_expressions(self, scaled_input_vars): - """ - Get the unscaled input expressions of the scaled input variables. - """ + """Get the unscaled input expressions of the scaled input variables.""" sorted_keys = sorted(scaled_input_vars.keys()) if ( sorted(self.__x_offset) != sorted_keys or sorted(self.__x_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_input_expressions called with input_vars" " that do not have the same indices as offset_inputs" " or factor_inputs.\n" - "Keys in input_vars: {}\n" - "Keys in offset_inputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__x_offset), sorted(self.__x_factor) - ) + f"Keys in input_vars: {sorted_keys}\n" + f"Keys in offset_inputs: {sorted(self.__x_offset)}\n" + f"Keys in offset_factor: {sorted(self.__x_factor)}" ) + raise ValueError(msg) scaled_x = scaled_input_vars return { k: scaled_x[k] * self.__x_factor[k] + self.__x_offset[k] - for k in scaled_x.keys() + for k in scaled_x } def get_scaled_output_expressions(self, output_vars): - """ - Get the scaled output expressions of the output variables. 
- """ + """Get the scaled output expressions of the output variables.""" sorted_keys = sorted(output_vars.keys()) if ( sorted(self.__y_offset) != sorted_keys or sorted(self.__y_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_output_expressions called with output_vars" " that do not have the same indices as offset_outputs" " or factor_outputs.\n" - "Keys in output_vars: {}\n" - "Keys in offset_outputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__y_offset), sorted(self.__y_factor) - ) + f"Keys in output_vars: {sorted_keys}\n" + f"Keys in offset_outputs: {sorted(self.__y_offset)}\n" + f"Keys in offset_factor: {sorted(self.__y_factor)}" ) + raise ValueError(msg) y = output_vars - return {k: (y[k] - self.__y_offset[k]) / self.__y_factor[k] for k in y.keys()} + return {k: (y[k] - self.__y_offset[k]) / self.__y_factor[k] for k in y} def get_unscaled_output_expressions(self, scaled_output_vars): - """ - Get the unscaled output expressions of the scaled output variables. - """ + """Get the unscaled output expressions of the scaled output variables.""" sorted_keys = sorted(scaled_output_vars.keys()) if ( sorted(self.__y_offset) != sorted_keys or sorted(self.__y_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_output_expressions called with output_vars" " that do not have the same indices as offset_outputs" " or factor_outputs.\n" - "Keys in output_vars: {}\n" - "Keys in offset_outputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__y_offset), sorted(self.__y_factor) - ) + f"Keys in output_vars: {sorted_keys}\n" + f"Keys in offset_outputs: {sorted(self.__y_offset)}\n" + f"Keys in offset_factor: {sorted(self.__y_factor)}" ) + raise ValueError(msg) scaled_y = scaled_output_vars return { k: scaled_y[k] * self.__y_factor[k] + self.__y_offset[k] - for k in scaled_y.keys() + for k in scaled_y } diff --git a/tests/conftest.py b/tests/conftest.py index b9c4daf7..bcea6cff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,46 +2,43 @@ import numpy as np import pytest -from pyomo.common.fileutils import this_file_dir - from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition +from pyomo.common.fileutils import this_file_dir def get_neural_network_data(desc): - """ - Return input and test data for a neural network. + """Return input and test data for a neural network. Parameters ---------- desc : string model name. One of 131 or 2353. """ + rng = np.random.default_rng(42) + if desc == "131": # build data with 1 input and 1 output and 500 data points - x = np.random.uniform(-1, 1, 500) + x = rng.uniform(-1, 1, 500) y = np.sin(x) - x_test = np.random.uniform(-1, 1, 5) + x_test = rng.uniform(-1, 1, 5) return x, y, x_test - elif desc == "2353": + if desc == "2353": # build data with 2 inputs, 3 outputs, and 500 data points - np.random.seed(42) - x = np.random.uniform([-1, 2], [1, 3], (500, 2)) + x = rng.uniform([-1, 2], [1, 3], (500, 2)) y1 = np.sin(x[:, 0] * x[:, 1]) y2 = x[:, 0] + x[:, 1] y3 = np.cos(x[:, 0] / x[:, 1]) y = np.column_stack((y1, y2, y3)) - x_test = np.random.uniform([-1, 2], [1, 3], (5, 2)) + x_test = rng.uniform([-1, 2], [1, 3], (5, 2)) return x, y, x_test return None class _Datadir: - """ - Give access to files in the `models` directory. 
- """ + """Give access to files in the `models` directory.""" def __init__(self, basedir): self._basedir = basedir @@ -50,16 +47,17 @@ def file(self, filename): return str(self._basedir / filename) -@pytest.fixture +@pytest.fixture() def datadir(): basedir = Path(this_file_dir()) / "models" return _Datadir(basedir) -@pytest.fixture +@pytest.fixture() def two_node_network_relu(): - """ - 1 1 + """Two node network with ReLU activation. + + 1 1 x0 -------- (1) --------- (3) | / | / diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index 57d93427..a7c7557c 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -2,12 +2,16 @@ import pyomo.environ as pe import pytest - from omlt import OmltBlock from omlt.dependencies import onnx, onnx_available from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel +TOTAL_CONSTRAINTS = 423 +Y_VARS = 42 +Z_L_VARS = 160 +SINGLE_LEAVES = 20 +SPLITS = 140 @pytest.mark.skip("Francesco and Alex need to check this test") def test_formulation_with_continuous_variables(): @@ -27,17 +31,18 @@ def test_formulation_with_continuous_variables(): assert ( len(list(m.gbt.component_data_objects(pe.Var))) == 202 + 10 ) # our auto-created variables - assert len(list(m.gbt.component_data_objects(pe.Constraint))) == 423 # TODO: fix? + # TODO: fix below?: + assert len(list(m.gbt.component_data_objects(pe.Constraint))) == TOTAL_CONSTRAINTS - assert len(m.gbt.z_l) == 160 - assert len(m.gbt.y) == 42 + assert len(m.gbt.z_l) == Z_L_VARS + assert len(m.gbt.y) == Y_VARS - assert len(m.gbt.single_leaf) == 20 - assert len(m.gbt.left_split) == 140 - assert len(m.gbt.right_split) == 140 + assert len(m.gbt.single_leaf) == SINGLE_LEAVES + assert len(m.gbt.left_split) == SPLITS + assert len(m.gbt.right_split) == SPLITS assert len(m.gbt.categorical) == 0 - assert len(m.gbt.var_lower) == 42 - assert len(m.gbt.var_upper) == 42 + assert len(m.gbt.var_lower) == Y_VARS + assert len(m.gbt.var_upper) == Y_VARS # TODO: did we remove categorical variables intentionally? 
diff --git a/tests/io/test_input_bounds.py b/tests/io/test_input_bounds.py index c8f1f439..ea9238fc 100644 --- a/tests/io/test_input_bounds.py +++ b/tests/io/test_input_bounds.py @@ -15,7 +15,7 @@ def test_input_bounds_reader_writer_with_list(): def test_input_bounds_reader_writer_with_dictionary(): - input_bounds = dict(((i, i), (i * 10.0, i * 10.0 + 1.0)) for i in range(10)) + input_bounds = {(i, i): (i * 10.0, i * 10.0 + 1.0) for i in range(10)} with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f: write_input_bounds(f.name, input_bounds) diff --git a/tests/io/test_keras_reader.py b/tests/io/test_keras_reader.py index 21629c66..1982063b 100644 --- a/tests/io/test_keras_reader.py +++ b/tests/io/test_keras_reader.py @@ -1,7 +1,9 @@ import pytest - from omlt.dependencies import keras, keras_available +NUM_LAYERS_131 = 3 +NUM_LAYERS_BIG = 5 + if keras_available: from omlt.io import load_keras_sequential @@ -14,7 +16,7 @@ def test_keras_reader(datadir): net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 for layer in layers: assert layer.activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -25,7 +27,7 @@ def test_keras_reader(datadir): ) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -36,7 +38,7 @@ def test_keras_reader(datadir): ) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "sigmoid" assert layers[1].weights.shape == (1, 3) @@ -45,7 +47,7 @@ def test_keras_reader(datadir): nn = keras.models.load_model(datadir.file("big.keras"), compile=False) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 5 + assert len(layers) == NUM_LAYERS_BIG assert layers[1].activation == "sigmoid" assert layers[2].activation == "sigmoid" assert layers[3].activation == "sigmoid" diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py index 763b282c..2f4510c3 100644 --- a/tests/io/test_onnx_parser.py +++ b/tests/io/test_onnx_parser.py @@ -1,7 +1,15 @@ import pytest - from omlt.dependencies import onnx, onnx_available +NUM_LAYERS_131 = 3 +NUM_LAYERS_GEMM = 4 +NUM_LAYERS_MAXPOOL = 4 +NUM_LAYERS_BIG = 5 + +MAXPOOL_KERNEL_DEPTH = 3 + +NEAR_EQUAL = 1e-05 + if onnx_available: from omlt.io.onnx import load_onnx_neural_network from omlt.io.onnx_parser import NetworkParser @@ -12,7 +20,7 @@ def test_linear_131(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 for layer in layers: assert layer.activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -24,7 +32,7 @@ def test_linear_131_relu(datadir): model = onnx.load(datadir.file("keras_linear_131_relu.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "relu" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -36,7 +44,7 @@ def test_linear_131_sigmoid(datadir): model = onnx.load(datadir.file("keras_linear_131_sigmoid.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) 
- assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -48,7 +56,7 @@ def test_gemm(datadir): model = onnx.load(datadir.file("gemm.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_GEMM assert layers[1].weights.shape == (784, 75) assert layers[2].weights.shape == (75, 75) assert layers[3].weights.shape == (75, 10) @@ -67,10 +75,10 @@ def test_gemm_transB(datadir): layers_transB = list(net_transB.layers) assert len(layers) == len(layers_transB) assert layers[1].weights.shape == layers_transB[1].weights.shape - assert abs(layers[1].weights[0][0] - layers_transB[1].weights[0][0]) < 1e-05 - assert abs(layers[1].weights[0][1] - layers_transB[1].weights[1][0]) < 1e-05 - assert abs(layers[1].weights[1][0] - layers_transB[1].weights[0][1]) < 1e-05 - assert abs(layers[1].weights[1][1] - layers_transB[1].weights[1][1]) < 1e-05 + assert abs(layers[1].weights[0][0] - layers_transB[1].weights[0][0]) < NEAR_EQUAL + assert abs(layers[1].weights[0][1] - layers_transB[1].weights[1][0]) < NEAR_EQUAL + assert abs(layers[1].weights[1][0] - layers_transB[1].weights[0][1]) < NEAR_EQUAL + assert abs(layers[1].weights[1][1] - layers_transB[1].weights[1][1]) < NEAR_EQUAL @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -78,7 +86,7 @@ def test_conv(datadir): model = onnx.load(datadir.file("convx1_gemmx1.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_GEMM assert layers[1].activation == "linear" assert layers[2].activation == "linear" assert layers[3].activation == "relu" @@ -91,7 +99,7 @@ def test_maxpool(datadir): model = onnx.load(datadir.file("maxpool_2d.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_MAXPOOL assert layers[1].activation == "relu" assert layers[2].activation == "linear" assert layers[3].activation == "linear" @@ -105,7 +113,7 @@ def test_maxpool(datadir): assert layers[2].output_size == [3, 5, 2] assert layers[3].output_size == [3, 2, 1] for layer in layers[1:]: - assert layer.kernel_depth == 3 + assert layer.kernel_depth == MAXPOOL_KERNEL_DEPTH @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -113,10 +121,10 @@ def test_input_tensor_invalid_dims(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) model.graph.input[0].type.tensor_type.shape.dim[1].dim_value = 0 parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, match='All dimensions in graph "tf2onnx" input tensor have 0 value.' + ): parser.parse_network(model.graph, None, None) - expected_msg = 'All dimensions in graph "tf2onnx" input tensor have 0 value.' - assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -124,10 +132,10 @@ def test_no_input_layers(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) model.graph.input.remove(model.graph.input[0]) parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, match='No valid input layer found in graph "tf2onnx".' + ): parser.parse_network(model.graph, None, None) - expected_msg = 'No valid input layer found in graph "tf2onnx".' 
- assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -136,10 +144,13 @@ def test_node_no_inputs(datadir): while len(model.graph.node[0].input) > 0: model.graph.node[0].input.pop() parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + expected_msg = ( + 'Nodes must have inputs or have op_type "Constant". Node ' + '"StatefulPartitionedCall/keras_linear_131/dense/MatMul" has' + ' no inputs and op_type "MatMul".' + ) + with pytest.raises(ValueError, match=expected_msg): parser.parse_network(model.graph, None, None) - expected_msg = """Nodes must have inputs or have op_type \"Constant\". Node \"StatefulPartitionedCall/keras_linear_131/dense/MatMul\" has no inputs and op_type \"MatMul\".""" - assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -148,45 +159,56 @@ def test_consume_wrong_node_type(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) - with pytest.raises(ValueError) as excinfo: + expected_msg_dense = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for MatMul nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_dense): parser._consume_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MatMul nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_dense - - with pytest.raises(ValueError) as excinfo: + expected_msg_gemm = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Gemm nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_gemm): parser._consume_gemm_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_gemm = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Gemm nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_gemm - - with pytest.raises(ValueError) as excinfo: + expected_msg_conv = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Conv nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_conv): parser._consume_conv_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_conv = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Conv nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_conv - - with pytest.raises(ValueError) as excinfo: + expected_msg_reshape = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Reshape nodes was called. This could indicate " + "changes in the network being parsed." 
+ ) + with pytest.raises(ValueError, match=expected_msg_reshape): parser._consume_reshape_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_reshape = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Reshape nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_reshape - - with pytest.raises(ValueError) as excinfo: + expected_msg_pool = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for MaxPool nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_pool): parser._consume_pool_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_pool = """StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MaxPool nodes can be used as starting points for consumption.""" - assert str(excinfo.value) == expected_msg_pool @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -198,13 +220,15 @@ def test_consume_dense_wrong_dims(datadir): parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][ 1 ].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_dense = ( + "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, " + "only nodes with 2 input dimensions can be used as starting points for parsing." + ) + with pytest.raises(ValueError, match=expected_msg_dense): parser._consume_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][2], ) - expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_dense @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -213,12 +237,14 @@ def test_consume_gemm_wrong_dims(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) parser._nodes["Gemm_0"][1].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_gemm = ( + "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions " + "can be used as starting points for parsing." + ) + with pytest.raises(ValueError, match=expected_msg_gemm): parser._consume_gemm_dense_nodes( parser._nodes["Gemm_0"][1], parser._nodes["Gemm_0"][2] ) - expected_msg_gemm = "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_gemm @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -227,12 +253,14 @@ def test_consume_conv_wrong_dims(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) parser._nodes["Conv_0"][1].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_conv = ( + "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input" + " dimensions can be used as starting points for parsing." 
+    )
+    with pytest.raises(ValueError, match=expected_msg_conv):
         parser._consume_conv_nodes(
             parser._nodes["Conv_0"][1], parser._nodes["Conv_0"][2]
         )
-    expected_msg_conv = "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
-    assert str(excinfo.value) == expected_msg_conv

 @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test")
@@ -241,12 +269,14 @@ def test_consume_reshape_wrong_dims(datadir):
     parser = NetworkParser()
     parser.parse_network(model.graph, None, None)
     parser._nodes["Reshape_2"][1].input.append("abcd")
-    with pytest.raises(ValueError) as excinfo:
+    expected_msg_reshape = (
+        "Reshape_2 input has 3 dimensions, only nodes with 2 input"
+        " dimensions can be used as starting points for parsing."
+    )
+    with pytest.raises(ValueError, match=expected_msg_reshape):
         parser._consume_reshape_nodes(
             parser._nodes["Reshape_2"][1], parser._nodes["Reshape_2"][2]
         )
-    expected_msg_reshape = """Reshape_2 input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."""
-    assert str(excinfo.value) == expected_msg_reshape

 @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test")
@@ -255,7 +285,8 @@ def test_consume_maxpool_wrong_dims(datadir):
     parser = NetworkParser()
     parser.parse_network(model.graph, None, None)
     parser._nodes["node1"][1].input.append("abcd")
-    with pytest.raises(ValueError) as excinfo:
+    expected_msg_maxpool = (
+        "node1 input has 2 dimensions, only nodes with 1 input "
+        "dimension can be used as starting points for parsing."
+    )
+    with pytest.raises(ValueError, match=expected_msg_maxpool):
         parser._consume_pool_nodes(parser._nodes["node1"][1], parser._nodes["node1"][2])
-    expected_msg_maxpool = """node1 input has 2 dimensions, only nodes with 1 input dimension can be used as starting points for consumption."""
-    assert str(excinfo.value) == expected_msg_maxpool
diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py
index 9cf6905f..fd52e69d 100644
--- a/tests/io/test_torch_geometric.py
+++ b/tests/io/test_torch_geometric.py
@@ -1,17 +1,19 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-
 from omlt import OmltBlock
 from omlt.dependencies import (
-    torch,
     torch_available,
-    torch_geometric,
     torch_geometric_available,
 )

 if torch_available and torch_geometric_available:
-    from torch.nn import Linear, ReLU, Sigmoid, Softplus, Tanh
+    from omlt.io.torch_geometric import (
+        gnn_with_fixed_graph,
+        gnn_with_non_fixed_graph,
+        load_torch_geometric_sequential,
+    )
+    from torch.nn import Linear, ReLU, Sigmoid, Tanh
     from torch_geometric.nn import (
         GCNConv,
         SAGEConv,
@@ -21,12 +23,6 @@
         global_mean_pool,
     )

-    from omlt.io.torch_geometric import (
-        gnn_with_fixed_graph,
-        gnn_with_non_fixed_graph,
-        load_torch_geometric_sequential,
-    )
-

 @pytest.mark.skipif(
     not (torch_available and torch_geometric_available),
diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py
index 28f6f873..30e3a1a2 100644
--- a/tests/linear_tree/test_lt_formulation.py
+++ b/tests/linear_tree/test_lt_formulation.py
@@ -1,22 +1,24 @@
 import numpy as np
 import pyomo.environ as pe
 import pytest
-from pytest import approx
-
 from omlt.dependencies import lineartree_available

 if lineartree_available:
     from lineartree import LinearTreeRegressor
-    from sklearn.linear_model import LinearRegression

     from omlt.linear_tree import (
+        LinearTreeDefinition,
         LinearTreeGDPFormulation,
LinearTreeHybridBigMFormulation, - LinearTreeDefinition, ) + from sklearn.linear_model import LinearRegression import omlt from omlt import OmltBlock +NUM_INPUTS = 2 +NUM_SPLITS = 5 +NUM_LEAVES = 6 + scip_available = pe.SolverFactory("scip").available() cbc_available = pe.SolverFactory("cbc").available() gurobi_available = pe.SolverFactory("gurobi").available() @@ -100,7 +102,7 @@ def test_linear_tree_model_single_var(): assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -114,12 +116,12 @@ def test_linear_tree_model_single_var(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -130,8 +132,8 @@ def test_linear_tree_model_single_var(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, ensure slope dimension match n_inputs if key == "slope": @@ -187,7 +189,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -221,7 +223,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -255,7 +257,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -289,7 +291,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -317,12 +319,12 @@ def test_scaling(): lt_def2 = LinearTreeDefinition( regr, unscaled_input_bounds=unscaled_input_bounds, scaling_object=scaler ) - assert lt_def2.scaled_input_bounds[0][0] == approx(scaled_input_bounds[0][0]) - assert lt_def2.scaled_input_bounds[0][1] == approx(scaled_input_bounds[0][1]) + assert lt_def2.scaled_input_bounds[0][0] == pytest.approx(scaled_input_bounds[0][0]) + assert lt_def2.scaled_input_bounds[0][1] == pytest.approx(scaled_input_bounds[0][1]) with pytest.raises( Exception, match="Input Bounds needed to represent linear trees as MIPs" ): - ltmodel_scaled = LinearTreeDefinition(regr) + LinearTreeDefinition(regr) #### MULTIVARIATE INPUT TESTING #### @@ -394,12 +396,12 @@ def 
test_linear_tree_model_multi_var(): # assert attributes in LinearTreeDefinition assert scaled_input_bounds is not None - assert n_inputs == 2 + assert n_inputs == NUM_INPUTS assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -413,12 +415,12 @@ def test_linear_tree_model_multi_var(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -429,8 +431,8 @@ def test_linear_tree_model_multi_var(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, test the shape of it if key == "slope": @@ -494,7 +496,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -536,7 +538,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -578,7 +580,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -620,7 +622,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -641,11 +643,11 @@ def test_summary_dict_as_argument(): # assert attributes in LinearTreeDefinition assert scaled_input_bounds is not None - assert n_inputs == 2 + assert n_inputs == NUM_INPUTS assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -659,12 +661,12 @@ def test_summary_dict_as_argument(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -675,8 +677,8 @@ def test_summary_dict_as_argument(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, test the shape of it if key == "slope": @@ -709,24 +711,26 @@ def test_raise_exception_if_wrong_model_instance(): input_bounds = {0: (min(X[:, 0]), max(X[:, 0])), 1: (min(X[:, 1]), max(X[:, 1]))} with pytest.raises( Exception, - match="Input dict must be the 
summary of the linear-tree model" - + " e.g. dict = model.summary()", + match=( + "Input dict must be the summary of the linear-tree model" + " e.g. dict = model.summary()" + ), ): - ltmodel_small = LinearTreeDefinition( + LinearTreeDefinition( regr.summary(only_leaves=True), scaled_input_bounds=input_bounds ) with pytest.raises( Exception, match="Model entry must be dict or linear-tree instance" ): - ltmodel_small = LinearTreeDefinition((0, 0), scaled_input_bounds=input_bounds) + LinearTreeDefinition((0, 0), scaled_input_bounds=input_bounds) with pytest.raises( Exception, - match="Input dict must be the summary of the linear-tree model" - + " e.g. dict = model.summary()", + match=( + "Input dict must be the summary of the linear-tree model" + " e.g. dict = model.summary()" + ), ): - ltmodel_small = LinearTreeDefinition( - wrong_summary_dict, scaled_input_bounds=input_bounds - ) + LinearTreeDefinition(wrong_summary_dict, scaled_input_bounds=input_bounds) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -762,4 +766,4 @@ def test_raise_exception_for_wrong_transformation(): Exception, match="Supported transformations are: bigm, mbigm, hull, and custom", ): - formulation = LinearTreeGDPFormulation(model_def, transformation="hello") + LinearTreeGDPFormulation(model_def, transformation="hello") diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index 02da81aa..7cc7261d 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -1,28 +1,30 @@ import numpy as np import pyomo.environ as pyo import pytest -from pyomo.common.dependencies import DeferredImportError - from omlt.dependencies import keras, keras_available +from pyomo.common.dependencies import DeferredImportError if keras_available: from omlt.io import load_keras_sequential from conftest import get_neural_network_data - from omlt.block import OmltBlock from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation from omlt.neuralnet.activations import ComplementarityReLUActivation from omlt.scaling import OffsetScaling +LESS_NEAR_EQUAL = 1e-3 +NEAR_EQUAL = 1e-4 +VERY_NEAR_EQUAL = 1e-5 + @pytest.mark.skipif(keras_available, reason="Test only valid when keras not available") def test_keras_not_available_exception(datadir): with pytest.raises(DeferredImportError): - NN = keras.models.load_model(datadir.file("keras_linear_131_relu")) + keras.models.load_model(datadir.file("keras_linear_131_relu")) -def _test_keras_linear_131(keras_fname, reduced_space=False): +def _test_keras_linear_131(keras_fname, *, reduced_space=False): x, y, x_test = get_neural_network_data("131") nn = keras.models.load_model(keras_fname, compile=False) @@ -40,7 +42,10 @@ def _test_keras_linear_131(keras_fname, reduced_space=False): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("ipopt").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < VERY_NEAR_EQUAL + ) def _test_keras_mip_relu_131(keras_fname): @@ -60,7 +65,10 @@ def _test_keras_mip_relu_131(keras_fname): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("cbc").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < VERY_NEAR_EQUAL + ) def 
_test_keras_complementarity_relu_131(keras_fname): @@ -81,10 +89,13 @@ def _test_keras_complementarity_relu_131(keras_fname): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("ipopt").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-4 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < NEAR_EQUAL + ) -def _test_keras_linear_big(keras_fname, reduced_space=False): +def _test_keras_linear_big(keras_fname, *, reduced_space=False): x, y, x_test = get_neural_network_data("131") nn = keras.models.load_model(keras_fname, compile=False) @@ -103,7 +114,10 @@ def _test_keras_linear_big(keras_fname, reduced_space=False): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("ipopt").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < VERY_NEAR_EQUAL + ) @pytest.mark.skipif(not keras_available, reason="Need keras for this test") @@ -183,12 +197,14 @@ def test_scaling_NN_block(datadir): def obj(mdl): return 1 - for x in np.random.normal(1, 0.5, 10): + rng = np.random.default_rng() + + for x in rng.normal(1, 0.5, 10): model.nn.inputs[0].fix(x) - result = pyo.SolverFactory("cbc").solve(model, tee=False) + pyo.SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] y_s = NN.predict([np.array((x_s,))]) y = y_s * scale_y[1] + scale_y[0] - assert y - pyo.value(model.nn.outputs[0]) <= 1e-3 + assert y - pyo.value(model.nn.outputs[0]) <= LESS_NEAR_EQUAL diff --git a/tests/neuralnet/test_layer.py b/tests/neuralnet/test_layer.py index 4a8944ac..6cf2b6de 100644 --- a/tests/neuralnet/test_layer.py +++ b/tests/neuralnet/test_layer.py @@ -1,6 +1,5 @@ import numpy as np import pytest - from omlt.neuralnet.layer import ( ConvLayer2D, DenseLayer, @@ -133,16 +132,16 @@ def test_gnn_layer_with_input_index_mapper(): y3 = np.array([[[-6, 4, 0, -12, 11, 1, -5, 5, 2], [-1, 0, 1, -1, 0, 1, -1, 0, 1]]]) assert np.array_equal(layer._eval_with_adjacency(inputs, A3), y3) - with pytest.raises(ValueError) as excinfo: - layer = GNNLayer([5], [9], weights, biases, N=3) - assert ( - str(excinfo.value) - == "Input size must equal to the number of nodes multiplied by the number of input node features" + expected_msg = ( + "Input size must equal to the number of nodes multiplied by the number of" + " input node features" ) + with pytest.raises(ValueError, match=expected_msg): + layer = GNNLayer([5], [9], weights, biases, N=3) - with pytest.raises(ValueError) as excinfo: - layer = GNNLayer([6], [8], weights, biases, N=3) - assert ( - str(excinfo.value) - == "Output size must equal to the number of nodes multiplied by the number of output node features" + expected_msg = ( + "Output size must equal to the number of nodes multiplied by the number of" + " output node features" ) + with pytest.raises(ValueError, match=expected_msg): + layer = GNNLayer([6], [8], weights, biases, N=3) diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py index f3cadcb7..8dff2365 100644 --- a/tests/neuralnet/test_network_definition.py +++ b/tests/neuralnet/test_network_definition.py @@ -1,18 +1,20 @@ import numpy as np import pyomo.environ as pyo import pytest - from omlt.block import OmltBlock from omlt.neuralnet.layer import DenseLayer, InputLayer from 
omlt.neuralnet.network_definition import NetworkDefinition from omlt.neuralnet.nn_formulation import FullSpaceNNFormulation from omlt.scaling import OffsetScaling +ALMOST_EXACTLY_EQUAL = 1e-8 + # TODO: Build more tests with different activations and edge cases def test_two_node_full_space(): - """ - 1 1 + """Two node full space network. + + 1 1 x0 -------- (1) --------- (3) | / | / @@ -57,14 +59,16 @@ def test_two_node_full_space(): m.obj1 = pyo.Objective(expr=0) status = pyo.SolverFactory("cbc").solve(m, tee=True) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10.0) < 1e-8 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2.0) < 1e-8 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10.0) < ALMOST_EXACTLY_EQUAL + ) + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2.0) < ALMOST_EXACTLY_EQUAL m.neural_net_block.inputs[0].fix(1) status = pyo.SolverFactory("cbc").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1.0) < 1e-8 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0.0) < 1e-8 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1.0) < ALMOST_EXACTLY_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0.0) < ALMOST_EXACTLY_EQUAL def test_input_bounds_no_scaler(): @@ -91,7 +95,7 @@ def test_input_bound_scaling_1D(): scaled_input_bounds = {0: (0, 5), 1: (-2, 2), 2: (0, 1)} unscaled_input_bounds = {} - for k in scaled_input_bounds.keys(): + for k in scaled_input_bounds: lb, ub = scaled_input_bounds[k] unscaled_input_bounds[k] = ( (lb * xfactor[k]) + xoffset[k], @@ -121,7 +125,7 @@ def test_input_bound_scaling_multiD(): scaled_input_bounds = {(0, 0): (0, 5), (0, 1): (-2, 2), (0, 2): (0, 1)} unscaled_input_bounds = {} - for k in scaled_input_bounds.keys(): + for k in scaled_input_bounds: lb, ub = scaled_input_bounds[k] unscaled_input_bounds[k] = ( (lb * xfactor[k]) + xoffset[k], @@ -135,9 +139,7 @@ def test_input_bound_scaling_multiD(): def _test_add_invalid_edge(direction): - """ - direction can be "in" or "out" - """ + """Direction can be "in" or "out".""" net = NetworkDefinition(scaled_input_bounds=[(-10.0, 10.0)]) input_layer = InputLayer([1]) @@ -162,15 +164,13 @@ def _test_add_invalid_edge(direction): ) if direction == "in": - with pytest.raises(ValueError) as excinfo: - net.add_edge(input_layer, dense_layer_1) expected_msg = f"Inbound layer {dense_layer_1} not found in network." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + net.add_edge(input_layer, dense_layer_1) elif direction == "out": - with pytest.raises(ValueError) as excinfo: - net.add_edge(dense_layer_1, dense_layer_0) expected_msg = f"Outbound layer {dense_layer_1} not found in network." 
-        assert str(excinfo.value) == expected_msg
+        with pytest.raises(ValueError, match=expected_msg):
+            net.add_edge(dense_layer_1, dense_layer_0)

 def test_add_invalid_edge():
diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py
index 577a5f45..d79d2160 100644
--- a/tests/neuralnet/test_nn_formulation.py
+++ b/tests/neuralnet/test_nn_formulation.py
@@ -1,8 +1,8 @@
+import re
+
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-from pyomo.contrib.fbbt import interval
-
 from omlt import OmltBlock
 from omlt.neuralnet import (
     FullSpaceNNFormulation,
@@ -29,11 +29,22 @@
     partition_based_dense_relu_layer,
 )
 from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer
+from pyomo.contrib.fbbt import interval

+NEAR_EQUAL = 1e-6
+FULLSPACE_SMOOTH_VARS = 15
+FULLSPACE_SMOOTH_CONSTRAINTS = 14
+FULLSPACE_RELU_VARS = 19
+FULLSPACE_RELU_CONSTRAINTS = 26
+REDUCED_VARS = 6
+REDUCED_CONSTRAINTS = 5
+THREE_NODE_VARS = 81
+THREE_NODE_CONSTRAINTS = 120

 def two_node_network(activation, input_value):
-    """
-    1           1
+    """Two node network.
+
+    1           1
     x0 -------- (1) --------- (3)
                 |           /
                 |          /
@@ -80,21 +91,21 @@ def _test_two_node_FullSpaceNNFormulation_smooth(activation):
     m.neural_net_block = OmltBlock()
     net, y = two_node_network(activation, -2.0)
     m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
-    assert m.nvariables() == 15
-    assert m.nconstraints() == 14
+    assert m.nvariables() == FULLSPACE_SMOOTH_VARS
+    assert m.nconstraints() == FULLSPACE_SMOOTH_CONSTRAINTS
     m.neural_net_block.inputs[0].fix(-2)
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
+    pyo.SolverFactory("ipopt").solve(m, tee=False)

-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL

     net, y = two_node_network(activation, 1.0)
     m.neural_net_block.inputs[0].fix(1)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    pyo.SolverFactory("ipopt").solve(m, tee=False)
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL

 def _test_two_node_FullSpaceNNFormulation_relu():
@@ -102,21 +113,21 @@
     m.neural_net_block = OmltBlock()
     net, y = two_node_network("relu", -2.0)
     m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
-    assert m.nvariables() == 19
-    assert m.nconstraints() == 26
+    assert m.nvariables() == FULLSPACE_RELU_VARS
+    assert m.nconstraints() == FULLSPACE_RELU_CONSTRAINTS
     m.neural_net_block.inputs[0].fix(-2)
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("cbc").solve(m, tee=False)
- assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_FullSpaceSmoothNNFormulation(activation): @@ -124,21 +135,21 @@ def _test_two_node_FullSpaceSmoothNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(FullSpaceSmoothNNFormulation(net)) - assert m.nvariables() == 15 - assert m.nconstraints() == 14 + assert m.nvariables() == FULLSPACE_SMOOTH_VARS + assert m.nconstraints() == FULLSPACE_SMOOTH_CONSTRAINTS m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) + pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL net, y = two_node_network(activation, 1.0) m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_ReducedSpaceNNFormulation(activation): @@ -146,21 +157,21 @@ def _test_two_node_ReducedSpaceNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) - assert m.nvariables() == 6 - assert m.nconstraints() == 5 + assert m.nvariables() == REDUCED_VARS + assert m.nconstraints() == REDUCED_CONSTRAINTS m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) + pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL net, y = two_node_network(activation, 1.0) m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): @@ -168,21 +179,21 @@ def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(ReducedSpaceSmoothNNFormulation(net)) - assert m.nvariables() == 6 
- assert m.nconstraints() == 5 + assert m.nvariables() == REDUCED_VARS + assert m.nconstraints() == REDUCED_CONSTRAINTS m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) + pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL net, y = two_node_network(activation, 1.0) m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def test_two_node_ReducedSpaceNNFormulation(): @@ -198,10 +209,9 @@ def test_two_node_ReducedSpaceSmoothNNFormulation(): def test_two_node_ReducedSpaceSmoothNNFormulation_invalid_activation(): - with pytest.raises(ValueError) as excinfo: - _test_two_node_ReducedSpaceSmoothNNFormulation("relu") expected_msg = "Activation relu is not supported by this formulation." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _test_two_node_ReducedSpaceSmoothNNFormulation("relu") def test_two_node_FullSpaceNNFormulation(): @@ -218,15 +228,15 @@ def test_two_node_FullSpaceSmoothNNFormulation(): def test_two_node_FullSpaceSmoothNNFormulation_invalid_activation(): - with pytest.raises(ValueError) as excinfo: - _test_two_node_FullSpaceSmoothNNFormulation("relu") expected_msg = "Activation relu is not supported by this formulation." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _test_two_node_FullSpaceSmoothNNFormulation("relu") @pytest.mark.skip(reason="Need to add checks on layer types") def test_invalid_layer_type(): - raise AssertionError("Layer type test not yet implemented") + msg = "Layer type test not yet implemented" + raise AssertionError(msg) def _maxpool_conv_network(inputs): @@ -337,35 +347,27 @@ def test_maxpool_FullSpaceNNFormulation(): inputs_d, inputs_r, inputs_c ] m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0, 0]) - y[0, 0, 0]) < 1e-6 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0, 0]) - y[0, 0, 0]) < NEAR_EQUAL def _test_formulation_initialize_extra_input(network_formulation): - """ - network_formulation can be: - 'FullSpace', - 'ReducedSpace' - """ + """network_formulation can be: 'FullSpace', 'ReducedSpace'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) net.add_layer(extra_input) - with pytest.raises(ValueError) as excinfo: - if network_formulation == "FullSpace": + + expected_msg = "Multiple input layers are not currently supported."
+ if network_formulation == "FullSpace": + with pytest.raises(ValueError, match=expected_msg): FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": + elif network_formulation == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): ReducedSpaceNNFormulation(net) - expected_msg = "Multiple input layers are not currently supported." - assert str(excinfo.value) == expected_msg def _test_formulation_added_extra_input(network_formulation): - """ - network_formulation can be: - 'FullSpace', - 'ReducedSpace' - 'relu' - """ + """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) if network_formulation == "FullSpace": @@ -375,19 +377,13 @@ def _test_formulation_added_extra_input(network_formulation): elif network_formulation == "relu": formulation = ReluPartitionFormulation(net) net.add_layer(extra_input) - with pytest.raises(ValueError) as excinfo: - formulation.input_indexes expected_msg = "Multiple input layers are not currently supported." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _ = formulation.input_indexes def _test_formulation_build_extra_input(network_formulation): - """ - network_formulation can be: - 'FullSpace', - 'ReducedSpace' - 'relu' - """ + """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) if network_formulation == "FullSpace": @@ -399,19 +395,13 @@ def _test_formulation_build_extra_input(network_formulation): net.add_layer(extra_input) m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() - with pytest.raises(ValueError) as excinfo: - m.neural_net_block.build_formulation(formulation) expected_msg = "Multiple input layers are not currently supported." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + m.neural_net_block.build_formulation(formulation) def _test_formulation_added_extra_output(network_formulation): - """ - network_formulation can be: - 'FullSpace', - 'ReducedSpace' - 'relu' - """ + """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_output = DenseLayer( [1, 2], @@ -428,18 +418,13 @@ def _test_formulation_added_extra_output(network_formulation): formulation = ReluPartitionFormulation(net) net.add_layer(extra_output) net.add_edge(list(net.layers)[-2], extra_output) - with pytest.raises(ValueError) as excinfo: - formulation.output_indexes expected_msg = "Multiple output layers are not currently supported." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _ = formulation.output_indexes def _test_formulation_initialize_extra_output(network_formulation): - """ - network_formulation can be: - 'FullSpace', - 'ReducedSpace' - """ + """network_formulation can be: 'FullSpace', 'ReducedSpace'.""" net, y = two_node_network("linear", -2.0) extra_output = DenseLayer( [1, 2], @@ -450,13 +435,14 @@ def _test_formulation_initialize_extra_output(network_formulation): ) net.add_layer(extra_output) net.add_edge(list(net.layers)[-2], extra_output) - with pytest.raises(ValueError) as excinfo: - if network_formulation == "FullSpace": + + expected_msg = "Multiple output layers are not currently supported."
+ if network_formulation == "FullSpace": + with pytest.raises(ValueError, match=expected_msg): FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": + elif network_formulation == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): ReducedSpaceNNFormulation(net) - expected_msg = "Multiple output layers are not currently supported." - assert str(excinfo.value) == expected_msg def test_FullSpaceNNFormulation_invalid_network(): @@ -489,19 +475,18 @@ def _test_dense_layer_multiple_predecessors(layer_type): test_layer = list(net.layers)[2] net.add_layer(extra_input) net.add_edge(extra_input, test_layer) - with pytest.raises(ValueError) as excinfo: - if layer_type == "PartitionBased": + + expected_msg = re.escape(f"Layer {test_layer} has multiple predecessors.") + if layer_type == "PartitionBased": + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer(m, net, m, test_layer, None) - elif layer_type == "ReducedSpace": + elif layer_type == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): reduced_space_dense_layer(m, net, m, test_layer, None) - expected_msg = f"Layer {test_layer} has multiple predecessors." - assert str(excinfo.value) == expected_msg def _test_dense_layer_no_predecessors(layer_type): - """ - Layer type can be "ReducedSpace", or "PartitionBased". - """ + """Layer type can be "ReducedSpace" or "PartitionBased".""" m = pyo.ConcreteModel() net = NetworkDefinition(scaled_input_bounds=[(-10.0, 10.0)]) @@ -513,13 +498,16 @@ biases=np.array([1.0, 2.0]), ) net.add_layer(test_layer) - with pytest.raises(ValueError) as excinfo: - if layer_type == "PartitionBased": + + expected_msg = re.escape( + f"Layer {test_layer} is not an input layer, but has no predecessors." + ) + if layer_type == "PartitionBased": + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer(m, net, m, test_layer, None) - elif layer_type == "ReducedSpace": + elif layer_type == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): reduced_space_dense_layer(m, net, m, test_layer, None) - expected_msg = f"Layer {test_layer} is not an input layer, but has no predecessors." - assert str(excinfo.value) == expected_msg def test_partition_based_dense_layer_predecessors(): @@ -546,12 +534,11 @@ def test_partition_based_unbounded_below(): split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded below." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded below." - assert str(excinfo.value) == expected_msg def test_partition_based_unbounded_above(): @@ -568,12 +555,11 @@ def test_partition_based_unbounded_above(): split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded above." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded above."
- assert str(excinfo.value) == expected_msg def test_partition_based_bias_unbounded_below(): @@ -588,12 +574,11 @@ def test_partition_based_bias_unbounded_below(): test_layer.biases[0] = -interval.inf split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded below." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded below." - assert str(excinfo.value) == expected_msg def test_partition_based_bias_unbounded_above(): @@ -607,13 +592,11 @@ def test_partition_based_bias_unbounded_above(): test_layer.biases[0] = interval.inf split_func = lambda w: default_partition_split_func(w, 2) - - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded above." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded above." - assert str(excinfo.value) == expected_msg def test_fullspace_internal_extra_input(): @@ -626,10 +609,9 @@ def test_fullspace_internal_extra_input(): m.neural_net_block.build_formulation(formulation) net.add_layer(extra_input) net.add_edge(extra_input, test_layer) - with pytest.raises(ValueError) as excinfo: - _input_layer_and_block(m.neural_net_block, net, test_layer) expected_msg = "Multiple input layers are not currently supported." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _input_layer_and_block(m.neural_net_block, net, test_layer) def test_conv2d_extra_activation(): @@ -673,10 +655,14 @@ def test_conv2d_extra_activation(): ) net.add_layer(maxpool_layer_1) net.add_edge(conv_layer_2, maxpool_layer_1) - with pytest.raises(ValueError) as excinfo: + expected_msg = re.escape( + "Activation is applied after convolution layer, but the successor maxpooling" + " layer PoolingLayer(input_size=[1, 3, 4], output_size=[1, 1, 2]," + " strides=[2, 2], kernel_shape=[3, 2]), pool_func_name=max has an activation" + " function also." + ) + with pytest.raises(ValueError, match=expected_msg): m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) - expected_msg = """Activation is applied after convolution layer, but the successor max pooling layer PoolingLayer(input_size=[1, 3, 4], output_size=[1, 1, 2], strides=[2, 2], kernel_shape=[3, 2]), pool_func_name=max has an activation function also.""" - assert str(excinfo.value) == expected_msg def test_maxpool2d_bad_input_activation(): @@ -730,13 +716,14 @@ def test_maxpool2d_bad_input_activation(): m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) conv_layer_2.activation = "relu" - - with pytest.raises(ValueError) as excinfo: + expected_msg = ( + "Non-increasing activation functions on the preceding convolutional" + " layer are not supported." 
+ ) + with pytest.raises(ValueError, match=expected_msg): full_space_maxpool2d_layer( m.neural_net_block, net, m.neural_net_block, maxpool_layer_1 ) - expected_msg = """Non-increasing activation functions on the preceding convolutional layer are not supported.""" - assert str(excinfo.value) == expected_msg def test_maxpool2d_bad_input_layer(): @@ -876,15 +863,15 @@ def _test_three_node_graph_neural_network(graph_type): for i in range(6): m.nn.inputs[i].fix(inputs[i]) - assert m.nvariables() == 81 - assert m.nconstraints() == 120 + assert m.nvariables() == THREE_NODE_VARS + assert m.nconstraints() == THREE_NODE_CONSTRAINTS m.obj = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) + pyo.SolverFactory("cbc").solve(m, tee=False) for i in range(9): - assert abs(pyo.value(m.nn.outputs[i]) - y[i]) < 1e-6 + assert abs(pyo.value(m.nn.outputs[i]) - y[i]) < NEAR_EQUAL for i in range(6): for j in range(3): @@ -893,7 +880,7 @@ def _test_three_node_graph_neural_network(graph_type): pyo.value(m.nn.layer[m.nn.layers.at(1)].zbar[i, j]) - pyo.value(m.nn.A[i // 2, j]) * inputs[i] ) - < 1e-6 + < NEAR_EQUAL ) diff --git a/tests/neuralnet/test_onnx.py b/tests/neuralnet/test_onnx.py index bb9b9dfd..7cad2d78 100644 --- a/tests/neuralnet/test_onnx.py +++ b/tests/neuralnet/test_onnx.py @@ -2,9 +2,8 @@ import numpy as np import pytest -from pyomo.common.dependencies import DeferredImportError - from omlt.dependencies import onnx, onnx_available +from pyomo.common.dependencies import DeferredImportError if onnx_available: import onnxruntime as ort @@ -14,16 +13,15 @@ write_onnx_model_with_bounds, ) -from pyomo.environ import * - from omlt import OffsetScaling, OmltBlock from omlt.neuralnet import FullSpaceNNFormulation +from pyomo.environ import ConcreteModel, SolverFactory, value @pytest.mark.skipif(onnx_available, reason="Test only valid when onnx not available") def test_onnx_not_available_exception(datadir): with pytest.raises(DeferredImportError): - neural_net = onnx.load(datadir.file("keras_linear_131_relu.onnx")) + onnx.load(datadir.file("keras_linear_131_relu.onnx")) @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -58,7 +56,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("cbc").solve(model, tee=False) + SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -101,7 +99,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("cbc").solve(model, tee=False) + SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -145,7 +143,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("ipopt").solve(model, tee=False) + SolverFactory("ipopt").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -159,12 +157,12 @@ def obj(mdl): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") def test_onnx_bounds_loader_writer(datadir): onnx_model = onnx.load(datadir.file("keras_conv_7x7_relu.onnx")) - scaled_input_bounds = dict() + scaled_input_bounds = {} for i in range(7): for j in range(7): scaled_input_bounds[0, i, j] = (0.0, 1.0) with tempfile.NamedTemporaryFile(suffix=".onnx") as f: write_onnx_model_with_bounds(f.name, onnx_model, scaled_input_bounds) net = load_onnx_neural_network_with_bounds(f.name) - for key, value 
in net.scaled_input_bounds.items(): - assert scaled_input_bounds[key] == value + for key, val in net.scaled_input_bounds.items(): + assert scaled_input_bounds[key] == val diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 15ba97d1..59dc247a 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -1,7 +1,6 @@ import numpy as np import pyomo.environ as pyo import pytest - from omlt.block import OmltBlock from omlt.dependencies import onnx_available from omlt.neuralnet import ( @@ -14,6 +13,7 @@ # TODO: Add tests for single dimensional outputs as well +NEAR_EQUAL = 1e-3 def test_two_node_bigm(two_node_network_relu): m = pyo.ConcreteModel() @@ -24,14 +24,14 @@ def test_two_node_bigm(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluBigMFormulation(two_node_network_relu): @@ -43,14 +43,14 @@ def test_two_node_ReluBigMFormulation(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_complementarity(two_node_network_relu): @@ -64,14 +64,14 @@ def test_two_node_complementarity(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, 
tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluComplementarityFormulation(two_node_network_relu): @@ -82,14 +82,14 @@ def test_two_node_ReluComplementarityFormulation(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluPartitionFormulation(two_node_network_relu): @@ -101,14 +101,14 @@ def test_two_node_ReluPartitionFormulation(two_node_network_relu): m.obj1 = pyo.Objective(expr=0) m.neural_net_block.inputs[0].fix(-2) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -124,8 +124,8 @@ def test_conv_ReluBigMFormulation(datadir): m.obj1 = pyo.Objective(expr=0) # compute expected output for this input - input = np.eye(7, 7).reshape(1, 7, 7) - x = input + inputs = np.eye(7, 7).reshape(1, 7, 7) + x = inputs for layer in net.layers: x = layer.eval_single_layer(x) output = x @@ -133,7 +133,7 @@ for i in range(7): for j in range(7): - m.neural_net_block.inputs[0, i, j].fix(input[0, i, j]) - status = pyo.SolverFactory("cbc").solve(m, tee=False) + m.neural_net_block.inputs[0, i, j].fix(inputs[0, i, j]) + pyo.SolverFactory("cbc").solve(m, tee=False) d, r, c = output.shape for i in range(d): for j in range(r): for k in range(c): expected = output[i, j, k] actual = pyo.value(m.neural_net_block.outputs[i, j, k]) - assert abs(actual - expected) < 1e-3 + assert abs(actual - expected) < NEAR_EQUAL diff --git a/tests/neuralnet/train_keras_models.py b/tests/neuralnet/train_keras_models.py index c2de9dbc..81469c6a 100644 --- a/tests/neuralnet/train_keras_models.py +++ b/tests/neuralnet/train_keras_models.py @@ -1,13 +1,10 @@ -import pytest import keras - -# from conftest import get_neural_network_data +from conftest import get_neural_network_data from keras.layers
import Conv2D, Dense -from keras.models import Model, Sequential -from pyomo.common.fileutils import this_file_dir +from keras.models import Sequential from keras.optimizers import Adamax - from omlt.io import write_onnx_model_with_bounds +from pyomo.common.fileutils import this_file_dir def train_models(): @@ -37,7 +34,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131.keras") @@ -69,7 +66,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_sigmoid.keras") @@ -102,7 +99,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save( @@ -136,7 +133,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_relu.keras") @@ -169,7 +166,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_relu_output_activation.keras") @@ -202,7 +199,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save( @@ -263,7 +260,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/big.keras") @@ -305,7 +302,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) @@ -333,7 +330,7 @@ def train_conv(): onnx_model, _ = tf2onnx.convert.from_keras(nn) - input_bounds = dict() + input_bounds = {} for i in range(7): for j in range(7): input_bounds[0, i, j] = (0.0, 1.0) diff --git a/tests/notebooks/test_run_notebooks.py b/tests/notebooks/test_run_notebooks.py index 9b1361c9..7871bc87 100644 --- a/tests/notebooks/test_run_notebooks.py +++ b/tests/notebooks/test_run_notebooks.py @@ -1,22 +1,22 @@ import os +from pathlib import Path import pytest -from pyomo.common.fileutils import this_file_dir -from testbook import testbook - from omlt.dependencies import ( keras_available, onnx_available, torch_available, torch_geometric_available, ) +from pyomo.common.fileutils import this_file_dir +from testbook import testbook # TODO: These will be replaced with stronger tests using testbook soon def _test_run_notebook(folder, notebook_fname, n_cells): # Change to notebook directory to allow for testing - cwd = os.getcwd() - os.chdir(os.path.join(this_file_dir(), "..", "..", "docs", "notebooks", folder)) + cwd = Path.cwd() + os.chdir(Path(this_file_dir()) / ".." / ".." 
/ "docs" / "notebooks" / folder) with testbook(notebook_fname, timeout=500, execute=True) as tb: assert tb.code_cells_executed == n_cells os.chdir(cwd) diff --git a/tests/test_block.py b/tests/test_block.py index 6c6311f5..ccb8753f 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -1,10 +1,12 @@ import pyomo.environ as pyo import pytest - from omlt import OmltBlock +INPUTS_LENGTH = 3 +OUTPUTS_LENGTH = 2 + -class dummy_formulation(object): +class DummyFormulation: def __init__(self): self.input_indexes = ["A", "C", "D"] self.output_indexes = [(0, 0), (0, 1), (1, 0), (1, 1)] @@ -26,27 +28,29 @@ def test_block(): output_indexes=[(0, 0), (0, 1), (1, 0), (1, 1)], ) - assert [k for k in m.b.inputs] == ["A", "B", "C"] - assert [k for k in m.b.outputs] == [1, 4] - assert [k for k in m.b2.inputs] == [(1, 3), (42, 1975), (13, 2)] - assert [k for k in m.b2.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(m.b.inputs) == ["A", "B", "C"] + assert list(m.b.outputs) == [1, 4] + assert list(m.b2.inputs) == [(1, 3), (42, 1975), (13, 2)] + assert list(m.b2.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] m = pyo.ConcreteModel() m.b = OmltBlock() - formulation = dummy_formulation() + formulation = DummyFormulation() m.b.build_formulation(formulation) - print(dir(m.b)) + assert m.b._OmltBlockData__formulation is formulation - assert [k for k in m.b.inputs] == ["A", "C", "D"] - assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(m.b.inputs) == ["A", "C", "D"] + assert list(m.b.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_input_output_auto_creation(): m = pyo.ConcreteModel() m.b = OmltBlock() - m.b._setup_inputs_outputs(input_indexes=range(3), output_indexes=range(2)) - assert len(m.b.inputs) == 3 - assert len(m.b.outputs) == 2 + m.b._setup_inputs_outputs( + input_indexes=range(INPUTS_LENGTH), output_indexes=range(OUTPUTS_LENGTH) + ) + assert len(m.b.inputs) == INPUTS_LENGTH + assert len(m.b.outputs) == OUTPUTS_LENGTH m.b2 = OmltBlock() m.b2._setup_inputs_outputs( @@ -57,7 +61,8 @@ def test_input_output_auto_creation(): assert len(m.b2.outputs) == 1 m.b3 = OmltBlock() - with pytest.raises(ValueError): + expected_msg = "OmltBlock must have at least one input and at least one output." 
+ with pytest.raises(ValueError, match=expected_msg): m.b3._setup_inputs_outputs( input_indexes=[], output_indexes=[], diff --git a/tests/test_formulation.py b/tests/test_formulation.py index 4e047845..df4aa0d9 100644 --- a/tests/test_formulation.py +++ b/tests/test_formulation.py @@ -1,9 +1,8 @@ import pytest -from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var, value - from omlt.block import OmltBlock from omlt.formulation import _setup_scaled_inputs_outputs from omlt.scaling import OffsetScaling +from pyomo.environ import ConcreteModel, Objective, SolverFactory, value def test_scaled_inputs_outputs(): @@ -32,7 +31,7 @@ def test_scaled_inputs_outputs(): m.obj = Objective(expr=1) m.b1.inputs.fix(2) m.b1.outputs.fix(1) - status = SolverFactory("ipopt").solve(m) + SolverFactory("ipopt").solve(m) assert value(m.b1.scaled_inputs[(0, 0)]) == pytest.approx(4.0) assert value(m.b1.scaled_inputs[(0, 1)]) == pytest.approx(1.0) @@ -68,7 +67,7 @@ def test_scaled_inputs_outputs(): m.obj = Objective(expr=1) m.b1.inputs.fix(2) m.b1.outputs.fix(1) - status = SolverFactory("ipopt").solve(m) + SolverFactory("ipopt").solve(m) assert value(m.b1.scaled_inputs[0]) == pytest.approx(4.0) assert value(m.b1.scaled_inputs[1]) == pytest.approx(1.0) assert value(m.b1.scaled_inputs[2]) == pytest.approx(0.0) diff --git a/tests/test_scaling.py b/tests/test_scaling.py index 05b0e013..790241bf 100644 --- a/tests/test_scaling.py +++ b/tests/test_scaling.py @@ -1,6 +1,7 @@ +import re + import numpy as np import pytest - from omlt import OffsetScaling from omlt.scaling import convert_to_dict @@ -71,48 +72,44 @@ def test_incorrect_keys(): np.testing.assert_almost_equal(list(test_y_unscal.values()), list(y.values())) x = {1: 42, 2: 65} - with pytest.raises(ValueError) as excinfo: - test_x_scal = scaling.get_scaled_input_expressions(x) - expected_msg = ( + expected_msg = re.escape( "get_scaled_input_expressions called with input_vars that " "do not have the same indices as offset_inputs or factor_inputs.\nKeys " "in input_vars: [1, 2].\nKeys in offset_inputs: [1, 42].\nKeys in " "offset_factor: [1, 42]." 
) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + test_x_scal = scaling.get_scaled_input_expressions(x) y = {7: -1, 19: 2, 11: 3} - with pytest.raises(ValueError) as excinfo: - test_y_scal = scaling.get_scaled_output_expressions(y) - expected_msg = ( + expected_msg = re.escape( "get_scaled_output_expressions called with output_vars that " "do not have the same indices as offset_outputs or factor_outputs.\nKeys " "in output_vars: [7, 11, 19]\nKeys in offset_outputs: [7, 9, 11]\nKeys in " "offset_factor: [7, 9, 11]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + scaling.get_scaled_output_expressions(y) x_scal = {1: 42, 2: 65} - with pytest.raises(ValueError) as excinfo: - test_x_unscal = scaling.get_unscaled_input_expressions(x_scal) - expected_msg = ( + expected_msg = re.escape( "get_scaled_input_expressions called with input_vars that " "do not have the same indices as offset_inputs or factor_inputs.\nKeys " "in input_vars: [1, 2]\nKeys in offset_inputs: [1, 42]\nKeys in " "offset_factor: [1, 42]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + scaling.get_unscaled_input_expressions(x_scal) y_scal = {7: -1, 8: 2, 11: 3} - with pytest.raises(ValueError) as excinfo: - test_y_unscal = scaling.get_unscaled_output_expressions(y_scal) - expected_msg = ( + expected_msg = re.escape( "get_scaled_output_expressions called with output_vars that do " "not have the same indices as offset_outputs or factor_outputs.\nKeys in " "output_vars: [7, 8, 11]\nKeys in offset_outputs: [7, 9, 11]\nKeys in " "offset_factor: [7, 9, 11]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + test_y_unscal = scaling.get_unscaled_output_expressions(y_scal) def test_negative_offsets(): @@ -121,36 +118,38 @@ def test_negative_offsets(): y_offset = [-4, 2, 1.784] y_factor = [2, 1.5, 1.3] - with pytest.raises(ValueError) as excinfo: - scaling = OffsetScaling( + expected_msg = ( + "OffsetScaling only accepts positive values" + " for factor_inputs. Negative value found at" + " index 0." + ) + + with pytest.raises(ValueError, match=expected_msg): + OffsetScaling( offset_inputs=x_offset, factor_inputs=x_factor, offset_outputs=y_offset, factor_outputs=y_factor, ) - assert ( - str(excinfo.value) == "OffsetScaling only accepts positive values" - " for factor_inputs. Negative value found at" - " index 0." - ) x_offset = [42, 65] x_factor = [1975, 1964] y_offset = [-4, 2, 1.784] y_factor = [2, -1.5, 1.3] - with pytest.raises(ValueError) as excinfo: - scaling = OffsetScaling( + expected_msg = ( + "OffsetScaling only accepts positive values" + " for factor_outputs. Negative value found at" + " index 1." + ) + + with pytest.raises(ValueError, match=expected_msg): + OffsetScaling( offset_inputs=x_offset, factor_inputs=x_factor, offset_outputs=y_offset, factor_outputs=y_factor, ) - assert ( - str(excinfo.value) == "OffsetScaling only accepts positive values" - " for factor_outputs. Negative value found at" - " index 1." - ) if __name__ == "__main__": From 051ac3bfeac932ccb6a62d033810a52546534633 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun, 23 Jun 2024 00:27:31 +0000 Subject: [PATCH 61/75] Fixing ruff linting errors. 
--- pyproject.toml | 14 ++++ src/omlt/block.py | 19 ++++-- src/omlt/io/input_bounds.py | 4 +- src/omlt/io/keras/keras_reader.py | 1 - src/omlt/io/onnx.py | 2 +- .../torch_geometric/torch_geometric_reader.py | 12 ++-- src/omlt/neuralnet/__init__.py | 4 +- src/omlt/neuralnet/layers/full_space.py | 8 +-- src/omlt/neuralnet/nn_formulation.py | 25 +++---- tests/gbt/test_gbt_formulation.py | 35 +--------- tests/io/test_onnx_parser.py | 2 +- tests/io/test_torch_geometric.py | 28 ++++---- tests/neuralnet/test_keras.py | 2 +- tests/neuralnet/test_network_definition.py | 19 ++++-- tests/neuralnet/test_nn_formulation.py | 66 +++++++++---------- tests/neuralnet/test_relu.py | 16 +++-- tests/notebooks/test_run_notebooks.py | 3 +- tests/test_block.py | 26 ++++++-- 18 files changed, 151 insertions(+), 135 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4ad1ca44..c504866e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,6 +78,19 @@ ignore = [ "ANN401", "COM812", "ISC001", + "SLF001", + "ARG001", + "N803", + "N806", + # Remove these after issue https://github.com/cog-imperial/OMLT/issues/153 is fixed. + "D100", + "D101", + "D102", + "D103", + "D104", + "D105", + "D106", + "D107", # TODO: Remove these eventually "ANN001", "ANN002", @@ -106,6 +119,7 @@ convention = "google" "INP001", ] "docs/conf.py" = ["D100", "INP001"] +"src/omlt/neuralnet/layer.py" = ["N802"] [tool.mypy] show_error_codes = true diff --git a/src/omlt/block.py b/src/omlt/block.py index 0a03838d..b8bb391d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -53,11 +53,6 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ self.__input_indexes = input_indexes self.__output_indexes = output_indexes - if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide - # more contextual error msg - msg = "OmltBlock must have at least one input and at least one output." - raise ValueError(msg) self.inputs_set = pyo.Set(initialize=input_indexes) self.inputs = pyo.Var(self.inputs_set, initialize=0) @@ -77,6 +72,20 @@ def build_formulation(self, formulation): formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation """ + if not formulation.input_indexes: + msg = ( + "OmltBlock must have at least one input to build a formulation. " + f"{formulation} has no inputs." + ) + raise ValueError(msg) + + if not formulation.output_indexes: + msg = ( + "OmltBlock must have at least one output to build a formulation. " + f"{formulation} has no outputs." 
+ ) + raise ValueError(msg) + self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/src/omlt/io/input_bounds.py b/src/omlt/io/input_bounds.py index 9826d498..f01eb3ca 100644 --- a/src/omlt/io/input_bounds.py +++ b/src/omlt/io/input_bounds.py @@ -5,13 +5,13 @@ def write_input_bounds(input_bounds_filename, input_bounds): """Write the specified input bounds to the given file.""" input_bounds = _prepare_input_bounds(input_bounds) - with Path.open(input_bounds_filename, "w") as f: + with Path(input_bounds_filename).open("w") as f: json.dump(input_bounds, f) def load_input_bounds(input_bounds_filename): """Read the input bounds from the given file.""" - with Path.open(input_bounds_filename) as f: + with Path(input_bounds_filename).open() as f: raw_input_bounds = json.load(f) return dict(_parse_raw_input_bounds(d) for d in raw_input_bounds) diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index 3ec0aaaa..d7429d72 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -35,7 +35,6 @@ def load_keras_sequential( ------- NetworkDefinition """ - # TODO: Add exceptions for unsupported layer types n_inputs = len(nn.layers[0].get_weights()[0]) net = NetworkDefinition( diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index 9676ea31..b48915a9 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -21,7 +21,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): bounds on the input variables """ if onnx_model is not None: - with Path.open(filename, "wb") as f: + with Path(filename).open("wb") as f: f.write(onnx_model.SerializeToString()) if input_bounds is not None: diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index 090d9b5a..4203338b 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -224,10 +224,10 @@ def load_torch_geometric_sequential( biases=biases, ) elif operations[index] == "GCNConv": - assert l.improved == False - assert l.cached == False - assert l.add_self_loops == True - assert l.normalize == True + assert not l.improved + assert not l.cached + assert l.add_self_loops + assert l.normalize gnn_weights = l.lin.weight.detach().numpy() gnn_biases = l.bias.detach().numpy() gnn_norm = _compute_gcn_norm(A) @@ -244,8 +244,8 @@ def load_torch_geometric_sequential( N=N, ) elif operations[index] == "SAGEConv": - assert l.normalize == False - assert l.project == False + assert not l.normalize + assert not l.project assert l.aggr in _AGGREGATION_OP_TYPES gnn_weights_uv = l.lin_l.weight.detach().numpy() gnn_biases = l.lin_l.bias.detach().numpy() diff --git a/src/omlt/neuralnet/__init__.py b/src/omlt/neuralnet/__init__.py index ef90caf3..014de739 100644 --- a/src/omlt/neuralnet/__init__.py +++ b/src/omlt/neuralnet/__init__.py @@ -13,9 +13,9 @@ \xrightarrow[\text{Constraints}]{\text{Layer 3}}\cdots \end{align*} -where +where :math:`\mathbf z^{(0)}` is the output of `InputLayer`, -:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, +:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, :math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. 
""" diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 45b61a90..25fd2dbb 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -205,8 +205,6 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer): input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - # for out_d, out_r, out_c in layer.output_indexes: - # output_index = (out_d, out_r, out_c) @layer_block.Constraint(layer.output_indexes) def convolutional_layer(b, *output_index): out_d, out_r, out_c = output_index @@ -217,7 +215,6 @@ def convolutional_layer(b, *output_index): lb, ub = compute_bounds_on_expr(expr) layer_block.zhat[output_index].setlb(lb) layer_block.zhat[output_index].setub(ub) - # layer_block.constraints.add(layer_block.zhat[output_index] == expr) return layer_block.zhat[output_index] == expr @@ -273,8 +270,9 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): " are not supported." ) raise ValueError(msg) - # TODO - add support for non-increasing activation functions on preceding - # convolutional layer + # TODO @cog-imperial: add support for non-increasing activation functions on + # preceding convolutional layer + # https://github.com/cog-imperial/OMLT/issues/154 # note kernel indexes are the same set of values for any output index, so wlog get # kernel indexes for (0, 0, 0) diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index d8eb5b37..8e835d23 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -306,23 +306,18 @@ def __init__(self, network_structure, activation_functions=None): self.__scaling_object = network_structure.scaling_object self.__scaled_input_bounds = network_structure.scaled_input_bounds - # TODO: look into increasing support for other layers / activations - # self._layer_constraints = {**_DEFAULT_LAYER_CONSTRAINTS, **layer_constraints} self._activation_functions = dict( self._supported_default_activation_functions() ) if activation_functions is not None: self._activation_functions.update(activation_functions) - # If we want to do network input/output validation at initialize time instead - # of build time, as it is for FullSpaceNNFormulation: - # - # network_inputs = list(self.__network_definition.input_nodes) - # if len(network_inputs) != 1: - # raise ValueError(MULTI_INPUTS_UNSUPPORTED) - # network_outputs = list(self.__network_definition.output_nodes) - # if len(network_outputs) != 1: - # raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) + network_inputs = list(self.__network_definition.input_nodes) + if len(network_inputs) != 1: + raise ValueError(MULTI_INPUTS_UNSUPPORTED) + network_outputs = list(self.__network_definition.output_nodes) + if len(network_outputs) != 1: + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) def _supported_default_activation_functions(self): return dict(_DEFAULT_ACTIVATION_FUNCTIONS) @@ -365,7 +360,13 @@ def z(b, *output_index): # skip the InputLayer continue - # TODO: Add error checking on layer type + if not isinstance(layer, DenseLayer): + msg = ( + f"ReducedSpaceNNFormulation only supports Dense layers. {net}" + f" contains {layer} which is a {type(layer)}." 
+ ) + raise TypeError(msg) + # build the linear expressions and the activation function layer_id = id(layer) layer_block = block.layer[layer_id] diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index a7c7557c..4a99b646 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -31,7 +31,7 @@ def test_formulation_with_continuous_variables(): assert ( len(list(m.gbt.component_data_objects(pe.Var))) == 202 + 10 ) # our auto-created variables - # TODO: fix below?: + assert len(list(m.gbt.component_data_objects(pe.Constraint))) == TOTAL_CONSTRAINTS assert len(m.gbt.z_l) == Z_L_VARS @@ -45,39 +45,6 @@ def test_formulation_with_continuous_variables(): assert len(m.gbt.var_upper) == Y_VARS -# TODO: did we remove categorical variables intentionally? -# def test_formulation_with_categorical_variables(): -# model = onnx.load(Path(__file__).parent / "categorical_model.onnx") - -# m = pe.ConcreteModel() - -# m.x = pe.Var(range(3), bounds=(-2.0, 2.0)) -# # categorical variable -# m.y = pe.Var(bounds=(0, 1), domain=pe.Integers) - -# m.z = pe.Var() - -# m.gbt = pe.Block() -# add_formulation_to_block( -# m.gbt, model, input_vars=[m.x[0], m.x[1], m.x[2], m.y], output_vars=[m.z] -# ) - -# assert len(list(m.gbt.component_data_objects(pe.Var))) == 193 -# # there are 28 * 2 constraints missing -# # related to categorical variables -# assert len(list(m.gbt.component_data_objects(pe.Constraint))) == 391 - -# assert len(m.gbt.z_l) == 160 -# assert len(m.gbt.y) == 31 - -# assert len(m.gbt.single_leaf) == 20 -# assert len(m.gbt.left_split) == 140 -# assert len(m.gbt.right_split) == 140 -# assert len(m.gbt.categorical) == 1 -# assert len(m.gbt.var_lower) == 31 -# assert len(m.gbt.var_upper) == 31 - - @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") def test_big_m_formulation_block(): onnx_model = onnx.load(Path(__file__).parent / "continuous_model.onnx") diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py index 2f4510c3..3227e67d 100644 --- a/tests/io/test_onnx_parser.py +++ b/tests/io/test_onnx_parser.py @@ -66,7 +66,7 @@ def test_gemm(datadir): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") -def test_gemm_transB(datadir): +def test_gemm_trans_b(datadir): model = onnx.load(datadir.file("gemm_not_transB.onnx")) model_transB = onnx.load(datadir.file("gemm_transB.onnx")) net = load_onnx_neural_network(model) diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py index fd52e69d..be098406 100644 --- a/tests/io/test_torch_geometric.py +++ b/tests/io/test_torch_geometric.py @@ -28,7 +28,7 @@ not (torch_available and torch_geometric_available), reason="Test only valid when torch and torch_geometric are available", ) -def GCN_Sequential(activation, pooling): +def gcn_sequential(activation, pooling): return Sequential( "x, edge_index", [ @@ -49,7 +49,7 @@ def GCN_Sequential(activation, pooling): not (torch_available and torch_geometric_available), reason="Test only valid when torch and torch_geometric are available", ) -def SAGE_Sequential(activation, pooling, aggr, root_weight): +def sage_sequential(activation, pooling, aggr, root_weight): return Sequential( "x, edge_index", [ @@ -142,11 +142,11 @@ def _test_gnn_with_non_fixed_graph(nn): def test_torch_geometric_reader(): for activation in [ReLU, Sigmoid, Tanh]: for pooling in [global_mean_pool, global_add_pool]: - nn = GCN_Sequential(activation, pooling) + nn = gcn_sequential(activation, pooling) 
_test_torch_geometric_reader(nn, activation, pooling) for aggr in ["sum", "mean"]: for root_weight in [False, True]: - nn = SAGE_Sequential(activation, pooling, aggr, root_weight) + nn = sage_sequential(activation, pooling, aggr, root_weight) _test_torch_geometric_reader(nn, activation, pooling) @@ -156,11 +156,11 @@ def test_torch_geometric_reader(): ) def test_gnn_with_fixed_graph(): for pooling in [global_mean_pool, global_add_pool]: - nn = GCN_Sequential(ReLU, pooling) + nn = gcn_sequential(ReLU, pooling) _test_gnn_with_fixed_graph(nn) for aggr in ["sum", "mean"]: for root_weight in [False, True]: - nn = SAGE_Sequential(ReLU, pooling, aggr, root_weight) + nn = sage_sequential(ReLU, pooling, aggr, root_weight) _test_gnn_with_fixed_graph(nn) @@ -172,7 +172,7 @@ def test_gnn_with_non_fixed_graph(): for pooling in [global_mean_pool, global_add_pool]: for aggr in ["sum"]: for root_weight in [False, True]: - nn = SAGE_Sequential(ReLU, pooling, aggr, root_weight) + nn = sage_sequential(ReLU, pooling, aggr, root_weight) _test_gnn_with_non_fixed_graph(nn) @@ -213,16 +213,18 @@ def _test_gnn_value_error(nn, error_info, error_type="ValueError"): reason="Test only valid when torch and torch_geometric are available", ) def test_gnn_value_error(): - nn = SAGE_Sequential(ReLU, global_max_pool, "mean", True) - _test_gnn_value_error(nn, "this operation is not supported") + nn = sage_sequential(ReLU, global_max_pool, "mean", root_weight=True) + _test_gnn_value_error(nn, "Operation global_max_pool is not supported.") - nn = SAGE_Sequential(Sigmoid, global_mean_pool, "sum", True) + nn = sage_sequential(Sigmoid, global_mean_pool, "sum", root_weight=True) _test_gnn_value_error(nn, "nonlinear activation results in a MINLP", "warns") - nn = SAGE_Sequential(ReLU, global_mean_pool, "mean", True) + nn = sage_sequential(ReLU, global_mean_pool, "mean", root_weight=True) _test_gnn_value_error( nn, "this aggregation is not supported when the graph is not fixed" ) - nn = GCN_Sequential(ReLU, global_mean_pool) - _test_gnn_value_error(nn, "this layer is not supported when the graph is not fixed") + nn = gcn_sequential(ReLU, global_mean_pool) + _test_gnn_value_error( + nn, "this layer is not supported when the graph is not fixed." + ) diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index 7cc7261d..eb3436d6 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -170,7 +170,7 @@ def test_keras_linear_big_reduced_space(datadir): @pytest.mark.skipif(not keras_available, reason="Need keras for this test") -def test_scaling_NN_block(datadir): +def test_scaling_nn_block(datadir): NN = keras.models.load_model(datadir.file("keras_linear_131_relu.keras")) model = pyo.ConcreteModel() diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py index 8dff2365..2d58cd3b 100644 --- a/tests/neuralnet/test_network_definition.py +++ b/tests/neuralnet/test_network_definition.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pyomo.environ as pyo import pytest @@ -10,7 +12,8 @@ ALMOST_EXACTLY_EQUAL = 1e-8 -# TODO: Build more tests with different activations and edge cases +# TODO @cog-imperial: Build more tests with different activations and edge cases +# https://github.com/cog-imperial/OMLT/issues/158 def test_two_node_full_space(): """Two node full space network. 
@@ -79,7 +82,7 @@ def test_input_bounds_no_scaler(): assert net.scaled_input_bounds == scaled_input_bounds -def test_input_bound_scaling_1D(): +def test_input_bound_scaling_1d(): xoffset = {i: float(i) for i in range(3)} xfactor = {i: 0.5 * (i + 1) for i in range(3)} yoffset = {i: -0.25 * i for i in range(2)} @@ -108,7 +111,7 @@ def test_input_bound_scaling_1D(): assert net.scaled_input_bounds == scaled_input_bounds -def test_input_bound_scaling_multiD(): +def test_input_bound_scaling_multi_d(): # Multidimensional test xoffset = {(0, i): float(i) for i in range(3)} xfactor = {(0, i): 0.5 * (i + 1) for i in range(3)} @@ -164,11 +167,17 @@ def _test_add_invalid_edge(direction): ) if direction == "in": - expected_msg = f"Inbound layer {dense_layer_1} not found in network." + expected_msg = re.escape( + "Inbound layer DenseLayer(input_size=[1], output_size=[1]) not" + " found in network." + ) with pytest.raises(ValueError, match=expected_msg): net.add_edge(input_layer, dense_layer_1) elif direction == "out": - expected_msg = f"Outbound layer {dense_layer_1} not found in network." + expected_msg = re.escape( + "Outbound layer DenseLayer(input_size=[1], output_size=[1]) not" + " found in network." + ) with pytest.raises(ValueError, match=expected_msg): net.add_edge(dense_layer_1, dense_layer_0) diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index d79d2160..f88a9425 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -32,8 +32,8 @@ from pyomo.contrib.fbbt import interval NEAR_EQUAL = 1e-6 -FULLSPACE_SMOOTH_VARS = 14 -FULLSPACE_SMOOTH_CONSTRAINTS = 15 +FULLSPACE_SMOOTH_VARS = 15 +FULLSPACE_SMOOTH_CONSTRAINTS = 14 FULLSPACE_RELU_VARS = 19 FULLSPACE_RELU_CONSTRAINTS = 26 REDUCED_VARS = 6 @@ -86,7 +86,7 @@ def two_node_network(activation, input_value): return net, y -def _test_two_node_FullSpaceNNFormulation_smooth(activation): +def _test_two_node_full_space_nn_formulation_smooth(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -108,7 +108,7 @@ def _test_two_node_FullSpaceNNFormulation_smooth(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_FullSpaceNNFormulation_relu(): +def _test_two_node_full_space_nn_formulation_relu(): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network("relu", -2.0) @@ -130,7 +130,7 @@ def _test_two_node_FullSpaceNNFormulation_relu(): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_FullSpaceSmoothNNFormulation(activation): +def _test_two_node_full_space_smooth_nn_formulation(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -152,7 +152,7 @@ def _test_two_node_FullSpaceSmoothNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_ReducedSpaceNNFormulation(activation): +def _test_two_node_reduced_space_nn_formulation(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -174,7 +174,7 @@ def _test_two_node_ReducedSpaceNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): +def _test_two_node_reduced_space_smooth_nn_formulation(activation): m = pyo.ConcreteModel() 
m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -196,41 +196,41 @@ def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def test_two_node_ReducedSpaceNNFormulation(): - _test_two_node_ReducedSpaceNNFormulation("linear") - _test_two_node_ReducedSpaceNNFormulation("sigmoid") - _test_two_node_ReducedSpaceNNFormulation("tanh") +def test_two_node_reduced_space_nn_formulation(): + _test_two_node_reduced_space_nn_formulation("linear") + _test_two_node_reduced_space_nn_formulation("sigmoid") + _test_two_node_reduced_space_nn_formulation("tanh") -def test_two_node_ReducedSpaceSmoothNNFormulation(): - _test_two_node_ReducedSpaceSmoothNNFormulation("linear") - _test_two_node_ReducedSpaceSmoothNNFormulation("sigmoid") - _test_two_node_ReducedSpaceSmoothNNFormulation("tanh") +def test_two_node_reduced_space_smooth_nn_formulation(): + _test_two_node_reduced_space_smooth_nn_formulation("linear") + _test_two_node_reduced_space_smooth_nn_formulation("sigmoid") + _test_two_node_reduced_space_smooth_nn_formulation("tanh") -def test_two_node_ReducedSpaceSmoothNNFormulation_invalid_activation(): +def test_two_node_reduced_space_smooth_nn_formulation_invalid_activation(): expected_msg = "Activation relu is not supported by this formulation." with pytest.raises(ValueError, match=expected_msg): - _test_two_node_ReducedSpaceSmoothNNFormulation("relu") + _test_two_node_reduced_space_smooth_nn_formulation("relu") -def test_two_node_FullSpaceNNFormulation(): - _test_two_node_FullSpaceNNFormulation_smooth("linear") - _test_two_node_FullSpaceNNFormulation_smooth("sigmoid") - _test_two_node_FullSpaceNNFormulation_smooth("tanh") - _test_two_node_FullSpaceNNFormulation_relu() +def test_two_node_full_space_nn_formulation(): + _test_two_node_full_space_nn_formulation_smooth("linear") + _test_two_node_full_space_nn_formulation_smooth("sigmoid") + _test_two_node_full_space_nn_formulation_smooth("tanh") + _test_two_node_full_space_nn_formulation_relu() -def test_two_node_FullSpaceSmoothNNFormulation(): - _test_two_node_FullSpaceSmoothNNFormulation("linear") - _test_two_node_FullSpaceSmoothNNFormulation("sigmoid") - _test_two_node_FullSpaceSmoothNNFormulation("tanh") +def test_two_node_full_space_smooth_nn_formulation(): + _test_two_node_full_space_smooth_nn_formulation("linear") + _test_two_node_full_space_smooth_nn_formulation("sigmoid") + _test_two_node_full_space_smooth_nn_formulation("tanh") -def test_two_node_FullSpaceSmoothNNFormulation_invalid_activation(): +def test_two_node_full_space_smooth_nn_formulation_invalid_activation(): expected_msg = "Activation relu is not supported by this formulation." 
with pytest.raises(ValueError, match=expected_msg): - _test_two_node_FullSpaceSmoothNNFormulation("relu") + _test_two_node_full_space_smooth_nn_formulation("relu") @pytest.mark.skip(reason="Need to add checks on layer types") @@ -315,7 +315,7 @@ def _maxpool_conv_network(inputs): return net, y -def test_maxpool_FullSpaceNNFormulation(): +def test_maxpool_full_space_nn_formulation(): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -445,7 +445,7 @@ def _test_formulation_initialize_extra_output(network_formulation): ReducedSpaceNNFormulation(net) -def test_FullSpaceNNFormulation_invalid_network(): +def test_full_space_nn_formulation_invalid_network(): _test_formulation_initialize_extra_input("FullSpace") _test_formulation_added_extra_input("FullSpace") _test_formulation_build_extra_input("FullSpace") @@ -453,15 +453,13 @@ def test_FullSpaceNNFormulation_invalid_network(): _test_formulation_added_extra_output("FullSpace") -def test_ReducedSpaceNNFormulation_invalid_network(): - # _test_formulation_initialize_extra_input("ReducedSpace") +def test_reduced_space_nn_formulation_invalid_network(): _test_formulation_added_extra_input("ReducedSpace") _test_formulation_build_extra_input("ReducedSpace") - # _test_formulation_initialize_extra_output("ReducedSpace") _test_formulation_added_extra_output("ReducedSpace") -def test_ReluPartitionFormulation_invalid_network(): +def test_relu_partition_formulation_invalid_network(): _test_formulation_added_extra_input("relu") _test_formulation_build_extra_input("relu") _test_formulation_added_extra_output("relu") diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 59dc247a..23ed6fee 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -11,7 +11,8 @@ ) from omlt.neuralnet.activations import ComplementarityReLUActivation -# TODO: Add tests for single dimensional outputs as well +# TODO @cog-imperial: Add tests for single dimensional outputs as well +# https://github.com/cog-imperial/OMLT/issues/158 NEAR_EQUAL = 1e-3 @@ -34,7 +35,7 @@ def test_two_node_bigm(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluBigMFormulation(two_node_network_relu): +def test_two_node_relu_big_m_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() formulation = ReluBigMFormulation(two_node_network_relu) @@ -74,7 +75,7 @@ def test_two_node_complementarity(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluComplementarityFormulation(two_node_network_relu): +def test_two_node_relu_complementarity_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() formulation = ReluComplementarityFormulation(two_node_network_relu) @@ -92,7 +93,7 @@ def test_two_node_ReluComplementarityFormulation(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluPartitionFormulation(two_node_network_relu): +def test_two_node_relu_partition_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -112,7 +113,7 @@ def test_two_node_ReluPartitionFormulation(two_node_network_relu): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") -def test_conv_ReluBigMFormulation(datadir): +def test_conv_relu_big_m_formulation(datadir): from omlt.io.onnx import load_onnx_neural_network_with_bounds net = 
load_onnx_neural_network_with_bounds(datadir.file("keras_conv_7x7_relu.onnx")) @@ -124,14 +125,15 @@ def test_conv_ReluBigMFormulation(datadir): m.obj1 = pyo.Objective(expr=0) # compute expected output for this input - x = np.eye(7, 7).reshape(1, 7, 7) + x_start = np.eye(7, 7).reshape(1, 7, 7) + x = x_start for layer in net.layers: x = layer.eval_single_layer(x) output = x for i in range(7): for j in range(7): - m.neural_net_block.inputs[0, i, j].fix(input[0, i, j]) + m.neural_net_block.inputs[0, i, j].fix(x_start[0, i, j]) pyo.SolverFactory("cbc").solve(m, tee=False) d, r, c = output.shape diff --git a/tests/notebooks/test_run_notebooks.py b/tests/notebooks/test_run_notebooks.py index 7871bc87..62d70d57 100644 --- a/tests/notebooks/test_run_notebooks.py +++ b/tests/notebooks/test_run_notebooks.py @@ -11,8 +11,9 @@ from pyomo.common.fileutils import this_file_dir from testbook import testbook +# TODO @cog-imperial: These will be replaced with stronger tests using testbook soon +# https://github.com/cog-imperial/OMLT/issues/159 -# TODO: These will be replaced with stronger tests using testbook soon def _test_run_notebook(folder, notebook_fname, n_cells): # Change to notebook directory to allow for testing cwd = Path.cwd() diff --git a/tests/test_block.py b/tests/test_block.py index ccb8753f..9711345c 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -17,6 +17,11 @@ def _set_block(self, blk): def _build_formulation(self): pass + def _clear_inputs(self): + self.input_indexes = [] + + def _clear_outputs(self): + self.output_indexes = [] def test_block(): m = pyo.ConcreteModel() @@ -61,9 +66,20 @@ def test_input_output_auto_creation(): assert len(m.b2.outputs) == 1 m.b3 = OmltBlock() - expected_msg = "OmltBlock must have at least one input and at least one output." + formulation1 = DummyFormulation() + formulation1._clear_inputs() + expected_msg = ( + "OmltBlock must have at least one input to build a formulation. " + f"{formulation1} has no inputs." + ) + with pytest.raises(ValueError, match=expected_msg): + m.b3.build_formulation(formulation1) + + formulation2 = DummyFormulation() + formulation2._clear_outputs() + expected_msg = ( + "OmltBlock must have at least one output to build a formulation. " + f"{formulation2} has no outputs." 
+ ) with pytest.raises(ValueError, match=expected_msg): - m.b3._setup_inputs_outputs( - input_indexes=[], - output_indexes=[], - ) + m.b3.build_formulation(formulation2) From 040c858112936134d05bf6f15dd471e46ce13e63 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 24 Jun 2024 05:29:48 +0000 Subject: [PATCH 62/75] Fixing mypy typing errors --- src/omlt/__init__.py | 2 +- src/omlt/formulation.py | 5 +-- src/omlt/gbt/gbt_formulation.py | 3 +- src/omlt/gbt/model.py | 8 ++--- src/omlt/io/keras/keras_reader.py | 4 +-- src/omlt/io/onnx.py | 2 +- src/omlt/io/onnx_parser.py | 17 +++++----- .../torch_geometric/torch_geometric_reader.py | 4 +-- src/omlt/linear_tree/lt_definition.py | 8 ++--- src/omlt/neuralnet/activations/__init__.py | 3 +- src/omlt/scaling.py | 4 +-- tests/neuralnet/test_keras.py | 13 ++++--- tests/neuralnet/test_network_definition.py | 2 +- tests/neuralnet/test_nn_formulation.py | 29 ++++++---------- tests/neuralnet/test_onnx.py | 12 +++---- tests/neuralnet/test_relu.py | 2 +- tests/test_formulation.py | 34 +++++++++---------- tests/test_scaling.py | 20 +++++------ 18 files changed, 84 insertions(+), 88 deletions(-) diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 3bf95df2..dfd36f37 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -11,7 +11,7 @@ """ from omlt._version import __version__ -from omlt.block import OmltBlock +from omlt.block import OmltBlock # type: ignore[attr-defined] from omlt.scaling import OffsetScaling __all__ = [ diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 7097fbf1..442e44bf 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -63,7 +63,6 @@ class _PyomoFormulation(_PyomoFormulationInterface): """ def __init__(self): - super().__init__() self.__block = None def _set_block(self, block): @@ -76,7 +75,9 @@ def block(self): The underlying block containing the constraints / variables for this formulation. 
""" - return self.__block() + if self.__block is not None: + return self.__block() + return None def scalar_or_tuple(x): diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index a51bec98..4e1069fe 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -1,4 +1,5 @@ import collections +from typing import Any import numpy as np import pyomo.environ as pe @@ -158,7 +159,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): domain=pe.Reals, ) - branch_value_by_feature_id = {} + branch_value_by_feature_id: dict[int, Any] = {} branch_value_by_feature_id = collections.defaultdict(list) for f in feature_ids: diff --git a/src/omlt/gbt/model.py b/src/omlt/gbt/model.py index 0fbc3f7e..693c24f7 100644 --- a/src/omlt/gbt/model.py +++ b/src/omlt/gbt/model.py @@ -44,15 +44,15 @@ def scaling_object(self): """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object + @scaling_object.setter + def scaling_object(self, scaling_object): + self.__scaling_object = scaling_object + @property def scaled_input_bounds(self): """Return a list of tuples of lower and upper bounds of tree ensemble inputs.""" return self.__scaled_input_bounds - @scaling_object.setter - def scaling_object(self, scaling_object): - self.__scaling_object = scaling_object - def _model_num_inputs(model): """Returns the number of input variables.""" diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index d7429d72..2462bed0 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -1,6 +1,6 @@ from tensorflow import keras -from omlt.neuralnet.layer import DenseLayer, InputLayer +from omlt.neuralnet.layer import DenseLayer, InputLayer, Layer from omlt.neuralnet.network_definition import NetworkDefinition @@ -43,7 +43,7 @@ def load_keras_sequential( unscaled_input_bounds=unscaled_input_bounds, ) - prev_layer = InputLayer([n_inputs]) + prev_layer: Layer = InputLayer([n_inputs]) net.add_layer(prev_layer) for layer in nn.layers: diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index b48915a9..6c5b3cb3 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -43,7 +43,7 @@ def load_onnx_neural_network_with_bounds(filename): onnx_model = onnx.load(filename) input_bounds_filename = Path(f"{filename}.bounds.json") input_bounds = None - if input_bounds_filename.exists: + if input_bounds_filename.exists(): input_bounds = load_input_bounds(input_bounds_filename) return load_onnx_neural_network(onnx_model, input_bounds=input_bounds) diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py index 979b437c..f35fadb9 100644 --- a/src/omlt/io/onnx_parser.py +++ b/src/omlt/io/onnx_parser.py @@ -28,6 +28,7 @@ ATTR_TENSOR = 4 ATTR_INTS = 7 + class NetworkParser: """Network Parser. 
@@ -41,31 +42,31 @@ def __init__(self): def _reset_state(self): self._graph = None - self._initializers = None - self._constants = None - self._nodes = None + self._initializers = {} + self._constants = {} + self._nodes = {} self._nodes_by_output = None self._inputs = None self._outputs = None - self._node_stack = None - self._node_map = None + self._node_stack = [] + self._node_map = {} def parse_network(self, graph, scaling_object, input_bounds): self._reset_state() self._graph = graph # initializers contain constant data - initializers = {} + initializers: dict[str, Any] = {} for initializer in self._graph.initializer: initializers[initializer.name] = numpy_helper.to_array(initializer) self._initializers = initializers # Build graph - nodes = {} + nodes: dict[str, tuple[str, Any, list[Any]]] = {} nodes_by_output = {} inputs = set() - outputs = set() + outputs: set[Any] = set() self._node_map = {} network = NetworkDefinition( diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index 4203338b..d37ec960 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -2,7 +2,7 @@ import numpy as np -from omlt.neuralnet.layer import DenseLayer, GNNLayer, InputLayer +from omlt.neuralnet.layer import DenseLayer, GNNLayer, InputLayer, Layer from omlt.neuralnet.network_definition import NetworkDefinition @@ -150,7 +150,7 @@ def load_torch_geometric_sequential( unscaled_input_bounds=unscaled_input_bounds, ) - prev_layer = InputLayer([n_inputs]) + prev_layer: Layer = InputLayer([n_inputs]) net.add_layer(prev_layer) operations = [] diff --git a/src/omlt/linear_tree/lt_definition.py b/src/omlt/linear_tree/lt_definition.py index 8f944a4a..cf1b5a4a 100644 --- a/src/omlt/linear_tree/lt_definition.py +++ b/src/omlt/linear_tree/lt_definition.py @@ -1,3 +1,5 @@ +from typing import Any + import lineartree import numpy as np @@ -178,9 +180,7 @@ def _find_all_children_leaves(split, splits_dict, leaves_dict): # For each leaf, check if the parents appear in the list of children # splits (all_splits). If so, it must be a leaf of the argument split - return [ - leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits - ] + return [leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits] def _find_n_inputs(leaves): @@ -341,7 +341,7 @@ def _parse_tree_data(model, input_bounds): # For each variable that appears in the tree, go through all the splits # and record its splitting threshold - splitting_thresholds = {} + splitting_thresholds: dict[int, Any] = {} for split in splits: var = splits[split]["col"] splitting_thresholds[var] = {} diff --git a/src/omlt/neuralnet/activations/__init__.py b/src/omlt/neuralnet/activations/__init__.py index 038a4dbd..740022ad 100644 --- a/src/omlt/neuralnet/activations/__init__.py +++ b/src/omlt/neuralnet/activations/__init__.py @@ -5,6 +5,7 @@ variable, and :math:`y` denotes post-activation variable. 
""" +from typing import Any from .linear import linear_activation_constraint, linear_activation_function from .relu import ComplementarityReLUActivation, bigm_relu_activation_constraint @@ -25,7 +26,7 @@ "tanh": tanh_activation_function, } -NON_INCREASING_ACTIVATIONS = [] +NON_INCREASING_ACTIVATIONS: list[Any] = [] __all__ = [ "linear_activation_constraint", diff --git a/src/omlt/scaling.py b/src/omlt/scaling.py index 9bf3bd3f..5ffaafbe 100644 --- a/src/omlt/scaling.py +++ b/src/omlt/scaling.py @@ -4,8 +4,8 @@ expressions to the Pyomo model for the inputs and outputs of an ML model. An implementation of a common scaling approach is included with `OffsetScaling`. """ - import abc +from typing import Any class ScalingInterface(abc.ABC): @@ -28,7 +28,7 @@ def get_unscaled_output_expressions(self, scaled_output_vars): # pragma: no cover -def convert_to_dict(x): +def convert_to_dict(x: Any) -> dict[Any, Any]: if isinstance(x, dict): return dict(x) return dict(enumerate(x)) diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index eb3436d6..99ae8e27 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -8,7 +8,8 @@ from omlt.io import load_keras_sequential from conftest import get_neural_network_data -from omlt.block import OmltBlock +from omlt import OmltBlock +from omlt.formulation import _PyomoFormulation from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation from omlt.neuralnet.activations import ComplementarityReLUActivation from omlt.scaling import OffsetScaling @@ -32,10 +33,9 @@ def _test_keras_linear_131(keras_fname, *, reduced_space=False): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() if reduced_space: - formulation = ReducedSpaceNNFormulation(net) + m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) else: - formulation = FullSpaceNNFormulation(net) - m.neural_net_block.build_formulation(formulation) + m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) nn_outputs = nn.predict(x=x_test) for d in range(len(x_test)): @@ -104,10 +104,9 @@ def _test_keras_linear_big(keras_fname, *, reduced_space=False): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() if reduced_space: - formulation = ReducedSpaceNNFormulation(net) + m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) else: - formulation = FullSpaceNNFormulation(net) - m.neural_net_block.build_formulation(formulation) + m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) nn_outputs = nn.predict(x=x_test) for d in range(len(x_test)): diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py index 2d58cd3b..ee073c5e 100644 --- a/tests/neuralnet/test_network_definition.py +++ b/tests/neuralnet/test_network_definition.py @@ -3,7 +3,7 @@ import numpy as np import pyomo.environ as pyo import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition from omlt.neuralnet.nn_formulation import FullSpaceNNFormulation diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index f88a9425..315bb176 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -4,6 +4,7 @@ import pyomo.environ as pyo import pytest from omlt import OmltBlock +from omlt.formulation import _PyomoFormulation from omlt.neuralnet import ( FullSpaceNNFormulation, 
FullSpaceSmoothNNFormulation, @@ -31,6 +32,12 @@ from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer from pyomo.contrib.fbbt import interval +formulations = { + "FullSpace": FullSpaceNNFormulation, + "ReducedSpace": ReducedSpaceNNFormulation, + "relu": ReluPartitionFormulation, +} + NEAR_EQUAL = 1e-6 FULLSPACE_SMOOTH_VARS = 15 FULLSPACE_SMOOTH_CONSTRAINTS = 14 @@ -41,6 +48,7 @@ THREE_NODE_VARS = 81 THREE_NODE_CONSTRAINTS = 120 + def two_node_network(activation, input_value): """Two node network. @@ -370,12 +378,7 @@ def _test_formulation_added_extra_input(network_formulation): """network_formulation can be:'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_input) expected_msg = "Multiple input layers are not currently supported." with pytest.raises(ValueError, match=expected_msg): @@ -386,12 +389,7 @@ def _test_formulation_build_extra_input(network_formulation): """network_formulation can be:'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_input) m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -410,12 +408,7 @@ def _test_formulation_added_extra_output(network_formulation): weights=np.array([[1.0, 0.0], [5.0, 1.0]]), biases=np.array([3.0, 4.0]), ) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_output) net.add_edge(list(net.layers)[-2], extra_output) expected_msg = "Multiple output layers are not currently supported." 
diff --git a/tests/neuralnet/test_onnx.py b/tests/neuralnet/test_onnx.py index 7cad2d78..7d33675f 100644 --- a/tests/neuralnet/test_onnx.py +++ b/tests/neuralnet/test_onnx.py @@ -59,8 +59,8 @@ def obj(mdl): SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] @@ -102,8 +102,8 @@ def obj(mdl): SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] @@ -146,8 +146,8 @@ def obj(mdl): SolverFactory("ipopt").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_2_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_2_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 23ed6fee..40ed37ef 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -1,7 +1,7 @@ import numpy as np import pyomo.environ as pyo import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.dependencies import onnx_available from omlt.neuralnet import ( FullSpaceNNFormulation, diff --git a/tests/test_formulation.py b/tests/test_formulation.py index df4aa0d9..155e596c 100644 --- a/tests/test_formulation.py +++ b/tests/test_formulation.py @@ -1,5 +1,5 @@ import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.formulation import _setup_scaled_inputs_outputs from omlt.scaling import OffsetScaling from pyomo.environ import ConcreteModel, Objective, SolverFactory, value @@ -7,16 +7,16 @@ def test_scaled_inputs_outputs(): m = ConcreteModel() - xoffset = {(0, i): float(i) for i in range(3)} - xfactor = {(0, i): 0.5 * (i + 1) for i in range(3)} - yoffset = {(1, i): -0.25 * i for i in range(2)} - yfactor = {(1, i): 0.125 * (i + 1) for i in range(2)} + x1offset: dict[tuple[int, int], float] = {(0, i): float(i) for i in range(3)} + x1factor: dict[tuple[int, int], float] = {(0, i): 0.5 * (i + 1) for i in range(3)} + y1offset: dict[tuple[int, int], float] = {(1, i): -0.25 * i for i in range(2)} + y1factor: dict[tuple[int, int], float] = {(1, i): 0.125 * (i + 1) for i in range(2)} scaler = OffsetScaling( - offset_inputs=xoffset, - factor_inputs=xfactor, - offset_outputs=yoffset, - factor_outputs=yfactor, + offset_inputs=x1offset, + factor_inputs=x1factor, + offset_outputs=y1offset, + factor_outputs=y1factor, ) scaled_input_bounds = {(0, 0): (0, 5), (0, 1): (-2, 2), (0, 2): (0, 1)} @@ -47,16 +47,16 @@ def test_scaled_inputs_outputs(): assert m.b1.inputs[(0, 2)].ub == pytest.approx(3.5) m = ConcreteModel() - xoffset = {i: float(i) for i in range(3)} - xfactor = {i: 0.5 * (i + 1) for i in range(3)} - yoffset = {i: -0.25 * i for i in range(2)} - yfactor = {i: 0.125 * (i + 1) for i in range(2)} + x2offset: dict[int, float] = {i: float(i) for i in range(3)} + x2factor: dict[int, float] = {i: 0.5 * (i + 1) for i in range(3)} + 
y2offset: dict[int, float] = {i: -0.25 * i for i in range(2)}
+    y2factor: dict[int, float] = {i: 0.125 * (i + 1) for i in range(2)}
 
     scaler = OffsetScaling(
-        offset_inputs=xoffset,
-        factor_inputs=xfactor,
-        offset_outputs=yoffset,
-        factor_outputs=yfactor,
+        offset_inputs=x2offset,
+        factor_inputs=x2factor,
+        offset_outputs=y2offset,
+        factor_outputs=y2factor,
     )
 
     input_bounds = {0: (0, 5), 1: (-2, 2), 2: (0, 1)}
diff --git a/tests/test_scaling.py b/tests/test_scaling.py
index 790241bf..dffc2a03 100644
--- a/tests/test_scaling.py
+++ b/tests/test_scaling.py
@@ -8,16 +8,16 @@ def test_convert_to_dict():
     x = ["a", "b"]
-    x = convert_to_dict(x)
-    assert sorted(x.keys()) == [0, 1]
-    assert x[0] == "a"
-    assert x[1] == "b"
-
-    x = {2: "a", 1: "b"}
-    x = convert_to_dict(x)
-    assert sorted(x.keys()) == [1, 2]
-    assert x[2] == "a"
-    assert x[1] == "b"
+    xd = convert_to_dict(x)
+    assert sorted(xd.keys()) == [0, 1]
+    assert xd[0] == "a"
+    assert xd[1] == "b"
+
+    y = {2: "a", 1: "b"}
+    yd = convert_to_dict(y)
+    assert sorted(yd.keys()) == [1, 2]
+    assert yd[2] == "a"
+    assert yd[1] == "b"
 
 
 def test_offset_scaling():
From b7b1c5b9b0c41dd8f616039dbe30218cf9d6db19 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 24 Jun 2024 05:29:48 +0000
Subject: [PATCH 63/75] Fixing mypy typing errors

---
 tests/neuralnet/test_keras.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py
index 99ae8e27..f83c07d1 100644
--- a/tests/neuralnet/test_keras.py
+++ b/tests/neuralnet/test_keras.py
@@ -9,7 +9,6 @@
 
 from conftest import get_neural_network_data
 from omlt import OmltBlock
-from omlt.formulation import _PyomoFormulation
 from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation
 from omlt.neuralnet.activations import ComplementarityReLUActivation
 from omlt.scaling import OffsetScaling
From 13305603a4feb89e7386032ae48cd48f0086a365 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 8 Jul 2024 23:58:06 +0000
Subject: [PATCH 64/75] Including OmltExpr and OmltConstraint, spreading Omlt classes throughout the codebase.
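
OmltVar, OmltExpr, and the new OmltConstraint are now exported from
omlt.base and used in place of the corresponding Pyomo components
throughout the formulations, notebooks, and tests. A minimal usage
sketch, mirroring the notebook changes below (the variable name and
bound values here are illustrative only, not taken from the notebooks):

    import pyomo.environ as pyo
    from omlt.base import OmltConstraint, OmltVar

    m = pyo.ConcreteModel()
    # OmltVar defaults to the Pyomo backend and accepts the usual
    # pyo.Var keyword arguments.
    m.alpha = OmltVar(within=pyo.NonNegativeReals, bounds=(0, 1))
    # OmltConstraint wraps a relational expression, standing in
    # for pyo.Constraint in model code.
    m.con = OmltConstraint(expr=m.alpha <= 0.34)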
--- .../auto-thermal-reformer-relu.ipynb | 3 +- .../neuralnet/auto-thermal-reformer.ipynb | 3 +- docs/notebooks/trees/bo_with_trees.ipynb | 4 +- src/omlt/base/__init__.py | 13 +- src/omlt/base/constraint.py | 160 ++++++ src/omlt/base/expression.py | 506 +---------------- src/omlt/base/julia.py | 2 +- src/omlt/base/pyomo.py | 521 ++++++++++++++++++ src/omlt/base/var.py | 325 +++-------- src/omlt/block.py | 21 +- src/omlt/formulation.py | 22 +- src/omlt/gbt/gbt_formulation.py | 93 ++-- .../torch_geometric/build_gnn_formulation.py | 10 +- src/omlt/linear_tree/lt_formulation.py | 67 ++- src/omlt/neuralnet/activations/linear.py | 18 +- src/omlt/neuralnet/activations/relu.py | 18 +- src/omlt/neuralnet/activations/smooth.py | 19 +- src/omlt/neuralnet/layers/full_space.py | 36 +- src/omlt/neuralnet/layers/partition_based.py | 24 +- src/omlt/neuralnet/nn_formulation.py | 52 +- tests/{ => base}/test_block.py | 21 +- tests/{ => base}/test_formulation.py | 0 tests/{ => base}/test_scaling.py | 0 tests/{ => base}/test_var.py | 7 +- tests/gbt/test_gbt_formulation.py | 5 +- tests/io/test_torch_geometric.py | 1 + tests/linear_tree/test_lt_formulation.py | 35 +- 27 files changed, 1041 insertions(+), 945 deletions(-) create mode 100644 src/omlt/base/constraint.py create mode 100644 src/omlt/base/pyomo.py rename tests/{ => base}/test_block.py (87%) rename tests/{ => base}/test_formulation.py (100%) rename tests/{ => base}/test_scaling.py (100%) rename tests/{ => base}/test_var.py (85%) diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb index 78c4e1a9..2530183c 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb @@ -84,6 +84,7 @@ "from omlt import OmltBlock, OffsetScaling\n", "from omlt.io.keras import load_keras_sequential\n", "from omlt.neuralnet import ReluBigMFormulation\n", + "from omlt.base import OmltConstraint\n", "import pyomo.environ as pyo\n", "import pandas as pd\n", "import tensorflow.keras as keras\n", @@ -557,7 +558,7 @@ "h2_idx = outputs.index('H2')\n", "n2_idx = outputs.index('N2')\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", - "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" + "m.con = OmltConstraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] }, { diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb index 650f5700..83e3f449 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb @@ -73,6 +73,7 @@ "from omlt import OmltBlock, OffsetScaling\n", "from omlt.io.keras import load_keras_sequential\n", "from omlt.neuralnet import FullSpaceSmoothNNFormulation\n", + "from omlt.base import OmltConstraint\n", "import pyomo.environ as pyo\n", "import pandas as pd\n", "import tensorflow.keras as keras\n", @@ -546,7 +547,7 @@ "h2_idx = outputs.index('H2')\n", "n2_idx = outputs.index('N2')\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", - "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" + "m.con = OmltConstraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] }, { diff --git a/docs/notebooks/trees/bo_with_trees.ipynb b/docs/notebooks/trees/bo_with_trees.ipynb index 11801d96..65fafd4f 100644 --- a/docs/notebooks/trees/bo_with_trees.ipynb +++ b/docs/notebooks/trees/bo_with_trees.ipynb @@ -212,6 +212,8 @@ 
"outputs": [], "source": [ "import numpy as np\n", + "from omlt.base import OmltVar\n", + "\n", "\n", "def add_unc_metric(opt_model, data):\n", " \n", @@ -222,7 +224,7 @@ " \n", " # alpha capture the uncertainty value\n", " alpha_bound = abs(0.5*np.var(data['y']))\n", - " opt_model.alpha = pe.Var(within=pe.NonNegativeReals, bounds=(0,alpha_bound))\n", + " opt_model.alpha = OmltVar(within=pe.NonNegativeReals, bounds=(0,alpha_bound))\n", " opt_model.unc_constr = pe.ConstraintList()\n", " \n", " for x in data_x:\n", diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 3d881472..5fbdddc4 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -5,7 +5,16 @@ if julia_available: from omlt.base.julia import jl, jump -from omlt.base.var import OmltVar +from omlt.base.constraint import OmltConstraint from omlt.base.expression import OmltExpr +from omlt.base.pyomo import * +from omlt.base.var import OmltVar -# from omlt.base.constraint import OmltConstraint +__all__ = [ + "julia_available", + "jl", + "jump", + "OmltExpr", + "OmltVar", + "OmltConstraint", +] diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py new file mode 100644 index 00000000..9687022b --- /dev/null +++ b/src/omlt/base/constraint.py @@ -0,0 +1,160 @@ +from abc import ABC, abstractmethod +from typing import Any + +import pyomo.environ as pyo +from pyomo.core.expr import EqualityExpression, InequalityExpression + +from omlt.base import DEFAULT_MODELING_LANGUAGE + + +class OmltConstraint(ABC): + def __new__(cls, *indexes, **kwargs: Any): + if not indexes: + instance = OmltConstraintScalar.__new__(OmltConstraintScalar, **kwargs) + else: + instance = OmltConstraintIndexed.__new__( + OmltConstraintIndexed, *indexes, **kwargs + ) + return instance + + @property + def ctype(self): + return pyo.Constraint + + def is_component_type(self): + return True + + def is_expression_type(self, enum): + # The Pyomo ExpressionType.RELATIONAL is enum 1. + return enum.value == 1 + + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + + @abstractmethod + def __call__(self, *args: Any, **kwds: Any) -> Any: + pass + + +class OmltConstraintScalar(OmltConstraint): + def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if lang not in subclass_map: + msg = ( + "Constraint format %s not recognized. 
Supported formats " + "are 'pyomo' or 'jump'.", + lang, + ) + raise ValueError(msg) + subclass = subclass_map[lang] + instance = super(OmltConstraint, subclass).__new__(subclass) + instance.__init__(**kwargs) + instance._format = lang + return instance + + def __init__(self, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + lhs = kwargs.pop("lhs", None) + if lhs is not None: + self.lhs = lhs + sense = kwargs.pop("sense", None) + if sense is not None: + self.sense = sense + rhs = kwargs.pop("rhs", None) + if rhs is not None: + self.rhs = rhs + if not lhs and not sense and not rhs: + expr_tuple = kwargs.pop("expr_tuple", None) + if expr_tuple and expr_tuple[1] in {"==", ">=", "<=", ">", "<", "in"}: + self.lhs = expr_tuple[0] + self.sense = expr_tuple[1] + self.rhs = expr_tuple[2] + if not lhs and not sense and not rhs and not expr_tuple: + expr = kwargs.pop("expr", None) + if isinstance(expr, EqualityExpression): + self.lhs = expr.arg(0) + self.sense = "==" + self.rhs = expr.arg(1) + if isinstance(expr, InequalityExpression): + self.lhs = expr.arg(0) + self.sense = "<=" + self.rhs = expr.arg(1) + + self.model = kwargs.pop("model", None) + self.format = lang + self._parent = None + + def __call__(self, *args: Any, **kwds: Any) -> Any: + pass + + @property + def args(self): + pass + + +class OmltConstraintIndexed(OmltConstraint): + def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if lang not in subclass_map: + msg = ( + "Constraint format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + lang, + ) + raise ValueError(msg) + subclass = subclass_map[lang] + instance = super(OmltConstraint, subclass).__new__(subclass) + instance.__init__(*indexes, **kwargs) + instance._format = lang + return instance + + def __init__(self, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + self._index_set = indexes + + lhs = kwargs.pop("lhs", None) + if lhs: + self.lhs = lhs + sense = kwargs.pop("sense", None) + if sense: + self.sense = sense + rhs = kwargs.pop("rhs", None) + if rhs: + self.rhs = rhs + if not lhs and not sense and not rhs: + expr_tuple = kwargs.pop("expr_tuple", None) + if expr_tuple and expr_tuple[1] in {"==", ">=", "<=", ">", "<", "in"}: + self.lhs = expr_tuple[0] + self.sense = expr_tuple[1] + self.rhs = expr_tuple[2] + + self.model = kwargs.pop("model", None) + self._parent = None + self.name = None + self.format = lang + + @abstractmethod + def __call__(self, *args: Any, **kwds: Any) -> Any: + pass + + def keys(self, sort=False): + yield from self._index_set + + @property + @abstractmethod + def _constructed(self): + pass + + @property + @abstractmethod + def _active(self): + pass + + @_active.setter + @abstractmethod + def _active(self, val): + pass + + @property + @abstractmethod + def _data(self): + pass diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index 80229d99..734d8f98 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -1,10 +1,9 @@ from abc import ABC, abstractmethod -import pyomo.environ as pyo +from typing import Any -# from pyomo.core.expr import RelationalExpression +import pyomo.environ as pyo from omlt.base import DEFAULT_MODELING_LANGUAGE -import omlt.base.var as var # from omlt.dependencies import julia_available @@ -15,18 +14,12 @@ class OmltExpr(ABC): - # Claim to be a Pyomo Expression so blocks will register - # properly. 
- @property - def __class__(self): - return pyo.Expression - - def __new__(cls, *indexes, **kwargs): + def __new__(cls, *indexes, **kwargs: Any): if not indexes: - instance = super(OmltExpr, cls).__new__(OmltExprScalar) + instance = super().__new__(OmltExprScalar) instance.__init__(**kwargs) else: - instance = super(OmltExpr, cls).__new__(OmltExprIndexed) + instance = super().__new__(OmltExprIndexed) instance.__init__(*indexes, **kwargs) return instance @@ -63,496 +56,39 @@ def nargs(self): class OmltExprScalar(OmltExpr): - def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( + if lang not in subclass_map: + msg = ( "Expression format %s not recognized. Supported formats " "are 'pyomo' or 'jump'.", - format, + lang, ) - subclass = subclass_map[format] + raise ValueError(msg) + subclass = subclass_map[lang] instance = super(OmltExpr, cls).__new__(subclass) - # instance.__init__(*args, **kwargs) - instance._format = format + instance._format = lang return instance - def __mul__(self, other): + def is_potentially_variable(self): pass - -class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): - format = "pyomo" - - def __init__(self, *args, expr=None, **kwargs): - self._index_set = {} - if isinstance(expr, (pyo.Expression, pyo.NumericValue)): - self._expression = expr - elif isinstance(expr, OmltExprScalarPyomo): - self._expression = expr._expression - elif isinstance(expr, tuple): - self._expression = self._parse_expression_tuple(expr) - else: - print("expression not recognized", expr, type(expr)) - - self._parent = None - self.name = None - - def _parse_expression_tuple_term(self, term): - if isinstance(term, tuple): - return self._parse_expression_tuple(term) - elif isinstance(term, OmltExprScalarPyomo): - return term._expression - elif isinstance(term, var.OmltVar): - return term._pyovar - elif isinstance(term, (pyo.Expression, pyo.Var, int, float)): - return term - else: - raise TypeError( - "Term of expression is an unsupported type. " - "Write a better error message." 
- ) - - def _parse_expression_tuple(self, expr): - lhs = self._parse_expression_tuple_term(expr[0]) - rhs = self._parse_expression_tuple_term(expr[2]) - - if expr[1] == "+": - return lhs + rhs - - elif expr[1] == "-": - return lhs - rhs - - elif expr[1] == "*": - return lhs * rhs - - elif expr[1] == "/": - return lhs / rhs - - else: - raise ValueError("Expression middle term was {%s}.", expr[1]) - - def __repr__(self): - return repr(self._expression.arg(0)) - - def is_indexed(self): - return False - - def as_numeric(self): - return self._expression._apply_operation(self._expression.args) - - def construct(self, data=None): - return self._expression.construct(data) - - @property - def _constructed(self): - return self._expression.expr._constructed - - @property - def const(self): - return self._expression.const - - @property - def args(self): - return self._expression.args - - def arg(self, index): - return self._expression.arg(index) - - def nargs(self): - return self._expression.nargs() - - def __call__(self): - return self._expression() - - def __add__(self, other): - if isinstance(other, OmltExpr): - expr = self._expression + other._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = self._expression + other - return OmltExpr(format=self._format, expr=expr) - - # def __sub__(self, other): - # expr = (self, "-", other) - # return OmltExpression(format=self._format, expr=expr) - def __mul__(self, other): - if isinstance(other, OmltExpr): - expr = self._expression * other._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = self._expression * other - return OmltExprScalar(format=self._format, expr=expr) - - def __div__(self, other): - expr = (self, "/", other) - return OmltExpr(format=self._format, expr=expr) - - def __truediv__(self, other): - expr = (self, "//", other) - return OmltExpr(format=self._format, expr=expr) - - def __radd__(self, other): - if isinstance(other, OmltExpr): - expr = other._expression + self._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = other + self._expression - return OmltExpr(format=self._format, expr=expr) - - def __rsub__(self, other): - if isinstance(other, OmltExpr): - expr = other._expression - self._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = other - self._expression - return OmltExpr(format=self._format, expr=expr) - - def __rmul__(self, other): - expr = (other, "*", self) - return OmltExpr(format=self._format, expr=expr) - - def __ge__(self, other): - expr = self._expression >= other - return expr - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) - - def __le__(self, other): - expr = self._expression <= other - return expr - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) - - def __eq__(self, other): - expr = self._expression == other - return pyo.Expression(expr=expr) - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + pass class OmltExprIndexed(OmltExpr): - def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( + if lang not in subclass_map: + msg = ( "Variable format %s not recognized. 
Supported formats are 'pyomo'" " or 'jump'.", - format, + lang, ) - subclass = subclass_map[format] + raise ValueError(msg) + subclass = subclass_map[lang] instance = super(OmltExpr, subclass).__new__(subclass) instance.__init__(*indexes, **kwargs) - instance._format = format + instance._format = lang return instance - - -class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): - format = "pyomo" - - def __init__(self, *indexes, expr=None, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - elif len(indexes) > 1: - raise ValueError("Currently index cross-products are unsupported.") - else: - self._index_set = {} - self._format = format - self._expression = pyo.Expression(self._index_set, expr=expr) - - # self.pyo.construct() - - def is_indexed(self): - return True - - def expression_as_dict(self): - if len(self._index_set) == 1: - return {self._index_set[0]: self._expression} - else: - return {k: self._expression[k] for k in self._index_set} - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._expression[item[0]] - else: - return self._expression[item] - - def __setitem__(self, item, value): - self._expression[item] = value - - def keys(self): - return self._expression.keys() - - def values(self): - return self._expression.values() - - def items(self): - return self._expression.items() - - def __len__(self): - """ - Return the number of component data objects stored by this - component. - """ - return len(self._expression) - - def __contains__(self, idx): - """Return true if the index is in the dictionary""" - return idx in self._expression - - # The default implementation is for keys() and __iter__ to be - # synonyms. 
The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys""" - return self._expression.__iter__() - - @property - def args(self): - return self._expression.args() - - def arg(self, index): - return self._expression.arg(index) - - def nargs(self): - return self._expression.nargs() - - def __call__(self): - return self._expression() - - # # def __str__(self): - # # return parse_expression(self.expr, "").rstrip() - - # def __repr__(self): - # if self._expr is not None: - # return parse_expression(self._expr, "").rstrip() - # else: - # return "empty expression" - - # def set_value(self, value): - # print("setting value:", value) - # self.value = value - - # @property - # def rule(self): - # return self._expr - - def __add__(self, other): - expr = (self, "+", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - # def __sub__(self, other): - # expr = (self, "-", other) - # return OmltExpression(format=self._format, expr=expr) - - # def __mul__(self, other): - # expr = (self, "*", other) - # return OmltExpression(format=self._format, expr=expr) - - def __div__(self, other): - expr = (self, "/", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - def __truediv__(self, other): - expr = (self, "//", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - def __eq__(self, other): - expr = (self, "==", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - def __le__(self, other): - expr = (self, "<=", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - def __ge__(self, other): - expr = (self, ">=", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - -# def parse_expression(expr, string): -# if expr is not None: -# for t in expr: -# if str(t).count(" ") == 2: -# string += "(" + str(t) + ") " -# else: -# string += str(t) + " " -# else: -# string = expr -# return string - - -# def parse_jump_affine(expr_tuple): -# if expr_tuple is not None: -# if isinstance(expr_tuple, JumpVar): -# return jump.AffExpr(0, {expr_tuple.to_jump(): 1}) -# elif isinstance(expr_tuple, (int, float)): -# return jump.AffExpr(expr_tuple, {}) -# elif isinstance(expr_tuple, OmltExprScalar): -# print("found a scalar expression") -# print(expr_tuple) -# print(expr_tuple._expression) -# return expr_tuple._expression -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], JumpVar): -# return jump.AffExpr(0, {expr_tuple[0].to_jump(): 1}) -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): -# return jump.AffExpr(expr_tuple[0], {}) -# elif len(expr_tuple) == 2: -# print("don't know how to deal with 2-element expressions") -# print("expr_tuple") -# elif len(expr_tuple) == 3: -# print("triplet") -# if expr_tuple[1] == "+": -# return parse_jump_affine(expr_tuple[0]) + parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "-": -# return parse_jump_affine(expr_tuple[0]) - parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "*": -# return parse_jump_affine(expr_tuple[0]) * parse_jump_affine( -# expr_tuple[2] -# ) -# elif 
expr_tuple[1] == "/": -# return parse_jump_affine(expr_tuple[0]) / parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "//": -# return parse_jump_affine(expr_tuple[0]) // parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "**": -# return parse_jump_affine(expr_tuple[0]) ** parse_jump_affine( -# expr_tuple[2] -# ) - - -# def dictplus(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): -# c[k] = a[k] + b[k] -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dictminus(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): -# c[k] = a[k] - b[k] -# print("dictminus gives:", c) -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dicttimes(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): - -# c[k] = a[k] * b[k] -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dictover(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): - -# c[k] = jump_divide(a[k], b[k]) -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def jump_divide(a, b): -# assert isinstance(a, AnyValue) -# print(b.terms) -# assert (isinstance(b, AnyValue) and len(b.terms) == 0) or isinstance( -# b, (int, float) -# ) -# if isinstance(b, AnyValue): -# div_by = b.constant -# else: -# div_by = b -# return jump.AffExpr(a.constant / div_by, {}) - - -# def parse_jump_indexed(expr_tuple, index): -# print("parsing:", expr_tuple) -# if expr_tuple is not None: -# if isinstance(expr_tuple, OmltExpr): -# print("here") -# return expr_tuple.expression_as_dict() -# elif isinstance(expr_tuple, var.OmltVar): -# return expr_tuple.to_jumpexpr() -# elif isinstance(expr_tuple, (int, float)): -# return {k: jump.AffExpr(expr_tuple, {}) for k in index} -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], OmltExpr): -# return expr_tuple[0]._expression -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], var.OmltVar): -# indexed = { -# k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) -# for k, v in expr_tuple[0].items() -# } -# return indexed -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): -# return {k: jump.AffExpr(expr_tuple[0], {}) for k in index} -# elif len(expr_tuple) == 2: -# print("don't know how to deal with 2-element expressions") -# print(expr_tuple) -# elif len(expr_tuple) == 3: -# if expr_tuple[1] == "+": -# return dictplus( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "-": -# return dictminus( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "*": -# return dicttimes( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "/": -# return dictover( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "//": -# return dictover( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "**": -# return parse_jump_indexed(expr_tuple[0], index) ** parse_jump_indexed( -# expr_tuple[2], index -# ) -# elif expr_tuple[1] in relations: -# cnstrnt = constraint.OmltRelation( -# index, -# model=None, -# lhs=parse_jump_indexed(expr_tuple[0], index), -# sense=expr_tuple[1], -# rhs=parse_jump_indexed(expr_tuple[2], index), -# format="jump", -# ) -# indexed = {k: 
cnstrnt.lhs[k] - cnstrnt.rhs[k] for k in index} -# return indexed diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index b3c9109f..a34ec92f 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,8 +1,8 @@ from omlt.dependencies import julia_available if julia_available: - from juliacall import Main as jl from juliacall import Base + from juliacall import Main as jl jl_err = Base.error jl.seval("import JuMP") diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py new file mode 100644 index 00000000..697c9fa5 --- /dev/null +++ b/src/omlt/base/pyomo.py @@ -0,0 +1,521 @@ +"""Pyomo-backed objects. + +This file contains implementations of the OMLT classes, using +Pyomo objects as the underlying data storage mechanism. +""" +from typing import Any + +import pyomo.environ as pyo +from pyomo.core.base.var import _GeneralVarData + +from omlt.base.constraint import OmltConstraintIndexed, OmltConstraintScalar +from omlt.base.expression import OmltExprIndexed, OmltExprScalar +from omlt.base.var import OmltIndexed, OmltScalar + +# Variables + +class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): + format = "pyomo" + + def __init__(self, *args, **kwargs: Any): + kwargs.pop("lang", None) + self._pyovar = pyo.ScalarVar(*args, **kwargs) + self._parent = None + self._constructed = None + + def construct(self, data=None): + return self._pyovar.construct(data) + + def is_constructed(self): + return self._pyovar.is_constructed() + + def fix(self, value, *, skip_validation=False): + self._pyovar.fix(value, skip_validation) + + @property + def ctype(self): + return pyo.ScalarVar + + @property + def name(self): + self._pyovar._name = self._name + return self._pyovar._name + + @property + def bounds(self): + return (self._pyovar._lb, self._pyovar._ub) + + @bounds.setter + def bounds(self, val): + self._pyovar.lb = val[0] + self._pyovar.ub = val[1] + + @property + def lb(self): + return self._pyovar._lb + + @lb.setter + def lb(self, val): + self._pyovar.setlb(val) + + @property + def ub(self): + return self._pyovar._ub + + @ub.setter + def ub(self, val): + self._pyovar.setub(val) + + @property + def domain(self): + return self._pyovar._domain + + @domain.setter + def domain(self, val): + self._pyovar._domain = val + + # Interface for getting/setting value + @property + def value(self): + return self._pyovar.value + + @value.setter + def value(self, val): + self._pyovar.value = val + + +class OmltIndexedPyomo(pyo.Var, OmltIndexed): + format = "pyomo" + + def __init__(self, *indexes, **kwargs: Any): + kwargs.pop("lang", None) + super().__init__(*indexes, **kwargs) + + def fix(self, value=None, *, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def setub(self, value): + for vardata in self.values(): + vardata.ub = value + + def setlb(self, value): + for vardata in self.values(): + vardata.lb = value + +# Constraints + +class OmltConstraintScalarPyomo(OmltConstraintScalar, pyo.Constraint): + format = "pyomo" + + def __init__(self, *args, **kwargs: Any): + super().__init__(*args, **kwargs) + self.lhs = ( + self.lhs._expression + if isinstance(self.lhs, OmltExprScalar) + else self.lhs + ) + self.rhs = ( + self.rhs._expression + if isinstance(self.rhs, OmltExprScalar) + else self.rhs + ) + + if self.sense == "==": + pyoexpr = self.lhs == self.rhs + if self.sense == ">=": + pyoexpr = self.lhs >= self.rhs + if self.sense == ">": + 
pyoexpr = self.lhs > self.rhs + if self.sense == "<=": + pyoexpr = self.lhs <= self.rhs + if self.sense == "<": + pyoexpr = self.lhs < self.rhs + + self.constraint = pyo.Constraint(expr=pyoexpr) + self.constraint._parent = self._parent + self.constraint.construct() + + def __call__(self, *args: Any, **kwds: Any) -> Any: + return self.constraint.__call__(*args, **kwds) + + @property + def __class__(self): + return type(self.constraint.expr) + + @property + def args(self): + return self.constraint.expr.args + + @property + def strict(self): + return self.constraint.expr._strict + + @property + def _constructed(self): + return self.constraint._constructed + + @property + def _active(self): + return self.constraint._active + + @property + def _data(self): + return self.constraint._data + + def is_indexed(self): + return False + +class OmltConstraintIndexedPyomo(OmltConstraintIndexed, pyo.Constraint): + format = "pyomo" + + def __init__(self, *args, **kwargs: Any): + super().__init__(*args, **kwargs) + kwargs.pop("model", None) + self.constraint = pyo.Constraint(*self._index_set, **kwargs) + self._index_set = self.constraint._index_set + self.constraint._parent = self._parent + self.constraint.construct() + self.model = self.constraint.model + + self.constraints = {} + + def __setitem__(self, index, expr): + if index in self._index_set: + self.constraint[index] = expr + self.constraints[index] = self.constraint[index] + else: + msg = ( + "Couldn't find index %s in index set %.", + index, + list(self._index_set.data()), + ) + raise KeyError(msg) + + def __getitem__(self, index): + if index in self.constraint._index_set: + return self.constraint[index] + msg = ( + "Couldn't find index %s in index set %.", + index, + list(self._index_set.data()), + ) + raise KeyError(msg) + + def __call__(self, *args: Any, **kwds: Any) -> Any: + return self.constraint.__call__(*args, **kwds) + + def __len__(self): + return len(self.constraint) + + @property + def _constructed(self): + return self.constraint._constructed + + @property + def _active(self): + return self.constraint._active + + @_active.setter + def _active(self, val): + self.constraint._active = val + + @property + def _data(self): + return self.constraint._data + + @property + def doc(self): + return self.constraint.doc + +# Expressions + +class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): + format = "pyomo" + + def __init__(self, expr=None, **kwargs: Any): + self._index_set = {} + if isinstance(expr, (pyo.Expression, pyo.NumericValue)): + self._expression = expr + elif isinstance(expr, OmltExprScalarPyomo): + self._expression = expr._expression + elif isinstance(expr, tuple): + self._expression = self._parse_expression_tuple(expr) + else: + msg = ("Expression %s type %s not recognized", expr, type(expr)) + raise TypeError(msg) + + self._parent = None + self.name = None + self.__class__ = type(self._expression) + self._args_ = self._expression._args_ + + def _parse_expression_tuple_term(self, term): + if isinstance(term, tuple): + return self._parse_expression_tuple(term) + if isinstance(term, OmltExprScalarPyomo): + return term._expression + if isinstance(term, OmltScalarPyomo): + return term._pyovar + if isinstance(term, (pyo.Expression, pyo.Var, _GeneralVarData, int, float)): + return term + msg = ("Term of expression %s is an unsupported type. 
%s", term, type(term)) + raise TypeError(msg) + + def _parse_expression_tuple(self, expr): + lhs = self._parse_expression_tuple_term(expr[0]) + rhs = self._parse_expression_tuple_term(expr[2]) + + if expr[1] == "+": + return lhs + rhs + + if expr[1] == "-": + return lhs - rhs + + if expr[1] == "*": + return lhs * rhs + + if expr[1] == "/": + return lhs / rhs + + msg = ("Expression middle term was {%s}.", expr[1]) + raise ValueError(msg) + + def __class__(self): + return type(self._expression) + + def is_potentially_variable(self): + return self._expression.is_potentially_variable() + + def is_indexed(self): + return False + + def as_numeric(self): + return self._expression._apply_operation(self._expression.args) + + def construct(self, data=None): + return self._expression.construct(data) + + @property + def _constructed(self): + return self._expression.expr._constructed + + @property + def const(self): + return self._expression.const + + @property + def args(self): + return self._expression.args + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __len__(self): + return 1 + + def __call__(self): + return self._expression() + + def __add__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = self._expression + other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression + other + return OmltExprScalar(format=self._format, expr=expr) + + def __sub__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = self._expression - other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression - other + return OmltExprScalar(format=self._format, expr=expr) + + def __mul__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = self._expression * other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression * other + return OmltExprScalar(format=self._format, expr=expr) + + def __div__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = self._expression / other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression / other + return OmltExprScalar(format=self._format, expr=expr) + + def __truediv__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = self._expression // other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression // other + return OmltExprScalar(format=self._format, expr=expr) + + def __radd__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = other._expression + self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other + self._expression + return OmltExprScalar(format=self._format, expr=expr) + + def __rsub__(self, other): + if isinstance(other, OmltExprScalarPyomo): + expr = other._expression - self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other - self._expression + return OmltExprScalar(format=self._format, expr=expr) + + def __rmul__(self, other): + if isinstance(other, OmltExprScalar): + expr = other._expression * self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other * self._expression + return OmltExprScalar(format=self._format, expr=expr) + + def __ge__(self, other): + if isinstance(other, OmltExprScalarPyomo): + rhs = other._expression + elif isinstance(other, OmltScalarPyomo): + rhs = other._pyovar + else: + rhs = other + return 
OmltConstraintScalar( + model=self._parent, format=self._format, lhs=self, sense=">=", rhs=rhs + ) + + def __le__(self, other): + if isinstance(other, OmltExprScalarPyomo): + rhs = other._expression + elif isinstance(other, OmltScalarPyomo): + rhs = other._pyovar + else: + rhs = other + return OmltConstraintScalar( + model=self._parent, format=self._format, lhs=self, sense="<=", rhs=rhs + ) + + def __eq__(self, other): + if isinstance(other, OmltExprScalarPyomo): + rhs = other._expression + elif isinstance(other, OmltScalarPyomo): + rhs = other._pyovar + else: + rhs = other + return OmltConstraintScalar( + model=self._parent, format=self._format, lhs=self, sense="==", rhs=rhs + ) + + +class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): + format = "pyomo" + + def __init__( + self, *indexes, expr=None, **kwargs: Any + ): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + elif len(indexes) > 1: + raise ValueError("Currently index cross-products are unsupported.") + else: + self._index_set = {} + self._format = format + self._expression = pyo.Expression(self._index_set, expr=expr) + + def is_indexed(self): + return True + + def expression_as_dict(self): + if len(self._index_set) == 1: + return {self._index_set[0]: self._expression} + return {k: self._expression[k] for k in self._index_set} + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._expression[item[0]] + return self._expression[item] + + def __setitem__(self, item, value): + self._expression[item] = value + + def keys(self): + return self._expression.keys() + + def values(self): + return self._expression.values() + + def items(self): + return self._expression.items() + + def __len__(self): + """Return the number of component data objects stored by this component.""" + return len(self._expression) + + def __contains__(self, idx): + """Return true if the index is in the dictionary.""" + return idx in self._expression + + # The default implementation is for keys() and __iter__ to be + # synonyms. 
The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys.""" + return self._expression.__iter__() + + @property + def args(self): + return self._expression.args() + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __call__(self): + return self._expression() + + def __add__(self, other): + expr = (self, "+", other) + return OmltExprIndexed(self._index_set, format=self._format, expr=expr) + + def __sub__(self, other): + expr = (self, "-", other) + return OmltExprIndexed(self._index_set, format=self._format, expr=expr) + + def __mul__(self, other): + expr = (self, "*", other) + return OmltExprIndexed(self._index_set, format=self._format, expr=expr) + + def __div__(self, other): + expr = (self, "/", other) + return OmltExprIndexed(self._index_set, format=self._format, expr=expr) + + def __truediv__(self, other): + expr = (self, "//", other) + return OmltExprIndexed(self._index_set, format=self._format, expr=expr) + + def __eq__(self, other): + expr = (self, "==", other) + return pyo.Expression(self._index_set, expr=expr) + + def __le__(self, other): + expr = (self, "<=", other) + return pyo.Expression(self._index_set, expr=expr) + + def __ge__(self, other): + expr = (self, ">=", other) + return pyo.Expression(self._index_set, expr=expr) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index a7e5a9b8..e72b1775 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -1,29 +1,25 @@ -""" -Abstraction layer of classes used by OMLT. Underneath these are +"""Abstraction layer of classes used by OMLT. + +Underneath these are objects in a choice of modeling languages: Pyomo (default), JuMP, or others (not yet implemented - e.g. Smoke, Gurobi). - - """ from abc import ABC, abstractmethod +from typing import Any + import pyomo.environ as pyo +from omlt.base import DEFAULT_MODELING_LANGUAGE, expression from omlt.dependencies import julia_available -from omlt.base import DEFAULT_MODELING_LANGUAGE - if julia_available: from omlt.base import jump -from omlt.base.julia import JuMPVarInfo, JumpVar -from omlt.base.expression import OmltExprIndexed, OmltExprScalar - -# from omlt.base.constraint import OmltRelation, OmltRelScalar +from omlt.base.julia import JumpVar, JuMPVarInfo class OmltVar(ABC): - def __new__(cls, *indexes, **kwargs): - + def __new__(cls, *indexes, **kwargs: Any): if not indexes: instance = OmltScalar.__new__(OmltScalar, **kwargs) else: @@ -63,19 +59,20 @@ def valid_model_component(self): class OmltScalar(OmltVar): - def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + def __new__(cls, *args, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( + if lang not in subclass_map: + msg = ( "Variable format %s not recognized. Supported formats " "are 'pyomo' or 'jump'.", - format, + lang, ) - subclass = subclass_map[format] + raise ValueError(msg) + subclass = subclass_map[lang] instance = super(OmltVar, subclass).__new__(subclass) instance.__init__(*args, **kwargs) - instance._format = format + instance._format = lang return instance def is_indexed(self): @@ -135,56 +132,41 @@ def value(self, val): # Interface governing how variables behave in expressions. 
- # def __lt__(self, other): - # return OmltRelScalar(expr=(self, "<", other)) - - # def __gt__(self, other): - # return OmltRelScalar(expr=(self, ">", other)) - - # def __le__(self, other): - # return OmltRelScalar(expr=(self, "<=", other)) - - # def __ge__(self, other): - # return OmltRelScalar(expr=(self, ">=", other)) - - # def __eq__(self, other): - # return OmltRelScalar(expr=(self, "==", other)) - def __add__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "+", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "+", other)) def __sub__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "-", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "-", other)) def __mul__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "*", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "*", other)) def __div__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "//", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "//", other)) def __truediv__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "/", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "/", other)) def __pow__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "**", other)) + return expression.OmltExprScalar(lang=self._format, expr=(self, "**", other)) def __radd__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "+", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "+", self)) def __rsub__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "-", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "-", self)) def __rmul__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "*", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "*", self)) def __rdiv__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "//", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "//", self)) def __rtruediv__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "/", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "/", self)) def __rpow__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "**", self)) + return expression.OmltExprScalar(lang=self._format, expr=(other, "**", self)) def __iadd__(self, other): return pyo.NumericValue.__iadd__(self, other) @@ -214,74 +196,6 @@ def __abs__(self): return pyo.NumericValue.__abs__(self) -class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): - format = "pyomo" - - def __init__(self, *args, **kwargs): - kwargs.pop("format", None) - # pyo.ScalarVar.__init__(self, *args, **kwargs) - self._pyovar = pyo.ScalarVar(*args, **kwargs) - self._parent = None - self._constructed = None - - def construct(self, data=None): - return self._pyovar.construct(data) - - def fix(self, value, skip_validation): - return self._pyovar.fix(value, skip_validation) - - @property - def ctype(self): - return pyo.ScalarVar - - @property - def name(self): - self._pyovar._name = self._name - return self._pyovar._name - - @property - def bounds(self): - return (self._pyovar._lb, self._pyovar._ub) - - @bounds.setter - def bounds(self, val): - self._pyovar.lb = val[0] - self._pyovar.ub = val[1] - - @property - def lb(self): - return self._pyovar._lb - - @lb.setter - def lb(self, val): 
- self._pyovar.setlb(val) - - @property - def ub(self): - return self._pyovar._ub - - @ub.setter - def ub(self, val): - self._pyovar.setub(val) - - @property - def domain(self): - return self._pyovar._domain - - @domain.setter - def domain(self, val): - self._pyovar._domain = val - - # Interface for getting/setting value - @property - def value(self): - return self._pyovar.value - - @value.setter - def value(self, val): - self._pyovar.value = val - - class OmltScalarJuMP(OmltScalar): format = "jump" @@ -291,8 +205,7 @@ class OmltScalarJuMP(OmltScalar): def __class__(self): return pyo.ScalarVar - def __init__(self, *args, **kwargs): - + def __init__(self, **kwargs: Any): self._block = kwargs.pop("block", None) self._bounds = kwargs.pop("bounds", None) @@ -304,19 +217,21 @@ def __init__(self, *args, **kwargs): _lb = None _ub = None else: - raise ValueError("Bounds must be given as a tuple") + msg = ("Bounds must be given as a tuple.", self._bounds) + raise ValueError(msg) _domain = kwargs.pop("domain", None) _within = kwargs.pop("within", None) if _domain and _within and _domain != _within: - raise ValueError( + msg = ( "'domain' and 'within' keywords have both " "been supplied and do not agree. Please try " "with a single keyword for the domain of this " "variable." ) - elif _domain: + raise ValueError(msg) + if _domain: self.domain = _domain elif _within: self.domain = _within @@ -343,10 +258,11 @@ def __init__(self, *args, **kwargs): # Pyomo's "scalar" variables can be multidimensional, they're # just not indexed. JuMP scalar variables can only be a single # dimension. Rewrite this error to be more helpful. - raise ValueError( + msg = ( "Initial value for JuMP variables must be an int" f" or float, but {type(_initialize)} was provided." ) + raise ValueError(msg) else: self._value = None @@ -372,7 +288,7 @@ def construct(self, data=None): self._parent()._jumpmodel, self.to_jumpvar() ) - def fix(self, value, skip_validation): + def fix(self, value, *, skip_validation=True): self.fixed = True self._value = value self._varinfo.fixed_value = value @@ -417,8 +333,7 @@ def ub(self, val): def value(self): if self._constructed: return self._var.value - else: - return self._varinfo.start_value + return self._varinfo.start_value @value.setter def value(self, val): @@ -426,7 +341,6 @@ def value(self, val): self._var.value = val else: self._varinfo.start_value = val - self @property def ctype(self): @@ -443,49 +357,26 @@ def name(self, value): def to_jumpvar(self): if self._constructed: return self._var.to_jump() - else: - return self._varinfo.to_jump() + return self._varinfo.to_jump() def to_jumpexpr(self): return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) -""" -Future formats to implement. -""" - - -class OmltScalarSmoke(OmltScalar): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltScalarGurobi(OmltScalar): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) - - class OmltIndexed(OmltVar): - def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( + if lang not in subclass_map: + msg = ( "Variable format %s not recognized. 
Supported formats are 'pyomo'" " or 'jump'.", - format, + lang, ) - subclass = subclass_map[format] + raise ValueError(msg) + subclass = subclass_map[lang] instance = super(OmltVar, subclass).__new__(subclass) instance.__init__(*indexes, **kwargs) - instance._format = format + instance._format = lang return instance def is_indexed(self): @@ -540,56 +431,41 @@ def __iter__(self): # Interface governing how variables behave in expressions. - # def __lt__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "<", other)) - - # def __gt__(self, other): - # return OmltRelation(self.index_set(), expr=(self, ">", other)) - - # def __le__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "<=", other)) - - # def __ge__(self, other): - # return OmltRelation(self.index_set(), expr=(self, ">=", other)) - - # def __eq__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "==", other)) - def __add__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "+", other)) def __sub__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "-", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "-", other)) def __mul__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "*", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "*", other)) def __div__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "//", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "//", other)) def __truediv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "/", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "/", other)) def __pow__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "**", other)) + return expression.OmltExprIndexed(self.index_set(), expr=(self, "**", other)) def __radd__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "+", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "+", self)) def __rsub__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "-", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "-", self)) def __rmul__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "*", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "*", self)) def __rdiv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "//", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "//", self)) def __rtruediv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "/", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "/", self)) def __rpow__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "**", self)) + return expression.OmltExprIndexed(self.index_set(), expr=(other, "**", self)) def __iadd__(self, other): return pyo.NumericValue.__iadd__(self, other) @@ -619,31 +495,6 @@ def __abs__(self): return pyo.NumericValue.__abs__(self) -class OmltIndexedPyomo(pyo.Var, OmltIndexed): - format = "pyomo" - - def __init__(self, *indexes, **kwargs): - kwargs.pop("format", None) - super().__init__(*indexes, **kwargs) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in 
self.values(): - vardata.fix(value, skip_validation) - - def setub(self, value): - for vardata in self.values(): - vardata.ub = value - - def setlb(self, value): - for vardata in self.values(): - vardata.lb = value - - class OmltIndexedJuMP(OmltIndexed): format = "jump" @@ -653,7 +504,7 @@ class OmltIndexedJuMP(OmltIndexed): def __class__(self): return pyo.Var - def __init__(self, *indexes, **kwargs): + def __init__(self, *indexes, **kwargs: Any): if len(indexes) == 1: index_set = indexes[0] i_dict = {} @@ -661,7 +512,8 @@ def __init__(self, *indexes, **kwargs): i_dict[i] = val self._index_set = tuple(i_dict[i] for i in range(len(index_set))) else: - raise ValueError("Currently index cross-products are unsupported.") + msg = ("Currently index cross-products are unsupported.") + raise ValueError(msg) self._block = kwargs.pop("block", None) @@ -677,21 +529,23 @@ def __init__(self, *indexes, **kwargs): _lb = {i: None for i in self._index_set} _ub = {i: None for i in self._index_set} else: - raise ValueError( + msg = ( "Bounds must be given as a tuple," " but %s was given.", self._bounds ) + raise TypeError(msg) _domain = kwargs.pop("domain", None) _within = kwargs.pop("within", None) if _domain and _within and _domain != _within: - raise ValueError( + msg = ( "'domain' and 'within' keywords have both " "been supplied and do not agree. Please try " "with a single keyword for the domain of this " "variable." ) - elif _domain: + raise ValueError(msg) + if _domain: self.domain = _domain elif _within: self.domain = _within @@ -719,11 +573,12 @@ def __init__(self, *indexes, **kwargs): elif len(_initialize) == 1: self._value = {i: _initialize[0] for i in self._index_set} else: - raise ValueError( + msg = ( "Index set has length %s, but initializer has length %s.", len(self._index_set), len(_initialize), ) + raise ValueError(msg) else: self._value = {i: None for i in self._index_set} @@ -746,8 +601,7 @@ def __init__(self, *indexes, **kwargs): def __getitem__(self, item): if isinstance(item, tuple) and len(item) == 1: return self._vars[item[0]] - else: - return self._vars[item] + return self._vars[item] def __setitem__(self, item, value): self._varinfo[item] = value @@ -757,20 +611,17 @@ def __setitem__(self, item, value): def keys(self): if self._parent is not None: return self._varrefs.keys() - else: - return self._vars.keys() + return self._vars.keys() def values(self): if self._parent is not None: return self._varrefs.values() - else: - return self._vars.values() + return self._vars.values() def items(self): if self._parent is not None: return self._varrefs.items() - else: - return self._vars.items() + return self._vars.items() def fix(self, value=None): self.fixed = True @@ -783,14 +634,11 @@ def fix(self, value=None): vardata.has_fix = True def __len__(self): - """ - Return the number of component data objects stored by this - component. 
- """ + """Return the number of component data objects stored by this component.""" return len(self._vars) def __contains__(self, idx): - """Return true if the index is in the dictionary""" + """Return true if the index is in the dictionary.""" return idx in self._vars # The default implementation is for keys() and __iter__ to be @@ -798,7 +646,7 @@ def __contains__(self, idx): # keys/values/items continue to work for components that implement # other definitions for __iter__ (e.g., Set) def __iter__(self): - """Return an iterator of the component data keys""" + """Return an iterator of the component data keys.""" return self._vars.__iter__() def construct(self, data=None): @@ -845,29 +693,8 @@ def name(self): def to_jumpvar(self): if self._constructed: return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) + msg = "Variable must be constructed before exporting to JuMP." + raise ValueError(msg) def to_jumpexpr(self): return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} - - -""" -Future formats to implement. -""" - - -class OmltIndexedSmoke(OmltIndexed): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltIndexedGurobi(OmltIndexed): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) diff --git a/src/omlt/block.py b/src/omlt/block.py index ea6f7665..ab5559b5 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -25,7 +25,7 @@ class is used in combination with a formulation object to construct the """ -from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE +from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVar from omlt.dependencies import julia_available if julia_available: @@ -48,8 +48,8 @@ def __init__(self, component): else: self._jumpmodel = None - def set_format(self, format): - self._format = format + def set_format(self, lang): + self._format = lang if self._format == "jump" and self._jumpmodel is None: self._jumpmodel = jump.Model() @@ -71,13 +71,14 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.__output_indexes = output_indexes self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0, format=self._format) + self.inputs = OmltVar(self.inputs_set, initialize=0, lang=self._format) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0, format=self._format) + self.outputs = OmltVar(self.outputs_set, initialize=0, lang=self._format) - def build_formulation(self, formulation, format=None): - """ + def build_formulation(self, formulation, lang=None): + """Build formulation. + Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network formulation. The formulation object can be accessed later through the @@ -87,7 +88,7 @@ def build_formulation(self, formulation, format=None): ---------- formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation - format : str + lang : str Which modelling language to build the formulation in. Currently supported are "pyomo" (default) and "jump". 
@@ -107,8 +108,8 @@ def build_formulation(self, formulation, format=None): raise ValueError(msg) - if format is not None: - self._format = format + if lang is not None: + self._format = lang if self._format == "jump": self._jumpmodel = jump.Model() diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index db607935..be727992 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -2,7 +2,8 @@ import weakref import pyomo.environ as pyo -from omlt.base import OmltVar + +from omlt.base import OmltConstraint, OmltVar class _PyomoFormulationInterface(abc.ABC): @@ -130,15 +131,14 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): output_unscaling_expressions ) - @block.Constraint(block.scaled_inputs.index_set()) - def _scale_input_constraint(b, *args): - return ( - block.scaled_inputs[args] - == input_scaling_expressions[scalar_or_tuple(args)] - ) + block._scale_input_constraint = OmltConstraint(block.inputs_set) + for idx in block.inputs_set: + block._scale_input_constraint[idx] = ( + block.scaled_inputs[idx] == input_scaling_expressions[idx] + ) - @block.Constraint(block.outputs.index_set()) - def _scale_output_constraint(b, *args): - return ( - block.outputs[args] == output_unscaling_expressions[scalar_or_tuple(args)] + block._scale_output_constraint = OmltConstraint(block.outputs_set) + for idx in block.outputs_set: + block._scale_output_constraint[idx] = ( + block.outputs[idx] == output_unscaling_expressions[idx] ) diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index 17e798cf..844a1d09 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -4,7 +4,7 @@ import numpy as np import pyomo.environ as pe -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.gbt.model import GradientBoostedTreeModel @@ -175,8 +175,8 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): ] block.y = OmltVar(y_index, domain=pe.Binary) - @block.Constraint(tree_ids) - def single_leaf(b, tree_id): + block.single_leaf = OmltConstraint(tree_ids) + for tree_id in tree_ids: r"""Single leaf constraint. Add constraint to ensure that only one leaf per tree is active, @@ -187,9 +187,9 @@ def single_leaf(b, tree_id): \end{align*} """ tree_mask = nodes_tree_ids == tree_id - return ( + block.single_leaf[tree_id] = ( sum( - b.z_l[tree_id, node_id] + block.z_l[tree_id, node_id] for node_id in nodes_node_ids[nodes_leaf_mask & tree_mask] ) == 1 @@ -247,8 +247,8 @@ def _sum_of_z_l(tree_id, start_node_id): visit_queue.append(local_true_node_ids[node_id]) return sum_of_z_l - @block.Constraint(nodes_tree_branch_ids) - def left_split(b, tree_id, branch_node_id): + block.left_split = OmltConstraint(nodes_tree_branch_ids) + for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Left split. Add constraint to activate all left splits leading to an active leaf, @@ -263,10 +263,12 @@ def left_split(b, tree_id, branch_node_id): y = _branching_y(tree_id, branch_node_id) subtree_root = nodes_true_node_ids[node_mask][0] - return _sum_of_z_l(tree_id, subtree_root) <= y + block.left_split[tree_id, branch_node_id] = ( + _sum_of_z_l(tree_id, subtree_root) <= y + ) - @block.Constraint(nodes_tree_branch_ids) - def right_split(b, tree_id, branch_node_id): + block.right_split = OmltConstraint(nodes_tree_branch_ids) + for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Right split. 
Add constraint to activate all right splits leading to an active leaf, @@ -281,10 +283,12 @@ def right_split(b, tree_id, branch_node_id): y = _branching_y(tree_id, branch_node_id) subtree_root = nodes_false_node_ids[node_mask][0] - return _sum_of_z_l(tree_id, subtree_root) <= 1 - y + block.right_split[tree_id, branch_node_id] = ( + _sum_of_z_l(tree_id, subtree_root) <= 1 - y + ) - @block.Constraint(y_index) - def order_y(b, feature_id, branch_y_idx): + block.order_y = OmltConstraint(y_index) + for feature_id, branch_y_idx in y_index: r"""Add constraint to activate splits in the correct order. Mistry et al. Equ. (3e). @@ -295,12 +299,13 @@ def order_y(b, feature_id, branch_y_idx): \end{align*} """ branch_values = branch_value_by_feature_id[feature_id] - if branch_y_idx >= len(branch_values) - 1: - return pe.Constraint.Skip - return b.y[feature_id, branch_y_idx] <= b.y[feature_id, branch_y_idx + 1] + if branch_y_idx < len(branch_values) - 1: + block.order_y[feature_id, branch_y_idx] = ( + block.y[feature_id, branch_y_idx] <= block.y[feature_id, branch_y_idx + 1] + ) - @block.Constraint(y_index) - def var_lower(b, feature_id, branch_y_idx): + block.var_lower = OmltConstraint(y_index) + for feature_id, branch_y_idx in y_index: r"""Lower bound constraint. Add constraint to link discrete tree splits to lower bound of continuous @@ -316,15 +321,15 @@ def var_lower(b, feature_id, branch_y_idx): \end{align*} """ x = input_vars[feature_id] - if x.lb is None: - return pe.Constraint.Skip - branch_value = branch_value_by_feature_id[feature_id][branch_y_idx] - return x >= x.lb + (branch_value - x.lb) * (1 - b.y[feature_id, branch_y_idx]) - - @block.Constraint(y_index) - def var_upper(b, feature_id, branch_y_idx): + if x.lb is not None: + branch_value = branch_value_by_feature_id[feature_id][branch_y_idx] + block.var_lower[feature_id, branch_y_idx] = x >= x.lb + ( + branch_value - x.lb + ) * (1 - block.y[feature_id, branch_y_idx]) + + block.var_upper = OmltConstraint(y_index) + for feature_id, branch_y_idx in y_index: r"""Upper bound constraint. - Add constraint to link discrete tree splits to upper bound of continuous variables. Mistry et al. Equ. (4b). @@ -336,32 +341,34 @@ def var_upper(b, feature_id, branch_y_idx): \end{align*} """ x = input_vars[feature_id] - if x.ub is None: - return pe.Constraint.Skip - branch_value = branch_value_by_feature_id[feature_id][branch_y_idx] - return x <= x.ub + (branch_value - x.ub) * b.y[feature_id, branch_y_idx] - - @block.Constraint() - def tree_mean_value(b): - r"""Add constraint to link block output tree model mean. + if x.ub is not None: + branch_value = branch_value_by_feature_id[feature_id][branch_y_idx] + block.var_upper[feature_id, branch_y_idx] = ( + x <= x.ub + (branch_value - x.ub) * block.y[feature_id, branch_y_idx] + ) - Mistry et al. Equ. (3a). - .. math:: - \begin{align*} - \hat{\mu} &= \sum\limits_{t \in T} \sum\limits_{l \in {L_t}} - F_{t,l} z_{t,l} - \end{align*} - """ - return ( + block.tree_mean_value = OmltConstraint( + expr=( output_vars[0] == sum( - weight * b.z_l[tree_id, node_id] + weight * block.z_l[tree_id, node_id] for tree_id, node_id, weight in zip( target_tree_ids, target_node_ids, target_weights ) ) + base_value ) + ) + + r"""Add constraint to link block output tree model mean. + + Mistry et al. Equ. (3a). + .. 
math:: + \begin{align*} + \hat{\mu} &= \sum\limits_{t \in T} \sum\limits_{l \in {L_t}} + F_{t,l} z_{t,l} + \end{align*} + """ def _node_attributes(node): diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index 66e48775..bc23140d 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -1,6 +1,7 @@ import numpy as np import pyomo.environ as pyo +from omlt.base import OmltConstraint, OmltVar from omlt.io.torch_geometric.torch_geometric_reader import ( load_torch_geometric_sequential, ) @@ -58,7 +59,7 @@ def gnn_with_non_fixed_graph( ) # define binary variables for adjacency matrix - block.A = pyo.Var( + block.A = OmltVar( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, @@ -67,10 +68,11 @@ def gnn_with_non_fixed_graph( for u in range(N): block.A[u, u].fix(1) # assume the adjacency matrix is always symmetric - block.symmetric_adjacency = pyo.ConstraintList() + indexes = [(u, v) for u in range(N) for v in range(u + 1, N)] + block.symmetric_adjacency = OmltConstraint(indexes) for u in range(N): for v in range(u + 1, N): - block.symmetric_adjacency.add(block.A[u, v] == block.A[v, u]) + block.symmetric_adjacency[(u,v)] = block.A[u, v] == block.A[v, u] # build formulation for GNN block.build_formulation(FullSpaceNNFormulation(net)) @@ -139,7 +141,7 @@ def gnn_with_fixed_graph( ) # define binary variables for adjacency matrix - block.A = pyo.Var( + block.A = OmltVar( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index 6c22dab5..eb8dd069 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -2,7 +2,7 @@ import pyomo.environ as pe from pyomo.gdp import Disjunct -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -259,13 +259,11 @@ def disjuncts_rule(dsj, tree, leaf): def lb_rule(dsj, feat): return input_vars[feat] >= leaves[tree][leaf]["bounds"][feat][0] - dsj.lb_constraint = pe.Constraint(features, rule=lb_rule) - + dsj.lb_constraint = OmltConstraint(features, rule=lb_rule) def ub_rule(dsj, feat): return input_vars[feat] <= leaves[tree][leaf]["bounds"][feat][1] - dsj.ub_constraint = pe.Constraint(features, rule=ub_rule) - + dsj.ub_constraint = OmltConstraint(features, rule=ub_rule) slope = leaves[tree][leaf]["slope"] intercept = leaves[tree][leaf]["intercept"] dsj.linear_exp = pe.Constraint( @@ -325,34 +323,36 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output # in tree t is returned. 
intermediate_output is the output of tree t and # the total output of the model is the sum of the intermediate_output vars block.z = OmltVar(t_l, within=pe.Binary) - block.intermediate_output = pe.Var(tree_ids) + block.intermediate_output = OmltVar(tree_ids) - @block.Constraint(features, tree_ids) - def lower_bound_constraints(mdl, feat, tree): + block.lower_bound_constraints = OmltConstraint(features, tree_ids) + for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) - return ( - sum( - leaves[tree][leaf]["bounds"][feat][0] * mdl.z[tree, leaf] - for leaf in leaf_ids + for feat in features: + block.lower_bound_constraints[feat, tree] = ( + sum( + leaves[tree][leaf]["bounds"][feat][0] * block.z[tree, leaf] + for leaf in leaf_ids + ) + <= input_vars[feat] ) - <= input_vars[feat] - ) - @block.Constraint(features, tree_ids) - def upper_bound_constraints(mdl, feat, tree): + block.upper_bound_constraints = OmltConstraint(features, tree_ids) + for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) - return ( - sum( - leaves[tree][leaf]["bounds"][feat][1] * mdl.z[tree, leaf] - for leaf in leaf_ids + for feat in features: + block.upper_bound_constraints[feat, tree] = ( + sum( + leaves[tree][leaf]["bounds"][feat][1] * block.z[tree, leaf] + for leaf in leaf_ids + ) + >= input_vars[feat] ) - >= input_vars[feat] - ) - @block.Constraint(tree_ids) - def linear_constraint(mdl, tree): + block.linear_constraint = OmltConstraint(tree_ids) + for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) - return block.intermediate_output[tree] == sum( + block.linear_constraint[tree] = block.intermediate_output[tree] == sum( ( sum( leaves[tree][leaf]["slope"][feat] * input_vars[feat] @@ -363,14 +363,13 @@ def linear_constraint(mdl, tree): * block.z[tree, leaf] for leaf in leaf_ids ) - - @block.Constraint(tree_ids) - def only_one_leaf_per_tree(b, tree): + block.only_one_leaf_per_tree = OmltConstraint(tree_ids) + for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) - return sum(block.z[tree, leaf] for leaf in leaf_ids) == 1 - - @block.Constraint() - def output_sum_of_trees(b): - return output_vars[0] == sum( - block.intermediate_output[tree] for tree in tree_ids + block.only_one_leaf_per_tree[tree] = ( + sum(block.z[tree, leaf] for leaf in leaf_ids) == 1 ) + + block.output_sum_of_trees = output_vars[0] == sum( + block.intermediate_output[tree] for tree in tree_ids + ) diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py index 4538401a..ffec4c01 100644 --- a/src/omlt/neuralnet/activations/linear.py +++ b/src/omlt/neuralnet/activations/linear.py @@ -1,3 +1,6 @@ +from omlt.base import OmltConstraint + + def linear_activation_function(zhat): return zhat @@ -16,10 +19,11 @@ def linear_activation_constraint( \end{align*} """ - - @layer_block.Constraint(layer.output_indexes) - def linear_activation(b, *output_index): - zhat_lb, zhat_ub = b.zhat[output_index].bounds - b.z[output_index].setlb(zhat_lb) - b.z[output_index].setub(zhat_ub) - return b.z[output_index] == b.zhat[output_index] + layer_block.linear_activation = OmltConstraint(layer.output_indexes) + for output_index in layer.output_indexes: + zhat_lb, zhat_ub = layer_block.zhat[output_index].bounds + layer_block.z[output_index].setlb(zhat_lb) + layer_block.z[output_index].setub(zhat_ub) + layer_block.linear_activation[output_index] = ( + layer_block.z[output_index] == layer_block.zhat[output_index] + ) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 
eaad5a7f..4f61415e 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,7 +1,7 @@ import pyomo.environ as pyo from pyomo import mpec -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar def bigm_relu_activation_constraint(net_block, net, layer_block, layer): @@ -42,10 +42,18 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): """ layer_block.q_relu = OmltVar(layer.output_indexes, within=pyo.Binary) - layer_block._z_lower_bound_relu = pyo.Constraint(layer.output_indexes) - layer_block._z_lower_bound_zhat_relu = pyo.Constraint(layer.output_indexes) - layer_block._z_upper_bound_relu = pyo.Constraint(layer.output_indexes) - layer_block._z_upper_bound_zhat_relu = pyo.Constraint(layer.output_indexes) + layer_block._z_lower_bound_relu = OmltConstraint( + layer.output_indexes, model=layer_block.model + ) + layer_block._z_lower_bound_zhat_relu = OmltConstraint( + layer.output_indexes, model=layer_block.model + ) + layer_block._z_upper_bound_relu = OmltConstraint( + layer.output_indexes, model=layer_block.model + ) + layer_block._z_upper_bound_zhat_relu = OmltConstraint( + layer.output_indexes, model=layer_block.model + ) # set dummy parameters here to avoid warning message from Pyomo layer_block._big_m_lb_relu = pyo.Param( diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py index 7f5bd10d..99c330af 100644 --- a/src/omlt/neuralnet/activations/smooth.py +++ b/src/omlt/neuralnet/activations/smooth.py @@ -1,5 +1,7 @@ from pyomo.environ import exp, log, tanh +from omlt.base import OmltConstraint + def softplus_activation_function(x): r"""Applies the softplus function. @@ -74,12 +76,15 @@ def smooth_monotonic_activation_constraint(net_block, net, layer_block, layer, f \end{align*} """ - - @layer_block.Constraint(layer.output_indexes) - def _smooth_monotonic_activation_constraint(b, *output_index): - zhat_lb, zhat_ub = b.zhat[output_index].bounds + layer_block._smooth_monotonic_activation_constraint = OmltConstraint( + layer.output_indexes + ) + for output_index in layer.output_indexes: + zhat_lb, zhat_ub = layer_block.zhat[output_index].bounds if zhat_lb is not None: - b.z[output_index].setlb(fcn(zhat_lb)) + layer_block.z[output_index].setlb(fcn(zhat_lb)) if zhat_ub is not None: - b.z[output_index].setub(fcn(zhat_ub)) - return b.z[output_index] == fcn(b.zhat[output_index]) + layer_block.z[output_index].setub(fcn(zhat_ub)) + layer_block._smooth_monotonic_activation_constraint[output_index] = ( + layer_block.z[output_index] == fcn(layer_block.zhat[output_index]) + ) diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index b042bdf4..267c287e 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -1,7 +1,7 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar from omlt.neuralnet.activations import NON_INCREASING_ACTIVATIONS from omlt.neuralnet.layer import ConvLayer2D, PoolingLayer2D @@ -18,8 +18,8 @@ def full_space_dense_layer(net_block, net, layer_block, layer): """ input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - @layer_block.Constraint(layer.output_indexes) - def dense_layer(b, *output_index): + layer_block.dense_layer = OmltConstraint(layer.output_indexes) + for output_index in layer.output_indexes: # dense layers multiply only 
the last dimension of # their inputs expr = 0.0 @@ -33,7 +33,7 @@ def dense_layer(b, *output_index): layer_block.zhat[output_index].setlb(lb) layer_block.zhat[output_index].setub(ub) - return layer_block.zhat[output_index] == expr + layer_block.dense_layer[output_index] = layer_block.zhat[output_index] == expr def full_space_gnn_layer(net_block, net, layer_block, layer): @@ -89,19 +89,19 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): pyo.Set(initialize=range(layer.N)), initialize=0, ) - input_layer_block._zbar_lower_bound_z_big_m = pyo.Constraint( + input_layer_block._zbar_lower_bound_z_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), ) - input_layer_block._zbar_upper_bound_z_big_m = pyo.Constraint( + input_layer_block._zbar_upper_bound_z_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), ) - input_layer_block._zbar_lower_bound_big_m = pyo.Constraint( + input_layer_block._zbar_lower_bound_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), ) - input_layer_block._zbar_upper_bound_big_m = pyo.Constraint( + input_layer_block._zbar_upper_bound_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), ) @@ -155,8 +155,8 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): <= ub * net_block.A[input_node_index, output_node_index] ) - @layer_block.Constraint(layer.output_indexes) - def gnn_layer(b, *output_index): + layer_block.gnn_layer = OmltConstraint(layer.output_indexes) + for output_index in layer.output_indexes: # gnn layers multiply only the last dimension of # their inputs expr = 0.0 @@ -172,7 +172,7 @@ def gnn_layer(b, *output_index): layer_block.zhat[output_index].setlb(lb) layer_block.zhat[output_index].setub(ub) - return layer_block.zhat[output_index] == expr + layer_block.gnn_layer[output_index] = layer_block.zhat[output_index] == expr def full_space_conv2d_layer(net_block, net, layer_block, layer): @@ -206,8 +206,8 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer): input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - @layer_block.Constraint(layer.output_indexes) - def convolutional_layer(b, *output_index): + layer_block.convolutional_layer = OmltConstraint(layer.output_indexes) + for output_index in layer.output_indexes: out_d, out_r, out_c = output_index expr = 0.0 for weight, input_index in layer.kernel_with_input_indexes(out_d, out_r, out_c): @@ -216,7 +216,9 @@ def convolutional_layer(b, *output_index): lb, ub = compute_bounds_on_expr(expr) layer_block.zhat[output_index].setlb(lb) layer_block.zhat[output_index].setub(ub) - return layer_block.zhat[output_index] == expr + layer_block.convolutional_layer[output_index] = ( + layer_block.zhat[output_index] == expr + ) def full_space_maxpool2d_layer(net_block, net, layer_block, layer): @@ -286,11 +288,11 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): layer_block.q_maxpool = OmltVar( layer.output_indexes, layer_block._kernel_indexes, within=pyo.Binary ) - layer_block._q_sum_maxpool = pyo.Constraint(layer.output_indexes) - layer_block._zhat_upper_bound = pyo.Constraint( + layer_block._q_sum_maxpool = OmltConstraint(layer.output_indexes) + layer_block._zhat_upper_bound = OmltConstraint( layer.output_indexes, layer_block._kernel_indexes ) - layer_block._zhat_lower_bound = pyo.Constraint( + layer_block._zhat_lower_bound = OmltConstraint( 
layer.output_indexes, layer_block._kernel_indexes ) diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index b38377de..d09e967b 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -2,7 +2,7 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar def default_partition_split_func(w, n): @@ -91,11 +91,11 @@ def output_node_block(b, *output_index): mapper = layer.input_index_mapper - b.eq_16_lb = pyo.ConstraintList() - b.eq_16_ub = pyo.ConstraintList() + b.eq_16_lb = OmltConstraint(range(num_splits)) + b.eq_16_ub = OmltConstraint(range(num_splits)) - b.eq_17_lb = pyo.ConstraintList() - b.eq_17_ub = pyo.ConstraintList() + b.eq_17_lb = OmltConstraint(range(num_splits)) + b.eq_17_ub = OmltConstraint(range(num_splits)) input_layer_indexes = list(layer.input_indexes_with_input_layer_indexes) @@ -123,12 +123,12 @@ def output_node_block(b, *output_index): z2.setlb(min(0, lb)) z2.setub(max(0, ub)) - b.eq_16_lb.add(b.sig * lb <= expr - z2) - b.eq_16_ub.add(b.sig * ub >= expr - z2) + b.eq_16_lb[split_index] = b.sig * lb <= expr - z2 + b.eq_16_ub[split_index] = b.sig * ub >= expr - z2 minus_sig = 1 - b.sig - b.eq_17_lb.add(minus_sig * lb <= z2) - b.eq_17_ub.add(minus_sig * ub >= z2) + b.eq_17_lb[split_index] = minus_sig * lb <= z2 + b.eq_17_ub[split_index] = minus_sig * ub >= z2 # compute dense layer expression to compute bounds expr = 0.0 @@ -160,13 +160,13 @@ def output_node_block(b, *output_index): eq_13_expr -= b.z2[split_index] eq_13_expr += bias * b.sig - b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0) - b.eq_14 = pyo.Constraint( + b.eq_13 = OmltConstraint(expr=eq_13_expr <= 0) + b.eq_14 = OmltConstraint( expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0 ) - b.eq_15 = pyo.Constraint( + b.eq_15 = OmltConstraint( expr=layer_block.z[output_index] == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression ) diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index 6d5fcf8f..30ea6e77 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,6 +1,6 @@ import pyomo.environ as pyo -from omlt.base import OmltVar +from omlt.base import OmltConstraint, OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.neuralnet.activations import ( ACTIVATION_FUNCTION_MAP as _DEFAULT_ACTIVATION_FUNCTIONS, @@ -60,6 +60,7 @@ def _ignore_input_layer(): MULTI_INPUTS_UNSUPPORTED = "Multiple input layers are not currently supported." MULTI_OUTPUTS_UNSUPPORTED = "Multiple output layers are not currently supported." + class FullSpaceNNFormulation(_PyomoFormulation): """This class is the entry-point to build neural network formulations. 
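The hunks that follow all apply the same migration: Pyomo-specific @block.Constraint rule decorators become explicit OmltConstraint containers filled in a loop, so constraints can be generated in any supported modeling language. Schematically (the names below are placeholders, not OMLT API):

    # before: tied to Pyomo's decorator protocol
    # @block.Constraint(index_set)
    # def rule(b, *idx):
    #     return b.lhs[idx] == b.rhs[idx]

    # after: language-agnostic container, indexed assignment
    block.rule = OmltConstraint(index_set)
    for idx in index_set:
        block.rule[idx] = block.lhs[idx] == block.rhs[idx]
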
@@ -200,9 +201,12 @@ def layer(b, layer_id): raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] - @block.Constraint(input_layer.output_indexes) - def input_assignment(b, *output_index): - return b.scaled_inputs[output_index] == b.layer[id(input_layer)].z[output_index] + block.input_assignment = OmltConstraint(input_layer.output_indexes) + for output_index in input_layer.output_indexes: + block.input_assignment[output_index] = ( + block.scaled_inputs[output_index] + == block.layer[id(input_layer)].z[output_index] + ) # setup output variables constraints # currently only support a single output layer @@ -211,10 +215,11 @@ def input_assignment(b, *output_index): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - @block.Constraint(output_layer.output_indexes) - def output_assignment(b, *output_index): - return ( - b.scaled_outputs[output_index] == b.layer[id(output_layer)].z[output_index] + block.output_assignment = OmltConstraint(output_layer.output_indexes) + for output_index in output_layer.output_indexes: + block.output_assignment[output_index] = ( + block.scaled_outputs[output_index] + == block.layer[id(output_layer)].z[output_index] ) @@ -393,12 +398,11 @@ def z(b, *output_index): raise ValueError(msg) output_layer = output_layers[0] - @block.Constraint(output_layer.output_indexes) - def output_assignment(b, *output_index): - pb = b.parent_block() - return ( - b.scaled_outputs[output_index] - == b.layer[id(output_layer)].z[output_index] + block.output_assignment = OmltConstraint(output_layer.output_indexes) + for output_index in output_layer.output_indexes: + block.output_assignment[output_index] = ( + block.scaled_outputs[output_index] + == block.layer[id(output_layer)].z[output_index] ) # @property @@ -539,11 +543,11 @@ def layer(b, layer_id): raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] - @block.Constraint(input_layer.output_indexes) - def input_assignment(b, *output_index): - return ( - b.scaled_inputs[output_index] - == b.layer[id(input_layer)].z[output_index] + block.input_assignment = OmltConstraint(input_layer.output_indexes) + for output_index in input_layer.output_indexes: + block.input_assignment[output_index] = ( + block.scaled_inputs[output_index] + == block.layer[id(input_layer)].z[output_index] ) # setup output variables constraints @@ -553,11 +557,11 @@ def input_assignment(b, *output_index): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - @block.Constraint(output_layer.output_indexes) - def output_assignment(b, *output_index): - return ( - b.scaled_outputs[output_index] - == b.layer[id(output_layer)].z[output_index] + block.output_assignment = OmltConstraint(output_layer.output_indexes) + for output_index in output_layer.output_indexes: + block.output_assignment[output_index] = ( + block.scaled_outputs[output_index] + == block.layer[id(output_layer)].z[output_index] ) @property diff --git a/tests/test_block.py b/tests/base/test_block.py similarity index 87% rename from tests/test_block.py rename to tests/base/test_block.py index 153e0f78..d14db2c5 100644 --- a/tests/test_block.py +++ b/tests/base/test_block.py @@ -25,6 +25,7 @@ def _clear_inputs(self): def _clear_outputs(self): self.output_indexes = [] + def test_block(): m = pyo.ConcreteModel() m.b = OmltBlock() @@ -58,9 +59,13 @@ def test_jump_block(): m.b = OmltBlock() m.b.set_format("jump") - with pytest.raises(ValueError) as excinfo: + expected_msg = ( + "Initial value for JuMP variables must be an int or 
float, but" + " was provided." + ) + + with pytest.raises(ValueError, match=expected_msg) as excinfo: m.b.x = OmltVar(initialize=(2, 7), format="jump") - expected_msg = "Initial value for JuMP variables must be an int or float, but was provided." assert str(excinfo.value) == expected_msg @@ -102,17 +107,17 @@ def test_input_output_auto_creation(): formulation1 = DummyFormulation() formulation1._clear_inputs() expected_msg = ( - "OmltBlock must have at least one input to build a formulation. " - f"{formulation1} has no inputs." - ) + "OmltBlock must have at least one input to build a formulation. " + f"{formulation1} has no inputs." + ) with pytest.raises(ValueError, match=expected_msg): m.b3.build_formulation(formulation1) formulation2 = DummyFormulation() formulation2._clear_outputs() expected_msg = ( - "OmltBlock must have at least one output to build a formulation. " - f"{formulation2} has no outputs." - ) + "OmltBlock must have at least one output to build a formulation. " + f"{formulation2} has no outputs." + ) with pytest.raises(ValueError, match=expected_msg): m.b3.build_formulation(formulation2) diff --git a/tests/test_formulation.py b/tests/base/test_formulation.py similarity index 100% rename from tests/test_formulation.py rename to tests/base/test_formulation.py diff --git a/tests/test_scaling.py b/tests/base/test_scaling.py similarity index 100% rename from tests/test_scaling.py rename to tests/base/test_scaling.py diff --git a/tests/test_var.py b/tests/base/test_var.py similarity index 85% rename from tests/test_var.py rename to tests/base/test_var.py index 1639c480..fa890801 100644 --- a/tests/test_var.py +++ b/tests/base/test_var.py @@ -1,12 +1,11 @@ -import pytest - import pyomo.environ as pyo +import pytest from omlt.base import OmltVar from omlt.dependencies import julia_available -def _test_scalar_var(format): - v = OmltVar(format=format, initialize=2, domain=pyo.Integers) +def _test_scalar_var(lang): + v = OmltVar(lang=lang, initialize=2, domain=pyo.Integers) assert v.is_indexed() is False assert v.ctype == pyo.ScalarVar diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index 4a99b646..bf0400fa 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -3,6 +3,7 @@ import pyomo.environ as pe import pytest from omlt import OmltBlock +from omlt.base import OmltVar from omlt.dependencies import onnx, onnx_available from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel @@ -19,11 +20,11 @@ def test_formulation_with_continuous_variables(): m = pe.ConcreteModel() - m.x = pe.Var(range(4), bounds=(-2.0, 2.0)) + m.x = OmltVar(range(4), bounds=(-2.0, 2.0)) m.x[3].setlb(0.0) m.x[3].setub(1.0) - m.z = pe.Var() + m.z = OmltVar() m.gbt = OmltBlock() m.gbt.build_formulation(GBTBigMFormulation(GradientBoostedTreeModel(model))) diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py index be098406..0a6861d1 100644 --- a/tests/io/test_torch_geometric.py +++ b/tests/io/test_torch_geometric.py @@ -132,6 +132,7 @@ def _test_gnn_with_non_fixed_graph(nn): m.nn = OmltBlock() gnn_with_non_fixed_graph(m.nn, nn, N, scaled_input_bounds=input_bounds) assert m.nvariables() == 282 + print(m.nn.symmetric_adjacency.constraints) assert m.nconstraints() == 620 diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py index 30e3a1a2..7f3ef059 100644 --- a/tests/linear_tree/test_lt_formulation.py +++ 
b/tests/linear_tree/test_lt_formulation.py @@ -1,6 +1,7 @@ import numpy as np import pyomo.environ as pe import pytest +from omlt.base import OmltVar from omlt.dependencies import lineartree_available if lineartree_available: @@ -169,8 +170,8 @@ def test_bigm_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="bigm") model1 = pe.ConcreteModel() - model1.x = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -203,8 +204,8 @@ def test_hull_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="hull") model1 = pe.ConcreteModel() - model1.x = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -237,8 +238,8 @@ def test_mbigm_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="mbigm") model1 = pe.ConcreteModel() - model1.x = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -271,8 +272,8 @@ def test_hybrid_bigm_formulation_single_var(): formulation1_lt = LinearTreeHybridBigMFormulation(ltmodel_small) model1 = pe.ConcreteModel() - model1.x = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -468,9 +469,9 @@ def test_bigm_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="bigm") model1 = pe.ConcreteModel() - model1.x0 = pe.Var(initialize=0) - model1.x1 = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x0 = OmltVar(initialize=0) + model1.x1 = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -510,9 +511,9 @@ def test_hull_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="hull") model1 = pe.ConcreteModel() - model1.x0 = pe.Var(initialize=0) - model1.x1 = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x0 = OmltVar(initialize=0) + model1.x1 = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -552,9 +553,9 @@ def test_mbigm_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="mbigm") model1 = pe.ConcreteModel() - model1.x0 = pe.Var(initialize=0) - model1.x1 = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x0 = OmltVar(initialize=0) + model1.x1 = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) From 2f91d0710d8ba238d201237fe18f662fb2211b85 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue, 9 Jul 2024 19:43:38 +0000 Subject: [PATCH 65/75] Making block-level modelling language choices 
percolate through generated variables and constraints --- src/omlt/base/pyomo.py | 2 ++ src/omlt/formulation.py | 18 +++++++----- src/omlt/gbt/gbt_formulation.py | 18 +++++++----- .../torch_geometric/build_gnn_formulation.py | 4 ++- src/omlt/linear_tree/lt_formulation.py | 6 ++-- src/omlt/neuralnet/activations/linear.py | 4 ++- src/omlt/neuralnet/activations/relu.py | 12 ++++---- src/omlt/neuralnet/activations/smooth.py | 2 +- src/omlt/neuralnet/layers/full_space.py | 23 ++++++++++----- src/omlt/neuralnet/layers/partition_based.py | 20 +++++++------ src/omlt/neuralnet/nn_formulation.py | 29 +++++++++++++------ tests/base/test_block.py | 4 +-- 12 files changed, 89 insertions(+), 53 deletions(-) diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 697c9fa5..e608f2d8 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -177,6 +177,8 @@ class OmltConstraintIndexedPyomo(OmltConstraintIndexed, pyo.Constraint): def __init__(self, *args, **kwargs: Any): super().__init__(*args, **kwargs) kwargs.pop("model", None) + kwargs.pop("lang", None) + self.constraint = pyo.Constraint(*self._index_set, **kwargs) self._index_set = self.constraint._index_set self.constraint._parent = self._parent diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index be727992..972371a1 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -94,11 +94,15 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): k: (float(scaled_input_bounds[k][0]), float(scaled_input_bounds[k][1])) for k in block.inputs_set } - block.scaled_inputs = OmltVar(block.inputs_set, initialize=0, bounds=bnds) + block.scaled_inputs = OmltVar( + block.inputs_set, initialize=0, lang=block._format, bounds=bnds + ) else: - block.scaled_inputs = OmltVar(block.inputs_set, initialize=0) + block.scaled_inputs = OmltVar( + block.inputs_set, initialize=0, lang=block._format + ) - block.scaled_outputs = OmltVar(block.outputs_set, initialize=0) + block.scaled_outputs = OmltVar(block.outputs_set, initialize=0, lang=block._format) if scaled_input_bounds is not None and scaler is None: # set the bounds on the inputs to be the same as the scaled inputs @@ -131,13 +135,13 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): output_unscaling_expressions ) - block._scale_input_constraint = OmltConstraint(block.inputs_set) + block._scale_input_constraint = OmltConstraint(block.inputs_set, lang=block._format) for idx in block.inputs_set: block._scale_input_constraint[idx] = ( - block.scaled_inputs[idx] == input_scaling_expressions[idx] - ) + block.scaled_inputs[idx] == input_scaling_expressions[idx] + ) - block._scale_output_constraint = OmltConstraint(block.outputs_set) + block._scale_output_constraint = OmltConstraint(block.outputs_set, lang=block._format) for idx in block.outputs_set: block._scale_output_constraint[idx] = ( block.outputs[idx] == output_unscaling_expressions[idx] diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index 844a1d09..22177e77 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -158,6 +158,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): list(zip(nodes_tree_ids[nodes_leaf_mask], nodes_node_ids[nodes_leaf_mask])), bounds=(0, None), domain=pe.Reals, + lang=block._format, ) branch_value_by_feature_id: dict[int, Any] = {} @@ -173,9 +174,9 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): for f in 
continuous_vars for bi, _ in enumerate(branch_value_by_feature_id[f]) ] - block.y = OmltVar(y_index, domain=pe.Binary) + block.y = OmltVar(y_index, lang=block._format, domain=pe.Binary) - block.single_leaf = OmltConstraint(tree_ids) + block.single_leaf = OmltConstraint(tree_ids, lang=block._format) for tree_id in tree_ids: r"""Single leaf constraint. @@ -247,7 +248,7 @@ def _sum_of_z_l(tree_id, start_node_id): visit_queue.append(local_true_node_ids[node_id]) return sum_of_z_l - block.left_split = OmltConstraint(nodes_tree_branch_ids) + block.left_split = OmltConstraint(nodes_tree_branch_ids, lang=block._format) for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Left split. @@ -267,7 +268,7 @@ def _sum_of_z_l(tree_id, start_node_id): _sum_of_z_l(tree_id, subtree_root) <= y ) - block.right_split = OmltConstraint(nodes_tree_branch_ids) + block.right_split = OmltConstraint(nodes_tree_branch_ids, lang=block._format) for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Right split. @@ -287,7 +288,7 @@ def _sum_of_z_l(tree_id, start_node_id): _sum_of_z_l(tree_id, subtree_root) <= 1 - y ) - block.order_y = OmltConstraint(y_index) + block.order_y = OmltConstraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Add constraint to activate splits in the correct order. @@ -304,7 +305,7 @@ def _sum_of_z_l(tree_id, start_node_id): block.y[feature_id, branch_y_idx] <= block.y[feature_id, branch_y_idx + 1] ) - block.var_lower = OmltConstraint(y_index) + block.var_lower = OmltConstraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Lower bound constraint. @@ -327,7 +328,7 @@ def _sum_of_z_l(tree_id, start_node_id): branch_value - x.lb ) * (1 - block.y[feature_id, branch_y_idx]) - block.var_upper = OmltConstraint(y_index) + block.var_upper = OmltConstraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Upper bound constraint. Add constraint to link discrete tree splits to upper bound of continuous @@ -357,7 +358,8 @@ def _sum_of_z_l(tree_id, start_node_id): ) ) + base_value - ) + ), + lang=block._format, ) r"""Add constraint to link block output tree model mean. 
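The pattern in every hunk of this patch is the same: the formulation reads the block-level modeling-language choice stored in `block._format` and forwards it as an explicit `lang=` keyword whenever it generates a variable or constraint. A minimal sketch of the intended effect, assuming only the `OmltVar`/`OmltConstraint` wrappers and `OmltBlock.set_format` from this series; the model and component names below are illustrative, not taken from the patch:

import pyomo.environ as pyo
from omlt import OmltBlock
from omlt.base import OmltConstraint, OmltVar

m = pyo.ConcreteModel()
m.b = OmltBlock()
m.b.set_format("pyomo")  # block-level language choice; "jump" would select the JuMP classes

# Formulation code reads the choice back off the block and forwards it,
# so every generated component lands in the same modeling language.
m.b.v = OmltVar(range(3), lang=m.b._format, bounds=(0, 1))
m.b.c = OmltConstraint(range(3), lang=m.b._format)
for i in range(3):
    m.b.c[i] = m.b.v[i] <= 1

With a block-level choice of "jump", the same formulation code would generate JuMP-backed components instead, without the formulation itself changing.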
diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index bc23140d..d182e5ad 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -63,13 +63,14 @@ def gnn_with_non_fixed_graph( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, + lang=block._format, ) # assume that the self contribution always exists for u in range(N): block.A[u, u].fix(1) # assume the adjacency matrix is always symmetric indexes = [(u, v) for u in range(N) for v in range(u + 1, N)] - block.symmetric_adjacency = OmltConstraint(indexes) + block.symmetric_adjacency = OmltConstraint(indexes, lang=block._format) for u in range(N): for v in range(u + 1, N): block.symmetric_adjacency[(u,v)] = block.A[u, v] == block.A[v, u] @@ -145,6 +146,7 @@ def gnn_with_fixed_graph( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, + lang=block._format, ) # fix A using given values for u in range(N): diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index eb8dd069..9b4eaba6 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -250,7 +250,7 @@ def _add_gdp_formulation_to_block( block.scaled_outputs.setlb(output_bounds[0]) block.intermediate_output = OmltVar( - tree_ids, bounds=(output_bounds[0], output_bounds[1]) + tree_ids, lang=block._format, bounds=(output_bounds[0], output_bounds[1]) ) # Create a disjunct for each leaf containing the bound constraints @@ -322,8 +322,8 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output # Create the intermeditate variables. z is binary that indicates which leaf # in tree t is returned. intermediate_output is the output of tree t and # the total output of the model is the sum of the intermediate_output vars - block.z = OmltVar(t_l, within=pe.Binary) - block.intermediate_output = OmltVar(tree_ids) + block.z = OmltVar(t_l, lang=block._format, within=pe.Binary) + block.intermediate_output = OmltVar(tree_ids, lang=block._format) block.lower_bound_constraints = OmltConstraint(features, tree_ids) for tree in tree_ids: diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py index ffec4c01..87269172 100644 --- a/src/omlt/neuralnet/activations/linear.py +++ b/src/omlt/neuralnet/activations/linear.py @@ -19,7 +19,9 @@ def linear_activation_constraint( \end{align*} """ - layer_block.linear_activation = OmltConstraint(layer.output_indexes) + layer_block.linear_activation = OmltConstraint( + layer.output_indexes, lang=net_block._format + ) for output_index in layer.output_indexes: zhat_lb, zhat_ub = layer_block.zhat[output_index].bounds layer_block.z[output_index].setlb(zhat_lb) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 4f61415e..a1436a9e 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -40,19 +40,21 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): is :math:`\max(0,u)`. 
""" - layer_block.q_relu = OmltVar(layer.output_indexes, within=pyo.Binary) + layer_block.q_relu = OmltVar( + layer.output_indexes, lang=net_block._format, within=pyo.Binary + ) layer_block._z_lower_bound_relu = OmltConstraint( - layer.output_indexes, model=layer_block.model + layer.output_indexes, lang=net_block._format, model=layer_block.model ) layer_block._z_lower_bound_zhat_relu = OmltConstraint( - layer.output_indexes, model=layer_block.model + layer.output_indexes, lang=net_block._format, model=layer_block.model ) layer_block._z_upper_bound_relu = OmltConstraint( - layer.output_indexes, model=layer_block.model + layer.output_indexes, lang=net_block._format, model=layer_block.model ) layer_block._z_upper_bound_zhat_relu = OmltConstraint( - layer.output_indexes, model=layer_block.model + layer.output_indexes, lang=net_block._format, model=layer_block.model ) # set dummy parameters here to avoid warning message from Pyomo diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py index 99c330af..dc668a01 100644 --- a/src/omlt/neuralnet/activations/smooth.py +++ b/src/omlt/neuralnet/activations/smooth.py @@ -77,7 +77,7 @@ def smooth_monotonic_activation_constraint(net_block, net, layer_block, layer, f """ layer_block._smooth_monotonic_activation_constraint = OmltConstraint( - layer.output_indexes + layer.output_indexes, lang=net_block._format ) for output_index in layer.output_indexes: zhat_lb, zhat_ub = layer_block.zhat[output_index].bounds diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 267c287e..deae8713 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -18,7 +18,7 @@ def full_space_dense_layer(net_block, net, layer_block, layer): """ input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - layer_block.dense_layer = OmltConstraint(layer.output_indexes) + layer_block.dense_layer = OmltConstraint(layer.output_indexes, lang=net_block._format) for output_index in layer.output_indexes: # dense layers multiply only the last dimension of # their inputs @@ -88,22 +88,28 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), initialize=0, + lang=net_block._format, ) input_layer_block._zbar_lower_bound_z_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), + lang=net_block._format, ) input_layer_block._zbar_upper_bound_z_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), + lang=net_block._format, + ) input_layer_block._zbar_lower_bound_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), + lang=net_block._format, ) input_layer_block._zbar_upper_bound_big_m = OmltConstraint( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), + lang=net_block._format, ) for local_index, input_index in layer.input_indexes_with_input_layer_indexes: @@ -155,7 +161,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): <= ub * net_block.A[input_node_index, output_node_index] ) - layer_block.gnn_layer = OmltConstraint(layer.output_indexes) + layer_block.gnn_layer = OmltConstraint(layer.output_indexes, lang=net_block._format) for output_index in layer.output_indexes: # gnn layers multiply only the last dimension of # their inputs @@ -206,7 +212,7 @@ def full_space_conv2d_layer(net_block, 
net, layer_block, layer): input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - layer_block.convolutional_layer = OmltConstraint(layer.output_indexes) + layer_block.convolutional_layer = OmltConstraint(layer.output_indexes, lang=net_block._format) for output_index in layer.output_indexes: out_d, out_r, out_c = output_index expr = 0.0 @@ -286,14 +292,17 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): ) ) layer_block.q_maxpool = OmltVar( - layer.output_indexes, layer_block._kernel_indexes, within=pyo.Binary + layer.output_indexes, + layer_block._kernel_indexes, + lang=net_block._format, + within=pyo.Binary, ) - layer_block._q_sum_maxpool = OmltConstraint(layer.output_indexes) + layer_block._q_sum_maxpool = OmltConstraint(layer.output_indexes, lang=net_block._format) layer_block._zhat_upper_bound = OmltConstraint( - layer.output_indexes, layer_block._kernel_indexes + layer.output_indexes, layer_block._kernel_indexes, lang=net_block._format ) layer_block._zhat_lower_bound = OmltConstraint( - layer.output_indexes, layer_block._kernel_indexes + layer.output_indexes, layer_block._kernel_indexes, lang=net_block._format ) for output_index in layer.output_indexes: diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index d09e967b..55bbda16 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -86,16 +86,16 @@ def output_node_block(b, *output_index): splits = split_func(weights) num_splits = len(splits) - b.sig = OmltVar(domain=pyo.Binary) - b.z2 = OmltVar(range(num_splits)) + b.sig = OmltVar(domain=pyo.Binary, lang=net_block._format) + b.z2 = OmltVar(range(num_splits), lang=net_block._format) mapper = layer.input_index_mapper - b.eq_16_lb = OmltConstraint(range(num_splits)) - b.eq_16_ub = OmltConstraint(range(num_splits)) + b.eq_16_lb = OmltConstraint(range(num_splits), lang=net_block._format) + b.eq_16_ub = OmltConstraint(range(num_splits), lang=net_block._format) - b.eq_17_lb = OmltConstraint(range(num_splits)) - b.eq_17_ub = OmltConstraint(range(num_splits)) + b.eq_17_lb = OmltConstraint(range(num_splits), lang=net_block._format) + b.eq_17_ub = OmltConstraint(range(num_splits), lang=net_block._format) input_layer_indexes = list(layer.input_indexes_with_input_layer_indexes) @@ -160,13 +160,15 @@ def output_node_block(b, *output_index): eq_13_expr -= b.z2[split_index] eq_13_expr += bias * b.sig - b.eq_13 = OmltConstraint(expr=eq_13_expr <= 0) + b.eq_13 = OmltConstraint(expr=eq_13_expr <= 0, lang=net_block._format) b.eq_14 = OmltConstraint( expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression - >= 0 + >= 0, + lang=net_block._format, ) b.eq_15 = OmltConstraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression, + lang=net_block._format, ) diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index 30ea6e77..feb3e9ba 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -163,7 +163,7 @@ def _build_neural_network_formulation( @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, initialize=0) + b.z = OmltVar(net_layer.output_indexes, initialize=0, lang=block._format) if isinstance(net_layer, InputLayer): for index in 
net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -172,7 +172,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, initialize=0) + b.zhat = OmltVar(net_layer.output_indexes, initialize=0, lang=block._format) return b @@ -201,7 +201,9 @@ def layer(b, layer_id): raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] - block.input_assignment = OmltConstraint(input_layer.output_indexes) + block.input_assignment = OmltConstraint( + input_layer.output_indexes, lang=block._format + ) for output_index in input_layer.output_indexes: block.input_assignment[output_index] = ( block.scaled_inputs[output_index] @@ -215,7 +217,9 @@ def layer(b, layer_id): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - block.output_assignment = OmltConstraint(output_layer.output_indexes) + block.output_assignment = OmltConstraint( + output_layer.output_indexes, lang=block._format + ) for output_index in output_layer.output_indexes: block.output_assignment[output_index] = ( block.scaled_outputs[output_index] @@ -398,7 +402,9 @@ def z(b, *output_index): raise ValueError(msg) output_layer = output_layers[0] - block.output_assignment = OmltConstraint(output_layer.output_indexes) + block.output_assignment = OmltConstraint( + output_layer.output_indexes, lang=block._format + ) for output_index in output_layer.output_indexes: block.output_assignment[output_index] = ( block.scaled_outputs[output_index] @@ -494,8 +500,9 @@ def _build_formulation(self): # create the z and z_hat variables for each of the layers @block.Block(block.layers) def layer(b, layer_id): + b._format = block._format net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, initialize=0) + b.z = OmltVar(net_layer.output_indexes, lang=b._format, initialize=0) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -504,7 +511,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, initialize=0) + b.zhat = OmltVar(net_layer.output_indexes, lang=b._format, initialize=0) return b @@ -543,7 +550,9 @@ def layer(b, layer_id): raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] - block.input_assignment = OmltConstraint(input_layer.output_indexes) + block.input_assignment = OmltConstraint( + input_layer.output_indexes, lang=block._format + ) for output_index in input_layer.output_indexes: block.input_assignment[output_index] = ( block.scaled_inputs[output_index] @@ -557,7 +566,9 @@ def layer(b, layer_id): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - block.output_assignment = OmltConstraint(output_layer.output_indexes) + block.output_assignment = OmltConstraint( + output_layer.output_indexes, lang=block._format + ) for output_index in output_layer.output_indexes: block.output_assignment[output_index] = ( block.scaled_outputs[output_index] diff --git a/tests/base/test_block.py b/tests/base/test_block.py index d14db2c5..c451a24e 100644 --- a/tests/base/test_block.py +++ b/tests/base/test_block.py @@ -65,11 +65,11 @@ def test_jump_block(): ) with pytest.raises(ValueError, match=expected_msg) as excinfo: - m.b.x = OmltVar(initialize=(2, 7), format="jump") + m.b.x = OmltVar(initialize=(2, 7), lang="jump") assert str(excinfo.value) == expected_msg - m.b.y = OmltVar(initialize=2, format="jump") + m.b.y = 
OmltVar(initialize=2, lang="jump") assert m.b.y.value == 2 assert m.b.y.name == "y" m.b.y.lb = 0 From 29819ee014f6132079a7d3fdd991b88876fc7393 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue, 9 Jul 2024 21:20:36 +0000 Subject: [PATCH 66/75] Fixing an issue with linear trees --- src/omlt/linear_tree/lt_formulation.py | 5 +++-- tests/linear_tree/test_lt_formulation.py | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index 9b4eaba6..c0d017d2 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -260,6 +260,7 @@ def lb_rule(dsj, feat): return input_vars[feat] >= leaves[tree][leaf]["bounds"][feat][0] dsj.lb_constraint = OmltConstraint(features, rule=lb_rule) + def ub_rule(dsj, feat): return input_vars[feat] <= leaves[tree][leaf]["bounds"][feat][1] @@ -370,6 +371,6 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output sum(block.z[tree, leaf] for leaf in leaf_ids) == 1 ) - block.output_sum_of_trees = output_vars[0] == sum( - block.intermediate_output[tree] for tree in tree_ids + block.output_sum_of_trees = OmltConstraint( + expr=output_vars[0] == sum(block.intermediate_output[tree] for tree in tree_ids) ) diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py index 7f3ef059..39413837 100644 --- a/tests/linear_tree/test_lt_formulation.py +++ b/tests/linear_tree/test_lt_formulation.py @@ -595,9 +595,9 @@ def test_hybrid_bigm_formulation_multi_var(): formulation1_lt = LinearTreeHybridBigMFormulation(ltmodel_small) model1 = pe.ConcreteModel() - model1.x0 = pe.Var(initialize=0) - model1.x1 = pe.Var(initialize=0) - model1.y = pe.Var(initialize=0) + model1.x0 = OmltVar(initialize=0) + model1.x1 = OmltVar(initialize=0) + model1.y = OmltVar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) From cf24e85790d5b79018013caf03a47fcd2f7847cd Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 11 Jul 2024 22:40:17 +0000 Subject: [PATCH 67/75] Moving JuMP objects into their own file --- src/omlt/base/expression.py | 7 - src/omlt/base/julia.py | 379 ++++++++++++++++++++++++++++++++++++ src/omlt/base/var.py | 377 ----------------------------------- 3 files changed, 379 insertions(+), 384 deletions(-) diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index 734d8f98..cda0fdb7 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -5,13 +5,6 @@ from omlt.base import DEFAULT_MODELING_LANGUAGE -# from omlt.dependencies import julia_available - -# if julia_available: -# from omlt.base.julia import jl, jump, JumpVar -# from juliacall import AnyValue -# relations = {"==", ">=", "<=", ">", "<"} - class OmltExpr(ABC): def __new__(cls, *indexes, **kwargs: Any): diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index a34ec92f..8720d0e4 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,3 +1,4 @@ +from omlt.base.var import OmltIndexed, OmltScalar from omlt.dependencies import julia_available if julia_available: @@ -8,6 +9,7 @@ jl.seval("import JuMP") jump = jl.JuMP +# Elements class JuMPVarInfo: def __init__( @@ -115,3 +117,380 @@ def __mul__(self, other): def __eq__(self, other): return (self.omltvar == other)[self.index] + +# Variables + +class 
OmltScalarJuMP(OmltScalar): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.ScalarVar + + def __init__(self, **kwargs: Any): + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = self._bounds[0] + _ub = self._bounds[1] + elif self._bounds is None: + _lb = None + _ub = None + else: + msg = ("Bounds must be given as a tuple.", self._bounds) + raise ValueError(msg) + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + msg = ( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." + ) + raise ValueError(msg) + if _domain: + self.domain = _domain + elif _within: + self.domain = _within + else: + self.domain = None + + if self.domain == pyo.Binary: + self.binary = True + else: + self.binary = False + if self.domain == pyo.Integers: + self.integer = True + else: + self.integer = False + + _initialize = kwargs.pop("initialize", None) + + if _initialize: + if isinstance(_initialize, (int, float)): + self._value = _initialize + elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): + self._value = _initialize[0] + else: + # Pyomo's "scalar" variables can be multidimensional, they're + # just not indexed. JuMP scalar variables can only be a single + # dimension. Rewrite this error to be more helpful. + msg = ( + "Initial value for JuMP variables must be an int" + f" or float, but {type(_initialize)} was provided." + ) + raise ValueError(msg) + else: + self._value = None + + self._varinfo = JuMPVarInfo( + _lb, + _ub, + None, # fix value + self._value, + self.binary, + self.integer, + ) + self._constructed = False + self._parent = None + self._ctype = pyo.ScalarVar + self._name = None + + def construct(self, data=None): + self._var = JumpVar(self._varinfo, self._name) + self._var.omltvar = self + self._constructed = True + if self._parent: + self._blockvar = jump.add_variable( + self._parent()._jumpmodel, self.to_jumpvar() + ) + + def fix(self, value, *, skip_validation=True): + self.fixed = True + self._value = value + self._varinfo.fixed_value = value + self._varinfo.has_fix = value is not None + if self._constructed: + self.construct() + + @property + def bounds(self): + return (self.lb, self.ub) + + @bounds.setter + def bounds(self, val): + if val is None: + self.lb = None + self.ub = None + elif len(val) == 2: + self.lb = val[0] + self.ub = val[1] + + @property + def lb(self): + return self._varinfo.lower_bound + + @lb.setter + def lb(self, val): + self._varinfo.setlb(val) + if self._constructed: + self.construct() + + @property + def ub(self): + return self._varinfo.upper_bound + + @ub.setter + def ub(self, val): + self._varinfo.setub(val) + if self._constructed: + self.construct() + + @property + def value(self): + if self._constructed: + return self._var.value + return self._varinfo.start_value + + @value.setter + def value(self, val): + if self._constructed: + self._var.value = val + else: + self._varinfo.start_value = val + + @property + def ctype(self): + return self._ctype + + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + self._name = value + + def to_jumpvar(self): + if self._constructed: + return self._var.to_jump() + return self._varinfo.to_jump() + + def 
to_jumpexpr(self): + return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) + +class OmltIndexedJuMP(OmltIndexed): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.Var + + def __init__(self, *indexes, **kwargs: Any): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + else: + msg = ("Currently index cross-products are unsupported.") + raise ValueError(msg) + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set): + _lb = {k: v[0] for k, v in self._bounds.items()} + _ub = {k: v[1] for k, v in self._bounds.items()} + elif isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = {i: self._bounds[0] for i in self._index_set} + _ub = {i: self._bounds[1] for i in self._index_set} + elif self._bounds is None: + _lb = {i: None for i in self._index_set} + _ub = {i: None for i in self._index_set} + else: + msg = f"Bounds must be given as a tuple, but {self._bounds} was given." + raise TypeError(msg) + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + msg = ( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." + ) + raise ValueError(msg) + if _domain: + self.domain = _domain + elif _within: + self.domain = _within + else: + self.domain = None + + if self.domain == pyo.Binary: + self.binary = True + else: + self.binary = False + if self.domain == pyo.Integers: + self.integer = True + else: + self.integer = False + + _initialize = kwargs.pop("initialize", None) + + if _initialize: + # If starting values have same length as index set, + # take one for each variable in index. + if len(self._index_set) == len(_initialize): + self._value = _initialize + # If there's a single starting value, use it for all + # variables in index. 
+ elif len(_initialize) == 1: + self._value = {i: _initialize[0] for i in self._index_set} + else: + msg = ( + f"Index set has length {len(self._index_set)}, " + f"but initializer has length {len(_initialize)}." + ) + raise ValueError(msg) + else: + self._value = {i: None for i in self._index_set} + + self._varinfo = {} + for idx in self._index_set: + self._varinfo[idx] = JuMPVarInfo( + _lb[idx], + _ub[idx], + None, # fix value + self._value[idx], + self.binary, + self.integer, + ) + self._vars = {} + self._varrefs = {} + self._constructed = False + self._ctype = pyo.Var + self._parent = None + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._vars[item[0]] + return self._vars[item] + + def __setitem__(self, item, value): + self._varinfo[item] = value + if self._constructed: + self.construct() + + def keys(self): + if self._parent is not None: + return self._varrefs.keys() + return self._vars.keys() + + def values(self): + if self._parent is not None: + return self._varrefs.values() + return self._vars.values() + + def items(self): + if self._parent is not None: + return self._varrefs.items() + return self._vars.items() + + def fix(self, value=None): + self.fixed = True + if value is not None: + for vardata in self._varinfo.values(): + vardata.has_fix = True + vardata.fixed_value = value + else: + for vardata in self._varinfo.values(): + vardata.has_fix = True + + def __len__(self): + """Return the number of component data objects stored by this component.""" + return len(self._vars) + + def __contains__(self, idx): + """Return true if the index is in the dictionary.""" + return idx in self._vars + + # The default implementation is for keys() and __iter__ to be + # synonyms. The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys.""" + return self._vars.__iter__() + + def construct(self, data=None): + for idx in self._index_set: + if isinstance(idx, int): + name = str(self.name) + "[" + str(idx) + "]" + else: + name = str(self.name) + str(list(idx)).replace(" ", "") + self._vars[idx] = JumpVar(self._varinfo[idx], name) + self._vars[idx].omltvar = self + self._vars[idx].index = idx + if self._parent is not None: + block = self._parent() + if block._format == "jump" and block._jumpmodel is not None: + self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) + + self._constructed = True + + def setub(self, value): + for idx in self.index_set(): + self._varinfo[idx].setub(value) + if self._constructed: + self.construct() + + def setlb(self, value): + for idx in self.index_set(): + self._varinfo[idx].setlb(value) + if self._constructed: + self.construct() + + @property + def ctype(self): + return self._ctype + + def index_set(self): + return self._index_set + + @property + def name(self): + return self._name + + def to_jumpvar(self): + if self._constructed: + return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) + msg = "Variable must be constructed before exporting to JuMP." 
+ raise ValueError(msg) + + def to_jumpexpr(self): + return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} + + +# Constraints + +# Expressions \ No newline at end of file diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index e72b1775..751d0a61 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -11,11 +11,6 @@ import pyomo.environ as pyo from omlt.base import DEFAULT_MODELING_LANGUAGE, expression -from omlt.dependencies import julia_available - -if julia_available: - from omlt.base import jump -from omlt.base.julia import JumpVar, JuMPVarInfo class OmltVar(ABC): @@ -196,173 +191,6 @@ def __abs__(self): return pyo.NumericValue.__abs__(self) -class OmltScalarJuMP(OmltScalar): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.ScalarVar - - def __init__(self, **kwargs: Any): - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = self._bounds[0] - _ub = self._bounds[1] - elif self._bounds is None: - _lb = None - _ub = None - else: - msg = ("Bounds must be given as a tuple.", self._bounds) - raise ValueError(msg) - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - msg = ( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - raise ValueError(msg) - if _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - if isinstance(_initialize, (int, float)): - self._value = _initialize - elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): - self._value = _initialize[0] - else: - # Pyomo's "scalar" variables can be multidimensional, they're - # just not indexed. JuMP scalar variables can only be a single - # dimension. Rewrite this error to be more helpful. - msg = ( - "Initial value for JuMP variables must be an int" - f" or float, but {type(_initialize)} was provided." 
- ) - raise ValueError(msg) - else: - self._value = None - - self._varinfo = JuMPVarInfo( - _lb, - _ub, - None, # fix value - self._value, - self.binary, - self.integer, - ) - self._constructed = False - self._parent = None - self._ctype = pyo.ScalarVar - self._name = None - - def construct(self, data=None): - self._var = JumpVar(self._varinfo, self._name) - self._var.omltvar = self - self._constructed = True - if self._parent: - self._blockvar = jump.add_variable( - self._parent()._jumpmodel, self.to_jumpvar() - ) - - def fix(self, value, *, skip_validation=True): - self.fixed = True - self._value = value - self._varinfo.fixed_value = value - self._varinfo.has_fix = value is not None - if self._constructed: - self.construct() - - @property - def bounds(self): - return (self.lb, self.ub) - - @bounds.setter - def bounds(self, val): - if val is None: - self.lb = None - self.ub = None - elif len(val) == 2: - self.lb = val[0] - self.ub = val[1] - - @property - def lb(self): - return self._varinfo.lower_bound - - @lb.setter - def lb(self, val): - self._varinfo.setlb(val) - if self._constructed: - self.construct() - - @property - def ub(self): - return self._varinfo.upper_bound - - @ub.setter - def ub(self, val): - self._varinfo.setub(val) - if self._constructed: - self.construct() - - @property - def value(self): - if self._constructed: - return self._var.value - return self._varinfo.start_value - - @value.setter - def value(self, val): - if self._constructed: - self._var.value = val - else: - self._varinfo.start_value = val - - @property - def ctype(self): - return self._ctype - - @property - def name(self): - return self._name - - @name.setter - def name(self, value): - self._name = value - - def to_jumpvar(self): - if self._constructed: - return self._var.to_jump() - return self._varinfo.to_jump() - - def to_jumpexpr(self): - return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) - - class OmltIndexed(OmltVar): def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} @@ -493,208 +321,3 @@ def __pos__(self): def __abs__(self): return pyo.NumericValue.__abs__(self) - - -class OmltIndexedJuMP(OmltIndexed): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. 
- @property - def __class__(self): - return pyo.Var - - def __init__(self, *indexes, **kwargs: Any): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - else: - msg = ("Currently index cross-products are unsupported.") - raise ValueError(msg) - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set): - _lb = {k: v[0] for k, v in self._bounds.items()} - _ub = {k: v[1] for k, v in self._bounds.items()} - elif isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = {i: self._bounds[0] for i in self._index_set} - _ub = {i: self._bounds[1] for i in self._index_set} - elif self._bounds is None: - _lb = {i: None for i in self._index_set} - _ub = {i: None for i in self._index_set} - else: - msg = ( - "Bounds must be given as a tuple," " but %s was given.", self._bounds - ) - raise TypeError(msg) - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - msg = ( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - raise ValueError(msg) - if _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - # If starting values have same length as index set, - # take one for each variable in index. - if len(self._index_set) == len(_initialize): - self._value = _initialize - # If there's a single starting value, use it for all - # variables in index. 
- elif len(_initialize) == 1: - self._value = {i: _initialize[0] for i in self._index_set} - else: - msg = ( - "Index set has length %s, but initializer has length %s.", - len(self._index_set), - len(_initialize), - ) - raise ValueError(msg) - else: - self._value = {i: None for i in self._index_set} - - self._varinfo = {} - for idx in self._index_set: - self._varinfo[idx] = JuMPVarInfo( - _lb[idx], - _ub[idx], - None, # fix value - self._value[idx], - self.binary, - self.integer, - ) - self._vars = {} - self._varrefs = {} - self._constructed = False - self._ctype = pyo.Var - self._parent = None - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._vars[item[0]] - return self._vars[item] - - def __setitem__(self, item, value): - self._varinfo[item] = value - if self._constructed: - self.construct() - - def keys(self): - if self._parent is not None: - return self._varrefs.keys() - return self._vars.keys() - - def values(self): - if self._parent is not None: - return self._varrefs.values() - return self._vars.values() - - def items(self): - if self._parent is not None: - return self._varrefs.items() - return self._vars.items() - - def fix(self, value=None): - self.fixed = True - if value is not None: - for vardata in self._varinfo(): - vardata.has_fix = True - vardata.fixed_value = value - else: - for vardata in self._varinfo(): - vardata.has_fix = True - - def __len__(self): - """Return the number of component data objects stored by this component.""" - return len(self._vars) - - def __contains__(self, idx): - """Return true if the index is in the dictionary.""" - return idx in self._vars - - # The default implementation is for keys() and __iter__ to be - # synonyms. The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys.""" - return self._vars.__iter__() - - def construct(self, data=None): - for idx in self._index_set: - if isinstance(idx, int): - name = str(self.name) + "[" + str(idx) + "]" - else: - name = str(self.name) + str(list(idx)).replace(" ", "") - self._vars[idx] = JumpVar(self._varinfo[idx], name) - self._vars[idx].omltvar = self - self._vars[idx].index = idx - if self._parent is not None: - block = self._parent() - if block._format == "jump" and block._jumpmodel is not None: - self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) - - self._constructed = True - - def setub(self, value): - for idx in self.index_set(): - self._varinfo[idx][2] = True - self._varinfo[idx][3] = value - if self._constructed: - self.construct() - - def setlb(self, value): - for idx in self.index_set(): - self._varinfo[idx][0] = True - self._varinfo[idx][1] = value - if self._constructed: - self.construct() - - @property - def ctype(self): - return self._ctype - - def index_set(self): - return self._index_set - - @property - def name(self): - return self._name - - def to_jumpvar(self): - if self._constructed: - return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) - msg = "Variable must be constructed before exporting to JuMP." 
- raise ValueError(msg) - - def to_jumpexpr(self): - return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} From 5b8e1fe9cdbfca084a0bfdd231b4387a7ed95a42 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 15 Jul 2024 21:49:18 +0000 Subject: [PATCH 68/75] Adding tests --- .coveragerc | 1 + src/omlt/base/expression.py | 41 +++++----- src/omlt/base/pyomo.py | 44 +++++------ src/omlt/base/var.py | 84 ++++---------------- src/omlt/block.py | 19 +---- tests/base/test_constraint.py | 40 ++++++++++ tests/base/test_expression.py | 143 ++++++++++++++++++++++++++++++++++ tests/base/test_var.py | 66 +++++++++++++++- 8 files changed, 308 insertions(+), 130 deletions(-) create mode 100644 tests/base/test_constraint.py create mode 100644 tests/base/test_expression.py diff --git a/.coveragerc b/.coveragerc index e21d4ef1..828488b7 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,6 +3,7 @@ branch = True source = optml # omit = bad_file.py +omit = julia.py [paths] source = diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index cda0fdb7..543c300a 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -28,25 +28,13 @@ def is_expression_type(self): @abstractmethod def is_indexed(self): - pass + """Return False for a scalar expression, True for an indexed expression.""" + def valid_model_component(self): """Return True if this can be used as a model component.""" return True - @property - @abstractmethod - def args(self): - pass - - @abstractmethod - def arg(self, index): - pass - - @abstractmethod - def nargs(self): - pass - class OmltExprScalar(OmltExpr): def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): @@ -63,11 +51,25 @@ def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): instance._format = lang return instance + def is_indexed(self): + return False + + @abstractmethod def is_potentially_variable(self): - pass + """Return True if the expression has variable arguments, False if constant.""" - def __mul__(self, other): - pass + @property + @abstractmethod + def args(self): + """Return a list of the args of the expression.""" + + @abstractmethod + def arg(self, index): + """Return the arg corresponding to the given index.""" + + @abstractmethod + def nargs(self): + """Return the number of arguments.""" class OmltExprIndexed(OmltExpr): @@ -75,7 +77,7 @@ def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} if lang not in subclass_map: msg = ( - "Variable format %s not recognized. Supported formats are 'pyomo'" + "Expression format %s not recognized. 
Supported formats are 'pyomo'" " or 'jump'.", lang, ) @@ -85,3 +87,6 @@ def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): instance.__init__(*indexes, **kwargs) instance._format = lang return instance + + def is_indexed(self): + return True diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index e608f2d8..94e47aa4 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -20,8 +20,9 @@ class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): def __init__(self, *args, **kwargs: Any): kwargs.pop("lang", None) self._pyovar = pyo.ScalarVar(*args, **kwargs) + self._name = None self._parent = None - self._constructed = None + self._constructed = self._pyovar._constructed def construct(self, data=None): return self._pyovar.construct(data) @@ -90,6 +91,7 @@ class OmltIndexedPyomo(pyo.Var, OmltIndexed): def __init__(self, *indexes, **kwargs: Any): kwargs.pop("lang", None) super().__init__(*indexes, **kwargs) + self.bounds = (None, None) def fix(self, value=None, *, skip_validation=False): self.fixed = True @@ -101,10 +103,12 @@ def fix(self, value=None, *, skip_validation=False): vardata.fix(value, skip_validation) def setub(self, value): + self.bounds = (self.bounds[0], value) for vardata in self.values(): vardata.ub = value def setlb(self, value): + self.bounds = (value, self.bounds[1]) for vardata in self.values(): vardata.lb = value @@ -130,12 +134,8 @@ def __init__(self, *args, **kwargs: Any): pyoexpr = self.lhs == self.rhs if self.sense == ">=": pyoexpr = self.lhs >= self.rhs - if self.sense == ">": - pyoexpr = self.lhs > self.rhs if self.sense == "<=": pyoexpr = self.lhs <= self.rhs - if self.sense == "<": - pyoexpr = self.lhs < self.rhs self.constraint = pyo.Constraint(expr=pyoexpr) self.constraint._parent = self._parent @@ -242,10 +242,10 @@ class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): def __init__(self, expr=None, **kwargs: Any): self._index_set = {} - if isinstance(expr, (pyo.Expression, pyo.NumericValue)): - self._expression = expr - elif isinstance(expr, OmltExprScalarPyomo): + if isinstance(expr, OmltExprScalarPyomo): self._expression = expr._expression + elif isinstance(expr, (pyo.Expression, pyo.NumericValue)): + self._expression = expr elif isinstance(expr, tuple): self._expression = self._parse_expression_tuple(expr) else: @@ -294,9 +294,6 @@ def __class__(self): def is_potentially_variable(self): return self._expression.is_potentially_variable() - def is_indexed(self): - return False - def as_numeric(self): return self._expression._apply_operation(self._expression.args) @@ -332,56 +329,56 @@ def __add__(self, other): expr = self._expression + other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression + other - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __sub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression - other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression - other - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __mul__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression * other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression * other - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __div__(self, other): if isinstance(other, OmltExprScalarPyomo): 
expr = self._expression / other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression / other - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __truediv__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression / other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression / other - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __radd__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression + self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other + self._expression - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __rsub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression - self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other - self._expression - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __rmul__(self, other): if isinstance(other, OmltExprScalar): expr = other._expression * self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other * self._expression - return OmltExprScalar(format=self._format, expr=expr) + return OmltExprScalar(lang=self._format, expr=expr) def __ge__(self, other): if isinstance(other, OmltExprScalarPyomo): @@ -391,7 +388,7 @@ def __ge__(self, other): else: rhs = other return OmltConstraintScalar( - model=self._parent, format=self._format, lhs=self, sense=">=", rhs=rhs + model=self._parent, lang=self._format, lhs=self, sense=">=", rhs=rhs ) def __le__(self, other): @@ -402,7 +399,7 @@ def __le__(self, other): else: rhs = other return OmltConstraintScalar( - model=self._parent, format=self._format, lhs=self, sense="<=", rhs=rhs + model=self._parent, lang=self._format, lhs=self, sense="<=", rhs=rhs ) def __eq__(self, other): @@ -413,7 +410,7 @@ def __eq__(self, other): else: rhs = other return OmltConstraintScalar( - model=self._parent, format=self._format, lhs=self, sense="==", rhs=rhs + model=self._parent, lang=self._format, lhs=self, sense="==", rhs=rhs ) @@ -436,9 +433,6 @@ def __init__( self._format = format self._expression = pyo.Expression(self._index_set, expr=expr) - def is_indexed(self): - return True - def expression_as_dict(self): if len(self._index_set) == 1: return {self._index_set[0]: self._expression} diff --git a/src/omlt/base/var.py index 751d0a61..71aef65b 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -23,21 +23,21 @@ def __new__(cls, *indexes, **kwargs: Any): @abstractmethod def construct(self, data): - pass + """Construct the variable.""" @abstractmethod def fix(self, value, skip_validation): - pass + """Fix the value of the variable.""" @property @abstractmethod def ctype(self): - pass + """Return the type of the variable.""" @property @abstractmethod def name(self): - pass + """Return the name of the variable.""" # Some methods to tell OMLT (and Pyomo components) that this # is a variable. 
@@ -46,7 +46,7 @@ def is_component_type(self): @abstractmethod def is_indexed(self): - pass + """Return False for a scalar variable, True for an indexed variable.""" def valid_model_component(self): """Return True if this can be used as a model component.""" @@ -77,53 +77,53 @@ def is_indexed(self): @property @abstractmethod def bounds(self): - pass + """Return a tuple with the lower and upper bounds.""" @bounds.setter @abstractmethod def bounds(self, val): - pass + """Set lower and upper bounds to the given tuple.""" @property @abstractmethod def lb(self): - pass + """Return the lower bound of the variable.""" @lb.setter @abstractmethod def lb(self, val): - pass + """Set lower bound to the given value.""" @property @abstractmethod def ub(self): - pass + """Return the upper bound of the variable.""" @ub.setter @abstractmethod def ub(self, val): - pass + """Set upper bound to the given value.""" @property @abstractmethod def domain(self): - pass + """Return the set of allowable values.""" @domain.setter @abstractmethod def domain(self, val): - pass + """Set the allowable values to the given set.""" # Interface for getting/setting value @property @abstractmethod def value(self): - pass + """Return the current value of the variable.""" @value.setter @abstractmethod def value(self, val): - pass + """Set the current value of the variable.""" # Interface governing how variables behave in expressions. @@ -163,33 +163,6 @@ def __rtruediv__(self, other): def __rpow__(self, other): return expression.OmltExprScalar(lang=self._format, expr=(other, "**", self)) - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) - class OmltIndexed(OmltVar): def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): @@ -294,30 +267,3 @@ def __rtruediv__(self, other): def __rpow__(self, other): return expression.OmltExprIndexed(self.index_set(), expr=(other, "**", self)) - - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) diff --git a/src/omlt/block.py b/src/omlt/block.py index ab5559b5..10827b48 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -25,15 +25,11 @@ class is used in combination with a formulation object to construct the """ -from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVar -from omlt.dependencies import julia_available - -if julia_available: - from omlt.base 
import jump - import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block +from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVar + @declare_custom_block(name="OmltBlock") class OmltBlockData(_BlockData): @@ -43,15 +39,10 @@ def __init__(self, component): self.__input_indexes = None self.__output_indexes = None self._format = DEFAULT_MODELING_LANGUAGE - if self._format == "jump": - self._jumpmodel = jump.Model() - else: - self._jumpmodel = None def set_format(self, lang): self._format = lang - if self._format == "jump" and self._jumpmodel is None: - self._jumpmodel = jump.Model() + def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """Setup inputs and outputs. @@ -111,10 +102,6 @@ def build_formulation(self, formulation, lang=None): if lang is not None: self._format = lang - if self._format == "jump": - self._jumpmodel = jump.Model() - - self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/tests/base/test_constraint.py b/tests/base/test_constraint.py new file mode 100644 index 00000000..ab46058c --- /dev/null +++ b/tests/base/test_constraint.py @@ -0,0 +1,40 @@ +import pyomo.environ as pyo +import pytest +from omlt.base import OmltConstraint, OmltExpr, OmltExprIndexed, OmltExprScalar, OmltVar + +VAR1_VALUE = 6 +VAR2_VALUE = 3 +CONST_VALUE = 4 + +def test_build_constraint(): + v1 = OmltVar() + v1.domain = pyo.Integers + v1.value = VAR1_VALUE + e1 = v1 + CONST_VALUE + + v2 = OmltVar() + v2.domain = pyo.Integers + v2.value = VAR2_VALUE + e2 = v2 + CONST_VALUE + + c_eq = e1 == e2 + + assert c_eq.sense == "==" + assert id(c_eq.lhs) == id(e1._expression) + + c_le = OmltConstraint(lhs=e1, sense="<=", rhs=e2) + + assert c_le.sense == "<=" + assert id(c_le.rhs) == id(e2._expression) + +def test_constraint_invalid_lang(): + expected_msg = ( + "Constraint format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'." 
+ ) + + with pytest.raises(ValueError, match=expected_msg): + OmltConstraint(lang="test") + + with pytest.raises(ValueError, match=expected_msg): + OmltConstraint(range(3), lang="test") diff --git a/tests/base/test_expression.py b/tests/base/test_expression.py new file mode 100644 index 00000000..40c78bbc --- /dev/null +++ b/tests/base/test_expression.py @@ -0,0 +1,143 @@ +import pyomo.environ as pyo +import pytest +from omlt.base import OmltExpr, OmltExprIndexed, OmltExprScalar, OmltVar + +VAR1_VALUE = 6 +VAR2_VALUE = 3 +CONST_VALUE = 4 + +def _test_build_scalar_expressions(lang): + v1 = OmltVar(lang=lang) + v2 = OmltVar(lang=lang) + + v1.domain = pyo.Integers + v2.domain = pyo.Integers + v1.value = VAR1_VALUE + v2.value = VAR2_VALUE + + v_sum = v1 + v2 + assert(isinstance(v_sum, OmltExpr)) + assert(v_sum() == VAR1_VALUE + VAR2_VALUE) + + v_diff = v1 - v2 + assert(isinstance(v_diff, OmltExpr)) + assert(v_diff() == VAR1_VALUE - VAR2_VALUE) + + v_prod = v1 * v2 + assert(isinstance(v_prod, OmltExpr)) + assert(v_prod() == VAR1_VALUE * VAR2_VALUE) + + v_quot = v1 / v2 + assert(isinstance(v_quot, OmltExpr)) + assert(v_quot() == VAR1_VALUE / VAR2_VALUE) + + v_radd = CONST_VALUE + v1 + assert(isinstance(v_radd, OmltExpr)) + assert(v_radd() == CONST_VALUE + VAR1_VALUE) + + v_rsub = CONST_VALUE - v1 + assert(isinstance(v_rsub, OmltExpr)) + assert(v_rsub() == CONST_VALUE - VAR1_VALUE) + + v_rprod = CONST_VALUE * v1 + assert(isinstance(v_rprod, OmltExpr)) + assert(v_rprod() == CONST_VALUE * VAR1_VALUE) + + v_rquot = CONST_VALUE / v1 + assert(isinstance(v_rquot, OmltExpr)) + assert(v_rquot() == CONST_VALUE / VAR1_VALUE) + + +def test_build_scalar_exp_pyomo(): + _test_build_scalar_expressions("pyomo") + +def test_init_scalar_expression(): + v1 = OmltVar() + v1.domain = pyo.Integers + v1.value = VAR1_VALUE + e1 = v1 + CONST_VALUE + + e2 = OmltExprScalar(expr=e1) + + assert e2.ctype == pyo.Expression + assert e2.is_component_type() + assert e2.is_expression_type() + assert e2.valid_model_component() + assert not e2.is_indexed() + + assert e2.nargs() == 2 + assert e2.args[1] == CONST_VALUE + assert e2.arg(1) == CONST_VALUE + assert len(e2) == 1 + assert e2() == VAR1_VALUE + CONST_VALUE + + expected_msg = ( + "Expression %s type %s not recognized." + ) + + with pytest.raises(TypeError, match=expected_msg): + OmltExprScalar(expr="test") + + expected_msg = ( + "Expression format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'." + ) + with pytest.raises(ValueError, match=expected_msg): + OmltExprScalar(lang="test") + + expected_msg = "Expression middle term was {%s}." 
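(For reference, the tuple form of the constructor mirrors the operator overloads on the variable classes: `v1 + CONST_VALUE` builds the same `(lhs, op, rhs)` tuple internally. A sketch of the accepted form, reusing this test's names:

    e_ok = OmltExprScalar(expr=(v1, "+", CONST_VALUE))  # same tuple v1 + CONST_VALUE builds
    assert e_ok() == VAR1_VALUE + CONST_VALUE

Any middle term that is not one of the operator strings produced by the overloads, such as the "test" below, raises the ValueError being checked.)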
+ with pytest.raises(ValueError, match=expected_msg): + OmltExprScalar(expr=(v1, "test", CONST_VALUE)) + +def test_combine_scalar_expression(): + v1 = OmltVar() + v1.domain = pyo.Integers + v1.value = VAR1_VALUE + e1 = v1 + CONST_VALUE + + v2 = OmltVar() + v2.domain = pyo.Integers + v2.value = VAR2_VALUE + e2 = v2 + CONST_VALUE + + e_sum = e1 + e2 + assert e_sum() == VAR1_VALUE + VAR2_VALUE + 2*CONST_VALUE + + e_diff = e1 - e2 + assert e_diff() == VAR1_VALUE - VAR2_VALUE + + e_prod = e1 * e2 + assert e_prod() == (VAR1_VALUE + CONST_VALUE) * (VAR2_VALUE + CONST_VALUE) + + + p_sum = e1 + CONST_VALUE + assert p_sum() == VAR1_VALUE + 2*CONST_VALUE + + p_diff = e1 - CONST_VALUE + assert p_diff() == VAR1_VALUE + + p_prod = e1 * CONST_VALUE + assert p_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE + + +def test_init_indexed_expression(): + v1 = OmltVar(range(3)) + v1.domain = pyo.Integers + v1.value = VAR1_VALUE + e1 = v1 + CONST_VALUE + + e2 = OmltExpr(range(3), expr=e1) + + assert e2.ctype == pyo.Expression + assert e2.is_component_type() + assert e2.is_expression_type() + assert e2.valid_model_component() + assert e2.is_indexed() + + expected_msg = ( + "Expression format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'." + ) + + with pytest.raises(ValueError, match=expected_msg): + OmltExprIndexed(range(3), lang="test") \ No newline at end of file diff --git a/tests/base/test_var.py b/tests/base/test_var.py index fa890801..7473845f 100644 --- a/tests/base/test_var.py +++ b/tests/base/test_var.py @@ -1,3 +1,5 @@ +import re + import pyomo.environ as pyo import pytest from omlt.base import OmltVar @@ -5,19 +7,32 @@ def _test_scalar_var(lang): - v = OmltVar(lang=lang, initialize=2, domain=pyo.Integers) + v = OmltVar(lang=lang, initialize=2) + assert v._parent is None + assert v._constructed is False + assert v.name is None assert v.is_indexed() is False assert v.ctype == pyo.ScalarVar + assert v.is_component_type() + assert v.valid_model_component() v.construct() + assert v.is_constructed() v.value = 3 assert v.value == 3 + + v.fix(2, skip_validation=True) v.bounds = (0, 5) assert v.lb == 0 assert v.ub == 5 - assert v.bounds == (0, 5) + v.lb = 1 + v.ub = 3 + assert v.bounds == (1, 3) + + v.domain = pyo.Integers + assert v.domain == pyo.Integers def test_scalar_pyomo(): @@ -29,3 +44,50 @@ def test_scalar_pyomo(): ) def test_scalar_jump(): _test_scalar_var("jump") + +def test_scalar_invalid_lang(): + expected_msg = ( + "Variable format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'." + ) + with pytest.raises(ValueError, match=expected_msg): + OmltVar(lang="test") + +def _test_indexed_var(lang): + v = OmltVar(range(4), lang=lang, initialize=2) + assert v._parent is None + assert v._constructed is False + assert v.is_indexed() is True + assert v.ctype == pyo.Var + + v.construct() + assert v.is_constructed() + + v.value = 3 + assert v.value == 3 + + + v.fix(2, skip_validation=True) + for e in v: + assert v[e].value == 2 + + v.fix() + + v.bounds = (0, 5) + v.setlb(1) + v.setub(3) + assert v.bounds == (1, 3) + + v.domain = pyo.Integers + assert v.domain == pyo.Integers + +def test_indexed_pyomo(): + _test_indexed_var("pyomo") + +def test_indexed_invalid_lang(): + expected_msg = ( + "Variable format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'." 
+ ) + with pytest.raises(ValueError, match=expected_msg): + OmltVar(range(3), lang="test") From f94b765c6fa10ac32862acc303c063cf855db037 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Jul 2024 23:41:25 +0000 Subject: [PATCH 69/75] Improving test coverage --- src/omlt/base/__init__.py | 29 ++++- src/omlt/base/constraint.py | 28 +--- src/omlt/base/expression.py | 60 +++------ src/omlt/base/julia.py | 16 ++- src/omlt/base/language.py | 1 + src/omlt/base/pyomo.py | 157 ++++------------------- src/omlt/base/var.py | 70 ++-------- src/omlt/formulation.py | 4 +- tests/base/test_block.py | 38 +----- tests/base/test_constraint.py | 62 +++++++-- tests/base/test_expression.py | 51 ++++---- tests/base/test_var.py | 1 - tests/gbt/test_gbt_formulation.py | 4 +- tests/linear_tree/test_lt_formulation.py | 6 +- 14 files changed, 174 insertions(+), 353 deletions(-) create mode 100644 src/omlt/base/language.py diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 5fbdddc4..92e1b811 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,20 +1,39 @@ -DEFAULT_MODELING_LANGUAGE = "pyomo" - from omlt.dependencies import julia_available if julia_available: from omlt.base.julia import jl, jump -from omlt.base.constraint import OmltConstraint +from omlt.base.language import DEFAULT_MODELING_LANGUAGE +from omlt.base.constraint import ( + OmltConstraint, + OmltConstraintIndexed, + OmltConstraintScalar, +) from omlt.base.expression import OmltExpr -from omlt.base.pyomo import * -from omlt.base.var import OmltVar +from omlt.base.pyomo import ( + OmltConstraintIndexedPyomo, + OmltConstraintScalarPyomo, + OmltExprScalarPyomo, + OmltIndexedPyomo, + OmltScalarPyomo, +) +from omlt.base.var import OmltIndexed, OmltScalar, OmltVar __all__ = [ + "DEFAULT_MODELING_LANGUAGE", "julia_available", "jl", "jump", "OmltExpr", + "OmltScalar", + "OmltIndexed", "OmltVar", + "OmltConstraintIndexed", + "OmltConstraintScalar", "OmltConstraint", + "OmltConstraintIndexedPyomo", + "OmltConstraintScalarPyomo", + "OmltExprScalarPyomo", + "OmltIndexedPyomo", + "OmltScalarPyomo", ] diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py index 9687022b..b96b0a64 100644 --- a/src/omlt/base/constraint.py +++ b/src/omlt/base/constraint.py @@ -85,11 +85,11 @@ def __init__(self, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): self._parent = None def __call__(self, *args: Any, **kwds: Any) -> Any: - pass + """Return the value of the body of the constraint.""" @property def args(self): - pass + """Return an iterator over the arguments of the constraint.""" class OmltConstraintIndexed(OmltConstraint): @@ -111,22 +111,6 @@ def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): def __init__(self, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): self._index_set = indexes - lhs = kwargs.pop("lhs", None) - if lhs: - self.lhs = lhs - sense = kwargs.pop("sense", None) - if sense: - self.sense = sense - rhs = kwargs.pop("rhs", None) - if rhs: - self.rhs = rhs - if not lhs and not sense and not rhs: - expr_tuple = kwargs.pop("expr_tuple", None) - if expr_tuple and expr_tuple[1] in {"==", ">=", "<=", ">", "<", "in"}: - self.lhs = expr_tuple[0] - self.sense = expr_tuple[1] - self.rhs = expr_tuple[2] - self.model = kwargs.pop("model", None) self._parent = None self.name = None @@ -142,19 +126,19 @@ def keys(self, sort=False): @property @abstractmethod def _constructed(self): - pass + """Return True if the constraint has been 
constructed.""" @property @abstractmethod def _active(self): - pass + """Return True if the constraint is active.""" @_active.setter @abstractmethod def _active(self, val): - pass + """Set the constraint status to active or inactive.""" @property @abstractmethod def _data(self): - pass + """Return data from the constraint.""" diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index 543c300a..a70015c4 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -7,13 +7,18 @@ class OmltExpr(ABC): - def __new__(cls, *indexes, **kwargs: Any): - if not indexes: - instance = super().__new__(OmltExprScalar) - instance.__init__(**kwargs) - else: - instance = super().__new__(OmltExprIndexed) - instance.__init__(*indexes, **kwargs) + def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if lang not in subclass_map: + msg = ( + "Expression format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + lang, + ) + raise ValueError(msg) + subclass = subclass_map[lang] + instance = super().__new__(subclass) + instance._format = lang return instance @property @@ -35,24 +40,9 @@ def valid_model_component(self): """Return True if this can be used as a model component.""" return True - -class OmltExprScalar(OmltExpr): - def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Expression format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltExpr, cls).__new__(subclass) - instance._format = lang - return instance - - def is_indexed(self): - return False + @abstractmethod + def __call__(self): + """Return the current value of the expression.""" @abstractmethod def is_potentially_variable(self): @@ -70,23 +60,3 @@ def arg(self, index): @abstractmethod def nargs(self): """Return the number of arguments.""" - - -class OmltExprIndexed(OmltExpr): - def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Expression format %s not recognized. 
Supported formats are 'pyomo'" - " or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltExpr, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - instance._format = lang - return instance - - def is_indexed(self): - return True diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index 8720d0e4..883b9c10 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,13 +1,15 @@ +import pyomo.environ as pyo + from omlt.base.var import OmltIndexed, OmltScalar from omlt.dependencies import julia_available if julia_available: from juliacall import Base - from juliacall import Main as jl + from juliacall import Main as Jl jl_err = Base.error - jl.seval("import JuMP") - jump = jl.JuMP + Jl.seval("import JuMP") + jump = Jl.JuMP # Elements @@ -18,6 +20,7 @@ def __init__( upper_bound=None, fixed_value=None, start_value=None, + *, binary=False, integer=False, ): @@ -100,8 +103,7 @@ def value(self): def add_to_model(self, model, name=None): if name is None: name = self.name - variable_ref = jump.add_variable(model, self.var, name) - return variable_ref + return jump.add_variable(model, self.var, name) def to_jump(self): return self.var @@ -129,7 +131,7 @@ class OmltScalarJuMP(OmltScalar): def __class__(self): return pyo.ScalarVar - def __init__(self, **kwargs: Any): + def __init__(self, **kwargs): self._block = kwargs.pop("block", None) self._bounds = kwargs.pop("bounds", None) @@ -493,4 +495,4 @@ def to_jumpexpr(self): # Constraints -# Expressions \ No newline at end of file +# Expressions diff --git a/src/omlt/base/language.py b/src/omlt/base/language.py new file mode 100644 index 00000000..19665e5f --- /dev/null +++ b/src/omlt/base/language.py @@ -0,0 +1 @@ +DEFAULT_MODELING_LANGUAGE = "pyomo" diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 94e47aa4..95e21ae1 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -3,17 +3,19 @@ This file contains implementations of the OMLT classes, using Pyomo objects as the underlying data storage mechanism. 
""" + from typing import Any import pyomo.environ as pyo from pyomo.core.base.var import _GeneralVarData from omlt.base.constraint import OmltConstraintIndexed, OmltConstraintScalar -from omlt.base.expression import OmltExprIndexed, OmltExprScalar +from omlt.base.expression import OmltExpr from omlt.base.var import OmltIndexed, OmltScalar # Variables + class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): format = "pyomo" @@ -112,22 +114,20 @@ def setlb(self, value): for vardata in self.values(): vardata.lb = value + # Constraints + class OmltConstraintScalarPyomo(OmltConstraintScalar, pyo.Constraint): format = "pyomo" def __init__(self, *args, **kwargs: Any): super().__init__(*args, **kwargs) self.lhs = ( - self.lhs._expression - if isinstance(self.lhs, OmltExprScalar) - else self.lhs + self.lhs._expression if isinstance(self.lhs, OmltExprScalarPyomo) else self.lhs ) self.rhs = ( - self.rhs._expression - if isinstance(self.rhs, OmltExprScalar) - else self.rhs + self.rhs._expression if isinstance(self.rhs, OmltExprScalarPyomo) else self.rhs ) if self.sense == "==": @@ -171,6 +171,7 @@ def _data(self): def is_indexed(self): return False + class OmltConstraintIndexedPyomo(OmltConstraintIndexed, pyo.Constraint): format = "pyomo" @@ -178,9 +179,10 @@ def __init__(self, *args, **kwargs: Any): super().__init__(*args, **kwargs) kwargs.pop("model", None) kwargs.pop("lang", None) - + kwargs.pop("expr_tuple", None) self.constraint = pyo.Constraint(*self._index_set, **kwargs) self._index_set = self.constraint._index_set + self.constraint._parent = self._parent self.constraint.construct() self.model = self.constraint.model @@ -193,7 +195,7 @@ def __setitem__(self, index, expr): self.constraints[index] = self.constraint[index] else: msg = ( - "Couldn't find index %s in index set %.", + "Couldn't find index %s in index set %s.", index, list(self._index_set.data()), ) @@ -203,7 +205,7 @@ def __getitem__(self, index): if index in self.constraint._index_set: return self.constraint[index] msg = ( - "Couldn't find index %s in index set %.", + "Couldn't find index %s in index set %s.", index, list(self._index_set.data()), ) @@ -235,12 +237,14 @@ def _data(self): def doc(self): return self.constraint.doc + # Expressions -class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): + +class OmltExprScalarPyomo(OmltExpr, pyo.Expression): format = "pyomo" - def __init__(self, expr=None, **kwargs: Any): + def __init__(self, expr=None, **kwargs): self._index_set = {} if isinstance(expr, OmltExprScalarPyomo): self._expression = expr._expression @@ -324,61 +328,57 @@ def __len__(self): def __call__(self): return self._expression() + def is_indexed(self): + return False + def __add__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression + other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression + other - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __sub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression - other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression - other - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __mul__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression * other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression * other - return OmltExprScalar(lang=self._format, expr=expr) + return 
OmltExpr(lang=self._format, expr=expr) def __div__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression / other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression / other - return OmltExprScalar(lang=self._format, expr=expr) - - def __truediv__(self, other): - if isinstance(other, OmltExprScalarPyomo): - expr = self._expression // other._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = self._expression // other - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __radd__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression + self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other + self._expression - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __rsub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression - self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other - self._expression - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __rmul__(self, other): - if isinstance(other, OmltExprScalar): + if isinstance(other, OmltExprScalarPyomo): expr = other._expression * self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other * self._expression - return OmltExprScalar(lang=self._format, expr=expr) + return OmltExpr(lang=self._format, expr=expr) def __ge__(self, other): if isinstance(other, OmltExprScalarPyomo): @@ -412,106 +412,3 @@ def __eq__(self, other): return OmltConstraintScalar( model=self._parent, lang=self._format, lhs=self, sense="==", rhs=rhs ) - - -class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): - format = "pyomo" - - def __init__( - self, *indexes, expr=None, **kwargs: Any - ): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - elif len(indexes) > 1: - raise ValueError("Currently index cross-products are unsupported.") - else: - self._index_set = {} - self._format = format - self._expression = pyo.Expression(self._index_set, expr=expr) - - def expression_as_dict(self): - if len(self._index_set) == 1: - return {self._index_set[0]: self._expression} - return {k: self._expression[k] for k in self._index_set} - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._expression[item[0]] - return self._expression[item] - - def __setitem__(self, item, value): - self._expression[item] = value - - def keys(self): - return self._expression.keys() - - def values(self): - return self._expression.values() - - def items(self): - return self._expression.items() - - def __len__(self): - """Return the number of component data objects stored by this component.""" - return len(self._expression) - - def __contains__(self, idx): - """Return true if the index is in the dictionary.""" - return idx in self._expression - - # The default implementation is for keys() and __iter__ to be - # synonyms. 
The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys.""" - return self._expression.__iter__() - - @property - def args(self): - return self._expression.args() - - def arg(self, index): - return self._expression.arg(index) - - def nargs(self): - return self._expression.nargs() - - def __call__(self): - return self._expression() - - def __add__(self, other): - expr = (self, "+", other) - return OmltExprIndexed(self._index_set, format=self._format, expr=expr) - - def __sub__(self, other): - expr = (self, "-", other) - return OmltExprIndexed(self._index_set, format=self._format, expr=expr) - - def __mul__(self, other): - expr = (self, "*", other) - return OmltExprIndexed(self._index_set, format=self._format, expr=expr) - - def __div__(self, other): - expr = (self, "/", other) - return OmltExprIndexed(self._index_set, format=self._format, expr=expr) - - def __truediv__(self, other): - expr = (self, "//", other) - return OmltExprIndexed(self._index_set, format=self._format, expr=expr) - - def __eq__(self, other): - expr = (self, "==", other) - return pyo.Expression(self._index_set, expr=expr) - - def __le__(self, other): - expr = (self, "<=", other) - return pyo.Expression(self._index_set, expr=expr) - - def __ge__(self, other): - expr = (self, ">=", other) - return pyo.Expression(self._index_set, expr=expr) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 71aef65b..2da10f38 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -8,8 +8,6 @@ from abc import ABC, abstractmethod from typing import Any -import pyomo.environ as pyo - from omlt.base import DEFAULT_MODELING_LANGUAGE, expression @@ -26,7 +24,7 @@ def construct(self, data): """Construct the variable.""" @abstractmethod - def fix(self, value, skip_validation): + def fix(self, value, *, skip_validation=False): """Fix the value of the variable.""" @property @@ -128,40 +126,28 @@ def value(self, val): # Interface governing how variables behave in expressions. 
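(Each of the overloads below defers to the expression layer: the variable contributes itself in an `(lhs, op, rhs)` tuple, and `OmltExpr` dispatches on the language tag. As a rough sketch of the round trip, using the `OmltVar` dispatch in effect at this point in the series:

    v = OmltVar(lang="pyomo")   # dispatches to OmltScalarPyomo
    e = v + 3                   # builds OmltExpr(lang="pyomo", expr=(v, "+", 3))
    assert e.is_expression_type()

)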
def __add__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "+", other)) + return expression.OmltExpr(lang=self._format, expr=(self, "+", other)) def __sub__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "-", other)) + return expression.OmltExpr(lang=self._format, expr=(self, "-", other)) def __mul__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "*", other)) - - def __div__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "//", other)) + return expression.OmltExpr(lang=self._format, expr=(self, "*", other)) def __truediv__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "/", other)) - - def __pow__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(self, "**", other)) + return expression.OmltExpr(lang=self._format, expr=(self, "/", other)) def __radd__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "+", self)) + return expression.OmltExpr(lang=self._format, expr=(other, "+", self)) def __rsub__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "-", self)) + return expression.OmltExpr(lang=self._format, expr=(other, "-", self)) def __rmul__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "*", self)) - - def __rdiv__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "//", self)) + return expression.OmltExpr(lang=self._format, expr=(other, "*", self)) def __rtruediv__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "/", self)) - - def __rpow__(self, other): - return expression.OmltExprScalar(lang=self._format, expr=(other, "**", self)) + return expression.OmltExpr(lang=self._format, expr=(other, "/", self)) class OmltIndexed(OmltVar): @@ -229,41 +215,3 @@ def __contains__(self, idx): @abstractmethod def __iter__(self): pass - - # Interface governing how variables behave in expressions. 
- - def __add__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "+", other)) - - def __sub__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "-", other)) - - def __mul__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "*", other)) - - def __div__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "//", other)) - - def __truediv__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "/", other)) - - def __pow__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(self, "**", other)) - - def __radd__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "+", self)) - - def __rsub__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "-", self)) - - def __rmul__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "*", self)) - - def __rdiv__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "//", self)) - - def __rtruediv__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "/", self)) - - def __rpow__(self, other): - return expression.OmltExprIndexed(self.index_set(), expr=(other, "**", self)) diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 972371a1..4f69d54c 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -141,7 +141,9 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): block.scaled_inputs[idx] == input_scaling_expressions[idx] ) - block._scale_output_constraint = OmltConstraint(block.outputs_set, lang=block._format) + block._scale_output_constraint = OmltConstraint( + block.outputs_set, lang=block._format + ) for idx in block.outputs_set: block._scale_output_constraint[idx] = ( block.outputs[idx] == output_unscaling_expressions[idx] diff --git a/tests/base/test_block.py b/tests/base/test_block.py index c451a24e..82445e3d 100644 --- a/tests/base/test_block.py +++ b/tests/base/test_block.py @@ -43,49 +43,15 @@ def test_block(): m = pyo.ConcreteModel() m.b = OmltBlock() + m.b.set_format("pyomo") formulation = DummyFormulation() - m.b.build_formulation(formulation) + m.b.build_formulation(formulation, lang="pyomo") assert m.b._OmltBlockData__formulation is formulation assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] -@pytest.mark.skipif( - not julia_available, reason="Test only valid when Julia is available" -) -def test_jump_block(): - m = pyo.ConcreteModel() - m.b = OmltBlock() - m.b.set_format("jump") - - expected_msg = ( - "Initial value for JuMP variables must be an int or float, but" - " was provided." 
- ) - - with pytest.raises(ValueError, match=expected_msg) as excinfo: - m.b.x = OmltVar(initialize=(2, 7), lang="jump") - - assert str(excinfo.value) == expected_msg - - m.b.y = OmltVar(initialize=2, lang="jump") - assert m.b.y.value == 2 - assert m.b.y.name == "y" - m.b.y.lb = 0 - m.b.y.ub = 5 - assert m.b.y.lb == 0 - assert m.b.y.ub == 5 - - formulation = dummy_formulation() - - m.b.build_formulation(formulation, format="jump") - - assert m.b._OmltBlockData__formulation is formulation - assert list(m.b.inputs) == ["A", "C", "D"] - assert list(m.b.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] - - def test_input_output_auto_creation(): m = pyo.ConcreteModel() m.b = OmltBlock() diff --git a/tests/base/test_constraint.py b/tests/base/test_constraint.py index ab46058c..88ba2f96 100644 --- a/tests/base/test_constraint.py +++ b/tests/base/test_constraint.py @@ -1,31 +1,57 @@ import pyomo.environ as pyo import pytest -from omlt.base import OmltConstraint, OmltExpr, OmltExprIndexed, OmltExprScalar, OmltVar +from omlt.base import ( + OmltConstraint, + OmltConstraintIndexed, + OmltConstraintScalar, + OmltScalar, +) VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 def test_build_constraint(): - v1 = OmltVar() + v1 = OmltScalar() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - v2 = OmltVar() + v2 = OmltScalar() v2.domain = pyo.Integers v2.value = VAR2_VALUE e2 = v2 + CONST_VALUE - c_eq = e1 == e2 + c_equal_expressions = e1 == e2 - assert c_eq.sense == "==" - assert id(c_eq.lhs) == id(e1._expression) + assert c_equal_expressions.sense == "==" + assert id(c_equal_expressions.lhs) == id(e1._expression) - c_le = OmltConstraint(lhs=e1, sense="<=", rhs=e2) + c_equal_var = e1 == v2 + assert c_equal_var.sense == "==" + assert id(c_equal_var.lhs) == id(e1._expression) - assert c_le.sense == "<=" - assert id(c_le.rhs) == id(e2._expression) + c_equal_const = e1 == CONST_VALUE + assert c_equal_const.sense == "==" + assert id(c_equal_const.lhs) == id(e1._expression) + + c_le_expressions = e1 <= e2 + + assert c_le_expressions.sense == "<=" + assert id(c_le_expressions.rhs) == id(e2._expression) + assert c_le_expressions() == VAR1_VALUE - VAR2_VALUE + + c_le_var = e1 <= v2 + + assert c_le_var.sense == "<=" + assert id(c_le_var.rhs) == id(v2._pyovar) + assert c_le_var() == VAR1_VALUE - VAR2_VALUE + CONST_VALUE + + c_le_const = e1 <= CONST_VALUE + + assert c_le_const.sense == "<=" + assert c_le_const.rhs == CONST_VALUE + assert c_le_const() == VAR1_VALUE + CONST_VALUE def test_constraint_invalid_lang(): expected_msg = ( @@ -34,7 +60,21 @@ def test_constraint_invalid_lang(): ) with pytest.raises(ValueError, match=expected_msg): - OmltConstraint(lang="test") + OmltConstraintScalar(lang="test") with pytest.raises(ValueError, match=expected_msg): - OmltConstraint(range(3), lang="test") + OmltConstraintIndexed(range(3), lang="test") + +def test_constraint_invalid_index(): + v1 = OmltScalar() + v1.domain = pyo.Integers + v1.value = VAR1_VALUE + e1 = v1 + CONST_VALUE + + c = OmltConstraint(range(3)) + expected_msg = "Couldn't find index %s in index set %s." 
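(The literal `%s` placeholders survive into the raised error because the backend packs a printf-style tuple rather than formatting it; roughly, from the `OmltConstraintIndexedPyomo.__setitem__`/`__getitem__` bodies diffed earlier:

    msg = (
        "Couldn't find index %s in index set %s.",
        index,
        list(self._index_set.data()),
    )
    raise KeyError(msg)

which is why `pytest.raises(..., match=expected_msg)` below matches the unformatted text.)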
+ with pytest.raises(KeyError, match=expected_msg): + c[4] = e1 >= 0 + + with pytest.raises(KeyError, match=expected_msg): + _ = c[4] diff --git a/tests/base/test_expression.py b/tests/base/test_expression.py index 40c78bbc..fe81128c 100644 --- a/tests/base/test_expression.py +++ b/tests/base/test_expression.py @@ -1,14 +1,15 @@ +import omlt.base.pyomo as pobjects import pyomo.environ as pyo import pytest -from omlt.base import OmltExpr, OmltExprIndexed, OmltExprScalar, OmltVar +from omlt.base import OmltExpr, OmltScalar, OmltVar VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 def _test_build_scalar_expressions(lang): - v1 = OmltVar(lang=lang) - v2 = OmltVar(lang=lang) + v1 = OmltScalar(lang=lang) + v2 = OmltScalar(lang=lang) v1.domain = pyo.Integers v2.domain = pyo.Integers @@ -57,12 +58,13 @@ def test_init_scalar_expression(): v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - e2 = OmltExprScalar(expr=e1) + e2 = OmltExpr(expr=e1) assert e2.ctype == pyo.Expression assert e2.is_component_type() assert e2.is_expression_type() assert e2.valid_model_component() + assert e2.is_potentially_variable() assert not e2.is_indexed() assert e2.nargs() == 2 @@ -76,26 +78,31 @@ def test_init_scalar_expression(): ) with pytest.raises(TypeError, match=expected_msg): - OmltExprScalar(expr="test") + OmltExpr(expr="test") expected_msg = ( "Expression format %s not recognized. Supported formats " "are 'pyomo' or 'jump'." ) with pytest.raises(ValueError, match=expected_msg): - OmltExprScalar(lang="test") + OmltExpr(lang="test") expected_msg = "Expression middle term was {%s}." with pytest.raises(ValueError, match=expected_msg): - OmltExprScalar(expr=(v1, "test", CONST_VALUE)) + OmltExpr(expr=(v1, "test", CONST_VALUE)) + + expected_msg = "Term of expression %s is an unsupported type. %s" + + with pytest.raises(TypeError, match=expected_msg): + OmltExpr(expr=((e1, "-", "test"), "+", CONST_VALUE)) def test_combine_scalar_expression(): - v1 = OmltVar() + v1 = OmltScalar() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - v2 = OmltVar() + v2 = OmltScalar() v2.domain = pyo.Integers v2.value = VAR2_VALUE e2 = v2 + CONST_VALUE @@ -119,25 +126,11 @@ def test_combine_scalar_expression(): p_prod = e1 * CONST_VALUE assert p_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE + r_sum = CONST_VALUE + e1 + assert r_sum() == VAR1_VALUE + 2*CONST_VALUE -def test_init_indexed_expression(): - v1 = OmltVar(range(3)) - v1.domain = pyo.Integers - v1.value = VAR1_VALUE - e1 = v1 + CONST_VALUE - - e2 = OmltExpr(range(3), expr=e1) + r_diff = CONST_VALUE - e1 + assert r_diff() == -VAR1_VALUE - assert e2.ctype == pyo.Expression - assert e2.is_component_type() - assert e2.is_expression_type() - assert e2.valid_model_component() - assert e2.is_indexed() - - expected_msg = ( - "Expression format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'." 
- ) - - with pytest.raises(ValueError, match=expected_msg): - OmltExprIndexed(range(3), lang="test") \ No newline at end of file + r_prod = CONST_VALUE * e1 + assert r_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE \ No newline at end of file diff --git a/tests/base/test_var.py b/tests/base/test_var.py index 7473845f..6eb49c03 100644 --- a/tests/base/test_var.py +++ b/tests/base/test_var.py @@ -1,4 +1,3 @@ -import re import pyomo.environ as pyo import pytest diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index bf0400fa..67a427d7 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -3,7 +3,7 @@ import pyomo.environ as pe import pytest from omlt import OmltBlock -from omlt.base import OmltVar +from omlt.base import OmltIndexed, OmltVar from omlt.dependencies import onnx, onnx_available from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel @@ -20,7 +20,7 @@ def test_formulation_with_continuous_variables(): m = pe.ConcreteModel() - m.x = OmltVar(range(4), bounds=(-2.0, 2.0)) + m.x = OmltIndexed(range(4), bounds=(-2.0, 2.0)) m.x[3].setlb(0.0) m.x[3].setub(1.0) diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py index 39413837..8399cab9 100644 --- a/tests/linear_tree/test_lt_formulation.py +++ b/tests/linear_tree/test_lt_formulation.py @@ -1,7 +1,7 @@ import numpy as np import pyomo.environ as pe import pytest -from omlt.base import OmltVar +from omlt.base import OmltScalar, OmltVar from omlt.dependencies import lineartree_available if lineartree_available: @@ -170,8 +170,8 @@ def test_bigm_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="bigm") model1 = pe.ConcreteModel() - model1.x = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x = OmltScalar(initialize=0) + model1.y = OmltScalar(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) From 6bdb7f98ae6c36291740603ff59aae9b78472c1f Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 1 Aug 2024 20:06:31 +0000 Subject: [PATCH 70/75] Factory classes for vars and constraints --- .gitignore | 2 + .../auto-thermal-reformer-relu.ipynb | 2 +- .../neuralnet/auto-thermal-reformer.ipynb | 2 +- src/omlt/base/__init__.py | 15 +-- src/omlt/base/constraint.py | 95 +++++++++------- src/omlt/base/pyomo.py | 32 +++--- src/omlt/base/var.py | 102 ++++++++++-------- src/omlt/block.py | 15 +-- src/omlt/formulation.py | 16 +-- src/omlt/gbt/gbt_formulation.py | 33 +++--- .../torch_geometric/build_gnn_formulation.py | 15 ++- src/omlt/linear_tree/lt_formulation.py | 25 +++-- src/omlt/neuralnet/activations/linear.py | 5 +- src/omlt/neuralnet/activations/relu.py | 14 +-- src/omlt/neuralnet/activations/smooth.py | 7 +- src/omlt/neuralnet/layers/full_space.py | 42 +++++--- src/omlt/neuralnet/layers/partition_based.py | 33 ++++-- src/omlt/neuralnet/nn_formulation.py | 35 ++++-- tests/base/test_block.py | 2 - tests/base/test_constraint.py | 33 +++--- tests/base/test_expression.py | 17 +-- tests/base/test_var.py | 33 +++--- tests/gbt/test_gbt_formulation.py | 9 +- tests/linear_tree/test_lt_formulation.py | 41 ++++--- 24 files changed, 366 insertions(+), 259 deletions(-) diff --git a/.gitignore b/.gitignore index 243e16e1..eed9c01e 100644 --- a/.gitignore +++ b/.gitignore @@ -132,3 +132,5 @@ dmypy.json # Pyre type checker 
.pyre/ .vscode/settings.json + +src/omlt/base/julia.py diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb index 2530183c..0b800812 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb @@ -558,7 +558,7 @@ "h2_idx = outputs.index('H2')\n", "n2_idx = outputs.index('N2')\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", - "m.con = OmltConstraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" + "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] }, { diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb index 83e3f449..f7328c18 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb @@ -547,7 +547,7 @@ "h2_idx = outputs.index('H2')\n", "n2_idx = outputs.index('N2')\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", - "m.con = OmltConstraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" + "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] }, { diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 92e1b811..377bd08a 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,11 +1,12 @@ +from omlt.base.language import DEFAULT_MODELING_LANGUAGE from omlt.dependencies import julia_available -if julia_available: - from omlt.base.julia import jl, jump +# if julia_available: +# from omlt.base.julia import jl, jump -from omlt.base.language import DEFAULT_MODELING_LANGUAGE from omlt.base.constraint import ( OmltConstraint, + OmltConstraintFactory, OmltConstraintIndexed, OmltConstraintScalar, ) @@ -17,20 +18,22 @@ OmltIndexedPyomo, OmltScalarPyomo, ) -from omlt.base.var import OmltIndexed, OmltScalar, OmltVar +from omlt.base.var import OmltIndexed, OmltScalar, OmltVar, OmltVarFactory __all__ = [ "DEFAULT_MODELING_LANGUAGE", "julia_available", - "jl", - "jump", + # "jl", + # "jump", "OmltExpr", "OmltScalar", "OmltIndexed", "OmltVar", + "OmltVarFactory", "OmltConstraintIndexed", "OmltConstraintScalar", "OmltConstraint", + "OmltConstraintFactory", "OmltConstraintIndexedPyomo", "OmltConstraintScalarPyomo", "OmltExprScalarPyomo", diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py index b96b0a64..286d82a0 100644 --- a/src/omlt/base/constraint.py +++ b/src/omlt/base/constraint.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC, abstractmethod from typing import Any @@ -8,14 +10,6 @@ class OmltConstraint(ABC): - def __new__(cls, *indexes, **kwargs: Any): - if not indexes: - instance = OmltConstraintScalar.__new__(OmltConstraintScalar, **kwargs) - else: - instance = OmltConstraintIndexed.__new__( - OmltConstraintIndexed, *indexes, **kwargs - ) - return instance @property def ctype(self): @@ -38,22 +32,8 @@ def __call__(self, *args: Any, **kwds: Any) -> Any: class OmltConstraintScalar(OmltConstraint): - def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Constraint format %s not recognized. 
Supported formats " - "are 'pyomo' or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltConstraint, subclass).__new__(subclass) - instance.__init__(**kwargs) - instance._format = lang - return instance - - def __init__(self, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + + def __init__(self, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any): lhs = kwargs.pop("lhs", None) if lhs is not None: self.lhs = lhs @@ -93,22 +73,10 @@ def args(self): class OmltConstraintIndexed(OmltConstraint): - def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Constraint format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltConstraint, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - instance._format = lang - return instance - - def __init__(self, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): + + def __init__( + self, *indexes: Any, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any + ): self._index_set = indexes self.model = kwargs.pop("model", None) @@ -142,3 +110,50 @@ def _active(self, val): @abstractmethod def _data(self): """Return data from the constraint.""" + + +class OmltConstraintFactory: + def __init__(self): + self.scalars = { + subclass.format: subclass + for subclass in OmltConstraintScalar.__subclasses__() + } + self.indexed = { + subclass.format: subclass + for subclass in OmltConstraintIndexed.__subclasses__() + } + + def register(self, lang, indexed, varclass): + if lang is None: + lang = varclass.format + if indexed: + if lang in self.indexed: + msg = ("Indexed constraint format %s is already registered.", lang) + raise KeyError(msg) + self.indexed[lang] = varclass + else: + if lang in self.scalars: + msg = ("Scalar constraint format %s is already registered.", lang) + raise KeyError(msg) + self.scalars[lang] = varclass + + def new_constraint( + self, *indexes: Any, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any + ) -> Any: + if indexes: + if lang not in self.indexed: + msg = ( + "Constraint format %s not recognized. Supported formats are %s", + lang, + list(self.indexed.keys()), + ) + raise KeyError(msg) + return self.indexed[lang](*indexes, **kwargs) + if lang not in self.scalars: + msg = ( + "Constraint format %s not recognized. 
Supported formats are %s", + lang, + list(self.scalars.keys()), + ) + raise KeyError(msg) + return self.scalars[lang](**kwargs) diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 95e21ae1..1d0037ef 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -19,8 +19,9 @@ class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): format = "pyomo" - def __init__(self, *args, **kwargs: Any): + def __init__(self, *args: Any, **kwargs: Any): kwargs.pop("lang", None) + self._format = "pyomo" self._pyovar = pyo.ScalarVar(*args, **kwargs) self._name = None self._parent = None @@ -90,8 +91,9 @@ def value(self, val): class OmltIndexedPyomo(pyo.Var, OmltIndexed): format = "pyomo" - def __init__(self, *indexes, **kwargs: Any): + def __init__(self, *indexes: Any, **kwargs: Any): kwargs.pop("lang", None) + self._format = "pyomo" super().__init__(*indexes, **kwargs) self.bounds = (None, None) @@ -121,13 +123,17 @@ def setlb(self, value): class OmltConstraintScalarPyomo(OmltConstraintScalar, pyo.Constraint): format = "pyomo" - def __init__(self, *args, **kwargs: Any): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) - self.lhs = ( - self.lhs._expression if isinstance(self.lhs, OmltExprScalarPyomo) else self.lhs + self.lhs: pyo.Expression = ( + self.lhs._expression + if isinstance(self.lhs, OmltExprScalarPyomo) + else self.lhs ) - self.rhs = ( - self.rhs._expression if isinstance(self.rhs, OmltExprScalarPyomo) else self.rhs + self.rhs: pyo.Expression = ( + self.rhs._expression + if isinstance(self.rhs, OmltExprScalarPyomo) + else self.rhs ) if self.sense == "==": @@ -175,19 +181,19 @@ def is_indexed(self): class OmltConstraintIndexedPyomo(OmltConstraintIndexed, pyo.Constraint): format = "pyomo" - def __init__(self, *args, **kwargs: Any): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) kwargs.pop("model", None) kwargs.pop("lang", None) kwargs.pop("expr_tuple", None) - self.constraint = pyo.Constraint(*self._index_set, **kwargs) + self.constraint = pyo.Constraint(*args, **kwargs) self._index_set = self.constraint._index_set self.constraint._parent = self._parent self.constraint.construct() self.model = self.constraint.model - self.constraints = {} + self.constraints: dict[Any, Any] = {} def __setitem__(self, index, expr): if index in self._index_set: @@ -387,7 +393,7 @@ def __ge__(self, other): rhs = other._pyovar else: rhs = other - return OmltConstraintScalar( + return OmltConstraintScalarPyomo( model=self._parent, lang=self._format, lhs=self, sense=">=", rhs=rhs ) @@ -398,7 +404,7 @@ def __le__(self, other): rhs = other._pyovar else: rhs = other - return OmltConstraintScalar( + return OmltConstraintScalarPyomo( model=self._parent, lang=self._format, lhs=self, sense="<=", rhs=rhs ) @@ -409,6 +415,6 @@ def __eq__(self, other): rhs = other._pyovar else: rhs = other - return OmltConstraintScalar( + return OmltConstraintScalarPyomo( model=self._parent, lang=self._format, lhs=self, sense="==", rhs=rhs ) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 2da10f38..8158d843 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -5,6 +5,8 @@ JuMP, or others (not yet implemented - e.g. Smoke, Gurobi). 
""" +from __future__ import annotations + from abc import ABC, abstractmethod from typing import Any @@ -12,13 +14,6 @@ class OmltVar(ABC): - def __new__(cls, *indexes, **kwargs: Any): - if not indexes: - instance = OmltScalar.__new__(OmltScalar, **kwargs) - else: - instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) - return instance - @abstractmethod def construct(self, data): """Construct the variable.""" @@ -52,21 +47,7 @@ def valid_model_component(self): class OmltScalar(OmltVar): - def __new__(cls, *args, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltVar, subclass).__new__(subclass) - - instance.__init__(*args, **kwargs) - instance._format = lang - return instance + format: str | None = None def is_indexed(self): return False @@ -126,45 +107,32 @@ def value(self, val): # Interface governing how variables behave in expressions. def __add__(self, other): - return expression.OmltExpr(lang=self._format, expr=(self, "+", other)) + return expression.OmltExpr(lang=self.format, expr=(self, "+", other)) def __sub__(self, other): - return expression.OmltExpr(lang=self._format, expr=(self, "-", other)) + return expression.OmltExpr(lang=self.format, expr=(self, "-", other)) def __mul__(self, other): - return expression.OmltExpr(lang=self._format, expr=(self, "*", other)) + return expression.OmltExpr(lang=self.format, expr=(self, "*", other)) def __truediv__(self, other): - return expression.OmltExpr(lang=self._format, expr=(self, "/", other)) + return expression.OmltExpr(lang=self.format, expr=(self, "/", other)) def __radd__(self, other): - return expression.OmltExpr(lang=self._format, expr=(other, "+", self)) + return expression.OmltExpr(lang=self.format, expr=(other, "+", self)) def __rsub__(self, other): - return expression.OmltExpr(lang=self._format, expr=(other, "-", self)) + return expression.OmltExpr(lang=self.format, expr=(other, "-", self)) def __rmul__(self, other): - return expression.OmltExpr(lang=self._format, expr=(other, "*", self)) + return expression.OmltExpr(lang=self.format, expr=(other, "*", self)) def __rtruediv__(self, other): - return expression.OmltExpr(lang=self._format, expr=(other, "/", self)) + return expression.OmltExpr(lang=self.format, expr=(other, "/", self)) class OmltIndexed(OmltVar): - def __new__(cls, *indexes, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Variable format %s not recognized. 
Supported formats are 'pyomo'" - " or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super(OmltVar, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - instance._format = lang - return instance + format: str | None = None def is_indexed(self): return True @@ -215,3 +183,49 @@ def __contains__(self, idx): @abstractmethod def __iter__(self): pass + + +class OmltVarFactory: + def __init__(self): + self.scalars = { + subclass.format: subclass for subclass in OmltScalar.__subclasses__() + } + self.indexed = { + subclass.format: subclass for subclass in OmltIndexed.__subclasses__() + } + + def register(self, lang, indexed, varclass): + if lang is None: + lang = varclass.format + if indexed: + if lang in self.indexed: + msg = ("Indexed variable format %s is already registered.", lang) + raise KeyError(msg) + self.indexed[lang] = varclass + else: + if lang in self.scalars: + msg = ("Scalar variable format %s is already registered.", lang) + raise KeyError(msg) + self.scalars[lang] = varclass + + def new_var( + self, *indexes: Any, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any + ) -> Any: + if indexes: + if lang not in self.indexed: + msg = ( + "Variable format %s not recognized. Supported formats are %s", + lang, + list(self.indexed.keys()), + ) + raise KeyError(msg) + return self.indexed[lang](*indexes, **kwargs) + if lang not in self.scalars: + msg = ( + "Variable format %s not recognized. Supported formats are %s", + lang, + list(self.scalars.keys()), + ) + raise KeyError(msg) + + return self.scalars[lang](**kwargs) diff --git a/src/omlt/block.py b/src/omlt/block.py index 10827b48..3ffcea5d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -24,11 +24,10 @@ class is used in combination with a formulation object to construct the pyo.assert_optimal_termination(status) """ - import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block -from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVar +from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVarFactory @declare_custom_block(name="OmltBlock") @@ -43,7 +42,6 @@ def __init__(self, component): def set_format(self, lang): self._format = lang - def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """Setup inputs and outputs. @@ -60,12 +58,16 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ self.__input_indexes = input_indexes self.__output_indexes = output_indexes + self.__var_factory = OmltVarFactory() self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0, lang=self._format) + self.inputs = self.__var_factory.new_var( + self.inputs_set, initialize=0, lang=self._format + ) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0, lang=self._format) - + self.outputs = self.__var_factory.new_var( + self.outputs_set, initialize=0, lang=self._format + ) def build_formulation(self, formulation, lang=None): """Build formulation. 
@@ -98,7 +100,6 @@ def build_formulation(self, formulation, lang=None): ) raise ValueError(msg) - if lang is not None: self._format = lang diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 4f69d54c..ec58083d 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -3,7 +3,7 @@ import pyomo.environ as pyo -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory class _PyomoFormulationInterface(abc.ABC): @@ -89,20 +89,23 @@ def scalar_or_tuple(x): def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): + var_factory = OmltVarFactory() if scaled_input_bounds is not None: bnds = { k: (float(scaled_input_bounds[k][0]), float(scaled_input_bounds[k][1])) for k in block.inputs_set } - block.scaled_inputs = OmltVar( + block.scaled_inputs = var_factory.new_var( block.inputs_set, initialize=0, lang=block._format, bounds=bnds ) else: - block.scaled_inputs = OmltVar( + block.scaled_inputs = var_factory.new_var( block.inputs_set, initialize=0, lang=block._format ) - block.scaled_outputs = OmltVar(block.outputs_set, initialize=0, lang=block._format) + block.scaled_outputs = var_factory.new_var( + block.outputs_set, initialize=0, lang=block._format + ) if scaled_input_bounds is not None and scaler is None: # set the bounds on the inputs to be the same as the scaled inputs @@ -134,14 +137,15 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): output_unscaling_expressions = scaler.get_unscaled_output_expressions( output_unscaling_expressions ) + constraint_factory = OmltConstraintFactory() - block._scale_input_constraint = OmltConstraint(block.inputs_set, lang=block._format) + block._scale_input_constraint = constraint_factory.new_constraint(block.inputs_set, lang=block._format) for idx in block.inputs_set: block._scale_input_constraint[idx] = ( block.scaled_inputs[idx] == input_scaling_expressions[idx] ) - block._scale_output_constraint = OmltConstraint( + block._scale_output_constraint = constraint_factory.new_constraint( block.outputs_set, lang=block._format ) for idx in block.outputs_set: diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index 22177e77..80c83d46 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -4,7 +4,7 @@ import numpy as np import pyomo.environ as pe -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.gbt.model import GradientBoostedTreeModel @@ -154,7 +154,9 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): var = input_vars[var_idx] continuous_vars[var_idx] = var - block.z_l = OmltVar( + var_factory = OmltVarFactory() + + block.z_l = var_factory.new_var( list(zip(nodes_tree_ids[nodes_leaf_mask], nodes_node_ids[nodes_leaf_mask])), bounds=(0, None), domain=pe.Reals, @@ -174,9 +176,11 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): for f in continuous_vars for bi, _ in enumerate(branch_value_by_feature_id[f]) ] - block.y = OmltVar(y_index, lang=block._format, domain=pe.Binary) + block.y = var_factory.new_var(y_index, lang=block._format, domain=pe.Binary) + + constraint_factory = OmltConstraintFactory() - block.single_leaf = OmltConstraint(tree_ids, lang=block._format) + block.single_leaf = constraint_factory.new_constraint(tree_ids, lang=block._format) for tree_id in tree_ids: 
r"""Single leaf constraint. @@ -248,7 +252,9 @@ def _sum_of_z_l(tree_id, start_node_id): visit_queue.append(local_true_node_ids[node_id]) return sum_of_z_l - block.left_split = OmltConstraint(nodes_tree_branch_ids, lang=block._format) + block.left_split = constraint_factory.new_constraint( + nodes_tree_branch_ids, lang=block._format + ) for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Left split. @@ -268,7 +274,9 @@ def _sum_of_z_l(tree_id, start_node_id): _sum_of_z_l(tree_id, subtree_root) <= y ) - block.right_split = OmltConstraint(nodes_tree_branch_ids, lang=block._format) + block.right_split = constraint_factory.new_constraint( + nodes_tree_branch_ids, lang=block._format + ) for tree_id, branch_node_id in nodes_tree_branch_ids: r"""Right split. @@ -288,7 +296,7 @@ def _sum_of_z_l(tree_id, start_node_id): _sum_of_z_l(tree_id, subtree_root) <= 1 - y ) - block.order_y = OmltConstraint(y_index, lang=block._format) + block.order_y = constraint_factory.new_constraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Add constraint to activate splits in the correct order. @@ -302,10 +310,11 @@ def _sum_of_z_l(tree_id, start_node_id): branch_values = branch_value_by_feature_id[feature_id] if branch_y_idx < len(branch_values) - 1: block.order_y[feature_id, branch_y_idx] = ( - block.y[feature_id, branch_y_idx] <= block.y[feature_id, branch_y_idx + 1] + block.y[feature_id, branch_y_idx] + <= block.y[feature_id, branch_y_idx + 1] ) - block.var_lower = OmltConstraint(y_index, lang=block._format) + block.var_lower = constraint_factory.new_constraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Lower bound constraint. @@ -328,7 +337,7 @@ def _sum_of_z_l(tree_id, start_node_id): branch_value - x.lb ) * (1 - block.y[feature_id, branch_y_idx]) - block.var_upper = OmltConstraint(y_index, lang=block._format) + block.var_upper = constraint_factory.new_constraint(y_index, lang=block._format) for feature_id, branch_y_idx in y_index: r"""Upper bound constraint. Add constraint to link discrete tree splits to upper bound of continuous @@ -348,7 +357,7 @@ def _sum_of_z_l(tree_id, start_node_id): x <= x.ub + (branch_value - x.ub) * block.y[feature_id, branch_y_idx] ) - block.tree_mean_value = OmltConstraint( + block.tree_mean_value = constraint_factory.new_constraint( expr=( output_vars[0] == sum( @@ -359,7 +368,7 @@ def _sum_of_z_l(tree_id, start_node_id): ) + base_value ), - lang = block._format + lang=block._format, ) r"""Add constraint to link block output tree model mean. 
diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index d182e5ad..ee00f6f4 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -1,7 +1,7 @@ import numpy as np import pyomo.environ as pyo -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory from omlt.io.torch_geometric.torch_geometric_reader import ( load_torch_geometric_sequential, ) @@ -59,7 +59,8 @@ def gnn_with_non_fixed_graph( ) # define binary variables for adjacency matrix - block.A = OmltVar( + var_factory = OmltVarFactory() + block.A = var_factory.new_var( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, @@ -70,10 +71,13 @@ def gnn_with_non_fixed_graph( block.A[u, u].fix(1) # assume the adjacency matrix is always symmetric indexes = [(u, v) for u in range(N) for v in range(u + 1, N)] - block.symmetric_adjacency = OmltConstraint(indexes, lang=block._format) + constraint_factory = OmltConstraintFactory() + block.symmetric_adjacency = constraint_factory.new_constraint( + indexes, lang=block._format + ) for u in range(N): for v in range(u + 1, N): - block.symmetric_adjacency[(u,v)] = block.A[u, v] == block.A[v, u] + block.symmetric_adjacency[(u, v)] = block.A[u, v] == block.A[v, u] # build formulation for GNN block.build_formulation(FullSpaceNNFormulation(net)) @@ -142,7 +146,8 @@ def gnn_with_fixed_graph( ) # define binary variables for adjacency matrix - block.A = OmltVar( + var_factory = OmltVarFactory() + block.A = var_factory.new_var( pyo.Set(initialize=range(N)), pyo.Set(initialize=range(N)), within=pyo.Binary, diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index c0d017d2..561a2ffd 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -2,7 +2,7 @@ import pyomo.environ as pe from pyomo.gdp import Disjunct -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -249,7 +249,8 @@ def _add_gdp_formulation_to_block( block.scaled_outputs.setub(output_bounds[1]) block.scaled_outputs.setlb(output_bounds[0]) - block.intermediate_output = OmltVar( + var_factory = OmltVarFactory() + block.intermediate_output = var_factory.new_var( tree_ids, lang=block._format, bounds=(output_bounds[0], output_bounds[1]) ) @@ -259,12 +260,12 @@ def disjuncts_rule(dsj, tree, leaf): def lb_rule(dsj, feat): return input_vars[feat] >= leaves[tree][leaf]["bounds"][feat][0] - dsj.lb_constraint = OmltConstraint(features, rule=lb_rule) + dsj.lb_constraint = pe.Constraint(features, rule=lb_rule) def ub_rule(dsj, feat): return input_vars[feat] <= leaves[tree][leaf]["bounds"][feat][1] - dsj.ub_constraint = OmltConstraint(features, rule=ub_rule) + dsj.ub_constraint = pe.Constraint(features, rule=ub_rule) slope = leaves[tree][leaf]["slope"] intercept = leaves[tree][leaf]["intercept"] dsj.linear_exp = pe.Constraint( @@ -323,10 +324,12 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output # Create the intermeditate variables. z is binary that indicates which leaf # in tree t is returned. 
intermediate_output is the output of tree t and # the total output of the model is the sum of the intermediate_output vars - block.z = OmltVar(t_l, lang=block._format, within=pe.Binary) - block.intermediate_output = OmltVar(tree_ids, lang=block._format) + var_factory = OmltVarFactory() + block.z = var_factory.new_var(t_l, lang=block._format, within=pe.Binary) + block.intermediate_output = var_factory.new_var(tree_ids, lang=block._format) - block.lower_bound_constraints = OmltConstraint(features, tree_ids) + constraint_factory = OmltConstraintFactory() + block.lower_bound_constraints = constraint_factory.new_constraint(features, tree_ids) for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) for feat in features: @@ -338,7 +341,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output <= input_vars[feat] ) - block.upper_bound_constraints = OmltConstraint(features, tree_ids) + block.upper_bound_constraints = constraint_factory.new_constraint(features, tree_ids) for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) for feat in features: @@ -350,7 +353,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output >= input_vars[feat] ) - block.linear_constraint = OmltConstraint(tree_ids) + block.linear_constraint = constraint_factory.new_constraint(tree_ids) for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) block.linear_constraint[tree] = block.intermediate_output[tree] == sum( @@ -364,13 +367,13 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output * block.z[tree, leaf] for leaf in leaf_ids ) - block.only_one_leaf_per_tree = OmltConstraint(tree_ids) + block.only_one_leaf_per_tree = constraint_factory.new_constraint(tree_ids) for tree in tree_ids: leaf_ids = list(leaves[tree].keys()) block.only_one_leaf_per_tree[tree] = ( sum(block.z[tree, leaf] for leaf in leaf_ids) == 1 ) - block.output_sum_of_trees = OmltConstraint( + block.output_sum_of_trees = constraint_factory.new_constraint( expr=output_vars[0] == sum(block.intermediate_output[tree] for tree in tree_ids) ) diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py index 87269172..69a76842 100644 --- a/src/omlt/neuralnet/activations/linear.py +++ b/src/omlt/neuralnet/activations/linear.py @@ -1,4 +1,4 @@ -from omlt.base import OmltConstraint +from omlt.base import OmltConstraintFactory def linear_activation_function(zhat): @@ -19,7 +19,8 @@ def linear_activation_constraint( \end{align*} """ - layer_block.linear_activation = OmltConstraint( + constraint_factory = OmltConstraintFactory() + layer_block.linear_activation = constraint_factory.new_constraint( layer.output_indexes, lang=net_block._format ) for output_index in layer.output_indexes: diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index a1436a9e..f1f365f5 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,7 +1,7 @@ import pyomo.environ as pyo from pyomo import mpec -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory def bigm_relu_activation_constraint(net_block, net, layer_block, layer): @@ -40,20 +40,22 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): is :math:`\max(0,u)`. 
""" - layer_block.q_relu = OmltVar( + var_factory = OmltVarFactory() + layer_block.q_relu = var_factory.new_var( layer.output_indexes, lang=net_block._format, within=pyo.Binary ) - layer_block._z_lower_bound_relu = OmltConstraint( + constraint_factory = OmltConstraintFactory() + layer_block._z_lower_bound_relu = constraint_factory.new_constraint( layer.output_indexes, lang=net_block._format, model=layer_block.model ) - layer_block._z_lower_bound_zhat_relu = OmltConstraint( + layer_block._z_lower_bound_zhat_relu = constraint_factory.new_constraint( layer.output_indexes, lang=net_block._format, model=layer_block.model ) - layer_block._z_upper_bound_relu = OmltConstraint( + layer_block._z_upper_bound_relu = constraint_factory.new_constraint( layer.output_indexes, lang=net_block._format, model=layer_block.model ) - layer_block._z_upper_bound_zhat_relu = OmltConstraint( + layer_block._z_upper_bound_zhat_relu = constraint_factory.new_constraint( layer.output_indexes, lang=net_block._format, model=layer_block.model ) diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py index dc668a01..fe401a17 100644 --- a/src/omlt/neuralnet/activations/smooth.py +++ b/src/omlt/neuralnet/activations/smooth.py @@ -1,6 +1,6 @@ from pyomo.environ import exp, log, tanh -from omlt.base import OmltConstraint +from omlt.base import OmltConstraintFactory def softplus_activation_function(x): @@ -76,8 +76,9 @@ def smooth_monotonic_activation_constraint(net_block, net, layer_block, layer, f \end{align*} """ - layer_block._smooth_monotonic_activation_constraint = OmltConstraint( - layer.output_indexes, lang=net_block._format + constraint_factory = OmltConstraintFactory() + layer_block._smooth_monotonic_activation_constraint = ( + constraint_factory.new_constraint(layer.output_indexes, lang=net_block._format) ) for output_index in layer.output_indexes: zhat_lb, zhat_ub = layer_block.zhat[output_index].bounds diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index deae8713..619a6ba8 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -1,7 +1,7 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory from omlt.neuralnet.activations import NON_INCREASING_ACTIVATIONS from omlt.neuralnet.layer import ConvLayer2D, PoolingLayer2D @@ -18,7 +18,10 @@ def full_space_dense_layer(net_block, net, layer_block, layer): """ input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - layer_block.dense_layer = OmltConstraint(layer.output_indexes, lang=net_block._format) + constraint_factory = OmltConstraintFactory() + layer_block.dense_layer = constraint_factory.new_constraint( + layer.output_indexes, lang=net_block._format + ) for output_index in layer.output_indexes: # dense layers multiply only the last dimension of # their inputs @@ -84,29 +87,31 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): """ input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - input_layer_block.zbar = OmltVar( + var_factory = OmltVarFactory() + constraint_factory = OmltConstraintFactory() + + input_layer_block.zbar = var_factory.new_var( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), initialize=0, lang=net_block._format, ) - input_layer_block._zbar_lower_bound_z_big_m = OmltConstraint( + 
input_layer_block._zbar_lower_bound_z_big_m = constraint_factory.new_constraint(
         pyo.Set(initialize=layer.input_indexes),
         pyo.Set(initialize=range(layer.N)),
         lang=net_block._format,
     )
-    input_layer_block._zbar_upper_bound_z_big_m = OmltConstraint(
+    input_layer_block._zbar_upper_bound_z_big_m = constraint_factory.new_constraint(
         pyo.Set(initialize=layer.input_indexes),
         pyo.Set(initialize=range(layer.N)),
         lang=net_block._format,
     )
-    input_layer_block._zbar_lower_bound_big_m = OmltConstraint(
+    input_layer_block._zbar_lower_bound_big_m = constraint_factory.new_constraint(
         pyo.Set(initialize=layer.input_indexes),
         pyo.Set(initialize=range(layer.N)),
         lang=net_block._format,
     )
-    input_layer_block._zbar_upper_bound_big_m = OmltConstraint(
+    input_layer_block._zbar_upper_bound_big_m = constraint_factory.new_constraint(
         pyo.Set(initialize=layer.input_indexes),
         pyo.Set(initialize=range(layer.N)),
         lang=net_block._format,
@@ -161,7 +166,9 @@ def full_space_gnn_layer(net_block, net, layer_block, layer):
                 <= ub * net_block.A[input_node_index, output_node_index]
             )

-    layer_block.gnn_layer = OmltConstraint(layer.output_indexes, lang=net_block._format)
+    layer_block.gnn_layer = constraint_factory.new_constraint(
+        layer.output_indexes, lang=net_block._format
+    )
     for output_index in layer.output_indexes:
         # gnn layers multiply only the last dimension of
         # their inputs
@@ -211,8 +218,11 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer):
         layer.activation = "linear"

     input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer)
+    constraint_factory = OmltConstraintFactory()

-    layer_block.convolutional_layer = OmltConstraint(layer.output_indexes, lang=net_block._format)
+    layer_block.convolutional_layer = constraint_factory.new_constraint(
+        layer.output_indexes, lang=net_block._format
+    )
     for output_index in layer.output_indexes:
         out_d, out_r, out_c = output_index
         expr = 0.0
@@ -291,17 +301,21 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer):
             for kernel_index, _ in layer.kernel_index_with_input_indexes(0, 0, 0)
         )
     )
-    layer_block.q_maxpool = OmltVar(
+    var_factory = OmltVarFactory()
+    constraint_factory = OmltConstraintFactory()
+    layer_block.q_maxpool = var_factory.new_var(
         layer.output_indexes,
         layer_block._kernel_indexes,
         lang=net_block._format,
         within=pyo.Binary,
     )
-    layer_block._q_sum_maxpool = OmltConstraint(layer.output_indexes, lang=net_block._format)
-    layer_block._zhat_upper_bound = OmltConstraint(
+    layer_block._q_sum_maxpool = constraint_factory.new_constraint(
+        layer.output_indexes, lang=net_block._format
+    )
+    layer_block._zhat_upper_bound = constraint_factory.new_constraint(
         layer.output_indexes, layer_block._kernel_indexes, lang=net_block._format
     )
-    layer_block._zhat_lower_bound = OmltConstraint(
+    layer_block._zhat_lower_bound = constraint_factory.new_constraint(
         layer.output_indexes, layer_block._kernel_indexes, lang=net_block._format
     )
diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py
index 55bbda16..292f6d8c 100644
--- a/src/omlt/neuralnet/layers/partition_based.py
+++ b/src/omlt/neuralnet/layers/partition_based.py
@@ -2,7 +2,7 @@
 import pyomo.environ as pyo
 from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr

-from omlt.base import OmltConstraint, OmltVar
+from omlt.base import OmltConstraintFactory, OmltVarFactory


 def default_partition_split_func(w, n):
@@ -86,16 +86,25 @@ def output_node_block(b, *output_index):
         splits = split_func(weights)
         num_splits = len(splits)

-        b.sig = 
OmltVar(domain=pyo.Binary, lang=net_block._format) - b.z2 = OmltVar(range(num_splits), lang=net_block._format) + var_factory = OmltVarFactory() + b.sig = var_factory.new_var(domain=pyo.Binary, lang=net_block._format) + b.z2 = var_factory.new_var(range(num_splits), lang=net_block._format) mapper = layer.input_index_mapper + constraint_factory = OmltConstraintFactory() + b.eq_16_lb = constraint_factory.new_constraint( + range(num_splits), lang=net_block._format + ) + b.eq_16_ub = constraint_factory.new_constraint( + range(num_splits), lang=net_block._format + ) - b.eq_16_lb = OmltConstraint(range(num_splits), lang=net_block._format) - b.eq_16_ub = OmltConstraint(range(num_splits), lang=net_block._format) - - b.eq_17_lb = OmltConstraint(range(num_splits), lang=net_block._format) - b.eq_17_ub = OmltConstraint(range(num_splits), lang=net_block._format) + b.eq_17_lb = constraint_factory.new_constraint( + range(num_splits), lang=net_block._format + ) + b.eq_17_ub = constraint_factory.new_constraint( + range(num_splits), lang=net_block._format + ) input_layer_indexes = list(layer.input_indexes_with_input_layer_indexes) @@ -160,14 +169,16 @@ def output_node_block(b, *output_index): eq_13_expr -= b.z2[split_index] eq_13_expr += bias * b.sig - b.eq_13 = OmltConstraint(expr=eq_13_expr <= 0, lang=net_block._format) - b.eq_14 = OmltConstraint( + b.eq_13 = constraint_factory.new_constraint( + expr=eq_13_expr <= 0, lang=net_block._format + ) + b.eq_14 = constraint_factory.new_constraint( expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0, lang=net_block._format, ) - b.eq_15 = OmltConstraint( + b.eq_15 = constraint_factory.new_constraint( expr=layer_block.z[output_index] == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression, lang=net_block._format, diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index feb3e9ba..f4c91aa9 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,6 +1,6 @@ import pyomo.environ as pyo -from omlt.base import OmltConstraint, OmltVar +from omlt.base import OmltConstraintFactory, OmltVarFactory from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.neuralnet.activations import ( ACTIVATION_FUNCTION_MAP as _DEFAULT_ACTIVATION_FUNCTIONS, @@ -160,10 +160,14 @@ def _build_neural_network_formulation( block.layers = pyo.Set(initialize=[id(layer) for layer in layers], ordered=True) # create the z and z_hat variables for each of the layers + var_factory = OmltVarFactory() + @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, initialize=0, lang=block._format) + b.z = var_factory.new_var( + net_layer.output_indexes, initialize=0, lang=block._format + ) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -172,7 +176,9 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, initialize=0, lang=block._format) + b.zhat = var_factory.new_var( + net_layer.output_indexes, initialize=0, lang=block._format + ) return b @@ -196,12 +202,13 @@ def layer(b, layer_id): # setup input variables constraints # currently only support a single input layer + constraint_factory = OmltConstraintFactory() input_layers = list(net.input_layers) if len(input_layers) != 1: raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = 
input_layers[0] - block.input_assignment = OmltConstraint( + block.input_assignment = constraint_factory.new_constraint( input_layer.output_indexes, lang=block._format ) for output_index in input_layer.output_indexes: @@ -217,7 +224,7 @@ def layer(b, layer_id): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - block.output_assignment = OmltConstraint( + block.output_assignment = constraint_factory.new_constraint( output_layer.output_indexes, lang=block._format ) for output_index in output_layer.output_indexes: @@ -393,6 +400,7 @@ def z(b, *output_index): # setup output variable constraints # currently only support a single output layer + constraint_factory = OmltConstraintFactory() output_layers = list(net.output_layers) if len(output_layers) != 1: msg = ( @@ -402,7 +410,7 @@ def z(b, *output_index): raise ValueError(msg) output_layer = output_layers[0] - block.output_assignment = OmltConstraint( + block.output_assignment = constraint_factory.new_constraint( output_layer.output_indexes, lang=block._format ) for output_index in output_layer.output_indexes: @@ -498,11 +506,15 @@ def _build_formulation(self): block.layers = pyo.Set(initialize=[id(layer) for layer in layers], ordered=True) # create the z and z_hat variables for each of the layers + var_factory = OmltVarFactory() + @block.Block(block.layers) def layer(b, layer_id): b._format = block._format net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, lang=b._format, initialize=0) + b.z = var_factory.new_var( + net_layer.output_indexes, lang=b._format, initialize=0 + ) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -511,7 +523,9 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, lang=b._format, initialize=0) + b.zhat = var_factory.new_var( + net_layer.output_indexes, lang=b._format, initialize=0 + ) return b @@ -545,12 +559,13 @@ def layer(b, layer_id): # setup input variables constraints # currently only support a single input layer + constraint_factory = OmltConstraintFactory() input_layers = list(net.input_layers) if len(input_layers) != 1: raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] - block.input_assignment = OmltConstraint( + block.input_assignment = constraint_factory.new_constraint( input_layer.output_indexes, lang=block._format ) for output_index in input_layer.output_indexes: @@ -566,7 +581,7 @@ def layer(b, layer_id): raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] - block.output_assignment = OmltConstraint( + block.output_assignment = constraint_factory.new_constraint( output_layer.output_indexes, lang=block._format ) for output_index in output_layer.output_indexes: diff --git a/tests/base/test_block.py b/tests/base/test_block.py index 82445e3d..80af0b70 100644 --- a/tests/base/test_block.py +++ b/tests/base/test_block.py @@ -1,8 +1,6 @@ import pyomo.environ as pyo import pytest from omlt import OmltBlock -from omlt.base import OmltVar -from omlt.dependencies import julia_available INPUTS_LENGTH = 3 OUTPUTS_LENGTH = 2 diff --git a/tests/base/test_constraint.py b/tests/base/test_constraint.py index 88ba2f96..10117952 100644 --- a/tests/base/test_constraint.py +++ b/tests/base/test_constraint.py @@ -1,23 +1,25 @@ import pyomo.environ as pyo import pytest from omlt.base import ( - OmltConstraint, - OmltConstraintIndexed, - OmltConstraintScalar, - OmltScalar, + 
OmltConstraintFactory, + OmltVarFactory, ) VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 +var_factory = OmltVarFactory() +constraint_factory = OmltConstraintFactory() + + def test_build_constraint(): - v1 = OmltScalar() + v1 = var_factory.new_var() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - v2 = OmltScalar() + v2 = var_factory.new_var() v2.domain = pyo.Integers v2.value = VAR2_VALUE e2 = v2 + CONST_VALUE @@ -53,25 +55,24 @@ def test_build_constraint(): assert c_le_const.rhs == CONST_VALUE assert c_le_const() == VAR1_VALUE + CONST_VALUE + def test_constraint_invalid_lang(): - expected_msg = ( - "Constraint format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'." - ) + expected_msg = "Constraint format %s not recognized. Supported formats are %s" - with pytest.raises(ValueError, match=expected_msg): - OmltConstraintScalar(lang="test") + with pytest.raises(KeyError, match=expected_msg): + constraint_factory.new_constraint(lang="test") + + with pytest.raises(KeyError, match=expected_msg): + constraint_factory.new_constraint(range(3), lang="test") - with pytest.raises(ValueError, match=expected_msg): - OmltConstraintIndexed(range(3), lang="test") def test_constraint_invalid_index(): - v1 = OmltScalar() + v1 = var_factory.new_var() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - c = OmltConstraint(range(3)) + c = constraint_factory.new_constraint(range(3)) expected_msg = "Couldn't find index %s in index set %s." with pytest.raises(KeyError, match=expected_msg): c[4] = e1 >= 0 diff --git a/tests/base/test_expression.py b/tests/base/test_expression.py index fe81128c..7ce53ff8 100644 --- a/tests/base/test_expression.py +++ b/tests/base/test_expression.py @@ -1,15 +1,16 @@ -import omlt.base.pyomo as pobjects import pyomo.environ as pyo import pytest -from omlt.base import OmltExpr, OmltScalar, OmltVar +from omlt.base import OmltExpr, OmltVarFactory VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 +var_factory = OmltVarFactory() + def _test_build_scalar_expressions(lang): - v1 = OmltScalar(lang=lang) - v2 = OmltScalar(lang=lang) + v1 = var_factory.new_var(lang=lang) + v2 = var_factory.new_var(lang=lang) v1.domain = pyo.Integers v2.domain = pyo.Integers @@ -53,7 +54,7 @@ def test_build_scalar_exp_pyomo(): _test_build_scalar_expressions("pyomo") def test_init_scalar_expression(): - v1 = OmltVar() + v1 = var_factory.new_var() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE @@ -97,12 +98,12 @@ def test_init_scalar_expression(): OmltExpr(expr=((e1, "-", "test"), "+", CONST_VALUE)) def test_combine_scalar_expression(): - v1 = OmltScalar() + v1 = var_factory.new_var() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - v2 = OmltScalar() + v2 = var_factory.new_var() v2.domain = pyo.Integers v2.value = VAR2_VALUE e2 = v2 + CONST_VALUE @@ -133,4 +134,4 @@ def test_combine_scalar_expression(): assert r_diff() == -VAR1_VALUE r_prod = CONST_VALUE * e1 - assert r_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE \ No newline at end of file + assert r_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE diff --git a/tests/base/test_var.py b/tests/base/test_var.py index 6eb49c03..cf9a9fa3 100644 --- a/tests/base/test_var.py +++ b/tests/base/test_var.py @@ -1,12 +1,13 @@ - import pyomo.environ as pyo import pytest -from omlt.base import OmltVar +from omlt.base import OmltVarFactory from omlt.dependencies import julia_available +var_factory = OmltVarFactory() + def _test_scalar_var(lang): - v = OmltVar(lang=lang, 
initialize=2) + v = var_factory.new_var(lang=lang, initialize=2) assert v._parent is None assert v._constructed is False assert v.name is None @@ -21,7 +22,6 @@ def _test_scalar_var(lang): v.value = 3 assert v.value == 3 - v.fix(2, skip_validation=True) v.bounds = (0, 5) assert v.lb == 0 @@ -44,16 +44,15 @@ def test_scalar_pyomo(): def test_scalar_jump(): _test_scalar_var("jump") + def test_scalar_invalid_lang(): - expected_msg = ( - "Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'." - ) - with pytest.raises(ValueError, match=expected_msg): - OmltVar(lang="test") + expected_msg = "Variable format %s not recognized. Supported formats are %s" + with pytest.raises(KeyError, match=expected_msg): + var_factory.new_var(lang="test") + def _test_indexed_var(lang): - v = OmltVar(range(4), lang=lang, initialize=2) + v = var_factory.new_var(range(4), lang=lang, initialize=2) assert v._parent is None assert v._constructed is False assert v.is_indexed() is True @@ -65,7 +64,6 @@ def _test_indexed_var(lang): v.value = 3 assert v.value == 3 - v.fix(2, skip_validation=True) for e in v: assert v[e].value == 2 @@ -80,13 +78,14 @@ def _test_indexed_var(lang): v.domain = pyo.Integers assert v.domain == pyo.Integers + def test_indexed_pyomo(): _test_indexed_var("pyomo") + def test_indexed_invalid_lang(): expected_msg = ( - "Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'." - ) - with pytest.raises(ValueError, match=expected_msg): - OmltVar(range(3), lang="test") + "Variable format %s not recognized. Supported formats are %s" + ) + with pytest.raises(KeyError, match=expected_msg): + var_factory.new_var(range(3), lang="test") diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index 67a427d7..8973ca1a 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -3,7 +3,7 @@ import pyomo.environ as pe import pytest from omlt import OmltBlock -from omlt.base import OmltIndexed, OmltVar +from omlt.base import OmltVarFactory from omlt.dependencies import onnx, onnx_available from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel @@ -14,17 +14,20 @@ SINGLE_LEAVES = 20 SPLITS = 140 +var_factory = OmltVarFactory() + @pytest.mark.skip("Francesco and Alex need to check this test") def test_formulation_with_continuous_variables(): model = onnx.load(Path(__file__).parent / "continuous_model.onnx") m = pe.ConcreteModel() - m.x = OmltIndexed(range(4), bounds=(-2.0, 2.0)) + + m.x = var_factory.new_var(range(4), bounds=(-2.0, 2.0)) m.x[3].setlb(0.0) m.x[3].setub(1.0) - m.z = OmltVar() + m.z = var_factory.new_var() m.gbt = OmltBlock() m.gbt.build_formulation(GBTBigMFormulation(GradientBoostedTreeModel(model))) diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py index 8399cab9..30e3a1a2 100644 --- a/tests/linear_tree/test_lt_formulation.py +++ b/tests/linear_tree/test_lt_formulation.py @@ -1,7 +1,6 @@ import numpy as np import pyomo.environ as pe import pytest -from omlt.base import OmltScalar, OmltVar from omlt.dependencies import lineartree_available if lineartree_available: @@ -170,8 +169,8 @@ def test_bigm_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="bigm") model1 = pe.ConcreteModel() - model1.x = OmltScalar(initialize=0) - model1.y = OmltScalar(initialize=0) + model1.x = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = 
pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -204,8 +203,8 @@ def test_hull_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="hull") model1 = pe.ConcreteModel() - model1.x = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -238,8 +237,8 @@ def test_mbigm_formulation_single_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="mbigm") model1 = pe.ConcreteModel() - model1.x = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -272,8 +271,8 @@ def test_hybrid_bigm_formulation_single_var(): formulation1_lt = LinearTreeHybridBigMFormulation(ltmodel_small) model1 = pe.ConcreteModel() - model1.x = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -469,9 +468,9 @@ def test_bigm_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="bigm") model1 = pe.ConcreteModel() - model1.x0 = OmltVar(initialize=0) - model1.x1 = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x0 = pe.Var(initialize=0) + model1.x1 = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -511,9 +510,9 @@ def test_hull_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="hull") model1 = pe.ConcreteModel() - model1.x0 = OmltVar(initialize=0) - model1.x1 = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x0 = pe.Var(initialize=0) + model1.x1 = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -553,9 +552,9 @@ def test_mbigm_formulation_multi_var(): formulation1_lt = LinearTreeGDPFormulation(ltmodel_small, transformation="mbigm") model1 = pe.ConcreteModel() - model1.x0 = OmltVar(initialize=0) - model1.x1 = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x0 = pe.Var(initialize=0) + model1.x1 = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) @@ -595,9 +594,9 @@ def test_hybrid_bigm_formulation_multi_var(): formulation1_lt = LinearTreeHybridBigMFormulation(ltmodel_small) model1 = pe.ConcreteModel() - model1.x0 = OmltVar(initialize=0) - model1.x1 = OmltVar(initialize=0) - model1.y = OmltVar(initialize=0) + model1.x0 = pe.Var(initialize=0) + model1.x1 = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) model1.obj = pe.Objective(expr=1) model1.lt = OmltBlock() model1.lt.build_formulation(formulation1_lt) From 328fb229c1ab7be6f4960fd17ff80fb8afd794ba Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 2 Aug 2024 19:52:09 +0000 Subject: [PATCH 71/75] Removing Julia pieces (for now) and more mypy cleanup --- src/omlt/base/__init__.py | 3 +- src/omlt/base/expression.py | 43 
++- src/omlt/base/julia.py | 498 ---------------------------------- src/omlt/base/pyomo.py | 19 +- src/omlt/base/var.py | 35 ++- tests/base/test_expression.py | 66 +++-- 6 files changed, 102 insertions(+), 562 deletions(-) delete mode 100644 src/omlt/base/julia.py diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 377bd08a..95b95fce 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -10,7 +10,7 @@ OmltConstraintIndexed, OmltConstraintScalar, ) -from omlt.base.expression import OmltExpr +from omlt.base.expression import OmltExpr, OmltExprFactory from omlt.base.pyomo import ( OmltConstraintIndexedPyomo, OmltConstraintScalarPyomo, @@ -26,6 +26,7 @@ # "jl", # "jump", "OmltExpr", + "OmltExprFactory", "OmltScalar", "OmltIndexed", "OmltVar", diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index a70015c4..5665ca74 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC, abstractmethod from typing import Any @@ -7,19 +9,7 @@ class OmltExpr(ABC): - def __new__(cls, lang=DEFAULT_MODELING_LANGUAGE, **kwargs: Any): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if lang not in subclass_map: - msg = ( - "Expression format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'.", - lang, - ) - raise ValueError(msg) - subclass = subclass_map[lang] - instance = super().__new__(subclass) - instance._format = lang - return instance + format: str | None = None @property def ctype(self): @@ -60,3 +50,30 @@ def arg(self, index): @abstractmethod def nargs(self): """Return the number of arguments.""" + +class OmltExprFactory: + def __init__(self): + self.exprs = { + subclass.format: subclass + for subclass in OmltExpr.__subclasses__() + } + + def register(self, lang, varclass): + if lang is None: + lang = varclass.format + if lang in self.exprs: + msg = ("Expression format %s is already registered.", lang) + raise KeyError(msg) + self.exprs[lang] = varclass + + def new_expression( + self, lang: str | None = DEFAULT_MODELING_LANGUAGE, **kwargs: Any + ) -> Any: + if lang not in self.exprs: + msg = ( + "Expression format %s not recognized. 
Supported formats are %s", + lang, + list(self.exprs.keys()), + ) + raise KeyError(msg) + return self.exprs[lang](**kwargs) diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py deleted file mode 100644 index 883b9c10..00000000 --- a/src/omlt/base/julia.py +++ /dev/null @@ -1,498 +0,0 @@ -import pyomo.environ as pyo - -from omlt.base.var import OmltIndexed, OmltScalar -from omlt.dependencies import julia_available - -if julia_available: - from juliacall import Base - from juliacall import Main as Jl - - jl_err = Base.error - Jl.seval("import JuMP") - jump = Jl.JuMP - -# Elements - -class JuMPVarInfo: - def __init__( - self, - lower_bound=None, - upper_bound=None, - fixed_value=None, - start_value=None, - *, - binary=False, - integer=False, - ): - self.has_lb = lower_bound is not None - self.lb = lower_bound - self.has_ub = upper_bound is not None - self.ub = upper_bound - self.has_fix = fixed_value is not None - self.fixed_value = fixed_value - self.has_start = start_value is not None - self.start_value = start_value - self.binary = binary - self.integer = integer - - @property - def lower_bound(self): - return self.lb - - @lower_bound.setter - def lower_bound(self, value=None): - self.lb = value - self.has_lb = value is not None - - def setlb(self, value): - self.lower_bound = value - - @property - def upper_bound(self): - return self.ub - - @upper_bound.setter - def upper_bound(self, value=None): - self.ub = value - self.has_ub = value is not None - - def setub(self, value): - self.upper_bound = value - - def to_jump(self): - return jump.VariableInfo( - self.has_lb, - self.lower_bound, - self.has_ub, - self.upper_bound, - self.has_fix, - self.fixed_value, - self.has_start, - self.start_value, - self.binary, - self.integer, - ) - - -class JumpVar: - def __init__(self, varinfo: JuMPVarInfo, name): - self.info = varinfo - self.name = name - self.omltvar = None - self.index = None - self.construct() - - def __str__(self): - return self.name - - def setlb(self, value): - self.info.setlb(value) - self.construct() - - def setub(self, value): - self.info.setlb(value) - self.construct() - - def construct(self): - self.var = jump.build_variable(Base.error, self.info.to_jump()) - - @property - def value(self): - return self.var.info.start - - def add_to_model(self, model, name=None): - if name is None: - name = self.name - return jump.add_variable(model, self.var, name) - - def to_jump(self): - return self.var - - def __add__(self, other): - return (self.omltvar + other)[self.index] - - def __sub__(self, other): - return (self.omltvar - other)[self.index] - - def __mul__(self, other): - return (self.omltvar * other)[self.index] - - def __eq__(self, other): - return (self.omltvar == other)[self.index] - -# Variables - -class OmltScalarJuMP(OmltScalar): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.ScalarVar - - def __init__(self, **kwargs): - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = self._bounds[0] - _ub = self._bounds[1] - elif self._bounds is None: - _lb = None - _ub = None - else: - msg = ("Bounds must be given as a tuple.", self._bounds) - raise ValueError(msg) - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - msg = ( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. 
Please try " - "with a single keyword for the domain of this " - "variable." - ) - raise ValueError(msg) - if _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - if isinstance(_initialize, (int, float)): - self._value = _initialize - elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): - self._value = _initialize[0] - else: - # Pyomo's "scalar" variables can be multidimensional, they're - # just not indexed. JuMP scalar variables can only be a single - # dimension. Rewrite this error to be more helpful. - msg = ( - "Initial value for JuMP variables must be an int" - f" or float, but {type(_initialize)} was provided." - ) - raise ValueError(msg) - else: - self._value = None - - self._varinfo = JuMPVarInfo( - _lb, - _ub, - None, # fix value - self._value, - self.binary, - self.integer, - ) - self._constructed = False - self._parent = None - self._ctype = pyo.ScalarVar - self._name = None - - def construct(self, data=None): - self._var = JumpVar(self._varinfo, self._name) - self._var.omltvar = self - self._constructed = True - if self._parent: - self._blockvar = jump.add_variable( - self._parent()._jumpmodel, self.to_jumpvar() - ) - - def fix(self, value, *, skip_validation=True): - self.fixed = True - self._value = value - self._varinfo.fixed_value = value - self._varinfo.has_fix = value is not None - if self._constructed: - self.construct() - - @property - def bounds(self): - return (self.lb, self.ub) - - @bounds.setter - def bounds(self, val): - if val is None: - self.lb = None - self.ub = None - elif len(val) == 2: - self.lb = val[0] - self.ub = val[1] - - @property - def lb(self): - return self._varinfo.lower_bound - - @lb.setter - def lb(self, val): - self._varinfo.setlb(val) - if self._constructed: - self.construct() - - @property - def ub(self): - return self._varinfo.upper_bound - - @ub.setter - def ub(self, val): - self._varinfo.setub(val) - if self._constructed: - self.construct() - - @property - def value(self): - if self._constructed: - return self._var.value - return self._varinfo.start_value - - @value.setter - def value(self, val): - if self._constructed: - self._var.value = val - else: - self._varinfo.start_value = val - - @property - def ctype(self): - return self._ctype - - @property - def name(self): - return self._name - - @name.setter - def name(self, value): - self._name = value - - def to_jumpvar(self): - if self._constructed: - return self._var.to_jump() - return self._varinfo.to_jump() - - def to_jumpexpr(self): - return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) - -class OmltIndexedJuMP(OmltIndexed): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. 
- @property - def __class__(self): - return pyo.Var - - def __init__(self, *indexes, **kwargs: Any): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - else: - msg = ("Currently index cross-products are unsupported.") - raise ValueError(msg) - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set): - _lb = {k: v[0] for k, v in self._bounds.items()} - _ub = {k: v[1] for k, v in self._bounds.items()} - elif isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = {i: self._bounds[0] for i in self._index_set} - _ub = {i: self._bounds[1] for i in self._index_set} - elif self._bounds is None: - _lb = {i: None for i in self._index_set} - _ub = {i: None for i in self._index_set} - else: - msg = ( - "Bounds must be given as a tuple," " but %s was given.", self._bounds - ) - raise TypeError(msg) - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - msg = ( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - raise ValueError(msg) - if _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - # If starting values have same length as index set, - # take one for each variable in index. - if len(self._index_set) == len(_initialize): - self._value = _initialize - # If there's a single starting value, use it for all - # variables in index. 
- elif len(_initialize) == 1: - self._value = {i: _initialize[0] for i in self._index_set} - else: - msg = ( - "Index set has length %s, but initializer has length %s.", - len(self._index_set), - len(_initialize), - ) - raise ValueError(msg) - else: - self._value = {i: None for i in self._index_set} - - self._varinfo = {} - for idx in self._index_set: - self._varinfo[idx] = JuMPVarInfo( - _lb[idx], - _ub[idx], - None, # fix value - self._value[idx], - self.binary, - self.integer, - ) - self._vars = {} - self._varrefs = {} - self._constructed = False - self._ctype = pyo.Var - self._parent = None - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._vars[item[0]] - return self._vars[item] - - def __setitem__(self, item, value): - self._varinfo[item] = value - if self._constructed: - self.construct() - - def keys(self): - if self._parent is not None: - return self._varrefs.keys() - return self._vars.keys() - - def values(self): - if self._parent is not None: - return self._varrefs.values() - return self._vars.values() - - def items(self): - if self._parent is not None: - return self._varrefs.items() - return self._vars.items() - - def fix(self, value=None): - self.fixed = True - if value is not None: - for vardata in self._varinfo(): - vardata.has_fix = True - vardata.fixed_value = value - else: - for vardata in self._varinfo(): - vardata.has_fix = True - - def __len__(self): - """Return the number of component data objects stored by this component.""" - return len(self._vars) - - def __contains__(self, idx): - """Return true if the index is in the dictionary.""" - return idx in self._vars - - # The default implementation is for keys() and __iter__ to be - # synonyms. The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys.""" - return self._vars.__iter__() - - def construct(self, data=None): - for idx in self._index_set: - if isinstance(idx, int): - name = str(self.name) + "[" + str(idx) + "]" - else: - name = str(self.name) + str(list(idx)).replace(" ", "") - self._vars[idx] = JumpVar(self._varinfo[idx], name) - self._vars[idx].omltvar = self - self._vars[idx].index = idx - if self._parent is not None: - block = self._parent() - if block._format == "jump" and block._jumpmodel is not None: - self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) - - self._constructed = True - - def setub(self, value): - for idx in self.index_set(): - self._varinfo[idx][2] = True - self._varinfo[idx][3] = value - if self._constructed: - self.construct() - - def setlb(self, value): - for idx in self.index_set(): - self._varinfo[idx][0] = True - self._varinfo[idx][1] = value - if self._constructed: - self.construct() - - @property - def ctype(self): - return self._ctype - - def index_set(self): - return self._index_set - - @property - def name(self): - return self._name - - def to_jumpvar(self): - if self._constructed: - return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) - msg = "Variable must be constructed before exporting to JuMP." 
- raise ValueError(msg) - - def to_jumpexpr(self): - return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} - - -# Constraints - -# Expressions diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 1d0037ef..ce96ede1 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -10,7 +10,7 @@ from pyomo.core.base.var import _GeneralVarData from omlt.base.constraint import OmltConstraintIndexed, OmltConstraintScalar -from omlt.base.expression import OmltExpr +from omlt.base.expression import OmltExpr, OmltExprFactory from omlt.base.var import OmltIndexed, OmltScalar # Variables @@ -20,6 +20,7 @@ class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): format = "pyomo" def __init__(self, *args: Any, **kwargs: Any): + OmltScalar.__init__(self) kwargs.pop("lang", None) self._format = "pyomo" self._pyovar = pyo.ScalarVar(*args, **kwargs) @@ -266,6 +267,8 @@ def __init__(self, expr=None, **kwargs): self.name = None self.__class__ = type(self._expression) self._args_ = self._expression._args_ + self._format = "pyomo" + self.expr_factory = OmltExprFactory() def _parse_expression_tuple_term(self, term): if isinstance(term, tuple): @@ -342,49 +345,49 @@ def __add__(self, other): expr = self._expression + other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression + other - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __sub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression - other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression - other - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __mul__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression * other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression * other - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __div__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = self._expression / other._expression elif isinstance(other, (int, float, pyo.Expression)): expr = self._expression / other - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __radd__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression + self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other + self._expression - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __rsub__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression - self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other - self._expression - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __rmul__(self, other): if isinstance(other, OmltExprScalarPyomo): expr = other._expression * self._expression elif isinstance(other, (int, float, pyo.Expression)): expr = other * self._expression - return OmltExpr(lang=self._format, expr=expr) + return self.expr_factory.new_expression(lang=self._format, expr=expr) def __ge__(self, other): if isinstance(other, OmltExprScalarPyomo): diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 8158d843..1e61378c 100644 --- 
a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -49,6 +49,9 @@ def valid_model_component(self): class OmltScalar(OmltVar): format: str | None = None + def __init__(self): + self.expr_factory = expression.OmltExprFactory() + def is_indexed(self): return False @@ -107,28 +110,44 @@ def value(self, val): # Interface governing how variables behave in expressions. def __add__(self, other): - return expression.OmltExpr(lang=self.format, expr=(self, "+", other)) + return self.expr_factory.new_expression( + lang=self.format, expr=(self, "+", other) + ) def __sub__(self, other): - return expression.OmltExpr(lang=self.format, expr=(self, "-", other)) + return self.expr_factory.new_expression( + lang=self.format, expr=(self, "-", other) + ) def __mul__(self, other): - return expression.OmltExpr(lang=self.format, expr=(self, "*", other)) + return self.expr_factory.new_expression( + lang=self.format, expr=(self, "*", other) + ) def __truediv__(self, other): - return expression.OmltExpr(lang=self.format, expr=(self, "/", other)) + return self.expr_factory.new_expression( + lang=self.format, expr=(self, "/", other) + ) def __radd__(self, other): - return expression.OmltExpr(lang=self.format, expr=(other, "+", self)) + return self.expr_factory.new_expression( + lang=self.format, expr=(other, "+", self) + ) def __rsub__(self, other): - return expression.OmltExpr(lang=self.format, expr=(other, "-", self)) + return self.expr_factory.new_expression( + lang=self.format, expr=(other, "-", self) + ) def __rmul__(self, other): - return expression.OmltExpr(lang=self.format, expr=(other, "*", self)) + return self.expr_factory.new_expression( + lang=self.format, expr=(other, "*", self) + ) def __rtruediv__(self, other): - return expression.OmltExpr(lang=self.format, expr=(other, "/", self)) + return self.expr_factory.new_expression( + lang=self.format, expr=(other, "/", self) + ) class OmltIndexed(OmltVar): diff --git a/tests/base/test_expression.py b/tests/base/test_expression.py index 7ce53ff8..34e1d3e7 100644 --- a/tests/base/test_expression.py +++ b/tests/base/test_expression.py @@ -1,12 +1,14 @@ import pyomo.environ as pyo import pytest -from omlt.base import OmltExpr, OmltVarFactory +from omlt.base import OmltExpr, OmltExprFactory, OmltVarFactory VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 var_factory = OmltVarFactory() +expr_factory = OmltExprFactory() + def _test_build_scalar_expressions(lang): v1 = var_factory.new_var(lang=lang) @@ -18,48 +20,49 @@ def _test_build_scalar_expressions(lang): v2.value = VAR2_VALUE v_sum = v1 + v2 - assert(isinstance(v_sum, OmltExpr)) - assert(v_sum() == VAR1_VALUE + VAR2_VALUE) + assert isinstance(v_sum, OmltExpr) + assert v_sum() == VAR1_VALUE + VAR2_VALUE v_diff = v1 - v2 - assert(isinstance(v_diff, OmltExpr)) - assert(v_diff() == VAR1_VALUE - VAR2_VALUE) + assert isinstance(v_diff, OmltExpr) + assert v_diff() == VAR1_VALUE - VAR2_VALUE v_prod = v1 * v2 - assert(isinstance(v_prod, OmltExpr)) - assert(v_prod() == VAR1_VALUE * VAR2_VALUE) + assert isinstance(v_prod, OmltExpr) + assert v_prod() == VAR1_VALUE * VAR2_VALUE v_quot = v1 / v2 - assert(isinstance(v_quot, OmltExpr)) - assert(v_quot() == VAR1_VALUE / VAR2_VALUE) + assert isinstance(v_quot, OmltExpr) + assert v_quot() == VAR1_VALUE / VAR2_VALUE v_radd = CONST_VALUE + v1 - assert(isinstance(v_radd, OmltExpr)) - assert(v_radd() == CONST_VALUE + VAR1_VALUE) + assert isinstance(v_radd, OmltExpr) + assert v_radd() == CONST_VALUE + VAR1_VALUE v_rsub = CONST_VALUE - v1 - assert(isinstance(v_rsub, OmltExpr)) - 
assert(v_rsub() == CONST_VALUE - VAR1_VALUE) + assert isinstance(v_rsub, OmltExpr) + assert v_rsub() == CONST_VALUE - VAR1_VALUE v_rprod = CONST_VALUE * v1 - assert(isinstance(v_rprod, OmltExpr)) - assert(v_rprod() == CONST_VALUE * VAR1_VALUE) + assert isinstance(v_rprod, OmltExpr) + assert v_rprod() == CONST_VALUE * VAR1_VALUE v_rquot = CONST_VALUE / v1 - assert(isinstance(v_rquot, OmltExpr)) - assert(v_rquot() == CONST_VALUE / VAR1_VALUE) + assert isinstance(v_rquot, OmltExpr) + assert v_rquot() == CONST_VALUE / VAR1_VALUE def test_build_scalar_exp_pyomo(): _test_build_scalar_expressions("pyomo") + def test_init_scalar_expression(): v1 = var_factory.new_var() v1.domain = pyo.Integers v1.value = VAR1_VALUE e1 = v1 + CONST_VALUE - e2 = OmltExpr(expr=e1) + e2 = expr_factory.new_expression(expr=e1) assert e2.ctype == pyo.Expression assert e2.is_component_type() @@ -74,28 +77,24 @@ def test_init_scalar_expression(): assert len(e2) == 1 assert e2() == VAR1_VALUE + CONST_VALUE - expected_msg = ( - "Expression %s type %s not recognized." - ) + expected_msg = "Expression %s type %s not recognized." with pytest.raises(TypeError, match=expected_msg): - OmltExpr(expr="test") + expr_factory.new_expression(expr="test") - expected_msg = ( - "Expression format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'." - ) - with pytest.raises(ValueError, match=expected_msg): - OmltExpr(lang="test") + expected_msg = "Expression format %s not recognized. Supported formats are %s" + with pytest.raises(KeyError, match=expected_msg): + expr_factory.new_expression(lang="test") expected_msg = "Expression middle term was {%s}." with pytest.raises(ValueError, match=expected_msg): - OmltExpr(expr=(v1, "test", CONST_VALUE)) + expr_factory.new_expression(expr=(v1, "test", CONST_VALUE)) expected_msg = "Term of expression %s is an unsupported type. 
%s" with pytest.raises(TypeError, match=expected_msg): - OmltExpr(expr=((e1, "-", "test"), "+", CONST_VALUE)) + expr_factory.new_expression(expr=((e1, "-", "test"), "+", CONST_VALUE)) + def test_combine_scalar_expression(): v1 = var_factory.new_var() @@ -109,7 +108,7 @@ def test_combine_scalar_expression(): e2 = v2 + CONST_VALUE e_sum = e1 + e2 - assert e_sum() == VAR1_VALUE + VAR2_VALUE + 2*CONST_VALUE + assert e_sum() == VAR1_VALUE + VAR2_VALUE + 2 * CONST_VALUE e_diff = e1 - e2 assert e_diff() == VAR1_VALUE - VAR2_VALUE @@ -117,9 +116,8 @@ def test_combine_scalar_expression(): e_prod = e1 * e2 assert e_prod() == (VAR1_VALUE + CONST_VALUE) * (VAR2_VALUE + CONST_VALUE) - p_sum = e1 + CONST_VALUE - assert p_sum() == VAR1_VALUE + 2*CONST_VALUE + assert p_sum() == VAR1_VALUE + 2 * CONST_VALUE p_diff = e1 - CONST_VALUE assert p_diff() == VAR1_VALUE @@ -128,7 +126,7 @@ def test_combine_scalar_expression(): assert p_prod() == (VAR1_VALUE + CONST_VALUE) * CONST_VALUE r_sum = CONST_VALUE + e1 - assert r_sum() == VAR1_VALUE + 2*CONST_VALUE + assert r_sum() == VAR1_VALUE + 2 * CONST_VALUE r_diff = CONST_VALUE - e1 assert r_diff() == -VAR1_VALUE From 1230eba02d6e2d636a4ef7632035b80f67704381 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun, 18 Aug 2024 00:00:00 +0000 Subject: [PATCH 72/75] Fixing indexed variables --- src/omlt/base/pyomo.py | 53 ++++++++++++++++++++++++++++++++++++++++-- src/omlt/base/var.py | 26 +++++++-------------- 2 files changed, 60 insertions(+), 19 deletions(-) diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index ce96ede1..27b7d364 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -89,15 +89,64 @@ def value(self, val): self._pyovar.value = val -class OmltIndexedPyomo(pyo.Var, OmltIndexed): +class OmltIndexedPyomo(OmltIndexed, pyo.Var): format = "pyomo" def __init__(self, *indexes: Any, **kwargs: Any): kwargs.pop("lang", None) self._format = "pyomo" - super().__init__(*indexes, **kwargs) + self._pyovar = pyo.Var(*indexes, **kwargs) + self._name = None + self._parent = None + self._constructed = self._pyovar._constructed + self._index_set = self._pyovar._index_set + self._rule_init = self._pyovar._rule_init + self._rule_domain = self._pyovar._rule_domain + self._rule_bounds = self._pyovar._rule_bounds + self._dense = self._pyovar._dense + self._data = self._pyovar._data + self._units = self._pyovar._units + self._implicit_subsets = self._pyovar._implicit_subsets + self.doc = self._pyovar.doc + self._ctype = pyo.Var self.bounds = (None, None) + @property + def ctype(self): + return pyo.Var + + def construct(self, data=None): + self._pyovar.construct(data) + + def is_constructed(self): + return self._pyovar.is_constructed() + + @property + def index_set(self): + return self._index_set + + @property + def name(self): + return self._name + + def items(self): + return self._pyovar.items() + + def keys(self): + return self._pyovar.keys() + + def values(self, sort=False): # noqa: FBT002 + return self._pyovar.values(sort) + + def __contains__(self, idx): + return idx in self.index_set + + def __getitem__(self, item): + return self._pyovar[item] + + def __len__(self): + return len(self.index_set) + def fix(self, value=None, *, skip_validation=False): self.fixed = True if value is None: diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 1e61378c..b20ce74f 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -159,49 +159,41 @@ def is_indexed(self): @property @abstractmethod def 
index_set(self): - pass + """Return the index set for the variable.""" # Bound-setting interface for indexed variables: @abstractmethod def setub(self, value): - pass + """Set upper bounds on all component variables.""" @abstractmethod def setlb(self, value): - pass + """Set lower bounds on all component variables.""" # Interface: act as a dict for the sub-variables. @abstractmethod def __getitem__(self, item): pass - @abstractmethod - def __setitem__(self, item, value): - pass - @abstractmethod def keys(self): - pass + """Return iterator over the index set.""" @abstractmethod - def values(self): - pass + def values(self, sort): + """Return iterator over the component variables.""" @abstractmethod def items(self): - pass + """Return iterator over the key-value pairs.""" @abstractmethod def __len__(self): - pass + """Return size of the index set.""" @abstractmethod def __contains__(self, idx): - pass - - @abstractmethod - def __iter__(self): - pass + """Return true if idx is in the index set.""" class OmltVarFactory: From b7108ac9bc8520c8218bce6683cf1e7c00c539e7 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:54:45 +0000 Subject: [PATCH 73/75] Cleaned up some unnecessary methods. --- src/omlt/base/constraint.py | 26 -------------------------- src/omlt/base/pyomo.py | 24 ------------------------ tests/base/test_constraint.py | 3 --- 3 files changed, 53 deletions(-) diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py index 286d82a0..cd881854 100644 --- a/src/omlt/base/constraint.py +++ b/src/omlt/base/constraint.py @@ -15,20 +15,10 @@ class OmltConstraint(ABC): def ctype(self): return pyo.Constraint - def is_component_type(self): - return True - - def is_expression_type(self, enum): - # The Pyomo ExpressionType.RELATIONAL is enum 1. 
- return enum.value == 1 - def valid_model_component(self): """Return True if this can be used as a model component.""" return True - @abstractmethod - def __call__(self, *args: Any, **kwds: Any) -> Any: - pass class OmltConstraintScalar(OmltConstraint): @@ -44,12 +34,6 @@ def __init__(self, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any): if rhs is not None: self.rhs = rhs if not lhs and not sense and not rhs: - expr_tuple = kwargs.pop("expr_tuple", None) - if expr_tuple and expr_tuple[1] in {"==", ">=", "<=", ">", "<", "in"}: - self.lhs = expr_tuple[0] - self.sense = expr_tuple[1] - self.rhs = expr_tuple[2] - if not lhs and not sense and not rhs and not expr_tuple: expr = kwargs.pop("expr", None) if isinstance(expr, EqualityExpression): self.lhs = expr.arg(0) @@ -64,8 +48,6 @@ def __init__(self, lang: str = DEFAULT_MODELING_LANGUAGE, **kwargs: Any): self.format = lang self._parent = None - def __call__(self, *args: Any, **kwds: Any) -> Any: - """Return the value of the body of the constraint.""" @property def args(self): @@ -84,9 +66,6 @@ def __init__( self.name = None self.format = lang - @abstractmethod - def __call__(self, *args: Any, **kwds: Any) -> Any: - pass def keys(self, sort=False): yield from self._index_set @@ -101,11 +80,6 @@ def _constructed(self): def _active(self): """Return True if the constraint is active.""" - @_active.setter - @abstractmethod - def _active(self, val): - """Set the constraint status to active or inactive.""" - @property @abstractmethod def _data(self): diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 27b7d364..5a12d9d6 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -197,9 +197,6 @@ def __init__(self, *args: Any, **kwargs: Any): self.constraint._parent = self._parent self.constraint.construct() - def __call__(self, *args: Any, **kwds: Any) -> Any: - return self.constraint.__call__(*args, **kwds) - @property def __class__(self): return type(self.constraint.expr) @@ -235,7 +232,6 @@ def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) kwargs.pop("model", None) kwargs.pop("lang", None) - kwargs.pop("expr_tuple", None) self.constraint = pyo.Constraint(*args, **kwargs) self._index_set = self.constraint._index_set @@ -267,9 +263,6 @@ def __getitem__(self, index): ) raise KeyError(msg) - def __call__(self, *args: Any, **kwds: Any) -> Any: - return self.constraint.__call__(*args, **kwds) - def __len__(self): return len(self.constraint) @@ -281,10 +274,6 @@ def _constructed(self): def _active(self): return self.constraint._active - @_active.setter - def _active(self, val): - self.constraint._active = val - @property def _data(self): return self.constraint._data @@ -314,7 +303,6 @@ def __init__(self, expr=None, **kwargs): self._parent = None self.name = None - self.__class__ = type(self._expression) self._args_ = self._expression._args_ self._format = "pyomo" self.expr_factory = OmltExprFactory() @@ -350,8 +338,6 @@ def _parse_expression_tuple(self, expr): msg = ("Expression middle term was {%s}.", expr[1]) raise ValueError(msg) - def __class__(self): - return type(self._expression) def is_potentially_variable(self): return self._expression.is_potentially_variable() @@ -359,16 +345,6 @@ def is_potentially_variable(self): def as_numeric(self): return self._expression._apply_operation(self._expression.args) - def construct(self, data=None): - return self._expression.construct(data) - - @property - def _constructed(self): - return self._expression.expr._constructed - - @property - def 
const(self): - return self._expression.const @property def args(self): diff --git a/tests/base/test_constraint.py b/tests/base/test_constraint.py index 10117952..e85e5fa8 100644 --- a/tests/base/test_constraint.py +++ b/tests/base/test_constraint.py @@ -41,19 +41,16 @@ def test_build_constraint(): assert c_le_expressions.sense == "<=" assert id(c_le_expressions.rhs) == id(e2._expression) - assert c_le_expressions() == VAR1_VALUE - VAR2_VALUE c_le_var = e1 <= v2 assert c_le_var.sense == "<=" assert id(c_le_var.rhs) == id(v2._pyovar) - assert c_le_var() == VAR1_VALUE - VAR2_VALUE + CONST_VALUE c_le_const = e1 <= CONST_VALUE assert c_le_const.sense == "<=" assert c_le_const.rhs == CONST_VALUE - assert c_le_const() == VAR1_VALUE + CONST_VALUE def test_constraint_invalid_lang(): From 8b0f0cf044cddd09e6846a9fd80c7fa7656bead9 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 27 Sep 2024 22:32:48 +0000 Subject: [PATCH 74/75] Making blocks more generic --- src/omlt/block.py | 26 ++++++++++++++------------ tests/base/test_block.py | 2 +- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/omlt/block.py b/src/omlt/block.py index 3ffcea5d..0b20c42c 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -30,18 +30,7 @@ class is used in combination with a formulation object to construct the from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVarFactory -@declare_custom_block(name="OmltBlock") -class OmltBlockData(_BlockData): - def __init__(self, component): - super().__init__(component) - self.__formulation = None - self.__input_indexes = None - self.__output_indexes = None - self._format = DEFAULT_MODELING_LANGUAGE - - def set_format(self, lang): - self._format = lang - +class OmltBlockCore: def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """Setup inputs and outputs. 
@@ -115,3 +104,16 @@ def build_formulation(self, formulation, lang=None): # tell the formulation object to construct the necessary models self.__formulation._build_formulation() + + +@declare_custom_block(name="OmltBlock") +class OmltBlockData(_BlockData, OmltBlockCore): + def __init__(self, component): + super().__init__(component) + self.__formulation = None + self.__input_indexes = None + self.__output_indexes = None + self._format = DEFAULT_MODELING_LANGUAGE + + def set_format(self, lang): + self._format = lang diff --git a/tests/base/test_block.py b/tests/base/test_block.py index 80af0b70..bc0b0e39 100644 --- a/tests/base/test_block.py +++ b/tests/base/test_block.py @@ -45,7 +45,7 @@ def test_block(): formulation = DummyFormulation() m.b.build_formulation(formulation, lang="pyomo") - assert m.b._OmltBlockData__formulation is formulation + assert m.b._OmltBlockCore__formulation is formulation assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] From ac0912c38249a58db8d547bb97a474d11bed8813 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed, 2 Oct 2024 18:24:40 -0700 Subject: [PATCH 75/75] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 877aac1851bdf884c3c1e2e1b21df913d10f0a3e Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Oct 3 01:14:21 2024 +0000 Cleaning up integrated changes commit 6dfd2c067accec714bfec2b92d5d44ed176ef235 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Sep 27 22:32:48 2024 +0000 Making blocks more generic commit 164b179de4620be0d4246abd7ee43a2e2f63d788 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed Sep 18 23:54:45 2024 +0000 Cleaned up some unnecessary methods. 
commit d42cae2fa7f27ba0026c3168ecb9bd25932f8048 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun Aug 18 00:00:00 2024 +0000 Fixing indexed variables commit 6c648cae9cf225c6e42bf1d29f51a454701fea69 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Aug 2 19:52:09 2024 +0000 Removing Julia pieces (for now) and more mypy cleanup commit 905e5289d57b395f18865a4be91253180b91a993 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Aug 1 20:06:31 2024 +0000 Factory classes for vars and constraints commit 8b18e971904dc9f26a465f15eea679a27a0e3b2a Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Jul 22 23:41:25 2024 +0000 Improving test coverage commit 069c3ef7bf019e66ab52010cceaaed35d478093e Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Jul 15 21:49:18 2024 +0000 Adding tests commit fe433a3fe084c517b990035fd3a4a508df15648c Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jul 11 22:40:17 2024 +0000 Moving JuMP objects into their own file commit 79af2050a7ec07582ac8d84bfa72a9e2515f6a2c Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue Jul 9 21:20:36 2024 +0000 Fixing an issue with linear trees commit 8c459f137880937bf5171a510e455c5a6a960332 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue Jul 9 19:43:38 2024 +0000 Making block-level modelling language choices percolate through generated variables and constraints commit e01a352c62e7fa68ac70140202264f9d29aa5e28 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Jul 8 23:58:06 2024 +0000 Including OmltExpr and OmltConstraints, spreading Omlt classes throughout the codebase. 
commit c5b866b5ac5a07f35cd46f35433ea348d3650597 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 20:56:23 2024 +0000 linting (2) commit 98518f8844eb3bde810e729fba0a9f9aac6daec7 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 20:42:30 2024 +0000 linting (1) commit 6e5292daa546b39cb6d5dd5567c0e5a7976c7f41 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 13:33:16 2024 -0700 Delete .github/workflows/python-package.yml commit 7adf6e4628eaa09abdfa791698378fb7503c437c Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 20:22:56 2024 +0000 adding abstract methods to expression interface commit 1a8c1245e449fd101276d125a5773e7742d450cd Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 19:06:17 2024 +0000 further fixing commit 2764df183edb932b473edc7de6c0a5d99267e73f Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 18:58:51 2024 +0000 fixing variable initialization commit dd6939474de0894c8ac34c255f5267b3ddc8d47a Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 18:47:01 2024 +0000 tidying var.py commit 6e141d4acd783f6c897efa5db4d42388055d62c8 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 6 18:22:30 2024 +0000 cleanup in expression.py commit ef0885b9dc0a8ee2ba21af484c7894825c4eaebe Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed Jun 5 20:01:17 2024 +0000 Including OmltExpr expressions for the OmltVars commit a64f6d7c0bd52f524caf0ae736bf535403504b66 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:33:14 2024 -0700 Update setup.cfg commit 83ccaef1bed64ad3d491d87b77048ca6bce6cc80 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun Apr 21 18:05:47 2024 -0700 Update setup.cfg commit 63f0e5fa866118531edc426412c8a18b53021a92 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Mar 18 22:41:10 2024 -0700 Create python-package.yml commit ea1154cb0cc88c1d9b45647144cd8aa18e527185 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:45:42 2024 -0700 Update main.yml commit 7eecd26e942d2d622a70432f908826cafbaac34e Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:44:36 2024 -0700 Update main.yml commit f844c2d140e87c59b7a5f9a846c7fa158db752b9 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:42:42 2024 -0700 Update main.yml commit 9ab7fc33830c57287f44d0e18c11f09060443bbf Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:40:35 2024 -0700 Update main.yml commit ab2554240cec48c6079a9219a94fd2edb1215aaf Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:39:03 2024 -0700 Update setup.cfg for Keras version commit 61c8daffcba8a59661897b7e8005e9dd4b46139a Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri May 17 11:38:30 2024 -0700 Update Python versions in main.yml commit 0ae5b757dcc4d67791f26238266beb90d96927ad Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Apr 22 00:35:06 2024 +0000 Fixing some whitespace linting commit cbcefcb79cb0230ad0b052351385243d948c4d9e Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Apr 22 00:20:48 2024 +0000 restoring action workflow 
file commit c911bb061b404d99e57a0cd4c528d996b32255e1 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Apr 22 00:16:13 2024 +0000 removing tweaked action file commit c929d54febbcbcc3806624a7138e9be8804d57e4 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat Apr 20 23:21:18 2024 -0700 Fix Keras version at 2.9 Keras 3 requires models to have the .keras file format. Going forward we should probably update the test models to use this format, but to unblock I'm holding back the Keras version. commit 738f7fdf3f329d4b2781a595b99e01d217ab14dc Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat Apr 20 23:01:19 2024 -0700 Use tensorflow-cpu for testing to save space commit c4ab25702b5bf4b26ae4da69e745547b0739d177 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 17:43:43 2024 -0700 Make test for JuMP variables conditional on presence of JuMP commit 09c994532d443201fc886d383d795639dbf5cae5 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 17:35:36 2024 -0700 Update var.py commit 991dd3788f1d374f8d91dff8796e30fc151df146 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 17:29:08 2024 -0700 Update var.py commit b57848ae4f981cd1b69d3c9c5df2b9b1f7f63119 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 16:58:01 2024 -0700 Getting dependencies lined up correctly commit 1490f4218a7f1473ea0bb86ca95dcdd79c86609a Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 16:52:08 2024 -0700 Removing duplicate line commit ef42ba341d8d35517655e2e818f66330a7ebd8c4 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 19:19:29 2024 +0000 Cleaning up variables - MOI dependency commit fa62661344e44c5d4f4092c36231cb118adae681 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 19:19:29 2024 +0000 Cleaning up variables - MOI dependency commit 5dae012e56354b1b8868993005a5f7c3d374bb08 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri Apr 19 19:19:29 2024 +0000 Cleaning up variables commit 29b89bc873cf4cfb2204195cc7f7015f7f0fc784 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Apr 8 18:28:49 2024 +0000 Implementing JuMP format scalar and indexed variables. 
commit 3c2061120adbd48ea6704ab3f6e6202b22e3645d Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue Mar 19 00:58:12 2024 -0700 Removing ipopt from CI workflow commit 6e36c471086904cdba5b2da466c9bfb59a403c9b Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Mar 18 22:16:04 2024 -0700 Create main.yml copying CI workflow over commit 9178a1b81be758997a5afe3f29aea9e55303e7dd Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue Mar 19 02:05:50 2024 +0000 OmltVar wrapper class commit 0e86c9f2addae4ea814293b300683d26b46d26b0 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Tue Mar 19 02:05:50 2024 +0000 OmltVar wrapper class commit 7515f5781b801951ac89544f5e4e58449caf45d3 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Jun 24 05:29:48 2024 +0000 Fixing mypy typing errors commit 7bb6f0d1e5a53e70e590ddb28744dfed3ec53a70 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon Jun 24 05:29:48 2024 +0000 Fixing mypy typing errors commit ce6a94426a2f8a0e7cc304e470ac370b73f2557e Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun Jun 23 00:27:31 2024 +0000 Fixing ruff linting errors. commit 8a44751ec2f9e8fa913dd6e10d7ca2bca91705a5 Author: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu Jun 13 22:08:18 2024 +0000 Fixing initial batch of ruff errors commit 0379ec60b12d33485c28b9ba45aa12bd6201ffc0 Author: Lukas Turcani Date: Thu May 30 18:32:52 2024 +0100 Add back for mypy commit 2b0f991d35f655140234f58a78c00bd96a9cc546 Author: Lukas Turcani Date: Thu May 30 18:31:55 2024 +0100 remove unnecessary things commit 551530dcf5c10faaef9203674373f258a430bcc6 Author: Lukas Turcani Date: Thu May 30 18:21:20 2024 +0100 wip commit 4c40b8b9305eafc4887bf7d1536da63da14022e7 Author: Lukas Turcani Date: Thu May 30 18:20:48 2024 +0100 thing commit 8042185e7746fbc1a8855233c76743b94cd46524 Author: Lukas Turcani Date: Thu May 30 18:20:00 2024 +0100 wip commit fd4ef72eb98ea2e4f5bc2d4a529ddb31a74f9b30 Author: Lukas Turcani Date: Thu May 30 18:18:08 2024 +0100 add link commit 5492ddb7ffcc2d1d10d48ef732d04f722b0653a8 Author: Lukas Turcani Date: Thu May 30 18:03:26 2024 +0100 wip commit 3d056e9a4b6f6f960afa53b515e2f4703a36310a Author: Lukas Turcani Date: Thu May 30 12:57:50 2024 +0100 Add thing commit 0790eb84cbe443143671434cfd944dc90523d66a Author: Lukas Turcani Date: Thu May 30 11:37:53 2024 +0100 Thing commit e7357075444bdc09a3deb8c49881898d92689ce9 Author: Lukas Turcani Date: Thu May 30 11:34:35 2024 +0100 Add conda commit cc07a307e32a3961dedbd6faa060a41b5cd7e399 Author: Lukas Turcani Date: Wed May 29 16:16:38 2024 +0100 wip commit c58ec681330d1635bab3ad0315fb94154eaf2c4f Author: Lukas Turcani Date: Tue May 28 22:41:59 2024 +0100 wip commit 0a2671f49478936aa04f081362fb296d14333fbb Author: Lukas Turcani Date: Tue May 28 22:20:10 2024 +0100 Add workflows commit 321a2e213df0a4707184595c399aa52e9bdad69b Author: Jiří Němeček Date: Sat Aug 24 19:15:47 2024 +0200 Fixing 404 errors of links to notebooks in the documentation (#143) I assume that the notebooks have been moved, but the documentation links did not reflect that **Legal Acknowledgement**\ By contributing to this software project, I agree my contributions are submitted under the BSD license. I represent I am authorized to make the contributions and grant the license. 
If my employer has rights to intellectual property that includes these contributions, I represent that I have received permission to make contributions and grant the required license on behalf of that employer.

commit caebfc411a4e3bb43db57cf5c2082e2c7e5e41d0
Author: Andrew Lee
Date: Thu Aug 22 13:28:24 2024 -0400

Replace _BlockData with BlockData (#144)

Pyomo recently made ComponentData classes public (https://github.com/Pyomo/pyomo/pull/3221), which will be part of the upcoming release. Currently, this causes the following error to occur in OMLT:

```
TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
```

The Pyomo team is working to address this issue; however, OMLT should update its code regardless, as otherwise deprecation warnings will be emitted when using the old class names. The fix is to replace all instances of `_BlockData` with `BlockData` (just removing the underscore) - this applies to any other instance of Pyomo component data objects as well (although I could only find 2 instances of these in the OMLT code).

**Legal Acknowledgement**\
By contributing to this software project, I agree my contributions are submitted under the BSD license. I represent I am authorized to make the contributions and grant the license. If my employer has rights to intellectual property that includes these contributions, I represent that I have received permission to make contributions and grant the required license on behalf of that employer.

Co-authored-by: jalving

commit c6d274fbc2ce078827dbabe1e38efeec952eb26d
Author: Emma Johnson <12833636+emma58@users.noreply.github.com>
Date: Thu Aug 22 10:56:10 2024 -0400

Add tolerance to enforce strict inequalities in linear tree formulations (#163)

This PR adds a tolerance at which to enforce "strict" inequalities in linear model trees: that is, the right branch will require that the feature value be greater than or equal to the bound plus this tolerance (epsilon). This means that users can tune epsilon in order to ensure that the MIP solution will match the tree prediction. Additionally, the PR simplifies the implementation of the hybrid bigm linear tree formulation by using two modern pyomo.gdp transformations. This does mean that the linear tree formulations will rely on pyomo>=6.7.1, though, if that's okay.

**Legal Acknowledgement**\
By contributing to this software project, I agree my contributions are submitted under the BSD license. I represent I am authorized to make the contributions and grant the license. If my employer has rights to intellectual property that includes these contributions, I represent that I have received permission to make contributions and grant the required license on behalf of that employer.

---------

Co-authored-by: Emma Johnson

commit d43643a474bca8da9fbfdf53a83cb1a51e4f3780
Author: Lukas Turcani
Date: Tue Aug 20 23:53:51 2024 +0100

Clean up package boilerplate (#149)

This PR does a couple of things to clean up the boilerplate related to packaging OMLT; see the sections below for detailed explanations of the changes.

* Remove `setup.cfg`, `setup.py`, `docs/requirements.txt`, and `tox.ini` in favour of `pyproject.toml`.
* Place `conda` requirements into `environment.yml`
* Create new workflows `tests.yml` and `publish_release.yml`
* Add quality checks using `ruff`, `mypy`, `doctest`
* Use `just` for developer experience
* Updated the `Development` section of `README` to talk about `just`
* Clean up `conf.py`
* Move `pull_request_template.md`
* Allow publishing of the package to PyPI by pushing a new version tag

# Other comments

* consider internal package structure
* force squash merge of PRs - this keeps git history for the `main` branch nice and clean

# Using `pyproject.toml`

`pyproject.toml` is the simplest way to provide package metadata for a Python package. It is easy to read and also provides sections for configuring tools such as `pytest`, `ruff` and `mypy` all in one place. It works seamlessly with the modern Python ecosystem.

I set up `pyproject.toml` to automatically detect the version of the code from git tags. No need to duplicate version numbers across the repo. Just add a new tag and everything will be updated. In addition, when a new git tag is pushed to the GitHub repo, the new `publish_release` workflow will be triggered and a new PyPI version released. (See more on this below.)

I also set it up so that the version is automatically added to a file called `src/omlt/_version.py`, which holds the `__version__` variable. This file is autogenerated and therefore added to `.gitignore`. The `__version__` variable is then re-exported in `src/omlt/__init__.py` so that our users have access to it.

I tried to preserve all the information stored in `setup.cfg` and the other deleted files -- let me know if there is something I missed!

## Optional dependencies

The `pyproject.toml` file allows the creation of optional dependencies. For example, our users can install

```bash
pip install omlt[keras]
# or
pip install omlt[torch]
# or
pip install omlt[linear-tree,keras-gpu]
```

Of course, any combination of optional dependencies is valid too. This allows our users to install the dependencies specific to their use case. Note that:

* I made `onnx` and `onnxruntime` required dependencies because, from my understanding, they are almost always used
* I added an optional dependency set called `dev` which developers can use to install all developer tools and all dependencies -- you need this to run all the tests, for example
* There is also `dev-gpu`, which installs the GPU version of tensorflow in case the developer has a GPU

The available optional dependencies are:

* `linear-tree`, installs the linear tree dependency
* `keras`, installs tensorflow and keras
* `keras-gpu`, installs tensorflow for the gpu and keras
* `torch`, installs torch and torch geometric
* `dev-tools` - this is not to be used directly but allows easy re-use of dev tools in other optional dependencies, namely dev and dev-gpu
* `docs` - installs dependencies required to compile docs
* `dev` - dependencies needed for developing the project, such as tooling
* `dev-gpu` - same as dev but installed with gpu support

Our documentation probably needs to be updated to tell users they should install omlt with some combination of the `linear-tree`, `keras`, `keras-gpu` and `torch` optional dependencies, depending on which features of the package they are using.

# Quality checks with `ruff`, `mypy` and `doctest`

I've enabled `ruff`, `mypy` and `doctest`. Currently there are no doctests, but it's good to have it set up so that it runs in case any are added in the future (a short sketch of what one looks like follows below). Both `ruff` and `mypy` are failing because there are a number of things which need to be fixed.
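As an aside, a doctest is just a usage example embedded in a docstring; the doctest runner executes the example and checks that the output matches. A minimal sketch, using a hypothetical function that is not part of the OMLT codebase:

```python
def scale_input(value: float, factor: float) -> float:
    """Scale a raw input by a constant factor.

    Hypothetical helper, for illustration only; not part of OMLT.
    The example below is executed and verified whenever doctests run:

    >>> scale_input(2.0, 3.0)
    6.0
    """
    return value * factor
```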
For both `ruff` and `mypy` I have disabled some checks which it would be good to enable eventually but are probably a fair amount of work to fix -- these have comments in `pyproject.toml`. The remaining failing checks are ones which I would recommend fixing ASAP. There are two approaches: merge now and fix these errors later, or keep a separate branch where these are incrementally fixed. Up to you to decide what you prefer.

I told ruff to check for `google` style docstrings. I think these are the best because they have good readability and work the best with type hints, in my opinion.

# Using `just` instead of `tox`

https://github.com/casey/just is a simple command runner. It allows developers to define and re-use common operations; for example, I can define a `check` recipe and then run

```bash
just check
```

in my command line and it will run all the tests.

The beauty of this is that `just` is extremely simple. If you read the file, it's basically a sequence of bash instructions for each recipe. This makes the recipes really transparent and easy to understand, and works as code-as-documentation. Users can just read a recipe and run the commands one by one to get the same effect without having `just` installed. There is no magic, which helps with debugging issues. It's also language agnostic. `just` comes as a small stand-alone binary, which makes it a very non-intrusive tool to have on your computer that does not need any dependencies.

The downside is that it does not provide automatic management for Python environments, which I believe tox does provide. The other side of this is that we allow developers to use their favorite tools for managing venvs rather than prescribing certain tools for this repo. (The difference with `just` is that it is an essentially optional tool that also serves as documentation.) I may be overly opinionated on this one, so feel free to push back.

# Cleaning up `docs/conf.py`

I removed a bunch of the commented-out code. This makes it easier to see what the configuration is and also prevents the commented-out options from becoming out of date when a new release of sphinx is made.

# Moving `pull_request_template.md`

I moved this into the `.github` folder because it is GitHub configuration. Very optional, but makes more sense to me.

# `readthedocs` automated action

This guide https://docs.readthedocs.io/en/stable/guides/pull-requests.html shows how to set it up. It requires admin permissions on readthedocs -- can jump on a call to help with this.

# Publishing to PyPI with a git tag

For this, an API key for PyPI needs to be created and added to the repo's secrets -- can jump on a call to help with this.

# Consider `_internal` package structure

One way to make it easier to manage private vs public code in a repository is to create an `_internal` folder where all the code goes. This way all code can be shared easily and moved between modules, and it is private by default, so changes to internal code do not break users. Public modules then just re-export code in the `_internal` submodules. You can see an example of this structure here: https://github.com/lukasturcani/stk. Not a huge issue, but I find it very helpful for managing what things are actually exposed to users as the code-base grows.

**Legal Acknowledgement**\
By contributing to this software project, I agree my contributions are submitted under the BSD license. I represent I am authorized to make the contributions and grant the license.
If my employer has rights to intellectual property that includes these contributions, I represent that I have received permission to make contributions and grant the required license on behalf of that employer. --------- Co-authored-by: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> --- docs/api_doc/omlt.block.rst | 2 +- docs/notebooks.rst | 4 +- .../notebooks/data/build_sin_quadratic_csv.py | 4 +- .../auto-thermal-reformer-relu.ipynb | 109 +++-- .../neuralnet/auto-thermal-reformer.ipynb | 109 +++-- docs/notebooks/neuralnet/build_network.ipynb | 42 +- .../graph_neural_network_formulation.ipynb | 19 +- docs/notebooks/neuralnet/import_network.ipynb | 60 +-- docs/notebooks/neuralnet/index_handling.ipynb | 3 +- .../mnist_example_convolutional.ipynb | 159 +++---- .../neuralnet/mnist_example_dense.ipynb | 161 ++++---- .../neural_network_formulations.ipynb | 389 +++++++++++------- docs/notebooks/trees/bo_with_trees.ipynb | 170 ++++---- .../trees/linear_tree_formulations.ipynb | 279 +++++++------ pyproject.toml | 52 ++- setup.cfg | 171 ++++++++ src/omlt/base/constraint.py | 4 +- src/omlt/base/pyomo.py | 14 +- src/omlt/block.py | 5 +- src/omlt/formulation.py | 11 +- src/omlt/gbt/gbt_formulation.py | 34 +- src/omlt/io/__init__.py | 11 +- src/omlt/io/onnx_parser.py | 19 +- .../torch_geometric/build_gnn_formulation.py | 4 +- .../torch_geometric/torch_geometric_reader.py | 53 ++- src/omlt/linear_tree/lt_definition.py | 4 +- src/omlt/linear_tree/lt_formulation.py | 133 ++---- src/omlt/neuralnet/activations/relu.py | 2 +- src/omlt/neuralnet/layer.py | 21 +- src/omlt/neuralnet/layers/full_space.py | 28 +- src/omlt/neuralnet/layers/partition_based.py | 2 +- src/omlt/neuralnet/nn_formulation.py | 16 +- src/omlt/scaling.py | 6 +- tests/base/test_block.py | 4 +- tests/base/test_expression.py | 3 +- tests/base/test_var.py | 12 +- tests/io/test_onnx_parser.py | 5 +- tests/io/test_torch_geometric.py | 13 +- tests/linear_tree/test_lt_formulation.py | 60 ++- tests/neuralnet/test_network_definition.py | 2 - tests/neuralnet/test_nn_formulation.py | 15 +- tests/neuralnet/test_relu.py | 3 +- tests/neuralnet/train_keras_models.py | 36 +- 43 files changed, 1316 insertions(+), 937 deletions(-) create mode 100644 setup.cfg diff --git a/docs/api_doc/omlt.block.rst b/docs/api_doc/omlt.block.rst index bb111700..4824f793 100644 --- a/docs/api_doc/omlt.block.rst +++ b/docs/api_doc/omlt.block.rst @@ -8,7 +8,7 @@ OMLT Block :show-inheritance: .. note:: - `OmltBlock` is the name used to declare the custom Pyomo block which is exposed to the user. The block functionality is given by `OmltBlockData` which inherits from Pyomo `_BlockData`. + `OmltBlock` is the name used to declare the custom Pyomo block which is exposed to the user. The block functionality is given by `OmltBlockData` which inherits from Pyomo `BlockData`. .. autoclass:: omlt.block.OmltBlockData :members: diff --git a/docs/notebooks.rst b/docs/notebooks.rst index f7da92f4..ae587d87 100644 --- a/docs/notebooks.rst +++ b/docs/notebooks.rst @@ -14,7 +14,7 @@ The first set of notebooks demonstrates the basic mechanics of OMLT and shows ho * `index_handling.ipynb `_ shows how to use `IndexMapper` to handle the mappings between indexes. -* `bo_with_trees.ipynb `_ incorporates gradient-boosted trees into a Bayesian optimization loop to optimize the Rosenbrock function. +* `bo_with_trees.ipynb `_ incorporates gradient-boosted trees into a Bayesian optimization loop to optimize the Rosenbrock function. 
* `linear_tree_formulations.ipynb `_ showcases the different linear model decision tree formulations available in OMLT. @@ -24,7 +24,7 @@ The second set of notebooks gives application-specific examples: * `mnist_example_convolutional.ipynb `_ trains a convolutional neural network on MNIST and uses OMLT to find adversarial examples. -* `graph_neural_network_formulation.ipynb `_ transforms graph neural networks into OMLT and builds formulation to solve optimization problems. +* `graph_neural_network_formulation.ipynb `_ transforms graph neural networks into OMLT and builds formulation to solve optimization problems. * `auto-thermal-reformer.ipynb `_ develops a neural network surrogate (using sigmoid activations) with data from a process model built using `IDAES-PSE `_. diff --git a/docs/notebooks/data/build_sin_quadratic_csv.py b/docs/notebooks/data/build_sin_quadratic_csv.py index 72e6c554..261525eb 100644 --- a/docs/notebooks/data/build_sin_quadratic_csv.py +++ b/docs/notebooks/data/build_sin_quadratic_csv.py @@ -9,9 +9,7 @@ rng = np.random.default_rng() sin_quads = pd.DataFrame(x, columns=["x"]) sin_quads["y"] = ( - np.sin(w * x) - + x**2 - + np.array([rng.uniform() * 0.1 for _ in range(n_samples)]) + np.sin(w * x) + x**2 + np.array([rng.uniform() * 0.1 for _ in range(n_samples)]) ) plt.plot(sin_quads["x"], sin_quads["y"]) diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb index 0b800812..f49a771e 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer-relu.ipynb @@ -78,20 +78,20 @@ ], "source": [ "import os\n", - "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress CUDA warnings from tensorflow\n", + "\n", + "os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # suppress CUDA warnings from tensorflow\n", "\n", "# import the necessary packages\n", - "from omlt import OmltBlock, OffsetScaling\n", - "from omlt.io.keras import load_keras_sequential\n", - "from omlt.neuralnet import ReluBigMFormulation\n", - "from omlt.base import OmltConstraint\n", - "import pyomo.environ as pyo\n", "import pandas as pd\n", - "import tensorflow.keras as keras\n", - "from tensorflow.keras.models import Sequential\n", + "import pyomo.environ as pyo\n", + "from tensorflow import keras\n", "from tensorflow.keras.layers import Dense\n", + "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.optimizers import Adam\n", - "from tensorflow.keras.callbacks import ModelCheckpoint" + "\n", + "from omlt import OffsetScaling, OmltBlock\n", + "from omlt.io.keras import load_keras_sequential\n", + "from omlt.neuralnet import ReluBigMFormulation" ] }, { @@ -152,10 +152,23 @@ ], "source": [ "# read in our csv data\n", - "columns = ['Bypass Fraction', 'NG Steam Ratio', 'Steam Flow',\n", - " 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n", - " 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n", - "df = pd.read_csv('../data/reformer.csv', usecols=columns)\n", + "columns = [\n", + " \"Bypass Fraction\",\n", + " \"NG Steam Ratio\",\n", + " \"Steam Flow\",\n", + " \"Reformer Duty\",\n", + " \"AR\",\n", + " \"C2H6\",\n", + " \"C3H8\",\n", + " \"C4H10\",\n", + " \"CH4\",\n", + " \"CO\",\n", + " \"CO2\",\n", + " \"H2\",\n", + " \"H2O\",\n", + " \"N2\",\n", + "]\n", + "df = pd.read_csv(\"../data/reformer.csv\", usecols=columns)\n", "print(df)" ] }, @@ -170,9 +183,21 @@ "outputs": [], "source": [ "# separate the data into inputs and outputs\n", - "inputs = ['Bypass Fraction', 'NG 
Steam Ratio']\n", - "outputs = [ 'Steam Flow', 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n", - " 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n", + "inputs = [\"Bypass Fraction\", \"NG Steam Ratio\"]\n", + "outputs = [\n", + " \"Steam Flow\",\n", + " \"Reformer Duty\",\n", + " \"AR\",\n", + " \"C2H6\",\n", + " \"C3H8\",\n", + " \"C4H10\",\n", + " \"CH4\",\n", + " \"CO\",\n", + " \"CO2\",\n", + " \"H2\",\n", + " \"H2O\",\n", + " \"N2\",\n", + "]\n", "dfin = df[inputs]\n", "dfout = df[outputs]" ] @@ -199,8 +224,8 @@ "\n", "# capture the minimum and maximum values of the scaled inputs\n", "# so we don't use the model outside the valid range\n", - "scaled_lb = dfin.min()[inputs].values\n", - "scaled_ub = dfin.max()[inputs].values" + "scaled_lb = dfin.min()[inputs].to_numpy()\n", + "scaled_ub = dfin.max()[inputs].to_numpy()" ] }, { @@ -223,13 +248,13 @@ ], "source": [ "# create our Keras Sequential model\n", - "nn = Sequential(name='reformer_relu_4_20')\n", - "nn.add(Dense(units=10, input_dim=len(inputs), activation='relu'))\n", - "nn.add(Dense(units=10, activation='relu'))\n", - "nn.add(Dense(units=10, activation='relu'))\n", - "nn.add(Dense(units=10, activation='relu'))\n", + "nn = Sequential(name=\"reformer_relu_4_20\")\n", + "nn.add(Dense(units=10, input_dim=len(inputs), activation=\"relu\"))\n", + "nn.add(Dense(units=10, activation=\"relu\"))\n", + "nn.add(Dense(units=10, activation=\"relu\"))\n", + "nn.add(Dense(units=10, activation=\"relu\"))\n", "nn.add(Dense(units=len(outputs)))\n", - "nn.compile(optimizer=Adam(), loss='mse')" + "nn.compile(optimizer=Adam(), loss=\"mse\")" ] }, { @@ -450,8 +475,8 @@ ], "source": [ "# train our model\n", - "x = dfin.values\n", - "y = dfout.values\n", + "x = dfin.to_numpy()\n", + "y = dfout.to_numpy()\n", "\n", "history = nn.fit(x, y, epochs=100)" ] @@ -469,7 +494,7 @@ "# save the model to disk\n", "# While not technically necessary, this shows how we can load a previously saved model into\n", "# our optimization formulation)\n", - "nn.save('reformer_nn_relu.keras')" + "nn.save(\"reformer_nn_relu.keras\")" ] }, { @@ -523,22 +548,24 @@ "outputs": [], "source": [ "# load the Keras model\n", - "nn_reformer = keras.models.load_model('reformer_nn_relu.keras', compile=False)\n", + "nn_reformer = keras.models.load_model(\"reformer_nn_relu.keras\", compile=False)\n", "\n", "# Note: The neural network is in the scaled space. We want access to the\n", "# variables in the unscaled space. 
Therefore, we need to tell OMLT about the\n", "# scaling factors\n", "scaler = OffsetScaling(\n", - " offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n", - " factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n", - " offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n", - " factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))}\n", - " )\n", + " offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n", + " factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n", + " offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n", + " factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))},\n", + ")\n", "\n", "scaled_input_bounds = {i: (scaled_lb[i], scaled_ub[i]) for i in range(len(inputs))}\n", "\n", "# create a network definition from the Keras model\n", - "net = load_keras_sequential(nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds)\n", + "net = load_keras_sequential(\n", + " nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds\n", + ")\n", "\n", "# create the variables and constraints for the neural network in Pyomo\n", "m.reformer.build_formulation(ReluBigMFormulation(net))" @@ -555,8 +582,8 @@ "outputs": [], "source": [ "# now add the objective and the constraints\n", - "h2_idx = outputs.index('H2')\n", - "n2_idx = outputs.index('N2')\n", + "h2_idx = outputs.index(\"H2\")\n", + "n2_idx = outputs.index(\"N2\")\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] @@ -572,7 +599,7 @@ "outputs": [], "source": [ "# now solve the optimization problem (this may take some time)\n", - "solver = pyo.SolverFactory('cbc')\n", + "solver = pyo.SolverFactory(\"cbc\")\n", "status = solver.solve(m, tee=False)" ] }, @@ -597,10 +624,10 @@ } ], "source": [ - "print('Bypass Fraction:', pyo.value(m.reformer.inputs[0]))\n", - "print('NG Steam Ratio:', pyo.value(m.reformer.inputs[1]))\n", - "print('H2 Concentration:', pyo.value(m.reformer.outputs[h2_idx]))\n", - "print('N2 Concentration:', pyo.value(m.reformer.outputs[n2_idx]))" + "print(\"Bypass Fraction:\", pyo.value(m.reformer.inputs[0]))\n", + "print(\"NG Steam Ratio:\", pyo.value(m.reformer.inputs[1]))\n", + "print(\"H2 Concentration:\", pyo.value(m.reformer.outputs[h2_idx]))\n", + "print(\"N2 Concentration:\", pyo.value(m.reformer.outputs[n2_idx]))" ] } ], diff --git a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb index f7328c18..6f7d4320 100644 --- a/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb +++ b/docs/notebooks/neuralnet/auto-thermal-reformer.ipynb @@ -67,20 +67,20 @@ "outputs": [], "source": [ "import os\n", - "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress CUDA warnings from tensorflow\n", + "\n", + "os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # suppress CUDA warnings from tensorflow\n", "\n", "# import the necessary packages\n", - "from omlt import OmltBlock, OffsetScaling\n", - "from omlt.io.keras import load_keras_sequential\n", - "from omlt.neuralnet import FullSpaceSmoothNNFormulation\n", - "from omlt.base import OmltConstraint\n", - "import pyomo.environ as pyo\n", "import pandas as pd\n", - "import tensorflow.keras as keras\n", - "from tensorflow.keras.models import Sequential\n", + "import pyomo.environ as pyo\n", + "from tensorflow import keras\n", "from tensorflow.keras.layers import 
Dense\n", + "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.optimizers import Adam\n", - "from tensorflow.keras.callbacks import ModelCheckpoint" + "\n", + "from omlt import OffsetScaling, OmltBlock\n", + "from omlt.io.keras import load_keras_sequential\n", + "from omlt.neuralnet import FullSpaceSmoothNNFormulation" ] }, { @@ -141,10 +141,23 @@ ], "source": [ "# read in our csv data\n", - "columns = ['Bypass Fraction', 'NG Steam Ratio', 'Steam Flow',\n", - " 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n", - " 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n", - "df = pd.read_csv('../data/reformer.csv', usecols=columns)\n", + "columns = [\n", + " \"Bypass Fraction\",\n", + " \"NG Steam Ratio\",\n", + " \"Steam Flow\",\n", + " \"Reformer Duty\",\n", + " \"AR\",\n", + " \"C2H6\",\n", + " \"C3H8\",\n", + " \"C4H10\",\n", + " \"CH4\",\n", + " \"CO\",\n", + " \"CO2\",\n", + " \"H2\",\n", + " \"H2O\",\n", + " \"N2\",\n", + "]\n", + "df = pd.read_csv(\"../data/reformer.csv\", usecols=columns)\n", "print(df)" ] }, @@ -159,9 +172,21 @@ "outputs": [], "source": [ "# separate the data into inputs and outputs\n", - "inputs = ['Bypass Fraction', 'NG Steam Ratio']\n", - "outputs = [ 'Steam Flow', 'Reformer Duty','AR', 'C2H6', 'C3H8', 'C4H10',\n", - " 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'N2']\n", + "inputs = [\"Bypass Fraction\", \"NG Steam Ratio\"]\n", + "outputs = [\n", + " \"Steam Flow\",\n", + " \"Reformer Duty\",\n", + " \"AR\",\n", + " \"C2H6\",\n", + " \"C3H8\",\n", + " \"C4H10\",\n", + " \"CH4\",\n", + " \"CO\",\n", + " \"CO2\",\n", + " \"H2\",\n", + " \"H2O\",\n", + " \"N2\",\n", + "]\n", "dfin = df[inputs]\n", "dfout = df[outputs]" ] @@ -188,8 +213,8 @@ "\n", "# capture the minimum and maximum values of the scaled inputs\n", "# so we don't use the model outside the valid range\n", - "scaled_lb = dfin.min()[inputs].values\n", - "scaled_ub = dfin.max()[inputs].values" + "scaled_lb = dfin.min()[inputs].to_numpy()\n", + "scaled_ub = dfin.max()[inputs].to_numpy()" ] }, { @@ -212,13 +237,13 @@ ], "source": [ "# create our Keras Sequential model\n", - "nn = Sequential(name='reformer_sigmoid_4_20')\n", - "nn.add(Dense(units=20, input_dim=len(inputs), activation='sigmoid'))\n", - "nn.add(Dense(units=20, activation='sigmoid'))\n", - "nn.add(Dense(units=20, activation='sigmoid'))\n", - "nn.add(Dense(units=20, activation='sigmoid'))\n", + "nn = Sequential(name=\"reformer_sigmoid_4_20\")\n", + "nn.add(Dense(units=20, input_dim=len(inputs), activation=\"sigmoid\"))\n", + "nn.add(Dense(units=20, activation=\"sigmoid\"))\n", + "nn.add(Dense(units=20, activation=\"sigmoid\"))\n", + "nn.add(Dense(units=20, activation=\"sigmoid\"))\n", "nn.add(Dense(units=len(outputs)))\n", - "nn.compile(optimizer=Adam(), loss='mse')" + "nn.compile(optimizer=Adam(), loss=\"mse\")" ] }, { @@ -439,8 +464,8 @@ ], "source": [ "# train our model\n", - "x = dfin.values\n", - "y = dfout.values\n", + "x = dfin.to_numpy()\n", + "y = dfout.to_numpy()\n", "\n", "history = nn.fit(x, y, epochs=100)" ] @@ -458,7 +483,7 @@ "# save the model to disk\n", "# While not technically necessary, this shows how we can load a previously saved model into\n", "# our optimization formulation)\n", - "nn.save('reformer_nn.keras')" + "nn.save(\"reformer_nn.keras\")" ] }, { @@ -512,22 +537,24 @@ "outputs": [], "source": [ "# load the Keras model\n", - "nn_reformer = keras.models.load_model('reformer_nn.keras', compile=False)\n", + "nn_reformer = keras.models.load_model(\"reformer_nn.keras\", compile=False)\n", "\n", "# Note: The neural network 
is in the scaled space. We want access to the\n", "# variables in the unscaled space. Therefore, we need to tell OMLT about the\n", "# scaling factors\n", "scaler = OffsetScaling(\n", - " offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n", - " factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n", - " offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n", - " factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))}\n", - " )\n", + " offset_inputs={i: x_offset[inputs[i]] for i in range(len(inputs))},\n", + " factor_inputs={i: x_factor[inputs[i]] for i in range(len(inputs))},\n", + " offset_outputs={i: y_offset[outputs[i]] for i in range(len(outputs))},\n", + " factor_outputs={i: y_factor[outputs[i]] for i in range(len(outputs))},\n", + ")\n", "\n", "scaled_input_bounds = {i: (scaled_lb[i], scaled_ub[i]) for i in range(len(inputs))}\n", "\n", "# create a network definition from the Keras model\n", - "net = load_keras_sequential(nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds)\n", + "net = load_keras_sequential(\n", + " nn_reformer, scaling_object=scaler, scaled_input_bounds=scaled_input_bounds\n", + ")\n", "\n", "# create the variables and constraints for the neural network in Pyomo\n", "m.reformer.build_formulation(FullSpaceSmoothNNFormulation(net))" @@ -544,8 +571,8 @@ "outputs": [], "source": [ "# now add the objective and the constraints\n", - "h2_idx = outputs.index('H2')\n", - "n2_idx = outputs.index('N2')\n", + "h2_idx = outputs.index(\"H2\")\n", + "n2_idx = outputs.index(\"N2\")\n", "m.obj = pyo.Objective(expr=m.reformer.outputs[h2_idx], sense=pyo.maximize)\n", "m.con = pyo.Constraint(expr=m.reformer.outputs[n2_idx] <= 0.34)" ] @@ -688,7 +715,7 @@ ], "source": [ "# now solve the optimization problem\n", - "solver = pyo.SolverFactory('ipopt')\n", + "solver = pyo.SolverFactory(\"ipopt\")\n", "status = solver.solve(m, tee=True)" ] }, @@ -713,10 +740,10 @@ } ], "source": [ - "print('Bypass Fraction:', pyo.value(m.reformer.inputs[0]))\n", - "print('NG Steam Ratio:', pyo.value(m.reformer.inputs[1]))\n", - "print('H2 Concentration:', pyo.value(m.reformer.outputs[h2_idx]))\n", - "print('N2 Concentration:', pyo.value(m.reformer.outputs[n2_idx]))" + "print(\"Bypass Fraction:\", pyo.value(m.reformer.inputs[0]))\n", + "print(\"NG Steam Ratio:\", pyo.value(m.reformer.inputs[1]))\n", + "print(\"H2 Concentration:\", pyo.value(m.reformer.outputs[h2_idx]))\n", + "print(\"N2 Concentration:\", pyo.value(m.reformer.outputs[n2_idx]))" ] } ], diff --git a/docs/notebooks/neuralnet/build_network.ipynb b/docs/notebooks/neuralnet/build_network.ipynb index 4b0bd499..008042c1 100644 --- a/docs/notebooks/neuralnet/build_network.ipynb +++ b/docs/notebooks/neuralnet/build_network.ipynb @@ -37,11 +37,11 @@ "metadata": {}, "outputs": [], "source": [ - "import pyomo.environ as pyo\n", "import numpy as np\n", + "import pyomo.environ as pyo\n", "\n", "from omlt.neuralnet import NetworkDefinition\n", - "from omlt.neuralnet.layer import InputLayer, DenseLayer, IndexMapper" + "from omlt.neuralnet.layer import DenseLayer, IndexMapper, InputLayer" ] }, { @@ -75,10 +75,14 @@ "metadata": {}, "outputs": [], "source": [ - "net = NetworkDefinition(scaled_input_bounds={(0,0):(-1.0, 1.0), \n", - " (0,1):(-1.0, 1.0),\n", - " (1,0):(-1.0, 1.0),\n", - " (1,1):(-1.0, 1.0)})" + "net = NetworkDefinition(\n", + " scaled_input_bounds={\n", + " (0, 0): (-1.0, 1.0),\n", + " (0, 1): (-1.0, 1.0),\n", + " (1, 0): (-1.0, 1.0),\n", + " (1, 1): 
(-1.0, 1.0),\n", + " }\n", + ")" ] }, { @@ -156,7 +160,7 @@ } ], "source": [ - "Image(filename='../images/input-layer.png', height=300)" + "Image(filename=\"../images/input-layer.png\", height=300)" ] }, { @@ -224,7 +228,7 @@ } ], "source": [ - "Image(filename='../images/dense-layer-0.png', height=250)" + "Image(filename=\"../images/dense-layer-0.png\", height=250)" ] }, { @@ -238,7 +242,7 @@ " output_size=[2, 1],\n", " activation=\"linear\",\n", " weights=np.array([[1.0], [-0.5]]),\n", - " biases=np.array([[0.1], [0.25]])\n", + " biases=np.array([[0.1], [0.25]]),\n", ")" ] }, @@ -288,7 +292,7 @@ } ], "source": [ - "Image(filename='../images/network-structure-0.png', height=200)" + "Image(filename=\"../images/network-structure-0.png\", height=200)" ] }, { @@ -368,10 +372,10 @@ "y = x\n", "z = np.maximum(0, y)\n", "\n", - "plt.plot(x, y, label='Before Activation')\n", - "plt.plot(x, z, label='After Activation')\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", + "plt.plot(x, y, label=\"Before Activation\")\n", + "plt.plot(x, z, label=\"After Activation\")\n", + "plt.xlabel(\"x\")\n", + "plt.ylabel(\"y\")\n", "plt.legend()" ] }, @@ -427,7 +431,7 @@ } ], "source": [ - "Image(filename='../images/dense-layer-1.png', height=250)" + "Image(filename=\"../images/dense-layer-1.png\", height=250)" ] }, { @@ -476,7 +480,7 @@ } ], "source": [ - "Image(filename='../images/network-structure-1.png', height=200)" + "Image(filename=\"../images/network-structure-1.png\", height=200)" ] }, { @@ -672,8 +676,10 @@ } ], "source": [ - "m.neural_net.layer[m.neural_net.layers.at(1)].z.pprint() #Note, the input layer does not have zhat\n", - "m.neural_net.layer[m.neural_net.layers.at(2)].zhat.pprint() " + "m.neural_net.layer[\n", + " m.neural_net.layers.at(1)\n", + "].z.pprint() # Note, the input layer does not have zhat\n", + "m.neural_net.layer[m.neural_net.layers.at(2)].zhat.pprint()" ] }, { diff --git a/docs/notebooks/neuralnet/graph_neural_network_formulation.ipynb b/docs/notebooks/neuralnet/graph_neural_network_formulation.ipynb index 69cb9675..2699d171 100644 --- a/docs/notebooks/neuralnet/graph_neural_network_formulation.ipynb +++ b/docs/notebooks/neuralnet/graph_neural_network_formulation.ipynb @@ -54,13 +54,13 @@ ], "source": [ "import numpy as np\n", + "import pyomo.environ as pyo\n", "import torch\n", "from torch.nn import Linear, ReLU, Sigmoid\n", - "from torch_geometric.nn import Sequential, GCNConv\n", - "from torch_geometric.nn import global_mean_pool\n", - "from omlt.io.torch_geometric import gnn_with_fixed_graph\n", - "import pyomo.environ as pyo\n", + "from torch_geometric.nn import GCNConv, Sequential, global_mean_pool\n", + "\n", "from omlt import OmltBlock\n", + "from omlt.io.torch_geometric import gnn_with_fixed_graph\n", "\n", "\n", "def GCN_Sequential(activation, pooling):\n", @@ -78,7 +78,7 @@ " activation(),\n", " Linear(2, 1),\n", " ],\n", - " )\n" + " )" ] }, { @@ -478,14 +478,13 @@ "outputs": [], "source": [ "import numpy as np\n", + "import pyomo.environ as pyo\n", "import torch\n", - "from torch.nn import Linear, ReLU\n", - "from torch_geometric.nn import Sequential, SAGEConv\n", - "from torch_geometric.nn import global_add_pool\n", - "from omlt.io.torch_geometric import gnn_with_non_fixed_graph\n", + "from torch.nn import ReLU\n", + "from torch_geometric.nn import SAGEConv, global_add_pool\n", "\n", - "import pyomo.environ as pyo\n", "from omlt import OmltBlock\n", + "from omlt.io.torch_geometric import gnn_with_non_fixed_graph\n", "\n", "\n", "def SAGE_Sequential(activation, 
pooling):\n", diff --git a/docs/notebooks/neuralnet/import_network.ipynb b/docs/notebooks/neuralnet/import_network.ipynb index 3f056572..673fa974 100644 --- a/docs/notebooks/neuralnet/import_network.ipynb +++ b/docs/notebooks/neuralnet/import_network.ipynb @@ -170,7 +170,7 @@ "source": [ "import pandas as pd\n", "\n", - "df = pd.read_csv('../data/diabetes.csv')\n", + "df = pd.read_csv(\"../data/diabetes.csv\")\n", "\n", "df.head()" ] @@ -215,8 +215,8 @@ "metadata": {}, "outputs": [], "source": [ - "X = df.iloc[:, :8].values\n", - "Y = df.iloc[:, 8].values" + "X = df.iloc[:, :8].to_numpy()\n", + "Y = df.iloc[:, 8].to_numpy()" ] }, { @@ -265,7 +265,7 @@ "\n", "lb = np.min(X, axis=0)\n", "ub = np.max(X, axis=0)\n", - "input_bounds = [(l, u) for l, u in zip(lb, ub)]\n", + "input_bounds = list(zip(lb, ub))\n", "input_bounds" ] }, @@ -292,7 +292,7 @@ } ], "source": [ - "from omlt.io import write_onnx_model_with_bounds, load_onnx_neural_network_with_bounds" + "from omlt.io import load_onnx_neural_network_with_bounds, write_onnx_model_with_bounds" ] }, { @@ -350,17 +350,17 @@ ], "source": [ "import os\n", - "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n", - "import keras\n", - "from keras.models import Sequential\n", + "\n", + "os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # or any {'0', '1', '2'}\n", "from keras.layers import Dense\n", + "from keras.models import Sequential\n", "\n", "model = Sequential()\n", - "model.add(Dense(12, input_dim=8, activation='relu'))\n", - "model.add(Dense(8, activation='relu'))\n", - "model.add(Dense(1, activation='linear'))\n", + "model.add(Dense(12, input_dim=8, activation=\"relu\"))\n", + "model.add(Dense(8, activation=\"relu\"))\n", + "model.add(Dense(1, activation=\"linear\"))\n", "\n", - "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])" + "model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])" ] }, { @@ -730,13 +730,13 @@ "# Add output_names for compatibility:\n", "model.output_names = [output.name for output in model.outputs]\n", "\n", - "from tensorflow import TensorSpec\n", "import tf2onnx\n", + "from tensorflow import TensorSpec\n", "\n", "spec = [TensorSpec(input.shape, input.dtype, input.name) for input in model.inputs]\n", "onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature=spec)\n", "\n", - "with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:\n", + "with tempfile.NamedTemporaryFile(suffix=\".onnx\", delete=False) as f:\n", " write_onnx_model_with_bounds(f.name, onnx_model, input_bounds)\n", " print(f\"Wrote ONNX model with bounds at {f.name}\")" ] @@ -770,7 +770,7 @@ } ], "source": [ - "Image(filename='../images/simple-neural-network.png', height=600)" + "Image(filename=\"../images/simple-neural-network.png\", height=600)" ] }, { @@ -816,10 +816,11 @@ ], "source": [ "import torch\n", - "import torch.nn as nn\n", "import torch.nn.functional as F\n", + "from torch import nn\n", "from torch.utils.data import DataLoader, TensorDataset\n", "\n", + "\n", "class PyTorchModel(nn.Module):\n", " def __init__(self):\n", " super().__init__()\n", @@ -830,18 +831,20 @@ " def forward(self, x):\n", " x = F.relu(self.dense_0(x))\n", " x = F.relu(self.dense_1(x))\n", - " x = self.out(x)\n", - " return x\n", + " return self.out(x)\n", + "\n", "\n", "model = PyTorchModel()\n", "loss_function = nn.L1Loss()\n", - "optimizer = torch.optim.Adam(model.parameters(),lr=0.01)\n", + "optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n", "\n", - "dataset 
= TensorDataset(torch.as_tensor(X, dtype=torch.float32), torch.as_tensor(Y, dtype=torch.float32))\n", + "dataset = TensorDataset(\n", + " torch.as_tensor(X, dtype=torch.float32), torch.as_tensor(Y, dtype=torch.float32)\n", + ")\n", "dataloader = DataLoader(dataset, batch_size=10)\n", "\n", "for epoch in range(150):\n", - " for id_batch, (x_batch, y_batch) in enumerate(dataloader):\n", + " for x_batch, y_batch in dataloader:\n", " y_batch_pred = model(x_batch)\n", " loss = loss_function(y_batch_pred, y_batch.view(*y_batch_pred.shape))\n", " optimizer.zero_grad()\n", @@ -849,7 +852,7 @@ " optimizer.step()\n", "\n", " if epoch % 10 == 0:\n", - " print(f\"Epoch number: {epoch} loss : {loss.item()}\")\n" + " print(f\"Epoch number: {epoch} loss : {loss.item()}\")" ] }, { @@ -878,17 +881,14 @@ "# model input used for exporting\n", "x = torch.randn(10, 8, requires_grad=True)\n", "pytorch_model = None\n", - "with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:\n", + "with tempfile.NamedTemporaryFile(suffix=\".onnx\", delete=False) as f:\n", " torch.onnx.export(\n", " model,\n", " x,\n", " f,\n", - " input_names=['input'],\n", - " output_names=['output'],\n", - " dynamic_axes={\n", - " 'input': {0: 'batch_size'},\n", - " 'output': {0: 'batch_size'}\n", - " }\n", + " input_names=[\"input\"],\n", + " output_names=[\"output\"],\n", + " dynamic_axes={\"input\": {0: \"batch_size\"}, \"output\": {0: \"batch_size\"}},\n", " )\n", " write_onnx_model_with_bounds(f.name, None, input_bounds)\n", " print(f\"Wrote PyTorch model to {f.name}\")\n", @@ -917,7 +917,7 @@ } ], "source": [ - "Image(filename='../images/torch-neural-network.png', height=500)" + "Image(filename=\"../images/torch-neural-network.png\", height=500)" ] }, { diff --git a/docs/notebooks/neuralnet/index_handling.ipynb b/docs/notebooks/neuralnet/index_handling.ipynb index 36ed4338..7b3a9b6a 100644 --- a/docs/notebooks/neuralnet/index_handling.ipynb +++ b/docs/notebooks/neuralnet/index_handling.ipynb @@ -29,8 +29,9 @@ "outputs": [], "source": [ "import numpy as np\n", + "\n", "from omlt.neuralnet import NetworkDefinition\n", - "from omlt.neuralnet.layer import IndexMapper, InputLayer, DenseLayer, PoolingLayer2D" + "from omlt.neuralnet.layer import DenseLayer, IndexMapper, InputLayer, PoolingLayer2D" ] }, { diff --git a/docs/notebooks/neuralnet/mnist_example_convolutional.ipynb b/docs/notebooks/neuralnet/mnist_example_convolutional.ipynb index 1de8f770..cf44882c 100644 --- a/docs/notebooks/neuralnet/mnist_example_convolutional.ipynb +++ b/docs/notebooks/neuralnet/mnist_example_convolutional.ipynb @@ -47,26 +47,29 @@ } ], "source": [ - "#Import requisite packages\n", - "#data manipulation\n", - "import numpy as np\n", + "# Import requisite packages\n", + "# data manipulation\n", "import tempfile\n", "\n", - "#pytorch for training neural network\n", - "import torch, torch.onnx\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torch.optim as optim\n", - "from torchvision import datasets, transforms\n", - "from torch.optim.lr_scheduler import StepLR\n", + "import numpy as np\n", "\n", - "#pyomo for optimization\n", + "# pyomo for optimization\n", "import pyomo.environ as pyo\n", "\n", - "#omlt for interfacing our neural network with pyomo\n", + "# pytorch for training neural network\n", + "import torch\n", + "import torch.onnx\n", + "from torch import nn, optim\n", + "from torch.optim.lr_scheduler import StepLR\n", + "from torchvision import datasets, transforms\n", + "\n", + "# omlt for interfacing our 
neural network with pyomo\n", "from omlt import OmltBlock\n", - "from omlt.neuralnet import FullSpaceNNFormulation\n", - "from omlt.io.onnx import write_onnx_model_with_bounds, load_onnx_neural_network_with_bounds" + "from omlt.io.onnx import (\n", + " load_onnx_neural_network_with_bounds,\n", + " write_onnx_model_with_bounds,\n", + ")\n", + "from omlt.neuralnet import FullSpaceNNFormulation" ] }, { @@ -84,14 +87,16 @@ "metadata": {}, "outputs": [], "source": [ - "#set training and test batch sizes\n", - "train_kwargs = {'batch_size': 64}\n", - "test_kwargs = {'batch_size': 1000}\n", + "# set training and test batch sizes\n", + "train_kwargs = {\"batch_size\": 64}\n", + "test_kwargs = {\"batch_size\": 1000}\n", "\n", - "#build DataLoaders for training and test sets\n", - "dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor())\n", - "dataset2 = datasets.MNIST('../data', train=False, transform=transforms.ToTensor())\n", - "train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs, shuffle=True)\n", + "# build DataLoaders for training and test sets\n", + "dataset1 = datasets.MNIST(\n", + " \"../data\", train=True, download=True, transform=transforms.ToTensor()\n", + ")\n", + "dataset2 = datasets.MNIST(\"../data\", train=False, transform=transforms.ToTensor())\n", + "train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs, shuffle=True)\n", "test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)" ] }, @@ -110,28 +115,28 @@ "source": [ "hidden_size = 10\n", "\n", + "\n", "class Net(nn.Module):\n", - " #define layers of neural network\n", + " # define layers of neural network\n", " def __init__(self):\n", " super().__init__()\n", - " self.conv1 = nn.Conv2d(1, 2, (4,4), (2,2), 0)\n", - " self.conv2 = nn.Conv2d(2, 2, (4,4), (2,2), 0)\n", - " self.hidden1 = nn.Linear(5*5*2, hidden_size)\n", - " self.output = nn.Linear(hidden_size, 10)\n", + " self.conv1 = nn.Conv2d(1, 2, (4, 4), (2, 2), 0)\n", + " self.conv2 = nn.Conv2d(2, 2, (4, 4), (2, 2), 0)\n", + " self.hidden1 = nn.Linear(5 * 5 * 2, hidden_size)\n", + " self.output = nn.Linear(hidden_size, 10)\n", " self.relu = nn.ReLU()\n", " self.softmax = nn.LogSoftmax(dim=1)\n", "\n", - " #define forward pass of neural network\n", + " # define forward pass of neural network\n", " def forward(self, x):\n", " self.x1 = self.conv1(x)\n", " self.x2 = self.relu(self.x1)\n", " self.x3 = self.conv2(self.x2)\n", " self.x4 = self.relu(self.x3)\n", - " self.x5 = self.hidden1(self.x4.view((-1,5*5*2)))\n", + " self.x5 = self.hidden1(self.x4.view((-1, 5 * 5 * 2)))\n", " self.x6 = self.relu(self.x5)\n", " self.x7 = self.output(self.x6)\n", - " x = self.softmax(self.x7) \n", - " return x" + " return self.softmax(self.x7)" ] }, { @@ -147,33 +152,38 @@ "metadata": {}, "outputs": [], "source": [ - "#training function computes loss and its gradient on batch, and prints status after every 200 batches\n", + "# training function computes loss and its gradient on batch, and prints status after every 200 batches\n", "def train(model, train_loader, optimizer, epoch):\n", - " model.train(); criterion = nn.NLLLoss()\n", + " model.train()\n", + " criterion = nn.NLLLoss()\n", " for batch_idx, (data, target) in enumerate(train_loader):\n", " optimizer.zero_grad()\n", " output = model(data)\n", " loss = criterion(output, target)\n", " loss.backward()\n", " optimizer.step()\n", - " if batch_idx % 200 == 0:\n", - " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", - " epoch, batch_idx * 
len(data), len(train_loader.dataset),\n", - " 100. * batch_idx / len(train_loader), loss.item()))\n", + " if batch_idx % 200 == 0:\n", + " print(\n", + " f\"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100.0 * batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item():.6f}\"\n", + " )\n", "\n", - "#testing function computes loss and prints overall model accuracy on test set\n", + "\n", + "# testing function computes loss and prints overall model accuracy on test set\n", "def test(model, test_loader):\n", - " model.eval(); criterion = nn.NLLLoss(reduction='sum')\n", - " test_loss = 0; correct = 0\n", + " model.eval()\n", + " criterion = nn.NLLLoss(reduction=\"sum\")\n", + " test_loss = 0\n", + " correct = 0\n", " with torch.no_grad():\n", " for data, target in test_loader:\n", " output = model(data)\n", - " test_loss += criterion(output, target).item() \n", - " pred = output.argmax(dim=1, keepdim=True) \n", + " test_loss += criterion(output, target).item()\n", + " pred = output.argmax(dim=1, keepdim=True)\n", " correct += pred.eq(target.view_as(pred)).sum().item()\n", " test_loss /= len(test_loader.dataset)\n", - " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", - " test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) " + " print(\n", + " f\"\\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100.0 * correct / len(test_loader.dataset):.0f}%)\\n\"\n", + " )" ] }, { @@ -237,12 +247,12 @@ } ], "source": [ - "#define model and optimizer\n", + "# define model and optimizer\n", "model = Net()\n", "optimizer = optim.Adadelta(model.parameters(), lr=1)\n", "scheduler = StepLR(optimizer, step_size=1, gamma=0.7)\n", "\n", - "#train CNN model for five epochs\n", + "# train CNN model for five epochs\n", "for epoch in range(5):\n", " train(model, train_loader, optimizer, epoch)\n", " test(model, test_loader)\n", @@ -283,27 +293,27 @@ ], "source": [ "class NoSoftmaxNet(nn.Module):\n", - " #define layers of neural network\n", + " # define layers of neural network\n", " def __init__(self):\n", " super().__init__()\n", - " self.conv1 = nn.Conv2d(1, 2, (4,4), (2,2), 0)\n", - " self.conv2 = nn.Conv2d(2, 2, (4,4), (2,2), 0)\n", + " self.conv1 = nn.Conv2d(1, 2, (4, 4), (2, 2), 0)\n", + " self.conv2 = nn.Conv2d(2, 2, (4, 4), (2, 2), 0)\n", " self.hidden1 = nn.Linear(5 * 5 * 2, hidden_size)\n", - " self.output = nn.Linear(hidden_size, 10)\n", + " self.output = nn.Linear(hidden_size, 10)\n", " self.relu = nn.ReLU()\n", "\n", - " #define forward pass of neural network\n", + " # define forward pass of neural network\n", " def forward(self, x):\n", " self.x1 = self.conv1(x)\n", " self.x2 = self.relu(self.x1)\n", " self.x3 = self.conv2(self.x2)\n", " self.x4 = self.relu(self.x3)\n", - " self.x5 = self.hidden1(self.x4.view((-1,5*5*2)))\n", + " self.x5 = self.hidden1(self.x4.view((-1, 5 * 5 * 2)))\n", " self.x6 = self.relu(self.x5)\n", - " x = self.output(self.x6) \n", - " return x\n", + " return self.output(self.x6)\n", + "\n", "\n", - "#create neural network without LogSoftmax and load parameters from existing model\n", + "# create neural network without LogSoftmax and load parameters from existing model\n", "model2 = NoSoftmaxNet()\n", "model2.load_state_dict(model.state_dict())" ] @@ -331,24 +341,24 @@ "metadata": {}, "outputs": [], "source": [ - "#load image and true label from test set with index 'problem_index'\n", + "# load image and true label from test set with index 
'problem_index'\n", "problem_index = 0\n", "image = dataset2[problem_index][0].detach().numpy()\n", "label = dataset2[problem_index][1]\n", "\n", - "#define input region defined by infinity norm\n", + "# define input region defined by infinity norm\n", "epsilon_infty = 1e-3\n", "lb = np.maximum(0, image - epsilon_infty)\n", "ub = np.minimum(1, image + epsilon_infty)\n", "\n", - "#save input bounds as dictionary, note that the first index 0 corresponds to the single-channel input\n", + "# save input bounds as dictionary, note that the first index 0 corresponds to the single-channel input\n", "input_bounds = {}\n", "for i in range(28):\n", " for j in range(28):\n", - " input_bounds[(0,i,j)] = (float(lb[0][i,j]), float(ub[0][i,j])) \n", - " \n", - "#define dummy input tensor \n", - "x = dataset2[problem_index][0].view(-1,1,28,28)" + " input_bounds[(0, i, j)] = (float(lb[0][i, j]), float(ub[0][i, j]))\n", + "\n", + "# define dummy input tensor\n", + "x = dataset2[problem_index][0].view(-1, 1, 28, 28)" ] }, { @@ -364,22 +374,19 @@ "metadata": {}, "outputs": [], "source": [ - "with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:\n", - " #export neural network to ONNX\n", + "with tempfile.NamedTemporaryFile(suffix=\".onnx\", delete=False) as f:\n", + " # export neural network to ONNX\n", " torch.onnx.export(\n", " model2,\n", " x,\n", " f,\n", - " input_names=['input'],\n", - " output_names=['output'],\n", - " dynamic_axes={\n", - " 'input': {0: 'batch_size'},\n", - " 'output': {0: 'batch_size'}\n", - " }\n", + " input_names=[\"input\"],\n", + " output_names=[\"output\"],\n", + " dynamic_axes={\"input\": {0: \"batch_size\"}, \"output\": {0: \"batch_size\"}},\n", " )\n", - " #write ONNX model and its bounds using OMLT\n", + " # write ONNX model and its bounds using OMLT\n", " write_onnx_model_with_bounds(f.name, None, input_bounds)\n", - " #load the network definition from the ONNX model\n", + " # load the network definition from the ONNX model\n", " network_definition = load_onnx_neural_network_with_bounds(f.name)" ] }, @@ -798,12 +805,12 @@ } ], "source": [ - "#create pyomo model\n", + "# create pyomo model\n", "m = pyo.ConcreteModel()\n", "\n", - "#create an OMLT block for the neural network and build its formulation\n", + "# create an OMLT block for the neural network and build its formulation\n", "m.nn = OmltBlock()\n", - "m.nn.build_formulation(formulation) " + "m.nn.build_formulation(formulation)" ] }, { @@ -820,7 +827,7 @@ "outputs": [], "source": [ "adversary = (label + 1) % 10\n", - "m.obj = pyo.Objective(expr=(-(m.nn.outputs[0,adversary]-m.nn.outputs[0,label])))" + "m.obj = pyo.Objective(expr=(-(m.nn.outputs[0, adversary] - m.nn.outputs[0, label])))" ] }, { @@ -1003,7 +1010,7 @@ } ], "source": [ - "solver = pyo.SolverFactory('cbc')\n", + "solver = pyo.SolverFactory(\"cbc\")\n", "solver.solve(m, tee=True)" ] }, diff --git a/docs/notebooks/neuralnet/mnist_example_dense.ipynb b/docs/notebooks/neuralnet/mnist_example_dense.ipynb index e7af1f06..fecd4467 100644 --- a/docs/notebooks/neuralnet/mnist_example_dense.ipynb +++ b/docs/notebooks/neuralnet/mnist_example_dense.ipynb @@ -46,26 +46,29 @@ } ], "source": [ - "#Import requisite packages\n", - "#data manipulation\n", - "import numpy as np\n", + "# Import requisite packages\n", + "# data manipulation\n", "import tempfile\n", "\n", - "#pytorch for training neural network\n", - "import torch, torch.onnx\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torch.optim as optim\n", - "from torchvision 
import datasets, transforms\n", - "from torch.optim.lr_scheduler import StepLR\n", + "import numpy as np\n", "\n", - "#pyomo for optimization\n", + "# pyomo for optimization\n", "import pyomo.environ as pyo\n", "\n", - "#omlt for interfacing our neural network with pyomo\n", + "# pytorch for training neural network\n", + "import torch\n", + "import torch.onnx\n", + "from torch import nn, optim\n", + "from torch.optim.lr_scheduler import StepLR\n", + "from torchvision import datasets, transforms\n", + "\n", + "# omlt for interfacing our neural network with pyomo\n", "from omlt import OmltBlock\n", - "from omlt.neuralnet import FullSpaceNNFormulation\n", - "from omlt.io.onnx import write_onnx_model_with_bounds, load_onnx_neural_network_with_bounds" + "from omlt.io.onnx import (\n", + " load_onnx_neural_network_with_bounds,\n", + " write_onnx_model_with_bounds,\n", + ")\n", + "from omlt.neuralnet import FullSpaceNNFormulation" ] }, { @@ -83,14 +86,16 @@ "metadata": {}, "outputs": [], "source": [ - "#set training and test batch sizes\n", - "train_kwargs = {'batch_size': 64}\n", - "test_kwargs = {'batch_size': 1000}\n", + "# set training and test batch sizes\n", + "train_kwargs = {\"batch_size\": 64}\n", + "test_kwargs = {\"batch_size\": 1000}\n", "\n", - "#build DataLoaders for training and test sets\n", - "dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor())\n", - "dataset2 = datasets.MNIST('../data', train=False, transform=transforms.ToTensor())\n", - "train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs, shuffle=True)\n", + "# build DataLoaders for training and test sets\n", + "dataset1 = datasets.MNIST(\n", + " \"../data\", train=True, download=True, transform=transforms.ToTensor()\n", + ")\n", + "dataset2 = datasets.MNIST(\"../data\", train=False, transform=transforms.ToTensor())\n", + "train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs, shuffle=True)\n", "test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)" ] }, @@ -109,25 +114,25 @@ "source": [ "hidden_size = 50\n", "\n", + "\n", "class Net(nn.Module):\n", - " #define layers of neural network\n", + " # define layers of neural network\n", " def __init__(self):\n", " super().__init__()\n", - " self.hidden1 = nn.Linear(784, hidden_size)\n", - " self.hidden2 = nn.Linear(hidden_size, hidden_size)\n", - " self.output = nn.Linear(hidden_size, 10)\n", + " self.hidden1 = nn.Linear(784, hidden_size)\n", + " self.hidden2 = nn.Linear(hidden_size, hidden_size)\n", + " self.output = nn.Linear(hidden_size, 10)\n", " self.relu = nn.ReLU()\n", " self.softmax = nn.LogSoftmax(dim=1)\n", "\n", - " #define forward pass of neural network\n", + " # define forward pass of neural network\n", " def forward(self, x):\n", " x = self.hidden1(x)\n", " x = self.relu(x)\n", " x = self.hidden2(x)\n", " x = self.relu(x)\n", " x = self.output(x)\n", - " x = self.softmax(x) \n", - " return x" + " return self.softmax(x)" ] }, { @@ -143,33 +148,38 @@ "metadata": {}, "outputs": [], "source": [ - "#training function computes loss and its gradient on batch, and prints status after every 200 batches\n", + "# training function computes loss and its gradient on batch, and prints status after every 200 batches\n", "def train(model, train_loader, optimizer, epoch):\n", - " model.train(); criterion = nn.NLLLoss()\n", + " model.train()\n", + " criterion = nn.NLLLoss()\n", " for batch_idx, (data, target) in enumerate(train_loader):\n", " optimizer.zero_grad()\n", - " output = model(data.view(-1, 
28*28))\n", + " output = model(data.view(-1, 28 * 28))\n", " loss = criterion(output, target)\n", " loss.backward()\n", " optimizer.step()\n", - " if batch_idx % 200 == 0:\n", - " print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", - " epoch, batch_idx * len(data), len(train_loader.dataset),\n", - " 100. * batch_idx / len(train_loader), loss.item()))\n", + " if batch_idx % 200 == 0:\n", + " print(\n", + " f\"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100.0 * batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item():.6f}\"\n", + " )\n", + "\n", "\n", - "#testing function computes loss and prints overall model accuracy on test set\n", + "# testing function computes loss and prints overall model accuracy on test set\n", "def test(model, test_loader):\n", - " model.eval(); criterion = nn.NLLLoss( reduction='sum')\n", - " test_loss = 0; correct = 0\n", + " model.eval()\n", + " criterion = nn.NLLLoss(reduction=\"sum\")\n", + " test_loss = 0\n", + " correct = 0\n", " with torch.no_grad():\n", " for data, target in test_loader:\n", - " output = model(data.view(-1, 28*28))\n", - " test_loss += criterion(output, target).item() \n", - " pred = output.argmax(dim=1, keepdim=True) \n", + " output = model(data.view(-1, 28 * 28))\n", + " test_loss += criterion(output, target).item()\n", + " pred = output.argmax(dim=1, keepdim=True)\n", " correct += pred.eq(target.view_as(pred)).sum().item()\n", " test_loss /= len(test_loader.dataset)\n", - " print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", - " test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) " + " print(\n", + " f\"\\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100.0 * correct / len(test_loader.dataset):.0f}%)\\n\"\n", + " )" ] }, { @@ -233,12 +243,12 @@ } ], "source": [ - "#define model and optimizer\n", + "# define model and optimizer\n", "model = Net()\n", "optimizer = optim.Adadelta(model.parameters(), lr=1)\n", "scheduler = StepLR(optimizer, step_size=1, gamma=0.7)\n", "\n", - "#train neural network for five epochs\n", + "# train neural network for five epochs\n", "for epoch in range(5):\n", " train(model, train_loader, optimizer, epoch)\n", " test(model, test_loader)\n", @@ -279,24 +289,24 @@ ], "source": [ "class NoSoftmaxNet(nn.Module):\n", - " #define layers of neural network\n", + " # define layers of neural network\n", " def __init__(self):\n", " super().__init__()\n", - " self.hidden1 = nn.Linear(784, hidden_size)\n", - " self.hidden2 = nn.Linear(hidden_size, hidden_size)\n", - " self.output = nn.Linear(hidden_size, 10)\n", + " self.hidden1 = nn.Linear(784, hidden_size)\n", + " self.hidden2 = nn.Linear(hidden_size, hidden_size)\n", + " self.output = nn.Linear(hidden_size, 10)\n", " self.relu = nn.ReLU()\n", "\n", - " #define forward pass of neural network\n", + " # define forward pass of neural network\n", " def forward(self, x):\n", " x = self.hidden1(x)\n", " x = self.relu(x)\n", " x = self.hidden2(x)\n", " x = self.relu(x)\n", - " x = self.output(x)\n", - " return x\n", + " return self.output(x)\n", + "\n", "\n", - "#create neural network without LogSoftmax and load parameters from existing model\n", + "# create neural network without LogSoftmax and load parameters from existing model\n", "model2 = NoSoftmaxNet()\n", "model2.load_state_dict(model.state_dict())" ] @@ -324,23 +334,23 @@ "metadata": {}, "outputs": [], "source": [ - "#load image and true label from test set 
with index 'problem_index'\n", + "# load image and true label from test set with index 'problem_index'\n", "problem_index = 0\n", - "image = dataset2[problem_index][0].view(-1,28*28).detach().numpy()\n", + "image = dataset2[problem_index][0].view(-1, 28 * 28).detach().numpy()\n", "label = dataset2[problem_index][1]\n", "\n", - "#define input region defined by infinity norm\n", + "# define input region defined by infinity norm\n", "epsilon_infty = 5e-2\n", "lb = np.maximum(0, image - epsilon_infty)\n", "ub = np.minimum(1, image + epsilon_infty)\n", "\n", - "#save input bounds as dictionary\n", + "# save input bounds as dictionary\n", "input_bounds = {}\n", - "for i in range(28*28):\n", - " input_bounds[i] = (float(lb[0][i]), float(ub[0][i])) \n", - " \n", - "#define dummy input tensor \n", - "x_temp = dataset2[problem_index][0].view(-1,28*28)" + "for i in range(28 * 28):\n", + " input_bounds[i] = (float(lb[0][i]), float(ub[0][i]))\n", + "\n", + "# define dummy input tensor\n", + "x_temp = dataset2[problem_index][0].view(-1, 28 * 28)" ] }, { @@ -356,22 +366,19 @@ "metadata": {}, "outputs": [], "source": [ - "with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:\n", - " #export neural network to ONNX\n", + "with tempfile.NamedTemporaryFile(suffix=\".onnx\", delete=False) as f:\n", + " # export neural network to ONNX\n", " torch.onnx.export(\n", " model2,\n", " x_temp,\n", " f,\n", - " input_names=['input'],\n", - " output_names=['output'],\n", - " dynamic_axes={\n", - " 'input': {0: 'batch_size'},\n", - " 'output': {0: 'batch_size'}\n", - " }\n", + " input_names=[\"input\"],\n", + " output_names=[\"output\"],\n", + " dynamic_axes={\"input\": {0: \"batch_size\"}, \"output\": {0: \"batch_size\"}},\n", " )\n", - " #write ONNX model and its bounds using OMLT\n", + " # write ONNX model and its bounds using OMLT\n", " write_onnx_model_with_bounds(f.name, None, input_bounds)\n", - " #load the network definition from the ONNX model\n", + " # load the network definition from the ONNX model\n", " network_definition = load_onnx_neural_network_with_bounds(f.name)" ] }, @@ -777,12 +784,12 @@ } ], "source": [ - "#create pyomo model\n", + "# create pyomo model\n", "m = pyo.ConcreteModel()\n", "\n", - "#create an OMLT block for the neural network and build its formulation\n", + "# create an OMLT block for the neural network and build its formulation\n", "m.nn = OmltBlock()\n", - "m.nn.build_formulation(formulation) " + "m.nn.build_formulation(formulation)" ] }, { @@ -799,7 +806,7 @@ "outputs": [], "source": [ "adversary = (label + 1) % 10\n", - "m.obj = pyo.Objective(expr=(-(m.nn.outputs[adversary]-m.nn.outputs[label])))" + "m.obj = pyo.Objective(expr=(-(m.nn.outputs[adversary] - m.nn.outputs[label])))" ] }, { @@ -961,7 +968,7 @@ } ], "source": [ - "pyo.SolverFactory('cbc').solve(m, tee=True)" + "pyo.SolverFactory(\"cbc\").solve(m, tee=True)" ] } ], diff --git a/docs/notebooks/neuralnet/neural_network_formulations.ipynb b/docs/notebooks/neuralnet/neural_network_formulations.ipynb index 3317acd9..07613122 100644 --- a/docs/notebooks/neuralnet/neural_network_formulations.ipynb +++ b/docs/notebooks/neuralnet/neural_network_formulations.ipynb @@ -45,6 +45,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "7fb27b941602401d91542211134fc71a", "metadata": { "pycharm": { "name": "#%%\n" @@ -62,31 +63,37 @@ } ], "source": [ - "#Start by importing the following libraries\n", - "#data manipulation and plotting\n", - "import pandas as pd\n", - "import numpy as np\n", - "import matplotlib.pyplot as 
plt\n", + "# Start by importing the following libraries\n", + "# data manipulation and plotting\n", "import matplotlib\n", - "matplotlib.rc('font', size=24)\n", - "plt.rc('axes', titlesize=24)\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", "\n", - "#tensorflow objects\n", - "from tensorflow.keras.models import Sequential, Model\n", + "matplotlib.rc(\"font\", size=24)\n", + "plt.rc(\"axes\", titlesize=24)\n", + "\n", + "# tensorflow objects\n", + "# pyomo for optimization\n", + "import pyomo.environ as pyo\n", "from tensorflow.keras.layers import Dense, Input\n", + "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.optimizers import Adam\n", "\n", - "#pyomo for optimization\n", - "import pyomo.environ as pyo\n", + "import omlt\n", "\n", - "#omlt for interfacing our neural network with pyomo\n", + "# omlt for interfacing our neural network with pyomo\n", "from omlt import OmltBlock\n", - "from omlt.neuralnet import NetworkDefinition, FullSpaceNNFormulation, \\\n", - "FullSpaceSmoothNNFormulation, ReducedSpaceSmoothNNFormulation, ReluBigMFormulation,\\\n", - "ReluComplementarityFormulation, ReluPartitionFormulation\n", - "from omlt.neuralnet.activations import ComplementarityReLUActivation\n", "from omlt.io.keras import keras_reader\n", - "import omlt" + "from omlt.neuralnet import (\n", + " FullSpaceNNFormulation,\n", + " FullSpaceSmoothNNFormulation,\n", + " ReducedSpaceSmoothNNFormulation,\n", + " ReluBigMFormulation,\n", + " ReluComplementarityFormulation,\n", + " ReluPartitionFormulation,\n", + ")\n", + "from omlt.neuralnet.activations import ComplementarityReLUActivation" ] }, { @@ -116,6 +123,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "acae54e37e7d407bbb7b55eff062a284", "metadata": { "pycharm": { "name": "#%%\n" @@ -123,7 +131,7 @@ }, "outputs": [], "source": [ - "df = pd.read_csv(\"../data/sin_quadratic.csv\",index_col=[0]);" + "df = pd.read_csv(\"../data/sin_quadratic.csv\", index_col=[0]);" ] }, { @@ -141,6 +149,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "9a63283cbaf04dbcab1f6479b197f3a8", "metadata": { "pycharm": { "name": "#%%\n" @@ -159,27 +168,27 @@ } ], "source": [ - "#retrieve input 'x' and output 'y' from the dataframe\n", + "# retrieve input 'x' and output 'y' from the dataframe\n", "x = df[\"x\"]\n", "y = df[\"y\"]\n", "\n", - "#calculate mean and standard deviation, add scaled 'x' and scaled 'y' to the dataframe\n", + "# calculate mean and standard deviation, add scaled 'x' and scaled 'y' to the dataframe\n", "mean_data = df.mean(axis=0)\n", "std_data = df.std(axis=0)\n", - "df[\"x_scaled\"] = (df['x'] - mean_data['x']) / std_data['x']\n", - "df[\"y_scaled\"] = (df['y'] - mean_data['y']) / std_data['y']\n", + "df[\"x_scaled\"] = (df[\"x\"] - mean_data[\"x\"]) / std_data[\"x\"]\n", + "df[\"y_scaled\"] = (df[\"y\"] - mean_data[\"y\"]) / std_data[\"y\"]\n", "\n", - "#create plots for unscaled and scaled data\n", - "f, (ax1, ax2) = plt.subplots(1, 2,figsize = (16,8))\n", + "# create plots for unscaled and scaled data\n", + "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))\n", "\n", "ax1.plot(x, y)\n", "ax1.set_xlabel(\"x\")\n", - "ax1.set_ylabel(\"y\");\n", + "ax1.set_ylabel(\"y\")\n", "ax1.set_title(\"Training Data\")\n", "\n", "ax2.plot(df[\"x_scaled\"], df[\"y_scaled\"])\n", "ax2.set_xlabel(\"x_scaled\")\n", - "ax2.set_ylabel(\"y_scaled\");\n", + "ax2.set_ylabel(\"y_scaled\")\n", "ax2.set_title(\"Scaled Training Data\")\n", "\n", "plt.tight_layout()" @@ -205,6 +214,7 @@ 
{ "cell_type": "code", "execution_count": 4, + "id": "8dd0d8092fe74a7c96281538738b07e2", "metadata": { "pycharm": { "name": "#%%\n" @@ -212,34 +222,35 @@ }, "outputs": [], "source": [ - "#sigmoid neural network\n", - "nn1 = Sequential(name='sin_wave_sigmoid')\n", + "# sigmoid neural network\n", + "nn1 = Sequential(name=\"sin_wave_sigmoid\")\n", "nn1.add(Input(np.array((1,))))\n", - "nn1.add(Dense(50, activation='sigmoid'))\n", - "nn1.add(Dense(50, activation='sigmoid'))\n", + "nn1.add(Dense(50, activation=\"sigmoid\"))\n", + "nn1.add(Dense(50, activation=\"sigmoid\"))\n", "nn1.add(Dense(1))\n", - "nn1.compile(optimizer=Adam(), loss='mse')\n", + "nn1.compile(optimizer=Adam(), loss=\"mse\")\n", "\n", - "#relu neural network\n", - "nn2 = Sequential(name='sin_wave_relu')\n", + "# relu neural network\n", + "nn2 = Sequential(name=\"sin_wave_relu\")\n", "nn2.add(Input(np.array((1,))))\n", - "nn2.add(Dense(30, activation='relu'))\n", - "nn2.add(Dense(30, activation='relu'))\n", + "nn2.add(Dense(30, activation=\"relu\"))\n", + "nn2.add(Dense(30, activation=\"relu\"))\n", "nn2.add(Dense(1))\n", - "nn2.compile(optimizer=Adam(), loss='mse')\n", + "nn2.compile(optimizer=Adam(), loss=\"mse\")\n", "\n", - "#mixed neural network\n", - "nn3 = Sequential(name='sin_wave_mixed')\n", + "# mixed neural network\n", + "nn3 = Sequential(name=\"sin_wave_mixed\")\n", "nn3.add(Input(np.array((1,))))\n", - "nn3.add(Dense(50, activation='sigmoid'))\n", - "nn3.add(Dense(50, activation='relu'))\n", + "nn3.add(Dense(50, activation=\"sigmoid\"))\n", + "nn3.add(Dense(50, activation=\"relu\"))\n", "nn3.add(Dense(1))\n", - "nn3.compile(optimizer=Adam(), loss='mse')" + "nn3.compile(optimizer=Adam(), loss=\"mse\")" ] }, { "cell_type": "code", "execution_count": 5, + "id": "72eea5119410473aa328ad9291626812", "metadata": { "pycharm": { "name": "#%%\n" @@ -850,15 +861,14 @@ "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 923us/step - loss: 0.0090\n", "Epoch 150/150\n", "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 929us/step - loss: 0.0087\n" - ] } ], "source": [ - "#train all three neural networks\n", - "history1 = nn1.fit(x=df['x_scaled'], y=df['y_scaled'],verbose=1, epochs=75)\n", - "history2 = nn2.fit(x=df['x_scaled'], y=df['y_scaled'],verbose=1, epochs=75)\n", - "history3 = nn3.fit(x=df['x_scaled'], y=df['y_scaled'],verbose=1, epochs=150)" + "# train all three neural networks\n", + "history1 = nn1.fit(x=df[\"x_scaled\"], y=df[\"y_scaled\"], verbose=1, epochs=75)\n", + "history2 = nn2.fit(x=df[\"x_scaled\"], y=df[\"y_scaled\"], verbose=1, epochs=75)\n", + "history3 = nn3.fit(x=df[\"x_scaled\"], y=df[\"y_scaled\"], verbose=1, epochs=150)" ] }, { @@ -877,6 +887,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "8edb47106e1a46a883d545849b8ab81b", "metadata": { "pycharm": { "name": "#%%\n" @@ -894,23 +905,24 @@ } ], "source": [ - "#note: we calculate the unscaled output for each neural network to check the predictions\n", - "#nn1\n", - "y_predict_scaled_sigmoid = nn1.predict(x=df['x_scaled'])\n", - "y_predict_sigmoid = y_predict_scaled_sigmoid*(std_data['y']) + mean_data['y']\n", - "\n", - "#nn2\n", - "y_predict_scaled_relu = nn2.predict(x=df['x_scaled'])\n", - "y_predict_relu = y_predict_scaled_relu*(std_data['y']) + mean_data['y']\n", - "\n", - "#nn3\n", - "y_predict_scaled_mixed = nn3.predict(x=df['x_scaled'])\n", - "y_predict_mixed = y_predict_scaled_mixed*(std_data['y']) + mean_data['y']" + "# note: 
we calculate the unscaled output for each neural network to check the predictions\n", + "# nn1\n", + "y_predict_scaled_sigmoid = nn1.predict(x=df[\"x_scaled\"])\n", + "y_predict_sigmoid = y_predict_scaled_sigmoid * (std_data[\"y\"]) + mean_data[\"y\"]\n", + "\n", + "# nn2\n", + "y_predict_scaled_relu = nn2.predict(x=df[\"x_scaled\"])\n", + "y_predict_relu = y_predict_scaled_relu * (std_data[\"y\"]) + mean_data[\"y\"]\n", + "\n", + "# nn3\n", + "y_predict_scaled_mixed = nn3.predict(x=df[\"x_scaled\"])\n", + "y_predict_mixed = y_predict_scaled_mixed * (std_data[\"y\"]) + mean_data[\"y\"]" ] }, { "cell_type": "code", "execution_count": 7, + "id": "10185d26023b46108eb7d9f57d49d2b3", "metadata": { "pycharm": { "name": "#%%\n" @@ -929,12 +941,12 @@ } ], "source": [ - "#create a single plot with the original data and each neural network's predictions\n", - "fig,ax = plt.subplots(1,figsize = (8,8))\n", - "ax.plot(x,y,linewidth = 3.0,label = \"data\", alpha = 0.5)\n", - "ax.plot(x,y_predict_relu,linewidth = 3.0,linestyle=\"dotted\",label = \"relu\")\n", - "ax.plot(x,y_predict_sigmoid,linewidth = 3.0,linestyle=\"dotted\",label = \"sigmoid\")\n", - "ax.plot(x,y_predict_mixed,linewidth = 3.0,linestyle=\"dotted\",label = \"mixed\")\n", + "# create a single plot with the original data and each neural network's predictions\n", + "fig, ax = plt.subplots(1, figsize=(8, 8))\n", + "ax.plot(x, y, linewidth=3.0, label=\"data\", alpha=0.5)\n", + "ax.plot(x, y_predict_relu, linewidth=3.0, linestyle=\"dotted\", label=\"relu\")\n", + "ax.plot(x, y_predict_sigmoid, linewidth=3.0, linestyle=\"dotted\", label=\"sigmoid\")\n", + "ax.plot(x, y_predict_mixed, linewidth=3.0, linestyle=\"dotted\", label=\"mixed\")\n", "plt.xlabel(\"x\")\n", "plt.ylabel(\"y\")\n", "plt.legend();" @@ -1051,6 +1063,7 @@ { "cell_type": "code", "execution_count": 8, + "id": "8763a12b2bbd4a93a75aff182afb95dc", "metadata": { "pycharm": { "name": "#%%\n" @@ -1067,17 +1080,23 @@ } ], "source": [ - "#create an omlt scaling object\n", - "scaler = omlt.scaling.OffsetScaling(offset_inputs=[mean_data['x']],\n", - " factor_inputs=[std_data['x']],\n", - " offset_outputs=[mean_data['y']],\n", - " factor_outputs=[std_data['y']])\n", - "\n", - "#create the input bounds. note that the key `0` corresponds to input `0` and that we also scale the input bounds\n", - "input_bounds={0:((min(df['x']) - mean_data['x'])/std_data['x'],\n", - " (max(df['x']) - mean_data['x'])/std_data['x'])};\n", + "# create an omlt scaling object\n", + "scaler = omlt.scaling.OffsetScaling(\n", + " offset_inputs=[mean_data[\"x\"]],\n", + " factor_inputs=[std_data[\"x\"]],\n", + " offset_outputs=[mean_data[\"y\"]],\n", + " factor_outputs=[std_data[\"y\"]],\n", + ")\n", + "\n", + "# create the input bounds. 
note that the key `0` corresponds to input `0` and that we also scale the input bounds\n", + "input_bounds = {\n", + " 0: (\n", + " (min(df[\"x\"]) - mean_data[\"x\"]) / std_data[\"x\"],\n", + " (max(df[\"x\"]) - mean_data[\"x\"]) / std_data[\"x\"],\n", + " )\n", + "}\n", "print(scaler)\n", - "print(\"Scaled input bounds: \",input_bounds)" + "print(\"Scaled input bounds: \", input_bounds)" ] }, { @@ -1099,6 +1118,7 @@ { "cell_type": "code", "execution_count": 9, + "id": "7623eae2785240b9bd12b16a66d81610", "metadata": { "pycharm": { "name": "#%%\n" @@ -1181,39 +1201,43 @@ } ], "source": [ - "#create a network definition\n", - "net_sigmoid = keras_reader.load_keras_sequential(nn1,scaler,input_bounds)\n", + "# create a network definition\n", + "net_sigmoid = keras_reader.load_keras_sequential(nn1, scaler, input_bounds)\n", "\n", - "#create a pyomo model with variables x and y\n", + "# create a pyomo model with variables x and y\n", "model1_reduced = pyo.ConcreteModel()\n", - "model1_reduced.x = pyo.Var(initialize = 0)\n", - "model1_reduced.y = pyo.Var(initialize = 0)\n", + "model1_reduced.x = pyo.Var(initialize=0)\n", + "model1_reduced.y = pyo.Var(initialize=0)\n", "model1_reduced.obj = pyo.Objective(expr=(model1_reduced.y))\n", "\n", - "#create an OmltBlock\n", + "# create an OmltBlock\n", "model1_reduced.nn = OmltBlock()\n", "\n", - "#use the reduced-space formulation\n", + "# use the reduced-space formulation\n", "formulation1_reduced = ReducedSpaceSmoothNNFormulation(net_sigmoid)\n", "model1_reduced.nn.build_formulation(formulation1_reduced)\n", "\n", - "#connect pyomo variables to the neural network\n", + "\n", + "# connect pyomo variables to the neural network\n", "@model1_reduced.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.nn.inputs[0]\n", "\n", + "\n", "@model1_reduced.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "#solve the model and query the solution\n", - "status_1_reduced = pyo.SolverFactory('ipopt').solve(model1_reduced, tee=True)\n", - "solution_1_reduced = (pyo.value(model1_reduced.x),pyo.value(model1_reduced.y))" + "\n", + "# solve the model and query the solution\n", + "status_1_reduced = pyo.SolverFactory(\"ipopt\").solve(model1_reduced, tee=True)\n", + "solution_1_reduced = (pyo.value(model1_reduced.x), pyo.value(model1_reduced.y))" ] }, { "cell_type": "code", "execution_count": 10, + "id": "7cdc8c89c7104fffa095e18ddfef8986", "metadata": { "pycharm": { "name": "#%%\n" @@ -1234,13 +1258,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"Reduced Space Solution:\")\n", - "print(\"# of variables: \",model1_reduced.nvariables())\n", - "print(\"# of constraints: \",model1_reduced.nconstraints())\n", + "print(\"# of variables: \", model1_reduced.nvariables())\n", + "print(\"# of constraints: \", model1_reduced.nconstraints())\n", "print(\"x = \", solution_1_reduced[0])\n", "print(\"y = \", solution_1_reduced[1])\n", - "print(\"Solve Time: \", status_1_reduced['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_1_reduced[\"Solver\"][0][\"Time\"])" ] }, { @@ -1261,6 +1285,7 @@ { "cell_type": "code", "execution_count": 11, + "id": "b118ea5561624da68c537baed56e602f", "metadata": { "pycharm": { "name": "#%%\n" @@ -1447,32 +1472,36 @@ } ], "source": [ - "net_sigmoid = keras_reader.load_keras_sequential(nn1,scaler,input_bounds)\n", + "net_sigmoid = keras_reader.load_keras_sequential(nn1, scaler, input_bounds)\n", "\n", 
"model1_full = pyo.ConcreteModel()\n", - "model1_full.x = pyo.Var(initialize = 0)\n", - "model1_full.y = pyo.Var(initialize = 0)\n", + "model1_full.x = pyo.Var(initialize=0)\n", + "model1_full.y = pyo.Var(initialize=0)\n", "model1_full.obj = pyo.Objective(expr=(model1_full.y))\n", "model1_full.nn = OmltBlock()\n", "\n", "formulation2_full = FullSpaceSmoothNNFormulation(net_sigmoid)\n", "model1_full.nn.build_formulation(formulation2_full)\n", "\n", + "\n", "@model1_full.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.nn.inputs[0]\n", "\n", + "\n", "@model1_full.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "status_1_full = pyo.SolverFactory('ipopt').solve(model1_full, tee=True)\n", - "solution_1_full = (pyo.value(model1_full.x),pyo.value(model1_full.y))" + "\n", + "status_1_full = pyo.SolverFactory(\"ipopt\").solve(model1_full, tee=True)\n", + "solution_1_full = (pyo.value(model1_full.x), pyo.value(model1_full.y))" ] }, { "cell_type": "code", "execution_count": 12, + "id": "938c804e27f84196a10c8828c723f798", "metadata": { "pycharm": { "name": "#%%\n" @@ -1493,13 +1522,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"Full Space Solution:\")\n", - "print(\"# of variables: \",model1_full.nvariables())\n", - "print(\"# of constraints: \",model1_full.nconstraints())\n", + "print(\"# of variables: \", model1_full.nvariables())\n", + "print(\"# of constraints: \", model1_full.nconstraints())\n", "print(\"x = \", solution_1_full[0])\n", "print(\"y = \", solution_1_full[1])\n", - "print(\"Solve Time: \", status_1_full['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_1_full[\"Solver\"][0][\"Time\"])" ] }, { @@ -1521,6 +1550,7 @@ { "cell_type": "code", "execution_count": 13, + "id": "504fb2a444614c0babb325280ed9130a", "metadata": { "pycharm": { "name": "#%%\n" @@ -1668,32 +1698,36 @@ } ], "source": [ - "net_relu = keras_reader.load_keras_sequential(nn2,scaler,input_bounds)\n", + "net_relu = keras_reader.load_keras_sequential(nn2, scaler, input_bounds)\n", "\n", "model2_comp = pyo.ConcreteModel()\n", - "model2_comp.x = pyo.Var(initialize = 0)\n", - "model2_comp.y = pyo.Var(initialize = 0)\n", + "model2_comp.x = pyo.Var(initialize=0)\n", + "model2_comp.y = pyo.Var(initialize=0)\n", "model2_comp.obj = pyo.Objective(expr=(model2_comp.y))\n", "model2_comp.nn = OmltBlock()\n", "\n", "formulation2_comp = ReluComplementarityFormulation(net_relu)\n", "model2_comp.nn.build_formulation(formulation2_comp)\n", "\n", + "\n", "@model2_comp.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.nn.inputs[0]\n", "\n", + "\n", "@model2_comp.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "status_2_comp = pyo.SolverFactory('ipopt').solve(model2_comp, tee=True)\n", - "solution_2_comp = (pyo.value(model2_comp.x),pyo.value(model2_comp.y))" + "\n", + "status_2_comp = pyo.SolverFactory(\"ipopt\").solve(model2_comp, tee=True)\n", + "solution_2_comp = (pyo.value(model2_comp.x), pyo.value(model2_comp.y))" ] }, { "cell_type": "code", "execution_count": 14, + "id": "59bbdb311c014d738909a11f9e486628", "metadata": { "pycharm": { "name": "#%%\n" @@ -1714,13 +1748,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"ReLU Complementarity Solution:\")\n", - "print(\"# of variables: \",model2_comp.nvariables())\n", - "print(\"# of 
constraints: \",model2_comp.nconstraints())\n", + "print(\"# of variables: \", model2_comp.nvariables())\n", + "print(\"# of constraints: \", model2_comp.nconstraints())\n", "print(\"x = \", solution_2_comp[0])\n", "print(\"y = \", solution_2_comp[1])\n", - "print(\"Solve Time: \", status_2_comp['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_2_comp[\"Solver\"][0][\"Time\"])" ] }, { @@ -1741,6 +1775,7 @@ { "cell_type": "code", "execution_count": 15, + "id": "b43b363d81ae4b689946ece5c682cd59", "metadata": { "pycharm": { "name": "#%%\n" @@ -1748,32 +1783,36 @@ }, "outputs": [], "source": [ - "net_relu = keras_reader.load_keras_sequential(nn2,scaler,input_bounds)\n", + "net_relu = keras_reader.load_keras_sequential(nn2, scaler, input_bounds)\n", "\n", "model2_bigm = pyo.ConcreteModel()\n", - "model2_bigm.x = pyo.Var(initialize = 0)\n", - "model2_bigm.y = pyo.Var(initialize = 0)\n", + "model2_bigm.x = pyo.Var(initialize=0)\n", + "model2_bigm.y = pyo.Var(initialize=0)\n", "model2_bigm.obj = pyo.Objective(expr=(model2_bigm.y))\n", "model2_bigm.nn = OmltBlock()\n", "\n", "formulation2_bigm = ReluBigMFormulation(net_relu)\n", "model2_bigm.nn.build_formulation(formulation2_bigm)\n", "\n", + "\n", "@model2_bigm.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.nn.inputs[0]\n", "\n", + "\n", "@model2_bigm.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "status_2_bigm = pyo.SolverFactory('cbc').solve(model2_bigm, tee=False)\n", - "solution_2_bigm = (pyo.value(model2_bigm.x),pyo.value(model2_bigm.y))" + "\n", + "status_2_bigm = pyo.SolverFactory(\"cbc\").solve(model2_bigm, tee=False)\n", + "solution_2_bigm = (pyo.value(model2_bigm.x), pyo.value(model2_bigm.y))" ] }, { "cell_type": "code", "execution_count": 16, + "id": "8a65eabff63a45729fe45fb5ade58bdc", "metadata": { "pycharm": { "name": "#%%\n" @@ -1794,13 +1833,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"ReLU BigM Solution:\")\n", - "print(\"# of variables: \",model2_bigm.nvariables())\n", - "print(\"# of constraints: \",model2_bigm.nconstraints())\n", + "print(\"# of variables: \", model2_bigm.nvariables())\n", + "print(\"# of constraints: \", model2_bigm.nconstraints())\n", "print(\"x = \", solution_2_bigm[0])\n", "print(\"y = \", solution_2_bigm[1])\n", - "print(\"Solve Time: \", status_2_bigm['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_2_bigm[\"Solver\"][0][\"Time\"])" ] }, { @@ -1822,6 +1861,7 @@ { "cell_type": "code", "execution_count": 17, + "id": "c3933fab20d04ec698c2621248eb3be0", "metadata": { "pycharm": { "name": "#%%\n" @@ -1956,46 +1996,54 @@ } ], "source": [ - "net_relu_partition = keras_reader.load_keras_sequential(nn2,scaler,input_bounds)\n", + "net_relu_partition = keras_reader.load_keras_sequential(nn2, scaler, input_bounds)\n", + "\n", "\n", - "#create a function that partitions a vector of weights w` into `n` partitions\n", - "#by default, the `ReluPartitionFormulation` will use this function with n=2\n", + "# create a function that partitions a vector of weights w` into `n` partitions\n", + "# by default, the `ReluPartitionFormulation` will use this function with n=2\n", "def partition_split_func(w, n):\n", " sorted_indexes = np.argsort(w)\n", " n = min(n, len(sorted_indexes))\n", " return np.array_split(sorted_indexes, n)\n", "\n", - "#change the number of partitions and create a function we can pass to the formulation\n", + "\n", + "# change the number 
of partitions and create a function we can pass to the formulation\n", "#'N = 1' corresponds to BigM, 'N = n_inputs' corresponds to a convex hull formulation\n", "N = 1\n", "split_func = lambda w: partition_split_func(w, N)\n", "\n", "model2_partition = pyo.ConcreteModel()\n", - "model2_partition.x = pyo.Var(initialize = 0)\n", - "model2_partition.y = pyo.Var(initialize = 0)\n", + "model2_partition.x = pyo.Var(initialize=0)\n", + "model2_partition.y = pyo.Var(initialize=0)\n", "model2_partition.obj = pyo.Objective(expr=(model2_partition.y))\n", "model2_partition.nn = OmltBlock()\n", "\n", - "formulation2_partition = ReluPartitionFormulation(net_relu_partition, split_func=split_func)\n", + "formulation2_partition = ReluPartitionFormulation(\n", + " net_relu_partition, split_func=split_func\n", + ")\n", "model2_partition.nn.build_formulation(formulation2_partition)\n", "\n", + "\n", "@model2_partition.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.nn.inputs[0]\n", "\n", + "\n", "@model2_partition.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "solver = pyo.SolverFactory('cbc')\n", + "\n", + "solver = pyo.SolverFactory(\"cbc\")\n", "solver.options[\"printingOptions\"] = \"normal\"\n", - "status_2_partition=solver.solve(model2_partition, tee=True)\n", - "solution_2_partition = (pyo.value(model2_partition.x),pyo.value(model2_partition.y))" + "status_2_partition = solver.solve(model2_partition, tee=True)\n", + "solution_2_partition = (pyo.value(model2_partition.x), pyo.value(model2_partition.y))" ] }, { "cell_type": "code", "execution_count": 18, + "id": "4dd4641cc4064e0191573fe9c69df29b", "metadata": { "pycharm": { "name": "#%%\n" @@ -2016,13 +2064,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"ReLU Partition Solution:\")\n", - "print(\"# of variables: \",model2_partition.nvariables())\n", - "print(\"# of constraints: \",model2_partition.nconstraints())\n", + "print(\"# of variables: \", model2_partition.nvariables())\n", + "print(\"# of constraints: \", model2_partition.nconstraints())\n", "print(\"x = \", solution_2_partition[0])\n", "print(\"y = \", solution_2_partition[1])\n", - "print(\"Solve Time: \", status_2_partition['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_2_partition[\"Solver\"][0][\"Time\"])" ] }, { @@ -2041,6 +2089,7 @@ { "cell_type": "code", "execution_count": 19, + "id": "8309879909854d7188b41380fd92a7c3", "metadata": { "pycharm": { "name": "#%%\n" @@ -2173,34 +2222,39 @@ } ], "source": [ - "net_mixed = keras_reader.load_keras_sequential(nn3,scaler,input_bounds)\n", + "net_mixed = keras_reader.load_keras_sequential(nn3, scaler, input_bounds)\n", "\n", "model3_mixed = pyo.ConcreteModel()\n", - "model3_mixed.x = pyo.Var(initialize = 0)\n", - "model3_mixed.y = pyo.Var(initialize = 0)\n", + "model3_mixed.x = pyo.Var(initialize=0)\n", + "model3_mixed.y = pyo.Var(initialize=0)\n", "model3_mixed.obj = pyo.Objective(expr=(model3_mixed.y))\n", "model3_mixed.nn = OmltBlock()\n", "\n", - "formulation3_mixed = FullSpaceNNFormulation(net_mixed,activation_constraints={\n", - " \"relu\": ComplementarityReLUActivation()})\n", + "formulation3_mixed = FullSpaceNNFormulation(\n", + " net_mixed, activation_constraints={\"relu\": ComplementarityReLUActivation()}\n", + ")\n", "model3_mixed.nn.build_formulation(formulation3_mixed)\n", "\n", + "\n", "@model3_mixed.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == 
mdl.nn.inputs[0]\n", "\n", + "\n", "@model3_mixed.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.nn.outputs[0]\n", "\n", - "solver = pyo.SolverFactory('ipopt')\n", - "status_3_mixed = solver.solve(model3_mixed, tee='true')\n", - "solution_3_mixed = (pyo.value(model3_mixed.x),pyo.value(model3_mixed.y))" + "\n", + "solver = pyo.SolverFactory(\"ipopt\")\n", + "status_3_mixed = solver.solve(model3_mixed, tee=\"true\")\n", + "solution_3_mixed = (pyo.value(model3_mixed.x), pyo.value(model3_mixed.y))" ] }, { "cell_type": "code", "execution_count": 20, + "id": "3ed186c9a28b402fb0bc4494df01f08d", "metadata": { "pycharm": { "name": "#%%\n" @@ -2221,13 +2275,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"Mixed NN Solution:\")\n", - "print(\"# of variables: \",model3_mixed.nvariables())\n", - "print(\"# of constraints: \",model3_mixed.nconstraints())\n", + "print(\"# of variables: \", model3_mixed.nvariables())\n", + "print(\"# of constraints: \", model3_mixed.nconstraints())\n", "print(\"x = \", solution_3_mixed[0])\n", "print(\"y = \", solution_3_mixed[1])\n", - "print(\"Solve Time: \", status_3_mixed['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_3_mixed[\"Solver\"][0][\"Time\"])" ] }, { @@ -2253,6 +2307,7 @@ { "cell_type": "code", "execution_count": 21, + "id": "cb1e1581032b452c9409d6c6813c49d1", "metadata": { "pycharm": { "name": "#%%\n" @@ -2271,28 +2326,50 @@ } ], "source": [ - "#create a plot with 3 subplots\n", - "fig,axs = plt.subplots(1,3,figsize = (24,8))\n", + "# create a plot with 3 subplots\n", + "fig, axs = plt.subplots(1, 3, figsize=(24, 8))\n", "\n", - "#nn1 - sigmoid\n", - "axs[0].plot(x,y_predict_sigmoid,linewidth = 3.0,linestyle=\"dotted\",color = \"orange\")\n", + "# nn1 - sigmoid\n", + "axs[0].plot(x, y_predict_sigmoid, linewidth=3.0, linestyle=\"dotted\", color=\"orange\")\n", "axs[0].set_title(\"sigmoid\")\n", - "axs[0].scatter([solution_1_reduced[0]],[solution_1_reduced[1]],color = \"black\",s = 300, label=\"reduced space\")\n", - "axs[0].scatter([solution_1_full[0]],[solution_1_full[1]],color = \"blue\",s = 300, label=\"full space\")\n", + "axs[0].scatter(\n", + " [solution_1_reduced[0]],\n", + " [solution_1_reduced[1]],\n", + " color=\"black\",\n", + " s=300,\n", + " label=\"reduced space\",\n", + ")\n", + "axs[0].scatter(\n", + " [solution_1_full[0]], [solution_1_full[1]], color=\"blue\", s=300, label=\"full space\"\n", + ")\n", "axs[0].legend()\n", "\n", - "#nn2 - relu\n", - "axs[1].plot(x,y_predict_relu,linewidth = 3.0,linestyle=\"dotted\",color = \"green\")\n", + "# nn2 - relu\n", + "axs[1].plot(x, y_predict_relu, linewidth=3.0, linestyle=\"dotted\", color=\"green\")\n", "axs[1].set_title(\"relu\")\n", - "axs[1].scatter([solution_2_comp[0]],[solution_2_comp[1]],color = \"black\",s = 300, label=\"complementarity\")\n", - "axs[1].scatter([solution_2_bigm[0]],[solution_2_bigm[1]],color = \"blue\",s = 300, label=\"bigm\")\n", - "axs[1].scatter([solution_2_partition[0]],[solution_2_partition[1]],color = \"purple\",s = 300, label=\"partition\")\n", + "axs[1].scatter(\n", + " [solution_2_comp[0]],\n", + " [solution_2_comp[1]],\n", + " color=\"black\",\n", + " s=300,\n", + " label=\"complementarity\",\n", + ")\n", + "axs[1].scatter(\n", + " [solution_2_bigm[0]], [solution_2_bigm[1]], color=\"blue\", s=300, label=\"bigm\"\n", + ")\n", + "axs[1].scatter(\n", + " [solution_2_partition[0]],\n", + " [solution_2_partition[1]],\n", + " color=\"purple\",\n", + " 
s=300,\n", + " label=\"partition\",\n", + ")\n", "axs[1].legend()\n", "\n", - "#nn3 - mixed\n", - "axs[2].plot(x,y_predict_mixed,linewidth = 3.0,linestyle=\"dotted\", color = \"red\")\n", + "# nn3 - mixed\n", + "axs[2].plot(x, y_predict_mixed, linewidth=3.0, linestyle=\"dotted\", color=\"red\")\n", "axs[2].set_title(\"mixed\")\n", - "axs[2].scatter([solution_3_mixed[0]],[solution_3_mixed[1]],color = \"black\",s = 300);" + "axs[2].scatter([solution_3_mixed[0]], [solution_3_mixed[1]], color=\"black\", s=300);" ] }, { diff --git a/docs/notebooks/trees/bo_with_trees.ipynb b/docs/notebooks/trees/bo_with_trees.ipynb index 65fafd4f..0f52e595 100644 --- a/docs/notebooks/trees/bo_with_trees.ipynb +++ b/docs/notebooks/trees/bo_with_trees.ipynb @@ -47,29 +47,33 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import random\n", "\n", + "import numpy as np\n", + "\n", + "\n", "def f(x, y):\n", - " return (1-x)**2 + 100* ((y-x**2))**2\n", + " return (1 - x) ** 2 + 100 * (y - x**2) ** 2\n", + "\n", + "\n", + "f_bnds = [(-2.048, 2.048) for _ in range(2)]\n", "\n", - "f_bnds = [(-2.048,2.048) for _ in range(2)]\n", "\n", "def generate_samples(num_samples, bb_bnds):\n", - " data = {'X': [], 'y': []}\n", + " data = {\"X\": [], \"y\": []}\n", "\n", " for _ in range(num_samples):\n", " sample = []\n", "\n", " # iterate through all dimension bounds\n", - " for idx, var_bnds in enumerate(bb_bnds):\n", + " for _, var_bnds in enumerate(bb_bnds):\n", " val = random.uniform(var_bnds[0], var_bnds[1])\n", "\n", " # populate the sample\n", " sample.append(val)\n", "\n", - " data['X'].append(sample)\n", - " data['y'].append(f(sample[0], sample[1]))\n", + " data[\"X\"].append(sample)\n", + " data[\"y\"].append(f(sample[0], sample[1]))\n", " return data" ] }, @@ -89,29 +93,28 @@ "metadata": {}, "outputs": [], "source": [ - "import lightgbm as lgb\n", "import warnings\n", "\n", + "import lightgbm as lgb\n", + "\n", + "\n", "def train_tree(data):\n", " with warnings.catch_warnings():\n", " warnings.simplefilter(\"ignore\")\n", - " PARAMS = {'objective': 'regression',\n", - " 'metric': 'rmse',\n", - " 'boosting': 'gbdt',\n", - " 'num_trees': 50,\n", - " 'max_depth': 3,\n", - " 'min_data_in_leaf': 2,\n", - " 'random_state': 100,\n", - " 'verbose': -1}\n", - " train_x = np.asarray(data['X'])\n", - " train_data = lgb.Dataset(train_x, \n", - " label=data['y'],\n", - " params={'verbose': -1})\n", - "\n", - " model = lgb.train(PARAMS, \n", - " train_data,\n", - " verbose_eval=False)\n", - " return model" + " PARAMS = {\n", + " \"objective\": \"regression\",\n", + " \"metric\": \"rmse\",\n", + " \"boosting\": \"gbdt\",\n", + " \"num_trees\": 50,\n", + " \"max_depth\": 3,\n", + " \"min_data_in_leaf\": 2,\n", + " \"random_state\": 100,\n", + " \"verbose\": -1,\n", + " }\n", + " train_x = np.asarray(data[\"X\"])\n", + " train_data = lgb.Dataset(train_x, label=data[\"y\"], params={\"verbose\": -1})\n", + "\n", + " return lgb.train(PARAMS, train_data, verbose_eval=False)" ] }, { @@ -133,14 +136,12 @@ "from onnxmltools.convert.lightgbm.convert import convert\n", "from skl2onnx.common.data_types import FloatTensorType\n", "\n", + "\n", "def get_onnx_model(lgb_model):\n", " # export onnx model\n", " float_tensor_type = FloatTensorType([None, lgb_model.num_feature()])\n", - " initial_types = [('float_input', float_tensor_type)]\n", - " onnx_model = convert(lgb_model, \n", - " initial_types=initial_types, \n", - " target_opset=8)\n", - " return onnx_model" + " initial_types = [(\"float_input\", float_tensor_type)]\n", 
+ " return convert(lgb_model, initial_types=initial_types, target_opset=8)" ] }, { @@ -160,9 +161,10 @@ "source": [ "def write_onnx_to_file(onnx_model, path, file_name=\"output.onnx\"):\n", " from pathlib import Path\n", + "\n", " with open(Path(path) / file_name, \"wb\") as onnx_file:\n", " onnx_file.write(onnx_model.SerializeToString())\n", - " print(f'Onnx model written to {onnx_file.name}')" + " print(f\"Onnx model written to {onnx_file.name}\")" ] }, { @@ -182,15 +184,16 @@ "outputs": [], "source": [ "import pyomo.environ as pe\n", + "\n", "from omlt.block import OmltBlock\n", "from omlt.gbt import GBTBigMFormulation, GradientBoostedTreeModel\n", "\n", + "\n", "def add_tree_model(opt_model, onnx_model, input_bounds):\n", " # init omlt block and gbt model based on the onnx format\n", " opt_model.gbt = OmltBlock()\n", - " gbt_model = GradientBoostedTreeModel(onnx_model, \n", - " scaled_input_bounds=input_bounds)\n", - " \n", + " gbt_model = GradientBoostedTreeModel(onnx_model, scaled_input_bounds=input_bounds)\n", + "\n", " # omlt uses a big-m formulation to encode the tree models\n", " formulation = GBTBigMFormulation(gbt_model)\n", " opt_model.gbt.build_formulation(formulation)" @@ -211,29 +214,26 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "from omlt.base import OmltVar\n", - "\n", - "\n", "def add_unc_metric(opt_model, data):\n", - " \n", " # compute mean and std for standardization\n", - " data_x = np.asarray(data['X'])\n", + " data_x = np.asarray(data[\"X\"])\n", " std = np.std(data_x, axis=0)\n", " mean = np.mean(data_x, axis=0)\n", - " \n", + "\n", " # alpha capture the uncertainty value\n", - " alpha_bound = abs(0.5*np.var(data['y']))\n", - " opt_model.alpha = OmltVar(within=pe.NonNegativeReals, bounds=(0,alpha_bound))\n", + " alpha_bound = abs(0.5 * np.var(data[\"y\"]))\n", + " opt_model.alpha = pe.Var(within=pe.NonNegativeReals, bounds=(0, alpha_bound))\n", " opt_model.unc_constr = pe.ConstraintList()\n", - " \n", + "\n", " for x in data_x:\n", " x_var = opt_model.gbt.inputs\n", " opt_model.unc_constr.add(\n", - " opt_model.alpha <= \\\n", - " sum((x[idx]-(x_var[idx]-mean[idx])/std[idx])*\\\n", - " (x[idx]-(x_var[idx]-mean[idx])/std[idx]) \n", - " for idx in range(len(x_var)) )\n", + " opt_model.alpha\n", + " <= sum(\n", + " (x[idx] - (x_var[idx] - mean[idx]) / std[idx])\n", + " * (x[idx] - (x_var[idx] - mean[idx]) / std[idx])\n", + " for idx in range(len(x_var))\n", + " )\n", " )" ] }, @@ -272,11 +272,13 @@ "random.seed(10)\n", "data = generate_samples(5, f_bnds)\n", "\n", + "\n", "def plot_progress(data, input_bounds):\n", " # plot contour line and data points\n", " import matplotlib.pyplot as plt\n", + "\n", " fig = plt.figure()\n", - " ax = fig.add_axes([0,0,2,2])\n", + " ax = fig.add_axes([0, 0, 2, 2])\n", "\n", " # create mesh\n", " s = 0.01\n", @@ -285,19 +287,20 @@ " X, Y = np.meshgrid(X, Y)\n", "\n", " # rosenbrock function\n", - " Z = f(X,Y)\n", + " Z = f(X, Y)\n", "\n", " # plot contour line\n", - " clevf = np.arange(Z.min(),Z.max(), 10)\n", + " clevf = np.arange(Z.min(), Z.max(), 10)\n", " CS = plt.contourf(X, Y, Z, clevf)\n", " fig.colorbar(CS)\n", "\n", " # plot initial data set\n", - " ax.scatter([x[0] for x in data['X']], [x[1] for x in data['X']], c='r', s=100)\n", + " ax.scatter([x[0] for x in data[\"X\"]], [x[1] for x in data[\"X\"]], c=\"r\", s=100)\n", "\n", - " plt.rcParams.update({'font.size': 15})\n", + " plt.rcParams.update({\"font.size\": 15})\n", " plt.show()\n", - " \n", + "\n", + "\n", "plot_progress(data, f_bnds)" ] 
 },
@@ -326,29 +329,28 @@
 " # building the optimization model\n",
 " onnx_model = get_onnx_model(lgb_model)\n",
 " opt_model = pe.ConcreteModel()\n",
- " \n",
+ "\n",
 " add_tree_model(opt_model, onnx_model, f_bnds)\n",
- " \n",
+ "\n",
 " if has_unc:\n",
 " add_unc_metric(opt_model, data)\n",
- " opt_model.obj = pe.Objective(expr=opt_model.gbt.outputs[0] - 1.96*opt_model.alpha)\n",
+ " opt_model.obj = pe.Objective(\n",
+ " expr=opt_model.gbt.outputs[0] - 1.96 * opt_model.alpha\n",
+ " )\n",
 "\n",
 " # adding uncertainty yields a non-convex MIQP, which solvers like Gurobi can handle\n",
- " solver = pe.SolverFactory('gurobi')\n",
- " solver.options['NonConvex'] = 2\n",
+ " solver = pe.SolverFactory(\"gurobi\")\n",
+ " solver.options[\"NonConvex\"] = 2\n",
 " solution = solver.solve(opt_model, tee=False)\n",
 " else:\n",
 " opt_model.obj = pe.Objective(expr=opt_model.gbt.outputs[0])\n",
 "\n",
 " # without uncertainty we can use cbc to solve the model\n",
- " solver = pe.SolverFactory('cbc')\n",
- " solution = solver.solve(opt_model, tee=False) \n",
- " \n",
+ " solver = pe.SolverFactory(\"cbc\")\n",
+ " solution = solver.solve(opt_model, tee=False)\n",
+ "\n",
 " # extract solution from solved model\n",
- " next_x = [opt_model.gbt.inputs[idx].value \n",
- " for idx in range(len(opt_model.gbt.inputs))]\n",
- " \n",
- " return next_x"
+ " return [opt_model.gbt.inputs[idx].value for idx in range(len(opt_model.gbt.inputs))]"
 ]
 },
 {
@@ -402,40 +404,43 @@
 "source": [
 "from tqdm.notebook import tqdm\n",
 "\n",
- "for itr in tqdm(range(80)):\n",
+ "for _ in tqdm(range(80)):\n",
 " # training the tree ensemble\n",
 " lgb_model = train_tree(data)\n",
- " \n",
+ "\n",
 " # minimize the trained model\n",
 " next_x = minimize_model(f_bnds, lgb_model, has_unc=False)\n",
- " \n",
+ "\n",
 " # evaluate the proposed input\n",
 " next_y = f(next_x[0], next_x[1])\n",
 "\n",
- " data['X'].append(next_x)\n",
- " data['y'].append(next_y)\n",
- " \n",
- "def plot_progress(data): \n",
+ " data[\"X\"].append(next_x)\n",
+ " data[\"y\"].append(next_y)\n",
+ "\n",
+ "\n",
+ "def plot_progress(data):\n",
 " # set up plot\n",
 " import matplotlib.pyplot as plt\n",
+ "\n",
 " fig = plt.figure()\n",
- " ax = fig.add_axes([0,0,2,2])\n",
+ " ax = fig.add_axes([0, 0, 2, 2])\n",
 " plt.ylabel(\"Black-Box Function Objective\")\n",
 " plt.xlabel(\"# Iterations\")\n",
 "\n",
 " # extract best_y\n",
 " min_y = []\n",
- " curr_min = data['y'][0]\n",
- " for y in data['y']:\n",
- " curr_min = min(y,curr_min)\n",
+ " curr_min = data[\"y\"][0]\n",
+ " for y in data[\"y\"]:\n",
+ " curr_min = min(y, curr_min)\n",
 " min_y.append(curr_min)\n",
- " \n",
+ "\n",
 " # plot steps to show progress\n",
- " ax.step(np.arange(len(data['y'])), min_y, linewidth=2, color=\"b\")\n",
- " plt.axhline(y=0.0, color='r', linewidth=3, linestyle='--')\n",
- " \n",
+ " ax.step(np.arange(len(data[\"y\"])), min_y, linewidth=2, color=\"b\")\n",
+ " plt.axhline(y=0.0, color=\"r\", linewidth=3, linestyle=\"--\")\n",
+ "\n",
 " plt.show()\n",
 "\n",
+ "\n",
 "plot_progress(data)"
 ]
 },
@@ -471,7 +476,8 @@
 ],
 "source": [
 "from IPython.display import Image\n",
- "Image(filename='images/bo-with-trees.png', height=300)"
+ "\n",
+ "Image(filename=\"images/bo-with-trees.png\", height=300)"
 ]
 },
 {
diff --git a/docs/notebooks/trees/linear_tree_formulations.ipynb b/docs/notebooks/trees/linear_tree_formulations.ipynb
index f98373e1..e7f263ae 100644
--- a/docs/notebooks/trees/linear_tree_formulations.ipynb
+++ b/docs/notebooks/trees/linear_tree_formulations.ipynb
@@ -55,26 +55,30 @@
 },
 "outputs": [],
"source": [ - "#Start by importing the following libraries\n", - "#data manipulation and plotting\n", - "import pandas as pd\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", + "# Start by importing the following libraries\n", + "# data manipulation and plotting\n", "import matplotlib\n", - "matplotlib.rc('font', size=24)\n", - "plt.rc('axes', titlesize=24)\n", + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "\n", + "matplotlib.rc(\"font\", size=24)\n", + "plt.rc(\"axes\", titlesize=24)\n", "\n", - "#linear-tree objects\n", + "# linear-tree objects\n", + "# pyomo for optimization\n", + "import pyomo.environ as pyo\n", "from lineartree import LinearTreeRegressor\n", "from sklearn.linear_model import LinearRegression\n", "\n", - "#pyomo for optimization\n", - "import pyomo.environ as pyo\n", + "import omlt\n", "\n", - "#omlt for interfacing our linear tree with pyomo\n", + "# omlt for interfacing our linear tree with pyomo\n", "from omlt import OmltBlock\n", - "from omlt.linear_tree import LinearTreeGDPFormulation, LinearTreeHybridBigMFormulation, LinearTreeDefinition\n", - "import omlt" + "from omlt.linear_tree import (\n", + " LinearTreeDefinition,\n", + " LinearTreeGDPFormulation,\n", + " LinearTreeHybridBigMFormulation,\n", + ")" ] }, { @@ -114,7 +118,7 @@ }, "outputs": [], "source": [ - "df = pd.read_csv(\"../data/sin_quadratic.csv\",index_col=[0])" + "df = pd.read_csv(\"../data/sin_quadratic.csv\", index_col=[0])" ] }, { @@ -152,18 +156,18 @@ } ], "source": [ - "#retrieve input 'x' and output 'y' from the dataframe\n", + "# retrieve input 'x' and output 'y' from the dataframe\n", "x = df[\"x\"]\n", "y = df[\"y\"]\n", "\n", - "#calculate mean and standard deviation, add scaled 'x' and scaled 'y' to the dataframe\n", + "# calculate mean and standard deviation, add scaled 'x' and scaled 'y' to the dataframe\n", "mean_data = df.mean(axis=0)\n", "std_data = df.std(axis=0)\n", - "df[\"x_scaled\"] = (df['x'] - mean_data['x']) / std_data['x']\n", - "df[\"y_scaled\"] = (df['y'] - mean_data['y']) / std_data['y']\n", + "df[\"x_scaled\"] = (df[\"x\"] - mean_data[\"x\"]) / std_data[\"x\"]\n", + "df[\"y_scaled\"] = (df[\"y\"] - mean_data[\"y\"]) / std_data[\"y\"]\n", "\n", - "#create plots for unscaled and scaled data\n", - "f, (ax1, ax2) = plt.subplots(1, 2,figsize = (16,8))\n", + "# create plots for unscaled and scaled data\n", + "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))\n", "\n", "ax1.plot(x, y)\n", "ax1.set_xlabel(\"x\")\n", @@ -202,12 +206,10 @@ }, "outputs": [], "source": [ - "#Build the linear-tree model\n", - "regr = LinearTreeRegressor(LinearRegression(), \n", - " criterion='mse', \n", - " max_bins=120, \n", - " min_samples_leaf=30, \n", - " max_depth=8)" + "# Build the linear-tree model\n", + "regr = LinearTreeRegressor(\n", + " LinearRegression(), criterion=\"mse\", max_bins=120, min_samples_leaf=30, max_depth=8\n", + ")" ] }, { @@ -221,12 +223,12 @@ }, "outputs": [], "source": [ - "#Data needs to be in array and reshaped\n", - "x_scaled = df[\"x_scaled\"].to_numpy().reshape(-1,1)\n", - "y_scaled = df[\"y_scaled\"].to_numpy().reshape(-1,1)\n", + "# Data needs to be in array and reshaped\n", + "x_scaled = df[\"x_scaled\"].to_numpy().reshape(-1, 1)\n", + "y_scaled = df[\"y_scaled\"].to_numpy().reshape(-1, 1)\n", "\n", - "#train the linear tree on the scaled data\n", - "history1 = regr.fit(x_scaled,y_scaled)" + "# train the linear tree on the scaled data\n", + "history1 = regr.fit(x_scaled, y_scaled)" ] }, { @@ -278,9 +280,9 @@ }, "outputs": 
 "source": [
- "#note: we calculate the unscaled output for each neural network to check the predictions\n",
+ "# note: we calculate the unscaled output for the linear tree to check the predictions\n",
 "y_predict_scaled_lt = regr.predict(x_scaled)\n",
- "y_predict_lt = y_predict_scaled_lt*(std_data['y']) + mean_data['y']"
+ "y_predict_lt = y_predict_scaled_lt * (std_data[\"y\"]) + mean_data[\"y\"]"
 ]
 },
 {
@@ -315,10 +317,10 @@
 }
 ],
 "source": [
- "#create a single plot with the original data and each neural network's predictions\n",
- "fig,ax = plt.subplots(1,figsize = (8,8))\n",
- "ax.plot(x,y,linewidth = 3.0,label = \"data\", alpha = 0.5)\n",
- "ax.plot(x,y_predict_lt,linewidth = 3.0,linestyle=\"dotted\",label = \"linear-tree\")\n",
+ "# create a single plot with the original data and the linear tree's predictions\n",
+ "fig, ax = plt.subplots(1, figsize=(8, 8))\n",
+ "ax.plot(x, y, linewidth=3.0, label=\"data\", alpha=0.5)\n",
+ "ax.plot(x, y_predict_lt, linewidth=3.0, linestyle=\"dotted\", label=\"linear-tree\")\n",
 "plt.xlabel(\"x\")\n",
 "plt.ylabel(\"y\")\n",
 "plt.legend()"
@@ -392,17 +394,23 @@
 }
 ],
 "source": [
- "#create an omlt scaling object\n",
- "scaler = omlt.scaling.OffsetScaling(offset_inputs=[mean_data['x']],\n",
- " factor_inputs=[std_data['x']],\n",
- " offset_outputs=[mean_data['y']],\n",
- " factor_outputs=[std_data['y']])\n",
- "\n",
- "#create the input bounds. note that the key `0` corresponds to input `0` and that we also scale the input bounds\n",
- "input_bounds={0:((min(df['x']) - mean_data['x'])/std_data['x'],\n",
- " (max(df['x']) - mean_data['x'])/std_data['x'])};\n",
+ "# create an omlt scaling object\n",
+ "scaler = omlt.scaling.OffsetScaling(\n",
+ " offset_inputs=[mean_data[\"x\"]],\n",
+ " factor_inputs=[std_data[\"x\"]],\n",
+ " offset_outputs=[mean_data[\"y\"]],\n",
+ " factor_outputs=[std_data[\"y\"]],\n",
+ ")\n",
+ "\n",
+ "# create the input bounds. note that the key `0` corresponds to input `0` and that we also scale the input bounds\n",
+ "input_bounds = {\n",
+ " 0: (\n",
+ " (min(df[\"x\"]) - mean_data[\"x\"]) / std_data[\"x\"],\n",
+ " (max(df[\"x\"]) - mean_data[\"x\"]) / std_data[\"x\"],\n",
+ " )\n",
+ "}\n",
 "print(scaler)\n",
- "print(\"Scaled input bounds: \",input_bounds)"
+ "print(\"Scaled input bounds: \", input_bounds)"
 ]
 },
 {
@@ -569,34 +577,37 @@
 }
 ],
 "source": [
- "#create a LinearTreeDefinition Object\n",
- "ltmodel = LinearTreeDefinition(regr,scaler,input_bounds)\n",
+ "# create a LinearTreeDefinition Object\n",
+ "ltmodel = LinearTreeDefinition(regr, scaler, input_bounds)\n",
 "\n",
- "#create a pyomo model with variables x and y\n",
+ "# create a pyomo model with variables x and y\n",
 "model1 = pyo.ConcreteModel()\n",
- "model1.x = pyo.Var(initialize = 0)\n",
- "model1.y = pyo.Var(initialize = 0)\n",
+ "model1.x = pyo.Var(initialize=0)\n",
+ "model1.y = pyo.Var(initialize=0)\n",
 "model1.obj = pyo.Objective(expr=(model1.y))\n",
 "\n",
- "#create an OmltBlock\n",
+ "# create an OmltBlock\n",
 "model1.lt = OmltBlock()\n",
 "\n",
- "#use the GDP formulation with a big-M, transformation\n",
- "formulation1_lt = LinearTreeGDPFormulation(ltmodel, transformation='bigm')\n",
+ "# use the GDP formulation with a big-M transformation\n",
+ "formulation1_lt = LinearTreeGDPFormulation(ltmodel, transformation=\"bigm\")\n",
 "model1.lt.build_formulation(formulation1_lt)\n",
 "\n",
- "#connect pyomo variables to the neural network\n",
+ "\n",
+ "# connect pyomo variables to the linear tree\n",
 "@model1.Constraint()\n",
 "def connect_inputs(mdl):\n",
 " return mdl.x == mdl.lt.inputs[0]\n",
 "\n",
+ "\n",
 "@model1.Constraint()\n",
 "def connect_outputs(mdl):\n",
 " return mdl.y == mdl.lt.outputs[0]\n",
 "\n",
- "#solve the model and query the solution\n",
- "status_1_bigm = pyo.SolverFactory('cbc').solve(model1, tee=True)\n",
- "solution_1_bigm = (pyo.value(model1.x),pyo.value(model1.y))"
+ "\n",
+ "# solve the model and query the solution\n",
+ "status_1_bigm = pyo.SolverFactory(\"cbc\").solve(model1, tee=True)\n",
+ "solution_1_bigm = (pyo.value(model1.x), pyo.value(model1.y))"
 ]
 },
 {
@@ -623,13 +634,13 @@
 }
 ],
 "source": [
- "#print out model size and solution values\n",
+ "# print out model size and solution values\n",
 "print(\"Big-M Transformation Solution:\")\n",
- "print(\"# of variables: \",model1.nvariables())\n",
- "print(\"# of constraints: \",model1.nconstraints())\n",
+ "print(\"# of variables: \", model1.nvariables())\n",
+ "print(\"# of constraints: \", model1.nconstraints())\n",
 "print(\"x = \", solution_1_bigm[0])\n",
 "print(\"y = \", solution_1_bigm[1])\n",
- "print(\"Solve Time: \", status_1_bigm['Solver'][0]['Time'])"
+ "print(\"Solve Time: \", status_1_bigm[\"Solver\"][0][\"Time\"])"
 ]
 },
 {
@@ -724,31 +735,34 @@
 }
 ],
 "source": [
- "#create a pyomo model with variables x and y\n",
+ "# create a pyomo model with variables x and y\n",
 "model2 = pyo.ConcreteModel()\n",
- "model2.x = pyo.Var(initialize = 0)\n",
- "model2.y = pyo.Var(initialize = 0)\n",
+ "model2.x = pyo.Var(initialize=0)\n",
+ "model2.y = pyo.Var(initialize=0)\n",
 "model2.obj = pyo.Objective(expr=(model2.y))\n",
 "\n",
- "#create an OmltBlock\n",
+ "# create an OmltBlock\n",
 "model2.lt = OmltBlock()\n",
 "\n",
- "#use the GDP formulation with a hull transformation\n",
- "formulation2_lt = LinearTreeGDPFormulation(ltmodel, transformation='hull')\n",
+ "# use the GDP formulation with a hull transformation\n",
+ "formulation2_lt = LinearTreeGDPFormulation(ltmodel, transformation=\"hull\")\n",
 "model2.lt.build_formulation(formulation2_lt)\n",
 "\n",
- "#connect pyomo variables to the neural network\n",
+ "\n",
+ "# connect pyomo variables to the linear tree\n",
 "@model2.Constraint()\n",
 "def connect_inputs(mdl):\n",
 " return mdl.x == mdl.lt.inputs[0]\n",
 "\n",
+ "\n",
 "@model2.Constraint()\n",
 "def connect_outputs(mdl):\n",
 " return mdl.y == mdl.lt.outputs[0]\n",
 "\n",
- "#solve the model and query the solution\n",
- "status_2_hull = pyo.SolverFactory('cbc').solve(model2, tee=True)\n",
- "solution_2_hull = (pyo.value(model2.x),pyo.value(model2.y))"
+ "\n",
+ "# solve the model and query the solution\n",
+ "status_2_hull = pyo.SolverFactory(\"cbc\").solve(model2, tee=True)\n",
+ "solution_2_hull = (pyo.value(model2.x), pyo.value(model2.y))"
 ]
 },
 {
@@ -771,13 +785,13 @@
 }
 ],
 "source": [
- "#print out model size and solution values\n",
+ "# print out model size and solution values\n",
 "print(\"Hull Transformation Solution:\")\n",
- "print(\"# of variables: \",model2.nvariables())\n",
- "print(\"# of constraints: \",model2.nconstraints())\n",
+ "print(\"# of variables: \", model2.nvariables())\n",
+ "print(\"# of constraints: \", model2.nconstraints())\n",
 "print(\"x = \", solution_2_hull[0])\n",
 "print(\"y = \", solution_2_hull[1])\n",
- "print(\"Solve Time: \", status_2_hull['Solver'][0]['Time'])"
+ "print(\"Solve Time: \", status_2_hull[\"Solver\"][0][\"Time\"])"
 ]
 },
 {
@@ -939,35 +953,38 @@
 }
 ],
 "source": [
- "#create a pyomo model with variables x and y\n",
+ "# create a pyomo model with variables x and y\n",
 "model_c = pyo.ConcreteModel()\n",
- "model_c.x = pyo.Var(initialize = 0)\n",
- "model_c.y = pyo.Var(initialize = 0)\n",
+ "model_c.x = pyo.Var(initialize=0)\n",
+ "model_c.y = pyo.Var(initialize=0)\n",
 "model_c.obj = pyo.Objective(expr=(model_c.y))\n",
 "\n",
- "#create an OmltBlock\n",
+ "# create an OmltBlock\n",
 "model_c.lt = OmltBlock()\n",
 "\n",
- "#use the GDP formulation with a custom transformation\n",
- "formulation_c_lt = LinearTreeGDPFormulation(ltmodel, transformation='custom')\n",
+ "# use the GDP formulation with a custom transformation\n",
+ "formulation_c_lt = LinearTreeGDPFormulation(ltmodel, transformation=\"custom\")\n",
 "model_c.lt.build_formulation(formulation_c_lt)\n",
 "\n",
- "#connect pyomo variables to the neural network\n",
+ "\n",
+ "# connect pyomo variables to the linear tree\n",
 "@model_c.Constraint()\n",
 "def connect_inputs(mdl):\n",
 " return mdl.x == mdl.lt.inputs[0]\n",
 "\n",
+ "\n",
 "@model_c.Constraint()\n",
 "def connect_outputs(mdl):\n",
 " return mdl.y == mdl.lt.outputs[0]\n",
 "\n",
+ "\n",
 "# NOTE: Since we passed the 'custom' transformation option, the user must\n",
 "# transform the model or the omlt block before passing the model to the solver\n",
- "pyo.TransformationFactory('gdp.bigm').apply_to(model_c)\n",
+ "pyo.TransformationFactory(\"gdp.bigm\").apply_to(model_c)\n",
 "\n",
- "#solve the model and query the solution\n",
- "status_c_bigm = pyo.SolverFactory('cbc').solve(model_c, tee=True)\n",
- "solution_c_bigm = (pyo.value(model_c.x),pyo.value(model_c.y))"
+ "# solve the model and query the solution\n",
+ "status_c_bigm = pyo.SolverFactory(\"cbc\").solve(model_c, tee=True)\n",
+ "solution_c_bigm = (pyo.value(model_c.x), pyo.value(model_c.y))"
 ]
 },
 {
@@ -990,13 +1007,13 @@
 }
 ],
 "source": [
- "#print out model size and solution values\n",
+ "# print out model size and solution values\n",
 "print(\"BigM Transformation Solution:\")\n",
- "print(\"# of variables: \",model_c.nvariables())\n",
\",model_c.nvariables())\n", - "print(\"# of constraints: \",model_c.nconstraints())\n", + "print(\"# of variables: \", model_c.nvariables())\n", + "print(\"# of constraints: \", model_c.nconstraints())\n", "print(\"x = \", solution_c_bigm[0])\n", "print(\"y = \", solution_c_bigm[1])\n", - "print(\"Solve Time: \", status_c_bigm['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_c_bigm[\"Solver\"][0][\"Time\"])" ] }, { @@ -1292,31 +1309,34 @@ } ], "source": [ - "#create a pyomo model with variables x and y\n", + "# create a pyomo model with variables x and y\n", "model3 = pyo.ConcreteModel()\n", - "model3.x = pyo.Var(initialize = 0)\n", - "model3.y = pyo.Var(initialize = 0)\n", + "model3.x = pyo.Var(initialize=0)\n", + "model3.y = pyo.Var(initialize=0)\n", "model3.obj = pyo.Objective(expr=(model3.y))\n", "\n", - "#create an OmltBlock\n", + "# create an OmltBlock\n", "model3.lt = OmltBlock()\n", "\n", - "#use the Hybrid Big-M formulation\n", + "# use the Hybrid Big-M formulation\n", "formulation3_lt = LinearTreeHybridBigMFormulation(ltmodel)\n", "model3.lt.build_formulation(formulation3_lt)\n", "\n", - "#connect pyomo variables to the neural network\n", + "\n", + "# connect pyomo variables to the neural network\n", "@model3.Constraint()\n", "def connect_inputs(mdl):\n", " return mdl.x == mdl.lt.inputs[0]\n", "\n", + "\n", "@model3.Constraint()\n", "def connect_outputs(mdl):\n", " return mdl.y == mdl.lt.outputs[0]\n", "\n", - "#solve the model and query the solution\n", - "status_3_hyb = pyo.SolverFactory('scip').solve(model3, tee=True)\n", - "solution_3_hyb = (pyo.value(model3.x),pyo.value(model3.y))" + "\n", + "# solve the model and query the solution\n", + "status_3_hyb = pyo.SolverFactory(\"scip\").solve(model3, tee=True)\n", + "solution_3_hyb = (pyo.value(model3.x), pyo.value(model3.y))" ] }, { @@ -1339,13 +1359,13 @@ } ], "source": [ - "#print out model size and solution values\n", + "# print out model size and solution values\n", "print(\"Hull Transformation Solution:\")\n", - "print(\"# of variables: \",model3.nvariables())\n", - "print(\"# of constraints: \",model3.nconstraints())\n", + "print(\"# of variables: \", model3.nvariables())\n", + "print(\"# of constraints: \", model3.nconstraints())\n", "print(\"x = \", solution_3_hyb[0])\n", "print(\"y = \", solution_3_hyb[1])\n", - "print(\"Solve Time: \", status_3_hyb['Solver'][0]['Time'])" + "print(\"Solve Time: \", status_3_hyb[\"Solver\"][0][\"Time\"])" ] }, { @@ -1394,26 +1414,53 @@ } ], "source": [ - "#create a plot with 3 subplots\n", - "fig,axs = plt.subplots(1,3,figsize = (24,8))\n", + "# create a plot with 3 subplots\n", + "fig, axs = plt.subplots(1, 3, figsize=(24, 8))\n", "\n", - "#GDP Representation - Big-M Transformation\n", - "axs[0].plot(x,y_predict_lt,linewidth = 3.0,linestyle=\"dotted\",color = \"orange\", label='Fitted Model')\n", + "# GDP Representation - Big-M Transformation\n", + "axs[0].plot(\n", + " x,\n", + " y_predict_lt,\n", + " linewidth=3.0,\n", + " linestyle=\"dotted\",\n", + " color=\"orange\",\n", + " label=\"Fitted Model\",\n", + ")\n", "axs[0].set_title(\"Big-M\")\n", - "axs[0].scatter([solution_1_bigm[0]],[solution_1_bigm[1]],color = \"black\",s = 300, label='Optimum')\n", + "axs[0].scatter(\n", + " [solution_1_bigm[0]], [solution_1_bigm[1]], color=\"black\", s=300, label=\"Optimum\"\n", + ")\n", "axs[0].legend()\n", "\n", - "#GDP Representation - Hull Transformation\n", - "axs[1].plot(x,y_predict_lt,linewidth = 3.0,linestyle=\"dotted\",color = \"orange\", label='Fitted Model')\n", + "# GDP 
Representation - Hull Transformation\n", + "axs[1].plot(\n", + " x,\n", + " y_predict_lt,\n", + " linewidth=3.0,\n", + " linestyle=\"dotted\",\n", + " color=\"orange\",\n", + " label=\"Fitted Model\",\n", + ")\n", "axs[1].set_title(\"Convex Hull\")\n", - "axs[1].scatter([solution_2_hull[0]],[solution_2_hull[1]],color = \"black\",s = 300, label='Optimum')\n", + "axs[1].scatter(\n", + " [solution_2_hull[0]], [solution_2_hull[1]], color=\"black\", s=300, label=\"Optimum\"\n", + ")\n", "axs[1].legend()\n", "\n", "\n", - "#Hybrid Big-M Representation\n", - "axs[2].plot(x,y_predict_lt,linewidth = 3.0,linestyle=\"dotted\",color = \"orange\", label='Fitted Model')\n", + "# Hybrid Big-M Representation\n", + "axs[2].plot(\n", + " x,\n", + " y_predict_lt,\n", + " linewidth=3.0,\n", + " linestyle=\"dotted\",\n", + " color=\"orange\",\n", + " label=\"Fitted Model\",\n", + ")\n", "axs[2].set_title(\"Hybrid Big-M\")\n", - "axs[2].scatter([solution_3_hyb[0]],[solution_3_hyb[1]],color = \"black\",s = 300, label='Optimum')\n", + "axs[2].scatter(\n", + " [solution_3_hyb[0]], [solution_3_hyb[1]], color=\"black\", s=300, label=\"Optimum\"\n", + ")\n", "axs[2].legend()" ] }, diff --git a/pyproject.toml b/pyproject.toml index c504866e..2b215f71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,8 +11,8 @@ authors = [ dependencies = [ "networkx", "numpy", - # TODO: Remove constraint when fix to https://github.com/Pyomo/pyomo/issues/3262 is released - "pyomo==6.6.2", + # Pyomo release that included #3262 fix and has transformations for linear trees + "pyomo>=6.7.3", "onnx", "onnxruntime", ] @@ -120,6 +120,26 @@ convention = "google" ] "docs/conf.py" = ["D100", "INP001"] "src/omlt/neuralnet/layer.py" = ["N802"] +"docs/notebooks/data/build_sin_quadratic_csv.py" = ["INP001"] +"docs/notebooks/*" = [ + "T201", + "F811", + "E402", + "ICN001", + "E501", + "PD901", + "E731", + "F841", + "FBT002", + "PTH123", + "S311", + "N812", + "A001", + "E741", + "N802", + "PERF401", + "PLR2004", +] [tool.mypy] show_error_codes = true @@ -151,4 +171,30 @@ module = [ ignore_missing_imports = true [tool.pytest.ini_options] -addopts = "--cov omlt --cov-report term-missing --verbose" +addopts = "--cov omlt --cov-report term-missing --cov-config pyproject.toml --verbose" + +[tool.coverage.run] +branch = true + +[tool.coverage.paths] +source = [ + "src/", + "*/site-packages/", +] + +[tool.coverage.report] +# Regexes for lines to exclude from consideration +exclude_lines = [ + # Have to re-enable the standard pragma + "pragma: no cover", + + # Don't complain about missing debug-only code: + "def __repr__", + + # Don't complain if tests don't hit defensive assertion code: + "raise AssertionError", + "raise NotImplementedError", + + # Don't complain if non-runnable code isn't run: + "if __name__ == .__main__.:", +] diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..1c1752ab --- /dev/null +++ b/setup.cfg @@ -0,0 +1,171 @@ +# This file is used to configure your project. +# Read more about the various options under: +# http://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files + +[metadata] +name = omlt +description = OMLT is a Python package for representing machine learning models (such as neural networks) within the Pyomo optimization environment. 
+author = The OMLT Developers +author_email = omlt@googlegroups.com +license = BSD 3-Clause +long_description = file: README.rst +long_description_content_type = text/x-rst; charset=UTF-8 +url = https://github.com/cog-imperial/OMLT/ +# Add here related links, for example: +project_urls = + Source = https://github.com/cog-imperial/OMLT/ + Twitter = https://twitter.com/cogimperial +# Changelog = https://pyscaffold.org/en/latest/changelog.html +# Tracker = https://github.com/pyscaffold/pyscaffold/issues +# Conda-Forge = https://anaconda.org/conda-forge/pyscaffold +# Download = https://pypi.org/project/PyScaffold/#files + +# Change if running only on Windows, Mac or Linux (comma-separated) +platforms = any + +# Add here all kinds of additional classifiers as defined under +# https://pypi.python.org/pypi?%3Aaction=list_classifiers +classifiers = + Development Status :: 4 - Beta + Programming Language :: Python + + +[options] +zip_safe = False +packages = find_namespace: +include_package_data = True +package_dir = + =src + +# Require a min/specific Python version (comma-separated conditions) +# OMLT currently supports Python 3.7 and above +python_requires = >=3.7 + +# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. +# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in +# new major versions. This works if the required packages follow Semantic Versioning. +# For more information, check out https://semver.org/. +install_requires = + importlib-metadata; python_version<"3.8" + networkx + pyomo==6.6.2 + numpy + protobuf==3.20.3 + + +[options.packages.find] +where = src +exclude = + tests + +[options.extras_require] +# Add here additional requirements for extra features, to install with: +# `pip install omlt[PDF]` like: +# PDF = ReportLab; RXP + +# Add here test requirements (semicolon/line-separated) +testing = + setuptools + pytest + pytest-cov + testbook + nbmake + tox + flake8 + tensorflow-cpu + ipywidgets + jupyter + lightgbm + linear-tree + matplotlib + pandas + keras>=3.0 + onnx + onnxruntime + onnxmltools + tf2onnx>=1.12 + torch + torchvision + tqdm + protobuf==3.20.3 + torch_geometric + +testing_lean = + setuptools + pytest + pytest-cov + testbook + nbmake + tox + flake8 + ipywidgets + jupyter + lightgbm + matplotlib + pandas + torch + torchvision + tqdm + +[options.entry_points] +# Add here console scripts like: +# console_scripts = +# script_name = omlt.module:function +# For example: +# console_scripts = +# fibonacci = omlt.skeleton:run +# And any other entry points, for example: +# pyscaffold.cli = +# awesome = pyscaffoldext.awesome.extension:AwesomeExtension + +[tool:pytest] +# Specify command line options as you would do when invoking pytest directly. +# e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml +# in order to write a coverage file that can be read by Jenkins. +# CAUTION: --cov flags may prohibit setting breakpoints while debugging. +# Comment those flags to avoid this py.test issue. 
+addopts = + --cov omlt --cov-report term-missing + --verbose +norecursedirs = + dist + build + .tox +testpaths = tests +# Use pytest markers to select/deselect specific tests +# markers = +# slow: mark tests as slow (deselect with '-m "not slow"') +# system: mark end-to-end system tests + +[bdist_wheel] +# Use this option if your package is pure-python +universal = 1 + +[devpi:upload] +# Options for the devpi: PyPI server and packaging tool +# VCS export must be deactivated since we are using setuptools-scm +no_vcs = 1 +formats = bdist_wheel + +[flake8] +# Some sane defaults for the code style checker flake8 +max_line_length = 88 +extend_ignore = E203, W503 +# ^ Black-compatible +# E203 and W503 have edge cases handled by black +exclude = + .tox + build + dist + .eggs + docs/conf.py +per_file_ignores = + # ignore docstrings in tests + tests/*:D100,D101,D102,D103,D104,D105,D106,D107 + +[pyscaffold] +# PyScaffold's parameters when the project was created. +# This will be used when updating. Do not change! +version = 4.0.2 +package = omlt +extensions = diff --git a/src/omlt/base/constraint.py b/src/omlt/base/constraint.py index cd881854..6a3bbd75 100644 --- a/src/omlt/base/constraint.py +++ b/src/omlt/base/constraint.py @@ -1,6 +1,6 @@ from __future__ import annotations -from abc import ABC, abstractmethod +from abc import abstractmethod from typing import Any import pyomo.environ as pyo @@ -9,7 +9,7 @@ from omlt.base import DEFAULT_MODELING_LANGUAGE -class OmltConstraint(ABC): +class OmltConstraint: @property def ctype(self): diff --git a/src/omlt/base/pyomo.py b/src/omlt/base/pyomo.py index 5a12d9d6..c3d9a095 100644 --- a/src/omlt/base/pyomo.py +++ b/src/omlt/base/pyomo.py @@ -98,6 +98,7 @@ def __init__(self, *indexes: Any, **kwargs: Any): self._pyovar = pyo.Var(*indexes, **kwargs) self._name = None self._parent = None + self._pyovar._parent = None self._constructed = self._pyovar._constructed self._index_set = self._pyovar._index_set self._rule_init = self._pyovar._rule_init @@ -106,7 +107,6 @@ def __init__(self, *indexes: Any, **kwargs: Any): self._dense = self._pyovar._dense self._data = self._pyovar._data self._units = self._pyovar._units - self._implicit_subsets = self._pyovar._implicit_subsets self.doc = self._pyovar.doc self._ctype = pyo.Var self.bounds = (None, None) @@ -166,6 +166,16 @@ def setlb(self, value): for vardata in self.values(): vardata.lb = value + @property + def _parent(self): + return self._pyovar._parent + + @_parent.setter + def _parent(self, value): + self._pyovar._parent = value + if self.is_constructed(): + for idx in self.keys(): + self[idx]._parent = value # Constraints @@ -289,7 +299,7 @@ def doc(self): class OmltExprScalarPyomo(OmltExpr, pyo.Expression): format = "pyomo" - def __init__(self, expr=None, **kwargs): + def __init__(self, expr=None): self._index_set = {} if isinstance(expr, OmltExprScalarPyomo): self._expression = expr._expression diff --git a/src/omlt/block.py b/src/omlt/block.py index 0b20c42c..d57ec62d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -24,8 +24,9 @@ class is used in combination with a formulation object to construct the pyo.assert_optimal_termination(status) """ + import pyomo.environ as pyo -from pyomo.core.base.block import _BlockData, declare_custom_block +from pyomo.core.base.block import BlockData, declare_custom_block from omlt.base import DEFAULT_MODELING_LANGUAGE, OmltVarFactory @@ -107,7 +108,7 @@ def build_formulation(self, formulation, lang=None): @declare_custom_block(name="OmltBlock") -class 
OmltBlockData(_BlockData, OmltBlockCore): +class OmltBlockData(BlockData, OmltBlockCore): def __init__(self, component): super().__init__(component) self.__formulation = None diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index ec58083d..e6f4416a 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -74,12 +74,9 @@ def _set_block(self, block): def block(self): """Block. - The underlying block containing the constraints / variables for this - formulation. + The underlying block containing the constraints/variables for this formulation. """ - if self.__block is not None: - return self.__block() - return None + return self.__block() def scalar_or_tuple(x): @@ -139,7 +136,9 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): ) constraint_factory = OmltConstraintFactory() - block._scale_input_constraint = constraint_factory.new_constraint(block.inputs_set, lang=block._format) + block._scale_input_constraint = constraint_factory.new_constraint( + block.inputs_set, lang=block._format + ) for idx in block.inputs_set: block._scale_input_constraint[idx] = ( block.scaled_inputs[idx] == input_scaling_expressions[idx] diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index 80c83d46..638b888e 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -23,10 +23,9 @@ class GBTBigMFormulation(_PyomoFormulation): gradient-boosted trees embedded." INFORMS Journal on Computing (2020). - Parameters - ---------- - tree_ensemble_structure : GradientBoostedTreeModel - the tree ensemble definition + Parameters: + tree_ensemble_structure (GradientBoostedTreeModel): + the tree ensemble definition """ def __init__(self, gbt_model): @@ -64,7 +63,7 @@ def _build_formulation(self): ) -def add_formulation_to_block(block, model_definition, input_vars, output_vars): +def add_formulation_to_block(block, model_definition, input_vars, output_vars): # noqa: C901, PLR0915 r"""Adds the gradient-boosted trees formulation to the given Pyomo block. .. math:: @@ -97,16 +96,15 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): gradient-boosted trees embedded." INFORMS Journal on Computing (2020). 
- Parameters - ---------- - block : Block - the Pyomo block - tree_ensemble_structure : GradientBoostedTreeModel - the tree ensemble definition - input_vars : Var - the input variables of the Pyomo block - output_vars : Var - the output variables of the Pyomo block + Parameters: + block (Block): + the Pyomo block + tree_ensemble_structure (GradientBoostedTreeModel): + the tree ensemble definition + input_vars (Var): + the input variables of the Pyomo block + output_vars (Var): + the output variables of the Pyomo block """ if isinstance(model_definition, GradientBoostedTreeModel): @@ -130,13 +128,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): nodes_node_ids = np.array(attr["nodes_nodeids"].ints) nodes_false_node_ids = np.array(attr["nodes_falsenodeids"].ints) nodes_true_node_ids = np.array(attr["nodes_truenodeids"].ints) - nodes_hitrates = np.array(attr["nodes_hitrates"].floats) - nodes_missing_value_tracks_true = np.array( - attr["nodes_missing_value_tracks_true"].ints - ) - n_targets = attr["n_targets"].i - target_ids = np.array(attr["target_ids"].ints) target_node_ids = np.array(attr["target_nodeids"].ints) target_tree_ids = np.array(attr["target_treeids"].ints) target_weights = np.array(attr["target_weights"].floats) diff --git a/src/omlt/io/__init__.py b/src/omlt/io/__init__.py index b568fb90..64fa72e1 100644 --- a/src/omlt/io/__init__.py +++ b/src/omlt/io/__init__.py @@ -16,7 +16,12 @@ from omlt.io.keras import load_keras_sequential __all__ = [ - "keras_available", "onnx_available", "torch_available", "torch_geometric_available", - "load_onnx_neural_network", "load_onnx_neural_network_with_bounds", - "write_onnx_model_with_bounds", "load_keras_sequential" + "keras_available", + "onnx_available", + "torch_available", + "torch_geometric_available", + "load_onnx_neural_network", + "load_onnx_neural_network_with_bounds", + "write_onnx_model_with_bounds", + "load_keras_sequential", ] diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py index f35fadb9..85b37526 100644 --- a/src/omlt/io/onnx_parser.py +++ b/src/omlt/io/onnx_parser.py @@ -1,5 +1,5 @@ import math -from typing import Any +from typing import TYPE_CHECKING, Any import numpy as np from onnx import numpy_helper @@ -13,6 +13,9 @@ ) from omlt.neuralnet.network_definition import NetworkDefinition +if TYPE_CHECKING: + from collections.abc import Callable + _ACTIVATION_OP_TYPES = ["Relu", "Sigmoid", "LogSoftmax", "Tanh", "Softplus"] _POOLING_OP_TYPES = ["MaxPool"] DENSE_INPUT_DIMENSIONS = 2 @@ -33,8 +36,7 @@ class NetworkParser: """Network Parser. References: - ---------- - * https://github.com/onnx/onnx/blob/master/docs/Operators.md + * https://github.com/onnx/onnx/blob/master/docs/Operators.md """ def __init__(self): @@ -51,7 +53,7 @@ def _reset_state(self): self._node_stack = [] self._node_map = {} - def parse_network(self, graph, scaling_object, input_bounds): + def parse_network(self, graph, scaling_object, input_bounds): # noqa: C901, PLR0912, PLR0915 self._reset_state() self._graph = graph @@ -92,7 +94,6 @@ def parse_network(self, graph, scaling_object, input_bounds): f'All dimensions in graph "{graph.name}" input tensor have 0 value.' 
) raise ValueError(msg) - assert network_input is None network_input = InputLayer(size) self._node_map[input_node.name] = network_input network.add_layer(network_input) @@ -193,7 +194,7 @@ def _visit_node(self, node, next_nodes): return new_layer, new_layer_inputs - def _consume_dense_nodes( + def _consume_dense_nodes( # noqa: C901, PLR0912 self, node: Any, next_nodes: Any ) -> tuple[Any, Any, list[Any]]: """Starting from a MatMul node, consume nodes to form a dense Ax + b node.""" @@ -342,7 +343,7 @@ def _consume_gemm_dense_nodes(self, node, next_nodes): return next_nodes, dense_layer, [input_layer] - def _consume_conv_nodes(self, node, next_nodes): + def _consume_conv_nodes(self, node, next_nodes): # noqa: PLR0912, C901, PLR0915 """Consume Conv nodes. Starting from a Conv node, consume nodes to form a convolution node with @@ -484,7 +485,7 @@ def _consume_reshape_nodes(self, node, next_nodes): self._node_map[node.output[0]] = (transformer, input_layer) return next_nodes - def _consume_pool_nodes(self, node, next_nodes): + def _consume_pool_nodes(self, node, next_nodes): # noqa: PLR0912, C901, PLR0915 """Consume MaxPool nodes. Starting from a MaxPool node, consume nodes to form a pooling node with @@ -569,7 +570,7 @@ def _consume_pool_nodes(self, node, next_nodes): ) raise ValueError(msg) - output_shape_wrapper = math.floor + output_shape_wrapper: Callable[[float], int] = math.floor if "ceil_mode" in attr and attr["ceil_mode"] == 1: output_shape_wrapper = math.ceil diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index ee00f6f4..34968de8 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -8,7 +8,7 @@ from omlt.neuralnet import FullSpaceNNFormulation -def gnn_with_non_fixed_graph( +def gnn_with_non_fixed_graph( # noqa: PLR0913 block, nn, N, @@ -85,7 +85,7 @@ def gnn_with_non_fixed_graph( return block -def gnn_with_fixed_graph( +def gnn_with_fixed_graph( # noqa: PLR0913 block, nn, N, diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index d37ec960..5ce9b315 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -102,7 +102,7 @@ def _process_gnn_parameters(gnn_weights_uv, gnn_weights_vv, gnn_biases, gnn_norm _OP_TYPES = _LAYER_OP_TYPES_FIXED_GRAPH + _ACTIVATION_OP_TYPES + _POOLING_OP_TYPES -def load_torch_geometric_sequential( +def load_torch_geometric_sequential( # noqa: C901, PLR0913, PLR0912, PLR0915 nn, N, A=None, @@ -154,12 +154,12 @@ def load_torch_geometric_sequential( net.add_layer(prev_layer) operations = [] - for l in nn: + for layer in nn: op_name = None - if l.__class__.__name__ == "function": - op_name = l.__name__ + if layer.__class__.__name__ == "function": + op_name = layer.__name__ else: - op_name = l.__class__.__name__ + op_name = layer.__class__.__name__ if op_name not in _OP_TYPES: msg = f"Operation {op_name} is not supported." @@ -167,18 +167,20 @@ def load_torch_geometric_sequential( operations.append(op_name) if A is None: + supported_layers = { + "Linear", + *_ACTIVATION_OP_TYPES, + *_POOLING_OP_TYPES, + } # If A is None, then the graph is not fixed. # Only layers in _LAYER_OP_TYPES_NON_FIXED_GRAPH are supported. # Only "sum" aggregation is supported. # Since all weights and biases are possibly needed, A is set to correspond to a # complete graph. 
- for index, l in enumerate(nn): - if ( - operations[index] - in ["Linear"] + _ACTIVATION_OP_TYPES + _POOLING_OP_TYPES - ): + for index, layer in enumerate(nn): + if operations[index] in supported_layers: # nonlinear activation results in a MINLP - if operations[index] in ["Sigmoid", "LogSoftmax", "Softplus", "Tanh"]: + if operations[index] in {"Sigmoid", "LogSoftmax", "Softplus", "Tanh"}: warnings.warn( "nonlinear activation results in a MINLP", stacklevel=2 ) @@ -188,13 +190,13 @@ def load_torch_geometric_sequential( if operations[index] not in _LAYER_OP_TYPES_NON_FIXED_GRAPH: msg = "this layer is not supported when the graph is not fixed." raise ValueError(msg) - if l.aggr != "sum": + if layer.aggr != "sum": msg = "this aggregation is not supported when the graph is not fixed" raise ValueError(msg) A = np.ones((N, N)) - np.eye(N) - for index, l in enumerate(nn): + for index, layer in enumerate(nn): if operations[index] in _ACTIVATION_OP_TYPES: # Skip activation layers since they are already handled in last layer continue @@ -205,8 +207,8 @@ def load_torch_geometric_sequential( activation = operations[index + 1].lower() if operations[index] == "Linear": - gnn_weights = l.weight.detach().numpy() - gnn_biases = l.bias.detach().numpy() + gnn_weights = layer.weight.detach().numpy() + gnn_biases = layer.bias.detach().numpy() # A linear layer is either applied on each node's features (i.e., # prev_layer.output_size[-1] = N * gnn_weights.shape[1]) # or the features after pooling (i.e., @@ -224,12 +226,8 @@ def load_torch_geometric_sequential( biases=biases, ) elif operations[index] == "GCNConv": - assert not l.improved - assert not l.cached - assert l.add_self_loops - assert l.normalize - gnn_weights = l.lin.weight.detach().numpy() - gnn_biases = l.bias.detach().numpy() + gnn_weights = layer.lin.weight.detach().numpy() + gnn_biases = layer.bias.detach().numpy() gnn_norm = _compute_gcn_norm(A) weights, biases = _process_gnn_parameters( gnn_weights, gnn_weights, gnn_biases, gnn_norm @@ -244,15 +242,12 @@ def load_torch_geometric_sequential( N=N, ) elif operations[index] == "SAGEConv": - assert not l.normalize - assert not l.project - assert l.aggr in _AGGREGATION_OP_TYPES - gnn_weights_uv = l.lin_l.weight.detach().numpy() - gnn_biases = l.lin_l.bias.detach().numpy() + gnn_weights_uv = layer.lin_l.weight.detach().numpy() + gnn_biases = layer.lin_l.bias.detach().numpy() gnn_weights_vv = np.zeros(shape=gnn_weights_uv.shape) - if l.root_weight: - gnn_weights_vv = l.lin_r.weight.detach().numpy() - gnn_norm = _compute_sage_norm(A, l.aggr) + if layer.root_weight: + gnn_weights_vv = layer.lin_r.weight.detach().numpy() + gnn_norm = _compute_sage_norm(A, layer.aggr) weights, biases = _process_gnn_parameters( gnn_weights_uv, gnn_weights_vv, gnn_biases, gnn_norm ) diff --git a/src/omlt/linear_tree/lt_definition.py b/src/omlt/linear_tree/lt_definition.py index cf1b5a4a..09adbb78 100644 --- a/src/omlt/linear_tree/lt_definition.py +++ b/src/omlt/linear_tree/lt_definition.py @@ -230,7 +230,7 @@ def _reassign_none_bounds(leaves, input_bounds): return leaves -def _parse_tree_data(model, input_bounds): +def _parse_tree_data(model, input_bounds): # noqa: C901, PLR0915, PLR0912 """Parse tree data. This function creates the data structures with the information required @@ -241,7 +241,7 @@ def _parse_tree_data(model, input_bounds): Arguments: model: Trained linear-tree model or dic containing linear-tree model summary (e.g. 
dict = model.summary())
- input_bounds:
+ input_bounds: The input bounds
 
 Returns:
 leaves - Dict containing the following information for each leaf:
diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py
index 561a2ffd..904b36f6 100644
--- a/src/omlt/linear_tree/lt_formulation.py
+++ b/src/omlt/linear_tree/lt_formulation.py
@@ -2,7 +2,6 @@
 import pyomo.environ as pe
 from pyomo.gdp import Disjunct
 
-from omlt.base import OmltConstraintFactory, OmltVarFactory
 from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs
 
 
@@ -50,7 +49,7 @@ class LinearTreeGDPFormulation(_PyomoFormulation):
 optimization development. Optimization and Engineering, 23:607-642
 """
 
- def __init__(self, lt_definition, transformation="bigm"):
+ def __init__(self, lt_definition, transformation="bigm", epsilon=0):
 """Create a LinearTreeGDPFormulation object.
 
 Arguments:
@@ -60,6 +59,9 @@
 transformation: choose which Pyomo.GDP formulation to apply.
 Supported transformations are bigm, hull, mbigm, and custom
 (default: {'bigm'})
+ epsilon: Tolerance to use in enforcing that choosing the right
+ branch of a linear tree node can only happen if the feature
+ is strictly greater than the branch value (default: 0).
 
 Raises:
 Exception: If transformation not in supported transformations
@@ -67,6 +69,7 @@
 super().__init__()
 self.model_definition = lt_definition
 self.transformation = transformation
+ self.epsilon = epsilon
 
 # Ensure that the GDP transformation given is supported
 supported_transformations = ["bigm", "hull", "mbigm", "custom"]
@@ -102,6 +105,7 @@
 input_vars=self.block.scaled_inputs,
 output_vars=self.block.scaled_outputs,
 transformation=self.transformation,
+ epsilon=self.epsilon,
 )
 
 
@@ -134,14 +138,21 @@ class LinearTreeHybridBigMFormulation(_PyomoFormulation):
 
 """
 
- def __init__(self, lt_definition):
+ def __init__(self, lt_definition, epsilon=0):
 """Create a LinearTreeHybridBigMFormulation object.
 
 Arguments:
 lt_definition: LinearTreeDefinition Object
+
+ Keyword Arguments:
+ epsilon: Tolerance to use in enforcing that choosing the right
+ branch of a linear tree node can only happen if the feature
+ is strictly greater than the branch value (default: 0).
+
 """
 super().__init__()
 self.model_definition = lt_definition
+ self.epsilon = epsilon
 
 @property
 def input_indexes(self):
@@ -165,13 +176,18 @@
 self.model_definition.scaled_input_bounds,
 )
 
- _add_hybrid_formulation_to_block(
+ _add_gdp_formulation_to_block(
 block=self.block,
 model_definition=self.model_definition,
 input_vars=self.block.scaled_inputs,
 output_vars=self.block.scaled_outputs,
+ transformation="custom",
+ epsilon=self.epsilon,
 )
 
+ pe.TransformationFactory("gdp.bound_pretransformation").apply_to(self.block)
+ pe.TransformationFactory("gdp.binary_multiplication").apply_to(self.block)
+
 
 def _build_output_bounds(model_def, input_bounds):
 """Build output bounds. 
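The `epsilon` keyword added above is shared by both formulation classes; a small positive value makes the right-branch inequalities strict instead of weak. A usage sketch (illustrative only, not part of the patch; `regr`, `scaler`, and `input_bounds` are assumed to exist as in the linear-tree notebook):

import pyomo.environ as pyo

from omlt import OmltBlock
from omlt.linear_tree import LinearTreeDefinition, LinearTreeGDPFormulation

# Assumed to exist, as in the linear-tree notebook: a trained linear-tree
# `regr`, an OffsetScaling `scaler`, and scaled `input_bounds`.
ltdef = LinearTreeDefinition(regr, scaler, input_bounds)

# epsilon > 0 requires a feature to sit strictly above a split value before
# the right branch's disjunct can be chosen.
formulation = LinearTreeGDPFormulation(ltdef, transformation="hull", epsilon=1e-4)

model = pyo.ConcreteModel()
model.lt = OmltBlock()
model.lt.build_formulation(formulation)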
@@ -207,18 +223,16 @@ def _build_output_bounds(model_def, input_bounds): else: upper_bound += slopes[k] * input_bounds[k][1] + intercept lower_bound += slopes[k] * input_bounds[k][0] + intercept - if upper_bound >= bounds[1]: - bounds[1] = upper_bound - if lower_bound <= bounds[0]: - bounds[0] = lower_bound + bounds[1] = max(bounds[1], upper_bound) + bounds[0] = min(bounds[0], lower_bound) upper_bound = 0 lower_bound = 0 return bounds -def _add_gdp_formulation_to_block( - block, model_definition, input_vars, output_vars, transformation +def _add_gdp_formulation_to_block( # noqa: PLR0913 + block, model_definition, input_vars, output_vars, transformation, epsilon ): """This function adds the GDP representation to the OmltBlock using Pyomo.GDP. @@ -228,6 +242,9 @@ def _add_gdp_formulation_to_block( input_vars: input variables to the linear tree model output_vars: output variable of the linear tree model transformation: Transformation to apply + epsilon: Tolerance to use in enforcing that choosing the right + branch of a linear tree node can only happen if the feature + is strictly greater than the branch value. """ leaves = model_definition.leaves @@ -249,16 +266,15 @@ def _add_gdp_formulation_to_block( block.scaled_outputs.setub(output_bounds[1]) block.scaled_outputs.setlb(output_bounds[0]) - var_factory = OmltVarFactory() - block.intermediate_output = var_factory.new_var( - tree_ids, lang=block._format, bounds=(output_bounds[0], output_bounds[1]) + block.intermediate_output = pe.Var( + tree_ids, bounds=(output_bounds[0], output_bounds[1]) ) # Create a disjunct for each leaf containing the bound constraints # and the linear model expression. def disjuncts_rule(dsj, tree, leaf): def lb_rule(dsj, feat): - return input_vars[feat] >= leaves[tree][leaf]["bounds"][feat][0] + return input_vars[feat] >= leaves[tree][leaf]["bounds"][feat][0] + epsilon dsj.lb_constraint = pe.Constraint(features, rule=lb_rule) @@ -288,92 +304,3 @@ def disjunction_rule(b, tree): if transformation != "custom": pe.TransformationFactory(transformation_string).apply_to(block) - - -def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output_vars): - """This function adds the Hybrid BigM representation to the OmltBlock. - - Arguments: - block: OmltBlock - model_definition: LinearTreeDefinition Object - input_vars: input variables to the linear tree model - output_vars: output variable of the linear tree model - """ - leaves = model_definition.leaves - input_bounds = model_definition.scaled_input_bounds - n_inputs = model_definition.n_inputs - - # The set of trees - tree_ids = list(leaves.keys()) - # Create a list of tuples that contains the tree and leaf indices. Note that - # the leaf indices depend on the tree in the ensemble. - t_l = [(tree, leaf) for tree in tree_ids for leaf in leaves[tree]] - - features = np.arange(0, n_inputs) - - # Use the input_bounds and the linear models in the leaves to calculate - # the lower and upper bounds on the output variable. Required for Pyomo.GDP - output_bounds = _build_output_bounds(model_definition, input_bounds) - - # Ouptuts are automatically scaled based on whether inputs are scaled - block.outputs.setub(output_bounds[1]) - block.outputs.setlb(output_bounds[0]) - block.scaled_outputs.setub(output_bounds[1]) - block.scaled_outputs.setlb(output_bounds[0]) - - # Create the intermeditate variables. z is binary that indicates which leaf - # in tree t is returned. 
intermediate_output is the output of tree t and - # the total output of the model is the sum of the intermediate_output vars - var_factory = OmltVarFactory() - block.z = var_factory.new_var(t_l, lang=block._format, within=pe.Binary) - block.intermediate_output = var_factory.new_var(tree_ids, lang=block._format) - - constraint_factory = OmltConstraintFactory() - block.lower_bound_constraints = constraint_factory.new_constraint(features, tree_ids) - for tree in tree_ids: - leaf_ids = list(leaves[tree].keys()) - for feat in features: - block.lower_bound_constraints[feat, tree] = ( - sum( - leaves[tree][leaf]["bounds"][feat][0] * block.z[tree, leaf] - for leaf in leaf_ids - ) - <= input_vars[feat] - ) - - block.upper_bound_constraints = constraint_factory.new_constraint(features, tree_ids) - for tree in tree_ids: - leaf_ids = list(leaves[tree].keys()) - for feat in features: - block.upper_bound_constraints[feat, tree] = ( - sum( - leaves[tree][leaf]["bounds"][feat][1] * block.z[tree, leaf] - for leaf in leaf_ids - ) - >= input_vars[feat] - ) - - block.linear_constraint = constraint_factory.new_constraint(tree_ids) - for tree in tree_ids: - leaf_ids = list(leaves[tree].keys()) - block.linear_constraint[tree] = block.intermediate_output[tree] == sum( - ( - sum( - leaves[tree][leaf]["slope"][feat] * input_vars[feat] - for feat in features - ) - + leaves[tree][leaf]["intercept"] - ) - * block.z[tree, leaf] - for leaf in leaf_ids - ) - block.only_one_leaf_per_tree = constraint_factory.new_constraint(tree_ids) - for tree in tree_ids: - leaf_ids = list(leaves[tree].keys()) - block.only_one_leaf_per_tree[tree] = ( - sum(block.z[tree, leaf] for leaf in leaf_ids) == 1 - ) - - block.output_sum_of_trees = constraint_factory.new_constraint( - expr=output_vars[0] == sum(block.intermediate_output[tree] for tree in tree_ids) - ) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index f1f365f5..65cf8b27 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -130,7 +130,7 @@ def __init__(self, transform=None): transform = "mpec.simple_nonlinear" self.transform = transform - def __call__(self, net_block, net, layer_block, layer): + def __call__(self, net_block, net, layer_block, layer): # noqa: ARG002 layer_block._complementarity = mpec.Complementarity( layer.output_indexes, rule=_relu_complementarity ) diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py index d7a52750..3b6faf09 100644 --- a/src/omlt/neuralnet/layer.py +++ b/src/omlt/neuralnet/layer.py @@ -17,6 +17,7 @@ """ import itertools +from typing import ClassVar import numpy as np @@ -195,7 +196,7 @@ class DenseLayer(Layer): map indexes from this layer index to the input layer index size """ - def __init__( + def __init__( # noqa: PLR0913 self, input_size, output_size, @@ -321,7 +322,7 @@ class GNNLayer(DenseLayer): map indexes from this layer index to the input layer index size """ - def __init__( + def __init__( # noqa: PLR0913 self, input_size, output_size, @@ -380,7 +381,6 @@ def _eval_with_adjacency(self, x, A): if self.input_index_mapper is not None else x[:] ) - assert x_reshaped.shape == tuple(self.input_size) y = np.zeros(shape=self.output_size) for output_index in self.output_indexes: for input_index in self.input_indexes: @@ -447,7 +447,7 @@ def kernel_depth(self): """Return the depth of the kernel.""" raise NotImplementedError - def kernel_index_with_input_indexes(self, out_d, out_r, out_c): + def kernel_index_with_input_indexes(self, 
out_d, out_r, out_c): # noqa: ARG002 """Kernel index with input indexes. Returns an iterator over the index within the kernel and input index @@ -468,16 +468,12 @@ def kernel_index_with_input_indexes(self, out_d, out_r, out_c): start_in_d = 0 start_in_r = out_r * rows_stride start_in_c = out_c * cols_stride - mapper = lambda x: x - if self.input_index_mapper is not None: - mapper = self.input_index_mapper for k_d in range(kernel_d): for k_r in range(kernel_r): for k_c in range(kernel_c): input_index = (start_in_d + k_d, start_in_r + k_r, start_in_c + k_c) - assert len(input_index) == len(self.input_size) # don't yield an out-of-bounds input index; # can happen if ceil mode is enabled for pooling layers # as this could require using a partial kernel @@ -542,9 +538,9 @@ class PoolingLayer2D(Layer2D): map indexes from this layer index to the input layer index size """ - _POOL_FUNCTIONS = {"max": max} + _POOL_FUNCTIONS: ClassVar = {"max": max} - def __init__( + def __init__( # noqa: PLR0913 self, input_size, output_size, @@ -618,7 +614,7 @@ class ConvLayer2D(Layer2D): map indexes from this layer index to the input layer index size """ - def __init__( + def __init__( # noqa: PLR0913 self, input_size, output_size, @@ -677,7 +673,8 @@ def __str__(self): return ( f"ConvLayer(input_size={self.input_size}, output_size={self.output_size}," f" strides={self.strides}, kernel_shape={self.kernel_shape})" - ) + ) + def _eval_at_index(self, x, out_d, out_r, out_c): acc = 0.0 for k, index in self.kernel_with_input_indexes(out_d, out_r, out_c): diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 619a6ba8..4be222f6 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -257,25 +257,24 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): \end{align*} where: + :math:`w` is the convolution kernel on the preceding convolutional layer; :math:`d` is the number of features in each of the :math:`N` max pooling windows; :math:`x_{i}` is the set of :math:`d` features in the :math:`i`-th max pooling - window; + window; :math:`\Delta^{d}` is the :math:`d`-dimensional simplex; and [L_{i},U_{i}] are the - bounds on x_{i}. + bounds on x_{i}. NOTE This formulation is adapted from the Anderson et al. (2020) formulation, section 5.1, with the following changes: - OMLT presently does not support biases on convolutional layers. Bias terms from - the original formulation are removed. - + the original formulation are removed. - The original formulation formulates the max of :math:`w^{l}\cdot x + b^{l}`, varying the weights :math:`w` and biases :math:`b` and keeping the input :math:`x` constant. Since convolutional layers have constant weights and biases convolved with varying portions of the feature map, this formulation formulates the max of :math:`w\cdot x^{l} + b`. - - Due to the above 2 changes, the calculation of :math:`N^{l,k}` is changed. """ @@ -289,9 +288,6 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): " are not supported." 
) raise ValueError(msg) - # TODO @cog-imperial: add support for non-increasing activation functions on - # preceding convolutional layer - # https://github.com/cog-imperial/OMLT/issues/154 # note kernel indexes are the same set of values for any output index, so wlog get # kernel indexes for (0, 0, 0) @@ -344,30 +340,32 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): == 1 ) - for l, input_index in layer.kernel_index_with_input_indexes( + for layer_index, input_index in layer.kernel_index_with_input_indexes( out_d, out_r, out_c ): mapped_input_index = layer.input_index_mapper(input_index) # Since biases are zero, # input_layer_block.z[input_index] is equal to w dot x in the formulation. - layer_block._zhat_upper_bound[output_index, l] = layer_block.zhat[ + layer_block._zhat_upper_bound[output_index, layer_index] = layer_block.zhat[ output_index ] <= input_layer_block.z[mapped_input_index] + sum( layer_block.q_maxpool[output_index, k] - * _calculate_n_plus(output_index, l, k, layer, input_layer_block) + * _calculate_n_plus( + output_index, layer_index, k, layer, input_layer_block + ) for k in layer_block._kernel_indexes ) - layer_block._zhat_lower_bound[output_index, l] = ( + layer_block._zhat_lower_bound[output_index, layer_index] = ( layer_block.zhat[output_index] >= input_layer_block.z[mapped_input_index] ) -def _calculate_n_plus(out_index, l, k, layer, input_layer_block): - if l == k: +def _calculate_n_plus(out_index, kernel_index, k, layer, input_layer_block): + if kernel_index == k: return 0 - x_l_index = layer.input_index_mapper(layer.get_input_index(out_index, l)) + x_l_index = layer.input_index_mapper(layer.get_input_index(out_index, kernel_index)) x_k_index = layer.input_index_mapper(layer.get_input_index(out_index, k)) return max( x_k_bound - x_l_bound diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 292f6d8c..d23380f0 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -77,7 +77,7 @@ def partition_based_dense_relu_layer(net_block, net, layer_block, layer, split_f prev_layer_block = net_block.layer[id(prev_layer)] @layer_block.Block(layer.output_indexes) - def output_node_block(b, *output_index): + def output_node_block(b, *output_index): # noqa: PLR0915 # dense layers multiply only the last dimension of # their inputs weights = layer.weights[:, output_index[-1]] diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index f4c91aa9..14beb5c6 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,3 +1,5 @@ +from functools import partial + import pyomo.environ as pyo from omlt.base import OmltConstraintFactory, OmltVarFactory @@ -138,7 +140,7 @@ def output_indexes(self): return network_outputs[0].output_indexes -def _build_neural_network_formulation( +def _build_neural_network_formulation( # noqa: C901 block, network_structure, layer_constraints, activation_constraints ): """Adds the neural network formulation to the given Pyomo block. 
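The functools.partial import added just above pairs with a hunk below that replaces a default-argument lambda (`lambda w: default_partition_split_func(w, 2)`) with `partial(default_partition_split_func, n=2)`, the lambda-assignment pattern that E731 flags. A minimal sketch of the equivalence (illustrative only, not part of the patch; the helper body is elided):

from functools import partial

def default_partition_split_func(w, n):
    # Stand-in for the real helper in omlt.neuralnet; only the call shape
    # (a weight array plus a partition count) matters for this sketch.
    ...

split_as_lambda = lambda w: default_partition_split_func(w, 2)  # noqa: E731
split_as_partial = partial(default_partition_split_func, n=2)  # equivalent, avoids the lambda assignment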
@@ -419,14 +421,6 @@ def z(b, *output_index): == block.layer[id(output_layer)].z[output_index] ) - # @property - # def layer_constraints(self): - # return self._layer_constraints - - # @property - # def activation_constraints(self): - # return self._activation_constraints - @property def input_indexes(self): """The indexes of the formulation inputs.""" @@ -490,11 +484,11 @@ def __init__(self, network_structure, split_func=None): self.__scaled_input_bounds = network_structure.scaled_input_bounds if split_func is None: - split_func = lambda w: default_partition_split_func(w, 2) + split_func = partial(default_partition_split_func, n=2) self.__split_func = split_func - def _build_formulation(self): + def _build_formulation(self): # noqa: C901 _setup_scaled_inputs_outputs( self.block, self.__scaling_object, self.__scaled_input_bounds ) diff --git a/src/omlt/scaling.py b/src/omlt/scaling.py index 5ffaafbe..443f14e3 100644 --- a/src/omlt/scaling.py +++ b/src/omlt/scaling.py @@ -120,8 +120,7 @@ def get_unscaled_input_expressions(self, scaled_input_vars): scaled_x = scaled_input_vars return { - k: scaled_x[k] * self.__x_factor[k] + self.__x_offset[k] - for k in scaled_x + k: scaled_x[k] * self.__x_factor[k] + self.__x_offset[k] for k in scaled_x } def get_scaled_output_expressions(self, output_vars): @@ -163,6 +162,5 @@ def get_unscaled_output_expressions(self, scaled_output_vars): scaled_y = scaled_output_vars return { - k: scaled_y[k] * self.__y_factor[k] + self.__y_offset[k] - for k in scaled_y + k: scaled_y[k] * self.__y_factor[k] + self.__y_offset[k] for k in scaled_y } diff --git a/tests/base/test_block.py b/tests/base/test_block.py index bc0b0e39..e7de62bd 100644 --- a/tests/base/test_block.py +++ b/tests/base/test_block.py @@ -46,8 +46,8 @@ def test_block(): m.b.build_formulation(formulation, lang="pyomo") assert m.b._OmltBlockCore__formulation is formulation - assert [k for k in m.b.inputs] == ["A", "C", "D"] - assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(m.b.inputs) == ["A", "C", "D"] + assert list(m.b.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_input_output_auto_creation(): diff --git a/tests/base/test_expression.py b/tests/base/test_expression.py index 34e1d3e7..1d33ed9d 100644 --- a/tests/base/test_expression.py +++ b/tests/base/test_expression.py @@ -5,6 +5,7 @@ VAR1_VALUE = 6 VAR2_VALUE = 3 CONST_VALUE = 4 +NUM_ARGS = 2 var_factory = OmltVarFactory() expr_factory = OmltExprFactory() @@ -71,7 +72,7 @@ def test_init_scalar_expression(): assert e2.is_potentially_variable() assert not e2.is_indexed() - assert e2.nargs() == 2 + assert e2.nargs() == NUM_ARGS assert e2.args[1] == CONST_VALUE assert e2.arg(1) == CONST_VALUE assert len(e2) == 1 diff --git a/tests/base/test_var.py b/tests/base/test_var.py index cf9a9fa3..0a4f06d5 100644 --- a/tests/base/test_var.py +++ b/tests/base/test_var.py @@ -3,6 +3,10 @@ from omlt.base import OmltVarFactory from omlt.dependencies import julia_available +VAR_VALUE = 3 +FIX_VALUE = 2 +UPPER_BOUND = 5 + var_factory = OmltVarFactory() @@ -20,12 +24,12 @@ def _test_scalar_var(lang): assert v.is_constructed() v.value = 3 - assert v.value == 3 + assert v.value == VAR_VALUE v.fix(2, skip_validation=True) v.bounds = (0, 5) assert v.lb == 0 - assert v.ub == 5 + assert v.ub == UPPER_BOUND v.lb = 1 v.ub = 3 assert v.bounds == (1, 3) @@ -62,11 +66,11 @@ def _test_indexed_var(lang): assert v.is_constructed() v.value = 3 - assert v.value == 3 + assert v.value == VAR_VALUE v.fix(2, skip_validation=True) for e in v: - assert 
v[e].value == 2 + assert v[e].value == FIX_VALUE v.fix() diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py index 3227e67d..fc74b34c 100644 --- a/tests/io/test_onnx_parser.py +++ b/tests/io/test_onnx_parser.py @@ -287,6 +287,7 @@ def test_consume_maxpool_wrong_dims(datadir): parser._nodes["node1"][1].input.append("abcd") expected_msg_maxpool = ( "node1 input has 2 dimensions, only nodes with 1 input " - "dimension can be used as starting points for parsing.") - with pytest.raises(ValueError, match = expected_msg_maxpool): + "dimension can be used as starting points for parsing." + ) + with pytest.raises(ValueError, match=expected_msg_maxpool): parser._consume_pool_nodes(parser._nodes["node1"][1], parser._nodes["node1"][2]) diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py index 0a6861d1..53d9db58 100644 --- a/tests/io/test_torch_geometric.py +++ b/tests/io/test_torch_geometric.py @@ -75,7 +75,7 @@ def _test_torch_geometric_reader(nn, activation, pooling): A = np.ones((N, N), dtype=int) net = load_torch_geometric_sequential(nn, N, A) layers = list(net.layers) - assert len(layers) == 7 + assert len(layers) == 7 # noqa: PLR2004 assert layers[1].weights.shape == (8, 16) assert layers[2].weights.shape == (16, 16) assert layers[3].weights.shape == (16, 16) @@ -112,8 +112,8 @@ def _test_gnn_with_fixed_graph(nn): m.nn = OmltBlock() A = np.eye(N, dtype=int) gnn_with_fixed_graph(m.nn, nn, N, A, scaled_input_bounds=input_bounds) - assert m.nvariables() == 282 - assert m.nconstraints() == 614 + assert m.nvariables() == 282 # noqa: PLR2004 + assert m.nconstraints() == 614 # noqa: PLR2004 @pytest.mark.skipif( @@ -131,9 +131,8 @@ def _test_gnn_with_non_fixed_graph(nn): m = pyo.ConcreteModel() m.nn = OmltBlock() gnn_with_non_fixed_graph(m.nn, nn, N, scaled_input_bounds=input_bounds) - assert m.nvariables() == 282 - print(m.nn.symmetric_adjacency.constraints) - assert m.nconstraints() == 620 + assert m.nvariables() == 282 # noqa: PLR2004 + assert m.nconstraints() == 620 # noqa: PLR2004 @pytest.mark.skipif( @@ -190,7 +189,7 @@ def _test_gnn_value_error(nn, error_info, error_type="ValueError"): for i in range(input_size[0]): input_bounds[(i)] = (-1.0, 1.0) if error_type == "ValueError": - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError) as excinfo: # noqa: PT011 load_torch_geometric_sequential( nn=nn, N=N, diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py index 30e3a1a2..a3082c01 100644 --- a/tests/linear_tree/test_lt_formulation.py +++ b/tests/linear_tree/test_lt_formulation.py @@ -84,7 +84,7 @@ def linear_model_tree(X, y): @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") -def test_linear_tree_model_single_var(): +def test_linear_tree_model_single_var(): # noqa: C901 # construct a LinearTreeDefinition regr_small = linear_model_tree(X=X_small, y=y_small) input_bounds = {0: (min(X_small)[0], max(X_small)[0])} @@ -192,6 +192,60 @@ def connect_outputs(mdl): assert y_pred[0] == pytest.approx(solution_1_bigm[1]) +def get_epsilon_test_model(formulation_lt): + model1 = pe.ConcreteModel() + model1.x = pe.Var(initialize=0) + model1.y = pe.Var(initialize=0) + model1.obj = pe.Objective(expr=model1.y, sense=pe.maximize) + model1.lt = OmltBlock() + model1.lt.build_formulation(formulation_lt) + + @model1.Constraint() + def connect_inputs(mdl): + return mdl.x == mdl.lt.inputs[0] + + @model1.Constraint() + def connect_outputs(mdl): + return mdl.y == mdl.lt.outputs[0] 
+
+    model1.x.fix(1.058749)
+
+    return model1
+
+
+@pytest.mark.skipif(
+    not lineartree_available or not cbc_available,
+    reason="Need Linear-Tree Package and cbc",
+)
+def test_nonzero_epsilon():
+    regr_small = linear_model_tree(X=X_small, y=y_small)
+    input_bounds = {0: (min(X_small)[0], max(X_small)[0])}
+    ltmodel_small = LinearTreeDefinition(regr_small, unscaled_input_bounds=input_bounds)
+    formulation_bad = LinearTreeGDPFormulation(
+        ltmodel_small, transformation="bigm", epsilon=0
+    )
+    formulation_good = LinearTreeGDPFormulation(
+        ltmodel_small, transformation="bigm", epsilon=1e-4
+    )
+
+    model_good = get_epsilon_test_model(formulation_good)
+    model_bad = get_epsilon_test_model(formulation_bad)
+
+    status_bad = pe.SolverFactory("cbc").solve(model_bad)
+    pe.assert_optimal_termination(status_bad)
+    solution_bad = (pe.value(model_bad.x), pe.value(model_bad.y))
+    y_pred = regr_small.predict(np.array(solution_bad[0]).reshape(1, -1))
+    # Without an epsilon, the model cheats and does not match the tree prediction
+    assert y_pred[0] != pytest.approx(solution_bad[1])
+
+    status = pe.SolverFactory("cbc").solve(model_good)
+    pe.assert_optimal_termination(status)
+    solution = (pe.value(model_good.x), pe.value(model_good.y))
+    y_pred = regr_small.predict(np.array(solution[0]).reshape(1, -1))
+    # With epsilon, the model matches the tree prediction
+    assert y_pred[0] == pytest.approx(solution[1])
+
+
 @pytest.mark.skipif(
     not lineartree_available or not cbc_available,
     reason="Need Linear-Tree Package and cbc",
@@ -381,7 +435,7 @@ def test_scaling():
 
 
 @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package")
-def test_linear_tree_model_multi_var():
+def test_linear_tree_model_multi_var():  # noqa: C901
     # construct a LinearTreeDefinition
     regr = linear_model_tree(X=X, y=Y)
     input_bounds = {0: (min(X[:, 0]), max(X[:, 0])), 1: (min(X[:, 1]), max(X[:, 1]))}
@@ -626,7 +680,7 @@ def connect_outputs(mdl):
 
 
 @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package")
-def test_summary_dict_as_argument():
+def test_summary_dict_as_argument():  # noqa: C901
    # construct a LinearTreeDefinition
     regr = linear_model_tree(X=X, y=Y)
     input_bounds = {0: (min(X[:, 0]), max(X[:, 0])), 1: (min(X[:, 1]), max(X[:, 1]))}
diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py
index ee073c5e..a18d7eea 100644
--- a/tests/neuralnet/test_network_definition.py
+++ b/tests/neuralnet/test_network_definition.py
@@ -12,8 +12,6 @@
 ALMOST_EXACTLY_EQUAL = 1e-8
 
-# TODO @cog-imperial: Build more tests with different activations and edge cases
-# https://github.com/cog-imperial/OMLT/issues/158
 def test_two_node_full_space():
     """Two node full space network.
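The epsilon test above guards against the solver exploiting the shared boundary between adjacent leaves when maximizing. A hedged Pyomo GDP sketch of the idea (leaf data and constraint shapes are illustrative, not OMLT's actual internals): each leaf becomes a disjunct enforcing the leaf's input interval and linear prediction, and epsilon tightens one side of each split so a boundary point cannot satisfy two leaves at once.

    import pyomo.environ as pe
    from pyomo.gdp import Disjunct, Disjunction

    def build_leaf_disjunction(model, leaves, epsilon=1e-4):
        # leaves: list of (lb, ub, slope, intercept) tuples -- hypothetical
        # stand-ins for the leaves of a fitted linear model tree
        model.leaf = Disjunct(range(len(leaves)))
        for i, (lb, ub, slope, intercept) in enumerate(leaves):
            d = model.leaf[i]
            # epsilon shifts the lower bound so x cannot sit exactly on a split
            # threshold and take whichever leaf prediction is more favorable
            d.lower = pe.Constraint(expr=model.x >= lb + epsilon)
            d.upper = pe.Constraint(expr=model.x <= ub)
            d.prediction = pe.Constraint(expr=model.y == slope * model.x + intercept)
        model.choose_leaf = Disjunction(
            expr=[model.leaf[i] for i in range(len(leaves))]
        )

    m = pe.ConcreteModel()
    m.x = pe.Var(bounds=(0, 2))
    m.y = pe.Var(bounds=(-5, 5))
    build_leaf_disjunction(m, leaves=[(0.0, 1.0, 1.0, 0.0), (1.0, 2.0, -1.0, 2.0)])
    # mirrors the transformation="bigm" argument used in the tests above
    pe.TransformationFactory("gdp.bigm").apply_to(m)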
diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index a9459883..75048b50 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -1,4 +1,6 @@ import re +from functools import partial +from typing import TYPE_CHECKING import numpy as np import pyomo.environ as pyo @@ -32,6 +34,9 @@ from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer from pyomo.contrib.fbbt import interval +if TYPE_CHECKING: + from omlt.formulation import _PyomoFormulation + formulations = { "FullSpace": FullSpaceNNFormulation, "ReducedSpace": ReducedSpaceNNFormulation, @@ -344,8 +349,6 @@ def test_maxpool_full_space_nn_formulation(): net, y = _maxpool_conv_network(inputs) m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) - # assert m.nvariables() == 15 - # assert m.nconstraints() == 14 for inputs_d in range(inputs.shape[0]): for inputs_r in range(inputs.shape[1]): @@ -524,7 +527,7 @@ def test_partition_based_unbounded_below(): prev_layer_block = m.neural_net_block.layer[prev_layer_id] prev_layer_block.z.setlb(-interval.inf) - split_func = lambda w: default_partition_split_func(w, 2) + split_func = partial(default_partition_split_func, n=2) expected_msg = "Expression is unbounded below." with pytest.raises(ValueError, match=expected_msg): @@ -550,7 +553,7 @@ def test_partition_based_unbounded_above(): prev_layer_block = m.neural_net_block.layer[prev_layer_id] prev_layer_block.z.setub(interval.inf) - split_func = lambda w: default_partition_split_func(w, 2) + split_func = partial(default_partition_split_func, n=2) expected_msg = "Expression is unbounded above." with pytest.raises(ValueError, match=expected_msg): @@ -573,7 +576,7 @@ def test_partition_based_bias_unbounded_below(): m.neural_net_block.build_formulation(formulation) test_layer.biases[0] = -interval.inf - split_func = lambda w: default_partition_split_func(w, 2) + split_func = partial(default_partition_split_func, n=2) expected_msg = "Expression is unbounded below." with pytest.raises(ValueError, match=expected_msg): @@ -592,7 +595,7 @@ def test_partition_based_bias_unbounded_above(): m.neural_net_block.build_formulation(formulation) test_layer.biases[0] = interval.inf - split_func = lambda w: default_partition_split_func(w, 2) + split_func = partial(default_partition_split_func, n=2) expected_msg = "Expression is unbounded above." 
     with pytest.raises(ValueError, match=expected_msg):
         partition_based_dense_relu_layer(
diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py
index 40ed37ef..97af8af0 100644
--- a/tests/neuralnet/test_relu.py
+++ b/tests/neuralnet/test_relu.py
@@ -11,8 +11,6 @@
 )
 from omlt.neuralnet.activations import ComplementarityReLUActivation
 
-# TODO @cog-imperial: Add tests for single dimensional outputs as well
-# https://github.com/cog-imperial/OMLT/issues/158
 
 NEAR_EQUAL = 1e-3
 
diff --git a/tests/neuralnet/train_keras_models.py b/tests/neuralnet/train_keras_models.py
index 81469c6a..a2a14d94 100644
--- a/tests/neuralnet/train_keras_models.py
+++ b/tests/neuralnet/train_keras_models.py
@@ -7,7 +7,7 @@
 from pyomo.common.fileutils import this_file_dir
 
 
-def train_models():
+def train_models():  # noqa: PLR0915
     x, y, x_test = get_neural_network_data("131")
     nn = Sequential(name="keras_linear_131")
     nn.add(
@@ -34,9 +34,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(this_file_dir() + "/models/keras_linear_131.keras")
 
     x, y, x_test = get_neural_network_data("131")
@@ -66,9 +64,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(this_file_dir() + "/models/keras_linear_131_sigmoid.keras")
 
     x, y, x_test = get_neural_network_data("131")
@@ -99,9 +95,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(
         this_file_dir() + "/models/keras_linear_131_sigmoid_output_activation.keras"
     )
@@ -133,9 +127,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(this_file_dir() + "/models/keras_linear_131_relu.keras")
 
     x, y, x_test = get_neural_network_data("131")
@@ -166,9 +158,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(this_file_dir() + "/models/keras_linear_131_relu_output_activation.keras")
 
     x, y, x_test = get_neural_network_data("131")
@@ -199,9 +189,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(
         this_file_dir()
         + "/models/keras_linear_131_sigmoid_softplus_output_activation.keras"
     )
@@ -260,9 +248,7 @@ def train_models():
         )
     )
     nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
-    nn.fit(
-        x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
-    )
+    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
     nn.save(this_file_dir() + "/models/big.keras")
 
     x, y, x_test = get_neural_network_data("2353")
@@ -302,9 
+288,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - nn.fit( - x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 - ) + nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15) nn.save(this_file_dir() + "/models/keras_linear_2353.keras") @@ -336,7 +320,7 @@ def train_conv(): input_bounds[0, i, j] = (0.0, 1.0) with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f: write_onnx_model_with_bounds(f.name, onnx_model, input_bounds) - print(f"Wrote ONNX model with bounds at {f.name}") + print(f"Wrote ONNX model with bounds at {f.name}") # noqa: T201 if __name__ == "__main__":
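For reference, the bounds file written at the end of `train_conv` pairs with OMLT's ONNX reader. A minimal round-trip sketch, assuming `onnx_model` and `input_bounds` are built as in `train_conv` above:

    import tempfile

    from omlt.io import (
        load_onnx_neural_network_with_bounds,
        write_onnx_model_with_bounds,
    )

    # Persist the model together with its input bounds, then load it back as
    # an OMLT network definition whose layers can be inspected or formulated.
    with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as f:
        write_onnx_model_with_bounds(f.name, onnx_model, input_bounds)

    net = load_onnx_neural_network_with_bounds(f.name)
    for layer in net.layers:
        print(layer, layer.activation)  # noqa: T201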