From 13b5265aab5290a727774df0fc3957e00d09a009 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 18 Mar 2024 22:16:04 -0700
Subject: [PATCH 01/60] Create main.yml copying CI workflow over

---
 .github/workflows/main.yml | 58 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 .github/workflows/main.yml

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 00000000..55870dbc
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,58 @@
+---
+name: CI
+
+on:
+  push:
+    branches: ["main","github-actions"]
+  pull_request:
+    branches: ["main"]
+  workflow_dispatch:
+
+jobs:
+  tests:
+    name: "Python ${{ matrix.python-version }}"
+    runs-on: "ubuntu-latest"
+
+    strategy:
+      matrix:
+        # python-version: ["3.7", "3.8", "3.9"]
+        python-version: ["3.8", "3.9", "3.10"]
+
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-python@v2"
+      - uses: "s-weigand/setup-conda@v1"
+        with:
+          python-version: "${{ matrix.python-version }}"
+
+      - name: Install solvers
+        run: sudo apt-get install -y glpk-utils coinor-cbc
+
+      - name: "Install dependencies"
+        run: |
+          set -xe
+          python -VV
+          python -m site
+          python -m pip install --upgrade pip setuptools wheel
+          python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions
+          conda install -c conda-forge ipopt
+          conda install -c conda-forge pyscipopt
+
+      - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}"
+        run: "tox -re leanenv"
+
+      - name: "Run tox targets for ${{ matrix.python-version }}"
+        run: "tox"
+
+      # - name: "Run tox notebooks targets for ${{ matrix.python-version }}"
+      #   run: |
+      #     shopt -s globstar
+      #     tox -e notebooks docs/**/*.ipynb
+
+      - name: "Convert coverage"
+        run: "python -m coverage xml"
+
+      - name: "Upload coverage to Codecov"
+        uses: "codecov/codecov-action@v2"
+        with:
+          fail_ci_if_error: true

From ccd3c89a97a624afbf4d62e232d298970a82a999 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Tue, 19 Mar 2024 00:58:12 -0700
Subject: [PATCH 02/60] Removing ipopt from CI workflow

---
 .github/workflows/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 55870dbc..3e302f03 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -35,7 +35,7 @@ jobs:
           python -m site
           python -m pip install --upgrade pip setuptools wheel
           python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions
-          conda install -c conda-forge ipopt
+          # conda install -c conda-forge ipopt
           conda install -c conda-forge pyscipopt
 
       - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}"
         run: "tox -re leanenv"

From ff4dbcb430b11413d8176d2d11d632e79d9de1a6 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 8 Apr 2024 18:28:49 +0000
Subject: [PATCH 03/60] Implementing JuMP format scalar and indexed variables.
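
This change adds an OmltVar abstraction layer underneath OmltBlock, so the
block can build its input and output variables in either modeling language.
A minimal usage sketch (illustrative only; it assumes juliacall and JuMP are
installed, and mirrors how block.py and the new test below use the API):

    import pyomo.environ as pyo
    from omlt import OmltBlock
    from omlt.base import OmltVar

    m = pyo.ConcreteModel()
    m.b = OmltBlock()
    # A scalar variable backed by a JuMP VariableInfo under the hood.
    m.b.x = OmltVar(initialize=0, format="jump")
    # An indexed variable over a Pyomo Set, also JuMP-backed.
    m.b.ins = pyo.Set(initialize=["A", "C", "D"])
    m.b.y = OmltVar(m.b.ins, initialize=0, format="jump")
    # Or let the block build everything in JuMP form:
    # m.b.build_formulation(formulation, format="jump")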
--- src/omlt/base.py | 279 +++++++++++++++++++++++++++++++++++++-- src/omlt/block.py | 19 ++- src/omlt/dependencies.py | 10 ++ src/omlt/formulation.py | 7 +- tests/test_block.py | 13 +- 5 files changed, 312 insertions(+), 16 deletions(-) diff --git a/src/omlt/base.py b/src/omlt/base.py index 223c76b5..ab735a6d 100644 --- a/src/omlt/base.py +++ b/src/omlt/base.py @@ -9,6 +9,17 @@ from abc import ABC, abstractmethod import pyomo.environ as pyo +from omlt.dependencies import julia_available, moi_available + +if julia_available and moi_available: + from juliacall import Main as jl + from juliacall import Base + + jl.seval("import MathOptInterface") + moi = jl.MathOptInterface + jl.seval("import JuMP") + jump = jl.JuMP + class OmltVar(ABC): def __new__(cls, *indexes, **kwargs): @@ -26,7 +37,7 @@ def __new__(cls, *args, format="pyomo", **kwargs): if format not in subclass_map: raise ValueError( f"Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'moi'.", + "are 'pyomo' or 'jump'.", format, ) subclass = subclass_map[format] @@ -76,6 +87,12 @@ def ub(self): def ub(self, val): pass + def is_component_type(self): + return True + + def is_indexed(self): + return False + # @abstractmethod # def __mul__(self, other): # pass @@ -204,15 +221,128 @@ def __abs__(self): return pyo.NumericValue.__abs__(self) +class OmltScalarJuMP(OmltScalar): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.ScalarVar + + def __init__(self, *args, **kwargs): + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = self._bounds[0] + _has_lb = _lb is not None + _ub = self._bounds[1] + _has_ub = _ub is not None + elif self._bounds is None: + _has_lb = False + _lb = None + _has_ub = False + _ub = None + else: + raise ValueError("Bounds must be given as a tuple") + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + raise ValueError( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." 
+ ) + elif _domain: + self.domain = _domain + elif _within: + self.domain = _within + else: + self.domain = None + + if self.domain == pyo.Binary: + self.binary = True + else: + self.binary = False + if self.domain == pyo.Integers: + self.integer = True + else: + self.integer = False + + _initialize = kwargs.pop("initialize", None) + + if _initialize: + self._value = _initialize + else: + self._value = None + + self._jumpvarinfo = jump.VariableInfo( + _has_lb, + _lb, + _has_ub, + _ub, + False, # is fixed + None, # fixed value + _initialize is not None, + self._value, + self.binary, + self.integer, + ) + self._constructed = False + self._parent = None + self._ctype = pyo.ScalarVar + + def construct(self, data): + if self._block: + self._jumpvar = jump.add_variable(self._block, self._jumpvarinfo) + else: + self._jumpvar = jump.build_variable(Base.error, self._jumpvarinfo) + self._constructed = True + + def fix(self, value, skip_validation): + self.fixed = True + self._value = value + + @property + def bounds(self): + pass + + @bounds.setter + def bounds(self, val): + pass + + @property + def lb(self): + return self._jumpvar.info.lower_bound + + @lb.setter + def lb(self, val): + jump.set_upper_bound(self._jumpvar, val) + + @property + def ub(self): + return self._jumpvar.info.upper_bound + + @ub.setter + def ub(self, val): + jump.set_upper_bound(self._jumpvar, val) + + def to_jump(self): + if self._constructed: + return self._jumpvar + + """ Future formats to implement. """ -class OmltScalarMOI(OmltScalar): - format = "moi" - - class OmltScalarSmoke(OmltScalar): format = "smoke" @@ -257,11 +387,16 @@ def setub(self, value): def setlb(self, value): pass + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + class OmltIndexedPyomo(pyo.Var, OmltIndexed): format = "pyomo" def __init__(self, *indexes, **kwargs): + kwargs.pop("format", None) super().__init__(*indexes, **kwargs) def fix(self, value=None, skip_validation=False): @@ -282,15 +417,141 @@ def setlb(self, value): vardata.lb = value +class OmltIndexedJuMP(OmltIndexed): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. 
+ @property + def __class__(self): + return pyo.Var + + def __init__(self, *indexes, **kwargs): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + else: + raise ValueError("Currently index cross-products are unsupported.") + self._varinfo = {} + for idx in self._index_set: + self._varinfo[idx] = jump.VariableInfo( + False, # _has_lb, + None, # _lb, + False, # _has_ub, + None, # _ub, + False, # is fixed + None, # fix value + False, # _initialize is not None, + None, # self._value, + False, # self.binary, + False, # self.integer + ) + self._vars = {} + self._constructed = False + self._ctype = pyo.Var + self._parent = None + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._vars[item[0]] + else: + return self._vars[item] + + def __setitem__(self, item, value): + self._vars[item] = value + + def keys(self): + return self._vars.keys() + + def values(self): + return self._vars.values() + + def items(self): + return self._vars.items() + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def __len__(self): + """ + Return the number of component data objects stored by this + component. + """ + return len(self._vars) + + def __contains__(self, idx): + """Return true if the index is in the dictionary""" + return idx in self._vars + + # The default implementation is for keys() and __iter__ to be + # synonyms. The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys""" + return self._vars.__iter__() + + def construct(self, data=None): + for idx in self._index_set: + self._vars[idx] = jump.build_variable(Base.error, self._varinfo[idx]) + self._constructed = True + + def setub(self, value): + if self._constructed: + for idx in self.index_set(): + self._varinfo[idx].has_ub = True + self._varinfo[idx].upper_bound = value + self._vars[idx].info.has_ub = True + self._vars[idx].info.upper_bound = value + else: + for idx in self.index_set(): + self._varinfo[idx].has_ub = True + self._varinfo[idx].upper_bound = value + + def setlb(self, value): + if self._constructed: + for idx in self.index_set(): + self._varinfo[idx].has_lb = True + self._varinfo[idx].lower_bound = value + self._vars[idx].info.has_lb = True + self._vars[idx].info.lower_bound = value + else: + for idx in self.index_set(): + self._varinfo[idx].has_lb = True + self._varinfo[idx].lower_bound = value + + @property + def ctype(self): + return self._ctype + + def index_set(self): + return self._index_set + + @property + def name(self): + return self._name + + def to_jump(self): + if self._constructed: + return jump.Containers.DenseAxisArray( + list(self._vars.values()), self.index_set() + ) + + """ Future formats to implement. 
""" -class OmltIndexedMOI(OmltIndexed): - format = "moi" - - class OmltIndexedSmoke(OmltIndexed): format = "smoke" diff --git a/src/omlt/block.py b/src/omlt/block.py index 04932e41..26ef423d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -38,6 +38,7 @@ def __init__(self, component): self.__formulation = None self.__input_indexes = None self.__output_indexes = None + self.__format = "pyomo" def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ @@ -54,18 +55,20 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ self.__input_indexes = input_indexes self.__output_indexes = output_indexes + if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide more contextual error msg + # TODO: implement this check higher up in the class hierarchy to + # provide more contextual error msg raise ValueError( "OmltBlock must have at least one input and at least one output." ) self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0) + self.inputs = OmltVar(self.inputs_set, initialize=0, format=self.__format) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0) + self.outputs = OmltVar(self.outputs_set, initialize=0, format=self.__format) - def build_formulation(self, formulation): + def build_formulation(self, formulation, format=None): """ Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network @@ -76,7 +79,15 @@ def build_formulation(self, formulation): ---------- formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation + format : str + Which modelling language to build the formulation in. + Currently supported are "pyomo" (default) and "jump". + """ + + if format is not None: + self.__format = format + self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/src/omlt/dependencies.py b/src/omlt/dependencies.py index 6330c38f..595e2274 100644 --- a/src/omlt/dependencies.py +++ b/src/omlt/dependencies.py @@ -8,3 +8,13 @@ torch_geometric, torch_geometric_available = attempt_import("torch_geometric") lineartree, lineartree_available = attempt_import("lineartree") + +julia, julia_available = attempt_import("juliacall") + +if julia_available: + from juliacall import Main as jl + try: + jl.seval("import MathOptInterface") + moi_available = True + except jl.ArgumentError: + moi_available = False diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 0d054ca9..3149d7ef 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -4,6 +4,7 @@ import pyomo.environ as pyo from omlt.base import OmltVar + class _PyomoFormulationInterface(abc.ABC): """ Base class interface for a Pyomo formulation object. This class @@ -54,7 +55,8 @@ def _build_formulation(self): class _PyomoFormulation(_PyomoFormulationInterface): """ This is a base class for different Pyomo formulations. To create a new - formulation, inherit from this class and implement the abstract methods and properties. + formulation, inherit from this class and implement the abstract methods + and properties. 
""" def __init__(self): @@ -66,7 +68,8 @@ def _set_block(self, block): @property def block(self): - """The underlying block containing the constraints / variables for this formulation.""" + """The underlying block containing the constraints / variables for this + formulation.""" return self.__block() diff --git a/tests/test_block.py b/tests/test_block.py index 6c6311f5..03f025b7 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -35,7 +35,18 @@ def test_block(): m.b = OmltBlock() formulation = dummy_formulation() m.b.build_formulation(formulation) - print(dir(m.b)) + assert m.b._OmltBlockData__formulation is formulation + assert [k for k in m.b.inputs] == ["A", "C", "D"] + assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + + +def test_jump_block(): + m = pyo.ConcreteModel() + m.b = OmltBlock() + formulation = dummy_formulation() + + m.b.build_formulation(formulation, format="jump") + assert m.b._OmltBlockData__formulation is formulation assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] From be74ed29bced30ad3f0e19c6157fc8612c0994c0 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:19:29 +0000 Subject: [PATCH 04/60] Cleaning up variables --- src/omlt/base.py | 580 ---------------------------- src/omlt/base/__init__.py | 7 + src/omlt/base/julia.py | 114 ++++++ src/omlt/base/var.py | 771 ++++++++++++++++++++++++++++++++++++++ src/omlt/block.py | 5 +- tests/test_block.py | 6 + 6 files changed, 901 insertions(+), 582 deletions(-) delete mode 100644 src/omlt/base.py create mode 100644 src/omlt/base/__init__.py create mode 100644 src/omlt/base/julia.py create mode 100644 src/omlt/base/var.py diff --git a/src/omlt/base.py b/src/omlt/base.py deleted file mode 100644 index ab735a6d..00000000 --- a/src/omlt/base.py +++ /dev/null @@ -1,580 +0,0 @@ -""" -Abstraction layer of classes used by OMLT. Underneath these are -objects in a choice of modeling languages: Pyomo (default), -MathOptInterface, or Smoke (not yet implemented). - - -""" - -from abc import ABC, abstractmethod -import pyomo.environ as pyo - -from omlt.dependencies import julia_available, moi_available - -if julia_available and moi_available: - from juliacall import Main as jl - from juliacall import Base - - jl.seval("import MathOptInterface") - moi = jl.MathOptInterface - jl.seval("import JuMP") - jump = jl.JuMP - - -class OmltVar(ABC): - def __new__(cls, *indexes, **kwargs): - - if not indexes: - instance = OmltScalar.__new__(OmltScalar, **kwargs) - else: - instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) - return instance - - -class OmltScalar(OmltVar): - def __new__(cls, *args, format="pyomo", **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - f"Variable format %s not recognized. 
Supported formats " - "are 'pyomo' or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - - instance.__init__(*args, **kwargs) - return instance - - def __init__(self, *args, **kwargs): - pass - - @abstractmethod - def construct(self, data): - pass - - @abstractmethod - def fix(self, value, skip_validation): - pass - - @property - @abstractmethod - def bounds(self): - pass - - @bounds.setter - @abstractmethod - def bounds(self, val): - pass - - @property - @abstractmethod - def lb(self): - pass - - @lb.setter - @abstractmethod - def lb(self, val): - pass - - @property - @abstractmethod - def ub(self): - pass - - @ub.setter - @abstractmethod - def ub(self, val): - pass - - def is_component_type(self): - return True - - def is_indexed(self): - return False - - # @abstractmethod - # def __mul__(self, other): - # pass - - # @abstractmethod - # def __rmul__(self, other): - # pass - - -class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): - format = "pyomo" - - def __init__(self, *args, **kwargs): - pyo.ScalarVar.__init__(self, *args, **kwargs) - - def construct(self, data): - super().construct(data) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - super().fix(skip_validation) - else: - super().fix(value, skip_validation) - - @property - def bounds(self): - return super().bounds - - @bounds.setter - def bounds(self, val): - super().bounds = val - - @property - def ub(self): - return super().ub - - @ub.setter - def ub(self, val): - super().ub = val - - @property - def lb(self): - return super().__get__(self.lb) - - @lb.setter - def lb(self, val): - super().__setattr__(self.lb, val) - - def __lt__(self, other): - return pyo.NumericValue.__lt__(self, other) - - def __gt__(self, other): - return pyo.NumericValue.__gt__(self, other) - - def __le__(self, other): - return pyo.NumericValue.__le__(self, other) - - def __ge__(self, other): - return pyo.NumericValue.__ge__(self, other) - - def __eq__(self, other): - return pyo.NumericValue.__eq__(self, other) - - def __add__(self, other): - return pyo.NumericValue.__add__(self, other) - - def __sub__(self, other): - return pyo.NumericValue.__sub__(self, other) - - # def __mul__(self,other): - # return pyo.NumericValue.__mul__(self,other) - - def __div__(self, other): - return pyo.NumericValue.__div__(self, other) - - def __truediv__(self, other): - return pyo.NumericValue.__truediv__(self, other) - - def __pow__(self, other): - return pyo.NumericValue.__pow__(self, other) - - def __radd__(self, other): - return pyo.NumericValue.__radd__(self, other) - - def __rsub__(self, other): - return pyo.NumericValue.__rsub__(self, other) - - # def __rmul__(self,other): - # return self._ComponentDataClass.__rmul__(self,other) - - def __rdiv__(self, other): - return pyo.NumericValue.__rdiv__(self, other) - - def __rtruediv__(self, other): - return pyo.NumericValue.__rtruediv__(self, other) - - def __rpow__(self, other): - return pyo.NumericValue.__rpow__(self, other) - - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - 
return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) - - -class OmltScalarJuMP(OmltScalar): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.ScalarVar - - def __init__(self, *args, **kwargs): - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = self._bounds[0] - _has_lb = _lb is not None - _ub = self._bounds[1] - _has_ub = _ub is not None - elif self._bounds is None: - _has_lb = False - _lb = None - _has_ub = False - _ub = None - else: - raise ValueError("Bounds must be given as a tuple") - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - raise ValueError( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - elif _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - self._value = _initialize - else: - self._value = None - - self._jumpvarinfo = jump.VariableInfo( - _has_lb, - _lb, - _has_ub, - _ub, - False, # is fixed - None, # fixed value - _initialize is not None, - self._value, - self.binary, - self.integer, - ) - self._constructed = False - self._parent = None - self._ctype = pyo.ScalarVar - - def construct(self, data): - if self._block: - self._jumpvar = jump.add_variable(self._block, self._jumpvarinfo) - else: - self._jumpvar = jump.build_variable(Base.error, self._jumpvarinfo) - self._constructed = True - - def fix(self, value, skip_validation): - self.fixed = True - self._value = value - - @property - def bounds(self): - pass - - @bounds.setter - def bounds(self, val): - pass - - @property - def lb(self): - return self._jumpvar.info.lower_bound - - @lb.setter - def lb(self, val): - jump.set_upper_bound(self._jumpvar, val) - - @property - def ub(self): - return self._jumpvar.info.upper_bound - - @ub.setter - def ub(self, val): - jump.set_upper_bound(self._jumpvar, val) - - def to_jump(self): - if self._constructed: - return self._jumpvar - - -""" -Future formats to implement. -""" - - -class OmltScalarSmoke(OmltScalar): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltScalarGurobi(OmltScalar): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) - - -class OmltIndexed(OmltVar): - def __new__(cls, *indexes, format="pyomo", **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - f"Variable format %s not recognized. 
Supported formats are 'pyomo'" - " or 'moi'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - return instance - - @abstractmethod - def fix(self, value=None, skip_validation=False): - pass - - @abstractmethod - def setub(self, value): - pass - - @abstractmethod - def setlb(self, value): - pass - - def valid_model_component(self): - """Return True if this can be used as a model component.""" - return True - - -class OmltIndexedPyomo(pyo.Var, OmltIndexed): - format = "pyomo" - - def __init__(self, *indexes, **kwargs): - kwargs.pop("format", None) - super().__init__(*indexes, **kwargs) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in self.values(): - vardata.fix(value, skip_validation) - - def setub(self, value): - for vardata in self.values(): - vardata.ub = value - - def setlb(self, value): - for vardata in self.values(): - vardata.lb = value - - -class OmltIndexedJuMP(OmltIndexed): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.Var - - def __init__(self, *indexes, **kwargs): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - else: - raise ValueError("Currently index cross-products are unsupported.") - self._varinfo = {} - for idx in self._index_set: - self._varinfo[idx] = jump.VariableInfo( - False, # _has_lb, - None, # _lb, - False, # _has_ub, - None, # _ub, - False, # is fixed - None, # fix value - False, # _initialize is not None, - None, # self._value, - False, # self.binary, - False, # self.integer - ) - self._vars = {} - self._constructed = False - self._ctype = pyo.Var - self._parent = None - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._vars[item[0]] - else: - return self._vars[item] - - def __setitem__(self, item, value): - self._vars[item] = value - - def keys(self): - return self._vars.keys() - - def values(self): - return self._vars.values() - - def items(self): - return self._vars.items() - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in self.values(): - vardata.fix(value, skip_validation) - - def __len__(self): - """ - Return the number of component data objects stored by this - component. - """ - return len(self._vars) - - def __contains__(self, idx): - """Return true if the index is in the dictionary""" - return idx in self._vars - - # The default implementation is for keys() and __iter__ to be - # synonyms. 
The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys""" - return self._vars.__iter__() - - def construct(self, data=None): - for idx in self._index_set: - self._vars[idx] = jump.build_variable(Base.error, self._varinfo[idx]) - self._constructed = True - - def setub(self, value): - if self._constructed: - for idx in self.index_set(): - self._varinfo[idx].has_ub = True - self._varinfo[idx].upper_bound = value - self._vars[idx].info.has_ub = True - self._vars[idx].info.upper_bound = value - else: - for idx in self.index_set(): - self._varinfo[idx].has_ub = True - self._varinfo[idx].upper_bound = value - - def setlb(self, value): - if self._constructed: - for idx in self.index_set(): - self._varinfo[idx].has_lb = True - self._varinfo[idx].lower_bound = value - self._vars[idx].info.has_lb = True - self._vars[idx].info.lower_bound = value - else: - for idx in self.index_set(): - self._varinfo[idx].has_lb = True - self._varinfo[idx].lower_bound = value - - @property - def ctype(self): - return self._ctype - - def index_set(self): - return self._index_set - - @property - def name(self): - return self._name - - def to_jump(self): - if self._constructed: - return jump.Containers.DenseAxisArray( - list(self._vars.values()), self.index_set() - ) - - -""" -Future formats to implement. -""" - - -class OmltIndexedSmoke(OmltIndexed): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltIndexedGurobi(OmltIndexed): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." 
-        )
-
-
-class OmltSet:
-    def __init__(self):
-        pass
-
-
-class OmltExpression:
-    def __init__(self):
-        pass
diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py
new file mode 100644
index 00000000..328ea98a
--- /dev/null
+++ b/src/omlt/base/__init__.py
@@ -0,0 +1,7 @@
+DEFAULT_MODELING_LANGUAGE = "pyomo"
+
+from omlt.base.julia import jump
+from omlt.base.var import OmltVar
+
+# from omlt.base.expression import OmltExpression
+# from omlt.base.constraint import OmltConstraint
diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py
new file mode 100644
index 00000000..1e6fb413
--- /dev/null
+++ b/src/omlt/base/julia.py
@@ -0,0 +1,114 @@
+from omlt.dependencies import julia_available, moi_available
+from omlt.base.expression import OmltExpression
+
+if julia_available and moi_available:
+    from juliacall import Main as jl
+    from juliacall import Base
+
+    jl_err = Base.error
+    jl.seval("import MathOptInterface")
+    moi = jl.MathOptInterface
+    jl.seval("import JuMP")
+    jump = jl.JuMP
+
+
+class JuMPVarInfo:
+    def __init__(
+        self,
+        lower_bound=None,
+        upper_bound=None,
+        fixed_value=None,
+        start_value=None,
+        binary=False,
+        integer=False,
+    ):
+        self.has_lb = lower_bound is not None
+        self.lb = lower_bound
+        self.has_ub = upper_bound is not None
+        self.ub = upper_bound
+        self.has_fix = fixed_value is not None
+        self.fixed_value = fixed_value
+        self.has_start = start_value is not None
+        self.start_value = start_value
+        self.binary = binary
+        self.integer = integer
+
+    @property
+    def lower_bound(self):
+        return self.lb
+
+    @lower_bound.setter
+    def lower_bound(self, value=None):
+        self.lb = value
+        self.has_lb = value is not None
+
+    def setlb(self, value):
+        self.lower_bound = value
+
+    @property
+    def upper_bound(self):
+        return self.ub
+
+    @upper_bound.setter
+    def upper_bound(self, value=None):
+        self.ub = value
+        self.has_ub = value is not None
+
+    def setub(self, value):
+        self.upper_bound = value
+
+    def to_jump(self):
+        return jump.VariableInfo(
+            self.has_lb,
+            self.lower_bound,
+            self.has_ub,
+            self.upper_bound,
+            self.has_fix,
+            self.fixed_value,
+            self.has_start,
+            self.start_value,
+            self.binary,
+            self.integer,
+        )
+
+
+class JumpVar:
+    def __init__(self, varinfo: JuMPVarInfo, name):
+        self.info = varinfo
+        self.name = name
+        self.construct()
+
+    def __str__(self):
+        return self.name
+
+    def setlb(self, value):
+        self.info.setlb(value)
+        self.construct()
+
+    def setub(self, value):
+        self.info.setub(value)
+        self.construct()
+
+    def construct(self):
+        self.var = jump.build_variable(Base.error, self.info.to_jump())
+
+    @property
+    def value(self):
+        return self.var.info.start
+
+    def add_to_model(self, model, name=None):
+        if name is None:
+            name = self._name
+        jump.add_variable(model, self.var, name)
+
+    def to_jump(self):
+        return self.var
+
+    def __sub__(self, other):
+        return OmltExpression(expr=(self, "-", other), format="jump")
+
+    def __mul__(self, other):
+        return OmltExpression(expr=(self, "*", other), format="jump")
+
+    def __eq__(self, other):
+        return OmltExpression(expr=(self, "==", other), format="jump")
diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py
new file mode 100644
index 00000000..eea523c6
--- /dev/null
+++ b/src/omlt/base/var.py
@@ -0,0 +1,771 @@
+"""
+Abstraction layer of classes used by OMLT. Underneath these are
+objects in a choice of modeling languages: Pyomo (default),
+JuMP, or others (not yet implemented - e.g. Smoke, Gurobi).
+ + +""" + +from abc import ABC, abstractmethod +import pyomo.environ as pyo + +from omlt.base import jump, DEFAULT_MODELING_LANGUAGE +from omlt.base.julia import JuMPVarInfo, JumpVar + + +class OmltVar(ABC): + def __new__(cls, *indexes, **kwargs): + + if not indexes: + instance = OmltScalar.__new__(OmltScalar, **kwargs) + else: + instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) + return instance + + @abstractmethod + def construct(self, data): + pass + + @abstractmethod + def fix(self, value, skip_validation): + pass + + @property + @abstractmethod + def ctype(self): + pass + + @property + @abstractmethod + def name(self): + pass + + # Some methods to tell OMLT (and Pyomo components) that this + # is a variable. + def is_component_type(self): + return True + + @abstractmethod + def is_indexed(self): + pass + + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + + +class OmltScalar(OmltVar): + def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + f"Variable format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltVar, subclass).__new__(subclass) + + instance.__init__(*args, **kwargs) + instance._format = format + return instance + + def is_indexed(self): + return False + + # Bound-setting interface for scalar variables: + @property + @abstractmethod + def bounds(self): + pass + + @bounds.setter + @abstractmethod + def bounds(self, val): + pass + + @property + @abstractmethod + def lb(self): + pass + + @lb.setter + @abstractmethod + def lb(self, val): + pass + + @property + @abstractmethod + def ub(self): + pass + + @ub.setter + @abstractmethod + def ub(self, val): + pass + + # Interface for getting/setting value + @property + @abstractmethod + def value(self): + pass + + @value.setter + @abstractmethod + def value(self, val): + pass + + # Interface governing how variables behave in expressions. 
+ + # def __lt__(self, other): + # return pyo.NumericValue.__lt__(self, other) + + # def __gt__(self, other): + # return pyo.NumericValue.__gt__(self, other) + + # def __le__(self, other): + # return pyo.NumericValue.__le__(self, other) + + # def __ge__(self, other): + # return pyo.NumericValue.__ge__(self, other) + + # def __eq__(self, other): + # return pyo.NumericValue.__eq__(self, other) + + # def __add__(self, other): + # return pyo.NumericValue.__add__(self, other) + + # def __sub__(self, other): + # return pyo.NumericValue.__sub__(self, other) + + # # def __mul__(self,other): + # # return pyo.NumericValue.__mul__(self,other) + + # def __div__(self, other): + # return pyo.NumericValue.__div__(self, other) + + # def __truediv__(self, other): + # return pyo.NumericValue.__truediv__(self, other) + + # def __pow__(self, other): + # return pyo.NumericValue.__pow__(self, other) + + # def __radd__(self, other): + # return pyo.NumericValue.__radd__(self, other) + + # def __rsub__(self, other): + # return pyo.NumericValue.__rsub__(self, other) + + # # def __rmul__(self,other): + # # return self._ComponentDataClass.__rmul__(self,other) + + # def __rdiv__(self, other): + # return pyo.NumericValue.__rdiv__(self, other) + + # def __rtruediv__(self, other): + # return pyo.NumericValue.__rtruediv__(self, other) + + # def __rpow__(self, other): + # return pyo.NumericValue.__rpow__(self, other) + + # def __iadd__(self, other): + # return pyo.NumericValue.__iadd__(self, other) + + # def __isub__(self, other): + # return pyo.NumericValue.__isub__(self, other) + + # def __imul__(self, other): + # return pyo.NumericValue.__imul__(self, other) + + # def __idiv__(self, other): + # return pyo.NumericValue.__idiv__(self, other) + + # def __itruediv__(self, other): + # return pyo.NumericValue.__itruediv__(self, other) + + # def __ipow__(self, other): + # return pyo.NumericValue.__ipow__(self, other) + + # def __neg__(self): + # return pyo.NumericValue.__neg__(self) + + # def __pos__(self): + # return pyo.NumericValue.__pos__(self) + + # def __abs__(self): + # return pyo.NumericValue.__abs__(self) + + +class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): + format = "pyomo" + + def __init__(self, *args, **kwargs): + kwargs.pop("format", None) + pyo.ScalarVar.__init__(self, *args, **kwargs) + + +class OmltScalarJuMP(OmltScalar): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. + @property + def __class__(self): + return pyo.ScalarVar + + def __init__(self, *args, **kwargs): + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = self._bounds[0] + _ub = self._bounds[1] + elif self._bounds is None: + _lb = None + _ub = None + else: + raise ValueError("Bounds must be given as a tuple") + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + raise ValueError( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." 
+            )
+        elif _domain:
+            self.domain = _domain
+        elif _within:
+            self.domain = _within
+        else:
+            self.domain = None
+
+        if self.domain == pyo.Binary:
+            self.binary = True
+        else:
+            self.binary = False
+        if self.domain == pyo.Integers:
+            self.integer = True
+        else:
+            self.integer = False
+
+        _initialize = kwargs.pop("initialize", None)
+
+        if _initialize:
+            self._value = _initialize
+        else:
+            self._value = None
+
+        self._varinfo = JuMPVarInfo(
+            _lb,
+            _ub,
+            None,  # fix value
+            self._value,
+            self.binary,
+            self.integer,
+        )
+        self._constructed = False
+        self._parent = None
+        self._ctype = pyo.ScalarVar
+        self._name = None
+
+    def construct(self, data=None):
+        self._var = JumpVar(self._varinfo, self._name)
+        self._constructed = True
+        if self._block:
+            self._blockvar = jump.add_variable(self._block, self._var)
+
+    def fix(self, value, skip_validation):
+        self.fixed = True
+        self._value = value
+        self._varinfo.fixed_value = value
+        self._varinfo.has_fix = value is not None
+        if self._constructed:
+            self.construct()
+
+    @property
+    def bounds(self):
+        return (self.lb, self.ub)
+
+    @bounds.setter
+    def bounds(self, val):
+        if val is None:
+            self.lb = None
+            self.ub = None
+        elif len(val) == 2:
+            self.lb = val[0]
+            self.ub = val[1]
+
+    @property
+    def lb(self):
+        return self._varinfo.lower_bound
+
+    @lb.setter
+    def lb(self, val):
+        self._varinfo.lower_bound = val
+        if self._constructed:
+            self.construct()
+
+    @property
+    def ub(self):
+        return self._varinfo.upper_bound
+
+    @ub.setter
+    def ub(self, val):
+        self._varinfo.upper_bound = val
+        if self._constructed:
+            self.construct()
+
+    @property
+    def value(self):
+        if self._constructed:
+            return self._var.value
+        else:
+            return self._varinfo.start_value
+
+    @value.setter
+    def value(self, val):
+        if self._constructed:
+            self._var.value = val
+        else:
+            self._varinfo.start_value = val
+
+    @property
+    def ctype(self):
+        return self._ctype
+
+    @property
+    def name(self):
+        return self._name
+
+    @name.setter
+    def name(self, value):
+        self._name = value
+
+    def to_jump(self):
+        if self._constructed:
+            return self._var.to_jump()
+        else:
+            return self._varinfo.to_jump()
+
+
+"""
+Future formats to implement.
+"""
+
+
+class OmltScalarSmoke(OmltScalar):
+    format = "smoke"
+
+    def __init__(self, *args, **kwargs):
+        raise ValueError(
+            "Storing variables in Smoke format is not currently implemented."
+        )
+
+
+class OmltScalarGurobi(OmltScalar):
+    format = "gurobi"
+
+    def __init__(self, *args, **kwargs):
+        raise ValueError(
+            "Storing variables in Gurobi format is not currently implemented."
+        )
+
+
+class OmltIndexed(OmltVar):
+    def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs):
+        subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()}
+        if format not in subclass_map:
+            raise ValueError(
+                f"Variable format %s not recognized. Supported formats are 'pyomo'"
+                " or 'jump'.",
+                format,
+            )
+        subclass = subclass_map[format]
+        instance = super(OmltVar, subclass).__new__(subclass)
+        instance.__init__(*indexes, **kwargs)
+        instance._format = format
+        return instance
+
+    def is_indexed(self):
+        return True
+
+    @property
+    @abstractmethod
+    def index_set(self):
+        pass
+
+    # Bound-setting interface for indexed variables:
+    @abstractmethod
+    def setub(self, value):
+        pass
+
+    @abstractmethod
+    def setlb(self, value):
+        pass
+
+    # Interface: act as a dict for the sub-variables.
+ @abstractmethod + def __getitem__(self, item): + pass + + @abstractmethod + def __setitem__(self, item, value): + pass + + @abstractmethod + def keys(self): + pass + + @abstractmethod + def values(self): + pass + + @abstractmethod + def items(self): + pass + + @abstractmethod + def __len__(self): + pass + + @abstractmethod + def __contains__(self, idx): + pass + + @abstractmethod + def __iter__(self): + pass + + +# Interface governing how variables behave in expressions. + +# def __lt__(self, other): +# return pyo.NumericValue.__lt__(self, other) + +# def __gt__(self, other): +# return pyo.NumericValue.__gt__(self, other) + +# def __le__(self, other): +# return pyo.NumericValue.__le__(self, other) + +# def __ge__(self, other): +# return pyo.NumericValue.__ge__(self, other) + +# def __eq__(self, other): +# return pyo.NumericValue.__eq__(self, other) + +# def __add__(self, other): +# return pyo.NumericValue.__add__(self, other) + +# def __sub__(self, other): +# return pyo.NumericValue.__sub__(self, other) + +# # def __mul__(self,other): +# # return pyo.NumericValue.__mul__(self,other) + +# def __div__(self, other): +# return pyo.NumericValue.__div__(self, other) + +# def __truediv__(self, other): +# return pyo.NumericValue.__truediv__(self, other) + +# def __pow__(self, other): +# return pyo.NumericValue.__pow__(self, other) + +# def __radd__(self, other): +# return pyo.NumericValue.__radd__(self, other) + +# def __rsub__(self, other): +# return pyo.NumericValue.__rsub__(self, other) + +# # def __rmul__(self,other): +# # return self._ComponentDataClass.__rmul__(self,other) + +# def __rdiv__(self, other): +# return pyo.NumericValue.__rdiv__(self, other) + +# def __rtruediv__(self, other): +# return pyo.NumericValue.__rtruediv__(self, other) + +# def __rpow__(self, other): +# return pyo.NumericValue.__rpow__(self, other) + +# def __iadd__(self, other): +# return pyo.NumericValue.__iadd__(self, other) + +# def __isub__(self, other): +# return pyo.NumericValue.__isub__(self, other) + +# def __imul__(self, other): +# return pyo.NumericValue.__imul__(self, other) + +# def __idiv__(self, other): +# return pyo.NumericValue.__idiv__(self, other) + +# def __itruediv__(self, other): +# return pyo.NumericValue.__itruediv__(self, other) + +# def __ipow__(self, other): +# return pyo.NumericValue.__ipow__(self, other) + +# def __neg__(self): +# return pyo.NumericValue.__neg__(self) + +# def __pos__(self): +# return pyo.NumericValue.__pos__(self) + +# def __abs__(self): +# return pyo.NumericValue.__abs__(self) + + +class OmltIndexedPyomo(pyo.Var, OmltIndexed): + format = "pyomo" + + def __init__(self, *indexes, **kwargs): + kwargs.pop("format", None) + super().__init__(*indexes, **kwargs) + + def fix(self, value=None, skip_validation=False): + self.fixed = True + if value is None: + for vardata in self.values(): + vardata.fix(skip_validation) + else: + for vardata in self.values(): + vardata.fix(value, skip_validation) + + def setub(self, value): + for vardata in self.values(): + vardata.ub = value + + def setlb(self, value): + for vardata in self.values(): + vardata.lb = value + + +class OmltIndexedJuMP(OmltIndexed): + format = "jump" + + # Claim to be a Pyomo Var so blocks will register + # properly. 
+ @property + def __class__(self): + return pyo.Var + + def __init__(self, *indexes, **kwargs): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + else: + raise ValueError("Currently index cross-products are unsupported.") + + self._block = kwargs.pop("block", None) + + self._bounds = kwargs.pop("bounds", None) + + if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set): + _lb = {k: v[0] for k, v in self._bounds.items()} + _ub = {k: v[1] for k, v in self._bounds.items()} + elif isinstance(self._bounds, tuple) and len(self._bounds) == 2: + _lb = {i: self._bounds[0] for i in self._index_set} + _ub = {i: self._bounds[1] for i in self._index_set} + elif self._bounds is None: + _lb = {i: None for i in self._index_set} + _ub = {i: None for i in self._index_set} + else: + raise ValueError( + f"Bounds must be given as a tuple," " but %s was given.", self._bounds + ) + + _domain = kwargs.pop("domain", None) + _within = kwargs.pop("within", None) + + if _domain and _within and _domain != _within: + raise ValueError( + "'domain' and 'within' keywords have both " + "been supplied and do not agree. Please try " + "with a single keyword for the domain of this " + "variable." + ) + elif _domain: + self.domain = _domain + elif _within: + self.domain = _within + else: + self.domain = None + + if self.domain == pyo.Binary: + self.binary = True + else: + self.binary = False + if self.domain == pyo.Integers: + self.integer = True + else: + self.integer = False + + _initialize = kwargs.pop("initialize", None) + + if _initialize: + # If starting values have same length as index set, + # take one for each variable in index. + if len(self._index_set) == len(_initialize): + self._value = _initialize + # If there's a single starting value, use it for all + # variables in index. + elif len(_initialize) == 1: + self._value = {i: _initialize[0] for i in self._index_set} + else: + raise ValueError( + f"Index set has length %s, but" " initializer has length %s.", + len(self._index_set), + len(_initialize), + ) + else: + self._value = {i: None for i in self._index_set} + + self._varinfo = {} + for idx in self._index_set: + self._varinfo[idx] = JuMPVarInfo( + _lb[idx], + _ub[idx], + None, # fix value + self._value[idx], + self.binary, + self.integer, + ) + self._vars = {} + self._constructed = False + self._ctype = pyo.Var + self._parent = None + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._vars[item[0]] + else: + return self._vars[item] + + def __setitem__(self, item, value): + self._varinfo[item] = value + if self._constructed: + self.construct() + + def keys(self): + return self._vars.keys() + + def values(self): + return self._vars.values() + + def items(self): + return self._vars.items() + + def fix(self, value=None): + self.fixed = True + if value is not None: + for vardata in self._varinfo(): + vardata.has_fix = True + vardata.fixed_value = value + else: + for vardata in self._varinfo(): + vardata.has_fix = True + + def __len__(self): + """ + Return the number of component data objects stored by this + component. + """ + return len(self._vars) + + def __contains__(self, idx): + """Return true if the index is in the dictionary""" + return idx in self._vars + + # The default implementation is for keys() and __iter__ to be + # synonyms. 
The logic is implemented in keys() so that
+    # keys/values/items continue to work for components that implement
+    # other definitions for __iter__ (e.g., Set)
+    def __iter__(self):
+        """Return an iterator of the component data keys"""
+        return self._vars.__iter__()
+
+    def construct(self, data=None):
+        for idx in self._index_set:
+            if isinstance(idx, int):
+                name = str(self.name) + "[" + str(idx) + "]"
+            else:
+                name = str(self.name) + str(list(idx)).replace(" ", "")
+            self._vars[idx] = JumpVar(self._varinfo[idx], name)
+        self._constructed = True
+
+    def setub(self, value):
+        for idx in self.index_set():
+            self._varinfo[idx].setub(value)
+        if self._constructed:
+            self.construct()
+
+    def setlb(self, value):
+        for idx in self.index_set():
+            self._varinfo[idx].setlb(value)
+        if self._constructed:
+            self.construct()
+
+    @property
+    def ctype(self):
+        return self._ctype
+
+    def index_set(self):
+        return self._index_set
+
+    @property
+    def name(self):
+        return self._name
+
+    def to_jump(self):
+        if self._constructed:
+            return jump.Containers.DenseAxisArray(
+                list(self._vars.values()), self.index_set()
+            )
+
+
+"""
+Future formats to implement.
+"""
+
+
+class OmltIndexedSmoke(OmltIndexed):
+    format = "smoke"
+
+    def __init__(self, *args, **kwargs):
+        raise ValueError(
+            "Storing variables in Smoke format is not currently implemented."
+        )
+
+
+class OmltIndexedGurobi(OmltIndexed):
+    format = "gurobi"
+
+    def __init__(self, *args, **kwargs):
+        raise ValueError(
+            "Storing variables in Gurobi format is not currently implemented."
+        )
+
+
+class OmltSet:
+    def __init__(self):
+        pass
+
+
+class OmltExpression:
+    def __init__(self):
+        pass
diff --git a/src/omlt/block.py b/src/omlt/block.py
index 26ef423d..11956f48 100644
--- a/src/omlt/block.py
+++ b/src/omlt/block.py
@@ -25,7 +25,8 @@ class is used in combination with a formulation object to construct the
 
 import warnings
 
-from omlt.base import OmltVar
+from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE
+
 import pyomo.environ as pyo
 from pyomo.core.base.block import _BlockData, declare_custom_block
 
@@ -38,7 +39,7 @@ def __init__(self, component):
         self.__formulation = None
         self.__input_indexes = None
         self.__output_indexes = None
-        self.__format = "pyomo"
+        self.__format = DEFAULT_MODELING_LANGUAGE
 
     def _setup_inputs_outputs(self, *, input_indexes, output_indexes):
         """
diff --git a/tests/test_block.py b/tests/test_block.py
index 03f025b7..c58da133 100644
--- a/tests/test_block.py
+++ b/tests/test_block.py
@@ -2,6 +2,7 @@
 import pytest
 
 from omlt import OmltBlock
+from omlt.base import OmltVar
 
 
 class dummy_formulation(object):
@@ -43,6 +44,11 @@ def test_block():
 def test_jump_block():
     m = pyo.ConcreteModel()
     m.b = OmltBlock()
+
+    m.b.x = OmltVar(initialize=(2, 7), format="jump")
+
+    assert m.b.x.value == (2, 7)
+
     formulation = dummy_formulation()
 
     m.b.build_formulation(formulation, format="jump")

From 820f5f6db3aac333ee80188d50c4aa5ae8d53d0c Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 19 Apr 2024 19:19:29 +0000
Subject: [PATCH 05/60] Cleaning up variables - MOI dependency

---
 src/omlt/base/julia.py   | 6 ++----
 src/omlt/dependencies.py | 8 --------
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py
index 1e6fb413..8dbb23df 100644
--- a/src/omlt/base/julia.py
+++ b/src/omlt/base/julia.py
@@ -1,13 +1,11 @@
-from
omlt.dependencies import julia_available from omlt.base.expression import OmltExpression -if julia_available and moi_available: +if julia_available: from juliacall import Main as jl from juliacall import Base jl_err = Base.error - jl.seval("import MathOptInterface") - moi = jl.MathOptInterface jl.seval("import JuMP") jump = jl.JuMP diff --git a/src/omlt/dependencies.py b/src/omlt/dependencies.py index 595e2274..3b882da2 100644 --- a/src/omlt/dependencies.py +++ b/src/omlt/dependencies.py @@ -10,11 +10,3 @@ lineartree, lineartree_available = attempt_import("lineartree") julia, julia_available = attempt_import("juliacall") - -if julia_available: - from juliacall import Main as jl - try: - jl.seval("import MathOptInterface") - moi_available = True - except jl.ArgumentError: - moi_available = False From 55e338b9d7a686b550e14feaee13b06ee51187b7 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:19:29 +0000 Subject: [PATCH 06/60] Cleaning up variables - MOI dependency --- src/omlt/base/julia.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index 8dbb23df..1a4bcf0c 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,6 +1,7 @@ from omlt.dependencies import julia_available -from omlt.base.expression import OmltExpression +# from omlt.base.expression import OmltExpression +if julia_available: if julia_available: from juliacall import Main as jl from juliacall import Base @@ -102,11 +103,11 @@ def add_to_model(self, model, name=None): def to_jump(self): return self.var - def __sub__(self, other): - return OmltExpression(expr=(self, "-", other), format="jump") + # def __sub__(self, other): + # return OmltExpression(expr=(self, "-", other), format="jump") - def __mul__(self, other): - return OmltExpression(expr=(self, "*", other), format="jump") + # def __mul__(self, other): + # return OmltExpression(expr=(self, "*", other), format="jump") - def __eq__(self, other): - return OmltExpression(expr=(self, "==", other), format="jump") + # def __eq__(self, other): + # return OmltExpression(expr=(self, "==", other), format="jump") From fa7a859a26260aeb4de057ee589d3e579fa05735 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:52:08 -0700 Subject: [PATCH 07/60] Removing duplicate line --- src/omlt/base/julia.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index 1a4bcf0c..e2e771d6 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,7 +1,6 @@ from omlt.dependencies import julia_available # from omlt.base.expression import OmltExpression -if julia_available: if julia_available: from juliacall import Main as jl from juliacall import Base From 5790bad956ae13963e112a57a2bbead512b178be Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 16:58:01 -0700 Subject: [PATCH 08/60] Getting dependencies lined up correctly --- src/omlt/base/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 328ea98a..292c5eb2 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,6 +1,8 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" +from omlt.dependencies import julia_available -from omlt.base.julia import jump +if julia_available: + from omlt.base.julia import jump from 
omlt.base.var import OmltVar # from omlt.base.expression import OmltExpression From 8cc7bf649922ffa83f48cfebe083ba0836805c9b Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:29:08 -0700 Subject: [PATCH 09/60] Update var.py --- src/omlt/base/var.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index eea523c6..176fe372 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -9,7 +9,10 @@ from abc import ABC, abstractmethod import pyomo.environ as pyo -from omlt.base import jump, DEFAULT_MODELING_LANGUAGE +from omlt.dependencies import julia_available + +if julia_available: + from omlt.base import jump, DEFAULT_MODELING_LANGUAGE from omlt.base.julia import JuMPVarInfo, JumpVar From cad7ba28236f1cec5a13ba87f3df75a068023ee6 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:35:36 -0700 Subject: [PATCH 10/60] Update var.py --- src/omlt/base/var.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 176fe372..f22d8183 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -11,8 +11,9 @@ from omlt.dependencies import julia_available +from omlt.base import DEFAULT_MODELING_LANGUAGE if julia_available: - from omlt.base import jump, DEFAULT_MODELING_LANGUAGE + from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar From 3abb7ffb085f4b99fd32722740e3417fbc867fed Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:43:43 -0700 Subject: [PATCH 11/60] Make test for JuMP variables conditional on presence of JuMP --- tests/test_block.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_block.py b/tests/test_block.py index c58da133..74fb9290 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -3,6 +3,8 @@ from omlt import OmltBlock from omlt.base import OmltVar +from omlt.dependencies import julia_available + class dummy_formulation(object): @@ -40,7 +42,9 @@ def test_block(): assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] - +@pytest.mark.skipif( + not julia_available, reason="Test only valid when Julia is available" +) def test_jump_block(): m = pyo.ConcreteModel() m.b = OmltBlock() From 9621f3d25ae7049e3fe0dc2af5957617de16fbab Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:01:19 -0700 Subject: [PATCH 12/60] Use tensorflow-cpu for testing to save space --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 7b4233e8..90a56457 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,7 +72,7 @@ testing = nbmake tox flake8 - tensorflow + tensorflow-cpu ipywidgets jupyter lightgbm From 725348beb392956a85a982d84108568c38ac397e Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sat, 20 Apr 2024 23:21:18 -0700 Subject: [PATCH 13/60] Fix Keras version at 2.9 Keras 3 requires models to have the .keras file format. Going forward we should probably update the test models to use this format, but to unblock I'm holding back the Keras version. 
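
For reference, migrating a test model to the new format later would be a
one-time re-save, along these lines (an illustrative sketch assuming a
Keras 3 environment; the file names here are hypothetical):

    import keras

    # Load a legacy HDF5 model and re-save it in the native Keras 3 format.
    model = keras.models.load_model("tests/models/example.h5")
    model.save("tests/models/example.keras")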
--- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 90a56457..7ea8599e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -79,7 +79,7 @@ testing = linear-tree matplotlib pandas - keras + keras==2.9.0 onnx onnxruntime onnxmltools From 19a7128eb4b7d397a2eb776966747d066a15e539 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Apr 2024 00:16:13 +0000 Subject: [PATCH 14/60] removing tweaked action file --- .github/workflows/main.yml | 58 -------------------------------------- 1 file changed, 58 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 3e302f03..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - # python-version: ["3.7", "3.8", "3.9"] - python-version: ["3.8", "3.9", "3.10"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - # conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v2" - with: - fail_ci_if_error: true From 015324f1a331f5ba7b16867457c7b90c8f3ffe7f Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 22 Apr 2024 00:20:48 +0000 Subject: [PATCH 15/60] restoring action workflow file --- .github/workflows/main.yml | 57 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000..528fdaf0 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,57 @@ +--- + name: CI + + on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + + jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + # python-version: ["3.7", "3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] + + steps: + - uses: "actions/checkout@v2" + - uses: "actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade 
coverage[toml] virtualenv tox tox-gh-actions
+           conda install -c conda-forge ipopt
+           conda install -c conda-forge pyscipopt
+       - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}"
+         run: "tox -re leanenv"
+
+       - name: "Run tox targets for ${{ matrix.python-version }}"
+         run: "tox"
+
+       # - name: "Run tox notebooks targets for ${{ matrix.python-version }}"
+       #   run: |
+       #     shopt -s globstar
+       #     tox -e notebooks docs/**/*.ipynb
+
+       - name: "Convert coverage"
+         run: "python -m coverage xml"
+
+       - name: "Upload coverage to Codecov"
+         uses: "codecov/codecov-action@v2"
+         with:
+           fail_ci_if_error: true
\ No newline at end of file

From da6c316eea84e9679dec0d8784b60da0a5c32e53 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Mon, 22 Apr 2024 00:35:06 +0000
Subject: [PATCH 16/60] Fixing some whitespace linting

---
 src/omlt/neuralnet/layer.py            |  3 ++-
 tests/neuralnet/test_nn_formulation.py | 15 +++++++++++----
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py
index fd1b3234..16e068a3 100644
--- a/src/omlt/neuralnet/layer.py
+++ b/src/omlt/neuralnet/layer.py
@@ -16,6 +16,7 @@
 \end{align*}
 """
+
 import itertools

 import numpy as np
@@ -254,7 +255,7 @@ class GNNLayer(DenseLayer):
     .. math::

         \begin{align*}
-        y_j = \sigma \left(\sum\limits_{i=0}^{F_{in}-1}A_{u,v}w_{ij}x_i+b_j\right), && \forall 0\le j<F_{out}

From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 17 May 2024 11:38:30 -0700
Subject: [PATCH 17/60] Update Python versions in main.yml

---
 .github/workflows/main.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 528fdaf0..afde145f 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -16,7 +16,7 @@
      strategy:
        matrix:
          # python-version: ["3.7", "3.8", "3.9"]
-         python-version: ["3.8", "3.9", "3.10"]
+         python-version: ["3.9", "3.10", "3.11", "3.12"]

      steps:
        - uses: "actions/checkout@v2"
        - uses: "actions/setup-python@v2"
        - uses: "s-weigand/setup-conda@v1"
          with:
            python-version: "${{ matrix.python-version }}"
@@ -52,4 +52,4 @@
        - name: "Upload coverage to Codecov"
          uses: "codecov/codecov-action@v2"
          with:
-           fail_ci_if_error: true
\ No newline at end of file
+           fail_ci_if_error: true

From f7e1e7f759f3f66fa69aa4c88366aa9377ad99fa Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 17 May 2024 11:39:03 -0700
Subject: [PATCH 18/60] Update setup.cfg for Keras version

---
 setup.cfg | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 7ea8599e..cb8d5e2e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -48,7 +48,7 @@ python_requires = >=3.7
 install_requires =
     importlib-metadata; python_version<"3.8"
     networkx
-    pyomo
+    pyomo==6.6.2
     numpy
     protobuf==3.20.3

@@ -79,7 +79,7 @@ testing =
     linear-tree
     matplotlib
     pandas
-    keras==2.9.0
+    keras>=3.0
     onnx
     onnxruntime
     onnxmltools

From 63798c322f313490a771d6182ddffa11e4618b67 Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 17 May 2024 11:40:35 -0700
Subject: [PATCH 19/60] Update main.yml

---
 .github/workflows/main.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index afde145f..3f9a8f8d 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -15,7 +15,6 @@
      strategy:
        matrix:
-         # python-version: ["3.7", "3.8", "3.9"]
          python-version: ["3.9", "3.10", "3.11", "3.12"]

      steps:
@@ -52,6 +51,6 @@
          run: "python -m coverage xml"

        - name: "Upload coverage to Codecov"
-         uses: 
"codecov/codecov-action@v2" + uses: "codecov/codecov-action@v4" with: fail_ci_if_error: true From 175f61353fa183420ff0196d8aff45e5fe1ea510 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:42:42 -0700 Subject: [PATCH 20/60] Update main.yml --- .github/workflows/main.yml | 111 +++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3f9a8f8d..34aef0e0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,56 +1,57 @@ --- - name: CI - - on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - - jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v4" - with: - fail_ci_if_error: true +name: CI + +on: + push: + branches: ["main","github-actions"] + pull_request: + branches: ["main"] + workflow_dispatch: + +jobs: + tests: + name: "Python ${{ matrix.python-version }}" + runs-on: "ubuntu-latest" + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: "actions/checkout@v2" + - uses: "actions/setup-python@v2" + - uses: "s-weigand/setup-conda@v1" + with: + python-version: "${{ matrix.python-version }}" + + - name: Install solvers + run: sudo apt-get install -y glpk-utils coinor-cbc + + - name: "Install dependencies" + run: | + set -xe + python -VV + python -m site + python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions + conda install -c conda-forge ipopt + conda install -c conda-forge pyscipopt + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" + run: "tox -re leanenv" + + - name: "Run tox targets for ${{ matrix.python-version }}" + run: "tox" + + # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" + # run: | + # shopt -s globstar + # tox -e notebooks docs/**/*.ipynb + + - name: "Convert coverage" + run: "python -m coverage xml" + + - name: "Upload coverage to Codecov" + uses: "codecov/codecov-action@v4" + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true From 40ef6b1c81da3c9e884b1d3d2558583b48ac6966 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:44:36 -0700 Subject: [PATCH 21/60] 
Update main.yml --- .github/workflows/main.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 34aef0e0..bd222b2f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,17 +36,13 @@ jobs: python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions conda install -c conda-forge ipopt conda install -c conda-forge pyscipopt + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" run: "tox -re leanenv" - name: "Run tox targets for ${{ matrix.python-version }}" run: "tox" - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - name: "Convert coverage" run: "python -m coverage xml" From e2224242898f0a4958e606a6400286ff082c6283 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Fri, 17 May 2024 11:45:42 -0700 Subject: [PATCH 22/60] Update main.yml --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index bd222b2f..2c92b089 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,7 +36,7 @@ jobs: python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions conda install -c conda-forge ipopt conda install -c conda-forge pyscipopt - + - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" run: "tox -re leanenv" @@ -49,5 +49,5 @@ jobs: - name: "Upload coverage to Codecov" uses: "codecov/codecov-action@v4" with: - token: ${{ secrets.CODECOV_TOKEN }} - fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true From 0babe628f5e8f317092f589fe17d280d23e0eed9 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:04:57 +0100 Subject: [PATCH 23/60] wip --- Makefile | 10 --- justfile | 58 +++++++++++++++++ pyproject.toml | 107 +++++++++++++++++++++++++++++-- setup.cfg | 171 ------------------------------------------------- setup.py | 21 ------ tox.ini | 117 --------------------------------- 6 files changed, 161 insertions(+), 323 deletions(-) delete mode 100644 Makefile create mode 100644 justfile delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 tox.ini diff --git a/Makefile b/Makefile deleted file mode 100644 index cba83db0..00000000 --- a/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: develop docs test - -develop: - python -m pip install -e .[testing] - -docs: - python -m tox -e docs - -test: - python -m tox \ No newline at end of file diff --git a/justfile b/justfile new file mode 100644 index 00000000..bfea2cb3 --- /dev/null +++ b/justfile @@ -0,0 +1,58 @@ +# List all commands. +default: + @just --list + +# Build docs. +docs: + rm -rf docs/build docs/source/_autosummary + make -C docs html + echo Docs are in $PWD/docs/build/html/index.html + +conda-deps := " \ + conda-forge::ipopt \ + conda-forge::pyscipopt \ + conda-forge::coin-or-cbc \ +" + +# Do a dev install. +dev: + pip install -e '.[dev]' + conda install {{conda-deps}} + +# Do a dev install with GPU support. +dev-gpu: + pip install -e '.[dev-gpu]' + conda install {{conda-deps}} + +# Run code checks. 
+check: + #!/usr/bin/env bash + + error=0 + trap error=1 ERR + + echo + (set -x; ruff check src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; ruff format --check src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; mypy src/ tests/ docs/source/ examples/ ) + + echo + ( set -x; pytest ) + + echo + ( set -x; make -C docs doctest ) + + test $error = 0 + +# Auto-fix code issues. +fix: + ruff format src/ tests/ docs/source/ examples/ + ruff check --fix src/ tests/ docs/source/ examples/ + +# Build a release. +build: + python -m build diff --git a/pyproject.toml b/pyproject.toml index 2c63dbb2..e44fbb28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,107 @@ [build-system] -# AVOID CHANGING REQUIRES: IT WILL BE UPDATED BY PYSCAFFOLD! -requires = ["setuptools>=46.1.0", "setuptools_scm[toml]>=5", "wheel"] +requires = ["setuptools", "setuptools_scm"] build-backend = "setuptools.build_meta" +[project] +name = "omlt" +authors = [ + { name = "The OMLT Developers", email = "omlt@googlegroups.com" }, +] + +dependencies = [ + "networkx", + "numpy", + "pyomo", + "onnx", + "onnxruntime", +] +requires-python = ">=3.7" +dynamic = ["version"] +readme = "README.rst" +license = { file = "LICENSE.rst" } +description = "OMLT is a Python package for representing machine learning models (such as neural networks) within the Pyomo optimization environment." + +[project.optional-dependencies] +linear-tree = ["linear-tree"] +keras = ["tensorflow", "keras"] +keras-gpu = ["tensorflow[and-cuda]", "keras"] +torch = ["torch", "torch-geometric"] +dev-tools = [ + "ruff", + "mypy", + "pytest", + "pytest-cov", + "sphinx", + "sphinx-copybutton", + "build", + "twine", + "furo", + "testbook", + "notebook", + "pandas", + "matplotlib", + "gurobipy", + "torchvision", + "tf2onnx", +] +dev = [ + "omlt[dev-tools,keras,torch,linear-tree]", +] +dev-gpu = [ + "omlt[dev-tools,keras-gpu,torch,linear-tree]", +] + + +[project.urls] +github = "https://github.com/cog-imperial/OMLT" +x = "https://x.com/cogimperial" +documentation = "https://omlt.readthedocs.io" + [tool.setuptools_scm] -# See configuration details in https://github.com/pypa/setuptools_scm -version_scheme = "no-guess-dev" + +[tool.ruff] +line-length = 88 + +[tool.ruff.lint] +select = ["ALL"] +ignore = ["ANN101", "ANN401", "COM812", "ISC001"] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.per-file-ignores] +"tests/*" = [ + "D100", + "D101", + "D102", + "D103", + "D104", + "D105", + "D106", + "D107", + "S101", + "INP001", +] +"examples/*" = [ + "INP001", +] +"docs/source/conf.py" = ["D100", "INP001"] + +[tool.mypy] +show_error_codes = true +implicit_optional = false +warn_no_return = true +strict_optional = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +warn_unreachable = true +disallow_any_generics = true + +[[tool.mypy.overrides]] +module = [] +ignore_missing_imports = true + +[tool.pytest.ini_options] +addopts = "--cov omlt --cov-report term-missing --verbose" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index cb8d5e2e..00000000 --- a/setup.cfg +++ /dev/null @@ -1,171 +0,0 @@ -# This file is used to configure your project. 
-# Read more about the various options under: -# http://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files - -[metadata] -name = omlt -description = OMLT is a Python package for representing machine learning models (such as neural networks) within the Pyomo optimization environment. -author = The OMLT Developers -author_email = omlt@googlegroups.com -license = BSD 3-Clause -long_description = file: README.rst -long_description_content_type = text/x-rst; charset=UTF-8 -url = https://github.com/cog-imperial/OMLT/ -# Add here related links, for example: -project_urls = - Source = https://github.com/cog-imperial/OMLT/ - Twitter = https://twitter.com/cogimperial -# Changelog = https://pyscaffold.org/en/latest/changelog.html -# Tracker = https://github.com/pyscaffold/pyscaffold/issues -# Conda-Forge = https://anaconda.org/conda-forge/pyscaffold -# Download = https://pypi.org/project/PyScaffold/#files - -# Change if running only on Windows, Mac or Linux (comma-separated) -platforms = any - -# Add here all kinds of additional classifiers as defined under -# https://pypi.python.org/pypi?%3Aaction=list_classifiers -classifiers = - Development Status :: 4 - Beta - Programming Language :: Python - - -[options] -zip_safe = False -packages = find_namespace: -include_package_data = True -package_dir = - =src - -# Require a min/specific Python version (comma-separated conditions) -# OMLT currently supports Python 3.7 and above -python_requires = >=3.7 - -# Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. -# Version specifiers like >=2.2,<3.0 avoid problems due to API changes in -# new major versions. This works if the required packages follow Semantic Versioning. -# For more information, check out https://semver.org/. -install_requires = - importlib-metadata; python_version<"3.8" - networkx - pyomo==6.6.2 - numpy - protobuf==3.20.3 - - -[options.packages.find] -where = src -exclude = - tests - -[options.extras_require] -# Add here additional requirements for extra features, to install with: -# `pip install omlt[PDF]` like: -# PDF = ReportLab; RXP - -# Add here test requirements (semicolon/line-separated) -testing = - setuptools - pytest - pytest-cov - testbook - nbmake - tox - flake8 - tensorflow-cpu - ipywidgets - jupyter - lightgbm - linear-tree - matplotlib - pandas - keras>=3.0 - onnx - onnxruntime - onnxmltools - tf2onnx>=1.12 - torch - torchvision - tqdm - protobuf==3.20.3 - torch_geometric - -testing_lean = - setuptools - pytest - pytest-cov - testbook - nbmake - tox - flake8 - ipywidgets - jupyter - lightgbm - matplotlib - pandas - torch - torchvision - tqdm - -[options.entry_points] -# Add here console scripts like: -# console_scripts = -# script_name = omlt.module:function -# For example: -# console_scripts = -# fibonacci = omlt.skeleton:run -# And any other entry points, for example: -# pyscaffold.cli = -# awesome = pyscaffoldext.awesome.extension:AwesomeExtension - -[tool:pytest] -# Specify command line options as you would do when invoking pytest directly. -# e.g. --cov-report html (or xml) for html/xml output or --junitxml junit.xml -# in order to write a coverage file that can be read by Jenkins. -# CAUTION: --cov flags may prohibit setting breakpoints while debugging. -# Comment those flags to avoid this py.test issue. 
-addopts = - --cov omlt --cov-report term-missing - --verbose -norecursedirs = - dist - build - .tox -testpaths = tests -# Use pytest markers to select/deselect specific tests -# markers = -# slow: mark tests as slow (deselect with '-m "not slow"') -# system: mark end-to-end system tests - -[bdist_wheel] -# Use this option if your package is pure-python -universal = 1 - -[devpi:upload] -# Options for the devpi: PyPI server and packaging tool -# VCS export must be deactivated since we are using setuptools-scm -no_vcs = 1 -formats = bdist_wheel - -[flake8] -# Some sane defaults for the code style checker flake8 -max_line_length = 88 -extend_ignore = E203, W503 -# ^ Black-compatible -# E203 and W503 have edge cases handled by black -exclude = - .tox - build - dist - .eggs - docs/conf.py -per_file_ignores = - # ignore docstrings in tests - tests/*:D100,D101,D102,D103,D104,D105,D106,D107 - -[pyscaffold] -# PyScaffold's parameters when the project was created. -# This will be used when updating. Do not change! -version = 4.0.2 -package = omlt -extensions = diff --git a/setup.py b/setup.py deleted file mode 100644 index 57314fee..00000000 --- a/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -""" - Setup file for omlt. - Use setup.cfg to configure your project. - - This file was generated with PyScaffold 4.0.2. - PyScaffold helps you to put up the scaffold of your new Python project. - Learn more under: https://pyscaffold.org/ -""" -from setuptools import setup - -if __name__ == "__main__": - try: - setup(use_scm_version={"version_scheme": "no-guess-dev"}) - except: # noqa - print( - "\n\nAn error occurred while building the project, " - "please ensure you have the most updated version of setuptools, " - "setuptools_scm and wheel with:\n" - " pip install -U setuptools setuptools_scm wheel\n\n" - ) - raise diff --git a/tox.ini b/tox.ini deleted file mode 100644 index e64ab1d8..00000000 --- a/tox.ini +++ /dev/null @@ -1,117 +0,0 @@ -# Tox configuration file -# Read more under https://tox.readthedocs.org/ -# THIS SCRIPT IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS! - -[tox] -minversion = 3.15 -envlist = py36, py37, py38, py39, py310, py311, py312, lint - -[gh-actions] -python = - 3.6: py36 - 3.7: py37 - 3.8: py38 - 3.9: lint, py39 - 3.10: py310 - 3.11: py311 - 3.12: py312 - -[testenv] -deps = pytest -extras = testing -commands = pytest {posargs} - -#[testenv:fullenv] -#description = Testing with full dependencies -#deps = pytest -#extras = testing -#commands = pytest {posargs} - -[testenv:leanenv] -description = Testing with fewer dependencies -deps = pytest -extras = testing_lean -commands = pytest {posargs} - -[testenv:notebooks] -deps = pytest -extras = testing -commands = pytest --nbmake --cov-append {posargs} - -[testenv:{clean,build}] -description = - Build (or clean) the package in isolation according to instructions in: - https://setuptools.readthedocs.io/en/latest/build_meta.html#how-to-use-it - https://github.com/pypa/pep517/issues/91 - https://github.com/pypa/build -# NOTE: build is still experimental, please refer to the links for updates/issues -skip_install = True -changedir = {toxinidir} -deps = - build: build[virtualenv] -commands = - clean: python -c 'from shutil import rmtree; rmtree("build", True); rmtree("dist", True)' - build: python -m build . 
-# By default `build` produces wheels, you can also explicitly use the flags `--sdist` and `--wheel` - - -[testenv:{docs,doctests}] -description = invoke sphinx-build to build the docs/run doctests -setenv = - DOCSDIR = {toxinidir}/docs - BUILDDIR = {toxinidir}/docs/_build - docs: BUILD = html - doctests: BUILD = doctest -deps = - -r {toxinidir}/docs/requirements.txt - # ^ requirements.txt shared with Read The Docs -commands = - sphinx-build -b {env:BUILD} -d "{env:BUILDDIR}/doctrees" "{env:DOCSDIR}" "{env:BUILDDIR}/{env:BUILD}" {posargs} - - -[testenv:publish] -description = - Publish the package you have been developing to a package index server. - By default, it uses testpypi. If you really want to publish your package - to be publicly accessible in PyPI, use the `-- --repository pypi` option. -skip_install = True -changedir = {toxinidir} -passenv = - TWINE_USERNAME - TWINE_PASSWORD - TWINE_REPOSITORY -deps = twine -commands = - python -m twine check dist/* - python -m twine upload {posargs:--repository testpypi} dist/* - -[flake8] -extend-ignore = D, E, F, N -per-file-ignores = __init__.py:F401 - -[testenv:lint] -description = Lint files using isort, black, and flake8 -skip_install = True -changedir = {toxinidir} -deps = - black - flake8 - flake8-bugbear - flake8-docstrings - isort - pep8-naming -commands = - flake8 --config=tox.ini src/omlt tests/ - black --check --diff src/omlt tests/ - -[testenv:format] -description = Format Python files using isort and black -skip_install = true -changedir = {toxinidir} -deps = - black - typing-extensions - isort -commands = - isort src/omlt tests - black src/omlt tests From 30a36d12dbbe5d9cb170f6c9b5ab9c8b079273b4 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:20:10 +0100 Subject: [PATCH 24/60] Add workflows --- .github/workflows/publish_release.yml | 23 +++++++++ .github/workflows/tests.yml | 73 +++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 .github/workflows/publish_release.yml create mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/publish_release.yml b/.github/workflows/publish_release.yml new file mode 100644 index 00000000..20a9e104 --- /dev/null +++ b/.github/workflows/publish_release.yml @@ -0,0 +1,23 @@ +name: Publish release +on: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' +jobs: + publish-release: + runs-on: ubuntu-22.04 + env: + VERSION: ${{ github.ref_name }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + cache: "pip" + - run: pip install -e '.[dev]' + - run: python -m build + - run: + twine upload + -u __token__ + -p ${{ secrets.PYPI_API_TOKEN }} + dist/* diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..155713d8 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,73 @@ +name: Tests +on: + push: + branches: + - main + pull_request: + workflow_dispatch: +jobs: + ruff: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: ruff check src/ tests/ docs/source/ examples/ + mypy: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + 
- run: pip install -e '.[dev]' + - run: mypy src/ tests/ docs/source/ examples/ + ruff-format: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: ruff format --check src/ tests/ docs/source/ examples/ + pytest: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: pytest --cov=src --cov-report term-missing + doctest: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + - run: pip install -e '.[dev]' + - run: make -C docs doctest From 027703f62fe4b805a7a95ec2701485e8c889f7d6 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Tue, 28 May 2024 22:41:59 +0100 Subject: [PATCH 25/60] wip --- .github/workflows/tests.yml | 6 +++--- justfile | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 155713d8..8b25e5a3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: ruff check src/ tests/ docs/source/ examples/ + - run: ruff check src/ tests/ docs/ mypy: strategy: matrix: @@ -31,7 +31,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: mypy src/ tests/ docs/source/ examples/ + - run: mypy src/ tests/ docs/ ruff-format: strategy: matrix: @@ -44,7 +44,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: ruff format --check src/ tests/ docs/source/ examples/ + - run: ruff format --check src/ tests/ docs/ pytest: strategy: matrix: diff --git a/justfile b/justfile index bfea2cb3..fcf1dc4d 100644 --- a/justfile +++ b/justfile @@ -32,13 +32,13 @@ check: trap error=1 ERR echo - (set -x; ruff check src/ tests/ docs/source/ examples/ ) + (set -x; ruff check src/ tests/ docs/ ) echo - ( set -x; ruff format --check src/ tests/ docs/source/ examples/ ) + ( set -x; ruff format --check src/ tests/ docs/ ) echo - ( set -x; mypy src/ tests/ docs/source/ examples/ ) + ( set -x; mypy src/ tests/ docs/ ) echo ( set -x; pytest ) @@ -50,8 +50,8 @@ check: # Auto-fix code issues. fix: - ruff format src/ tests/ docs/source/ examples/ - ruff check --fix src/ tests/ docs/source/ examples/ + ruff format src/ tests/ docs/ + ruff check --fix src/ tests/ docs/ # Build a release. 
build: From 3959e6a85a2363ca35e61fe8ecb08ae05deb7fee Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 15:43:30 +0100 Subject: [PATCH 26/60] Add stuff --- .gitignore | 2 ++ .readthedocs.yml | 9 +++++---- docs/requirements.txt | 10 ---------- environment.yml | 6 ++++++ justfile | 10 ++-------- pyproject.toml | 9 ++++++++- 6 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 docs/requirements.txt create mode 100644 environment.yml diff --git a/.gitignore b/.gitignore index 1b77d315..4fbb7dfd 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,8 @@ coverage.xml *.py,cover .hypothesis/ .pytest_cache/ +docs/notebooks/data/MNIST +docs/notebooks/neuralnet/*.keras # Translations *.mo diff --git a/.readthedocs.yml b/.readthedocs.yml index 6e41af22..2cb5498c 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -16,7 +16,7 @@ build: tools: - python: "3.8" + python: "3.12" # You can also specify other tool versions: @@ -58,7 +58,8 @@ sphinx: # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: - install: - - - requirements: docs/requirements.txt \ No newline at end of file + - method: pip + path: . + extra_requirements: + - docs diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 6305e50b..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# Required dependencies for Sphinx documentation -sphinx -sphinx-rtd-theme -numpy -pyomo -networkx -onnx -tensorflow -linear-tree -importlib-metadata \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..69566368 --- /dev/null +++ b/environment.yml @@ -0,0 +1,6 @@ +channels: + - conda-forge +dependencies: + - ipopt + - pyscipopt + - coin-or-cbc diff --git a/justfile b/justfile index fcf1dc4d..dccccbfc 100644 --- a/justfile +++ b/justfile @@ -8,21 +8,15 @@ docs: make -C docs html echo Docs are in $PWD/docs/build/html/index.html -conda-deps := " \ - conda-forge::ipopt \ - conda-forge::pyscipopt \ - conda-forge::coin-or-cbc \ -" - # Do a dev install. dev: pip install -e '.[dev]' - conda install {{conda-deps}} + conda env update --file environment.yml # Do a dev install with GPU support. dev-gpu: pip install -e '.[dev-gpu]' - conda install {{conda-deps}} + conda env update --file environment.yml # Run code checks. 
check: diff --git a/pyproject.toml b/pyproject.toml index e44fbb28..2db39b62 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,8 @@ authors = [ dependencies = [ "networkx", "numpy", - "pyomo", + # TODO: Remove constraint when fix to https://github.com/Pyomo/pyomo/issues/3262 is released + "pyomo==6.6.2", "onnx", "onnxruntime", ] @@ -50,6 +51,12 @@ dev = [ dev-gpu = [ "omlt[dev-tools,keras-gpu,torch,linear-tree]", ] +docs = [ + "sphinx", + "sphinx-rtd-theme", + "tensorflow", + "linear-tree", +] [project.urls] From c3b619b462c5f7b04f6a6c4060b063235559bf0c Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 15:45:45 +0100 Subject: [PATCH 27/60] Fix formatting --- src/omlt/neuralnet/activations/relu.py | 4 +--- src/omlt/neuralnet/layers/full_space.py | 8 ++------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 427be19a..e14718d7 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -77,9 +77,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): output_index ] <= layer_block.zhat[output_index] - layer_block._big_m_lb_relu[ output_index - ] * ( - 1.0 - layer_block.q_relu[output_index] - ) + ] * (1.0 - layer_block.q_relu[output_index]) class ComplementarityReLUActivation: diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 8970bc69..3e0e1e2a 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -131,9 +131,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): local_index, output_node_index ] = input_layer_block.zbar[ local_index, output_node_index - ] >= input_layer_block.z[ - input_index - ] - ub * ( + ] >= input_layer_block.z[input_index] - ub * ( 1.0 - net_block.A[input_node_index, output_node_index] ) @@ -141,9 +139,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): local_index, output_node_index ] = input_layer_block.zbar[ local_index, output_node_index - ] <= input_layer_block.z[ - input_index - ] - lb * ( + ] <= input_layer_block.z[input_index] - lb * ( 1.0 - net_block.A[input_node_index, output_node_index] ) From 691d3a6179b7ed0ab3701ac1aa3d42cbc765c03f Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 16:16:38 +0100 Subject: [PATCH 28/60] wip --- {docs => .github}/pull_request_template.md | 0 .github/workflows/tests.yml | 2 +- .gitignore | 1 + docs/Makefile | 15 +- docs/conf.py | 176 +-------------------- justfile | 4 +- pyproject.toml | 2 +- src/omlt/py.typed | 0 8 files changed, 13 insertions(+), 187 deletions(-) rename {docs => .github}/pull_request_template.md (100%) create mode 100644 src/omlt/py.typed diff --git a/docs/pull_request_template.md b/.github/pull_request_template.md similarity index 100% rename from docs/pull_request_template.md rename to .github/pull_request_template.md diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8b25e5a3..4201663f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -57,7 +57,7 @@ jobs: python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install -e '.[dev]' - - run: pytest --cov=src --cov-report term-missing + - run: pytest doctest: strategy: matrix: diff --git a/.gitignore b/.gitignore index 4fbb7dfd..d6b1a635 100644 --- a/.gitignore +++ b/.gitignore @@ -72,6 +72,7 @@ instance/ # Sphinx documentation docs/_build/ +docs/_autosummary # 
PyBuilder target/ diff --git a/docs/Makefile b/docs/Makefile index 95d96808..5117fbf5 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,27 +1,18 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. -SPHINXOPTS ?= +SPHINXOPTS ?= -W --keep-going SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build -#AUTODOCDIR = api - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $?), 1) -$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/") -endif - -.PHONY: help clean Makefile # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -clean: - rm -rf $(BUILDDIR)/* #$(AUTODOCDIR) +.PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/docs/conf.py b/docs/conf.py index a85d176d..c0a58377 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,70 +1,7 @@ -# This file is execfile()d with the current directory set to its containing dir. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import inspect -import os -import shutil import sys -# -- Path setup -------------------------------------------------------------- - -__location__ = os.path.join( - os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())) -) - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.join(__location__, "../src")) - -# -- Run sphinx-apidoc ------------------------------------------------------- -# This hack is necessary since RTD does not issue `sphinx-apidoc` before running -# `sphinx-build -b html . _build/html`. See Issue: -# https://github.com/rtfd/readthedocs.org/issues/1139 -# DON'T FORGET: Check the box "Install your project inside a virtualenv using -# setup.py install" in the RTD Advanced Settings. 
-# Additionally it helps us to avoid running apidoc manually - -try: # for Sphinx >= 1.7 - from sphinx.ext import apidoc -except ImportError: - from sphinx import apidoc - -# output_dir = os.path.join(__location__, "api") -# module_dir = os.path.join(__location__, "../src/omlt") -# try: -# shutil.rmtree(output_dir) -# except FileNotFoundError: -# pass - -# try: -# import sphinx - -# cmd_line_template = ( -# "sphinx-apidoc --implicit-namespaces -f -o {outputdir} {moduledir}" -# ) -# cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) - -# args = cmd_line.split(" ") -# if tuple(sphinx.__version__.split(".")) >= ("1", "7"): -# # This is a rudimentary parse_version to avoid external dependencies -# args = args[1:] - -# apidoc.main(args) -# except Exception as e: -# print("Running `sphinx-apidoc` failed!\n{}".format(e)) - # -- General configuration --------------------------------------------------- -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ @@ -86,15 +23,12 @@ # The suffix of source filenames. source_suffix = ".rst" -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - # General information about the project. project = "omlt" -copyright = "2022, Carnegie Mellon University, Imperial College London, Sandia National Laboratories" +project_copyright = ( + "2022, Carnegie Mellon University, " + "Imperial College London, Sandia National Laboratories" +) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -105,49 +39,20 @@ # The full version, including alpha/beta/rc tags. release = "" # Is set by calling `setup.py docs` -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv", "notebooks"] -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
-# html_theme = "furo" html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme @@ -164,8 +69,6 @@ }, } -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". @@ -176,64 +79,16 @@ else: release = version -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/omlt_logo.png" -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - # Output file base name for HTML help builder. htmlhelp_basename = "omlt-doc" @@ -255,31 +110,10 @@ ("index", "user_guide.tex", "OMLT Documentation", "The OMLT Developers", "manual") ] -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = "" - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. 
-# latex_domain_indices = True - # -- External mapping -------------------------------------------------------- -python_version = ".".join(map(str, sys.version_info[0:2])) intersphinx_mapping = { "sphinx": ("http://www.sphinx-doc.org/en/stable", None), - "python": ("https://docs.python.org/" + python_version, None), + "python": ("https://docs.python.org/3", None), "matplotlib": ("https://matplotlib.org", None), "numpy": ("https://docs.scipy.org/doc/numpy", None), "sklearn": ("https://scikit-learn.org/stable", None), diff --git a/justfile b/justfile index dccccbfc..30eddd46 100644 --- a/justfile +++ b/justfile @@ -4,9 +4,9 @@ default: # Build docs. docs: - rm -rf docs/build docs/source/_autosummary + rm -rf docs/_build docs/_autosummary make -C docs html - echo Docs are in $PWD/docs/build/html/index.html + echo Docs are in $PWD/docs/_build/html/index.html # Do a dev install. dev: diff --git a/pyproject.toml b/pyproject.toml index 2db39b62..aa773d6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,7 +92,7 @@ convention = "google" "examples/*" = [ "INP001", ] -"docs/source/conf.py" = ["D100", "INP001"] +"docs/conf.py" = ["D100", "INP001"] [tool.mypy] show_error_codes = true diff --git a/src/omlt/py.typed b/src/omlt/py.typed new file mode 100644 index 00000000..e69de29b From 8a896baf6ebc7582b8ff86ac28c7d4e1542c8146 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 16:25:46 +0100 Subject: [PATCH 29/60] wip --- docs/conf.py | 47 ++++------------------------------------------- 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index c0a58377..575f2b79 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,4 @@ -import sys +import omlt # -- General configuration --------------------------------------------------- @@ -30,19 +30,10 @@ "Imperial College London, Sandia National Laboratories" ) -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "" # Is set by calling `setup.py docs` -# The full version, including alpha/beta/rc tags. -release = "" # Is set by calling `setup.py docs` - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv", "notebooks"] +exclude_patterns = ["_build"] # The name of the Pygments (syntax highlighting) style to use. @@ -55,29 +46,8 @@ # a list of builtin themes. html_theme = "sphinx_rtd_theme" -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "light_css_variables": { - "color-brand-primary": "#003E74", - "color-brand-content": "#002147", - }, - "dark_css_variables": { - "color-brand-primary": "#0091D4", - "color-brand-content": "#D4EFFC", - }, -} - - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -try: - from omlt import __version__ as version -except ImportError: - pass -else: - release = version +version = omlt.__version__ +release = omlt.__version__ # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -95,15 +65,6 @@ # -- Options for LaTeX output ------------------------------------------------ -latex_elements = { - # The paper size ("letterpaper" or "a4paper"). 
- # "papersize": "letterpaper", - # The font size ("10pt", "11pt" or "12pt"). - # "pointsize": "10pt", - # Additional stuff for the LaTeX preamble. - # "preamble": "", -} - # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ From 25d31fec40af8225b90d348bd3657cba6c5d5583 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Wed, 29 May 2024 16:55:36 +0100 Subject: [PATCH 30/60] docs work --- .gitignore | 1 + docs/installation.rst | 4 ++-- pyproject.toml | 15 ++++++++------- src/omlt/__init__.py | 27 +++++++-------------------- 4 files changed, 18 insertions(+), 29 deletions(-) diff --git a/.gitignore b/.gitignore index d6b1a635..243e16e1 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST +src/omlt/_version.py # PyInstaller # Usually these files are written by a python script from a template diff --git a/docs/installation.rst b/docs/installation.rst index 15b116ae..2c586bb0 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -1,5 +1,5 @@ Installation -============== +============ OMLT requires Python >= 3.6. The most stable OMLT version can be installed using the PyPI package index. This will also install the required depencies. Simply run: :: @@ -13,7 +13,7 @@ If using the latest un-released version, install from the github repository and Optional Requirements -------------- +--------------------- OMLT can import sequential Keras models which requires a working installation of tensorflow: :: diff --git a/pyproject.toml b/pyproject.toml index aa773d6c..1e85484e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "onnx", "onnxruntime", ] -requires-python = ">=3.7" +requires-python = ">=3.9" dynamic = ["version"] readme = "README.rst" license = { file = "LICENSE.rst" } @@ -45,18 +45,18 @@ dev-tools = [ "torchvision", "tf2onnx", ] -dev = [ - "omlt[dev-tools,keras,torch,linear-tree]", -] -dev-gpu = [ - "omlt[dev-tools,keras-gpu,torch,linear-tree]", -] docs = [ "sphinx", "sphinx-rtd-theme", "tensorflow", "linear-tree", ] +dev = [ + "omlt[dev-tools,keras,torch,linear-tree,docs]", +] +dev-gpu = [ + "omlt[dev-tools,keras-gpu,torch,linear-tree,docs]", +] [project.urls] @@ -65,6 +65,7 @@ x = "https://x.com/cogimperial" documentation = "https://omlt.readthedocs.io" [tool.setuptools_scm] +write_to = "src/omlt/_version.py" [tool.ruff] line-length = 88 diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 12aafdd5..8e702f0b 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -9,25 +9,12 @@ """ -import sys - -if sys.version_info[:2] >= (3, 8): - # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` - from importlib.metadata import PackageNotFoundError # pragma: no cover - from importlib.metadata import version -else: - from importlib_metadata import PackageNotFoundError # pragma: no cover - from importlib_metadata import version - -try: - # Change here if project is renamed and does not equal the package name - dist_name = __name__ - __version__ = version(dist_name) -except PackageNotFoundError: # pragma: no cover - __version__ = "unknown" -finally: - del version, PackageNotFoundError - -# Top level exports +from omlt._version import __version__ from omlt.block import OmltBlock from omlt.scaling import OffsetScaling + +__all__ = [ + "OmltBlock", + "OffsetScaling", + "__version__", +] From c02be6771def4183dc3de9017718995e7fe33ce2 Mon Sep 17 00:00:00 
2001 From: Lukas Turcani Date: Wed, 29 May 2024 17:06:23 +0100 Subject: [PATCH 31/60] wip --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1e85484e..ec31ee06 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,7 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] -ignore = ["ANN101", "ANN401", "COM812", "ISC001"] +ignore = ["ANN101", "ANN401"] [tool.ruff.lint.pydocstyle] convention = "google" From d8390716e05d5724bf0fadb1a7f572eb60d209fb Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:06:33 +0100 Subject: [PATCH 32/60] Update checks --- pyproject.toml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ec31ee06..dffd39e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,16 @@ line-length = 88 [tool.ruff.lint] select = ["ALL"] -ignore = ["ANN101", "ANN401"] +ignore = [ + "ANN101", + "ANN401", + # Remove these eventually + "ANN001", + "ANN002", + "ANN201", + "ANN202", + "ANN204", +] [tool.ruff.lint.pydocstyle] convention = "google" From 77c05880f0a7b25a19f8dd67f6d0bc36fc960b46 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:10:14 +0100 Subject: [PATCH 33/60] update checks --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index dffd39e8..ad26567a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,12 +69,15 @@ write_to = "src/omlt/_version.py" [tool.ruff] line-length = 88 +extend-exclude = ["src/omlt/_version.py"] [tool.ruff.lint] select = ["ALL"] ignore = [ "ANN101", "ANN401", + "COM812", + "ISC001", # Remove these eventually "ANN001", "ANN002", From a61fb827cbddd0bf8c6d7a55826143f5c3a52749 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:21:39 +0100 Subject: [PATCH 34/60] update checks --- pyproject.toml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ad26567a..4ad1ca44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,7 @@ ignore = [ "ANN401", "COM812", "ISC001", - # Remove these eventually + # TODO: Remove these eventually "ANN001", "ANN002", "ANN201", @@ -112,7 +112,8 @@ show_error_codes = true implicit_optional = false warn_no_return = true strict_optional = true -disallow_untyped_defs = true +# TODO: Enable eventually +# disallow_untyped_defs = true disallow_incomplete_defs = true check_untyped_defs = true disallow_untyped_decorators = true @@ -120,7 +121,19 @@ warn_unreachable = true disallow_any_generics = true [[tool.mypy.overrides]] -module = [] +module = [ + "pandas.*", + "networkx.*", + "tf2onnx.*", + "onnxruntime.*", + "lineartree.*", + "sklearn.*", + "testbook.*", + "pyomo.*", + "keras.*", + "tensorflow.*", + "torch_geometric.*", +] ignore_missing_imports = true [tool.pytest.ini_options] From e0b35b21725024bdbeb5045c3c99024c7f789d66 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:34:35 +0100 Subject: [PATCH 35/60] Add conda --- .github/workflows/tests.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4201663f..419eadc9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -56,6 +56,15 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" + - uses: actions/cache@v3 + with: + path: ~/conda_pkgs_dir + key: ${{ runner.os }}-conda-${{ hashFiles('environment.yml') }} + - 
uses: conda-incubator/setup-miniconda@v3 + with: + channel-priority: strict + environment-file: environment.yml + use-only-tar-bz2: true - run: pip install -e '.[dev]' - run: pytest doctest: From 1a134310423e487db344509a3f4bff28b18a3e17 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 11:37:53 +0100 Subject: [PATCH 36/60] Thing --- .github/workflows/main.yml | 53 ------------------------------------- .github/workflows/tests.yml | 5 ++++ 2 files changed, 5 insertions(+), 53 deletions(-) delete mode 100644 .github/workflows/main.yml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 2c92b089..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v4" - with: - token: ${{ secrets.CODECOV_TOKEN }} - fail_ci_if_error: true diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 419eadc9..7f662cb9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -67,6 +67,11 @@ jobs: use-only-tar-bz2: true - run: pip install -e '.[dev]' - run: pytest + - run: python -m coverage xml + - uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true doctest: strategy: matrix: From e8c20f6b00645e278f6e9562b6aa1af8a410fd13 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 12:57:50 +0100 Subject: [PATCH 37/60] Add thing --- .github/workflows/tests.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7f662cb9..e1b1541b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,7 +17,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: strategy: @@ -30,7 +30,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ ruff-format: strategy: @@ -43,7 +43,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: ruff format --check src/ tests/ docs/ pytest: strategy: @@ -65,7 +65,7 @@ jobs: channel-priority: strict environment-file: environment.yml 
use-only-tar-bz2: true - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: pytest - run: python -m coverage xml - uses: codecov/codecov-action@v4 @@ -83,5 +83,5 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: "pip" - - run: pip install -e '.[dev]' + - run: pip install '.[dev]' - run: make -C docs doctest From 3c6148a99d92b535225970250d16b88256bee7c4 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:03:26 +0100 Subject: [PATCH 38/60] wip --- .github/workflows/tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e1b1541b..d8af318e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -66,7 +66,8 @@ jobs: environment-file: environment.yml use-only-tar-bz2: true - run: pip install '.[dev]' - - run: pytest + - shell: bash -el {0} + run: pytest - run: python -m coverage xml - uses: codecov/codecov-action@v4 with: From 5c8be2ba5cf98466e8e104965aaca73f47cb6e14 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:17:32 +0100 Subject: [PATCH 39/60] update docs --- README.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.rst b/README.rst index e99fe927..78fab6f4 100644 --- a/README.rst +++ b/README.rst @@ -142,14 +142,14 @@ Example Development =========== -OMLT uses `tox` to manage development tasks: +OMLT uses ``just`` to manage development tasks: -* `tox -av` to list available tasks -* `tox` to run tests -* `tox -e lint` to check formatting and code styles -* `tox -e format` to automatically format files -* `tox -e docs` to build the documentation -* `tox -e publish` to publish the package to PyPi +* ``just`` to list available tasks +* ``just check`` to run all checks +* ``just fix`` to apply any auto-fixes +* ``just dev`` to install development dependencies +* ``just dev-gpu`` to install development dependencies but with GPU support +* ``just docs`` to build the documentation Contributors ============ @@ -224,4 +224,4 @@ Contributors .. _zshiqiang: https://github.com/zshiqiang .. 
|zshiqiang| image:: https://avatars.githubusercontent.com/u/91337036?v=4 - :width: 80px + :width: 80px From b51ba7dc1b7b4c1998203f6592a186741d09baaa Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:18:08 +0100 Subject: [PATCH 40/60] add link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 78fab6f4..4dab06c7 100644 --- a/README.rst +++ b/README.rst @@ -142,7 +142,7 @@ Example Development =========== -OMLT uses ``just`` to manage development tasks: +OMLT uses [just](https://github.com/casey/just) to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks From cd892bcd1afa6f814d1cb956608bcfee55870bc5 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:20:00 +0100 Subject: [PATCH 41/60] wip --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 4dab06c7..3820ebe9 100644 --- a/README.rst +++ b/README.rst @@ -142,7 +142,7 @@ Example Development =========== -OMLT uses [just](https://github.com/casey/just) to manage development tasks: +OMLT uses `just `_ to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks From fbac1ba6ff277fe2c0e2ee37e738eefc72649cea Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:20:48 +0100 Subject: [PATCH 42/60] thing --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 3820ebe9..be307348 100644 --- a/README.rst +++ b/README.rst @@ -147,8 +147,8 @@ OMLT uses `just `_ to manage development tasks: * ``just`` to list available tasks * ``just check`` to run all checks * ``just fix`` to apply any auto-fixes -* ``just dev`` to install development dependencies -* ``just dev-gpu`` to install development dependencies but with GPU support +* ``just dev`` to install development dependencies in your current Python environment +* ``just dev-gpu`` to install development dependencies in your current Python environment but with GPU support * ``just docs`` to build the documentation Contributors From 0e8bded5ad0bde90662eea30052378eac5853dff Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:21:20 +0100 Subject: [PATCH 43/60] wip --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index be307348..058c0fc6 100644 --- a/README.rst +++ b/README.rst @@ -148,7 +148,7 @@ OMLT uses `just `_ to manage development tasks: * ``just check`` to run all checks * ``just fix`` to apply any auto-fixes * ``just dev`` to install development dependencies in your current Python environment -* ``just dev-gpu`` to install development dependencies in your current Python environment but with GPU support +* ``just dev-gpu`` same as ``dev`` but with GPU support * ``just docs`` to build the documentation Contributors From 1cdf89c24e3f8ae473b0958806c15d0957d3df7a Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:31:55 +0100 Subject: [PATCH 44/60] remove unnecessary things --- .github/workflows/tests.yml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d8af318e..bbaaf8ca 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,41 +7,32 @@ on: workflow_dispatch: jobs: ruff: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: 
actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ ruff-format: - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} + python-version: "3.12" cache: "pip" - run: pip install '.[dev]' - run: ruff format --check src/ tests/ docs/ From 909f86e4276f98e68e5bb2d44b77c6dd33165dbe Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Thu, 30 May 2024 18:32:52 +0100 Subject: [PATCH 45/60] Add back for mypy --- .github/workflows/tests.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bbaaf8ca..507f64f5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,12 +17,15 @@ jobs: - run: pip install '.[dev]' - run: ruff check src/ tests/ docs/ mypy: + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.12" + python-version: ${{ matrix.python-version }} cache: "pip" - run: pip install '.[dev]' - run: mypy src/ tests/ docs/ From 7ae13bed517d93ccdcf15ad344c480736a2a4daa Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Wed, 5 Jun 2024 20:01:17 +0000 Subject: [PATCH 46/60] Including OmltExpr expressions for the OmltVars --- src/omlt/base/__init__.py | 7 +- src/omlt/base/expression.py | 554 +++++++++++++++++++ src/omlt/base/julia.py | 23 +- src/omlt/base/var.py | 365 +++++++----- src/omlt/block.py | 24 +- src/omlt/neuralnet/layers/partition_based.py | 14 +- tests/test_block.py | 16 +- tests/test_var.py | 32 ++ 8 files changed, 874 insertions(+), 161 deletions(-) create mode 100644 src/omlt/base/expression.py create mode 100644 tests/test_var.py diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index 292c5eb2..bd526fc3 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,9 +1,10 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" -from omlt.dependencies import julia_available +from omlt.dependencies import julia_available if julia_available: - from omlt.base.julia import jump + from omlt.base.julia import jl, jump + from omlt.base.var import OmltVar +from omlt.base.expression import OmltExpr -# from omlt.base.expression import OmltExpression # from omlt.base.constraint import OmltConstraint diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py new file mode 100644 index 00000000..d8dda339 --- /dev/null +++ b/src/omlt/base/expression.py @@ -0,0 +1,554 @@ +from abc import ABC +import pyomo.environ as pyo + +# from pyomo.core.expr import RelationalExpression + +from omlt.base import DEFAULT_MODELING_LANGUAGE +import omlt.base.var as var +from omlt.dependencies import julia_available + +if julia_available: + from omlt.base.julia import jl, jump, JumpVar + from juliacall import AnyValue +relations = {"==", ">=", "<=", ">", "<"} + +formats = { + "pyomo": { + 
"scalar": pyo.Expression, + "indexed": pyo.Expression, + }, + "jump": { + "scalar": jump.AffExpr, + "indexed": jl.Vector, + }, +} + + +class OmltExpr(ABC): + # Claim to be a Pyomo Expression so blocks will register + # properly. + @property + def __class__(self): + return pyo.Expression + + def __new__(cls, *indexes, **kwargs): + if not indexes: + instance = super(OmltExpr, cls).__new__(OmltExprScalar) + instance.__init__(**kwargs) + else: + instance = super(OmltExpr, cls).__new__(OmltExprIndexed) + instance.__init__(*indexes, **kwargs) + return instance + + @property + def ctype(self): + return pyo.Expression + + def is_component_type(self): + return True + + def is_expression_type(self): + return True + + def is_indexed(self): + pass + + def valid_model_component(self): + """Return True if this can be used as a model component.""" + return True + + +class OmltExprScalar(OmltExpr): + def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + "Expression format %s not recognized. Supported formats " + "are 'pyomo' or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltExpr, cls).__new__(subclass) + # instance.__init__(*args, **kwargs) + instance._format = format + return instance + + def __mul__(self, other): + pass + + +class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): + format = "pyomo" + + def __init__(self, *args, expr=None, **kwargs): + self._index_set = {} + if isinstance(expr, (pyo.Expression, pyo.NumericValue)): + self._expression = expr + elif isinstance(expr, OmltExprScalarPyomo): + self._expression = expr._expression + elif isinstance(expr, tuple): + self._expression = self._parse_expression_tuple(expr) + else: + print("expression not recognized", expr, type(expr)) + + self._parent = None + self.name = None + + def _parse_expression_tuple_term(self, term): + if isinstance(term, tuple): + return self._parse_expression_tuple(term) + elif isinstance(term, OmltExprScalarPyomo): + return term._expression + elif isinstance(term, var.OmltVar): + return term._pyovar + elif isinstance(term, ( + pyo.Expression, pyo.Var, int, float + )): + return term + else: + raise TypeError("Term of expression is an unsupported type. 
" + "Write a better error message.") + + def _parse_expression_tuple(self, expr): + lhs = self._parse_expression_tuple_term(expr[0]) + rhs = self._parse_expression_tuple_term(expr[2]) + + if expr[1] == "+": + return lhs + rhs + + elif expr[1] == "-": + return lhs - rhs + + elif expr[1] == "*": + return lhs * rhs + + elif expr[1] == "/": + return lhs / rhs + + else: + raise ValueError("Expression middle term was {%s}.", expr[1]) + + def __repr__(self): + return repr(self._expression.arg(0)) + + def is_indexed(self): + return False + + def as_numeric(self): + return self._expression._apply_operation(self._expression.args) + + def construct(self, data=None): + return self._expression.construct(data) + + @property + def _constructed(self): + return self._expression.expr._constructed + + @property + def const(self): + return self._expression.const + + @property + def args(self): + return self._expression.args + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __call__(self): + return self._expression() + + def __add__(self, other): + if isinstance(other, OmltExpr): + expr = self._expression + other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression + other + return OmltExpr(format=self._format, expr=expr) + + # def __sub__(self, other): + # expr = (self, "-", other) + # return OmltExpression(format=self._format, expr=expr) + + def __mul__(self, other): + if isinstance(other, OmltExpr): + expr = self._expression * other._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = self._expression * other + return OmltExprScalar(format=self._format, expr=expr) + + def __div__(self, other): + expr = (self, "/", other) + return OmltExpr(format=self._format, expr=expr) + + def __truediv__(self, other): + expr = (self, "//", other) + return OmltExpr(format=self._format, expr=expr) + + def __radd__(self, other): + if isinstance(other, OmltExpr): + expr = other._expression + self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other + self._expression + return OmltExpr(format=self._format, expr=expr) + + def __rsub__(self, other): + if isinstance(other, OmltExpr): + expr = other._expression - self._expression + elif isinstance(other, (int, float, pyo.Expression)): + expr = other - self._expression + return OmltExpr(format=self._format, expr=expr) + + def __rmul__(self, other): + expr = (other, "*", self) + return OmltExpr(format=self._format, expr=expr) + + def __ge__(self, other): + expr = self._expression >= other + return expr + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + def __le__(self, other): + expr = (self._expression <= other) + return expr + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + def __eq__(self, other): + expr = self._expression == other + return pyo.Expression(expr=expr) + # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) + + +class OmltExprIndexed(OmltExpr): + def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} + if format not in subclass_map: + raise ValueError( + "Variable format %s not recognized. 
Supported formats are 'pyomo'" + " or 'jump'.", + format, + ) + subclass = subclass_map[format] + instance = super(OmltExpr, subclass).__new__(subclass) + instance.__init__(*indexes, **kwargs) + instance._format = format + return instance + + +class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): + format = "pyomo" + + def __init__(self, *indexes, expr=None, format=DEFAULT_MODELING_LANGUAGE, **kwargs): + if len(indexes) == 1: + index_set = indexes[0] + i_dict = {} + for i, val in enumerate(index_set): + i_dict[i] = val + self._index_set = tuple(i_dict[i] for i in range(len(index_set))) + elif len(indexes) > 1: + raise ValueError("Currently index cross-products are unsupported.") + else: + self._index_set = {} + self._format = format + self._expression = pyo.Expression(self._index_set, expr=expr) + + # self.pyo.construct() + + def is_indexed(self): + return True + + def expression_as_dict(self): + if len(self._index_set) == 1: + return {self._index_set[0]: self._expression} + else: + return {k: self._expression[k] for k in self._index_set} + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item) == 1: + return self._expression[item[0]] + else: + return self._expression[item] + + def __setitem__(self, item, value): + self._expression[item] = value + + def keys(self): + return self._expression.keys() + + def values(self): + return self._expression.values() + + def items(self): + return self._expression.items() + + def __len__(self): + """ + Return the number of component data objects stored by this + component. + """ + return len(self._expression) + + def __contains__(self, idx): + """Return true if the index is in the dictionary""" + return idx in self._expression + + # The default implementation is for keys() and __iter__ to be + # synonyms. 
The logic is implemented in keys() so that + # keys/values/items continue to work for components that implement + # other definitions for __iter__ (e.g., Set) + def __iter__(self): + """Return an iterator of the component data keys""" + return self._expression.__iter__() + + @property + def args(self): + return self._expression.args() + + def arg(self, index): + return self._expression.arg(index) + + def nargs(self): + return self._expression.nargs() + + def __call__(self): + return self._expression() + + # # def __str__(self): + # # return parse_expression(self.expr, "").rstrip() + + # def __repr__(self): + # if self._expr is not None: + # return parse_expression(self._expr, "").rstrip() + # else: + # return "empty expression" + + # def set_value(self, value): + # print("setting value:", value) + # self.value = value + + # @property + # def rule(self): + # return self._expr + + def __add__(self, other): + expr = (self, "+", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + # def __sub__(self, other): + # expr = (self, "-", other) + # return OmltExpression(format=self._format, expr=expr) + + # def __mul__(self, other): + # expr = (self, "*", other) + # return OmltExpression(format=self._format, expr=expr) + + def __div__(self, other): + expr = (self, "/", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + def __truediv__(self, other): + expr = (self, "//", other) + return OmltExpr(self._index_set, format=self._format, expr=expr) + + def __eq__(self, other): + expr = (self, "==", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + def __le__(self, other): + expr = (self, "<=", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + def __ge__(self, other): + expr = (self, ">=", other) + return pyo.Expression(self._index_set, expr=expr) + # return constraint.OmltRelation( + # self._index_set, format=self._format, expr_tuple=expr + # ) + + +# def parse_expression(expr, string): +# if expr is not None: +# for t in expr: +# if str(t).count(" ") == 2: +# string += "(" + str(t) + ") " +# else: +# string += str(t) + " " +# else: +# string = expr +# return string + + +# def parse_jump_affine(expr_tuple): +# if expr_tuple is not None: +# if isinstance(expr_tuple, JumpVar): +# return jump.AffExpr(0, {expr_tuple.to_jump(): 1}) +# elif isinstance(expr_tuple, (int, float)): +# return jump.AffExpr(expr_tuple, {}) +# elif isinstance(expr_tuple, OmltExprScalar): +# print("found a scalar expression") +# print(expr_tuple) +# print(expr_tuple._expression) +# return expr_tuple._expression +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], JumpVar): +# return jump.AffExpr(0, {expr_tuple[0].to_jump(): 1}) +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): +# return jump.AffExpr(expr_tuple[0], {}) +# elif len(expr_tuple) == 2: +# print("don't know how to deal with 2-element expressions") +# print("expr_tuple") +# elif len(expr_tuple) == 3: +# print("triplet") +# if expr_tuple[1] == "+": +# return parse_jump_affine(expr_tuple[0]) + parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "-": +# return parse_jump_affine(expr_tuple[0]) - parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "*": +# return parse_jump_affine(expr_tuple[0]) * parse_jump_affine( +# expr_tuple[2] +# ) +# elif 
expr_tuple[1] == "/": +# return parse_jump_affine(expr_tuple[0]) / parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "//": +# return parse_jump_affine(expr_tuple[0]) // parse_jump_affine( +# expr_tuple[2] +# ) +# elif expr_tuple[1] == "**": +# return parse_jump_affine(expr_tuple[0]) ** parse_jump_affine( +# expr_tuple[2] +# ) + + +# def dictplus(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): +# c[k] = a[k] + b[k] +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dictminus(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): +# c[k] = a[k] - b[k] +# print("dictminus gives:", c) +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dicttimes(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): + +# c[k] = a[k] * b[k] +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def dictover(a, b): +# c = dict() +# if a.keys() == b.keys(): +# for k in a.keys(): + +# c[k] = jump_divide(a[k], b[k]) +# return c +# else: +# raise ValueError("dicts have non-matching keys") + + +# def jump_divide(a, b): +# assert isinstance(a, AnyValue) +# print(b.terms) +# assert (isinstance(b, AnyValue) and len(b.terms) == 0) or isinstance( +# b, (int, float) +# ) +# if isinstance(b, AnyValue): +# div_by = b.constant +# else: +# div_by = b +# return jump.AffExpr(a.constant / div_by, {}) + + +# def parse_jump_indexed(expr_tuple, index): +# print("parsing:", expr_tuple) +# if expr_tuple is not None: +# if isinstance(expr_tuple, OmltExpr): +# print("here") +# return expr_tuple.expression_as_dict() +# elif isinstance(expr_tuple, var.OmltVar): +# return expr_tuple.to_jumpexpr() +# elif isinstance(expr_tuple, (int, float)): +# return {k: jump.AffExpr(expr_tuple, {}) for k in index} +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], OmltExpr): +# return expr_tuple[0]._expression +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], var.OmltVar): +# indexed = { +# k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) +# for k, v in expr_tuple[0].items() +# } +# return indexed +# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): +# return {k: jump.AffExpr(expr_tuple[0], {}) for k in index} +# elif len(expr_tuple) == 2: +# print("don't know how to deal with 2-element expressions") +# print(expr_tuple) +# elif len(expr_tuple) == 3: +# if expr_tuple[1] == "+": +# return dictplus( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "-": +# return dictminus( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "*": +# return dicttimes( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "/": +# return dictover( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "//": +# return dictover( +# parse_jump_indexed(expr_tuple[0], index), +# parse_jump_indexed(expr_tuple[2], index), +# ) +# elif expr_tuple[1] == "**": +# return parse_jump_indexed(expr_tuple[0], index) ** parse_jump_indexed( +# expr_tuple[2], index +# ) +# elif expr_tuple[1] in relations: +# cnstrnt = constraint.OmltRelation( +# index, +# model=None, +# lhs=parse_jump_indexed(expr_tuple[0], index), +# sense=expr_tuple[1], +# rhs=parse_jump_indexed(expr_tuple[2], index), +# format="jump", +# ) +# indexed = {k: 
cnstrnt.lhs[k] - cnstrnt.rhs[k] for k in index} +# return indexed diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py index e2e771d6..b3c9109f 100644 --- a/src/omlt/base/julia.py +++ b/src/omlt/base/julia.py @@ -1,5 +1,4 @@ from omlt.dependencies import julia_available -# from omlt.base.expression import OmltExpression if julia_available: from juliacall import Main as jl @@ -74,6 +73,8 @@ class JumpVar: def __init__(self, varinfo: JuMPVarInfo, name): self.info = varinfo self.name = name + self.omltvar = None + self.index = None self.construct() def __str__(self): @@ -96,17 +97,21 @@ def value(self): def add_to_model(self, model, name=None): if name is None: - name = self._name - jump.add_variable(model, self.var, name) + name = self.name + variable_ref = jump.add_variable(model, self.var, name) + return variable_ref def to_jump(self): return self.var - # def __sub__(self, other): - # return OmltExpression(expr=(self, "-", other), format="jump") + def __add__(self, other): + return (self.omltvar + other)[self.index] - # def __mul__(self, other): - # return OmltExpression(expr=(self, "*", other), format="jump") + def __sub__(self, other): + return (self.omltvar - other)[self.index] - # def __eq__(self, other): - # return OmltExpression(expr=(self, "==", other), format="jump") + def __mul__(self, other): + return (self.omltvar * other)[self.index] + + def __eq__(self, other): + return (self.omltvar == other)[self.index] diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index f22d8183..c119e8bf 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -12,9 +12,12 @@ from omlt.dependencies import julia_available from omlt.base import DEFAULT_MODELING_LANGUAGE + if julia_available: from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar +from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar +from omlt.base.constraint import OmltRelation, OmltRelScalar class OmltVar(ABC): @@ -63,7 +66,7 @@ def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} if format not in subclass_map: raise ValueError( - f"Variable format %s not recognized. Supported formats " + "Variable format %s not recognized. Supported formats " "are 'pyomo' or 'jump'.", format, ) @@ -108,6 +111,16 @@ def ub(self): def ub(self, val): pass + @property + @abstractmethod + def domain(self): + pass + + @domain.setter + @abstractmethod + def domain(self, val): + pass + # Interface for getting/setting value @property @abstractmethod @@ -121,91 +134,149 @@ def value(self, val): # Interface governing how variables behave in expressions. 
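+    # A sketch of how these overloads are meant to compose (assuming
+    # OmltRelScalar, imported from omlt.base.constraint above, follows
+    # the same wrapper pattern as OmltExprScalar):
+    #
+    #     x = OmltVar(format="jump")  # scalar variable
+    #     e = 2 * x + 1               # arithmetic builds an OmltExprScalar
+    #     r = x <= 5                  # comparison builds an OmltRelScalar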
- # def __lt__(self, other): - # return pyo.NumericValue.__lt__(self, other) + def __lt__(self, other): + return OmltRelScalar(expr=(self, "<", other)) - # def __gt__(self, other): - # return pyo.NumericValue.__gt__(self, other) + def __gt__(self, other): + return OmltRelScalar(expr=(self, ">", other)) - # def __le__(self, other): - # return pyo.NumericValue.__le__(self, other) + def __le__(self, other): + return OmltRelScalar(expr=(self, "<=", other)) - # def __ge__(self, other): - # return pyo.NumericValue.__ge__(self, other) + def __ge__(self, other): + return OmltRelScalar(expr=(self, ">=", other)) - # def __eq__(self, other): - # return pyo.NumericValue.__eq__(self, other) + def __eq__(self, other): + return OmltRelScalar(expr=(self, "==", other)) - # def __add__(self, other): - # return pyo.NumericValue.__add__(self, other) + def __add__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "+", other)) - # def __sub__(self, other): - # return pyo.NumericValue.__sub__(self, other) + def __sub__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "-", other)) - # # def __mul__(self,other): - # # return pyo.NumericValue.__mul__(self,other) + def __mul__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "*", other)) - # def __div__(self, other): - # return pyo.NumericValue.__div__(self, other) + def __div__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "//", other)) - # def __truediv__(self, other): - # return pyo.NumericValue.__truediv__(self, other) + def __truediv__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "/", other)) - # def __pow__(self, other): - # return pyo.NumericValue.__pow__(self, other) + def __pow__(self, other): + return OmltExprScalar(format=self._format, expr=(self, "**", other)) - # def __radd__(self, other): - # return pyo.NumericValue.__radd__(self, other) + def __radd__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "+", self)) - # def __rsub__(self, other): - # return pyo.NumericValue.__rsub__(self, other) + def __rsub__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "-", self)) - # # def __rmul__(self,other): - # # return self._ComponentDataClass.__rmul__(self,other) + def __rmul__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "*", self)) - # def __rdiv__(self, other): - # return pyo.NumericValue.__rdiv__(self, other) + def __rdiv__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "//", self)) - # def __rtruediv__(self, other): - # return pyo.NumericValue.__rtruediv__(self, other) + def __rtruediv__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "/", self)) - # def __rpow__(self, other): - # return pyo.NumericValue.__rpow__(self, other) + def __rpow__(self, other): + return OmltExprScalar(format=self._format, expr=(other, "**", self)) - # def __iadd__(self, other): - # return pyo.NumericValue.__iadd__(self, other) + def __iadd__(self, other): + return pyo.NumericValue.__iadd__(self, other) - # def __isub__(self, other): - # return pyo.NumericValue.__isub__(self, other) + def __isub__(self, other): + return pyo.NumericValue.__isub__(self, other) - # def __imul__(self, other): - # return pyo.NumericValue.__imul__(self, other) + def __imul__(self, other): + return pyo.NumericValue.__imul__(self, other) - # def __idiv__(self, other): - # return pyo.NumericValue.__idiv__(self, other) + def __idiv__(self, other): + return 
pyo.NumericValue.__idiv__(self, other) - # def __itruediv__(self, other): - # return pyo.NumericValue.__itruediv__(self, other) + def __itruediv__(self, other): + return pyo.NumericValue.__itruediv__(self, other) - # def __ipow__(self, other): - # return pyo.NumericValue.__ipow__(self, other) + def __ipow__(self, other): + return pyo.NumericValue.__ipow__(self, other) - # def __neg__(self): - # return pyo.NumericValue.__neg__(self) + def __neg__(self): + return pyo.NumericValue.__neg__(self) - # def __pos__(self): - # return pyo.NumericValue.__pos__(self) + def __pos__(self): + return pyo.NumericValue.__pos__(self) - # def __abs__(self): - # return pyo.NumericValue.__abs__(self) + def __abs__(self): + return pyo.NumericValue.__abs__(self) -class OmltScalarPyomo(pyo.ScalarVar, OmltScalar): +class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): format = "pyomo" def __init__(self, *args, **kwargs): kwargs.pop("format", None) - pyo.ScalarVar.__init__(self, *args, **kwargs) + # pyo.ScalarVar.__init__(self, *args, **kwargs) + self._pyovar = pyo.ScalarVar(*args, **kwargs) + + def construct(self, data=None): + return self._pyovar.construct(data) + + def fix(self, value, skip_validation): + return self._pyovar.fix(value, skip_validation) + + @property + def ctype(self): + return pyo.ScalarVar + + @property + def name(self): + self._pyovar._name = self._name + return self._pyovar._name + + @property + def bounds(self): + return (self._pyovar._lb, self._pyovar._ub) + + @bounds.setter + def bounds(self, val): + self._pyovar.lb = val[0] + self._pyovar.ub = val[1] + + @property + def lb(self): + return self._pyovar._lb + + @lb.setter + def lb(self, val): + self._pyovar.setlb(val) + + @property + def ub(self): + return self._pyovar._ub + + @ub.setter + def ub(self, val): + self._pyovar.setub(val) + + @property + def domain(self): + return self._pyovar._domain + + @domain.setter + def domain(self, val): + self._pyovar._domain = val + + # Interface for getting/setting value + @property + def value(self): + return self._pyovar.value + + @value.setter + def value(self, val): + self._pyovar.value = val class OmltScalarJuMP(OmltScalar): @@ -261,7 +332,18 @@ def __init__(self, *args, **kwargs): _initialize = kwargs.pop("initialize", None) if _initialize: - self._value = _initialize + if isinstance(_initialize, (int, float)): + self._value = _initialize + elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): + self._value = _initialize[0] + else: + # Pyomo's "scalar" variables can be multidimensional, they're + # just not indexed. JuMP scalar variables can only be a single + # dimension. Rewrite this error to be more helpful. + raise ValueError( + "Initial value for JuMP variables must be an int" + f" or float, but {type(_initialize)} was provided." 
+ ) else: self._value = None @@ -280,9 +362,12 @@ def __init__(self, *args, **kwargs): def construct(self, data=None): self._var = JumpVar(self._varinfo, self._name) + self._var.omltvar = self self._constructed = True - if self._block: - self._blockvar = jump.add_variable(self._block, self._var) + if self._parent: + self._blockvar = jump.add_variable( + self._parent()._jumpmodel, self.to_jumpvar() + ) def fix(self, value, skip_validation): self.fixed = True @@ -311,7 +396,7 @@ def lb(self): @lb.setter def lb(self, val): - self._varinfo.lower_bound = val + self._varinfo.setlb(val) if self._constructed: self.construct() @@ -321,7 +406,7 @@ def ub(self): @ub.setter def ub(self, val): - self._varinfo.upper_bound = val + self._varinfo.setub(val) if self._constructed: self.construct() @@ -352,12 +437,15 @@ def name(self): def name(self, value): self._name = value - def to_jump(self): + def to_jumpvar(self): if self._constructed: return self._var.to_jump() else: return self._varinfo.to_jump() + def to_jumpexpr(self): + return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) + """ Future formats to implement. @@ -387,7 +475,7 @@ def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} if format not in subclass_map: raise ValueError( - f"Variable format %s not recognized. Supported formats are 'pyomo'" + "Variable format %s not recognized. Supported formats are 'pyomo'" " or 'jump'.", format, ) @@ -447,86 +535,85 @@ def __contains__(self, idx): def __iter__(self): pass + # Interface governing how variables behave in expressions. -# Interface governing how variables behave in expressions. - -# def __lt__(self, other): -# return pyo.NumericValue.__lt__(self, other) + def __lt__(self, other): + return OmltRelation(self.index_set(), expr=(self, "<", other)) -# def __gt__(self, other): -# return pyo.NumericValue.__gt__(self, other) + def __gt__(self, other): + return OmltRelation(self.index_set(), expr=(self, ">", other)) -# def __le__(self, other): -# return pyo.NumericValue.__le__(self, other) + def __le__(self, other): + return OmltRelation(self.index_set(), expr=(self, "<=", other)) -# def __ge__(self, other): -# return pyo.NumericValue.__ge__(self, other) + def __ge__(self, other): + return OmltRelation(self.index_set(), expr=(self, ">=", other)) -# def __eq__(self, other): -# return pyo.NumericValue.__eq__(self, other) + def __eq__(self, other): + return OmltRelation(self.index_set(), expr=(self, "==", other)) -# def __add__(self, other): -# return pyo.NumericValue.__add__(self, other) + def __add__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) -# def __sub__(self, other): -# return pyo.NumericValue.__sub__(self, other) + def __sub__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "-", other)) -# # def __mul__(self,other): -# # return pyo.NumericValue.__mul__(self,other) + def __mul__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "*", other)) -# def __div__(self, other): -# return pyo.NumericValue.__div__(self, other) + def __div__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "//", other)) -# def __truediv__(self, other): -# return pyo.NumericValue.__truediv__(self, other) + def __truediv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(self, "/", other)) -# def __pow__(self, other): -# return pyo.NumericValue.__pow__(self, other) + def __pow__(self, other): + return 
OmltExprIndexed(self.index_set(), expr=(self, "**", other)) -# def __radd__(self, other): -# return pyo.NumericValue.__radd__(self, other) + def __radd__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "+", self)) -# def __rsub__(self, other): -# return pyo.NumericValue.__rsub__(self, other) + def __rsub__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "-", self)) -# # def __rmul__(self,other): -# # return self._ComponentDataClass.__rmul__(self,other) + def __rmul__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "*", self)) -# def __rdiv__(self, other): -# return pyo.NumericValue.__rdiv__(self, other) + def __rdiv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "//", self)) -# def __rtruediv__(self, other): -# return pyo.NumericValue.__rtruediv__(self, other) + def __rtruediv__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "/", self)) -# def __rpow__(self, other): -# return pyo.NumericValue.__rpow__(self, other) + def __rpow__(self, other): + return OmltExprIndexed(self.index_set(), expr=(other, "**", self)) -# def __iadd__(self, other): -# return pyo.NumericValue.__iadd__(self, other) + def __iadd__(self, other): + return pyo.NumericValue.__iadd__(self, other) -# def __isub__(self, other): -# return pyo.NumericValue.__isub__(self, other) + def __isub__(self, other): + return pyo.NumericValue.__isub__(self, other) -# def __imul__(self, other): -# return pyo.NumericValue.__imul__(self, other) + def __imul__(self, other): + return pyo.NumericValue.__imul__(self, other) -# def __idiv__(self, other): -# return pyo.NumericValue.__idiv__(self, other) + def __idiv__(self, other): + return pyo.NumericValue.__idiv__(self, other) -# def __itruediv__(self, other): -# return pyo.NumericValue.__itruediv__(self, other) + def __itruediv__(self, other): + return pyo.NumericValue.__itruediv__(self, other) -# def __ipow__(self, other): -# return pyo.NumericValue.__ipow__(self, other) + def __ipow__(self, other): + return pyo.NumericValue.__ipow__(self, other) -# def __neg__(self): -# return pyo.NumericValue.__neg__(self) + def __neg__(self): + return pyo.NumericValue.__neg__(self) -# def __pos__(self): -# return pyo.NumericValue.__pos__(self) + def __pos__(self): + return pyo.NumericValue.__pos__(self) -# def __abs__(self): -# return pyo.NumericValue.__abs__(self) + def __abs__(self): + return pyo.NumericValue.__abs__(self) class OmltIndexedPyomo(pyo.Var, OmltIndexed): @@ -588,7 +675,7 @@ def __init__(self, *indexes, **kwargs): _ub = {i: None for i in self._index_set} else: raise ValueError( - f"Bounds must be given as a tuple," " but %s was given.", self._bounds + "Bounds must be given as a tuple," " but %s was given.", self._bounds ) _domain = kwargs.pop("domain", None) @@ -630,7 +717,7 @@ def __init__(self, *indexes, **kwargs): self._value = {i: _initialize[0] for i in self._index_set} else: raise ValueError( - f"Index set has length %s, but" " initializer has length %s.", + "Index set has length %s, but initializer has length %s.", len(self._index_set), len(_initialize), ) @@ -648,6 +735,7 @@ def __init__(self, *indexes, **kwargs): self.integer, ) self._vars = {} + self._varrefs = {} self._constructed = False self._ctype = pyo.Var self._parent = None @@ -664,13 +752,22 @@ def __setitem__(self, item, value): self.construct() def keys(self): - return self._vars.keys() + if self._parent is not None: + return self._varrefs.keys() + else: + return self._vars.keys() def values(self): - return 
self._vars.values() + if self._parent is not None: + return self._varrefs.values() + else: + return self._vars.values() def items(self): - return self._vars.items() + if self._parent is not None: + return self._varrefs.items() + else: + return self._vars.items() def fix(self, value=None): self.fixed = True @@ -708,6 +805,13 @@ def construct(self, data=None): else: name = str(self.name) + str(list(idx)).replace(" ", "") self._vars[idx] = JumpVar(self._varinfo[idx], name) + self._vars[idx].omltvar = self + self._vars[idx].index = idx + if self._parent is not None: + block = self._parent() + if block._format == "jump" and block._jumpmodel is not None: + self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) + self._constructed = True def setub(self, value): @@ -735,11 +839,12 @@ def index_set(self): def name(self): return self._name - def to_jump(self): + def to_jumpvar(self): if self._constructed: - return jump.Containers.DenseAxisArray( - list(self._vars.values()), self.index_set() - ) + return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) + + def to_jumpexpr(self): + return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} """ @@ -763,13 +868,3 @@ def __init__(self, *args, **kwargs): raise ValueError( "Storing variables in Gurobi format is not currently implemented." ) - - -class OmltSet: - def __init__(self): - pass - - -class OmltExpression: - def __init__(self): - pass diff --git a/src/omlt/block.py b/src/omlt/block.py index 11956f48..f97b9c5c 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -26,7 +26,9 @@ class is used in combination with a formulation object to construct the import warnings from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE - +from omlt.dependencies import julia_available +if julia_available: + from omlt.base import jump import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -39,7 +41,16 @@ def __init__(self, component): self.__formulation = None self.__input_indexes = None self.__output_indexes = None - self.__format = DEFAULT_MODELING_LANGUAGE + self._format = DEFAULT_MODELING_LANGUAGE + if self._format == "jump": + self._jumpmodel = jump.Model() + else: + self._jumpmodel = None + + def set_format(self, format): + self._format = format + if self._format == "jump" and self._jumpmodel is None: + self._jumpmodel = jump.Model() def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ @@ -65,9 +76,9 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): ) self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0, format=self.__format) + self.inputs = OmltVar(self.inputs_set, initialize=0, format=self._format) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0, format=self.__format) + self.outputs = OmltVar(self.outputs_set, initialize=0, format=self._format) def build_formulation(self, formulation, format=None): """ @@ -87,7 +98,10 @@ def build_formulation(self, formulation, format=None): """ if format is not None: - self.__format = format + self._format = format + + if self._format == "jump": + self._jumpmodel = jump.Model() self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 5f99e706..3d2ebff7 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ 
b/src/omlt/neuralnet/layers/partition_based.py
@@ -121,10 +121,12 @@ def output_node_block(b, *output_index):
         z2.setlb(min(0, lb))
         z2.setub(max(0, ub))
 
-        b.eq_16_lb.add(expr - z2 >= b.sig * lb)
-        b.eq_16_ub.add(expr - z2 <= b.sig * ub)
-        b.eq_17_lb.add(z2 >= (1 - b.sig) * lb)
-        b.eq_17_ub.add(z2 <= (1 - b.sig) * ub)
+        b.eq_16_lb.add(b.sig * lb <= expr - z2)
+        b.eq_16_ub.add(b.sig * ub >= expr - z2)
+
+        minus_sig = 1 - b.sig
+        b.eq_17_lb.add(minus_sig * lb <= z2)
+        b.eq_17_ub.add(minus_sig * ub >= z2)
 
     # compute dense layer expression to compute bounds
     expr = 0.0
@@ -159,9 +161,9 @@ def output_node_block(b, *output_index):
     b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0)
 
     b.eq_14 = pyo.Constraint(
-        expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig) >= 0
+        expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0
     )
     b.eq_15 = pyo.Constraint(
         expr=layer_block.z[output_index]
-        == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)
+        == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression
     )
diff --git a/tests/test_block.py b/tests/test_block.py
index 74fb9290..35d58a6d 100644
--- a/tests/test_block.py
+++ b/tests/test_block.py
@@ -6,7 +6,6 @@
 
 from omlt.dependencies import julia_available
 
-
 class dummy_formulation(object):
     def __init__(self):
         self.input_indexes = ["A", "C", "D"]
@@ -48,10 +47,21 @@ def test_block():
 def test_jump_block():
     m = pyo.ConcreteModel()
     m.b = OmltBlock()
+    m.b.set_format("jump")
+
+    with pytest.raises(ValueError) as excinfo:
+        m.b.x = OmltVar(initialize=(2, 7), format="jump")
+    expected_msg = "Initial value for JuMP variables must be an int or float, but <class 'tuple'> was provided."
 
-    m.b.x = OmltVar(initialize=(2, 7), format="jump")
+    assert str(excinfo.value) == expected_msg
 
-    assert m.b.x.value == (2, 7)
+    m.b.y = OmltVar(initialize=2, format="jump")
+    assert m.b.y.value == 2
+    assert m.b.y.name == 'y'
+    m.b.y.lb = 0
+    m.b.y.ub = 5
+    assert m.b.y.lb == 0
+    assert m.b.y.ub == 5
 
     formulation = dummy_formulation()
 
diff --git a/tests/test_var.py b/tests/test_var.py
new file mode 100644
index 00000000..1639c480
--- /dev/null
+++ b/tests/test_var.py
@@ -0,0 +1,32 @@
+import pytest
+
+import pyomo.environ as pyo
+from omlt.base import OmltVar
+from omlt.dependencies import julia_available
+
+
+def _test_scalar_var(format):
+    v = OmltVar(format=format, initialize=2, domain=pyo.Integers)
+    assert v.is_indexed() is False
+    assert v.ctype == pyo.ScalarVar
+
+    v.construct()
+
+    v.value = 3
+    assert v.value == 3
+
+    v.bounds = (0, 5)
+    assert v.lb == 0
+    assert v.ub == 5
+    assert v.bounds == (0, 5)
+
+
+def test_scalar_pyomo():
+    _test_scalar_var("pyomo")
+
+
+@pytest.mark.skipif(
+    not julia_available, reason="Test only valid when Julia is available"
+)
+def test_scalar_jump():
+    _test_scalar_var("jump")
From 3483455ca049f7dd4ffc82c204a87389758e0fbf Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Thu, 6 Jun 2024 18:22:30 +0000
Subject: [PATCH 47/60] cleanup in expression.py

---
 src/omlt/base/expression.py | 23 ++++++-----------------
 1 file changed, 6 insertions(+), 17 deletions(-)

diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py
index d8dda339..c6086cf5 100644
--- a/src/omlt/base/expression.py
+++ b/src/omlt/base/expression.py
@@ -5,23 +5,12 @@
 
 from omlt.base import DEFAULT_MODELING_LANGUAGE
 import omlt.base.var as var
-from omlt.dependencies import julia_available
-
-if julia_available:
-    from omlt.base.julia import jl, jump, JumpVar
-    from juliacall import 
AnyValue -relations = {"==", ">=", "<=", ">", "<"} - -formats = { - "pyomo": { - "scalar": pyo.Expression, - "indexed": pyo.Expression, - }, - "jump": { - "scalar": jump.AffExpr, - "indexed": jl.Vector, - }, -} +# from omlt.dependencies import julia_available + +# if julia_available: +# from omlt.base.julia import jl, jump, JumpVar +# from juliacall import AnyValue +# relations = {"==", ">=", "<=", ">", "<"} class OmltExpr(ABC): From 21a63ea4b6f4a91f85bc4df998b32ae2d709ec25 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:47:01 +0000 Subject: [PATCH 48/60] tidying var.py --- src/omlt/base/var.py | 42 +++++++++++++------------- src/omlt/neuralnet/activations/relu.py | 1 + 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index c119e8bf..bc68df39 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -17,7 +17,7 @@ from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar -from omlt.base.constraint import OmltRelation, OmltRelScalar +# from omlt.base.constraint import OmltRelation, OmltRelScalar class OmltVar(ABC): @@ -134,20 +134,20 @@ def value(self, val): # Interface governing how variables behave in expressions. - def __lt__(self, other): - return OmltRelScalar(expr=(self, "<", other)) + # def __lt__(self, other): + # return OmltRelScalar(expr=(self, "<", other)) - def __gt__(self, other): - return OmltRelScalar(expr=(self, ">", other)) + # def __gt__(self, other): + # return OmltRelScalar(expr=(self, ">", other)) - def __le__(self, other): - return OmltRelScalar(expr=(self, "<=", other)) + # def __le__(self, other): + # return OmltRelScalar(expr=(self, "<=", other)) - def __ge__(self, other): - return OmltRelScalar(expr=(self, ">=", other)) + # def __ge__(self, other): + # return OmltRelScalar(expr=(self, ">=", other)) - def __eq__(self, other): - return OmltRelScalar(expr=(self, "==", other)) + # def __eq__(self, other): + # return OmltRelScalar(expr=(self, "==", other)) def __add__(self, other): return OmltExprScalar(format=self._format, expr=(self, "+", other)) @@ -537,20 +537,20 @@ def __iter__(self): # Interface governing how variables behave in expressions. 
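+    # The relational overloads below are commented back out, together
+    # with the omlt.base.constraint import above: until an OmltRelation/
+    # OmltRelScalar implementation is actually available, constructing
+    # them here would fail at import time.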
- def __lt__(self, other): - return OmltRelation(self.index_set(), expr=(self, "<", other)) + # def __lt__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "<", other)) - def __gt__(self, other): - return OmltRelation(self.index_set(), expr=(self, ">", other)) + # def __gt__(self, other): + # return OmltRelation(self.index_set(), expr=(self, ">", other)) - def __le__(self, other): - return OmltRelation(self.index_set(), expr=(self, "<=", other)) + # def __le__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "<=", other)) - def __ge__(self, other): - return OmltRelation(self.index_set(), expr=(self, ">=", other)) + # def __ge__(self, other): + # return OmltRelation(self.index_set(), expr=(self, ">=", other)) - def __eq__(self, other): - return OmltRelation(self.index_set(), expr=(self, "==", other)) + # def __eq__(self, other): + # return OmltRelation(self.index_set(), expr=(self, "==", other)) def __add__(self, other): return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index 8ac42aa0..995bf31e 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -3,6 +3,7 @@ from omlt.base import OmltVar + def bigm_relu_activation_constraint(net_block, net, layer_block, layer): r""" Big-M ReLU activation formulation. From 4ae0715710134d8cf467926a07307abc4d0767f5 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:58:51 +0000 Subject: [PATCH 49/60] fixing variable initialization --- src/omlt/base/var.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index bc68df39..4ecfc7ef 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -220,6 +220,7 @@ def __init__(self, *args, **kwargs): kwargs.pop("format", None) # pyo.ScalarVar.__init__(self, *args, **kwargs) self._pyovar = pyo.ScalarVar(*args, **kwargs) + self._parent = None def construct(self, data=None): return self._pyovar.construct(data) From b17482059a4ffd04efbefa46360945f581654aea Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:06:17 +0000 Subject: [PATCH 50/60] further fixing --- src/omlt/base/var.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 4ecfc7ef..73065289 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -221,6 +221,7 @@ def __init__(self, *args, **kwargs): # pyo.ScalarVar.__init__(self, *args, **kwargs) self._pyovar = pyo.ScalarVar(*args, **kwargs) self._parent = None + self._constructed = None def construct(self, data=None): return self._pyovar.construct(data) From e2191147019df7d98892aa1ef0d374a75490571f Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:22:56 +0000 Subject: [PATCH 51/60] adding abstract methods to expression interface --- src/omlt/base/expression.py | 16 +++++++++++++++- src/omlt/base/var.py | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index c6086cf5..d49b95a7 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -1,4 +1,4 @@ -from abc import ABC +from abc import ABC, abstractmethod import pyomo.environ as pyo # from pyomo.core.expr import RelationalExpression @@ -39,6 +39,7 @@ def is_component_type(self): def 
is_expression_type(self): return True + @abstractmethod def is_indexed(self): pass @@ -46,6 +47,19 @@ def valid_model_component(self): """Return True if this can be used as a model component.""" return True + @property + @abstractmethod + def args(self): + pass + + @abstractmethod + def arg(self, index): + pass + + @abstractmethod + def nargs(self): + pass + class OmltExprScalar(OmltExpr): def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 73065289..5927695f 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -16,7 +16,7 @@ if julia_available: from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar -from omlt.base.expression import OmltExpr, OmltExprIndexed, OmltExprScalar +from omlt.base.expression import OmltExprIndexed, OmltExprScalar # from omlt.base.constraint import OmltRelation, OmltRelScalar From 7c0dcb4ff59bc5ef33f0d509d415152fdcfe20ed Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 13:33:16 -0700 Subject: [PATCH 52/60] Delete .github/workflows/python-package.yml --- .github/workflows/python-package.yml | 58 ---------------------------- 1 file changed, 58 deletions(-) delete mode 100644 .github/workflows/python-package.yml diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml deleted file mode 100644 index 55870dbc..00000000 --- a/.github/workflows/python-package.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -name: CI - -on: - push: - branches: ["main","github-actions"] - pull_request: - branches: ["main"] - workflow_dispatch: - -jobs: - tests: - name: "Python ${{ matrix.python-version }}" - runs-on: "ubuntu-latest" - - strategy: - matrix: - # python-version: ["3.7", "3.8", "3.9"] - python-version: ["3.8", "3.9", "3.10"] - - steps: - - uses: "actions/checkout@v2" - - uses: "actions/setup-python@v2" - - uses: "s-weigand/setup-conda@v1" - with: - python-version: "${{ matrix.python-version }}" - - - name: Install solvers - run: sudo apt-get install -y glpk-utils coinor-cbc - - - name: "Install dependencies" - run: | - set -xe - python -VV - python -m site - python -m pip install --upgrade pip setuptools wheel - python -m pip install --upgrade coverage[toml] virtualenv tox tox-gh-actions - conda install -c conda-forge ipopt - conda install -c conda-forge pyscipopt - - - name: "Run tox targets with lean testing environment for ${{ matrix.python-version }}" - run: "tox -re leanenv" - - - name: "Run tox targets for ${{ matrix.python-version }}" - run: "tox" - - # - name: "Run tox notebooks targets for ${{ matrix.python-version }}" - # run: | - # shopt -s globstar - # tox -e notebooks docs/**/*.ipynb - - - name: "Convert coverage" - run: "python -m coverage xml" - - - name: "Upload coverage to Codecov" - uses: "codecov/codecov-action@v2" - with: - fail_ci_if_error: true From b6fed2a09d6e6580b4863ec6e1deaa6cd46c853d Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:42:30 +0000 Subject: [PATCH 53/60] linting (1) --- src/omlt/base/__init__.py | 1 + src/omlt/base/expression.py | 13 +++++++------ src/omlt/base/var.py | 1 + src/omlt/neuralnet/layers/partition_based.py | 8 ++++++-- tests/test_block.py | 3 ++- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py index bd526fc3..3d881472 100644 --- a/src/omlt/base/__init__.py +++ b/src/omlt/base/__init__.py @@ -1,6 
+1,7 @@ DEFAULT_MODELING_LANGUAGE = "pyomo" from omlt.dependencies import julia_available + if julia_available: from omlt.base.julia import jl, jump diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py index d49b95a7..80229d99 100644 --- a/src/omlt/base/expression.py +++ b/src/omlt/base/expression.py @@ -5,6 +5,7 @@ from omlt.base import DEFAULT_MODELING_LANGUAGE import omlt.base.var as var + # from omlt.dependencies import julia_available # if julia_available: @@ -104,13 +105,13 @@ def _parse_expression_tuple_term(self, term): return term._expression elif isinstance(term, var.OmltVar): return term._pyovar - elif isinstance(term, ( - pyo.Expression, pyo.Var, int, float - )): + elif isinstance(term, (pyo.Expression, pyo.Var, int, float)): return term else: - raise TypeError("Term of expression is an unsupported type. " - "Write a better error message.") + raise TypeError( + "Term of expression is an unsupported type. " + "Write a better error message." + ) def _parse_expression_tuple(self, expr): lhs = self._parse_expression_tuple_term(expr[0]) @@ -214,7 +215,7 @@ def __ge__(self, other): # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) def __le__(self, other): - expr = (self._expression <= other) + expr = self._expression <= other return expr # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py index 5927695f..a7e5a9b8 100644 --- a/src/omlt/base/var.py +++ b/src/omlt/base/var.py @@ -17,6 +17,7 @@ from omlt.base import jump from omlt.base.julia import JuMPVarInfo, JumpVar from omlt.base.expression import OmltExprIndexed, OmltExprScalar + # from omlt.base.constraint import OmltRelation, OmltRelScalar diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 3d2ebff7..2ec724a6 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -161,9 +161,13 @@ def output_node_block(b, *output_index): b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0) b.eq_14 = pyo.Constraint( - expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression >= 0 + expr=sum(b.z2[s] for s in range(num_splits)) + + bias * (1 - b.sig)._expression + >= 0 ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + + bias + * (1 - b.sig)._expression ) diff --git a/tests/test_block.py b/tests/test_block.py index 35d58a6d..88b1e49b 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -41,6 +41,7 @@ def test_block(): assert [k for k in m.b.inputs] == ["A", "C", "D"] assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + @pytest.mark.skipif( not julia_available, reason="Test only valid when Julia is available" ) @@ -57,7 +58,7 @@ def test_jump_block(): m.b.y = OmltVar(initialize=2, format="jump") assert m.b.y.value == 2 - assert m.b.y.name == 'y' + assert m.b.y.name == "y" m.b.y.lb = 0 m.b.y.ub = 5 assert m.b.y.lb == 0 From bea9863bf90fa2d34a8581f84e40502242c68a3c Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 6 Jun 2024 20:56:23 +0000 Subject: [PATCH 54/60] linting (2) --- src/omlt/block.py | 1 + src/omlt/neuralnet/layers/partition_based.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/omlt/block.py b/src/omlt/block.py index f97b9c5c..971547b1 100644 --- 
a/src/omlt/block.py +++ b/src/omlt/block.py @@ -27,6 +27,7 @@ class is used in combination with a formulation object to construct the from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE from omlt.dependencies import julia_available + if julia_available: from omlt.base import jump diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index 2ec724a6..b43f2178 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -167,7 +167,5 @@ def output_node_block(b, *output_index): ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) - + bias - * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression ) From 09dad4785717dc4e83b09b616cdc28d0fd1bdd27 Mon Sep 17 00:00:00 2001 From: Lukas Turcani Date: Fri, 7 Jun 2024 10:21:08 +0100 Subject: [PATCH 55/60] fix long line --- src/omlt/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 8e702f0b..ae0ac537 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -2,7 +2,8 @@ OMLT ==== -OMLT is a Python package for representing machine learning models (neural networks and gradient-boosted trees) within the Pyomo optimization environment. +OMLT is a Python package for representing machine learning models +(neural networks and gradient-boosted trees) within the Pyomo optimization environment. The package provides various optimization formulations for machine learning models (such as full-space, reduced-space, and MILP) as well as an interface to import sequential Keras and general ONNX models. From a967418ca055c925d5e881503b036970c7eec1c7 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Thu, 13 Jun 2024 22:08:18 +0000 Subject: [PATCH 56/60] Fixing initial batch of ruff errors --- .../notebooks/data/build_sin_quadratic_csv.py | 14 +- src/omlt/__init__.py | 7 +- src/omlt/block.py | 20 +- src/omlt/formulation.py | 33 +- src/omlt/gbt/__init__.py | 5 +- src/omlt/gbt/gbt_formulation.py | 83 +++-- src/omlt/gbt/model.py | 37 +- src/omlt/io/__init__.py | 6 + src/omlt/io/input_bounds.py | 24 +- src/omlt/io/keras/__init__.py | 2 + src/omlt/io/keras/keras_reader.py | 29 +- src/omlt/io/onnx.py | 16 +- src/omlt/io/onnx_parser.py | 348 +++++++++++------- src/omlt/io/torch_geometric/__init__.py | 6 + .../torch_geometric/build_gnn_formulation.py | 36 +- .../torch_geometric/torch_geometric_reader.py | 48 +-- src/omlt/linear_tree/__init__.py | 9 +- src/omlt/linear_tree/lt_definition.py | 94 ++--- src/omlt/linear_tree/lt_formulation.py | 74 ++-- src/omlt/neuralnet/__init__.py | 19 +- src/omlt/neuralnet/activations/__init__.py | 22 +- src/omlt/neuralnet/activations/linear.py | 5 +- src/omlt/neuralnet/activations/relu.py | 11 +- src/omlt/neuralnet/activations/smooth.py | 30 +- src/omlt/neuralnet/layer.py | 182 ++++----- src/omlt/neuralnet/layers/__init__.py | 12 +- src/omlt/neuralnet/layers/full_space.py | 107 +++--- src/omlt/neuralnet/layers/partition_based.py | 53 +-- src/omlt/neuralnet/layers/reduced_space.py | 13 +- src/omlt/neuralnet/network_definition.py | 58 +-- src/omlt/neuralnet/nn_formulation.py | 92 ++--- src/omlt/scaling.py | 112 +++--- tests/conftest.py | 32 +- tests/gbt/test_gbt_formulation.py | 23 +- tests/io/test_input_bounds.py | 2 +- tests/io/test_keras_reader.py | 12 +- tests/io/test_onnx_parser.py | 141 ++++--- 
tests/io/test_torch_geometric.py | 16 +- tests/linear_tree/test_lt_formulation.py | 94 ++--- tests/neuralnet/test_keras.py | 42 ++- tests/neuralnet/test_layer.py | 21 +- tests/neuralnet/test_network_definition.py | 36 +- tests/neuralnet/test_nn_formulation.py | 269 +++++++------- tests/neuralnet/test_onnx.py | 20 +- tests/neuralnet/test_relu.py | 69 ++-- tests/neuralnet/train_keras_models.py | 27 +- tests/notebooks/test_run_notebooks.py | 10 +- tests/test_block.py | 33 +- tests/test_formulation.py | 7 +- tests/test_scaling.py | 61 ++- 50 files changed, 1385 insertions(+), 1137 deletions(-) diff --git a/docs/notebooks/data/build_sin_quadratic_csv.py b/docs/notebooks/data/build_sin_quadratic_csv.py index 6506022a..72e6c554 100644 --- a/docs/notebooks/data/build_sin_quadratic_csv.py +++ b/docs/notebooks/data/build_sin_quadratic_csv.py @@ -1,5 +1,4 @@ -from random import random - +import matplotlib.pyplot as plt import numpy as np import pandas as pd @@ -7,14 +6,15 @@ w = 5 x = np.linspace(-2, 2, n_samples) -df = pd.DataFrame(x, columns=["x"]) -df["y"] = ( +rng = np.random.default_rng() +sin_quads = pd.DataFrame(x, columns=["x"]) +sin_quads["y"] = ( np.sin(w * x) + x**2 - + np.array([np.random.uniform() * 0.1 for _ in range(n_samples)]) + + np.array([rng.uniform() * 0.1 for _ in range(n_samples)]) ) -plt.plot(df["x"], df["y"]) +plt.plot(sin_quads["x"], sin_quads["y"]) plt.show() -df.to_csv("sin_quadratic.csv") +sin_quads.to_csv("sin_quadratic.csv") diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 8e702f0b..3bf95df2 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -1,8 +1,9 @@ -""" -OMLT +"""OMLT. + ==== -OMLT is a Python package for representing machine learning models (neural networks and gradient-boosted trees) within the Pyomo optimization environment. +OMLT is a Python package for representing machine learning models (neural networks +and gradient-boosted trees) within the Pyomo optimization environment. The package provides various optimization formulations for machine learning models (such as full-space, reduced-space, and MILP) as well as an interface to import sequential Keras and general ONNX models. diff --git a/src/omlt/block.py b/src/omlt/block.py index a6c7bbf2..0a03838d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -1,4 +1,5 @@ -""" +"""OmltBlock. + The omlt.block module contains the implementation of the OmltBlock class. This class is used in combination with a formulation object to construct the necessary constraints and variables to represent ML models. @@ -23,7 +24,6 @@ class is used in combination with a formulation object to construct the pyo.assert_optimal_termination(status) """ -import warnings import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -32,13 +32,14 @@ class is used in combination with a formulation object to construct the @declare_custom_block(name="OmltBlock") class OmltBlockData(_BlockData): def __init__(self, component): - super(OmltBlockData, self).__init__(component) + super().__init__(component) self.__formulation = None self.__input_indexes = None self.__output_indexes = None def _setup_inputs_outputs(self, *, input_indexes, output_indexes): - """ + """Setup inputs and outputs. 
+ This function should be called by the derived class to create the inputs and outputs on the block @@ -53,10 +54,10 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.__input_indexes = input_indexes self.__output_indexes = output_indexes if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide more contextual error msg - raise ValueError( - "OmltBlock must have at least one input and at least one output." - ) + # TODO: implement this check higher up in the class hierarchy to provide + # more contextual error msg + msg = "OmltBlock must have at least one input and at least one output." + raise ValueError(msg) self.inputs_set = pyo.Set(initialize=input_indexes) self.inputs = pyo.Var(self.inputs_set, initialize=0) @@ -64,7 +65,8 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.outputs = pyo.Var(self.outputs_set, initialize=0) def build_formulation(self, formulation): - """ + """Build formulation. + Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network formulation. The formulation object can be accessed later through the diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index fd83ae86..7097fbf1 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -5,7 +5,8 @@ class _PyomoFormulationInterface(abc.ABC): - """ + """Pyomo Formulation Interface. + Base class interface for a Pyomo formulation object. This class is largely internal, and developers of new formulations should derive from _PyomoFormulation. @@ -23,42 +24,46 @@ def _set_block(self, block): @abc.abstractmethod def block(self): """Return the block associated with this formulation.""" - pass @property @abc.abstractmethod def input_indexes(self): - """Return the indices corresponding to the inputs of the + """Input indexes. + + Return the indices corresponding to the inputs of the ML model. This is a list of entries (which may be tuples for higher dimensional inputs). """ - pass @property @abc.abstractmethod def output_indexes(self): - """Return the indices corresponding to the outputs of the + """Output indexes. + + Return the indices corresponding to the outputs of the ML model. This is a list of entries (which may be tuples for higher dimensional outputs). """ - pass @abc.abstractmethod def _build_formulation(self): - """This method is called by the OmltBlock object to build the + """Build formulation. + + This method is called by the OmltBlock object to build the corresponding mathematical formulation of the model. """ - pass class _PyomoFormulation(_PyomoFormulationInterface): - """ + """Pyomo Formulation. + This is a base class for different Pyomo formulations. To create a new - formulation, inherit from this class and implement the abstract methods and properties. + formulation, inherit from this class and implement the abstract methods + and properties. """ def __init__(self): - super(_PyomoFormulation, self).__init__() + super().__init__() self.__block = None def _set_block(self, block): @@ -66,7 +71,11 @@ def _set_block(self, block): @property def block(self): - """The underlying block containing the constraints / variables for this formulation.""" + """Block. + + The underlying block containing the constraints / variables for this + formulation. 
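The interface above boils down to three members a formulation must provide: the input and output index sets, and a _build_formulation hook that OmltBlock.build_formulation invokes after wiring up the block. A toy sketch of the contract (the DoubleFormulation class is hypothetical, and subclassing the private _PyomoFormulation base is shown only for illustration; real models should use the shipped formulations):

import pyomo.environ as pyo

from omlt import OmltBlock
from omlt.formulation import _PyomoFormulation


class DoubleFormulation(_PyomoFormulation):
    """Hypothetical toy formulation enforcing y = 2 * x."""

    @property
    def input_indexes(self):
        return [0]

    @property
    def output_indexes(self):
        return [0]

    def _build_formulation(self):
        b = self.block  # the OmltBlock this formulation was attached to
        b.double = pyo.Constraint(expr=b.outputs[0] == 2 * b.inputs[0])


m = pyo.ConcreteModel()
m.b = OmltBlock()
m.b.build_formulation(DoubleFormulation())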
+ """ return self.__block() diff --git a/src/omlt/gbt/__init__.py b/src/omlt/gbt/__init__.py index f62ed421..ebf2bb1c 100644 --- a/src/omlt/gbt/__init__.py +++ b/src/omlt/gbt/__init__.py @@ -1,4 +1,5 @@ -r""" +r"""Gradient-Boosted Trees formulation. + We use the following notation to describe the gradient-boosted trees formulation: .. math:: @@ -25,3 +26,5 @@ from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel + +__all__ = ["GBTBigMFormulation", "GradientBoostedTreeModel"] diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index f2d01296..a51bec98 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -8,17 +8,17 @@ class GBTBigMFormulation(_PyomoFormulation): - """ - This class is the entry-point to build gradient-boosted trees formulations. + """This class is the entry-point to build gradient-boosted trees formulations. This class iterates over all trees in the ensemble and generates constraints to enforce splitting rules according to: - References + References: ---------- * Misic, V. "Optimization of tree ensembles." Operations Research 68.5 (2020): 1605-1624. - * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with gradient-boosted trees embedded." + * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with + gradient-boosted trees embedded." INFORMS Journal on Computing (2020). Parameters @@ -28,6 +28,7 @@ class GBTBigMFormulation(_PyomoFormulation): """ def __init__(self, gbt_model): + """Constructor.""" super().__init__() self.model_definition = gbt_model @@ -42,7 +43,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -60,8 +63,7 @@ def _build_formulation(self): def add_formulation_to_block(block, model_definition, input_vars, output_vars): - r""" - Adds the gradient-boosted trees formulation to the given Pyomo block. + r"""Adds the gradient-boosted trees formulation to the given Pyomo block. .. math:: \begin{align*} @@ -73,7 +75,8 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): \sum\limits_{l \in \text{Right}_{t,s}} z_{t,l} &\leq 1 - y_{i(s),j(s)}, && \forall t \in T, \forall s \in V_t, \\ y_{i,j} &\leq y_{i,j+1}, - && \forall i \in \left [ n \right ], \forall j \in \left [ m_i - 1 \right ], \\ + && \forall i \in \left [ n \right ], \\ + \forall j \in \left [ m_i - 1 \right ], \\ x_{i} &\geq v_{i,0} + \sum\limits_{j=1}^{m_i} \left (v_{i,j} - v_{i,j-1} \right ) \left ( 1 - y_{i,j} \right ), @@ -84,11 +87,12 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): \end{align*} - References + References: ---------- * Misic, V. "Optimization of tree ensembles." Operations Research 68.5 (2020): 1605-1624. - * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with gradient-boosted trees embedded." + * Mistry, M., et al. "Mixed-integer convex nonlinear optimization with + gradient-boosted trees embedded." INFORMS Journal on Computing (2020). 
Parameters @@ -142,7 +146,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): tree_ids = set(nodes_tree_ids) feature_ids = set(nodes_feature_ids) - continuous_vars = dict() + continuous_vars = {} for var_idx in input_vars: var = input_vars[var_idx] @@ -154,7 +158,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): domain=pe.Reals, ) - branch_value_by_feature_id = dict() + branch_value_by_feature_id = {} branch_value_by_feature_id = collections.defaultdict(list) for f in feature_ids: @@ -164,15 +168,17 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): y_index = [ (f, bi) - for f in continuous_vars.keys() + for f in continuous_vars for bi, _ in enumerate(branch_value_by_feature_id[f]) ] block.y = pe.Var(y_index, domain=pe.Binary) @block.Constraint(tree_ids) def single_leaf(b, tree_id): - r""" - Add constraint to ensure that only one leaf per tree is active, Mistry et al. Equ. (3b). + r"""Single leaf constraint. + + Add constraint to ensure that only one leaf per tree is active, + Mistry et al. Equ. (3b). .. math:: \begin{align*} \sum\limits_{l \in L_t} z_{t,l} &= 1, && \forall t \in T @@ -198,22 +204,28 @@ def _branching_y(tree_id, branch_node_id): feature_id = nodes_feature_ids[node_mask] branch_value = nodes_values[node_mask] if len(branch_value) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a branch value." + msg = ( + "The given tree_id and branch_node_id do not uniquely identify a" + " branch value." ) + raise ValueError(msg) if len(feature_id) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a feature." + msg = ( + "The given tree_id and branch_node_id do not uniquely identify a" + " feature." ) + raise ValueError(msg) feature_id = feature_id[0] branch_value = branch_value[0] (branch_y_idx,) = np.where( branch_value_by_feature_id[feature_id] == branch_value ) if len(branch_y_idx) != 1: - raise ValueError( - f"The given tree_id and branch_node_id do not uniquely identify a branch index." + msg = ( + "The given tree_id and branch_node_id do not uniquely identify a branch" + " index." ) + raise ValueError(msg) return block.y[feature_id, branch_y_idx[0]] def _sum_of_z_l(tree_id, start_node_id): @@ -235,7 +247,8 @@ def _sum_of_z_l(tree_id, start_node_id): @block.Constraint(nodes_tree_branch_ids) def left_split(b, tree_id, branch_node_id): - r""" + r"""Left split. + Add constraint to activate all left splits leading to an active leaf, Mistry et al. Equ. (3c). .. math:: @@ -252,7 +265,8 @@ def left_split(b, tree_id, branch_node_id): @block.Constraint(nodes_tree_branch_ids) def right_split(b, tree_id, branch_node_id): - r""" + r"""Right split. + Add constraint to activate all right splits leading to an active leaf, Mistry et al. Equ. (3d). .. math:: @@ -269,8 +283,8 @@ def right_split(b, tree_id, branch_node_id): @block.Constraint(y_index) def order_y(b, feature_id, branch_y_idx): - r""" - Add constraint to activate splits in the correct order. + r"""Add constraint to activate splits in the correct order. + Mistry et al. Equ. (3e). .. math:: \begin{align*} @@ -285,8 +299,11 @@ def order_y(b, feature_id, branch_y_idx): @block.Constraint(y_index) def var_lower(b, feature_id, branch_y_idx): - r""" - Add constraint to link discrete tree splits to lower bound of continuous variables. + r"""Lower bound constraint. + + Add constraint to link discrete tree splits to lower bound of continuous + variables. 
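The constraints above are assembled by GBTBigMFormulation from a GradientBoostedTreeModel. A hedged usage sketch: "ensemble.onnx" is a placeholder for an ONNX export of a trained tree ensemble (for example produced via skl2onnx), and the bounds dict is illustrative; see GradientBoostedTreeModel's docstring for the expected layout.

import onnx
import pyomo.environ as pyo

from omlt import OmltBlock
from omlt.gbt import GBTBigMFormulation, GradientBoostedTreeModel

model = onnx.load("ensemble.onnx")  # placeholder path
gbt = GradientBoostedTreeModel(model, scaled_input_bounds={0: (0.0, 1.0)})

m = pyo.ConcreteModel()
m.gbt = OmltBlock()
m.gbt.build_formulation(GBTBigMFormulation(gbt))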
+ Mistry et al. Equ. (4a). .. math:: \begin{align*} @@ -304,8 +321,10 @@ def var_lower(b, feature_id, branch_y_idx): @block.Constraint(y_index) def var_upper(b, feature_id, branch_y_idx): - r""" - Add constraint to link discrete tree splits to upper bound of continuous variables. + r"""Upper bound constraint. + + Add constraint to link discrete tree splits to upper bound of continuous + variables. Mistry et al. Equ. (4b). .. math:: \begin{align*} @@ -322,8 +341,8 @@ def var_upper(b, feature_id, branch_y_idx): @block.Constraint() def tree_mean_value(b): - r""" - Add constraint to link block output tree model mean. + r"""Add constraint to link block output tree model mean. + Mistry et al. Equ. (3a). .. math:: \begin{align*} @@ -344,7 +363,7 @@ def tree_mean_value(b): def _node_attributes(node): - attr = dict() + attr = {} for at in node.attribute: attr[at.name] = at return attr diff --git a/src/omlt/gbt/model.py b/src/omlt/gbt/model.py index 9bac2590..0fbc3f7e 100644 --- a/src/omlt/gbt/model.py +++ b/src/omlt/gbt/model.py @@ -1,6 +1,7 @@ class GradientBoostedTreeModel: def __init__(self, onnx_model, scaling_object=None, scaled_input_bounds=None): - """ + """Constructor. + Create a network definition object used to create the gradient-boosted trees formulation in Pyomo @@ -25,27 +26,27 @@ def __init__(self, onnx_model, scaling_object=None, scaled_input_bounds=None): @property def onnx_model(self): - """Returns underlying onnx model of the tree model being used""" + """Returns underlying onnx model of the tree model being used.""" return self.__model @property def n_inputs(self): - """Returns the number of input variables""" + """Returns the number of input variables.""" return self.__n_inputs @property def n_outputs(self): - """Returns the number of output variables""" + """Returns the number of output variables.""" return self.__n_outputs @property def scaling_object(self): - """Return an instance of the scaling object that supports the ScalingInterface""" + """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Return a list of tuples containing lower and upper bounds of tree ensemble inputs""" + """Return a list of tuples of lower and upper bounds of tree ensemble inputs.""" return self.__scaled_input_bounds @scaling_object.setter @@ -54,27 +55,31 @@ def scaling_object(self, scaling_object): def _model_num_inputs(model): - """Returns the number of input variables""" + """Returns the number of input variables.""" graph = model.graph if len(graph.input) != 1: - raise ValueError( - f"Model graph input field is multi-valued {graph.input}. A single value is required." - ) + msg = ( + f"Model graph input field is multi-valued {graph.input}. A single value" + " is required." + ) + raise ValueError(msg) return _tensor_size(graph.input[0]) def _model_num_outputs(model): - """Returns the number of output variables""" + """Returns the number of output variables.""" graph = model.graph if len(graph.output) != 1: - raise ValueError( - f"Model graph output field is multi-valued {graph.output}. A single value is required." - ) + msg = ( + f"Model graph output field is multi-valued {graph.output}. A single value" + " is required." + )
+ raise ValueError(msg) return _tensor_size(graph.output[0]) def _tensor_size(tensor): - """Returns the size of an input tensor""" + """Returns the size of an input tensor.""" tensor_type = tensor.type.tensor_type size = None dim_values = [ @@ -85,7 +86,9 @@ def _tensor_size(tensor): if len(dim_values) == 1: size = dim_values[0] elif dim_values == []: - raise ValueError(f"Tensor {tensor} has no positive dimensions.") + msg = f"Tensor {tensor} has no positive dimensions." + raise ValueError(msg) else: - raise ValueError(f"Tensor {tensor} has multiple positive dimensions.") + msg = f"Tensor {tensor} has multiple positive dimensions." + raise ValueError(msg) return size diff --git a/src/omlt/io/__init__.py b/src/omlt/io/__init__.py index 6933e312..b568fb90 100644 --- a/src/omlt/io/__init__.py +++ b/src/omlt/io/__init__.py @@ -14,3 +14,9 @@ if keras_available: from omlt.io.keras import load_keras_sequential + +__all__ = [ + "keras_available", "onnx_available", "torch_available", "torch_geometric_available", + "load_onnx_neural_network", "load_onnx_neural_network_with_bounds", + "write_onnx_model_with_bounds", "load_keras_sequential" +] diff --git a/src/omlt/io/input_bounds.py b/src/omlt/io/input_bounds.py index 7bdb8ea8..9826d498 100644 --- a/src/omlt/io/input_bounds.py +++ b/src/omlt/io/input_bounds.py @@ -1,20 +1,17 @@ import json +from pathlib import Path def write_input_bounds(input_bounds_filename, input_bounds): - """ - Write the specified input bounds to the given file. - """ + """Write the specified input bounds to the given file.""" input_bounds = _prepare_input_bounds(input_bounds) - with open(input_bounds_filename, "w") as f: + with Path.open(input_bounds_filename, "w") as f: json.dump(input_bounds, f) def load_input_bounds(input_bounds_filename): - """ - Read the input bounds from the given file. - """ - with open(input_bounds_filename, "r") as f: + """Read the input bounds from the given file.""" + with Path.open(input_bounds_filename) as f: raw_input_bounds = json.load(f) return dict(_parse_raw_input_bounds(d) for d in raw_input_bounds) @@ -26,12 +23,11 @@ def _prepare_input_bounds(input_bounds): {"key": i, "lower_bound": lb, "upper_bound": ub} for i, (lb, ub) in enumerate(input_bounds) ] - else: - # users should have passed a dict-like - return [ - {"key": key, "lower_bound": lb, "upper_bound": ub} - for key, (lb, ub) in input_bounds.items() - ] + # users should have passed a dict-like + return [ + {"key": key, "lower_bound": lb, "upper_bound": ub} + for key, (lb, ub) in input_bounds.items() + ] def _parse_raw_input_bounds(raw): diff --git a/src/omlt/io/keras/__init__.py b/src/omlt/io/keras/__init__.py index 72f6931a..bd9bbc3e 100644 --- a/src/omlt/io/keras/__init__.py +++ b/src/omlt/io/keras/__init__.py @@ -1 +1,3 @@ from omlt.io.keras.keras_reader import load_keras_sequential + +__all__ = ["load_keras_sequential"] diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index daccf68b..3ec0aaaa 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -1,4 +1,4 @@ -import tensorflow.keras as keras +from tensorflow import keras from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition @@ -7,7 +7,8 @@ def load_keras_sequential( nn, scaling_object=None, scaled_input_bounds=None, unscaled_input_bounds=None ): - """ + """Load Keras sequential network. + Load a keras neural network model (built with Sequential) into an OMLT network definition object. 
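The two helpers above serialize bounds as a list of {"key": ..., "lower_bound": ..., "upper_bound": ...} records. A small round-trip sketch; pathlib.Path objects are passed because the helpers open files through Path.open, as written above:

from pathlib import Path

from omlt.io.input_bounds import load_input_bounds, write_input_bounds

bounds_file = Path("input_bounds.json")
write_input_bounds(bounds_file, {0: (-1.0, 1.0), 1: (0.0, 2.0)})
print(load_input_bounds(bounds_file))  # bounds keyed by input index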
This network definition object can be used in different formulations. @@ -17,8 +18,9 @@ def load_keras_sequential( nn : keras.model A keras model that was built with Sequential scaling_object : instance of ScalingInterface or None - Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + Provide an instance of a scaling object to use to scale inputs --> scaled_inputs + and scaled_outputs --> outputs. If None, no scaling is performed. + See scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). If None, then no bounds @@ -29,7 +31,7 @@ def load_keras_sequential( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- NetworkDefinition """ @@ -45,15 +47,16 @@ def load_keras_sequential( prev_layer = InputLayer([n_inputs]) net.add_layer(prev_layer) - for l in nn.layers: - cfg = l.get_config() - if not isinstance(l, keras.layers.Dense): - raise ValueError( - "Layer type {} encountered. The function load_keras_sequential " - "only supports dense layers at this time. Consider using " - "ONNX and the ONNX parser".format(type(l)) + for layer in nn.layers: + cfg = layer.get_config() + if not isinstance(layer, keras.layers.Dense): + msg = ( + f"Layer type {type(layer)} encountered. The load_keras_sequential " + "function only supports dense layers at this time. Consider using " + "ONNX and the ONNX parser." ) - weights, biases = l.get_weights() + raise TypeError(msg) + weights, biases = layer.get_weights() n_layer_inputs, n_layer_nodes = weights.shape dense_layer = DenseLayer( diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index d41983c2..9676ea31 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -1,4 +1,3 @@ -import json from pathlib import Path import onnx @@ -8,8 +7,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): - """ - Write the ONNX model to the given file. + """Write the ONNX model to the given file. If `input_bounds` is not None, write it alongside the ONNX model. @@ -23,7 +21,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): bounds on the input variables """ if onnx_model is not None: - with open(filename, "wb") as f: + with Path.open(filename, "wb") as f: f.write(onnx_model.SerializeToString()) if input_bounds is not None: @@ -31,15 +29,14 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): def load_onnx_neural_network_with_bounds(filename): - """ - Load a NetworkDefinition with input bounds from an onnx object. + """Load a NetworkDefinition with input bounds from an onnx object. Parameters ---------- filename : str the path where the ONNX model and input bounds file are written - Returns + Returns: ------- NetworkDefinition """ @@ -53,8 +50,7 @@ def load_onnx_neural_network_with_bounds(filename): def load_onnx_neural_network(onnx, scaling_object=None, input_bounds=None): - """ - Load a NetworkDefinition from an onnx object. + """Load a NetworkDefinition from an onnx object. 
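Since load_keras_sequential above accepts only Dense layers, a minimal end-to-end sketch looks like this (an untrained toy model; the bounds keys index the flattened inputs):

import pyomo.environ as pyo
from tensorflow import keras

from omlt import OmltBlock
from omlt.io.keras import load_keras_sequential
from omlt.neuralnet import FullSpaceNNFormulation

nn = keras.Sequential(
    [
        keras.Input(shape=(2,)),
        keras.layers.Dense(4, activation="relu"),
        keras.layers.Dense(1),
    ]
)
net = load_keras_sequential(nn, unscaled_input_bounds={0: (-1.0, 1.0), 1: (-1.0, 1.0)})

m = pyo.ConcreteModel()
m.nn = OmltBlock()
m.nn.build_formulation(FullSpaceNNFormulation(net))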
Parameters ---------- @@ -63,7 +59,7 @@ def load_onnx_neural_network(onnx, scaling_object=None, input_bounds=None): scaling_object : instance of object supporting ScalingInterface input_bounds : list of tuples - Returns + Returns: ------- NetworkDefinition """ diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py index 511261c0..979b437c 100644 --- a/src/omlt/io/onnx_parser.py +++ b/src/omlt/io/onnx_parser.py @@ -1,4 +1,5 @@ import math +from typing import Any import numpy as np from onnx import numpy_helper @@ -14,11 +15,23 @@ _ACTIVATION_OP_TYPES = ["Relu", "Sigmoid", "LogSoftmax", "Tanh", "Softplus"] _POOLING_OP_TYPES = ["MaxPool"] - +DENSE_INPUT_DIMENSIONS = 2 +GEMM_INPUT_DIMENSIONS = 3 +CONV_INPUT_DIMENSIONS = [2, 3] +TWO_D_IMAGE_W_CHANNELS = 3 +RESHAPE_INPUT_DIMENSIONS = 2 +MAXPOOL_INPUT_DIMENSIONS = 1 +MAXPOOL_INPUT_OUTPUT_W_BATCHES = 4 +# Attribute types enum: +ATTR_FLOAT = 1 +ATTR_INT = 2 +ATTR_TENSOR = 4 +ATTR_INTS = 7 class NetworkParser: - """ - References + """Network Parser. + + References: ---------- * https://github.com/onnx/onnx/blob/master/docs/Operators.md """ @@ -42,48 +55,50 @@ def parse_network(self, graph, scaling_object, input_bounds): self._graph = graph # initializers contain constant data - initializers = dict() + initializers = {} for initializer in self._graph.initializer: initializers[initializer.name] = numpy_helper.to_array(initializer) self._initializers = initializers # Build graph - nodes = dict() - nodes_by_output = dict() + nodes = {} + nodes_by_output = {} inputs = set() outputs = set() - self._node_map = dict() + self._node_map = {} network = NetworkDefinition( scaling_object=scaling_object, scaled_input_bounds=input_bounds ) network_input = None - for input in self._graph.input: - nodes[input.name] = ("input", input.type, []) - nodes_by_output[input.name] = input.name - inputs.add(input.name) + for input_node in self._graph.input: + nodes[input_node.name] = ("input", input_node.type, []) + nodes_by_output[input_node.name] = input_node.name + inputs.add(input_node.name) # onnx inputs are tensors. Flatten tensors to a vector. dim_value = None size = [] - for dim in input.type.tensor_type.shape.dim: + for dim in input_node.type.tensor_type.shape.dim: if dim.dim_value > 0: if dim_value is None: dim_value = 1 size.append(dim.dim_value) dim_value *= dim.dim_value if dim_value is None: - raise ValueError( + msg = ( f'All dimensions in graph "{graph.name}" input tensor have 0 value.' ) + raise ValueError(msg) assert network_input is None network_input = InputLayer(size) - self._node_map[input.name] = network_input + self._node_map[input_node.name] = network_input network.add_layer(network_input) if network_input is None: - raise ValueError(f'No valid input layer found in graph "{graph.name}".') + msg = f'No valid input layer found in graph "{graph.name}".' 
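Pairing the two functions above gives a simple persistence pattern. A sketch, where onnx_model stands in for an already-loaded onnx.ModelProto and the bounds are illustrative:

from pathlib import Path

from omlt.io import (
    load_onnx_neural_network_with_bounds,
    write_onnx_model_with_bounds,
)

model_file = Path("network.onnx")  # placeholder path
write_onnx_model_with_bounds(
    model_file, onnx_model=onnx_model, input_bounds={0: (0.0, 1.0)}
)
net = load_onnx_neural_network_with_bounds(model_file)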
+ raise ValueError(msg) self._nodes = nodes self._nodes_by_output = nodes_by_output @@ -97,37 +112,39 @@ def parse_network(self, graph, scaling_object, input_bounds): for output in node.output: nodes_by_output[output] = node.name - self._constants = dict() + self._constants = {} for node in self._graph.node: # add node not connected to anything self._nodes[node.name] = ("node", node, []) # Map inputs by their output name node_inputs = [ - nodes_by_output[input] - for input in node.input - if input not in initializers + nodes_by_output[input_node] + for input_node in node.input + if input_node not in initializers ] if node_inputs: # Now connect inputs to the current node - for input in node_inputs: - self._nodes[input][2].append(node.name) + for input_node in node_inputs: + self._nodes[input_node][2].append(node.name) elif node.op_type == "Constant": for output in node.output: value = _parse_constant_value(node) self._constants[output] = value else: - raise ValueError( - f'Nodes must have inputs or have op_type "Constant". Node "{node.name}" has no inputs and op_type "{node.op_type}".' + msg = ( + 'Nodes must have inputs or have op_type "Constant". Node' + f' "{node.name}" has no inputs and op_type "{node.op_type}".' ) + raise ValueError(msg) # traverse graph self._node_stack = list(inputs) - self._weights = dict() - self._biases = dict() - self._activations = dict() + self._weights = {} + self._biases = {} + self._activations = {} while self._node_stack: node_name = self._node_stack.pop() @@ -141,8 +158,8 @@ def parse_network(self, graph, scaling_object, input_bounds): for layer_input in new_layer_inputs: network.add_edge(layer_input, new_layer) else: - for next in next_nodes: - self._node_stack.append(next) + for next_node in next_nodes: + self._node_stack.append(next_node) return network @@ -167,41 +184,55 @@ def _visit_node(self, node, next_nodes): node, next_nodes ) else: - raise Exception(f"Unhandled node type {node.op_type}") + msg = f"Unhandled node type {node.op_type}" + raise ValueError(msg) - for next in next_nodes: - self._node_stack.append(next) + for next_node in next_nodes: + self._node_stack.append(next_node) return new_layer, new_layer_inputs - def _consume_dense_nodes(self, node, next_nodes): + def _consume_dense_nodes( + self, node: Any, next_nodes: Any + ) -> tuple[Any, Any, list[Any]]: """Starting from a MatMul node, consume nodes to form a dense Ax + b node.""" if node.op_type != "MatMul": - raise ValueError( - f"{node.name} is a {node.op_type} node, only MatMul nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " MatMul nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) != 2: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + + if len(node.input) != DENSE_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with 2" + " input dimensions can be used as starting points for parsing." 
) + raise ValueError(msg) [in_0, in_1] = list(node.input) input_layer, transformer = self._node_input_and_transformer(in_0) node_weights = self._initializers[in_1] if len(next_nodes) != 1: - raise ValueError( - f"Next nodes must have length 1, {next_nodes} has length {len(next_nodes)}" + msg = ( + f"Next nodes must have length 1, {next_nodes} has length" + f" {len(next_nodes)}" ) + raise ValueError(msg) # expect 'Add' node ahead type_, node, maybe_next_nodes = self._nodes[next_nodes[0]] if type_ != "node": - raise TypeError(f"Expected a node next, got a {type_} instead.") + msg = f"Expected a node next, got a {type_} instead." + raise TypeError(msg) if node.op_type != "Add": - raise ValueError( - f"The first node to be consumed, {node.name}, is a {node.op_type} node. Only Add nodes are supported." + msg = ( + f"The first node to be consumed, {node.name}, is a {node.op_type} node." + " Only Add nodes are supported." ) + raise ValueError(msg) # extract biases next_nodes = maybe_next_nodes @@ -212,18 +243,20 @@ def _consume_dense_nodes(self, node, next_nodes): elif in_1 in self._initializers: node_biases = self._initializers[in_1] else: - raise ValueError(f"Node inputs were not found in graph initializers.") - - if len(node_weights.shape) != 2: - raise ValueError(f"Node weights must be a 2-dimensional matrix.") + msg = "Node inputs were not found in graph initializers." + raise ValueError(msg) + if len(node_weights.shape) != DENSE_INPUT_DIMENSIONS: + msg = "Node weights must be a 2-dimensional matrix." + raise ValueError(msg) if node_weights.shape[1] != node_biases.shape[0]: - raise ValueError( - f"Node weights has {node_weights.shape[1]} columns; node biases has {node_biases.shape[0]} rows. These must be equal." + msg = ( + f"Node weights has {node_weights.shape[1]} columns; node biases has " + f"{node_biases.shape[0]} rows. These must be equal." ) + raise ValueError(msg) if len(node.output) != 1: - raise ValueError( - f"Node output is {node.output} but should be a single value." - ) + msg = f"Node output is {node.output} but should be a single value." + raise ValueError(msg) input_output_size = _get_input_output_size(input_layer, transformer) @@ -254,13 +287,18 @@ def _consume_dense_nodes(self, node, next_nodes): def _consume_gemm_dense_nodes(self, node, next_nodes): """Starting from a Gemm node, consume nodes to form a dense aAB + bC node.""" if node.op_type != "Gemm": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Gemm nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Gemm nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) != 3: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 3 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) != GEMM_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 3 input dimensions can be used as starting points for parsing." ) + raise ValueError(msg) attr = _collect_attributes(node) alpha = attr["alpha"] @@ -304,20 +342,26 @@ def _consume_gemm_dense_nodes(self, node, next_nodes): return next_nodes, dense_layer, [input_layer] def _consume_conv_nodes(self, node, next_nodes): - """ + """Consume Conv nodes. + Starting from a Conv node, consume nodes to form a convolution node with (optional) activation function. 
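Numerically, the MatMul-plus-Add pair consumed above and the Gemm node handled next compute the same dense map; Gemm only adds the alpha/beta scalings and optional transposes. A numpy sketch of both (illustrative, not the parser's code path):

import numpy as np


def dense(x, w, b):
    # MatMul followed by Add: x @ W + b
    return x @ w + b


def gemm(a, b, c, alpha=1.0, beta=1.0, trans_a=False, trans_b=False):
    # Gemm: alpha * op(A) @ op(B) + beta * C
    a = a.T if trans_a else a
    b = b.T if trans_b else b
    return alpha * (a @ b) + beta * c


x = np.array([[1.0, 2.0]])
w = np.array([[1.0, -1.0], [0.5, 2.0]])  # shape (inputs, outputs)
bias = np.array([0.1, -0.2])
print(dense(x, w, bias))
print(gemm(x, w, bias[None, :]))  # identical result for alpha = beta = 1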
""" if node.op_type != "Conv": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Conv nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Conv nodes was called. This could indicate changes in the" + " network being parsed." ) - if len(node.input) not in [2, 3]: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) not in CONV_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 2 or 3 input dimensions can be used as starting points for parsing." ) + raise ValueError(msg) - if len(node.input) == 2: + if len(node.input) == CONV_INPUT_DIMENSIONS[0]: [in_0, in_1] = list(node.input) in_2 = None else: @@ -327,51 +371,59 @@ def _consume_conv_nodes(self, node, next_nodes): weights = self._initializers[in_1] [out_channels, in_channels, *kernel_shape] = weights.shape - if in_2 is None: - biases = np.zeros(out_channels) - else: - biases = self._initializers[in_2] + biases = np.zeros(out_channels) if in_2 is None else self._initializers[in_2] attr = _collect_attributes(node) strides = attr["strides"] # check only kernel shape and stride are set if attr["kernel_shape"] != kernel_shape: - raise ValueError( - f"Kernel shape attribute {attr['kernel_shape']} does not match initialized kernel shape {kernel_shape}." + msg = ( + f"Kernel shape attribute {attr['kernel_shape']} does not match" + f" initialized kernel shape {kernel_shape}." ) + raise ValueError(msg) if len(kernel_shape) != len(strides): - raise ValueError( - f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal." + msg = ( + f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} " + f"dimensions. Strides attribute has {len(strides)} dimensions. " + "These must be equal." ) + raise ValueError(msg) if len(input_output_size) != len(kernel_shape) + 1: - raise ValueError( - f"Input/output size ({input_output_size}) must have one more dimension than initialized kernel shape ({kernel_shape})." + msg = ( + f"Input/output size ({input_output_size}) must have one more dimension " + f"than initialized kernel shape ({kernel_shape})." ) + raise ValueError(msg) # Check input, output have correct dimensions if biases.shape != (out_channels,): - raise ValueError( - f"Biases shape {biases.shape} must match output weights channels {(out_channels,)}." + msg = ( + f"Biases shape {biases.shape} must match output weights channels" + f" {(out_channels,)}." ) + raise ValueError(msg) if in_channels != input_output_size[0]: - raise ValueError( - f"Input/output size ({input_output_size}) first dimension must match input weights channels ({in_channels})." + msg = ( + f"Input/output size ({input_output_size}) first dimension must match " + f"input weights channels ({in_channels})." ) + raise ValueError(msg) # Other attributes are not supported if "dilations" in attr and attr["dilations"] != [1, 1]: - raise ValueError( - f"{node} has non-identity dilations ({attr['dilations']}). This is not supported." + msg = ( + f"{node} has non-identity dilations ({attr['dilations']}). This is not" + " supported." ) + raise ValueError(msg) if attr["group"] != 1: - raise ValueError( - f"{node} has multiple groups ({attr['group']}). This is not supported." 
- ) + msg = f"{node} has multiple groups ({attr['group']}). This is unsupported." + raise ValueError(msg) if "pads" in attr and np.any(attr["pads"]): - raise ValueError( - f"{node} has non-zero pads ({attr['pads']}). This is not supported." - ) + msg = f"{node} has non-zero pads ({attr['pads']}). This is not supported." + raise ValueError(msg) # generate new nodes for the node output padding = 0 @@ -391,10 +443,9 @@ def _consume_conv_nodes(self, node, next_nodes): # convolute image one channel at the time # expect 2d image with channels - if len(input_output_size) != 3: - raise ValueError( - f"Expected a 2D image with channels, got {input_output_size}." - ) + if len(input_output_size) != TWO_D_IMAGE_W_CHANNELS: + msg = f"Expected a 2D image with channels, got {input_output_size}." + raise ValueError(msg) conv_layer = ConvLayer2D( input_output_size, @@ -412,13 +463,18 @@ def _consume_conv_nodes(self, node, next_nodes): def _consume_reshape_nodes(self, node, next_nodes): """Parse a Reshape node.""" if node.op_type != "Reshape": - raise ValueError( - f"{node.name} is a {node.op_type} node, only Reshape nodes can be used as starting points for consumption." - ) - if len(node.input) != 2: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." - ) + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " Reshape nodes was called. This could indicate changes in the" + " network being parsed." + ) + raise ValueError(msg) + if len(node.input) != RESHAPE_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with" + " 2 input dimensions can be used as starting points for parsing." + ) + raise ValueError(msg) [in_0, in_1] = list(node.input) input_layer = self._node_map[in_0] new_shape = self._constants[in_1] @@ -428,37 +484,48 @@ def _consume_reshape_nodes(self, node, next_nodes): return next_nodes def _consume_pool_nodes(self, node, next_nodes): - """ + """Consume MaxPool nodes. + Starting from a MaxPool node, consume nodes to form a pooling node with (optional) activation function. """ if node.op_type not in _POOLING_OP_TYPES: - raise ValueError( - f"{node.name} is a {node.op_type} node, only MaxPool nodes can be used as starting points for consumption." + msg = ( + f"{node.name} is a {node.op_type} node, but the parsing method for" + " MaxPool nodes was called. This could indicate changes in the" + " network being parsed." ) + raise ValueError(msg) pool_func_name = "max" - # ONNX network should not contain indices output from MaxPool - not supported by OMLT + # ONNX network should not contain indices output from MaxPool - + # not supported by OMLT if len(node.output) != 1: - raise ValueError( - f"The ONNX contains indices output from MaxPool. This is not supported by OMLT." + msg = ( + "The ONNX network contains indices output from MaxPool. This is not" + " supported by OMLT." ) - if len(node.input) != 1: - raise ValueError( - f"{node.name} input has {len(node.input)} dimensions, only nodes with 1 input dimension can be used as starting points for consumption." + raise ValueError(msg) + if len(node.input) != MAXPOOL_INPUT_DIMENSIONS: + msg = ( + f"{node.name} input has {len(node.input)} dimensions, only nodes with " + "1 input dimension can be used as starting points for parsing." 
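With pads checked to be zero and dilations checked to be identity above, the output spatial size of the convolution follows the usual rule out = floor((in - kernel) / stride) + 1 per dimension. A quick sketch:

def conv_out(in_size, kernel, stride):
    # Zero padding, unit dilation: floor((in - kernel) / stride) + 1
    return (in_size - kernel) // stride + 1


print(conv_out(28, 3, 1))  # 26
print(conv_out(28, 3, 2))  # 13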
) - + raise ValueError(msg) input_layer, transformer = self._node_input_and_transformer(node.input[0]) input_output_size = _get_input_output_size(input_layer, transformer) # currently only support 2D image with channels. - if len(input_output_size) == 4: + if len(input_output_size) == MAXPOOL_INPUT_OUTPUT_W_BATCHES: # this means there is an extra dimension for number of batches - # batches not supported, so only accept if they're not there or there is only 1 batch + # batches not supported, so only accept if they're not there or there is + # only 1 batch if input_output_size[0] != 1: - raise ValueError( - f"{node.name} has {input_output_size[0]} batches, only a single batch is supported." + msg = ( + f"{node.name} has {input_output_size[0]} batches, only single batch" + " is supported." ) + raise ValueError(msg) input_output_size = input_output_size[1:] in_channels = input_output_size[0] @@ -471,37 +538,46 @@ def _consume_pool_nodes(self, node, next_nodes): # check only kernel shape, stride, storage order are set # everything else is not supported if "dilations" in attr and attr["dilations"] != [1, 1]: - raise ValueError( - f"{node.name} has non-identity dilations ({attr['dilations']}). This is not supported." + msg = ( + f"{node.name} has non-identity dilations ({attr['dilations']})." + " This is not supported." ) + raise ValueError(msg) if "pads" in attr and np.any(attr["pads"]): - raise ValueError( - f"{node.name} has non-zero pads ({attr['pads']}). This is not supported." + msg = ( + f"{node.name} has non-zero pads ({attr['pads']})." + " This is not supported." ) + raise ValueError(msg) if ("auto_pad" in attr) and (attr["auto_pad"] != "NOTSET"): - raise ValueError( - f"{node.name} has autopad set ({attr['auto_pad']}). This is not supported." + msg = ( + f"{node.name} has autopad set ({attr['auto_pad']})." + " This is not supported." ) + raise ValueError(msg) if len(kernel_shape) != len(strides): - raise ValueError( - f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal." + msg = ( + f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. " + f"Strides attribute has {len(strides)} dimensions. These must be equal." ) + raise ValueError(msg) if len(input_output_size) != len(kernel_shape) + 1: - raise ValueError( - f"Input/output size ({input_output_size}) must have one more dimension than kernel shape ({kernel_shape})." + msg = ( + f"Input/output size ({input_output_size}) must have one more dimension" + f" than kernel shape ({kernel_shape})." 
) + raise ValueError(msg) output_shape_wrapper = math.floor if "ceil_mode" in attr and attr["ceil_mode"] == 1: output_shape_wrapper = math.ceil - output_size = [in_channels] - for i in range(1, len(input_output_size)): - output_size.append( - output_shape_wrapper( - (input_output_size[i] - kernel_shape[i - 1]) / strides[i - 1] + 1 - ) + output_size = [in_channels] + [ + output_shape_wrapper( + (input_output_size[i] - kernel_shape[i - 1]) / strides[i - 1] + 1 ) + for i in range(1, len(input_output_size)) + ] activation = "linear" if len(next_nodes) == 1: @@ -532,31 +608,29 @@ def _node_input_and_transformer(self, node_name): if isinstance(maybe_layer, tuple): transformer, input_layer = maybe_layer return input_layer, transformer - else: - return maybe_layer, None + return maybe_layer, None def _collect_attributes(node): - r = dict() + r = {} for attr in node.attribute: - if attr.type == 1: # FLOAT + if attr.type == ATTR_FLOAT: # FLOAT r[attr.name] = attr.f - elif attr.type == 2: # INT + elif attr.type == ATTR_INT: # INT r[attr.name] = int(attr.i) - elif attr.type == 4: # TENSOR + elif attr.type == ATTR_TENSOR: # TENSOR r[attr.name] = numpy_helper.to_array(attr.t) - pass - elif attr.type == 7: # INTS + elif attr.type == ATTR_INTS: # INTS r[attr.name] = list(attr.ints) else: - raise RuntimeError(f"unhandled attribute type {attr.type}") + msg = f"unhandled attribute type {attr.type}" + raise RuntimeError(msg) return r def _parse_constant_value(node): attr = _collect_attributes(node) - value = attr["value"] - return value + return attr["value"] def _get_input_output_size(input_layer, transformer): diff --git a/src/omlt/io/torch_geometric/__init__.py b/src/omlt/io/torch_geometric/__init__.py index ae94d147..4b908c7a 100644 --- a/src/omlt/io/torch_geometric/__init__.py +++ b/src/omlt/io/torch_geometric/__init__.py @@ -5,3 +5,9 @@ from omlt.io.torch_geometric.torch_geometric_reader import ( load_torch_geometric_sequential, ) + +__all__ = [ + "gnn_with_fixed_graph", + "gnn_with_non_fixed_graph", + "load_torch_geometric_sequential", +] diff --git a/src/omlt/io/torch_geometric/build_gnn_formulation.py b/src/omlt/io/torch_geometric/build_gnn_formulation.py index 6e2e04ee..66e48775 100644 --- a/src/omlt/io/torch_geometric/build_gnn_formulation.py +++ b/src/omlt/io/torch_geometric/build_gnn_formulation.py @@ -15,9 +15,11 @@ def gnn_with_non_fixed_graph( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """ - Build formulation for a torch_geometric graph neural network model (built with Sequential). - Since the input graph is not fixed, the elements in adjacency matrix are decision variables. + """Graph neural network with non-fixed graph. + + Build formulation for a torch_geometric graph neural network model (built with + Sequential). Since the input graph is not fixed, the elements in adjacency matrix + are decision variables. Parameters ---------- @@ -29,7 +31,8 @@ def gnn_with_non_fixed_graph( The number of nodes of input graph scaling_object : instance of ScalingInterface or None Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + and scaled_outputs --> outputs. If None, no scaling is performed. See + scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). 
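The MaxPool sizing above uses the same rule, except that ceil_mode switches the rounding. A sketch of the computation the list comprehension performs for each spatial dimension:

import math


def pool_out(in_size, kernel, stride, ceil_mode=False):
    # floor((in - kernel) / stride + 1), or ceil(...) when ceil_mode is set
    wrapper = math.ceil if ceil_mode else math.floor
    return wrapper((in_size - kernel) / stride + 1)


print(pool_out(28, 2, 2))  # 14
print(pool_out(7, 2, 2))   # 3
print(pool_out(7, 2, 2, ceil_mode=True))  # 4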
If None, then no bounds @@ -40,11 +43,10 @@ def gnn_with_non_fixed_graph( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- OmltBlock (formulated) """ - # build NetworkDefinition for nn net = load_torch_geometric_sequential( nn=nn, @@ -68,7 +70,7 @@ def gnn_with_non_fixed_graph( block.symmetric_adjacency = pyo.ConstraintList() for u in range(N): for v in range(u + 1, N): - block.symmetric_adjacency.add((block.A[u, v] == block.A[v, u])) + block.symmetric_adjacency.add(block.A[u, v] == block.A[v, u]) # build formulation for GNN block.build_formulation(FullSpaceNNFormulation(net)) @@ -85,9 +87,10 @@ def gnn_with_fixed_graph( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """ - Build formulation for a torch_geometric graph neural network model (built with Sequential). - Given the adjacency matrix, the input graph structure is fixed. + """Graph neural network with fixed graph. + + Build formulation for a torch_geometric graph neural network model (built with + Sequential). Given the adjacency matrix, the input graph structure is fixed. Parameters ---------- @@ -101,7 +104,8 @@ def gnn_with_fixed_graph( The adjacency matrix of input graph scaling_object : instance of ScalingInterface or None Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + and scaled_outputs --> outputs. If None, no scaling is performed. See + scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). If None, then no bounds @@ -112,13 +116,17 @@ def gnn_with_fixed_graph( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- OmltBlock (formulated) """ - - # assume the adjacency matrix is always symmetric - assert np.array_equal(A, np.transpose(A)) + if not np.array_equal(A, np.transpose(A)): + msg = ( + f"Adjacency matrix A of the input graph must be symmetrical. {A} was" + " provided." + ) + raise ValueError(msg) # build NetworkDefinition for nn net = load_torch_geometric_sequential( diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index 72d594cc..090d9b5a 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -7,8 +7,7 @@ def _compute_gcn_norm(A): - """ - Calculate the norm for a GCN layer + """Calculate the norm for a GCN layer.
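The adjacency handling above either declares symmetry constraints over binary adjacency variables (non-fixed graph) or validates a user-supplied matrix (fixed graph). A standalone Pyomo sketch of the symmetry constraints:

import pyomo.environ as pyo

N = 3
m = pyo.ConcreteModel()
m.A = pyo.Var(range(N), range(N), within=pyo.Binary)
m.symmetric_adjacency = pyo.ConstraintList()
for u in range(N):
    for v in range(u + 1, N):
        m.symmetric_adjacency.add(m.A[u, v] == m.A[v, u])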
Parameters ---------- @@ -64,7 +61,7 @@ def _process_gnn_parameters(gnn_weights_uv, gnn_weights_vv, gnn_biases, gnn_norm gnn_norm : matrix-like the norm for the GNN layer, shape: (N, N) - Returns + Returns: ------- weights : matrix-like the weights for the GNNLayer class, shape: (N * in_channels, N * out_channels) @@ -113,8 +110,9 @@ def load_torch_geometric_sequential( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """ - Load a torch_geometric graph neural network model (built with Sequential) into + """Load a torch_geometric graph neural network model. + + Load a torch_geometric graph neural network model (built with Sequential) into an OMLT network definition object. This network definition object can be used in different formulations. @@ -128,7 +126,8 @@ def load_torch_geometric_sequential( The adjacency matrix of input graph scaling_object : instance of ScalingInterface or None Provide an instance of a scaling object to use to scale iputs --> scaled_inputs - and scaled_outputs --> outputs. If None, no scaling is performed. See scaling.py. + and scaled_outputs --> outputs. If None, no scaling is performed. See + scaling.py. scaled_input_bounds : dict or None A dict that contains the bounds on the scaled variables (the direct inputs to the neural network). If None, then no bounds @@ -139,7 +138,7 @@ def load_torch_geometric_sequential( dictionary will be generated using the provided scaling object. If None, then no bounds are specified. - Returns + Returns: ------- NetworkDefinition """ @@ -163,14 +162,16 @@ def load_torch_geometric_sequential( op_name = l.__class__.__name__ if op_name not in _OP_TYPES: - raise ValueError("this operation is not supported") + msg = f"Operation {op_name} is not supported." + raise ValueError(msg) operations.append(op_name) if A is None: # If A is None, then the graph is not fixed. # Only layers in _LAYER_OP_TYPES_NON_FIXED_GRAPH are supported. # Only "sum" aggregation is supported. - # Since all weights and biases are possibly needed, A is set to correspond to a complete graph. + # Since all weights and biases are possibly needed, A is set to correspond to a + # complete graph. for index, l in enumerate(nn): if ( operations[index] @@ -181,16 +182,15 @@ def load_torch_geometric_sequential( warnings.warn( "nonlinear activation results in a MINLP", stacklevel=2 ) - # Linear layers, all activation functions, and all pooling functions are still supported. + # Linear layers, all activation functions, and all pooling functions are + # still supported. continue if operations[index] not in _LAYER_OP_TYPES_NON_FIXED_GRAPH: - raise ValueError( - "this layer is not supported when the graph is not fixed" - ) - elif l.aggr != "sum": - raise ValueError( - "this aggregation is not supported when the graph is not fixed" - ) + msg = "this layer is not supported when the graph is not fixed." 
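For reference, the normalization that _compute_gcn_norm provides for GCN layers is the standard symmetric one: with A_hat = A + I and D the diagonal degree matrix of A_hat, aggregation uses D^(-1/2) A_hat D^(-1/2). A numpy sketch of that textbook formula (not a copy of the OMLT helper):

import numpy as np

A = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
A_hat = A + np.eye(3)
degree = A_hat.sum(axis=1)
norm = A_hat / np.sqrt(np.outer(degree, degree))  # entry ij: A_hat_ij / sqrt(d_i * d_j)
print(norm)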
+ raise ValueError(msg) + if l.aggr != "sum": + msg = "this aggregation is not supported when the graph is not fixed" + raise ValueError(msg) A = np.ones((N, N)) - np.eye(N) @@ -207,8 +207,10 @@ def load_torch_geometric_sequential( if operations[index] == "Linear": gnn_weights = l.weight.detach().numpy() gnn_biases = l.bias.detach().numpy() - # A linear layer is either applied on each node's features (i.e., prev_layer.output_size[-1] = N * gnn_weights.shape[1]) - # or the features after pooling (i.e., prev_layer.output_size[-1] = gnn_weights.shape[1]) + # A linear layer is either applied on each node's features (i.e., + # prev_layer.output_size[-1] = N * gnn_weights.shape[1]) + # or the features after pooling (i.e., + # prev_layer.output_size[-1] = gnn_weights.shape[1]) gnn_norm = np.eye(prev_layer.output_size[-1] // gnn_weights.shape[1]) weights, biases = _process_gnn_parameters( gnn_weights, gnn_weights, gnn_biases, gnn_norm diff --git a/src/omlt/linear_tree/__init__.py b/src/omlt/linear_tree/__init__.py index 2f89a669..2099e44e 100644 --- a/src/omlt/linear_tree/__init__.py +++ b/src/omlt/linear_tree/__init__.py @@ -1,5 +1,4 @@ -r""" -There are multiple formulations for representing linear model decision trees. +r"""There are multiple formulations for representing linear model decision trees. Please see the following reference: * Ammari et al. (2023) Linear Model Decision Trees as Surrogates in Optimization @@ -23,3 +22,9 @@ LinearTreeGDPFormulation, LinearTreeHybridBigMFormulation, ) + +__all__ = [ + "LinearTreeDefinition", + "LinearTreeGDPFormulation", + "LinearTreeHybridBigMFormulation", +] diff --git a/src/omlt/linear_tree/lt_definition.py b/src/omlt/linear_tree/lt_definition.py index 6bd26c8f..8f944a4a 100644 --- a/src/omlt/linear_tree/lt_definition.py +++ b/src/omlt/linear_tree/lt_definition.py @@ -3,8 +3,7 @@ class LinearTreeDefinition: - """ - Class to represent a linear tree model trained in the linear-tree package + """Class to represent a linear tree model trained in the linear-tree package. Attributes: __model (linear-tree model) : Linear Tree Model trained in linear-tree @@ -27,22 +26,24 @@ def __init__( scaled_input_bounds=None, unscaled_input_bounds=None, ): - """Create a LinearTreeDefinition object and define attributes based on the + """Initialize LinearTreeDefinition. + + Create a LinearTreeDefinition object and define attributes based on the trained linear model decision tree. Arguments: - lt_regressor -- A LinearTreeRegressor model that is trained by the + lt_regressor: A LinearTreeRegressor model that is trained by the linear-tree package Keyword Arguments: - scaling_object -- A scaling object to specify the scaling parameters + scaling_object: A scaling object to specify the scaling parameters for the linear model tree inputs and outputs. If None, then no scaling is performed. (default: {None}) - scaled_input_bounds -- A dict that contains the bounds on the scaled + scaled_input_bounds: A dict that contains the bounds on the scaled variables (the direct inputs to the tree). If None, then the user must specify the bounds via the input_bounds argument. (default: {None}) - unscaled_input_bounds -- A dict that contains the bounds on the + unscaled_input_bounds: A dict that contains the bounds on the variables (the direct inputs to the tree). If None, then the user must specify the scaled bounds via the scaled_input_bounds argument. 
(default: {None}) @@ -65,7 +66,7 @@ def __init__( ) scaled_input_bounds = { - k: (lbs[k], ubs[k]) for k in unscaled_input_bounds.keys() + k: (lbs[k], ubs[k]) for k in unscaled_input_bounds } # If unscaled input bounds provided and no scaler provided, scaled @@ -73,9 +74,8 @@ def __init__( elif unscaled_input_bounds is not None and scaling_object is None: scaled_input_bounds = unscaled_input_bounds elif unscaled_input_bounds is None: - raise ValueError( - "Input Bounds needed to represent linear trees as MIPs" - ) + msg = "Input Bounds needed to represent linear trees as MIPs" + raise ValueError(msg) self.__unscaled_input_bounds = unscaled_input_bounds self.__scaled_input_bounds = scaled_input_bounds @@ -89,48 +89,49 @@ def __init__( @property def scaling_object(self): - """Returns scaling object""" + """Returns scaling object.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Returns dict containing scaled input bounds""" + """Returns dict containing scaled input bounds.""" return self.__scaled_input_bounds @property def splits(self): - """Returns dict containing split information""" + """Returns dict containing split information.""" return self.__splits @property def leaves(self): - """Returns dict containing leaf information""" + """Returns dict containing leaf information.""" return self.__leaves @property def thresholds(self): - """Returns dict containing threshold information""" + """Returns dict containing threshold information.""" return self.__thresholds @property def n_inputs(self): - """Returns number of inputs to the linear tree""" + """Returns number of inputs to the linear tree.""" return self.__n_inputs @property def n_outputs(self): - """Returns number of outputs to the linear tree""" + """Returns number of outputs to the linear tree.""" return self.__n_outputs def _find_all_children_splits(split, splits_dict): - """ + """Find all children splits. + This helper function finds all multigeneration children splits for an argument split. Arguments: - split --The split for which you are trying to find children splits - splits_dict -- A dictionary of all the splits in the tree + split: The split for which you are trying to find children splits + splits_dict: A dictionary of all the splits in the tree Returns: A list containing the Node IDs of all children splits @@ -154,20 +155,19 @@ def _find_all_children_splits(split, splits_dict): def _find_all_children_leaves(split, splits_dict, leaves_dict): - """ + """Find all children leaves. + This helper function finds all multigeneration children leaves for an argument split. Arguments: - split -- The split for which you are trying to find children leaves - splits_dict -- A dictionary of all the split info in the tree - leaves_dict -- A dictionary of all the leaf info in the tree + split: The split for which you are trying to find children leaves + splits_dict: A dictionary of all the split info in the tree + leaves_dict: A dictionary of all the leaf info in the tree Returns: A list containing all the Node IDs of all children leaves """ - all_leaves = [] - # Find all the splits that are children of the relevant split all_splits = _find_all_children_splits(split, splits_dict) @@ -177,20 +177,20 @@ def _find_all_children_leaves(split, splits_dict, leaves_dict): # For each leaf, check if the parents appear in the list of children # splits (all_splits). 
If so, it must be a leaf of the argument split - for leaf in leaves_dict: - if leaves_dict[leaf]["parent"] in all_splits: - all_leaves.append(leaf) - return all_leaves + return [ + leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits + ] def _find_n_inputs(leaves): - """ + """Find n inputs. + Finds the number of inputs using the length of the slope vector in the first leaf Arguments: - leaves -- Dictionary of leaf information + leaves: Dictionary of leaf information Returns: Number of inputs @@ -199,19 +199,19 @@ def _find_n_inputs(leaves): leaf_indices = np.array(list(leaves[tree_indices[0]].keys())) tree_one = tree_indices[0] leaf_one = leaf_indices[0] - n_inputs = len(np.arange(0, len(leaves[tree_one][leaf_one]["slope"]))) - return n_inputs + return len(np.arange(0, len(leaves[tree_one][leaf_one]["slope"]))) def _reassign_none_bounds(leaves, input_bounds): - """ + """Reassign None bounds. + This helper function reassigns bounds that are None to the bounds input by the user Arguments: - leaves -- The dictionary of leaf information. Attribute of the + leaves: The dictionary of leaf information. Attribute of the LinearTreeDefinition object - input_bounds -- The nested dictionary + input_bounds: The nested dictionary Returns: The modified leaves dict without any bounds that are listed as None @@ -231,15 +231,17 @@ def _reassign_none_bounds(leaves, input_bounds): def _parse_tree_data(model, input_bounds): - """ + """Parse tree data. + This function creates the data structures with the information required for creation of the variables, sets, and constraints in the pyomo reformulation of the linear model decision trees. Note that these data structures are attributes of the LinearTreeDefinition Class. Arguments: - model -- Trained linear-tree model or dic containing linear-tree model + model: Trained linear-tree model or dic containing linear-tree model summary (e.g. dict = model.summary()) + input_bounds: Returns: leaves - Dict containing the following information for each leaf: @@ -277,21 +279,23 @@ def _parse_tree_data(model, input_bounds): # Checks to ensure that the input nested dictionary contains the # correct information for entry in model: - if "children" not in model[entry].keys(): + if "children" not in model[entry]: leaves[entry] = model[entry] else: left_child = model[entry]["children"][0] right_child = model[entry]["children"][1] num_splits_in_model += 1 - if left_child not in model.keys() or right_child not in model.keys(): + if left_child not in model or right_child not in model: count += 1 if count > 0 or num_splits_in_model == 0: - raise ValueError( + msg = ( "Input dict must be the summary of the linear-tree model" - + " e.g. dict = model.summary()" + " e.g. dict = model.summary()" ) + raise ValueError(msg) else: - raise TypeError("Model entry must be dict or linear-tree instance") + msg = "Model entry must be dict or linear-tree instance" + raise TypeError(msg) # This loop adds keys for the slopes and intercept and removes the leaf # keys in the splits dictionary diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index 4f83e7f3..5960a442 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -6,7 +6,8 @@ class LinearTreeGDPFormulation(_PyomoFormulation): - r""" + r"""Linear Tree GDP Formulation. + Class to add a Linear Tree GDP formulation to OmltBlock. 
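Before the formulation details, a hedged end-to-end sketch of how this class is meant to be used. The tiny training step assumes the linear-tree package's LinearTreeRegressor API and exists only to produce a fitted model; the `transformation` keyword selects how the disjunctions are reduced to mixed-integer form:

    import numpy as np
    import pyomo.environ as pyo
    from lineartree import LinearTreeRegressor
    from sklearn.linear_model import LinearRegression

    from omlt import OmltBlock
    from omlt.linear_tree import LinearTreeDefinition, LinearTreeGDPFormulation

    # Fit a one-input surrogate purely for illustration.
    X = np.linspace(-1.0, 1.0, 50).reshape(-1, 1)
    y = np.abs(X).ravel()
    regr = LinearTreeRegressor(LinearRegression()).fit(X, y)

    definition = LinearTreeDefinition(regr, unscaled_input_bounds={0: (-1.0, 1.0)})
    formulation = LinearTreeGDPFormulation(definition, transformation="hull")

    m = pyo.ConcreteModel()
    m.tree = OmltBlock()
    m.tree.build_formulation(formulation)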
We use Pyomo.GDP to create the disjuncts and disjunctions and then apply a transformation to convert to a mixed-integer programming representation. @@ -45,18 +46,17 @@ class LinearTreeGDPFormulation(_PyomoFormulation): * Ammari et al. (2023) Linear Model Decision Trees as Surrogates in Optimization of Engineering Applications. Computers & Chemical Engineering * Chen et al. (2022) Pyomo.GDP: An ecosystem for logic based modeling and - optimization development. Optimization and Engineering, 23:607–642 + optimization development. Optimization and Engineering, 23:607-642 """ def __init__(self, lt_definition, transformation="bigm"): - """ - Create a LinearTreeGDPFormulation object + """Create a LinearTreeGDPFormulation object. Arguments: - lt_definition -- LinearTreeDefintion Object + lt_definition: LinearTreeDefintion Object Keyword Arguments: - transformation -- choose which Pyomo.GDP formulation to apply. + transformation: choose which Pyomo.GDP formulation to apply. Supported transformations are bigm, hull, mbigm, and custom (default: {'bigm'}) @@ -70,9 +70,8 @@ def __init__(self, lt_definition, transformation="bigm"): # Ensure that the GDP transformation given is supported supported_transformations = ["bigm", "hull", "mbigm", "custom"] if transformation not in supported_transformations: - raise NotImplementedError( - "Supported transformations are: bigm, mbigm, hull, and custom" - ) + msg = "Supported transformations are: bigm, mbigm, hull, and custom" + raise NotImplementedError(msg) @property def input_indexes(self): @@ -85,7 +84,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -104,8 +105,7 @@ def _build_formulation(self): class LinearTreeHybridBigMFormulation(_PyomoFormulation): - r""" - Class to add a Linear Tree Hybrid Big-M formulation to OmltBlock. + r"""Class to add a Linear Tree Hybrid Big-M formulation to OmltBlock. .. math:: \begin{align*} @@ -134,11 +134,10 @@ class LinearTreeHybridBigMFormulation(_PyomoFormulation): """ def __init__(self, lt_definition): - """ - Create a LinearTreeHybridBigMFormulation object + """Create a LinearTreeHybridBigMFormulation object. Arguments: - lt_definition -- LinearTreeDefinition Object + lt_definition: LinearTreeDefinition Object """ super().__init__() self.model_definition = lt_definition @@ -154,7 +153,9 @@ def output_indexes(self): return list(range(self.model_definition.n_outputs)) def _build_formulation(self): - """This method is called by the OmltBlock to build the corresponding + """Build formulation. + + This method is called by the OmltBlock to build the corresponding mathematical formulation on the Pyomo block. """ _setup_scaled_inputs_outputs( @@ -172,13 +173,14 @@ def _build_formulation(self): def _build_output_bounds(model_def, input_bounds): - """ + """Build output bounds. 
+ This helper function develops bounds of the output variable based on the values of the input_bounds and the signs of the slope Arguments: - model_def -- Model definition - input_bounds -- Dict of input bounds + model_def: Model definition + input_bounds: Dict of input bounds Returns: List that contains the conservative lower and upper bounds of the @@ -217,15 +219,14 @@ def _build_output_bounds(model_def, input_bounds): def _add_gdp_formulation_to_block( block, model_definition, input_vars, output_vars, transformation ): - """ - This function adds the GDP representation to the OmltBlock using Pyomo.GDP + """This function adds the GDP representation to the OmltBlock using Pyomo.GDP. Arguments: - block -- OmltBlock - model_definition -- LinearTreeDefinition Object - input_vars -- input variables to the linear tree model - output_vars -- output variable of the linear tree model - transformation -- Transformation to apply + block: OmltBlock + model_definition: LinearTreeDefinition Object + input_vars: input variables to the linear tree model + output_vars: output variable of the linear tree model + transformation: Transformation to apply """ leaves = model_definition.leaves @@ -234,10 +235,7 @@ def _add_gdp_formulation_to_block( # The set of leaves and the set of features tree_ids = list(leaves.keys()) - t_l = [] - for tree in tree_ids: - for leaf in leaves[tree].keys(): - t_l.append((tree, leaf)) + t_l = [(tree, leaf) for tree in tree_ids for leaf in leaves[tree]] features = np.arange(0, n_inputs) # Use the input_bounds and the linear models in the leaves to calculate @@ -292,14 +290,13 @@ def disjunction_rule(b, tree): def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output_vars): - """ - This function adds the Hybrid BigM representation to the OmltBlock + """This function adds the Hybrid BigM representation to the OmltBlock. Arguments: - block -- OmltBlock - model_definition -- LinearTreeDefinition Object - input_vars -- input variables to the linear tree model - output_vars -- output variable of the linear tree model + block: OmltBlock + model_definition: LinearTreeDefinition Object + input_vars: input variables to the linear tree model + output_vars: output variable of the linear tree model """ leaves = model_definition.leaves input_bounds = model_definition.scaled_input_bounds @@ -309,10 +306,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output tree_ids = list(leaves.keys()) # Create a list of tuples that contains the tree and leaf indices. Note that # the leaf indices depend on the tree in the ensemble. - t_l = [] - for tree in tree_ids: - for leaf in leaves[tree].keys(): - t_l.append((tree, leaf)) + t_l = [(tree, leaf) for tree in tree_ids for leaf in leaves[tree]] features = np.arange(0, n_inputs) diff --git a/src/omlt/neuralnet/__init__.py b/src/omlt/neuralnet/__init__.py index 2b66fc97..ef90caf3 100644 --- a/src/omlt/neuralnet/__init__.py +++ b/src/omlt/neuralnet/__init__.py @@ -1,4 +1,5 @@ -r""" +r"""omlt.neuralnet. + The basic pipeline in source code of OMLT is: .. math:: @@ -12,7 +13,10 @@ \xrightarrow[\text{Constraints}]{\text{Layer 3}}\cdots \end{align*} -where :math:`\mathbf z^{(0)}` is the output of `InputLayer`, :math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, :math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. 
+where +:math:`\mathbf z^{(0)}` is the output of `InputLayer`, +:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, +:math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. """ @@ -26,3 +30,14 @@ ReluComplementarityFormulation, ReluPartitionFormulation, ) + +__all__ = [ + "NetworkDefinition", + "FullSpaceNNFormulation", + "FullSpaceSmoothNNFormulation", + "ReducedSpaceNNFormulation", + "ReducedSpaceSmoothNNFormulation", + "ReluBigMFormulation", + "ReluComplementarityFormulation", + "ReluPartitionFormulation", +] diff --git a/src/omlt/neuralnet/activations/__init__.py b/src/omlt/neuralnet/activations/__init__.py index 7918d9f1..038a4dbd 100644 --- a/src/omlt/neuralnet/activations/__init__.py +++ b/src/omlt/neuralnet/activations/__init__.py @@ -1,5 +1,8 @@ -r""" -Since all activation functions are element-wised, we only consider how to formulate activation functions for a single neuron, where :math:`x` denotes pre-activation variable, and :math:`y` denotes post-activation variable. +r"""Activation functions. + +Since all activation functions are element-wised, we only consider how to formulate +activation functions for a single neuron, where :math:`x` denotes pre-activation +variable, and :math:`y` denotes post-activation variable. """ @@ -23,3 +26,18 @@ } NON_INCREASING_ACTIVATIONS = [] + +__all__ = [ + "linear_activation_constraint", + "linear_activation_function", + "ComplementarityReLUActivation", + "bigm_relu_activation_constraint", + "sigmoid_activation_constraint", + "sigmoid_activation_function", + "softplus_activation_constraint", + "softplus_activation_function", + "tanh_activation_constraint", + "tanh_activation_function", + "ACTIVATION_FUNCTION_MAP", + "NON_INCREASING_ACTIVATIONS", +] diff --git a/src/omlt/neuralnet/activations/linear.py b/src/omlt/neuralnet/activations/linear.py index 712049c1..4538401a 100644 --- a/src/omlt/neuralnet/activations/linear.py +++ b/src/omlt/neuralnet/activations/linear.py @@ -3,10 +3,9 @@ def linear_activation_function(zhat): def linear_activation_constraint( - net_block, net, layer_block, layer, add_constraint=True + net_block, net, layer_block, layer, *, add_constraint=True ): - r""" - Linear activation constraint generator + r"""Linear activation constraint generator. Generates the constraints for the linear activation function: diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index e14718d7..733abb91 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,10 +1,9 @@ import pyomo.environ as pyo -import pyomo.mpec as mpec +from pyomo import mpec def bigm_relu_activation_constraint(net_block, net, layer_block, layer): - r""" - Big-M ReLU activation formulation. + r"""Big-M ReLU activation formulation. Generates the constraints for the ReLU activation function: @@ -35,7 +34,8 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): y&\le \sigma u \end{align*} - The lower bound of :math:`y` is :math:`\max(0,l)`, and the upper bound of :math:`y` is :math:`\max(0,u)`. + The lower bound of :math:`y` is :math:`\max(0,l)`, and the upper bound of :math:`y` + is :math:`\max(0,u)`. """ layer_block.q_relu = pyo.Var(layer.output_indexes, within=pyo.Binary) @@ -81,8 +81,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): class ComplementarityReLUActivation: - r""" - Complementarity-based ReLU activation formulation. + r"""Complementarity-based ReLU activation formulation. 
Generates the constraints for the ReLU activation function: diff --git a/src/omlt/neuralnet/activations/smooth.py b/src/omlt/neuralnet/activations/smooth.py index b37ac6c7..7f5bd10d 100644 --- a/src/omlt/neuralnet/activations/smooth.py +++ b/src/omlt/neuralnet/activations/smooth.py @@ -2,8 +2,7 @@ def softplus_activation_function(x): - r""" - Applies the softplus function: + r"""Applies the softplus function. .. math:: @@ -16,8 +15,7 @@ def softplus_activation_function(x): def sigmoid_activation_function(x): - r""" - Applies the sigmoid function: + r"""Applies the sigmoid function. .. math:: @@ -30,8 +28,7 @@ def sigmoid_activation_function(x): def tanh_activation_function(x): - r""" - Applies the tanh function: + r"""Applies the tanh function. .. math:: @@ -44,40 +41,31 @@ def tanh_activation_function(x): def softplus_activation_constraint(net_block, net, layer_block, layer): - r""" - Softplus activation constraint generator. - - """ + r"""Softplus activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, softplus_activation_function ) def sigmoid_activation_constraint(net_block, net, layer_block, layer): - r""" - Sigmoid activation constraint generator. - - """ + r"""Sigmoid activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, sigmoid_activation_function ) def tanh_activation_constraint(net_block, net, layer_block, layer): - r""" - tanh activation constraint generator. - - """ + r"""Tanh activation constraint generator.""" return smooth_monotonic_activation_constraint( net_block, net, layer_block, layer, tanh_activation_function ) def smooth_monotonic_activation_constraint(net_block, net, layer_block, layer, fcn): - r""" - Activation constraint generator for a smooth monotonic function. + r"""Activation constraint generator for a smooth monotonic function. - Generates the constraints for the activation function :math:`f` if it is smooth and monotonic: + Generates the constraints for the activation function :math:`f` if it is smooth and + monotonic: .. math:: diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py index 16e068a3..d7a52750 100644 --- a/src/omlt/neuralnet/layer.py +++ b/src/omlt/neuralnet/layer.py @@ -1,5 +1,4 @@ -r""" -Neural network layer classes. +r"""Neural network layer classes. We use the following notations to define a layer: @@ -21,10 +20,10 @@ import numpy as np +OUTPUT_DIMENSIONS = 3 class Layer: - """ - Base layer class. + """Base layer class. Parameters ---------- @@ -42,13 +41,11 @@ def __init__( self, input_size, output_size, *, activation=None, input_index_mapper=None ): if not isinstance(input_size, (list, tuple)): - raise TypeError( - f"input_size must be a list or tuple, {type(input_size)} was provided." - ) + msg = f"input_size must be a list or tuple, {type(input_size)} provided." + raise TypeError(msg) if not isinstance(output_size, (list, tuple)): - raise TypeError( - f"output_size must be a list or tuple, {type(output_size)} was provided." - ) + msg = f"output_size must be a list or tuple, {type(output_size)} provided." 
+ raise TypeError(msg) self.__input_size = list(input_size) self.__output_size = list(output_size) self.activation = activation @@ -58,35 +55,34 @@ def __init__( @property def input_size(self): - """Return the size of the input tensor""" + """Return the size of the input tensor.""" return self.__input_size @property def output_size(self): - """Return the size of the output tensor""" + """Return the size of the output tensor.""" return self.__output_size @property def activation(self): - """Return the activation function""" + """Return the activation function.""" return self.__activation @activation.setter def activation(self, new_activation): - """Change the activation function""" + """Change the activation function.""" if new_activation is None: new_activation = "linear" self.__activation = new_activation @property def input_index_mapper(self): - """Return the index mapper""" + """Return the index mapper.""" return self.__input_index_mapper @property def input_indexes_with_input_layer_indexes(self): - """ - Return an iterator generating a tuple of local and input indexes. + """Return an iterator generating a tuple of local and input indexes. Local indexes are indexes over the elements of the current layer. Input indexes are indexes over the elements of the previous layer. @@ -101,17 +97,16 @@ def input_indexes_with_input_layer_indexes(self): @property def input_indexes(self): - """Return a list of the input indexes""" + """Return a list of the input indexes.""" return list(itertools.product(*[range(v) for v in self.__input_size])) @property def output_indexes(self): - """Return a list of the output indexes""" + """Return a list of the output indexes.""" return list(itertools.product(*[range(v) for v in self.__output_size])) def eval_single_layer(self, x): - """ - Evaluate the layer at x. + """Evaluate the layer at x. Parameters ---------- @@ -124,34 +119,35 @@ def eval_single_layer(self, x): else x[:] ) if x_reshaped.shape != tuple(self.input_size): - raise ValueError( - f"Layer requires an input size {self.input_size}, but the input tensor had size {x_reshaped.shape}." + msg = ( + f"Layer requires an input size {self.input_size}, but the input tensor" + f" has size {x_reshaped.shape}." ) + raise ValueError(msg) y = self._eval(x_reshaped) return self._apply_activation(y) def __repr__(self): - return f"<{str(self)} at {hex(id(self))}>" + return f"<{self!s} at {hex(id(self))}>" def _eval(self, x): - raise NotImplementedError() + raise NotImplementedError def _apply_activation(self, x): if self.__activation == "linear" or self.__activation is None: return x - elif self.__activation == "relu": + if self.__activation == "relu": return np.maximum(x, 0) - elif self.__activation == "sigmoid": + if self.__activation == "sigmoid": return 1.0 / (1.0 + np.exp(-x)) - elif self.__activation == "tanh": + if self.__activation == "tanh": return np.tanh(x) - else: - raise ValueError(f"Unknown activation function {self.__activation}") + msg = f"Unknown activation function {self.__activation}" + raise ValueError(msg) class InputLayer(Layer): - """ - The first layer in any network. + """The first layer in any network. Parameters ---------- @@ -172,13 +168,15 @@ def _eval(self, x): class DenseLayer(Layer): - r""" + r"""Dense layer. + The dense layer is defined by: .. 
math:: \begin{align*} - y_j = \sigma\left(\sum\limits_{i=0}^{F_{in}-1}w_{ij}x_i+b_j\right), && \forall 0\le j= input_layer_block.z[input_index] + layer_block.zhat[output_index] + >= input_layer_block.z[mapped_input_index] ) @@ -337,7 +355,8 @@ def _calculate_n_plus(out_index, l, k, layer, input_layer_block): def _input_layer_and_block(net_block, net, layer): input_layers = list(net.predecessors(layer)) if len(input_layers) != 1: - raise ValueError("Multiple input layers are not currently supported.") + msg = "Multiple input layers are not currently supported." + raise ValueError(msg) input_layer = input_layers[0] input_layer_block = net_block.layer[id(input_layer)] return input_layer, input_layer_block diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index f29cadd2..1430332a 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -4,8 +4,7 @@ def default_partition_split_func(w, n): - r""" - Default function for partitioning weights in :math:`w` into :math:`n` partitions. + r"""Default function to partition weights in :math:`w` into :math:`n` partitions. Weights in :math:`w` are sorted and partitioned evenly. @@ -16,15 +15,15 @@ def default_partition_split_func(w, n): def partition_based_dense_relu_layer(net_block, net, layer_block, layer, split_func): - r""" - Partition-based ReLU activation formulation. + r"""Partition-based ReLU activation formulation. Generates the constraints for the ReLU activation function: .. math:: \begin{align*} - y_j = \max\left(0,\sum\limits_{i=0}^{F_{in}-1}w_{ij}x_i+b_j\right), && \forall 0\le j 1: - raise ValueError(f"Layer {layer} has multiple predecessors.") + msg = f"Layer {layer} is not an input layer, but has no predecessors." + raise ValueError(msg) + if len(prev_layers) > 1: + msg = f"Layer {layer} has multiple predecessors." + raise ValueError(msg) prev_layer = prev_layers[0] prev_layer_block = net_block.layer[id(prev_layer)] @@ -100,19 +103,18 @@ def output_node_block(b, *output_index): for split_local_index in splits[split_index]: _, local_index = input_layer_indexes[split_local_index] - if mapper: - input_index = mapper(local_index) - else: - input_index = local_index + input_index = mapper(local_index) if mapper else local_index w = weights[local_index[-1]] expr += prev_layer_block.z[input_index] * w lb, ub = compute_bounds_on_expr(expr) if lb is None: - raise ValueError("Expression is unbounded below.") + msg = "Expression is unbounded below." + raise ValueError(msg) if ub is None: - raise ValueError("Expression is unbounded above.") + msg = "Expression is unbounded above." + raise ValueError(msg) z2 = b.z2[split_index] z2.setlb(min(0, lb)) @@ -133,9 +135,11 @@ def output_node_block(b, *output_index): lb, ub = compute_bounds_on_expr(expr) if lb is None: - raise ValueError("Expression is unbounded below.") + msg = "Expression is unbounded below." + raise ValueError(msg) if ub is None: - raise ValueError("Expression is unbounded above.") + msg = "Expression is unbounded above." 
+ raise ValueError(msg) layer_block.z[output_index].setlb(0) layer_block.z[output_index].setub(max(0, ub)) @@ -144,10 +148,7 @@ def output_node_block(b, *output_index): for split_index in range(num_splits): for split_local_index in splits[split_index]: _, local_index = input_layer_indexes[split_local_index] - if mapper: - input_index = mapper(local_index) - else: - input_index = local_index + input_index = mapper(local_index) if mapper else local_index w = weights[local_index[-1]] eq_13_expr += prev_layer_block.z[input_index] * w diff --git a/src/omlt/neuralnet/layers/reduced_space.py b/src/omlt/neuralnet/layers/reduced_space.py index 95d1f97f..9160f87e 100644 --- a/src/omlt/neuralnet/layers/reduced_space.py +++ b/src/omlt/neuralnet/layers/reduced_space.py @@ -1,6 +1,5 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation): - r""" - Add reduced-space formulation of the dense layer to the block + r"""Add reduced-space formulation of the dense layer to the block. .. math:: @@ -12,11 +11,11 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation): # not an input layer, process the expressions prev_layers = list(net.predecessors(layer)) if len(prev_layers) == 0: - raise ValueError( - f"Layer {layer} is not an input layer, but has no predecessors." - ) - elif len(prev_layers) > 1: - raise ValueError(f"Layer {layer} has multiple predecessors.") + msg = f"Layer {layer} is not an input layer, but has no predecessors." + raise ValueError(msg) + if len(prev_layers) > 1: + msg = f"Layer {layer} has multiple predecessors." + raise ValueError(msg) prev_layer = prev_layers[0] prev_layer_block = net_block.layer[id(prev_layer)] diff --git a/src/omlt/neuralnet/network_definition.py b/src/omlt/neuralnet/network_definition.py index aeef22eb..783f0c76 100644 --- a/src/omlt/neuralnet/network_definition.py +++ b/src/omlt/neuralnet/network_definition.py @@ -7,7 +7,8 @@ class NetworkDefinition: def __init__( self, scaling_object=None, scaled_input_bounds=None, unscaled_input_bounds=None ): - """ + """Network Definition. + Create a network definition object used to create the neural network formulation in Pyomo @@ -26,7 +27,7 @@ def __init__( parameter will be generated using the scaling object. If None, then no bounds are specified. """ - self.__layers_by_id = dict() + self.__layers_by_id = {} self.__graph = nx.DiGraph() self.__scaling_object = scaling_object @@ -41,10 +42,11 @@ def __init__( ) scaled_input_bounds = { - k: (lbs[k], ubs[k]) for k in unscaled_input_bounds.keys() + k: (lbs[k], ubs[k]) for k in unscaled_input_bounds } - # If unscaled input bounds provided and no scaler provided, scaled input bounds = unscaled input bounds + # If unscaled input bounds provided and no scaler provided, + # scaled input bounds = unscaled input bounds elif unscaled_input_bounds is not None and scaling_object is None: scaled_input_bounds = unscaled_input_bounds @@ -52,8 +54,7 @@ def __init__( self.__scaled_input_bounds = scaled_input_bounds def add_layer(self, layer): - """ - Add a layer to the network. + """Add a layer to the network. Parameters ---------- @@ -65,8 +66,7 @@ def add_layer(self, layer): self.__graph.add_node(layer_id) def add_edge(self, from_layer, to_layer): - """ - Add an edge between two layers. + """Add an edge between two layers. 
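To make add_layer and add_edge concrete, here is a small sketch that wires one dense layer to an input layer. The DenseLayer argument order follows the class shown earlier in this patch, with weights and biases assumed to be accepted as keywords:

    import numpy as np
    from omlt.neuralnet.layer import DenseLayer, InputLayer
    from omlt.neuralnet.network_definition import NetworkDefinition

    net = NetworkDefinition(unscaled_input_bounds={0: (-1.0, 1.0)})

    input_layer = InputLayer([1])
    dense = DenseLayer(
        [1],
        [1],
        activation="relu",
        weights=np.array([[2.0]]),
        biases=np.array([0.5]),
    )

    net.add_layer(input_layer)
    net.add_layer(dense)
    net.add_edge(input_layer, dense)

    out = dense.eval_single_layer(np.array([1.0]))  # relu(2.0 * 1.0 + 0.5) -> [2.5]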
Parameters ---------- @@ -78,69 +78,85 @@ def add_edge(self, from_layer, to_layer): id_to = id(to_layer) id_from = id(from_layer) if id_to not in self.__layers_by_id: - raise ValueError(f"Inbound layer {to_layer} not found in network.") + msg = f"Inbound layer {to_layer} not found in network." + raise ValueError(msg) if id_from not in self.__layers_by_id: - raise ValueError(f"Outbound layer {from_layer} not found in network.") + msg = f"Outbound layer {from_layer} not found in network." + raise ValueError(msg) self.__graph.add_edge(id_from, id_to) @property def scaling_object(self): - """Return an instance of the scaling object that supports the ScalingInterface""" + """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object @property def scaled_input_bounds(self): - """Return a dict of tuples containing lower and upper bounds of neural network inputs""" + """Scaled Input Bounds. + + Return a dict of tuples containing lower and upper bounds of neural network + inputs. + """ return self.__scaled_input_bounds @property def unscaled_input_bounds(self): - """Return a dict of tuples containing lower and upper bounds of unscaled neural network inputs""" + """Unscaled Input Bounds. + + Return a dict of tuples containing lower and upper bounds of unscaled neural + network inputs. + """ return self.__unscaled_input_bounds @property def input_layers(self): - """Return an iterator over the input layers""" + """Return an iterator over the input layers.""" for layer_id, in_degree in self.__graph.in_degree(): if in_degree == 0: yield self.__layers_by_id[layer_id] @property def input_nodes(self): - """An alias for input_layers""" + """An alias for input_layers.""" return self.input_layers @property def output_layers(self): - """Return an iterator over the output layer""" + """Return an iterator over the output layer.""" for layer_id, out_degree in self.__graph.out_degree(): if out_degree == 0: yield self.__layers_by_id[layer_id] @property def output_nodes(self): - """An alias for output_layers""" + """An alias for output_layers.""" return self.output_layers def layer(self, layer_id): - """Return the layer with the given id""" + """Return the layer with the given id.""" return self.__layers_by_id[layer_id] @property def layers(self): - """Return an iterator over all the layers""" + """Return an iterator over all the layers.""" for layer_id in nx.topological_sort(self.__graph): yield self.__layers_by_id[layer_id] def predecessors(self, layer): - """Return an iterator over the layers with outbound connections into the layer""" + """Predecessors. + + Return an iterator over the layers with outbound connections into the layer. + """ if isinstance(layer, Layer): layer = id(layer) for node_id in self.__graph.predecessors(layer): yield self.__layers_by_id[node_id] def successors(self, layer): - """Return an iterator over the layers with an inbound connection from the layer""" + """Successors. + + Return an iterator over the layers with an inbound connection from the layer. 
+ """ if isinstance(layer, Layer): layer = id(layer) for node_id in self.__graph.successors(layer): diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index b0461aa2..d8eb5b37 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,4 +1,3 @@ -import numpy as np import pyomo.environ as pyo from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -57,10 +56,11 @@ def _ignore_input_layer(): "tanh": tanh_activation_constraint, } +MULTI_INPUTS_UNSUPPORTED = "Multiple input layers are not currently supported." +MULTI_OUTPUTS_UNSUPPORTED = "Multiple output layers are not currently supported." class FullSpaceNNFormulation(_PyomoFormulation): - """ - This class is the entry-point to build neural network formulations. + """This class is the entry-point to build neural network formulations. This class iterates over all nodes in the neural network and for each one them, generates the constraints to represent the layer @@ -96,10 +96,10 @@ def __init__( network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) def _supported_default_layer_constraints(self): return _DEFAULT_LAYER_CONSTRAINTS @@ -124,7 +124,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -132,15 +132,14 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes def _build_neural_network_formulation( block, network_structure, layer_constraints, activation_constraints ): - """ - Adds the neural network formulation to the given Pyomo block. + """Adds the neural network formulation to the given Pyomo block. Parameters ---------- @@ -183,27 +182,21 @@ def layer(b, layer_id): layer_constraints_func = layer_constraints.get(type(layer), None) if layer_constraints_func is None: - raise ValueError( - "Layer type {} is not supported by this formulation.".format( - type(layer) - ) - ) + msg = f"Layer type {type(layer)} is not supported by this formulation." + raise ValueError(msg) layer_constraints_func(block, net, layer_block, layer) activation_constraints_func = activation_constraints.get(layer.activation, None) if activation_constraints_func is None: - raise ValueError( - "Activation {} is not supported by this formulation.".format( - layer.activation - ) - ) + msg = f"Activation {layer.activation} is not supported by this formulation." 
+ raise ValueError(msg) activation_constraints_func(block, net, layer_block, layer) # setup input variables constraints # currently only support a single input layer input_layers = list(net.input_layers) if len(input_layers) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] @block.Constraint(input_layer.output_indexes) @@ -214,7 +207,7 @@ def input_assignment(b, *output_index): # currently only support a single output layer output_layers = list(net.output_layers) if len(output_layers) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] @block.Constraint(output_layer.output_indexes) @@ -226,7 +219,8 @@ def output_assignment(b, *output_index): class FullSpaceSmoothNNFormulation(FullSpaceNNFormulation): def __init__(self, network_structure): - """ + """Full Space Smooth Neural Network Formulation. + This class is used for building "full-space" formulations of neural network models composed of smooth activations (e.g., tanh, sigmoid, etc.) @@ -249,7 +243,8 @@ def _supported_default_activation_constraints(self): class ReluBigMFormulation(FullSpaceNNFormulation): def __init__(self, network_structure): - """ + """Relu Big-M Formulation. + This class is used for building "full-space" formulations of neural network models composed of relu activations using a big-M formulation @@ -270,7 +265,8 @@ def _supported_default_activation_constraints(self): class ReluComplementarityFormulation(FullSpaceNNFormulation): def __init__(self, network_structure): - """ + """Relu Complementarity Formulation. + This class is used for building "full-space" formulations of neural network models composed of relu activations using a complementarity formulation (smooth represenation) @@ -290,7 +286,8 @@ def _supported_default_activation_constraints(self): class ReducedSpaceNNFormulation(_PyomoFormulation): - """ + """Reduced Space Neural Network Formulation. + This class is used to build reduced-space formulations of neural networks. @@ -322,10 +319,10 @@ def __init__(self, network_structure, activation_functions=None): # # network_inputs = list(self.__network_definition.input_nodes) # if len(network_inputs) != 1: - # raise ValueError("Multiple input layers are not currently supported.") + # raise ValueError(MULTI_INPUTS_UNSUPPORTED) # network_outputs = list(self.__network_definition.output_nodes) # if len(network_outputs) != 1: - # raise ValueError("Multiple output layers are not currently supported.") + # raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) def _supported_default_activation_functions(self): return dict(_DEFAULT_ACTIVATION_FUNCTIONS) @@ -346,10 +343,11 @@ def _build_formulation(self): # currently only support a single input layer input_layers = list(net.input_layers) if len(input_layers) != 1: - raise ValueError( + msg = ( "build_formulation called with a network that has more than" " one input layer. Only single input layers are supported." 
) + raise ValueError(msg) input_layer = input_layers[0] input_layer_id = id(input_layer) input_layer_block = block.layer[input_layer_id] @@ -374,11 +372,11 @@ def z(b, *output_index): layer_func = reduced_space_dense_layer # layer_constraints[type(layer)] activation_func = self._activation_functions.get(layer.activation, None) if activation_func is None: - raise ValueError( - "Activation {} is not supported by this formulation.".format( - layer.activation - ) + msg = ( + f"Activation {layer.activation} is not supported by this" + " formulation." ) + raise ValueError(msg) layer_func(block, net, layer_block, layer, activation_func) @@ -386,10 +384,11 @@ def z(b, *output_index): # currently only support a single output layer output_layers = list(net.output_layers) if len(output_layers) != 1: - raise ValueError( + msg = ( "build_formulation called with a network that has more than" " one output layer. Only single output layers are supported." ) + raise ValueError(msg) output_layer = output_layers[0] @block.Constraint(output_layer.output_indexes) @@ -413,7 +412,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -421,12 +420,13 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes class ReducedSpaceSmoothNNFormulation(ReducedSpaceNNFormulation): - """ + """Reduced Space Smooth Neural Network Formulation. + This class is used to build reduced-space formulations of neural networks with smooth activation functions. @@ -449,7 +449,8 @@ def _supported_default_activation_functions(self): class ReluPartitionFormulation(_PyomoFormulation): - """ + """ReLU Partition Formulation. + This class is used to build partition-based formulations of neural networks. @@ -515,11 +516,14 @@ def layer(b, layer_id): full_space_dense_layer(block, net, layer_block, layer) linear_activation_constraint(block, net, layer_block, layer) else: - raise ValueError( - "ReluPartitionFormulation supports Dense layers with relu or linear activation" + msg = ( + "ReluPartitionFormulation supports Dense layers with relu or" + " linear activation" ) + raise ValueError(msg) else: - raise ValueError("ReluPartitionFormulation supports only Dense layers") + msg = "ReluPartitionFormulation supports only Dense layers" + raise TypeError(msg) # This check is never hit. 
The formulation._build_formulation() function is # only ever called by an OmltBlock.build_formulation(), and that runs the @@ -530,7 +534,7 @@ def layer(b, layer_id): # currently only support a single input layer input_layers = list(net.input_layers) if len(input_layers) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) input_layer = input_layers[0] @block.Constraint(input_layer.output_indexes) @@ -544,7 +548,7 @@ def input_assignment(b, *output_index): # currently only support a single output layer output_layers = list(net.output_layers) if len(output_layers) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) output_layer = output_layers[0] @block.Constraint(output_layer.output_indexes) @@ -559,7 +563,7 @@ def input_indexes(self): """The indexes of the formulation inputs.""" network_inputs = list(self.__network_definition.input_nodes) if len(network_inputs) != 1: - raise ValueError("Multiple input layers are not currently supported.") + raise ValueError(MULTI_INPUTS_UNSUPPORTED) return network_inputs[0].input_indexes @property @@ -567,5 +571,5 @@ def output_indexes(self): """The indexes of the formulation output.""" network_outputs = list(self.__network_definition.output_nodes) if len(network_outputs) != 1: - raise ValueError("Multiple output layers are not currently supported.") + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) return network_outputs[0].output_indexes diff --git a/src/omlt/scaling.py b/src/omlt/scaling.py index ea7416ba..9bf3bd3f 100644 --- a/src/omlt/scaling.py +++ b/src/omlt/scaling.py @@ -1,7 +1,8 @@ -""" +"""Scaling. + The omlt.scaling module describes the interface for providing different scaling -expressions to the Pyomo model for the inputs and outputs of an ML model. An implementation of a common scaling approach is -included with `OffsetScaling`. +expressions to the Pyomo model for the inputs and outputs of an ML model. An +implementation of a common scaling approach is included with `OffsetScaling`. """ import abc @@ -10,25 +11,32 @@ class ScalingInterface(abc.ABC): @abc.abstractmethod def get_scaled_input_expressions(self, input_vars): - """This method returns a list of expressions for the scaled inputs from - the unscaled inputs""" - pass # pragma: no cover + """Get scaled inputs. + + This method returns a list of expressions for the scaled inputs from + the unscaled inputs + """ + # pragma: no cover @abc.abstractmethod def get_unscaled_output_expressions(self, scaled_output_vars): - """This method returns a list of expressions for the unscaled outputs from - the scaled outputs""" - pass # pragma: no cover + """Get unscaled outputs. + + This method returns a list of expressions for the unscaled outputs from + the scaled outputs + """ + # pragma: no cover def convert_to_dict(x): - if type(x) is dict: + if isinstance(x, dict): return dict(x) - return {i: v for i, v in enumerate(x)} + return dict(enumerate(x)) class OffsetScaling(ScalingInterface): - r""" + r"""OffsetScaling interface. 
+ This scaling object represents the following scaling equations for inputs (x) and outputs (y) @@ -51,7 +59,7 @@ class OffsetScaling(ScalingInterface): """ def __init__(self, offset_inputs, factor_inputs, offset_outputs, factor_outputs): - super(OffsetScaling, self).__init__() + super().__init__() self.__x_offset = convert_to_dict(offset_inputs) self.__x_factor = convert_to_dict(factor_inputs) self.__y_offset = convert_to_dict(offset_outputs) @@ -59,112 +67,102 @@ def __init__(self, offset_inputs, factor_inputs, offset_outputs, factor_outputs) for k, v in self.__x_factor.items(): if v <= 0: - raise ValueError( + msg = ( "OffsetScaling only accepts positive values" " for factor_inputs. Negative value found at" - " index {}.".format(k) + f" index {k}." ) + raise ValueError(msg) for k, v in self.__y_factor.items(): if v <= 0: - raise ValueError( + msg = ( "OffsetScaling only accepts positive values" " for factor_outputs. Negative value found at" - " index {}.".format(k) + f" index {k}." ) + raise ValueError(msg) def get_scaled_input_expressions(self, input_vars): - """ - Get the scaled input expressions of the input variables. - """ + """Get the scaled input expressions of the input variables.""" sorted_keys = sorted(input_vars.keys()) if ( sorted(self.__x_offset) != sorted_keys or sorted(self.__x_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_input_expressions called with input_vars" " that do not have the same indices as offset_inputs" " or factor_inputs.\n" - "Keys in input_vars: {}.\n" - "Keys in offset_inputs: {}.\n" - "Keys in offset_factor: {}.".format( - sorted_keys, sorted(self.__x_offset), sorted(self.__x_factor) - ) + f"Keys in input_vars: {sorted_keys}.\n" + f"Keys in offset_inputs: {sorted(self.__x_offset)}.\n" + f"Keys in offset_factor: {sorted(self.__x_factor)}." ) + raise ValueError(msg) x = input_vars - return {k: (x[k] - self.__x_offset[k]) / self.__x_factor[k] for k in x.keys()} + return {k: (x[k] - self.__x_offset[k]) / self.__x_factor[k] for k in x} def get_unscaled_input_expressions(self, scaled_input_vars): - """ - Get the unscaled input expressions of the scaled input variables. - """ + """Get the unscaled input expressions of the scaled input variables.""" sorted_keys = sorted(scaled_input_vars.keys()) if ( sorted(self.__x_offset) != sorted_keys or sorted(self.__x_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_input_expressions called with input_vars" " that do not have the same indices as offset_inputs" " or factor_inputs.\n" - "Keys in input_vars: {}\n" - "Keys in offset_inputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__x_offset), sorted(self.__x_factor) - ) + f"Keys in input_vars: {sorted_keys}\n" + f"Keys in offset_inputs: {sorted(self.__x_offset)}\n" + f"Keys in offset_factor: {sorted(self.__x_factor)}" ) + raise ValueError(msg) scaled_x = scaled_input_vars return { k: scaled_x[k] * self.__x_factor[k] + self.__x_offset[k] - for k in scaled_x.keys() + for k in scaled_x } def get_scaled_output_expressions(self, output_vars): - """ - Get the scaled output expressions of the output variables. 
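A short usage sketch of the class above, following the scaling equations in its docstring; the offsets and factors are arbitrary illustrative values:

    from omlt.scaling import OffsetScaling

    scaler = OffsetScaling(
        offset_inputs=[0.0],
        factor_inputs=[2.0],
        offset_outputs=[1.0],
        factor_outputs=[4.0],
    )

    # Inputs scale as (x - offset) / factor.
    print(scaler.get_scaled_input_expressions({0: 3.0}))      # {0: 1.5}
    # Outputs unscale as y_scaled * factor + offset.
    print(scaler.get_unscaled_output_expressions({0: 0.5}))   # {0: 3.0}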
- """ + """Get the scaled output expressions of the output variables.""" sorted_keys = sorted(output_vars.keys()) if ( sorted(self.__y_offset) != sorted_keys or sorted(self.__y_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_output_expressions called with output_vars" " that do not have the same indices as offset_outputs" " or factor_outputs.\n" - "Keys in output_vars: {}\n" - "Keys in offset_outputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__y_offset), sorted(self.__y_factor) - ) + f"Keys in output_vars: {sorted_keys}\n" + f"Keys in offset_outputs: {sorted(self.__y_offset)}\n" + f"Keys in offset_factor: {sorted(self.__y_factor)}" ) + raise ValueError(msg) y = output_vars - return {k: (y[k] - self.__y_offset[k]) / self.__y_factor[k] for k in y.keys()} + return {k: (y[k] - self.__y_offset[k]) / self.__y_factor[k] for k in y} def get_unscaled_output_expressions(self, scaled_output_vars): - """ - Get the unscaled output expressions of the scaled output variables. - """ + """Get the unscaled output expressions of the scaled output variables.""" sorted_keys = sorted(scaled_output_vars.keys()) if ( sorted(self.__y_offset) != sorted_keys or sorted(self.__y_factor) != sorted_keys ): - raise ValueError( + msg = ( "get_scaled_output_expressions called with output_vars" " that do not have the same indices as offset_outputs" " or factor_outputs.\n" - "Keys in output_vars: {}\n" - "Keys in offset_outputs: {}\n" - "Keys in offset_factor: {}".format( - sorted_keys, sorted(self.__y_offset), sorted(self.__y_factor) - ) + f"Keys in output_vars: {sorted_keys}\n" + f"Keys in offset_outputs: {sorted(self.__y_offset)}\n" + f"Keys in offset_factor: {sorted(self.__y_factor)}" ) + raise ValueError(msg) scaled_y = scaled_output_vars return { k: scaled_y[k] * self.__y_factor[k] + self.__y_offset[k] - for k in scaled_y.keys() + for k in scaled_y } diff --git a/tests/conftest.py b/tests/conftest.py index b9c4daf7..bcea6cff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,46 +2,43 @@ import numpy as np import pytest -from pyomo.common.fileutils import this_file_dir - from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition +from pyomo.common.fileutils import this_file_dir def get_neural_network_data(desc): - """ - Return input and test data for a neural network. + """Return input and test data for a neural network. Parameters ---------- desc : string model name. One of 131 or 2353. """ + rng = np.random.default_rng(42) + if desc == "131": # build data with 1 input and 1 output and 500 data points - x = np.random.uniform(-1, 1, 500) + x = rng.uniform(-1, 1, 500) y = np.sin(x) - x_test = np.random.uniform(-1, 1, 5) + x_test = rng.uniform(-1, 1, 5) return x, y, x_test - elif desc == "2353": + if desc == "2353": # build data with 2 inputs, 3 outputs, and 500 data points - np.random.seed(42) - x = np.random.uniform([-1, 2], [1, 3], (500, 2)) + x = rng.uniform([-1, 2], [1, 3], (500, 2)) y1 = np.sin(x[:, 0] * x[:, 1]) y2 = x[:, 0] + x[:, 1] y3 = np.cos(x[:, 0] / x[:, 1]) y = np.column_stack((y1, y2, y3)) - x_test = np.random.uniform([-1, 2], [1, 3], (5, 2)) + x_test = rng.uniform([-1, 2], [1, 3], (5, 2)) return x, y, x_test return None class _Datadir: - """ - Give access to files in the `models` directory. 
- """ + """Give access to files in the `models` directory.""" def __init__(self, basedir): self._basedir = basedir @@ -50,16 +47,17 @@ def file(self, filename): return str(self._basedir / filename) -@pytest.fixture +@pytest.fixture() def datadir(): basedir = Path(this_file_dir()) / "models" return _Datadir(basedir) -@pytest.fixture +@pytest.fixture() def two_node_network_relu(): - """ - 1 1 + """Two node network with ReLU activation. + + 1 1 x0 -------- (1) --------- (3) | / | / diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index 57d93427..a7c7557c 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -2,12 +2,16 @@ import pyomo.environ as pe import pytest - from omlt import OmltBlock from omlt.dependencies import onnx, onnx_available from omlt.gbt.gbt_formulation import GBTBigMFormulation from omlt.gbt.model import GradientBoostedTreeModel +TOTAL_CONSTRAINTS = 423 +Y_VARS = 42 +Z_L_VARS = 160 +SINGLE_LEAVES = 20 +SPLITS = 140 @pytest.mark.skip("Francesco and Alex need to check this test") def test_formulation_with_continuous_variables(): @@ -27,17 +31,18 @@ def test_formulation_with_continuous_variables(): assert ( len(list(m.gbt.component_data_objects(pe.Var))) == 202 + 10 ) # our auto-created variables - assert len(list(m.gbt.component_data_objects(pe.Constraint))) == 423 # TODO: fix? + # TODO: fix below?: + assert len(list(m.gbt.component_data_objects(pe.Constraint))) == TOTAL_CONSTRAINTS - assert len(m.gbt.z_l) == 160 - assert len(m.gbt.y) == 42 + assert len(m.gbt.z_l) == Z_L_VARS + assert len(m.gbt.y) == Y_VARS - assert len(m.gbt.single_leaf) == 20 - assert len(m.gbt.left_split) == 140 - assert len(m.gbt.right_split) == 140 + assert len(m.gbt.single_leaf) == SINGLE_LEAVES + assert len(m.gbt.left_split) == SPLITS + assert len(m.gbt.right_split) == SPLITS assert len(m.gbt.categorical) == 0 - assert len(m.gbt.var_lower) == 42 - assert len(m.gbt.var_upper) == 42 + assert len(m.gbt.var_lower) == Y_VARS + assert len(m.gbt.var_upper) == Y_VARS # TODO: did we remove categorical variables intentionally? 
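The conftest change above swaps the legacy global-seed API for a local Generator. A minimal sketch of the difference; the arrays are throwaway examples:

    import numpy as np

    # Legacy style: np.random.seed mutates global state shared by every caller.
    np.random.seed(42)
    a = np.random.uniform(-1, 1, 3)

    # Generator style: state lives on the `rng` object, so independent
    # fixtures cannot perturb each other's streams.
    rng = np.random.default_rng(42)
    b = rng.uniform(-1, 1, 3)

    # The two streams also differ numerically: default_rng is backed by PCG64
    # rather than the legacy Mersenne Twister, so identical seeds do not
    # reproduce the old draws.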
diff --git a/tests/io/test_input_bounds.py b/tests/io/test_input_bounds.py index c8f1f439..ea9238fc 100644 --- a/tests/io/test_input_bounds.py +++ b/tests/io/test_input_bounds.py @@ -15,7 +15,7 @@ def test_input_bounds_reader_writer_with_list(): def test_input_bounds_reader_writer_with_dictionary(): - input_bounds = dict(((i, i), (i * 10.0, i * 10.0 + 1.0)) for i in range(10)) + input_bounds = {(i, i): (i * 10.0, i * 10.0 + 1.0) for i in range(10)} with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f: write_input_bounds(f.name, input_bounds) diff --git a/tests/io/test_keras_reader.py b/tests/io/test_keras_reader.py index 21629c66..1982063b 100644 --- a/tests/io/test_keras_reader.py +++ b/tests/io/test_keras_reader.py @@ -1,7 +1,9 @@ import pytest - from omlt.dependencies import keras, keras_available +NUM_LAYERS_131 = 3 +NUM_LAYERS_BIG = 5 + if keras_available: from omlt.io import load_keras_sequential @@ -14,7 +16,7 @@ def test_keras_reader(datadir): net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 for layer in layers: assert layer.activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -25,7 +27,7 @@ def test_keras_reader(datadir): ) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -36,7 +38,7 @@ def test_keras_reader(datadir): ) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "sigmoid" assert layers[1].weights.shape == (1, 3) @@ -45,7 +47,7 @@ def test_keras_reader(datadir): nn = keras.models.load_model(datadir.file("big.keras"), compile=False) net = load_keras_sequential(nn) layers = list(net.layers) - assert len(layers) == 5 + assert len(layers) == NUM_LAYERS_BIG assert layers[1].activation == "sigmoid" assert layers[2].activation == "sigmoid" assert layers[3].activation == "sigmoid" diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py index 763b282c..2f4510c3 100644 --- a/tests/io/test_onnx_parser.py +++ b/tests/io/test_onnx_parser.py @@ -1,7 +1,15 @@ import pytest - from omlt.dependencies import onnx, onnx_available +NUM_LAYERS_131 = 3 +NUM_LAYERS_GEMM = 4 +NUM_LAYERS_MAXPOOL = 4 +NUM_LAYERS_BIG = 5 + +MAXPOOL_KERNEL_DEPTH = 3 + +NEAR_EQUAL = 1e-05 + if onnx_available: from omlt.io.onnx import load_onnx_neural_network from omlt.io.onnx_parser import NetworkParser @@ -12,7 +20,7 @@ def test_linear_131(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 for layer in layers: assert layer.activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -24,7 +32,7 @@ def test_linear_131_relu(datadir): model = onnx.load(datadir.file("keras_linear_131_relu.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "relu" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -36,7 +44,7 @@ def test_linear_131_sigmoid(datadir): model = onnx.load(datadir.file("keras_linear_131_sigmoid.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) 
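These parser tests all follow the same load-and-inspect pattern; a condensed, self-contained version of it, with the file path being illustrative:

    import onnx
    from omlt.io.onnx import load_onnx_neural_network

    model = onnx.load("keras_linear_131.onnx")  # any dense ONNX export works
    net = load_onnx_neural_network(model)

    for layer in net.layers:
        # InputLayer carries no weights, hence the guard.
        shape = layer.weights.shape if hasattr(layer, "weights") else None
        print(type(layer).__name__, layer.activation, shape)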
- assert len(layers) == 3 + assert len(layers) == NUM_LAYERS_131 assert layers[1].activation == "sigmoid" assert layers[2].activation == "linear" assert layers[1].weights.shape == (1, 3) @@ -48,7 +56,7 @@ def test_gemm(datadir): model = onnx.load(datadir.file("gemm.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_GEMM assert layers[1].weights.shape == (784, 75) assert layers[2].weights.shape == (75, 75) assert layers[3].weights.shape == (75, 10) @@ -67,10 +75,10 @@ def test_gemm_transB(datadir): layers_transB = list(net_transB.layers) assert len(layers) == len(layers_transB) assert layers[1].weights.shape == layers_transB[1].weights.shape - assert abs(layers[1].weights[0][0] - layers_transB[1].weights[0][0]) < 1e-05 - assert abs(layers[1].weights[0][1] - layers_transB[1].weights[1][0]) < 1e-05 - assert abs(layers[1].weights[1][0] - layers_transB[1].weights[0][1]) < 1e-05 - assert abs(layers[1].weights[1][1] - layers_transB[1].weights[1][1]) < 1e-05 + assert abs(layers[1].weights[0][0] - layers_transB[1].weights[0][0]) < NEAR_EQUAL + assert abs(layers[1].weights[0][1] - layers_transB[1].weights[1][0]) < NEAR_EQUAL + assert abs(layers[1].weights[1][0] - layers_transB[1].weights[0][1]) < NEAR_EQUAL + assert abs(layers[1].weights[1][1] - layers_transB[1].weights[1][1]) < NEAR_EQUAL @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -78,7 +86,7 @@ def test_conv(datadir): model = onnx.load(datadir.file("convx1_gemmx1.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_GEMM assert layers[1].activation == "linear" assert layers[2].activation == "linear" assert layers[3].activation == "relu" @@ -91,7 +99,7 @@ def test_maxpool(datadir): model = onnx.load(datadir.file("maxpool_2d.onnx")) net = load_onnx_neural_network(model) layers = list(net.layers) - assert len(layers) == 4 + assert len(layers) == NUM_LAYERS_MAXPOOL assert layers[1].activation == "relu" assert layers[2].activation == "linear" assert layers[3].activation == "linear" @@ -105,7 +113,7 @@ def test_maxpool(datadir): assert layers[2].output_size == [3, 5, 2] assert layers[3].output_size == [3, 2, 1] for layer in layers[1:]: - assert layer.kernel_depth == 3 + assert layer.kernel_depth == MAXPOOL_KERNEL_DEPTH @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -113,10 +121,10 @@ def test_input_tensor_invalid_dims(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) model.graph.input[0].type.tensor_type.shape.dim[1].dim_value = 0 parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, match='All dimensions in graph "tf2onnx" input tensor have 0 value.' + ): parser.parse_network(model.graph, None, None) - expected_msg = 'All dimensions in graph "tf2onnx" input tensor have 0 value.' - assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -124,10 +132,10 @@ def test_no_input_layers(datadir): model = onnx.load(datadir.file("keras_linear_131.onnx")) model.graph.input.remove(model.graph.input[0]) parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, match='No valid input layer found in graph "tf2onnx".' + ): parser.parse_network(model.graph, None, None) - expected_msg = 'No valid input layer found in graph "tf2onnx".' 
- assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -136,10 +144,13 @@ def test_node_no_inputs(datadir): while len(model.graph.node[0].input) > 0: model.graph.node[0].input.pop() parser = NetworkParser() - with pytest.raises(ValueError) as excinfo: + expected_msg = ( + 'Nodes must have inputs or have op_type "Constant". Node ' + '"StatefulPartitionedCall/keras_linear_131/dense/MatMul" has' + ' no inputs and op_type "MatMul".' + ) + with pytest.raises(ValueError, match=expected_msg): parser.parse_network(model.graph, None, None) - expected_msg = """Nodes must have inputs or have op_type \"Constant\". Node \"StatefulPartitionedCall/keras_linear_131/dense/MatMul\" has no inputs and op_type \"MatMul\".""" - assert str(excinfo.value) == expected_msg @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -148,45 +159,56 @@ def test_consume_wrong_node_type(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) - with pytest.raises(ValueError) as excinfo: + expected_msg_dense = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for MatMul nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_dense): parser._consume_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MatMul nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_dense - - with pytest.raises(ValueError) as excinfo: + expected_msg_gemm = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Gemm nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_gemm): parser._consume_gemm_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_gemm = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Gemm nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_gemm - - with pytest.raises(ValueError) as excinfo: + expected_msg_conv = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Conv nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_conv): parser._consume_conv_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_conv = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Conv nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_conv - - with pytest.raises(ValueError) as excinfo: + expected_msg_reshape = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for Reshape nodes was called. This could indicate " + "changes in the network being parsed." 
+ ) + with pytest.raises(ValueError, match=expected_msg_reshape): parser._consume_reshape_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_reshape = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Reshape nodes can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_reshape - - with pytest.raises(ValueError) as excinfo: + expected_msg_pool = ( + "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, " + "but the parsing method for MaxPool nodes was called. This could indicate " + "changes in the network being parsed." + ) + with pytest.raises(ValueError, match=expected_msg_pool): parser._consume_pool_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"][2], ) - expected_msg_pool = """StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MaxPool nodes can be used as starting points for consumption.""" - assert str(excinfo.value) == expected_msg_pool @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -198,13 +220,15 @@ def test_consume_dense_wrong_dims(datadir): parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][ 1 ].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_dense = ( + "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, " + "only nodes with 2 input dimensions can be used as starting points for parsing." + ) + with pytest.raises(ValueError, match=expected_msg_dense): parser._consume_dense_nodes( parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][1], parser._nodes["StatefulPartitionedCall/keras_linear_131/dense/MatMul"][2], ) - expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_dense @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -213,12 +237,14 @@ def test_consume_gemm_wrong_dims(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) parser._nodes["Gemm_0"][1].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_gemm = ( + "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions " + "can be used as starting points for parsing." + ) + with pytest.raises(ValueError, match=expected_msg_gemm): parser._consume_gemm_dense_nodes( parser._nodes["Gemm_0"][1], parser._nodes["Gemm_0"][2] ) - expected_msg_gemm = "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions can be used as starting points for consumption." - assert str(excinfo.value) == expected_msg_gemm @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -227,12 +253,14 @@ def test_consume_conv_wrong_dims(datadir): parser = NetworkParser() parser.parse_network(model.graph, None, None) parser._nodes["Conv_0"][1].input.append("abcd") - with pytest.raises(ValueError) as excinfo: + expected_msg_conv = ( + "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input" + " dimensions can be used as starting points for parsing." 
+    )
+    with pytest.raises(ValueError, match=expected_msg_conv):
         parser._consume_conv_nodes(
             parser._nodes["Conv_0"][1], parser._nodes["Conv_0"][2]
         )
-    expected_msg_conv = "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
-    assert str(excinfo.value) == expected_msg_conv
 
 
 @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test")
@@ -241,12 +269,14 @@ def test_consume_reshape_wrong_dims(datadir):
     parser = NetworkParser()
     parser.parse_network(model.graph, None, None)
     parser._nodes["Reshape_2"][1].input.append("abcd")
-    with pytest.raises(ValueError) as excinfo:
+    expected_msg_reshape = (
+        "Reshape_2 input has 3 dimensions, only nodes with 2 input"
+        " dimensions can be used as starting points for parsing."
+    )
+    with pytest.raises(ValueError, match=expected_msg_reshape):
         parser._consume_reshape_nodes(
             parser._nodes["Reshape_2"][1], parser._nodes["Reshape_2"][2]
         )
-    expected_msg_reshape = """Reshape_2 input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."""
-    assert str(excinfo.value) == expected_msg_reshape
 
 
 @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test")
@@ -255,7 +285,9 @@ def test_consume_maxpool_wrong_dims(datadir):
     parser = NetworkParser()
     parser.parse_network(model.graph, None, None)
     parser._nodes["node1"][1].input.append("abcd")
-    with pytest.raises(ValueError) as excinfo:
+    expected_msg_maxpool = (
+        "node1 input has 2 dimensions, only nodes with 1 input "
+        "dimension can be used as starting points for parsing."
+    )
+    with pytest.raises(ValueError, match=expected_msg_maxpool):
         parser._consume_pool_nodes(parser._nodes["node1"][1], parser._nodes["node1"][2])
-    expected_msg_maxpool = """node1 input has 2 dimensions, only nodes with 1 input dimension can be used as starting points for consumption."""
-    assert str(excinfo.value) == expected_msg_maxpool
diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py
index 9cf6905f..fd52e69d 100644
--- a/tests/io/test_torch_geometric.py
+++ b/tests/io/test_torch_geometric.py
@@ -1,17 +1,19 @@
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-
 from omlt import OmltBlock
 from omlt.dependencies import (
-    torch,
     torch_available,
-    torch_geometric,
     torch_geometric_available,
 )
 
 if torch_available and torch_geometric_available:
-    from torch.nn import Linear, ReLU, Sigmoid, Softplus, Tanh
+    from omlt.io.torch_geometric import (
+        gnn_with_fixed_graph,
+        gnn_with_non_fixed_graph,
+        load_torch_geometric_sequential,
+    )
+    from torch.nn import Linear, ReLU, Sigmoid, Tanh
     from torch_geometric.nn import (
         GCNConv,
         SAGEConv,
@@ -21,12 +23,6 @@
         global_mean_pool,
     )
 
-    from omlt.io.torch_geometric import (
-        gnn_with_fixed_graph,
-        gnn_with_non_fixed_graph,
-        load_torch_geometric_sequential,
-    )
-
 
 @pytest.mark.skipif(
     not (torch_available and torch_geometric_available),
diff --git a/tests/linear_tree/test_lt_formulation.py b/tests/linear_tree/test_lt_formulation.py
index 28f6f873..30e3a1a2 100644
--- a/tests/linear_tree/test_lt_formulation.py
+++ b/tests/linear_tree/test_lt_formulation.py
@@ -1,22 +1,24 @@
 import numpy as np
 import pyomo.environ as pe
 import pytest
-from pytest import approx
-
 from omlt.dependencies import lineartree_available
 
 if lineartree_available:
     from lineartree import LinearTreeRegressor
-    from sklearn.linear_model import LinearRegression
 
     from omlt.linear_tree import (
+        LinearTreeDefinition,
         LinearTreeGDPFormulation,
LinearTreeHybridBigMFormulation, - LinearTreeDefinition, ) + from sklearn.linear_model import LinearRegression import omlt from omlt import OmltBlock +NUM_INPUTS = 2 +NUM_SPLITS = 5 +NUM_LEAVES = 6 + scip_available = pe.SolverFactory("scip").available() cbc_available = pe.SolverFactory("cbc").available() gurobi_available = pe.SolverFactory("gurobi").available() @@ -100,7 +102,7 @@ def test_linear_tree_model_single_var(): assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -114,12 +116,12 @@ def test_linear_tree_model_single_var(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -130,8 +132,8 @@ def test_linear_tree_model_single_var(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, ensure slope dimension match n_inputs if key == "slope": @@ -187,7 +189,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -221,7 +223,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -255,7 +257,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif( @@ -289,7 +291,7 @@ def connect_outputs(mdl): pe.assert_optimal_termination(status_1_bigm) solution_1_bigm = (pe.value(model1.x), pe.value(model1.y)) y_pred = regr_small.predict(np.array(solution_1_bigm[0]).reshape(1, -1)) - assert y_pred[0] == approx(solution_1_bigm[1]) + assert y_pred[0] == pytest.approx(solution_1_bigm[1]) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -317,12 +319,12 @@ def test_scaling(): lt_def2 = LinearTreeDefinition( regr, unscaled_input_bounds=unscaled_input_bounds, scaling_object=scaler ) - assert lt_def2.scaled_input_bounds[0][0] == approx(scaled_input_bounds[0][0]) - assert lt_def2.scaled_input_bounds[0][1] == approx(scaled_input_bounds[0][1]) + assert lt_def2.scaled_input_bounds[0][0] == pytest.approx(scaled_input_bounds[0][0]) + assert lt_def2.scaled_input_bounds[0][1] == pytest.approx(scaled_input_bounds[0][1]) with pytest.raises( Exception, match="Input Bounds needed to represent linear trees as MIPs" ): - ltmodel_scaled = LinearTreeDefinition(regr) + LinearTreeDefinition(regr) #### MULTIVARIATE INPUT TESTING #### @@ -394,12 +396,12 @@ def 
test_linear_tree_model_multi_var(): # assert attributes in LinearTreeDefinition assert scaled_input_bounds is not None - assert n_inputs == 2 + assert n_inputs == NUM_INPUTS assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -413,12 +415,12 @@ def test_linear_tree_model_multi_var(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -429,8 +431,8 @@ def test_linear_tree_model_multi_var(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, test the shape of it if key == "slope": @@ -494,7 +496,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -536,7 +538,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -578,7 +580,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif( @@ -620,7 +622,7 @@ def connect_outputs(mdl): y_pred = regr.predict( np.array([pe.value(model1.x0), pe.value(model1.x1)]).reshape(1, -1) ) - assert y_pred[0] == approx(solution_1_bigm) + assert y_pred[0] == pytest.approx(solution_1_bigm) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -641,11 +643,11 @@ def test_summary_dict_as_argument(): # assert attributes in LinearTreeDefinition assert scaled_input_bounds is not None - assert n_inputs == 2 + assert n_inputs == NUM_INPUTS assert n_outputs == 1 # test for splits # assert the number of splits - assert len(splits[0].keys()) == 5 + assert len(splits[0].keys()) == NUM_SPLITS splits_key_list = [ "col", "th", @@ -659,12 +661,12 @@ def test_summary_dict_as_argument(): "y_index", ] # assert whether all the dicts have such keys - for i in splits[0].keys(): - for key in splits[0][i].keys(): + for i in splits[0]: + for key in splits[0][i]: assert key in splits_key_list # test for leaves # assert the number of leaves - assert len(leaves[0].keys()) == 6 + assert len(leaves[0].keys()) == NUM_LEAVES # assert whether all the dicts have such keys leaves_key_list = [ "loss", @@ -675,8 +677,8 @@ def test_summary_dict_as_argument(): "parent", "bounds", ] - for j in leaves[0].keys(): - for key in leaves[0][j].keys(): + for j in leaves[0]: + for key in leaves[0][j]: assert key in leaves_key_list # if the key is slope, test the shape of it if key == "slope": @@ -709,24 +711,26 @@ def test_raise_exception_if_wrong_model_instance(): input_bounds = {0: (min(X[:, 0]), max(X[:, 0])), 1: (min(X[:, 1]), max(X[:, 1]))} with pytest.raises( Exception, - match="Input dict must be the 
summary of the linear-tree model" - + " e.g. dict = model.summary()", + match=( + "Input dict must be the summary of the linear-tree model" + " e.g. dict = model.summary()" + ), ): - ltmodel_small = LinearTreeDefinition( + LinearTreeDefinition( regr.summary(only_leaves=True), scaled_input_bounds=input_bounds ) with pytest.raises( Exception, match="Model entry must be dict or linear-tree instance" ): - ltmodel_small = LinearTreeDefinition((0, 0), scaled_input_bounds=input_bounds) + LinearTreeDefinition((0, 0), scaled_input_bounds=input_bounds) with pytest.raises( Exception, - match="Input dict must be the summary of the linear-tree model" - + " e.g. dict = model.summary()", + match=( + "Input dict must be the summary of the linear-tree model" + " e.g. dict = model.summary()" + ), ): - ltmodel_small = LinearTreeDefinition( - wrong_summary_dict, scaled_input_bounds=input_bounds - ) + LinearTreeDefinition(wrong_summary_dict, scaled_input_bounds=input_bounds) @pytest.mark.skipif(not lineartree_available, reason="Need Linear-Tree Package") @@ -762,4 +766,4 @@ def test_raise_exception_for_wrong_transformation(): Exception, match="Supported transformations are: bigm, mbigm, hull, and custom", ): - formulation = LinearTreeGDPFormulation(model_def, transformation="hello") + LinearTreeGDPFormulation(model_def, transformation="hello") diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index 02da81aa..7cc7261d 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -1,28 +1,30 @@ import numpy as np import pyomo.environ as pyo import pytest -from pyomo.common.dependencies import DeferredImportError - from omlt.dependencies import keras, keras_available +from pyomo.common.dependencies import DeferredImportError if keras_available: from omlt.io import load_keras_sequential from conftest import get_neural_network_data - from omlt.block import OmltBlock from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation from omlt.neuralnet.activations import ComplementarityReLUActivation from omlt.scaling import OffsetScaling +LESS_NEAR_EQUAL = 1e-3 +NEAR_EQUAL = 1e-4 +VERY_NEAR_EQUAL = 1e-5 + @pytest.mark.skipif(keras_available, reason="Test only valid when keras not available") def test_keras_not_available_exception(datadir): with pytest.raises(DeferredImportError): - NN = keras.models.load_model(datadir.file("keras_linear_131_relu")) + keras.models.load_model(datadir.file("keras_linear_131_relu")) -def _test_keras_linear_131(keras_fname, reduced_space=False): +def _test_keras_linear_131(keras_fname, *, reduced_space=False): x, y, x_test = get_neural_network_data("131") nn = keras.models.load_model(keras_fname, compile=False) @@ -40,7 +42,10 @@ def _test_keras_linear_131(keras_fname, reduced_space=False): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("ipopt").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < VERY_NEAR_EQUAL + ) def _test_keras_mip_relu_131(keras_fname): @@ -60,7 +65,10 @@ def _test_keras_mip_relu_131(keras_fname): m.neural_net_block.inputs[0].fix(x_test[d]) status = pyo.SolverFactory("cbc").solve(m, tee=False) pyo.assert_optimal_termination(status) - assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5 + assert ( + abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) + < VERY_NEAR_EQUAL + ) def 
_test_keras_complementarity_relu_131(keras_fname):
@@ -81,10 +89,13 @@
         m.neural_net_block.inputs[0].fix(x_test[d])
         status = pyo.SolverFactory("ipopt").solve(m, tee=False)
         pyo.assert_optimal_termination(status)
-    assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-4
+    assert (
+        abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0])
+        < NEAR_EQUAL
+    )
 
 
-def _test_keras_linear_big(keras_fname, reduced_space=False):
+def _test_keras_linear_big(keras_fname, *, reduced_space=False):
     x, y, x_test = get_neural_network_data("131")
 
     nn = keras.models.load_model(keras_fname, compile=False)
@@ -103,7 +114,10 @@
         m.neural_net_block.inputs[0].fix(x_test[d])
         status = pyo.SolverFactory("ipopt").solve(m, tee=False)
         pyo.assert_optimal_termination(status)
-    assert abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0]) < 1e-5
+    assert (
+        abs(pyo.value(m.neural_net_block.outputs[0]) - nn_outputs[d][0])
+        < VERY_NEAR_EQUAL
+    )
 
 
 @pytest.mark.skipif(not keras_available, reason="Need keras for this test")
@@ -183,12 +197,14 @@ def test_scaling_NN_block(datadir):
     def obj(mdl):
         return 1
 
-    for x in np.random.normal(1, 0.5, 10):
+    rng = np.random.default_rng()
+
+    for x in rng.normal(1, 0.5, 10):
         model.nn.inputs[0].fix(x)
-        result = pyo.SolverFactory("cbc").solve(model, tee=False)
+        pyo.SolverFactory("cbc").solve(model, tee=False)
 
         x_s = (x - scale_x[0]) / scale_x[1]
         y_s = NN.predict([np.array((x_s,))])
         y = y_s * scale_y[1] + scale_y[0]
-        assert y - pyo.value(model.nn.outputs[0]) <= 1e-3
+        assert y - pyo.value(model.nn.outputs[0]) <= LESS_NEAR_EQUAL
diff --git a/tests/neuralnet/test_layer.py b/tests/neuralnet/test_layer.py
index 4a8944ac..6cf2b6de 100644
--- a/tests/neuralnet/test_layer.py
+++ b/tests/neuralnet/test_layer.py
@@ -1,6 +1,5 @@
 import numpy as np
 import pytest
-
 from omlt.neuralnet.layer import (
     ConvLayer2D,
     DenseLayer,
@@ -133,16 +132,16 @@ def test_gnn_layer_with_input_index_mapper():
     y3 = np.array([[[-6, 4, 0, -12, 11, 1, -5, 5, 2], [-1, 0, 1, -1, 0, 1, -1, 0, 1]]])
     assert np.array_equal(layer._eval_with_adjacency(inputs, A3), y3)
 
-    with pytest.raises(ValueError) as excinfo:
-        layer = GNNLayer([5], [9], weights, biases, N=3)
-    assert (
-        str(excinfo.value)
-        == "Input size must equal to the number of nodes multiplied by the number of input node features"
+    expected_msg = (
+        "Input size must equal to the number of nodes multiplied by the number of"
+        " input node features"
     )
+    with pytest.raises(ValueError, match=expected_msg):
+        layer = GNNLayer([5], [9], weights, biases, N=3)
 
-    with pytest.raises(ValueError) as excinfo:
-        layer = GNNLayer([6], [8], weights, biases, N=3)
-    assert (
-        str(excinfo.value)
-        == "Output size must equal to the number of nodes multiplied by the number of output node features"
+    expected_msg = (
+        "Output size must equal to the number of nodes multiplied by the number of"
+        " output node features"
     )
+    with pytest.raises(ValueError, match=expected_msg):
+        layer = GNNLayer([6], [8], weights, biases, N=3)
diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py
index f3cadcb7..8dff2365 100644
--- a/tests/neuralnet/test_network_definition.py
+++ b/tests/neuralnet/test_network_definition.py
@@ -1,18 +1,22 @@
+import re
+
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-
 from omlt.block import OmltBlock
 from omlt.neuralnet.layer import DenseLayer, InputLayer
 from omlt.neuralnet.network_definition import NetworkDefinition
 from omlt.neuralnet.nn_formulation import FullSpaceNNFormulation
 from omlt.scaling import OffsetScaling
 
+ALMOST_EXACTLY_EQUAL = 1e-8
+
 
 # TODO: Build more tests with different activations and edge cases
 def test_two_node_full_space():
-    """
-    1    1
+    """Two node full space network.
+
+    1         1
     x0 -------- (1) --------- (3)
                 |    /
                 |   /
@@ -57,14 +61,16 @@
     m.obj1 = pyo.Objective(expr=0)
     status = pyo.SolverFactory("cbc").solve(m, tee=True)
     pyo.assert_optimal_termination(status)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10.0) < 1e-8
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2.0) < 1e-8
+    assert (
+        abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10.0) < ALMOST_EXACTLY_EQUAL
+    )
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2.0) < ALMOST_EXACTLY_EQUAL
 
     m.neural_net_block.inputs[0].fix(1)
     status = pyo.SolverFactory("cbc").solve(m, tee=False)
     pyo.assert_optimal_termination(status)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1.0) < 1e-8
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0.0) < 1e-8
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1.0) < ALMOST_EXACTLY_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0.0) < ALMOST_EXACTLY_EQUAL
 
 
 def test_input_bounds_no_scaler():
@@ -91,7 +97,7 @@ def test_input_bound_scaling_1D():
     scaled_input_bounds = {0: (0, 5), 1: (-2, 2), 2: (0, 1)}
     unscaled_input_bounds = {}
-    for k in scaled_input_bounds.keys():
+    for k in scaled_input_bounds:
         lb, ub = scaled_input_bounds[k]
         unscaled_input_bounds[k] = (
             (lb * xfactor[k]) + xoffset[k],
@@ -121,7 +127,7 @@ def test_input_bound_scaling_multiD():
     scaled_input_bounds = {(0, 0): (0, 5), (0, 1): (-2, 2), (0, 2): (0, 1)}
     unscaled_input_bounds = {}
-    for k in scaled_input_bounds.keys():
+    for k in scaled_input_bounds:
         lb, ub = scaled_input_bounds[k]
         unscaled_input_bounds[k] = (
             (lb * xfactor[k]) + xoffset[k],
@@ -135,9 +141,7 @@
 def _test_add_invalid_edge(direction):
-    """
-    direction can be "in" or "out"
-    """
+    """Direction can be "in" or "out"."""
     net = NetworkDefinition(scaled_input_bounds=[(-10.0, 10.0)])
 
     input_layer = InputLayer([1])
@@ -162,15 +166,17 @@
     )
 
     if direction == "in":
-        with pytest.raises(ValueError) as excinfo:
-            net.add_edge(input_layer, dense_layer_1)
-        expected_msg = f"Inbound layer {dense_layer_1} not found in network."
-        assert str(excinfo.value) == expected_msg
+        expected_msg = re.escape(
+            f"Inbound layer {dense_layer_1} not found in network."
+        )
+        with pytest.raises(ValueError, match=expected_msg):
+            net.add_edge(input_layer, dense_layer_1)
     elif direction == "out":
-        with pytest.raises(ValueError) as excinfo:
-            net.add_edge(dense_layer_1, dense_layer_0)
-        expected_msg = f"Outbound layer {dense_layer_1} not found in network."
-        assert str(excinfo.value) == expected_msg
+        expected_msg = re.escape(
+            f"Outbound layer {dense_layer_1} not found in network."
+        )
+        with pytest.raises(ValueError, match=expected_msg):
+            net.add_edge(dense_layer_1, dense_layer_0)
 
 
 def test_add_invalid_edge():
diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py
index 577a5f45..d79d2160 100644
--- a/tests/neuralnet/test_nn_formulation.py
+++ b/tests/neuralnet/test_nn_formulation.py
@@ -1,8 +1,8 @@
+import re
+
 import numpy as np
 import pyomo.environ as pyo
 import pytest
-from pyomo.contrib.fbbt import interval
-
 from omlt import OmltBlock
 from omlt.neuralnet import (
     FullSpaceNNFormulation,
@@ -29,11 +29,22 @@
     partition_based_dense_relu_layer,
 )
 from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer
+from pyomo.contrib.fbbt import interval
 
+NEAR_EQUAL = 1e-6
+FULLSPACE_SMOOTH_VARS = 15
+FULLSPACE_SMOOTH_CONSTRAINTS = 14
+FULLSPACE_RELU_VARS = 19
+FULLSPACE_RELU_CONSTRAINTS = 26
+REDUCED_VARS = 6
+REDUCED_CONSTRAINTS = 5
+THREE_NODE_VARS = 81
+THREE_NODE_CONSTRAINTS = 120
 
 def two_node_network(activation, input_value):
-    """
-    1    1
+    """Two node network.
+
+    1         1
     x0 -------- (1) --------- (3)
                 |    /
                 |   /
@@ -80,21 +91,21 @@ def _test_two_node_FullSpaceNNFormulation_smooth(activation):
     m.neural_net_block = OmltBlock()
     net, y = two_node_network(activation, -2.0)
     m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
-    assert m.nvariables() == 15
-    assert m.nconstraints() == 14
+    assert m.nvariables() == FULLSPACE_SMOOTH_VARS
+    assert m.nconstraints() == FULLSPACE_SMOOTH_CONSTRAINTS
     m.neural_net_block.inputs[0].fix(-2)
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
+    pyo.SolverFactory("ipopt").solve(m, tee=False)
 
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL
 
     net, y = two_node_network(activation, 1.0)
     m.neural_net_block.inputs[0].fix(1)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    pyo.SolverFactory("ipopt").solve(m, tee=False)
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL
 
 
 def _test_two_node_FullSpaceNNFormulation_relu():
@@ -102,21 +113,21 @@
     m.neural_net_block = OmltBlock()
     net, y = two_node_network("relu", -2.0)
     m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
-    assert m.nvariables() == 19
-    assert m.nconstraints() == 26
+    assert m.nvariables() == FULLSPACE_RELU_VARS
+    assert m.nconstraints() == FULLSPACE_RELU_CONSTRAINTS
     m.neural_net_block.inputs[0].fix(-2)
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("cbc").solve(m, tee=False)
+    pyo.SolverFactory("cbc").solve(m, tee=False)
 
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL
 
     net, y = two_node_network("relu", 1.0)
     m.neural_net_block.inputs[0].fix(1)
-    status = pyo.SolverFactory("cbc").solve(m, tee=False)
- assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_FullSpaceSmoothNNFormulation(activation): @@ -124,21 +135,21 @@ def _test_two_node_FullSpaceSmoothNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(FullSpaceSmoothNNFormulation(net)) - assert m.nvariables() == 15 - assert m.nconstraints() == 14 + assert m.nvariables() == FULLSPACE_SMOOTH_VARS + assert m.nconstraints() == FULLSPACE_SMOOTH_CONSTRAINTS m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) + pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL net, y = two_node_network(activation, 1.0) m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_ReducedSpaceNNFormulation(activation): @@ -146,21 +157,21 @@ def _test_two_node_ReducedSpaceNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) - assert m.nvariables() == 6 - assert m.nconstraints() == 5 + assert m.nvariables() == REDUCED_VARS + assert m.nconstraints() == REDUCED_CONSTRAINTS m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) + pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL net, y = two_node_network(activation, 1.0) m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): @@ -168,21 +179,21 @@ def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) m.neural_net_block.build_formulation(ReducedSpaceSmoothNNFormulation(net)) - assert m.nvariables() == 6 
-    assert m.nconstraints() == 5
+    assert m.nvariables() == REDUCED_VARS
+    assert m.nconstraints() == REDUCED_CONSTRAINTS
     m.neural_net_block.inputs[0].fix(-2)
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
+    pyo.SolverFactory("ipopt").solve(m, tee=False)
 
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL
 
     net, y = two_node_network(activation, 1.0)
     m.neural_net_block.inputs[0].fix(1)
-    status = pyo.SolverFactory("ipopt").solve(m, tee=False)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < 1e-6
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < 1e-6
+    pyo.SolverFactory("ipopt").solve(m, tee=False)
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - y[0, 0]) < NEAR_EQUAL
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL
 
 
 def test_two_node_ReducedSpaceNNFormulation():
@@ -198,10 +209,9 @@ def test_two_node_ReducedSpaceSmoothNNFormulation():
 
 
 def test_two_node_ReducedSpaceSmoothNNFormulation_invalid_activation():
-    with pytest.raises(ValueError) as excinfo:
-        _test_two_node_ReducedSpaceSmoothNNFormulation("relu")
     expected_msg = "Activation relu is not supported by this formulation."
-    assert str(excinfo.value) == expected_msg
+    with pytest.raises(ValueError, match=expected_msg):
+        _test_two_node_ReducedSpaceSmoothNNFormulation("relu")
 
 
 def test_two_node_FullSpaceNNFormulation():
@@ -218,15 +228,15 @@ def test_two_node_FullSpaceSmoothNNFormulation():
 
 
 def test_two_node_FullSpaceSmoothNNFormulation_invalid_activation():
-    with pytest.raises(ValueError) as excinfo:
-        _test_two_node_FullSpaceSmoothNNFormulation("relu")
     expected_msg = "Activation relu is not supported by this formulation."
-    assert str(excinfo.value) == expected_msg
+    with pytest.raises(ValueError, match=expected_msg):
+        _test_two_node_FullSpaceSmoothNNFormulation("relu")
 
 
 @pytest.mark.skip(reason="Need to add checks on layer types")
 def test_invalid_layer_type():
-    raise AssertionError("Layer type test not yet implemented")
+    msg = "Layer type test not yet implemented"
+    raise AssertionError(msg)
 
 
 def _maxpool_conv_network(inputs):
@@ -337,35 +347,27 @@ def test_maxpool_FullSpaceNNFormulation():
         inputs_d, inputs_r, inputs_c
     ]
     m.obj1 = pyo.Objective(expr=0)
-    status = pyo.SolverFactory("cbc").solve(m, tee=False)
-    assert abs(pyo.value(m.neural_net_block.outputs[0, 0, 0]) - y[0, 0, 0]) < 1e-6
+    pyo.SolverFactory("cbc").solve(m, tee=False)
+    assert abs(pyo.value(m.neural_net_block.outputs[0, 0, 0]) - y[0, 0, 0]) < NEAR_EQUAL
 
 
 def _test_formulation_initialize_extra_input(network_formulation):
-    """
-    network_formulation can be:
-    'FullSpace',
-    'ReducedSpace'
-    """
+    """network_formulation can be: 'FullSpace', 'ReducedSpace'."""
    net, y = two_node_network("linear", -2.0)
     extra_input = InputLayer([1])
     net.add_layer(extra_input)
-    with pytest.raises(ValueError) as excinfo:
-        if network_formulation == "FullSpace":
+
+    expected_msg = "Multiple input layers are not currently supported."
+    if network_formulation == "FullSpace":
+        with pytest.raises(ValueError, match=expected_msg):
             FullSpaceNNFormulation(net)
-        elif network_formulation == "ReducedSpace":
+    elif network_formulation == "ReducedSpace":
+        with pytest.raises(ValueError, match=expected_msg):
             ReducedSpaceNNFormulation(net)
-    expected_msg = "Multiple input layers are not currently supported."
-    assert str(excinfo.value) == expected_msg
 
 
 def _test_formulation_added_extra_input(network_formulation):
-    """
-    network_formulation can be:
-    'FullSpace',
-    'ReducedSpace'
-    'relu'
-    """
+    """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'."""
     net, y = two_node_network("linear", -2.0)
     extra_input = InputLayer([1])
     if network_formulation == "FullSpace":
@@ -375,19 +377,13 @@
     elif network_formulation == "relu":
         formulation = ReluPartitionFormulation(net)
     net.add_layer(extra_input)
-    with pytest.raises(ValueError) as excinfo:
-        formulation.input_indexes
     expected_msg = "Multiple input layers are not currently supported."
-    assert str(excinfo.value) == expected_msg
+    with pytest.raises(ValueError, match=expected_msg):
+        _ = formulation.input_indexes
 
 
 def _test_formulation_build_extra_input(network_formulation):
-    """
-    network_formulation can be:
-    'FullSpace',
-    'ReducedSpace'
-    'relu'
-    """
+    """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'."""
     net, y = two_node_network("linear", -2.0)
     extra_input = InputLayer([1])
     if network_formulation == "FullSpace":
@@ -399,19 +395,13 @@
     net.add_layer(extra_input)
     m = pyo.ConcreteModel()
     m.neural_net_block = OmltBlock()
-    with pytest.raises(ValueError) as excinfo:
-        m.neural_net_block.build_formulation(formulation)
     expected_msg = "Multiple input layers are not currently supported."
-    assert str(excinfo.value) == expected_msg
+    with pytest.raises(ValueError, match=expected_msg):
+        m.neural_net_block.build_formulation(formulation)
 
 
 def _test_formulation_added_extra_output(network_formulation):
-    """
-    network_formulation can be:
-    'FullSpace',
-    'ReducedSpace'
-    'relu'
-    """
+    """network_formulation can be: 'FullSpace', 'ReducedSpace', 'relu'."""
     net, y = two_node_network("linear", -2.0)
     extra_output = DenseLayer(
         [1, 2],
@@ -428,18 +418,13 @@
         formulation = ReluPartitionFormulation(net)
     net.add_layer(extra_output)
     net.add_edge(list(net.layers)[-2], extra_output)
-    with pytest.raises(ValueError) as excinfo:
-        formulation.output_indexes
     expected_msg = "Multiple output layers are not currently supported."
-    assert str(excinfo.value) == expected_msg
+    with pytest.raises(ValueError, match=expected_msg):
+        _ = formulation.output_indexes
 
 
 def _test_formulation_initialize_extra_output(network_formulation):
-    """
-    network_formulation can be:
-    'FullSpace',
-    'ReducedSpace'
-    """
+    """network_formulation can be: 'FullSpace', 'ReducedSpace'."""
     net, y = two_node_network("linear", -2.0)
     extra_output = DenseLayer(
         [1, 2],
         activation="linear",
         weights=np.array([[1.0, 0.0], [5.0, 1.0]]),
         biases=np.array([3.0, 4.0]),
     )
     net.add_layer(extra_output)
     net.add_edge(list(net.layers)[-2], extra_output)
-    with pytest.raises(ValueError) as excinfo:
-        if network_formulation == "FullSpace":
+
+    expected_msg = "Multiple output layers are not currently supported."
+ if network_formulation == "FullSpace": + with pytest.raises(ValueError, match=expected_msg): FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": + elif network_formulation == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): ReducedSpaceNNFormulation(net) - expected_msg = "Multiple output layers are not currently supported." - assert str(excinfo.value) == expected_msg def test_FullSpaceNNFormulation_invalid_network(): @@ -489,19 +475,18 @@ def _test_dense_layer_multiple_predecessors(layer_type): test_layer = list(net.layers)[2] net.add_layer(extra_input) net.add_edge(extra_input, test_layer) - with pytest.raises(ValueError) as excinfo: - if layer_type == "PartitionBased": + + expected_msg = re.escape(f"Layer {test_layer} has multiple predecessors.") + if layer_type == "PartitionBased": + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer(m, net, m, test_layer, None) - elif layer_type == "ReducedSpace": + elif layer_type == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): reduced_space_dense_layer(m, net, m, test_layer, None) - expected_msg = f"Layer {test_layer} has multiple predecessors." - assert str(excinfo.value) == expected_msg def _test_dense_layer_no_predecessors(layer_type): - """ - Layer type can be "ReducedSpace", or "PartitionBased". - """ + """Layer type can be "ReducedSpace", or "PartitionBased".""" m = pyo.ConcreteModel() net = NetworkDefinition(scaled_input_bounds=[(-10.0, 10.0)]) @@ -513,13 +498,16 @@ def _test_dense_layer_no_predecessors(layer_type): biases=np.array([1.0, 2.0]), ) net.add_layer(test_layer) - with pytest.raises(ValueError) as excinfo: - if layer_type == "PartitionBased": + + expected_msg = re.escape( + f"Layer {test_layer} is not an input layer, but has no predecessors." + ) + if layer_type == "PartitionBased": + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer(m, net, m, test_layer, None) - elif layer_type == "ReducedSpace": + elif layer_type == "ReducedSpace": + with pytest.raises(ValueError, match=expected_msg): reduced_space_dense_layer(m, net, m, test_layer, None) - expected_msg = f"Layer {test_layer} is not an input layer, but has no predecessors." - assert str(excinfo.value) == expected_msg def test_partition_based_dense_layer_predecessors(): @@ -546,12 +534,11 @@ def test_partition_based_unbounded_below(): split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded below." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded below." - assert str(excinfo.value) == expected_msg def test_partition_based_unbounded_above(): @@ -568,12 +555,11 @@ def test_partition_based_unbounded_above(): split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded above." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded above." 
- assert str(excinfo.value) == expected_msg def test_partition_based_bias_unbounded_below(): @@ -588,12 +574,11 @@ def test_partition_based_bias_unbounded_below(): test_layer.biases[0] = -interval.inf split_func = lambda w: default_partition_split_func(w, 2) - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded below." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded below." - assert str(excinfo.value) == expected_msg def test_partition_based_bias_unbounded_above(): @@ -607,13 +592,11 @@ def test_partition_based_bias_unbounded_above(): test_layer.biases[0] = interval.inf split_func = lambda w: default_partition_split_func(w, 2) - - with pytest.raises(ValueError) as excinfo: + expected_msg = "Expression is unbounded above." + with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) - expected_msg = "Expression is unbounded above." - assert str(excinfo.value) == expected_msg def test_fullspace_internal_extra_input(): @@ -626,10 +609,9 @@ def test_fullspace_internal_extra_input(): m.neural_net_block.build_formulation(formulation) net.add_layer(extra_input) net.add_edge(extra_input, test_layer) - with pytest.raises(ValueError) as excinfo: - _input_layer_and_block(m.neural_net_block, net, test_layer) expected_msg = "Multiple input layers are not currently supported." - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + _input_layer_and_block(m.neural_net_block, net, test_layer) def test_conv2d_extra_activation(): @@ -673,10 +655,14 @@ def test_conv2d_extra_activation(): ) net.add_layer(maxpool_layer_1) net.add_edge(conv_layer_2, maxpool_layer_1) - with pytest.raises(ValueError) as excinfo: + expected_msg = re.escape( + "Activation is applied after convolution layer, but the successor maxpooling" + " layer PoolingLayer(input_size=[1, 3, 4], output_size=[1, 1, 2]," + " strides=[2, 2], kernel_shape=[3, 2]), pool_func_name=max has an activation" + " function also." + ) + with pytest.raises(ValueError, match=expected_msg): m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) - expected_msg = """Activation is applied after convolution layer, but the successor max pooling layer PoolingLayer(input_size=[1, 3, 4], output_size=[1, 1, 2], strides=[2, 2], kernel_shape=[3, 2]), pool_func_name=max has an activation function also.""" - assert str(excinfo.value) == expected_msg def test_maxpool2d_bad_input_activation(): @@ -730,13 +716,14 @@ def test_maxpool2d_bad_input_activation(): m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) conv_layer_2.activation = "relu" - - with pytest.raises(ValueError) as excinfo: + expected_msg = ( + "Non-increasing activation functions on the preceding convolutional" + " layer are not supported." 
+ ) + with pytest.raises(ValueError, match=expected_msg): full_space_maxpool2d_layer( m.neural_net_block, net, m.neural_net_block, maxpool_layer_1 ) - expected_msg = """Non-increasing activation functions on the preceding convolutional layer are not supported.""" - assert str(excinfo.value) == expected_msg def test_maxpool2d_bad_input_layer(): @@ -876,15 +863,15 @@ def _test_three_node_graph_neural_network(graph_type): for i in range(6): m.nn.inputs[i].fix(inputs[i]) - assert m.nvariables() == 81 - assert m.nconstraints() == 120 + assert m.nvariables() == THREE_NODE_VARS + assert m.nconstraints() == THREE_NODE_CONSTRAINTS m.obj = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) + pyo.SolverFactory("cbc").solve(m, tee=False) for i in range(9): - assert abs(pyo.value(m.nn.outputs[i]) - y[i]) < 1e-6 + assert abs(pyo.value(m.nn.outputs[i]) - y[i]) < NEAR_EQUAL for i in range(6): for j in range(3): @@ -893,7 +880,7 @@ def _test_three_node_graph_neural_network(graph_type): pyo.value(m.nn.layer[m.nn.layers.at(1)].zbar[i, j]) - pyo.value(m.nn.A[i // 2, j]) * inputs[i] ) - < 1e-6 + < NEAR_EQUAL ) diff --git a/tests/neuralnet/test_onnx.py b/tests/neuralnet/test_onnx.py index bb9b9dfd..7cad2d78 100644 --- a/tests/neuralnet/test_onnx.py +++ b/tests/neuralnet/test_onnx.py @@ -2,9 +2,8 @@ import numpy as np import pytest -from pyomo.common.dependencies import DeferredImportError - from omlt.dependencies import onnx, onnx_available +from pyomo.common.dependencies import DeferredImportError if onnx_available: import onnxruntime as ort @@ -14,16 +13,15 @@ write_onnx_model_with_bounds, ) -from pyomo.environ import * - from omlt import OffsetScaling, OmltBlock from omlt.neuralnet import FullSpaceNNFormulation +from pyomo.environ import ConcreteModel, SolverFactory, value @pytest.mark.skipif(onnx_available, reason="Test only valid when onnx not available") def test_onnx_not_available_exception(datadir): with pytest.raises(DeferredImportError): - neural_net = onnx.load(datadir.file("keras_linear_131_relu.onnx")) + onnx.load(datadir.file("keras_linear_131_relu.onnx")) @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -58,7 +56,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("cbc").solve(model, tee=False) + SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -101,7 +99,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("cbc").solve(model, tee=False) + SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -145,7 +143,7 @@ def obj(mdl): for x in [-0.25, 0.0, 0.25, 1.5]: model.nn.inputs.fix(x) - result = SolverFactory("ipopt").solve(model, tee=False) + SolverFactory("ipopt").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] x_s = np.array([[x_s]], dtype=np.float32) @@ -159,12 +157,12 @@ def obj(mdl): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") def test_onnx_bounds_loader_writer(datadir): onnx_model = onnx.load(datadir.file("keras_conv_7x7_relu.onnx")) - scaled_input_bounds = dict() + scaled_input_bounds = {} for i in range(7): for j in range(7): scaled_input_bounds[0, i, j] = (0.0, 1.0) with tempfile.NamedTemporaryFile(suffix=".onnx") as f: write_onnx_model_with_bounds(f.name, onnx_model, scaled_input_bounds) net = load_onnx_neural_network_with_bounds(f.name) - for key, value 
in net.scaled_input_bounds.items(): - assert scaled_input_bounds[key] == value + for key, val in net.scaled_input_bounds.items(): + assert scaled_input_bounds[key] == val diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 15ba97d1..59dc247a 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -1,7 +1,6 @@ import numpy as np import pyomo.environ as pyo import pytest - from omlt.block import OmltBlock from omlt.dependencies import onnx_available from omlt.neuralnet import ( @@ -14,6 +13,7 @@ # TODO: Add tests for single dimensional outputs as well +NEAR_EQUAL = 1e-3 def test_two_node_bigm(two_node_network_relu): m = pyo.ConcreteModel() @@ -24,14 +24,14 @@ def test_two_node_bigm(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluBigMFormulation(two_node_network_relu): @@ -43,14 +43,14 @@ def test_two_node_ReluBigMFormulation(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_complementarity(two_node_network_relu): @@ -64,14 +64,14 @@ def test_two_node_complementarity(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, 
tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluComplementarityFormulation(two_node_network_relu): @@ -82,14 +82,14 @@ def test_two_node_ReluComplementarityFormulation(two_node_network_relu): m.neural_net_block.inputs[0].fix(-2) m.obj1 = pyo.Objective(expr=0) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("ipopt").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("ipopt").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL def test_two_node_ReluPartitionFormulation(two_node_network_relu): @@ -101,14 +101,14 @@ def test_two_node_ReluPartitionFormulation(two_node_network_relu): m.obj1 = pyo.Objective(expr=0) m.neural_net_block.inputs[0].fix(-2) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 10) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 2) < NEAR_EQUAL m.neural_net_block.inputs[0].fix(1) - status = pyo.SolverFactory("cbc").solve(m, tee=False) - assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < 1e-3 - assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < 1e-3 + pyo.SolverFactory("cbc").solve(m, tee=False) + assert abs(pyo.value(m.neural_net_block.outputs[0, 0]) - 1) < NEAR_EQUAL + assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") @@ -124,8 +124,7 @@ def test_conv_ReluBigMFormulation(datadir): m.obj1 = pyo.Objective(expr=0) # compute expected output for this input - input = np.eye(7, 7).reshape(1, 7, 7) - x = input + x = np.eye(7, 7).reshape(1, 7, 7) for layer in net.layers: x = layer.eval_single_layer(x) output = x @@ -133,7 +132,7 @@ def test_conv_ReluBigMFormulation(datadir): for i in range(7): for j in range(7): m.neural_net_block.inputs[0, i, j].fix(input[0, i, j]) - status = pyo.SolverFactory("cbc").solve(m, tee=False) + pyo.SolverFactory("cbc").solve(m, tee=False) d, r, c = output.shape for i in range(d): @@ -141,4 +140,4 @@ def test_conv_ReluBigMFormulation(datadir): for k in range(c): expected = output[i, j, k] actual = pyo.value(m.neural_net_block.outputs[i, j, k]) - assert abs(actual - expected) < 1e-3 + assert abs(actual - expected) < NEAR_EQUAL diff --git a/tests/neuralnet/train_keras_models.py b/tests/neuralnet/train_keras_models.py index c2de9dbc..81469c6a 100644 --- a/tests/neuralnet/train_keras_models.py +++ b/tests/neuralnet/train_keras_models.py @@ -1,13 +1,10 @@ -import pytest import keras - -# from conftest import get_neural_network_data +from conftest import get_neural_network_data from keras.layers 
import Conv2D, Dense -from keras.models import Model, Sequential -from pyomo.common.fileutils import this_file_dir +from keras.models import Sequential from keras.optimizers import Adamax - from omlt.io import write_onnx_model_with_bounds +from pyomo.common.fileutils import this_file_dir def train_models(): @@ -37,7 +34,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131.keras") @@ -69,7 +66,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_sigmoid.keras") @@ -102,7 +99,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save( @@ -136,7 +133,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_relu.keras") @@ -169,7 +166,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/keras_linear_131_relu_output_activation.keras") @@ -202,7 +199,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save( @@ -263,7 +260,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) nn.save(this_file_dir() + "/models/big.keras") @@ -305,7 +302,7 @@ def train_models(): ) ) nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae") - history = nn.fit( + nn.fit( x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15 ) @@ -333,7 +330,7 @@ def train_conv(): onnx_model, _ = tf2onnx.convert.from_keras(nn) - input_bounds = dict() + input_bounds = {} for i in range(7): for j in range(7): input_bounds[0, i, j] = (0.0, 1.0) diff --git a/tests/notebooks/test_run_notebooks.py b/tests/notebooks/test_run_notebooks.py index 9b1361c9..7871bc87 100644 --- a/tests/notebooks/test_run_notebooks.py +++ b/tests/notebooks/test_run_notebooks.py @@ -1,22 +1,22 @@ import os +from pathlib import Path import pytest -from pyomo.common.fileutils import this_file_dir -from testbook import testbook - from omlt.dependencies import ( keras_available, onnx_available, torch_available, torch_geometric_available, ) +from pyomo.common.fileutils import this_file_dir +from testbook import testbook # TODO: These will be replaced with stronger tests using testbook soon def _test_run_notebook(folder, notebook_fname, n_cells): # Change to notebook directory to allow for testing - cwd = os.getcwd() - os.chdir(os.path.join(this_file_dir(), "..", "..", "docs", "notebooks", folder)) + cwd = Path.cwd() + os.chdir(Path(this_file_dir()) / ".." / ".." 
/ "docs" / "notebooks" / folder) with testbook(notebook_fname, timeout=500, execute=True) as tb: assert tb.code_cells_executed == n_cells os.chdir(cwd) diff --git a/tests/test_block.py b/tests/test_block.py index 6c6311f5..ccb8753f 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -1,10 +1,12 @@ import pyomo.environ as pyo import pytest - from omlt import OmltBlock +INPUTS_LENGTH = 3 +OUTPUTS_LENGTH = 2 + -class dummy_formulation(object): +class DummyFormulation: def __init__(self): self.input_indexes = ["A", "C", "D"] self.output_indexes = [(0, 0), (0, 1), (1, 0), (1, 1)] @@ -26,27 +28,29 @@ def test_block(): output_indexes=[(0, 0), (0, 1), (1, 0), (1, 1)], ) - assert [k for k in m.b.inputs] == ["A", "B", "C"] - assert [k for k in m.b.outputs] == [1, 4] - assert [k for k in m.b2.inputs] == [(1, 3), (42, 1975), (13, 2)] - assert [k for k in m.b2.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(m.b.inputs) == ["A", "B", "C"] + assert list(m.b.outputs) == [1, 4] + assert list(m.b2.inputs) == [(1, 3), (42, 1975), (13, 2)] + assert list(m.b2.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] m = pyo.ConcreteModel() m.b = OmltBlock() - formulation = dummy_formulation() + formulation = DummyFormulation() m.b.build_formulation(formulation) - print(dir(m.b)) + assert m.b._OmltBlockData__formulation is formulation - assert [k for k in m.b.inputs] == ["A", "C", "D"] - assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] + assert list(m.b.inputs) == ["A", "C", "D"] + assert list(m.b.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] def test_input_output_auto_creation(): m = pyo.ConcreteModel() m.b = OmltBlock() - m.b._setup_inputs_outputs(input_indexes=range(3), output_indexes=range(2)) - assert len(m.b.inputs) == 3 - assert len(m.b.outputs) == 2 + m.b._setup_inputs_outputs( + input_indexes=range(INPUTS_LENGTH), output_indexes=range(OUTPUTS_LENGTH) + ) + assert len(m.b.inputs) == INPUTS_LENGTH + assert len(m.b.outputs) == OUTPUTS_LENGTH m.b2 = OmltBlock() m.b2._setup_inputs_outputs( @@ -57,7 +61,8 @@ def test_input_output_auto_creation(): assert len(m.b2.outputs) == 1 m.b3 = OmltBlock() - with pytest.raises(ValueError): + expected_msg = "OmltBlock must have at least one input and at least one output." 
+ with pytest.raises(ValueError, match=expected_msg): m.b3._setup_inputs_outputs( input_indexes=[], output_indexes=[], diff --git a/tests/test_formulation.py b/tests/test_formulation.py index 4e047845..df4aa0d9 100644 --- a/tests/test_formulation.py +++ b/tests/test_formulation.py @@ -1,9 +1,8 @@ import pytest -from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var, value - from omlt.block import OmltBlock from omlt.formulation import _setup_scaled_inputs_outputs from omlt.scaling import OffsetScaling +from pyomo.environ import ConcreteModel, Objective, SolverFactory, value def test_scaled_inputs_outputs(): @@ -32,7 +31,7 @@ def test_scaled_inputs_outputs(): m.obj = Objective(expr=1) m.b1.inputs.fix(2) m.b1.outputs.fix(1) - status = SolverFactory("ipopt").solve(m) + SolverFactory("ipopt").solve(m) assert value(m.b1.scaled_inputs[(0, 0)]) == pytest.approx(4.0) assert value(m.b1.scaled_inputs[(0, 1)]) == pytest.approx(1.0) @@ -68,7 +67,7 @@ def test_scaled_inputs_outputs(): m.obj = Objective(expr=1) m.b1.inputs.fix(2) m.b1.outputs.fix(1) - status = SolverFactory("ipopt").solve(m) + SolverFactory("ipopt").solve(m) assert value(m.b1.scaled_inputs[0]) == pytest.approx(4.0) assert value(m.b1.scaled_inputs[1]) == pytest.approx(1.0) assert value(m.b1.scaled_inputs[2]) == pytest.approx(0.0) diff --git a/tests/test_scaling.py b/tests/test_scaling.py index 05b0e013..790241bf 100644 --- a/tests/test_scaling.py +++ b/tests/test_scaling.py @@ -1,6 +1,7 @@ +import re + import numpy as np import pytest - from omlt import OffsetScaling from omlt.scaling import convert_to_dict @@ -71,48 +72,44 @@ def test_incorrect_keys(): np.testing.assert_almost_equal(list(test_y_unscal.values()), list(y.values())) x = {1: 42, 2: 65} - with pytest.raises(ValueError) as excinfo: - test_x_scal = scaling.get_scaled_input_expressions(x) - expected_msg = ( + expected_msg = re.escape( "get_scaled_input_expressions called with input_vars that " "do not have the same indices as offset_inputs or factor_inputs.\nKeys " "in input_vars: [1, 2].\nKeys in offset_inputs: [1, 42].\nKeys in " "offset_factor: [1, 42]." 
) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + test_x_scal = scaling.get_scaled_input_expressions(x) y = {7: -1, 19: 2, 11: 3} - with pytest.raises(ValueError) as excinfo: - test_y_scal = scaling.get_scaled_output_expressions(y) - expected_msg = ( + expected_msg = re.escape( "get_scaled_output_expressions called with output_vars that " "do not have the same indices as offset_outputs or factor_outputs.\nKeys " "in output_vars: [7, 11, 19]\nKeys in offset_outputs: [7, 9, 11]\nKeys in " "offset_factor: [7, 9, 11]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + scaling.get_scaled_output_expressions(y) x_scal = {1: 42, 2: 65} - with pytest.raises(ValueError) as excinfo: - test_x_unscal = scaling.get_unscaled_input_expressions(x_scal) - expected_msg = ( + expected_msg = re.escape( "get_scaled_input_expressions called with input_vars that " "do not have the same indices as offset_inputs or factor_inputs.\nKeys " "in input_vars: [1, 2]\nKeys in offset_inputs: [1, 42]\nKeys in " "offset_factor: [1, 42]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + scaling.get_unscaled_input_expressions(x_scal) y_scal = {7: -1, 8: 2, 11: 3} - with pytest.raises(ValueError) as excinfo: - test_y_unscal = scaling.get_unscaled_output_expressions(y_scal) - expected_msg = ( + expected_msg = re.escape( "get_scaled_output_expressions called with output_vars that do " "not have the same indices as offset_outputs or factor_outputs.\nKeys in " "output_vars: [7, 8, 11]\nKeys in offset_outputs: [7, 9, 11]\nKeys in " "offset_factor: [7, 9, 11]" ) - assert str(excinfo.value) == expected_msg + with pytest.raises(ValueError, match=expected_msg): + test_y_unscal = scaling.get_unscaled_output_expressions(y_scal) def test_negative_offsets(): @@ -121,36 +118,38 @@ def test_negative_offsets(): y_offset = [-4, 2, 1.784] y_factor = [2, 1.5, 1.3] - with pytest.raises(ValueError) as excinfo: - scaling = OffsetScaling( + expected_msg = ( + "OffsetScaling only accepts positive values" + " for factor_inputs. Negative value found at" + " index 0." + ) + + with pytest.raises(ValueError, match=expected_msg): + OffsetScaling( offset_inputs=x_offset, factor_inputs=x_factor, offset_outputs=y_offset, factor_outputs=y_factor, ) - assert ( - str(excinfo.value) == "OffsetScaling only accepts positive values" - " for factor_inputs. Negative value found at" - " index 0." - ) x_offset = [42, 65] x_factor = [1975, 1964] y_offset = [-4, 2, 1.784] y_factor = [2, -1.5, 1.3] - with pytest.raises(ValueError) as excinfo: - scaling = OffsetScaling( + expected_msg = ( + "OffsetScaling only accepts positive values" + " for factor_outputs. Negative value found at" + " index 1." + ) + + with pytest.raises(ValueError, match=expected_msg): + OffsetScaling( offset_inputs=x_offset, factor_inputs=x_factor, offset_outputs=y_offset, factor_outputs=y_factor, ) - assert ( - str(excinfo.value) == "OffsetScaling only accepts positive values" - " for factor_outputs. Negative value found at" - " index 1." - ) if __name__ == "__main__": From 051ac3bfeac932ccb6a62d033810a52546534633 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Sun, 23 Jun 2024 00:27:31 +0000 Subject: [PATCH 57/60] Fixing ruff linting errors. 
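Most of the churn in the test files below comes from ruff's pytest-style
(PT) checks: instead of capturing the exception with
pytest.raises(...) as excinfo and comparing strings after the block, the
expected message is now passed up front through the match= argument.
A minimal sketch of the pattern follows; the test name and the message
string are illustrative only, not taken from this diff:

    import re

    import pytest

    def test_negative_factor_rejected():
        # pytest applies match= as a regex via re.search against
        # str(excinfo.value), so literal text containing dots or
        # brackets has to be escaped first.
        expected_msg = re.escape("Negative value found at index 0.")
        with pytest.raises(ValueError, match=expected_msg):
            raise ValueError("Negative value found at index 0.")

Because re.search only needs to find the pattern somewhere in the rendered
exception, an escaped substring would be enough; the diffs below keep the
full message so the assertions stay strict.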
--- pyproject.toml | 14 ++++ src/omlt/block.py | 19 ++++-- src/omlt/io/input_bounds.py | 4 +- src/omlt/io/keras/keras_reader.py | 1 - src/omlt/io/onnx.py | 2 +- .../torch_geometric/torch_geometric_reader.py | 12 ++-- src/omlt/neuralnet/__init__.py | 4 +- src/omlt/neuralnet/layers/full_space.py | 8 +-- src/omlt/neuralnet/nn_formulation.py | 25 +++---- tests/gbt/test_gbt_formulation.py | 35 +--------- tests/io/test_onnx_parser.py | 2 +- tests/io/test_torch_geometric.py | 28 ++++---- tests/neuralnet/test_keras.py | 2 +- tests/neuralnet/test_network_definition.py | 19 ++++-- tests/neuralnet/test_nn_formulation.py | 66 +++++++++---------- tests/neuralnet/test_relu.py | 16 +++-- tests/notebooks/test_run_notebooks.py | 3 +- tests/test_block.py | 26 ++++++-- 18 files changed, 151 insertions(+), 135 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4ad1ca44..c504866e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,6 +78,19 @@ ignore = [ "ANN401", "COM812", "ISC001", + "SLF001", + "ARG001", + "N803", + "N806", + # Remove these after issue https://github.com/cog-imperial/OMLT/issues/153 is fixed. + "D100", + "D101", + "D102", + "D103", + "D104", + "D105", + "D106", + "D107", # TODO: Remove these eventually "ANN001", "ANN002", @@ -106,6 +119,7 @@ convention = "google" "INP001", ] "docs/conf.py" = ["D100", "INP001"] +"src/omlt/neuralnet/layer.py" = ["N802"] [tool.mypy] show_error_codes = true diff --git a/src/omlt/block.py b/src/omlt/block.py index 0a03838d..b8bb391d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -53,11 +53,6 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """ self.__input_indexes = input_indexes self.__output_indexes = output_indexes - if not input_indexes or not output_indexes: - # TODO: implement this check higher up in the class hierarchy to provide - # more contextual error msg - msg = "OmltBlock must have at least one input and at least one output." - raise ValueError(msg) self.inputs_set = pyo.Set(initialize=input_indexes) self.inputs = pyo.Var(self.inputs_set, initialize=0) @@ -77,6 +72,20 @@ def build_formulation(self, formulation): formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation """ + if not formulation.input_indexes: + msg = ( + "OmltBlock must have at least one input to build a formulation. " + f"{formulation} has no inputs." + ) + raise ValueError(msg) + + if not formulation.output_indexes: + msg = ( + "OmltBlock must have at least one output to build a formulation. " + f"{formulation} has no outputs." 
+ ) + raise ValueError(msg) + self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/src/omlt/io/input_bounds.py b/src/omlt/io/input_bounds.py index 9826d498..f01eb3ca 100644 --- a/src/omlt/io/input_bounds.py +++ b/src/omlt/io/input_bounds.py @@ -5,13 +5,13 @@ def write_input_bounds(input_bounds_filename, input_bounds): """Write the specified input bounds to the given file.""" input_bounds = _prepare_input_bounds(input_bounds) - with Path.open(input_bounds_filename, "w") as f: + with Path(input_bounds_filename).open("w") as f: json.dump(input_bounds, f) def load_input_bounds(input_bounds_filename): """Read the input bounds from the given file.""" - with Path.open(input_bounds_filename) as f: + with Path(input_bounds_filename).open() as f: raw_input_bounds = json.load(f) return dict(_parse_raw_input_bounds(d) for d in raw_input_bounds) diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index 3ec0aaaa..d7429d72 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -35,7 +35,6 @@ def load_keras_sequential( ------- NetworkDefinition """ - # TODO: Add exceptions for unsupported layer types n_inputs = len(nn.layers[0].get_weights()[0]) net = NetworkDefinition( diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index 9676ea31..b48915a9 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -21,7 +21,7 @@ def write_onnx_model_with_bounds(filename, onnx_model=None, input_bounds=None): bounds on the input variables """ if onnx_model is not None: - with Path.open(filename, "wb") as f: + with Path(filename).open("wb") as f: f.write(onnx_model.SerializeToString()) if input_bounds is not None: diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index 090d9b5a..4203338b 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -224,10 +224,10 @@ def load_torch_geometric_sequential( biases=biases, ) elif operations[index] == "GCNConv": - assert l.improved == False - assert l.cached == False - assert l.add_self_loops == True - assert l.normalize == True + assert not l.improved + assert not l.cached + assert l.add_self_loops + assert l.normalize gnn_weights = l.lin.weight.detach().numpy() gnn_biases = l.bias.detach().numpy() gnn_norm = _compute_gcn_norm(A) @@ -244,8 +244,8 @@ def load_torch_geometric_sequential( N=N, ) elif operations[index] == "SAGEConv": - assert l.normalize == False - assert l.project == False + assert not l.normalize + assert not l.project assert l.aggr in _AGGREGATION_OP_TYPES gnn_weights_uv = l.lin_l.weight.detach().numpy() gnn_biases = l.lin_l.bias.detach().numpy() diff --git a/src/omlt/neuralnet/__init__.py b/src/omlt/neuralnet/__init__.py index ef90caf3..014de739 100644 --- a/src/omlt/neuralnet/__init__.py +++ b/src/omlt/neuralnet/__init__.py @@ -13,9 +13,9 @@ \xrightarrow[\text{Constraints}]{\text{Layer 3}}\cdots \end{align*} -where +where :math:`\mathbf z^{(0)}` is the output of `InputLayer`, -:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, +:math:`\hat{\mathbf z}^{(l)}` is the pre-activation output of :math:`l`-th layer, :math:`\mathbf z^{(l)}` is the post-activation output of :math:`l`-th layer. 
""" diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index 45b61a90..25fd2dbb 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -205,8 +205,6 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer): input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - # for out_d, out_r, out_c in layer.output_indexes: - # output_index = (out_d, out_r, out_c) @layer_block.Constraint(layer.output_indexes) def convolutional_layer(b, *output_index): out_d, out_r, out_c = output_index @@ -217,7 +215,6 @@ def convolutional_layer(b, *output_index): lb, ub = compute_bounds_on_expr(expr) layer_block.zhat[output_index].setlb(lb) layer_block.zhat[output_index].setub(ub) - # layer_block.constraints.add(layer_block.zhat[output_index] == expr) return layer_block.zhat[output_index] == expr @@ -273,8 +270,9 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): " are not supported." ) raise ValueError(msg) - # TODO - add support for non-increasing activation functions on preceding - # convolutional layer + # TODO @cog-imperial: add support for non-increasing activation functions on + # preceding convolutional layer + # https://github.com/cog-imperial/OMLT/issues/154 # note kernel indexes are the same set of values for any output index, so wlog get # kernel indexes for (0, 0, 0) diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index d8eb5b37..8e835d23 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -306,23 +306,18 @@ def __init__(self, network_structure, activation_functions=None): self.__scaling_object = network_structure.scaling_object self.__scaled_input_bounds = network_structure.scaled_input_bounds - # TODO: look into increasing support for other layers / activations - # self._layer_constraints = {**_DEFAULT_LAYER_CONSTRAINTS, **layer_constraints} self._activation_functions = dict( self._supported_default_activation_functions() ) if activation_functions is not None: self._activation_functions.update(activation_functions) - # If we want to do network input/output validation at initialize time instead - # of build time, as it is for FullSpaceNNFormulation: - # - # network_inputs = list(self.__network_definition.input_nodes) - # if len(network_inputs) != 1: - # raise ValueError(MULTI_INPUTS_UNSUPPORTED) - # network_outputs = list(self.__network_definition.output_nodes) - # if len(network_outputs) != 1: - # raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) + network_inputs = list(self.__network_definition.input_nodes) + if len(network_inputs) != 1: + raise ValueError(MULTI_INPUTS_UNSUPPORTED) + network_outputs = list(self.__network_definition.output_nodes) + if len(network_outputs) != 1: + raise ValueError(MULTI_OUTPUTS_UNSUPPORTED) def _supported_default_activation_functions(self): return dict(_DEFAULT_ACTIVATION_FUNCTIONS) @@ -365,7 +360,13 @@ def z(b, *output_index): # skip the InputLayer continue - # TODO: Add error checking on layer type + if not isinstance(layer, DenseLayer): + msg = ( + f"ReducedSpaceNNFormulation only supports Dense layers. {net}" + f" contains {layer} which is a {type(layer)}." 
+ ) + raise TypeError(msg) + # build the linear expressions and the activation function layer_id = id(layer) layer_block = block.layer[layer_id] diff --git a/tests/gbt/test_gbt_formulation.py b/tests/gbt/test_gbt_formulation.py index a7c7557c..4a99b646 100644 --- a/tests/gbt/test_gbt_formulation.py +++ b/tests/gbt/test_gbt_formulation.py @@ -31,7 +31,7 @@ def test_formulation_with_continuous_variables(): assert ( len(list(m.gbt.component_data_objects(pe.Var))) == 202 + 10 ) # our auto-created variables - # TODO: fix below?: + assert len(list(m.gbt.component_data_objects(pe.Constraint))) == TOTAL_CONSTRAINTS assert len(m.gbt.z_l) == Z_L_VARS @@ -45,39 +45,6 @@ def test_formulation_with_continuous_variables(): assert len(m.gbt.var_upper) == Y_VARS -# TODO: did we remove categorical variables intentionally? -# def test_formulation_with_categorical_variables(): -# model = onnx.load(Path(__file__).parent / "categorical_model.onnx") - -# m = pe.ConcreteModel() - -# m.x = pe.Var(range(3), bounds=(-2.0, 2.0)) -# # categorical variable -# m.y = pe.Var(bounds=(0, 1), domain=pe.Integers) - -# m.z = pe.Var() - -# m.gbt = pe.Block() -# add_formulation_to_block( -# m.gbt, model, input_vars=[m.x[0], m.x[1], m.x[2], m.y], output_vars=[m.z] -# ) - -# assert len(list(m.gbt.component_data_objects(pe.Var))) == 193 -# # there are 28 * 2 constraints missing -# # related to categorical variables -# assert len(list(m.gbt.component_data_objects(pe.Constraint))) == 391 - -# assert len(m.gbt.z_l) == 160 -# assert len(m.gbt.y) == 31 - -# assert len(m.gbt.single_leaf) == 20 -# assert len(m.gbt.left_split) == 140 -# assert len(m.gbt.right_split) == 140 -# assert len(m.gbt.categorical) == 1 -# assert len(m.gbt.var_lower) == 31 -# assert len(m.gbt.var_upper) == 31 - - @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") def test_big_m_formulation_block(): onnx_model = onnx.load(Path(__file__).parent / "continuous_model.onnx") diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py index 2f4510c3..3227e67d 100644 --- a/tests/io/test_onnx_parser.py +++ b/tests/io/test_onnx_parser.py @@ -66,7 +66,7 @@ def test_gemm(datadir): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") -def test_gemm_transB(datadir): +def test_gemm_trans_b(datadir): model = onnx.load(datadir.file("gemm_not_transB.onnx")) model_transB = onnx.load(datadir.file("gemm_transB.onnx")) net = load_onnx_neural_network(model) diff --git a/tests/io/test_torch_geometric.py b/tests/io/test_torch_geometric.py index fd52e69d..be098406 100644 --- a/tests/io/test_torch_geometric.py +++ b/tests/io/test_torch_geometric.py @@ -28,7 +28,7 @@ not (torch_available and torch_geometric_available), reason="Test only valid when torch and torch_geometric are available", ) -def GCN_Sequential(activation, pooling): +def gcn_sequential(activation, pooling): return Sequential( "x, edge_index", [ @@ -49,7 +49,7 @@ def GCN_Sequential(activation, pooling): not (torch_available and torch_geometric_available), reason="Test only valid when torch and torch_geometric are available", ) -def SAGE_Sequential(activation, pooling, aggr, root_weight): +def sage_sequential(activation, pooling, aggr, root_weight): return Sequential( "x, edge_index", [ @@ -142,11 +142,11 @@ def _test_gnn_with_non_fixed_graph(nn): def test_torch_geometric_reader(): for activation in [ReLU, Sigmoid, Tanh]: for pooling in [global_mean_pool, global_add_pool]: - nn = GCN_Sequential(activation, pooling) + nn = gcn_sequential(activation, pooling) 
_test_torch_geometric_reader(nn, activation, pooling) for aggr in ["sum", "mean"]: for root_weight in [False, True]: - nn = SAGE_Sequential(activation, pooling, aggr, root_weight) + nn = sage_sequential(activation, pooling, aggr, root_weight) _test_torch_geometric_reader(nn, activation, pooling) @@ -156,11 +156,11 @@ def test_torch_geometric_reader(): ) def test_gnn_with_fixed_graph(): for pooling in [global_mean_pool, global_add_pool]: - nn = GCN_Sequential(ReLU, pooling) + nn = gcn_sequential(ReLU, pooling) _test_gnn_with_fixed_graph(nn) for aggr in ["sum", "mean"]: for root_weight in [False, True]: - nn = SAGE_Sequential(ReLU, pooling, aggr, root_weight) + nn = sage_sequential(ReLU, pooling, aggr, root_weight) _test_gnn_with_fixed_graph(nn) @@ -172,7 +172,7 @@ def test_gnn_with_non_fixed_graph(): for pooling in [global_mean_pool, global_add_pool]: for aggr in ["sum"]: for root_weight in [False, True]: - nn = SAGE_Sequential(ReLU, pooling, aggr, root_weight) + nn = sage_sequential(ReLU, pooling, aggr, root_weight) _test_gnn_with_non_fixed_graph(nn) @@ -213,16 +213,18 @@ def _test_gnn_value_error(nn, error_info, error_type="ValueError"): reason="Test only valid when torch and torch_geometric are available", ) def test_gnn_value_error(): - nn = SAGE_Sequential(ReLU, global_max_pool, "mean", True) - _test_gnn_value_error(nn, "this operation is not supported") + nn = sage_sequential(ReLU, global_max_pool, "mean", root_weight=True) + _test_gnn_value_error(nn, "Operation global_max_pool is not supported.") - nn = SAGE_Sequential(Sigmoid, global_mean_pool, "sum", True) + nn = sage_sequential(Sigmoid, global_mean_pool, "sum", root_weight=True) _test_gnn_value_error(nn, "nonlinear activation results in a MINLP", "warns") - nn = SAGE_Sequential(ReLU, global_mean_pool, "mean", True) + nn = sage_sequential(ReLU, global_mean_pool, "mean", root_weight=True) _test_gnn_value_error( nn, "this aggregation is not supported when the graph is not fixed" ) - nn = GCN_Sequential(ReLU, global_mean_pool) - _test_gnn_value_error(nn, "this layer is not supported when the graph is not fixed") + nn = gcn_sequential(ReLU, global_mean_pool) + _test_gnn_value_error( + nn, "this layer is not supported when the graph is not fixed." + ) diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index 7cc7261d..eb3436d6 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -170,7 +170,7 @@ def test_keras_linear_big_reduced_space(datadir): @pytest.mark.skipif(not keras_available, reason="Need keras for this test") -def test_scaling_NN_block(datadir): +def test_scaling_nn_block(datadir): NN = keras.models.load_model(datadir.file("keras_linear_131_relu.keras")) model = pyo.ConcreteModel() diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py index 8dff2365..2d58cd3b 100644 --- a/tests/neuralnet/test_network_definition.py +++ b/tests/neuralnet/test_network_definition.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pyomo.environ as pyo import pytest @@ -10,7 +12,8 @@ ALMOST_EXACTLY_EQUAL = 1e-8 -# TODO: Build more tests with different activations and edge cases +# TODO @cog-imperial: Build more tests with different activations and edge cases +# https://github.com/cog-imperial/OMLT/issues/158 def test_two_node_full_space(): """Two node full space network. 
@@ -79,7 +82,7 @@ def test_input_bounds_no_scaler(): assert net.scaled_input_bounds == scaled_input_bounds -def test_input_bound_scaling_1D(): +def test_input_bound_scaling_1d(): xoffset = {i: float(i) for i in range(3)} xfactor = {i: 0.5 * (i + 1) for i in range(3)} yoffset = {i: -0.25 * i for i in range(2)} @@ -108,7 +111,7 @@ def test_input_bound_scaling_1D(): assert net.scaled_input_bounds == scaled_input_bounds -def test_input_bound_scaling_multiD(): +def test_input_bound_scaling_multi_d(): # Multidimensional test xoffset = {(0, i): float(i) for i in range(3)} xfactor = {(0, i): 0.5 * (i + 1) for i in range(3)} @@ -164,11 +167,17 @@ def _test_add_invalid_edge(direction): ) if direction == "in": - expected_msg = f"Inbound layer {dense_layer_1} not found in network." + expected_msg = re.escape( + "Inbound layer DenseLayer(input_size=[1], output_size=[1]) not" + " found in network." + ) with pytest.raises(ValueError, match=expected_msg): net.add_edge(input_layer, dense_layer_1) elif direction == "out": - expected_msg = f"Outbound layer {dense_layer_1} not found in network." + expected_msg = re.escape( + "Outbound layer DenseLayer(input_size=[1], output_size=[1]) not" + " found in network." + ) with pytest.raises(ValueError, match=expected_msg): net.add_edge(dense_layer_1, dense_layer_0) diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index d79d2160..f88a9425 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -32,8 +32,8 @@ from pyomo.contrib.fbbt import interval NEAR_EQUAL = 1e-6 -FULLSPACE_SMOOTH_VARS = 14 -FULLSPACE_SMOOTH_CONSTRAINTS = 15 +FULLSPACE_SMOOTH_VARS = 15 +FULLSPACE_SMOOTH_CONSTRAINTS = 14 FULLSPACE_RELU_VARS = 19 FULLSPACE_RELU_CONSTRAINTS = 26 REDUCED_VARS = 6 @@ -86,7 +86,7 @@ def two_node_network(activation, input_value): return net, y -def _test_two_node_FullSpaceNNFormulation_smooth(activation): +def _test_two_node_full_space_nn_formulation_smooth(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -108,7 +108,7 @@ def _test_two_node_FullSpaceNNFormulation_smooth(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_FullSpaceNNFormulation_relu(): +def _test_two_node_full_space_nn_formulation_relu(): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network("relu", -2.0) @@ -130,7 +130,7 @@ def _test_two_node_FullSpaceNNFormulation_relu(): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_FullSpaceSmoothNNFormulation(activation): +def _test_two_node_full_space_smooth_nn_formulation(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -152,7 +152,7 @@ def _test_two_node_FullSpaceSmoothNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_ReducedSpaceNNFormulation(activation): +def _test_two_node_reduced_space_nn_formulation(activation): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -174,7 +174,7 @@ def _test_two_node_ReducedSpaceNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): +def _test_two_node_reduced_space_smooth_nn_formulation(activation): m = pyo.ConcreteModel() 
m.neural_net_block = OmltBlock() net, y = two_node_network(activation, -2.0) @@ -196,41 +196,41 @@ def _test_two_node_ReducedSpaceSmoothNNFormulation(activation): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - y[0, 1]) < NEAR_EQUAL -def test_two_node_ReducedSpaceNNFormulation(): - _test_two_node_ReducedSpaceNNFormulation("linear") - _test_two_node_ReducedSpaceNNFormulation("sigmoid") - _test_two_node_ReducedSpaceNNFormulation("tanh") +def test_two_node_reduced_space_nn_formulation(): + _test_two_node_reduced_space_nn_formulation("linear") + _test_two_node_reduced_space_nn_formulation("sigmoid") + _test_two_node_reduced_space_nn_formulation("tanh") -def test_two_node_ReducedSpaceSmoothNNFormulation(): - _test_two_node_ReducedSpaceSmoothNNFormulation("linear") - _test_two_node_ReducedSpaceSmoothNNFormulation("sigmoid") - _test_two_node_ReducedSpaceSmoothNNFormulation("tanh") +def test_two_node_reduced_space_smooth_nn_formulation(): + _test_two_node_reduced_space_smooth_nn_formulation("linear") + _test_two_node_reduced_space_smooth_nn_formulation("sigmoid") + _test_two_node_reduced_space_smooth_nn_formulation("tanh") -def test_two_node_ReducedSpaceSmoothNNFormulation_invalid_activation(): +def test_two_node_reduced_space_smooth_nn_formulation_invalid_activation(): expected_msg = "Activation relu is not supported by this formulation." with pytest.raises(ValueError, match=expected_msg): - _test_two_node_ReducedSpaceSmoothNNFormulation("relu") + _test_two_node_reduced_space_smooth_nn_formulation("relu") -def test_two_node_FullSpaceNNFormulation(): - _test_two_node_FullSpaceNNFormulation_smooth("linear") - _test_two_node_FullSpaceNNFormulation_smooth("sigmoid") - _test_two_node_FullSpaceNNFormulation_smooth("tanh") - _test_two_node_FullSpaceNNFormulation_relu() +def test_two_node_full_space_nn_formulation(): + _test_two_node_full_space_nn_formulation_smooth("linear") + _test_two_node_full_space_nn_formulation_smooth("sigmoid") + _test_two_node_full_space_nn_formulation_smooth("tanh") + _test_two_node_full_space_nn_formulation_relu() -def test_two_node_FullSpaceSmoothNNFormulation(): - _test_two_node_FullSpaceSmoothNNFormulation("linear") - _test_two_node_FullSpaceSmoothNNFormulation("sigmoid") - _test_two_node_FullSpaceSmoothNNFormulation("tanh") +def test_two_node_full_space_smooth_nn_formulation(): + _test_two_node_full_space_smooth_nn_formulation("linear") + _test_two_node_full_space_smooth_nn_formulation("sigmoid") + _test_two_node_full_space_smooth_nn_formulation("tanh") -def test_two_node_FullSpaceSmoothNNFormulation_invalid_activation(): +def test_two_node_full_space_smooth_nn_formulation_invalid_activation(): expected_msg = "Activation relu is not supported by this formulation." 
with pytest.raises(ValueError, match=expected_msg): - _test_two_node_FullSpaceSmoothNNFormulation("relu") + _test_two_node_full_space_smooth_nn_formulation("relu") @pytest.mark.skip(reason="Need to add checks on layer types") @@ -315,7 +315,7 @@ def _maxpool_conv_network(inputs): return net, y -def test_maxpool_FullSpaceNNFormulation(): +def test_maxpool_full_space_nn_formulation(): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -445,7 +445,7 @@ def _test_formulation_initialize_extra_output(network_formulation): ReducedSpaceNNFormulation(net) -def test_FullSpaceNNFormulation_invalid_network(): +def test_full_space_nn_formulation_invalid_network(): _test_formulation_initialize_extra_input("FullSpace") _test_formulation_added_extra_input("FullSpace") _test_formulation_build_extra_input("FullSpace") @@ -453,15 +453,13 @@ def test_FullSpaceNNFormulation_invalid_network(): _test_formulation_added_extra_output("FullSpace") -def test_ReducedSpaceNNFormulation_invalid_network(): - # _test_formulation_initialize_extra_input("ReducedSpace") +def test_reduced_space_nn_formulation_invalid_network(): _test_formulation_added_extra_input("ReducedSpace") _test_formulation_build_extra_input("ReducedSpace") - # _test_formulation_initialize_extra_output("ReducedSpace") _test_formulation_added_extra_output("ReducedSpace") -def test_ReluPartitionFormulation_invalid_network(): +def test_relu_partition_formulation_invalid_network(): _test_formulation_added_extra_input("relu") _test_formulation_build_extra_input("relu") _test_formulation_added_extra_output("relu") diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 59dc247a..23ed6fee 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -11,7 +11,8 @@ ) from omlt.neuralnet.activations import ComplementarityReLUActivation -# TODO: Add tests for single dimensional outputs as well +# TODO @cog-imperial: Add tests for single dimensional outputs as well +# https://github.com/cog-imperial/OMLT/issues/158 NEAR_EQUAL = 1e-3 @@ -34,7 +35,7 @@ def test_two_node_bigm(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluBigMFormulation(two_node_network_relu): +def test_two_node_relu_big_m_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() formulation = ReluBigMFormulation(two_node_network_relu) @@ -74,7 +75,7 @@ def test_two_node_complementarity(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluComplementarityFormulation(two_node_network_relu): +def test_two_node_relu_complementarity_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() formulation = ReluComplementarityFormulation(two_node_network_relu) @@ -92,7 +93,7 @@ def test_two_node_ReluComplementarityFormulation(two_node_network_relu): assert abs(pyo.value(m.neural_net_block.outputs[0, 1]) - 0) < NEAR_EQUAL -def test_two_node_ReluPartitionFormulation(two_node_network_relu): +def test_two_node_relu_partition_formulation(two_node_network_relu): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -112,7 +113,7 @@ def test_two_node_ReluPartitionFormulation(two_node_network_relu): @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test") -def test_conv_ReluBigMFormulation(datadir): +def test_conv_relu_big_m_formulation(datadir): from omlt.io.onnx import load_onnx_neural_network_with_bounds net = 
load_onnx_neural_network_with_bounds(datadir.file("keras_conv_7x7_relu.onnx")) @@ -124,14 +125,15 @@ def test_conv_ReluBigMFormulation(datadir): m.obj1 = pyo.Objective(expr=0) # compute expected output for this input - x = np.eye(7, 7).reshape(1, 7, 7) + x_start = np.eye(7, 7).reshape(1, 7, 7) + x = x_start for layer in net.layers: x = layer.eval_single_layer(x) output = x for i in range(7): for j in range(7): - m.neural_net_block.inputs[0, i, j].fix(input[0, i, j]) + m.neural_net_block.inputs[0, i, j].fix(x_start[0, i, j]) pyo.SolverFactory("cbc").solve(m, tee=False) d, r, c = output.shape diff --git a/tests/notebooks/test_run_notebooks.py b/tests/notebooks/test_run_notebooks.py index 7871bc87..62d70d57 100644 --- a/tests/notebooks/test_run_notebooks.py +++ b/tests/notebooks/test_run_notebooks.py @@ -11,8 +11,9 @@ from pyomo.common.fileutils import this_file_dir from testbook import testbook +# TODO @cog-imperial: These will be replaced with stronger tests using testbook soon +# https://github.com/cog-imperial/OMLT/issues/159 -# TODO: These will be replaced with stronger tests using testbook soon def _test_run_notebook(folder, notebook_fname, n_cells): # Change to notebook directory to allow for testing cwd = Path.cwd() diff --git a/tests/test_block.py b/tests/test_block.py index ccb8753f..9711345c 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -17,6 +17,11 @@ def _set_block(self, blk): def _build_formulation(self): pass + def _clear_inputs(self): + self.input_indexes = [] + + def _clear_outputs(self): + self.output_indexes = [] def test_block(): m = pyo.ConcreteModel() @@ -61,9 +66,20 @@ def test_input_output_auto_creation(): assert len(m.b2.outputs) == 1 m.b3 = OmltBlock() - expected_msg = "OmltBlock must have at least one input and at least one output." + formulation1 = DummyFormulation() + formulation1._clear_inputs() + expected_msg = ( + "OmltBlock must have at least one input to build a formulation. " + f"{formulation1} has no inputs." + ) + with pytest.raises(ValueError, match=expected_msg): + m.b3.build_formulation(formulation1) + + formulation2 = DummyFormulation() + formulation2._clear_outputs() + expected_msg = ( + "OmltBlock must have at least one output to build a formulation. " + f"{formulation2} has no outputs." 
+ ) with pytest.raises(ValueError, match=expected_msg): - m.b3._setup_inputs_outputs( - input_indexes=[], - output_indexes=[], - ) + m.b3.build_formulation(formulation2) From 040c858112936134d05bf6f15dd471e46ce13e63 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 24 Jun 2024 05:29:48 +0000 Subject: [PATCH 58/60] Fixing mypy typing errors --- src/omlt/__init__.py | 2 +- src/omlt/formulation.py | 5 +-- src/omlt/gbt/gbt_formulation.py | 3 +- src/omlt/gbt/model.py | 8 ++--- src/omlt/io/keras/keras_reader.py | 4 +-- src/omlt/io/onnx.py | 2 +- src/omlt/io/onnx_parser.py | 17 +++++----- .../torch_geometric/torch_geometric_reader.py | 4 +-- src/omlt/linear_tree/lt_definition.py | 8 ++--- src/omlt/neuralnet/activations/__init__.py | 3 +- src/omlt/scaling.py | 4 +-- tests/neuralnet/test_keras.py | 13 ++++--- tests/neuralnet/test_network_definition.py | 2 +- tests/neuralnet/test_nn_formulation.py | 29 ++++++---------- tests/neuralnet/test_onnx.py | 12 +++---- tests/neuralnet/test_relu.py | 2 +- tests/test_formulation.py | 34 +++++++++---------- tests/test_scaling.py | 20 +++++------ 18 files changed, 84 insertions(+), 88 deletions(-) diff --git a/src/omlt/__init__.py b/src/omlt/__init__.py index 3bf95df2..dfd36f37 100644 --- a/src/omlt/__init__.py +++ b/src/omlt/__init__.py @@ -11,7 +11,7 @@ """ from omlt._version import __version__ -from omlt.block import OmltBlock +from omlt.block import OmltBlock # type: ignore[attr-defined] from omlt.scaling import OffsetScaling __all__ = [ diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index 7097fbf1..442e44bf 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -63,7 +63,6 @@ class _PyomoFormulation(_PyomoFormulationInterface): """ def __init__(self): - super().__init__() self.__block = None def _set_block(self, block): @@ -76,7 +75,9 @@ def block(self): The underlying block containing the constraints / variables for this formulation. 
""" - return self.__block() + if self.__block is not None: + return self.__block() + return None def scalar_or_tuple(x): diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index a51bec98..4e1069fe 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -1,4 +1,5 @@ import collections +from typing import Any import numpy as np import pyomo.environ as pe @@ -158,7 +159,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): domain=pe.Reals, ) - branch_value_by_feature_id = {} + branch_value_by_feature_id: dict[int, Any] = {} branch_value_by_feature_id = collections.defaultdict(list) for f in feature_ids: diff --git a/src/omlt/gbt/model.py b/src/omlt/gbt/model.py index 0fbc3f7e..693c24f7 100644 --- a/src/omlt/gbt/model.py +++ b/src/omlt/gbt/model.py @@ -44,15 +44,15 @@ def scaling_object(self): """Return an instance of the scaling object supporting the ScalingInterface.""" return self.__scaling_object + @scaling_object.setter + def scaling_object(self, scaling_object): + self.__scaling_object = scaling_object + @property def scaled_input_bounds(self): """Return a list of tuples of lower and upper bounds of tree ensemble inputs.""" return self.__scaled_input_bounds - @scaling_object.setter - def scaling_object(self, scaling_object): - self.__scaling_object = scaling_object - def _model_num_inputs(model): """Returns the number of input variables.""" diff --git a/src/omlt/io/keras/keras_reader.py b/src/omlt/io/keras/keras_reader.py index d7429d72..2462bed0 100644 --- a/src/omlt/io/keras/keras_reader.py +++ b/src/omlt/io/keras/keras_reader.py @@ -1,6 +1,6 @@ from tensorflow import keras -from omlt.neuralnet.layer import DenseLayer, InputLayer +from omlt.neuralnet.layer import DenseLayer, InputLayer, Layer from omlt.neuralnet.network_definition import NetworkDefinition @@ -43,7 +43,7 @@ def load_keras_sequential( unscaled_input_bounds=unscaled_input_bounds, ) - prev_layer = InputLayer([n_inputs]) + prev_layer: Layer = InputLayer([n_inputs]) net.add_layer(prev_layer) for layer in nn.layers: diff --git a/src/omlt/io/onnx.py b/src/omlt/io/onnx.py index b48915a9..6c5b3cb3 100644 --- a/src/omlt/io/onnx.py +++ b/src/omlt/io/onnx.py @@ -43,7 +43,7 @@ def load_onnx_neural_network_with_bounds(filename): onnx_model = onnx.load(filename) input_bounds_filename = Path(f"{filename}.bounds.json") input_bounds = None - if input_bounds_filename.exists: + if input_bounds_filename.exists(): input_bounds = load_input_bounds(input_bounds_filename) return load_onnx_neural_network(onnx_model, input_bounds=input_bounds) diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py index 979b437c..f35fadb9 100644 --- a/src/omlt/io/onnx_parser.py +++ b/src/omlt/io/onnx_parser.py @@ -28,6 +28,7 @@ ATTR_TENSOR = 4 ATTR_INTS = 7 + class NetworkParser: """Network Parser. 
@@ -41,31 +42,31 @@ def __init__(self): def _reset_state(self): self._graph = None - self._initializers = None - self._constants = None - self._nodes = None + self._initializers = {} + self._constants = {} + self._nodes = {} self._nodes_by_output = None self._inputs = None self._outputs = None - self._node_stack = None - self._node_map = None + self._node_stack = [] + self._node_map = {} def parse_network(self, graph, scaling_object, input_bounds): self._reset_state() self._graph = graph # initializers contain constant data - initializers = {} + initializers: dict[str, Any] = {} for initializer in self._graph.initializer: initializers[initializer.name] = numpy_helper.to_array(initializer) self._initializers = initializers # Build graph - nodes = {} + nodes: dict[str, tuple[str, Any, list[Any]]] = {} nodes_by_output = {} inputs = set() - outputs = set() + outputs: set[Any] = set() self._node_map = {} network = NetworkDefinition( diff --git a/src/omlt/io/torch_geometric/torch_geometric_reader.py b/src/omlt/io/torch_geometric/torch_geometric_reader.py index 4203338b..d37ec960 100644 --- a/src/omlt/io/torch_geometric/torch_geometric_reader.py +++ b/src/omlt/io/torch_geometric/torch_geometric_reader.py @@ -2,7 +2,7 @@ import numpy as np -from omlt.neuralnet.layer import DenseLayer, GNNLayer, InputLayer +from omlt.neuralnet.layer import DenseLayer, GNNLayer, InputLayer, Layer from omlt.neuralnet.network_definition import NetworkDefinition @@ -150,7 +150,7 @@ def load_torch_geometric_sequential( unscaled_input_bounds=unscaled_input_bounds, ) - prev_layer = InputLayer([n_inputs]) + prev_layer: Layer = InputLayer([n_inputs]) net.add_layer(prev_layer) operations = [] diff --git a/src/omlt/linear_tree/lt_definition.py b/src/omlt/linear_tree/lt_definition.py index 8f944a4a..cf1b5a4a 100644 --- a/src/omlt/linear_tree/lt_definition.py +++ b/src/omlt/linear_tree/lt_definition.py @@ -1,3 +1,5 @@ +from typing import Any + import lineartree import numpy as np @@ -178,9 +180,7 @@ def _find_all_children_leaves(split, splits_dict, leaves_dict): # For each leaf, check if the parents appear in the list of children # splits (all_splits). If so, it must be a leaf of the argument split - return [ - leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits - ] + return [leaf for leaf in leaves_dict if leaves_dict[leaf]["parent"] in all_splits] def _find_n_inputs(leaves): @@ -341,7 +341,7 @@ def _parse_tree_data(model, input_bounds): # For each variable that appears in the tree, go through all the splits # and record its splitting threshold - splitting_thresholds = {} + splitting_thresholds: dict[int, Any] = {} for split in splits: var = splits[split]["col"] splitting_thresholds[var] = {} diff --git a/src/omlt/neuralnet/activations/__init__.py b/src/omlt/neuralnet/activations/__init__.py index 038a4dbd..740022ad 100644 --- a/src/omlt/neuralnet/activations/__init__.py +++ b/src/omlt/neuralnet/activations/__init__.py @@ -5,6 +5,7 @@ variable, and :math:`y` denotes post-activation variable. 
""" +from typing import Any from .linear import linear_activation_constraint, linear_activation_function from .relu import ComplementarityReLUActivation, bigm_relu_activation_constraint @@ -25,7 +26,7 @@ "tanh": tanh_activation_function, } -NON_INCREASING_ACTIVATIONS = [] +NON_INCREASING_ACTIVATIONS: list[Any] = [] __all__ = [ "linear_activation_constraint", diff --git a/src/omlt/scaling.py b/src/omlt/scaling.py index 9bf3bd3f..5ffaafbe 100644 --- a/src/omlt/scaling.py +++ b/src/omlt/scaling.py @@ -4,8 +4,8 @@ expressions to the Pyomo model for the inputs and outputs of an ML model. An implementation of a common scaling approach is included with `OffsetScaling`. """ - import abc +from typing import Any class ScalingInterface(abc.ABC): @@ -28,7 +28,7 @@ def get_unscaled_output_expressions(self, scaled_output_vars): # pragma: no cover -def convert_to_dict(x): +def convert_to_dict(x: Any) -> dict[Any, Any]: if isinstance(x, dict): return dict(x) return dict(enumerate(x)) diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index eb3436d6..99ae8e27 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -8,7 +8,8 @@ from omlt.io import load_keras_sequential from conftest import get_neural_network_data -from omlt.block import OmltBlock +from omlt import OmltBlock +from omlt.formulation import _PyomoFormulation from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation from omlt.neuralnet.activations import ComplementarityReLUActivation from omlt.scaling import OffsetScaling @@ -32,10 +33,9 @@ def _test_keras_linear_131(keras_fname, *, reduced_space=False): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() if reduced_space: - formulation = ReducedSpaceNNFormulation(net) + m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) else: - formulation = FullSpaceNNFormulation(net) - m.neural_net_block.build_formulation(formulation) + m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) nn_outputs = nn.predict(x=x_test) for d in range(len(x_test)): @@ -104,10 +104,9 @@ def _test_keras_linear_big(keras_fname, *, reduced_space=False): m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() if reduced_space: - formulation = ReducedSpaceNNFormulation(net) + m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net)) else: - formulation = FullSpaceNNFormulation(net) - m.neural_net_block.build_formulation(formulation) + m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) nn_outputs = nn.predict(x=x_test) for d in range(len(x_test)): diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py index 2d58cd3b..ee073c5e 100644 --- a/tests/neuralnet/test_network_definition.py +++ b/tests/neuralnet/test_network_definition.py @@ -3,7 +3,7 @@ import numpy as np import pyomo.environ as pyo import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.neuralnet.layer import DenseLayer, InputLayer from omlt.neuralnet.network_definition import NetworkDefinition from omlt.neuralnet.nn_formulation import FullSpaceNNFormulation diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index f88a9425..315bb176 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -4,6 +4,7 @@ import pyomo.environ as pyo import pytest from omlt import OmltBlock +from omlt.formulation import _PyomoFormulation from omlt.neuralnet import ( FullSpaceNNFormulation, 
FullSpaceSmoothNNFormulation, @@ -31,6 +32,12 @@ from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer from pyomo.contrib.fbbt import interval +formulations = { + "FullSpace": FullSpaceNNFormulation, + "ReducedSpace": ReducedSpaceNNFormulation, + "relu": ReluPartitionFormulation, +} + NEAR_EQUAL = 1e-6 FULLSPACE_SMOOTH_VARS = 15 FULLSPACE_SMOOTH_CONSTRAINTS = 14 @@ -41,6 +48,7 @@ THREE_NODE_VARS = 81 THREE_NODE_CONSTRAINTS = 120 + def two_node_network(activation, input_value): """Two node network. @@ -370,12 +378,7 @@ def _test_formulation_added_extra_input(network_formulation): """network_formulation can be:'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_input) expected_msg = "Multiple input layers are not currently supported." with pytest.raises(ValueError, match=expected_msg): @@ -386,12 +389,7 @@ def _test_formulation_build_extra_input(network_formulation): """network_formulation can be:'FullSpace', 'ReducedSpace', 'relu'.""" net, y = two_node_network("linear", -2.0) extra_input = InputLayer([1]) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_input) m = pyo.ConcreteModel() m.neural_net_block = OmltBlock() @@ -410,12 +408,7 @@ def _test_formulation_added_extra_output(network_formulation): weights=np.array([[1.0, 0.0], [5.0, 1.0]]), biases=np.array([3.0, 4.0]), ) - if network_formulation == "FullSpace": - formulation = FullSpaceNNFormulation(net) - elif network_formulation == "ReducedSpace": - formulation = ReducedSpaceNNFormulation(net) - elif network_formulation == "relu": - formulation = ReluPartitionFormulation(net) + formulation: _PyomoFormulation = formulations[network_formulation](net) net.add_layer(extra_output) net.add_edge(list(net.layers)[-2], extra_output) expected_msg = "Multiple output layers are not currently supported." 
diff --git a/tests/neuralnet/test_onnx.py b/tests/neuralnet/test_onnx.py index 7cad2d78..7d33675f 100644 --- a/tests/neuralnet/test_onnx.py +++ b/tests/neuralnet/test_onnx.py @@ -59,8 +59,8 @@ def obj(mdl): SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] @@ -102,8 +102,8 @@ def obj(mdl): SolverFactory("cbc").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] @@ -146,8 +146,8 @@ def obj(mdl): SolverFactory("ipopt").solve(model, tee=False) x_s = (x - scale_x[0]) / scale_x[1] - x_s = np.array([[x_s]], dtype=np.float32) - outputs = net_regression.run(None, {"dense_2_input:0": x_s}) + x_s_arr = np.array([[x_s]], dtype=np.float32) + outputs = net_regression.run(None, {"dense_2_input:0": x_s_arr}) y_s = outputs[0][0, 0] y = y_s * scale_y[1] + scale_y[0] diff --git a/tests/neuralnet/test_relu.py b/tests/neuralnet/test_relu.py index 23ed6fee..40ed37ef 100644 --- a/tests/neuralnet/test_relu.py +++ b/tests/neuralnet/test_relu.py @@ -1,7 +1,7 @@ import numpy as np import pyomo.environ as pyo import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.dependencies import onnx_available from omlt.neuralnet import ( FullSpaceNNFormulation, diff --git a/tests/test_formulation.py b/tests/test_formulation.py index df4aa0d9..155e596c 100644 --- a/tests/test_formulation.py +++ b/tests/test_formulation.py @@ -1,5 +1,5 @@ import pytest -from omlt.block import OmltBlock +from omlt import OmltBlock from omlt.formulation import _setup_scaled_inputs_outputs from omlt.scaling import OffsetScaling from pyomo.environ import ConcreteModel, Objective, SolverFactory, value @@ -7,16 +7,16 @@ def test_scaled_inputs_outputs(): m = ConcreteModel() - xoffset = {(0, i): float(i) for i in range(3)} - xfactor = {(0, i): 0.5 * (i + 1) for i in range(3)} - yoffset = {(1, i): -0.25 * i for i in range(2)} - yfactor = {(1, i): 0.125 * (i + 1) for i in range(2)} + x1offset: dict[tuple[int, int], float] = {(0, i): float(i) for i in range(3)} + x1factor: dict[tuple[int, int], float] = {(0, i): 0.5 * (i + 1) for i in range(3)} + y1offset: dict[tuple[int, int], float] = {(1, i): -0.25 * i for i in range(2)} + y1factor: dict[tuple[int, int], float] = {(1, i): 0.125 * (i + 1) for i in range(2)} scaler = OffsetScaling( - offset_inputs=xoffset, - factor_inputs=xfactor, - offset_outputs=yoffset, - factor_outputs=yfactor, + offset_inputs=x1offset, + factor_inputs=x1factor, + offset_outputs=y1offset, + factor_outputs=y1factor, ) scaled_input_bounds = {(0, 0): (0, 5), (0, 1): (-2, 2), (0, 2): (0, 1)} @@ -47,16 +47,16 @@ def test_scaled_inputs_outputs(): assert m.b1.inputs[(0, 2)].ub == pytest.approx(3.5) m = ConcreteModel() - xoffset = {i: float(i) for i in range(3)} - xfactor = {i: 0.5 * (i + 1) for i in range(3)} - yoffset = {i: -0.25 * i for i in range(2)} - yfactor = {i: 0.125 * (i + 1) for i in range(2)} + x2offset: dict[int, float] = {i: float(i) for i in range(3)} + x2factor: dict[int, float] = {i: 0.5 * (i + 1) for i in range(3)} + 
y2offset: dict[int, float] = {i: -0.25 * i for i in range(2)} + y2factor: dict[int, float] = {i: 0.125 * (i + 1) for i in range(2)} scaler = OffsetScaling( - offset_inputs=xoffset, - factor_inputs=xfactor, - offset_outputs=yoffset, - factor_outputs=yfactor, + offset_inputs=x2offset, + factor_inputs=x2factor, + offset_outputs=y2offset, + factor_outputs=y2factor, ) input_bounds = {0: (0, 5), 1: (-2, 2), 2: (0, 1)} diff --git a/tests/test_scaling.py b/tests/test_scaling.py index 790241bf..dffc2a03 100644 --- a/tests/test_scaling.py +++ b/tests/test_scaling.py @@ -8,16 +8,16 @@ def test_convert_to_dict(): x = ["a", "b"] - x = convert_to_dict(x) - assert sorted(x.keys()) == [0, 1] - assert x[0] == "a" - assert x[1] == "b" - - x = {2: "a", 1: "b"} - x = convert_to_dict(x) - assert sorted(x.keys()) == [1, 2] - assert x[2] == "a" - assert x[1] == "b" + xd = convert_to_dict(x) + assert sorted(xd.keys()) == [0, 1] + assert xd[0] == "a" + assert xd[1] == "b" + + y = {2: "a", 1: "b"} + yd = convert_to_dict(y) + assert sorted(yd.keys()) == [1, 2] + assert yd[2] == "a" + assert yd[1] == "b" def test_offset_scaling(): From b7b1c5b9b0c41dd8f616039dbe30218cf9d6db19 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 24 Jun 2024 05:29:48 +0000 Subject: [PATCH 59/60] Fixing mypy typing errors --- tests/neuralnet/test_keras.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/neuralnet/test_keras.py b/tests/neuralnet/test_keras.py index 99ae8e27..f83c07d1 100644 --- a/tests/neuralnet/test_keras.py +++ b/tests/neuralnet/test_keras.py @@ -9,7 +9,6 @@ from conftest import get_neural_network_data from omlt import OmltBlock -from omlt.formulation import _PyomoFormulation from omlt.neuralnet import FullSpaceNNFormulation, ReducedSpaceNNFormulation from omlt.neuralnet.activations import ComplementarityReLUActivation from omlt.scaling import OffsetScaling From 0f9ab2e3d8e274d1bf7986234a144e789ed61406 Mon Sep 17 00:00:00 2001 From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com> Date: Mon, 1 Jul 2024 08:25:06 -0700 Subject: [PATCH 60/60] Revert "Merge branch 'JuMP' into keras3" This reverts commit 568474a8b0d7b8855075ccad0c8b6fe342ef51d1, reversing changes made to b7b1c5b9b0c41dd8f616039dbe30218cf9d6db19. 
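For context on what the revert below removes: the JuMP branch routed every OmltVar/OmltExpr construction through a `__new__` factory that builds a format-to-subclass map from `cls.__subclasses__()` and instantiates the matching backend. A minimal standalone sketch of that dispatch mechanism, using illustrative class names rather than the OMLT ones:

class Backend:
    # Concrete backends declare a class-level `format` string; __new__
    # rebuilds the registry from __subclasses__() on every call, so a new
    # backend registers itself simply by subclassing.
    def __new__(cls, *args, format="pyomo", **kwargs):
        subclass_map = {sub.format: sub for sub in cls.__subclasses__()}
        if format not in subclass_map:
            raise ValueError(
                f"Format {format!r} not recognized. "
                f"Supported formats are {sorted(subclass_map)}."
            )
        instance = super().__new__(subclass_map[format])
        instance._format = format
        return instance

class PyomoBackend(Backend):
    format = "pyomo"

class JumpBackend(Backend):
    format = "jump"

assert isinstance(Backend(format="jump"), JumpBackend)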
--- src/omlt/base/__init__.py | 11 - src/omlt/base/expression.py | 558 ------------ src/omlt/base/julia.py | 117 --- src/omlt/base/var.py | 873 ------------------- src/omlt/block.py | 36 +- src/omlt/dependencies.py | 2 - src/omlt/formulation.py | 7 +- src/omlt/gbt/gbt_formulation.py | 5 +- src/omlt/linear_tree/lt_formulation.py | 5 +- src/omlt/neuralnet/activations/relu.py | 4 +- src/omlt/neuralnet/layers/full_space.py | 5 +- src/omlt/neuralnet/layers/partition_based.py | 23 +- src/omlt/neuralnet/nn_formulation.py | 9 +- tests/neuralnet/test_nn_formulation.py | 14 +- tests/test_block.py | 33 - tests/test_var.py | 32 - 16 files changed, 28 insertions(+), 1706 deletions(-) delete mode 100644 src/omlt/base/__init__.py delete mode 100644 src/omlt/base/expression.py delete mode 100644 src/omlt/base/julia.py delete mode 100644 src/omlt/base/var.py delete mode 100644 tests/test_var.py diff --git a/src/omlt/base/__init__.py b/src/omlt/base/__init__.py deleted file mode 100644 index 3d881472..00000000 --- a/src/omlt/base/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -DEFAULT_MODELING_LANGUAGE = "pyomo" - -from omlt.dependencies import julia_available - -if julia_available: - from omlt.base.julia import jl, jump - -from omlt.base.var import OmltVar -from omlt.base.expression import OmltExpr - -# from omlt.base.constraint import OmltConstraint diff --git a/src/omlt/base/expression.py b/src/omlt/base/expression.py deleted file mode 100644 index 80229d99..00000000 --- a/src/omlt/base/expression.py +++ /dev/null @@ -1,558 +0,0 @@ -from abc import ABC, abstractmethod -import pyomo.environ as pyo - -# from pyomo.core.expr import RelationalExpression - -from omlt.base import DEFAULT_MODELING_LANGUAGE -import omlt.base.var as var - -# from omlt.dependencies import julia_available - -# if julia_available: -# from omlt.base.julia import jl, jump, JumpVar -# from juliacall import AnyValue -# relations = {"==", ">=", "<=", ">", "<"} - - -class OmltExpr(ABC): - # Claim to be a Pyomo Expression so blocks will register - # properly. - @property - def __class__(self): - return pyo.Expression - - def __new__(cls, *indexes, **kwargs): - if not indexes: - instance = super(OmltExpr, cls).__new__(OmltExprScalar) - instance.__init__(**kwargs) - else: - instance = super(OmltExpr, cls).__new__(OmltExprIndexed) - instance.__init__(*indexes, **kwargs) - return instance - - @property - def ctype(self): - return pyo.Expression - - def is_component_type(self): - return True - - def is_expression_type(self): - return True - - @abstractmethod - def is_indexed(self): - pass - - def valid_model_component(self): - """Return True if this can be used as a model component.""" - return True - - @property - @abstractmethod - def args(self): - pass - - @abstractmethod - def arg(self, index): - pass - - @abstractmethod - def nargs(self): - pass - - -class OmltExprScalar(OmltExpr): - def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - "Expression format %s not recognized. 
Supported formats " - "are 'pyomo' or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltExpr, cls).__new__(subclass) - # instance.__init__(*args, **kwargs) - instance._format = format - return instance - - def __mul__(self, other): - pass - - -class OmltExprScalarPyomo(OmltExprScalar, pyo.Expression): - format = "pyomo" - - def __init__(self, *args, expr=None, **kwargs): - self._index_set = {} - if isinstance(expr, (pyo.Expression, pyo.NumericValue)): - self._expression = expr - elif isinstance(expr, OmltExprScalarPyomo): - self._expression = expr._expression - elif isinstance(expr, tuple): - self._expression = self._parse_expression_tuple(expr) - else: - print("expression not recognized", expr, type(expr)) - - self._parent = None - self.name = None - - def _parse_expression_tuple_term(self, term): - if isinstance(term, tuple): - return self._parse_expression_tuple(term) - elif isinstance(term, OmltExprScalarPyomo): - return term._expression - elif isinstance(term, var.OmltVar): - return term._pyovar - elif isinstance(term, (pyo.Expression, pyo.Var, int, float)): - return term - else: - raise TypeError( - "Term of expression is an unsupported type. " - "Write a better error message." - ) - - def _parse_expression_tuple(self, expr): - lhs = self._parse_expression_tuple_term(expr[0]) - rhs = self._parse_expression_tuple_term(expr[2]) - - if expr[1] == "+": - return lhs + rhs - - elif expr[1] == "-": - return lhs - rhs - - elif expr[1] == "*": - return lhs * rhs - - elif expr[1] == "/": - return lhs / rhs - - else: - raise ValueError("Expression middle term was {%s}.", expr[1]) - - def __repr__(self): - return repr(self._expression.arg(0)) - - def is_indexed(self): - return False - - def as_numeric(self): - return self._expression._apply_operation(self._expression.args) - - def construct(self, data=None): - return self._expression.construct(data) - - @property - def _constructed(self): - return self._expression.expr._constructed - - @property - def const(self): - return self._expression.const - - @property - def args(self): - return self._expression.args - - def arg(self, index): - return self._expression.arg(index) - - def nargs(self): - return self._expression.nargs() - - def __call__(self): - return self._expression() - - def __add__(self, other): - if isinstance(other, OmltExpr): - expr = self._expression + other._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = self._expression + other - return OmltExpr(format=self._format, expr=expr) - - # def __sub__(self, other): - # expr = (self, "-", other) - # return OmltExpression(format=self._format, expr=expr) - - def __mul__(self, other): - if isinstance(other, OmltExpr): - expr = self._expression * other._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = self._expression * other - return OmltExprScalar(format=self._format, expr=expr) - - def __div__(self, other): - expr = (self, "/", other) - return OmltExpr(format=self._format, expr=expr) - - def __truediv__(self, other): - expr = (self, "//", other) - return OmltExpr(format=self._format, expr=expr) - - def __radd__(self, other): - if isinstance(other, OmltExpr): - expr = other._expression + self._expression - elif isinstance(other, (int, float, pyo.Expression)): - expr = other + self._expression - return OmltExpr(format=self._format, expr=expr) - - def __rsub__(self, other): - if isinstance(other, OmltExpr): - expr = other._expression - self._expression - elif isinstance(other, (int, float, pyo.Expression)): 
- expr = other - self._expression - return OmltExpr(format=self._format, expr=expr) - - def __rmul__(self, other): - expr = (other, "*", self) - return OmltExpr(format=self._format, expr=expr) - - def __ge__(self, other): - expr = self._expression >= other - return expr - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) - - def __le__(self, other): - expr = self._expression <= other - return expr - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) - - def __eq__(self, other): - expr = self._expression == other - return pyo.Expression(expr=expr) - # return constraint.OmltRelScalar(format=self._format, expr_tuple=expr) - - -class OmltExprIndexed(OmltExpr): - def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - "Variable format %s not recognized. Supported formats are 'pyomo'" - " or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltExpr, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - instance._format = format - return instance - - -class OmltExprIndexedPyomo(OmltExprIndexed, pyo.Expression): - format = "pyomo" - - def __init__(self, *indexes, expr=None, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - elif len(indexes) > 1: - raise ValueError("Currently index cross-products are unsupported.") - else: - self._index_set = {} - self._format = format - self._expression = pyo.Expression(self._index_set, expr=expr) - - # self.pyo.construct() - - def is_indexed(self): - return True - - def expression_as_dict(self): - if len(self._index_set) == 1: - return {self._index_set[0]: self._expression} - else: - return {k: self._expression[k] for k in self._index_set} - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._expression[item[0]] - else: - return self._expression[item] - - def __setitem__(self, item, value): - self._expression[item] = value - - def keys(self): - return self._expression.keys() - - def values(self): - return self._expression.values() - - def items(self): - return self._expression.items() - - def __len__(self): - """ - Return the number of component data objects stored by this - component. - """ - return len(self._expression) - - def __contains__(self, idx): - """Return true if the index is in the dictionary""" - return idx in self._expression - - # The default implementation is for keys() and __iter__ to be - # synonyms. 
The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys""" - return self._expression.__iter__() - - @property - def args(self): - return self._expression.args() - - def arg(self, index): - return self._expression.arg(index) - - def nargs(self): - return self._expression.nargs() - - def __call__(self): - return self._expression() - - # # def __str__(self): - # # return parse_expression(self.expr, "").rstrip() - - # def __repr__(self): - # if self._expr is not None: - # return parse_expression(self._expr, "").rstrip() - # else: - # return "empty expression" - - # def set_value(self, value): - # print("setting value:", value) - # self.value = value - - # @property - # def rule(self): - # return self._expr - - def __add__(self, other): - expr = (self, "+", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - # def __sub__(self, other): - # expr = (self, "-", other) - # return OmltExpression(format=self._format, expr=expr) - - # def __mul__(self, other): - # expr = (self, "*", other) - # return OmltExpression(format=self._format, expr=expr) - - def __div__(self, other): - expr = (self, "/", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - def __truediv__(self, other): - expr = (self, "//", other) - return OmltExpr(self._index_set, format=self._format, expr=expr) - - def __eq__(self, other): - expr = (self, "==", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - def __le__(self, other): - expr = (self, "<=", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - def __ge__(self, other): - expr = (self, ">=", other) - return pyo.Expression(self._index_set, expr=expr) - # return constraint.OmltRelation( - # self._index_set, format=self._format, expr_tuple=expr - # ) - - -# def parse_expression(expr, string): -# if expr is not None: -# for t in expr: -# if str(t).count(" ") == 2: -# string += "(" + str(t) + ") " -# else: -# string += str(t) + " " -# else: -# string = expr -# return string - - -# def parse_jump_affine(expr_tuple): -# if expr_tuple is not None: -# if isinstance(expr_tuple, JumpVar): -# return jump.AffExpr(0, {expr_tuple.to_jump(): 1}) -# elif isinstance(expr_tuple, (int, float)): -# return jump.AffExpr(expr_tuple, {}) -# elif isinstance(expr_tuple, OmltExprScalar): -# print("found a scalar expression") -# print(expr_tuple) -# print(expr_tuple._expression) -# return expr_tuple._expression -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], JumpVar): -# return jump.AffExpr(0, {expr_tuple[0].to_jump(): 1}) -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): -# return jump.AffExpr(expr_tuple[0], {}) -# elif len(expr_tuple) == 2: -# print("don't know how to deal with 2-element expressions") -# print("expr_tuple") -# elif len(expr_tuple) == 3: -# print("triplet") -# if expr_tuple[1] == "+": -# return parse_jump_affine(expr_tuple[0]) + parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "-": -# return parse_jump_affine(expr_tuple[0]) - parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "*": -# return parse_jump_affine(expr_tuple[0]) * parse_jump_affine( -# expr_tuple[2] -# ) -# elif 
expr_tuple[1] == "/": -# return parse_jump_affine(expr_tuple[0]) / parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "//": -# return parse_jump_affine(expr_tuple[0]) // parse_jump_affine( -# expr_tuple[2] -# ) -# elif expr_tuple[1] == "**": -# return parse_jump_affine(expr_tuple[0]) ** parse_jump_affine( -# expr_tuple[2] -# ) - - -# def dictplus(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): -# c[k] = a[k] + b[k] -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dictminus(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): -# c[k] = a[k] - b[k] -# print("dictminus gives:", c) -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dicttimes(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): - -# c[k] = a[k] * b[k] -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def dictover(a, b): -# c = dict() -# if a.keys() == b.keys(): -# for k in a.keys(): - -# c[k] = jump_divide(a[k], b[k]) -# return c -# else: -# raise ValueError("dicts have non-matching keys") - - -# def jump_divide(a, b): -# assert isinstance(a, AnyValue) -# print(b.terms) -# assert (isinstance(b, AnyValue) and len(b.terms) == 0) or isinstance( -# b, (int, float) -# ) -# if isinstance(b, AnyValue): -# div_by = b.constant -# else: -# div_by = b -# return jump.AffExpr(a.constant / div_by, {}) - - -# def parse_jump_indexed(expr_tuple, index): -# print("parsing:", expr_tuple) -# if expr_tuple is not None: -# if isinstance(expr_tuple, OmltExpr): -# print("here") -# return expr_tuple.expression_as_dict() -# elif isinstance(expr_tuple, var.OmltVar): -# return expr_tuple.to_jumpexpr() -# elif isinstance(expr_tuple, (int, float)): -# return {k: jump.AffExpr(expr_tuple, {}) for k in index} -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], OmltExpr): -# return expr_tuple[0]._expression -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], var.OmltVar): -# indexed = { -# k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) -# for k, v in expr_tuple[0].items() -# } -# return indexed -# elif len(expr_tuple) == 1 and isinstance(expr_tuple[0], (int, float)): -# return {k: jump.AffExpr(expr_tuple[0], {}) for k in index} -# elif len(expr_tuple) == 2: -# print("don't know how to deal with 2-element expressions") -# print(expr_tuple) -# elif len(expr_tuple) == 3: -# if expr_tuple[1] == "+": -# return dictplus( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "-": -# return dictminus( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "*": -# return dicttimes( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "/": -# return dictover( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "//": -# return dictover( -# parse_jump_indexed(expr_tuple[0], index), -# parse_jump_indexed(expr_tuple[2], index), -# ) -# elif expr_tuple[1] == "**": -# return parse_jump_indexed(expr_tuple[0], index) ** parse_jump_indexed( -# expr_tuple[2], index -# ) -# elif expr_tuple[1] in relations: -# cnstrnt = constraint.OmltRelation( -# index, -# model=None, -# lhs=parse_jump_indexed(expr_tuple[0], index), -# sense=expr_tuple[1], -# rhs=parse_jump_indexed(expr_tuple[2], index), -# format="jump", -# ) -# indexed = {k: 
cnstrnt.lhs[k] - cnstrnt.rhs[k] for k in index} -# return indexed diff --git a/src/omlt/base/julia.py b/src/omlt/base/julia.py deleted file mode 100644 index b3c9109f..00000000 --- a/src/omlt/base/julia.py +++ /dev/null @@ -1,117 +0,0 @@ -from omlt.dependencies import julia_available - -if julia_available: - from juliacall import Main as jl - from juliacall import Base - - jl_err = Base.error - jl.seval("import JuMP") - jump = jl.JuMP - - -class JuMPVarInfo: - def __init__( - self, - lower_bound=None, - upper_bound=None, - fixed_value=None, - start_value=None, - binary=False, - integer=False, - ): - self.has_lb = lower_bound is not None - self.lb = lower_bound - self.has_ub = upper_bound is not None - self.ub = upper_bound - self.has_fix = fixed_value is not None - self.fixed_value = fixed_value - self.has_start = start_value is not None - self.start_value = start_value - self.binary = binary - self.integer = integer - - @property - def lower_bound(self): - return self.lb - - @lower_bound.setter - def lower_bound(self, value=None): - self.lb = value - self.has_lb = value is not None - - def setlb(self, value): - self.lower_bound = value - - @property - def upper_bound(self): - return self.ub - - @upper_bound.setter - def upper_bound(self, value=None): - self.ub = value - self.has_ub = value is not None - - def setub(self, value): - self.upper_bound = value - - def to_jump(self): - return jump.VariableInfo( - self.has_lb, - self.lower_bound, - self.has_ub, - self.upper_bound, - self.has_fix, - self.fixed_value, - self.has_start, - self.start_value, - self.binary, - self.integer, - ) - - -class JumpVar: - def __init__(self, varinfo: JuMPVarInfo, name): - self.info = varinfo - self.name = name - self.omltvar = None - self.index = None - self.construct() - - def __str__(self): - return self.name - - def setlb(self, value): - self.info.setlb(value) - self.construct() - - def setub(self, value): - self.info.setlb(value) - self.construct() - - def construct(self): - self.var = jump.build_variable(Base.error, self.info.to_jump()) - - @property - def value(self): - return self.var.info.start - - def add_to_model(self, model, name=None): - if name is None: - name = self.name - variable_ref = jump.add_variable(model, self.var, name) - return variable_ref - - def to_jump(self): - return self.var - - def __add__(self, other): - return (self.omltvar + other)[self.index] - - def __sub__(self, other): - return (self.omltvar - other)[self.index] - - def __mul__(self, other): - return (self.omltvar * other)[self.index] - - def __eq__(self, other): - return (self.omltvar == other)[self.index] diff --git a/src/omlt/base/var.py b/src/omlt/base/var.py deleted file mode 100644 index a7e5a9b8..00000000 --- a/src/omlt/base/var.py +++ /dev/null @@ -1,873 +0,0 @@ -""" -Abstraction layer of classes used by OMLT. Underneath these are -objects in a choice of modeling languages: Pyomo (default), -JuMP, or others (not yet implemented - e.g. Smoke, Gurobi). 
- - -""" - -from abc import ABC, abstractmethod -import pyomo.environ as pyo - -from omlt.dependencies import julia_available - -from omlt.base import DEFAULT_MODELING_LANGUAGE - -if julia_available: - from omlt.base import jump -from omlt.base.julia import JuMPVarInfo, JumpVar -from omlt.base.expression import OmltExprIndexed, OmltExprScalar - -# from omlt.base.constraint import OmltRelation, OmltRelScalar - - -class OmltVar(ABC): - def __new__(cls, *indexes, **kwargs): - - if not indexes: - instance = OmltScalar.__new__(OmltScalar, **kwargs) - else: - instance = OmltIndexed.__new__(OmltIndexed, *indexes, **kwargs) - return instance - - @abstractmethod - def construct(self, data): - pass - - @abstractmethod - def fix(self, value, skip_validation): - pass - - @property - @abstractmethod - def ctype(self): - pass - - @property - @abstractmethod - def name(self): - pass - - # Some methods to tell OMLT (and Pyomo components) that this - # is a variable. - def is_component_type(self): - return True - - @abstractmethod - def is_indexed(self): - pass - - def valid_model_component(self): - """Return True if this can be used as a model component.""" - return True - - -class OmltScalar(OmltVar): - def __new__(cls, *args, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - "Variable format %s not recognized. Supported formats " - "are 'pyomo' or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - - instance.__init__(*args, **kwargs) - instance._format = format - return instance - - def is_indexed(self): - return False - - # Bound-setting interface for scalar variables: - @property - @abstractmethod - def bounds(self): - pass - - @bounds.setter - @abstractmethod - def bounds(self, val): - pass - - @property - @abstractmethod - def lb(self): - pass - - @lb.setter - @abstractmethod - def lb(self, val): - pass - - @property - @abstractmethod - def ub(self): - pass - - @ub.setter - @abstractmethod - def ub(self, val): - pass - - @property - @abstractmethod - def domain(self): - pass - - @domain.setter - @abstractmethod - def domain(self, val): - pass - - # Interface for getting/setting value - @property - @abstractmethod - def value(self): - pass - - @value.setter - @abstractmethod - def value(self, val): - pass - - # Interface governing how variables behave in expressions. 
- - # def __lt__(self, other): - # return OmltRelScalar(expr=(self, "<", other)) - - # def __gt__(self, other): - # return OmltRelScalar(expr=(self, ">", other)) - - # def __le__(self, other): - # return OmltRelScalar(expr=(self, "<=", other)) - - # def __ge__(self, other): - # return OmltRelScalar(expr=(self, ">=", other)) - - # def __eq__(self, other): - # return OmltRelScalar(expr=(self, "==", other)) - - def __add__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "+", other)) - - def __sub__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "-", other)) - - def __mul__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "*", other)) - - def __div__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "//", other)) - - def __truediv__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "/", other)) - - def __pow__(self, other): - return OmltExprScalar(format=self._format, expr=(self, "**", other)) - - def __radd__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "+", self)) - - def __rsub__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "-", self)) - - def __rmul__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "*", self)) - - def __rdiv__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "//", self)) - - def __rtruediv__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "/", self)) - - def __rpow__(self, other): - return OmltExprScalar(format=self._format, expr=(other, "**", self)) - - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) - - -class OmltScalarPyomo(OmltScalar, pyo.ScalarVar): - format = "pyomo" - - def __init__(self, *args, **kwargs): - kwargs.pop("format", None) - # pyo.ScalarVar.__init__(self, *args, **kwargs) - self._pyovar = pyo.ScalarVar(*args, **kwargs) - self._parent = None - self._constructed = None - - def construct(self, data=None): - return self._pyovar.construct(data) - - def fix(self, value, skip_validation): - return self._pyovar.fix(value, skip_validation) - - @property - def ctype(self): - return pyo.ScalarVar - - @property - def name(self): - self._pyovar._name = self._name - return self._pyovar._name - - @property - def bounds(self): - return (self._pyovar._lb, self._pyovar._ub) - - @bounds.setter - def bounds(self, val): - self._pyovar.lb = val[0] - self._pyovar.ub = val[1] - - @property - def lb(self): - return self._pyovar._lb - - @lb.setter - def lb(self, val): - self._pyovar.setlb(val) - - @property - def ub(self): - return self._pyovar._ub - - @ub.setter - def ub(self, val): - self._pyovar.setub(val) - - @property - def domain(self): - return self._pyovar._domain - - @domain.setter - def domain(self, val): - self._pyovar._domain = val - - # Interface for getting/setting value - @property - def value(self): - return 
self._pyovar.value - - @value.setter - def value(self, val): - self._pyovar.value = val - - -class OmltScalarJuMP(OmltScalar): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. - @property - def __class__(self): - return pyo.ScalarVar - - def __init__(self, *args, **kwargs): - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = self._bounds[0] - _ub = self._bounds[1] - elif self._bounds is None: - _lb = None - _ub = None - else: - raise ValueError("Bounds must be given as a tuple") - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - raise ValueError( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - elif _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - if isinstance(_initialize, (int, float)): - self._value = _initialize - elif len(_initialize) == 1 and isinstance(_initialize[0], (int, float)): - self._value = _initialize[0] - else: - # Pyomo's "scalar" variables can be multidimensional, they're - # just not indexed. JuMP scalar variables can only be a single - # dimension. Rewrite this error to be more helpful. - raise ValueError( - "Initial value for JuMP variables must be an int" - f" or float, but {type(_initialize)} was provided." 
- ) - else: - self._value = None - - self._varinfo = JuMPVarInfo( - _lb, - _ub, - None, # fix value - self._value, - self.binary, - self.integer, - ) - self._constructed = False - self._parent = None - self._ctype = pyo.ScalarVar - self._name = None - - def construct(self, data=None): - self._var = JumpVar(self._varinfo, self._name) - self._var.omltvar = self - self._constructed = True - if self._parent: - self._blockvar = jump.add_variable( - self._parent()._jumpmodel, self.to_jumpvar() - ) - - def fix(self, value, skip_validation): - self.fixed = True - self._value = value - self._varinfo.fixed_value = value - self._varinfo.has_fix = value is not None - if self._constructed: - self.construct() - - @property - def bounds(self): - return (self.lb, self.ub) - - @bounds.setter - def bounds(self, val): - if val is None: - self.lb = None - self.ub = None - elif len(val) == 2: - self.lb = val[0] - self.ub = val[1] - - @property - def lb(self): - return self._varinfo.lower_bound - - @lb.setter - def lb(self, val): - self._varinfo.setlb(val) - if self._constructed: - self.construct() - - @property - def ub(self): - return self._varinfo.upper_bound - - @ub.setter - def ub(self, val): - self._varinfo.setub(val) - if self._constructed: - self.construct() - - @property - def value(self): - if self._constructed: - return self._var.value - else: - return self._varinfo.start_value - - @value.setter - def value(self, val): - if self._constructed: - self._var.value = val - else: - self._varinfo.start_value = val - self - - @property - def ctype(self): - return self._ctype - - @property - def name(self): - return self._name - - @name.setter - def name(self, value): - self._name = value - - def to_jumpvar(self): - if self._constructed: - return self._var.to_jump() - else: - return self._varinfo.to_jump() - - def to_jumpexpr(self): - return jump.AffExpr(0, jump.OrderedDict([(self._blockvar, 1)])) - - -""" -Future formats to implement. -""" - - -class OmltScalarSmoke(OmltScalar): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltScalarGurobi(OmltScalar): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) - - -class OmltIndexed(OmltVar): - def __new__(cls, *indexes, format=DEFAULT_MODELING_LANGUAGE, **kwargs): - subclass_map = {subclass.format: subclass for subclass in cls.__subclasses__()} - if format not in subclass_map: - raise ValueError( - "Variable format %s not recognized. Supported formats are 'pyomo'" - " or 'jump'.", - format, - ) - subclass = subclass_map[format] - instance = super(OmltVar, subclass).__new__(subclass) - instance.__init__(*indexes, **kwargs) - instance._format = format - return instance - - def is_indexed(self): - return True - - @property - @abstractmethod - def index_set(self): - pass - - # Bound-setting interface for indexed variables: - @abstractmethod - def setub(self, value): - pass - - @abstractmethod - def setlb(self, value): - pass - - # Interface: act as a dict for the sub-variables. 
- @abstractmethod - def __getitem__(self, item): - pass - - @abstractmethod - def __setitem__(self, item, value): - pass - - @abstractmethod - def keys(self): - pass - - @abstractmethod - def values(self): - pass - - @abstractmethod - def items(self): - pass - - @abstractmethod - def __len__(self): - pass - - @abstractmethod - def __contains__(self, idx): - pass - - @abstractmethod - def __iter__(self): - pass - - # Interface governing how variables behave in expressions. - - # def __lt__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "<", other)) - - # def __gt__(self, other): - # return OmltRelation(self.index_set(), expr=(self, ">", other)) - - # def __le__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "<=", other)) - - # def __ge__(self, other): - # return OmltRelation(self.index_set(), expr=(self, ">=", other)) - - # def __eq__(self, other): - # return OmltRelation(self.index_set(), expr=(self, "==", other)) - - def __add__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "+", other)) - - def __sub__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "-", other)) - - def __mul__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "*", other)) - - def __div__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "//", other)) - - def __truediv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "/", other)) - - def __pow__(self, other): - return OmltExprIndexed(self.index_set(), expr=(self, "**", other)) - - def __radd__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "+", self)) - - def __rsub__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "-", self)) - - def __rmul__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "*", self)) - - def __rdiv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "//", self)) - - def __rtruediv__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "/", self)) - - def __rpow__(self, other): - return OmltExprIndexed(self.index_set(), expr=(other, "**", self)) - - def __iadd__(self, other): - return pyo.NumericValue.__iadd__(self, other) - - def __isub__(self, other): - return pyo.NumericValue.__isub__(self, other) - - def __imul__(self, other): - return pyo.NumericValue.__imul__(self, other) - - def __idiv__(self, other): - return pyo.NumericValue.__idiv__(self, other) - - def __itruediv__(self, other): - return pyo.NumericValue.__itruediv__(self, other) - - def __ipow__(self, other): - return pyo.NumericValue.__ipow__(self, other) - - def __neg__(self): - return pyo.NumericValue.__neg__(self) - - def __pos__(self): - return pyo.NumericValue.__pos__(self) - - def __abs__(self): - return pyo.NumericValue.__abs__(self) - - -class OmltIndexedPyomo(pyo.Var, OmltIndexed): - format = "pyomo" - - def __init__(self, *indexes, **kwargs): - kwargs.pop("format", None) - super().__init__(*indexes, **kwargs) - - def fix(self, value=None, skip_validation=False): - self.fixed = True - if value is None: - for vardata in self.values(): - vardata.fix(skip_validation) - else: - for vardata in self.values(): - vardata.fix(value, skip_validation) - - def setub(self, value): - for vardata in self.values(): - vardata.ub = value - - def setlb(self, value): - for vardata in self.values(): - vardata.lb = value - - -class OmltIndexedJuMP(OmltIndexed): - format = "jump" - - # Claim to be a Pyomo Var so blocks will register - # properly. 
- @property - def __class__(self): - return pyo.Var - - def __init__(self, *indexes, **kwargs): - if len(indexes) == 1: - index_set = indexes[0] - i_dict = {} - for i, val in enumerate(index_set): - i_dict[i] = val - self._index_set = tuple(i_dict[i] for i in range(len(index_set))) - else: - raise ValueError("Currently index cross-products are unsupported.") - - self._block = kwargs.pop("block", None) - - self._bounds = kwargs.pop("bounds", None) - - if isinstance(self._bounds, dict) and len(self._bounds) == len(self._index_set): - _lb = {k: v[0] for k, v in self._bounds.items()} - _ub = {k: v[1] for k, v in self._bounds.items()} - elif isinstance(self._bounds, tuple) and len(self._bounds) == 2: - _lb = {i: self._bounds[0] for i in self._index_set} - _ub = {i: self._bounds[1] for i in self._index_set} - elif self._bounds is None: - _lb = {i: None for i in self._index_set} - _ub = {i: None for i in self._index_set} - else: - raise ValueError( - "Bounds must be given as a tuple," " but %s was given.", self._bounds - ) - - _domain = kwargs.pop("domain", None) - _within = kwargs.pop("within", None) - - if _domain and _within and _domain != _within: - raise ValueError( - "'domain' and 'within' keywords have both " - "been supplied and do not agree. Please try " - "with a single keyword for the domain of this " - "variable." - ) - elif _domain: - self.domain = _domain - elif _within: - self.domain = _within - else: - self.domain = None - - if self.domain == pyo.Binary: - self.binary = True - else: - self.binary = False - if self.domain == pyo.Integers: - self.integer = True - else: - self.integer = False - - _initialize = kwargs.pop("initialize", None) - - if _initialize: - # If starting values have same length as index set, - # take one for each variable in index. - if len(self._index_set) == len(_initialize): - self._value = _initialize - # If there's a single starting value, use it for all - # variables in index. - elif len(_initialize) == 1: - self._value = {i: _initialize[0] for i in self._index_set} - else: - raise ValueError( - "Index set has length %s, but initializer has length %s.", - len(self._index_set), - len(_initialize), - ) - else: - self._value = {i: None for i in self._index_set} - - self._varinfo = {} - for idx in self._index_set: - self._varinfo[idx] = JuMPVarInfo( - _lb[idx], - _ub[idx], - None, # fix value - self._value[idx], - self.binary, - self.integer, - ) - self._vars = {} - self._varrefs = {} - self._constructed = False - self._ctype = pyo.Var - self._parent = None - - def __getitem__(self, item): - if isinstance(item, tuple) and len(item) == 1: - return self._vars[item[0]] - else: - return self._vars[item] - - def __setitem__(self, item, value): - self._varinfo[item] = value - if self._constructed: - self.construct() - - def keys(self): - if self._parent is not None: - return self._varrefs.keys() - else: - return self._vars.keys() - - def values(self): - if self._parent is not None: - return self._varrefs.values() - else: - return self._vars.values() - - def items(self): - if self._parent is not None: - return self._varrefs.items() - else: - return self._vars.items() - - def fix(self, value=None): - self.fixed = True - if value is not None: - for vardata in self._varinfo(): - vardata.has_fix = True - vardata.fixed_value = value - else: - for vardata in self._varinfo(): - vardata.has_fix = True - - def __len__(self): - """ - Return the number of component data objects stored by this - component. 
- """ - return len(self._vars) - - def __contains__(self, idx): - """Return true if the index is in the dictionary""" - return idx in self._vars - - # The default implementation is for keys() and __iter__ to be - # synonyms. The logic is implemented in keys() so that - # keys/values/items continue to work for components that implement - # other definitions for __iter__ (e.g., Set) - def __iter__(self): - """Return an iterator of the component data keys""" - return self._vars.__iter__() - - def construct(self, data=None): - for idx in self._index_set: - if isinstance(idx, int): - name = str(self.name) + "[" + str(idx) + "]" - else: - name = str(self.name) + str(list(idx)).replace(" ", "") - self._vars[idx] = JumpVar(self._varinfo[idx], name) - self._vars[idx].omltvar = self - self._vars[idx].index = idx - if self._parent is not None: - block = self._parent() - if block._format == "jump" and block._jumpmodel is not None: - self._varrefs[idx] = self._vars[idx].add_to_model(block._jumpmodel) - - self._constructed = True - - def setub(self, value): - for idx in self.index_set(): - self._varinfo[idx][2] = True - self._varinfo[idx][3] = value - if self._constructed: - self.construct() - - def setlb(self, value): - for idx in self.index_set(): - self._varinfo[idx][0] = True - self._varinfo[idx][1] = value - if self._constructed: - self.construct() - - @property - def ctype(self): - return self._ctype - - def index_set(self): - return self._index_set - - @property - def name(self): - return self._name - - def to_jumpvar(self): - if self._constructed: - return jump.Containers.DenseAxisArray(list(self.values()), self.index_set()) - - def to_jumpexpr(self): - return {k: jump.AffExpr(0, jump.OrderedDict([(v, 1)])) for k, v in self.items()} - - -""" -Future formats to implement. -""" - - -class OmltIndexedSmoke(OmltIndexed): - format = "smoke" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Smoke format is not currently implemented." - ) - - -class OmltIndexedGurobi(OmltIndexed): - format = "gurobi" - - def __init__(self, *args, **kwargs): - raise ValueError( - "Storing variables in Gurobi format is not currently implemented." - ) diff --git a/src/omlt/block.py b/src/omlt/block.py index ea6f7665..b8bb391d 100644 --- a/src/omlt/block.py +++ b/src/omlt/block.py @@ -25,12 +25,6 @@ class is used in combination with a formulation object to construct the """ -from omlt.base import OmltVar, DEFAULT_MODELING_LANGUAGE -from omlt.dependencies import julia_available - -if julia_available: - from omlt.base import jump - import pyomo.environ as pyo from pyomo.core.base.block import _BlockData, declare_custom_block @@ -42,16 +36,6 @@ def __init__(self, component): self.__formulation = None self.__input_indexes = None self.__output_indexes = None - self._format = DEFAULT_MODELING_LANGUAGE - if self._format == "jump": - self._jumpmodel = jump.Model() - else: - self._jumpmodel = None - - def set_format(self, format): - self._format = format - if self._format == "jump" and self._jumpmodel is None: - self._jumpmodel = jump.Model() def _setup_inputs_outputs(self, *, input_indexes, output_indexes): """Setup inputs and outputs. 
@@ -71,13 +55,13 @@ def _setup_inputs_outputs(self, *, input_indexes, output_indexes): self.__output_indexes = output_indexes self.inputs_set = pyo.Set(initialize=input_indexes) - self.inputs = OmltVar(self.inputs_set, initialize=0, format=self._format) + self.inputs = pyo.Var(self.inputs_set, initialize=0) self.outputs_set = pyo.Set(initialize=output_indexes) - self.outputs = OmltVar(self.outputs_set, initialize=0, format=self._format) + self.outputs = pyo.Var(self.outputs_set, initialize=0) + def build_formulation(self, formulation): + """Build formulation. - def build_formulation(self, formulation, format=None): - """ Call this method to construct the constraints (and possibly intermediate variables) necessary for the particular neural network formulation. The formulation object can be accessed later through the @@ -87,10 +71,6 @@ def build_formulation(self, formulation, format=None): ---------- formulation : instance of _PyomoFormulation see, for example, FullSpaceNNFormulation - format : str - Which modelling language to build the formulation in. - Currently supported are "pyomo" (default) and "jump". - """ if not formulation.input_indexes: msg = ( @@ -106,14 +86,6 @@ def build_formulation(self, formulation, format=None): ) raise ValueError(msg) - - if format is not None: - self._format = format - - if self._format == "jump": - self._jumpmodel = jump.Model() - - self._setup_inputs_outputs( input_indexes=list(formulation.input_indexes), output_indexes=list(formulation.output_indexes), diff --git a/src/omlt/dependencies.py b/src/omlt/dependencies.py index 3b882da2..6330c38f 100644 --- a/src/omlt/dependencies.py +++ b/src/omlt/dependencies.py @@ -8,5 +8,3 @@ torch_geometric, torch_geometric_available = attempt_import("torch_geometric") lineartree, lineartree_available = attempt_import("lineartree") - -julia, julia_available = attempt_import("juliacall") diff --git a/src/omlt/formulation.py b/src/omlt/formulation.py index db607935..442e44bf 100644 --- a/src/omlt/formulation.py +++ b/src/omlt/formulation.py @@ -2,7 +2,6 @@ import weakref import pyomo.environ as pyo -from omlt.base import OmltVar class _PyomoFormulationInterface(abc.ABC): @@ -93,11 +92,11 @@ def _setup_scaled_inputs_outputs(block, scaler=None, scaled_input_bounds=None): k: (float(scaled_input_bounds[k][0]), float(scaled_input_bounds[k][1])) for k in block.inputs_set } - block.scaled_inputs = OmltVar(block.inputs_set, initialize=0, bounds=bnds) + block.scaled_inputs = pyo.Var(block.inputs_set, initialize=0, bounds=bnds) else: - block.scaled_inputs = OmltVar(block.inputs_set, initialize=0) + block.scaled_inputs = pyo.Var(block.inputs_set, initialize=0) - block.scaled_outputs = OmltVar(block.outputs_set, initialize=0) + block.scaled_outputs = pyo.Var(block.outputs_set, initialize=0) if scaled_input_bounds is not None and scaler is None: # set the bounds on the inputs to be the same as the scaled inputs diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py index 17e798cf..4e1069fe 100644 --- a/src/omlt/gbt/gbt_formulation.py +++ b/src/omlt/gbt/gbt_formulation.py @@ -4,7 +4,6 @@ import numpy as np import pyomo.environ as pe -from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.gbt.model import GradientBoostedTreeModel @@ -154,7 +153,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): var = input_vars[var_idx] continuous_vars[var_idx] = var - block.z_l = OmltVar( + block.z_l = pe.Var( 
list(zip(nodes_tree_ids[nodes_leaf_mask], nodes_node_ids[nodes_leaf_mask])), bounds=(0, None), domain=pe.Reals, @@ -173,7 +172,7 @@ def add_formulation_to_block(block, model_definition, input_vars, output_vars): for f in continuous_vars for bi, _ in enumerate(branch_value_by_feature_id[f]) ] - block.y = OmltVar(y_index, domain=pe.Binary) + block.y = pe.Var(y_index, domain=pe.Binary) @block.Constraint(tree_ids) def single_leaf(b, tree_id): diff --git a/src/omlt/linear_tree/lt_formulation.py b/src/omlt/linear_tree/lt_formulation.py index 6c22dab5..5960a442 100644 --- a/src/omlt/linear_tree/lt_formulation.py +++ b/src/omlt/linear_tree/lt_formulation.py @@ -2,7 +2,6 @@ import pyomo.environ as pe from pyomo.gdp import Disjunct -from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs @@ -249,7 +248,7 @@ def _add_gdp_formulation_to_block( block.scaled_outputs.setub(output_bounds[1]) block.scaled_outputs.setlb(output_bounds[0]) - block.intermediate_output = OmltVar( + block.intermediate_output = pe.Var( tree_ids, bounds=(output_bounds[0], output_bounds[1]) ) @@ -324,7 +323,7 @@ def _add_hybrid_formulation_to_block(block, model_definition, input_vars, output # Create the intermeditate variables. z is binary that indicates which leaf # in tree t is returned. intermediate_output is the output of tree t and # the total output of the model is the sum of the intermediate_output vars - block.z = OmltVar(t_l, within=pe.Binary) + block.z = pe.Var(t_l, within=pe.Binary) block.intermediate_output = pe.Var(tree_ids) @block.Constraint(features, tree_ids) diff --git a/src/omlt/neuralnet/activations/relu.py b/src/omlt/neuralnet/activations/relu.py index eaad5a7f..733abb91 100644 --- a/src/omlt/neuralnet/activations/relu.py +++ b/src/omlt/neuralnet/activations/relu.py @@ -1,8 +1,6 @@ import pyomo.environ as pyo from pyomo import mpec -from omlt.base import OmltVar - def bigm_relu_activation_constraint(net_block, net, layer_block, layer): r"""Big-M ReLU activation formulation. @@ -40,7 +38,7 @@ def bigm_relu_activation_constraint(net_block, net, layer_block, layer): is :math:`\max(0,u)`. 
""" - layer_block.q_relu = OmltVar(layer.output_indexes, within=pyo.Binary) + layer_block.q_relu = pyo.Var(layer.output_indexes, within=pyo.Binary) layer_block._z_lower_bound_relu = pyo.Constraint(layer.output_indexes) layer_block._z_lower_bound_zhat_relu = pyo.Constraint(layer.output_indexes) diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py index b042bdf4..25fd2dbb 100644 --- a/src/omlt/neuralnet/layers/full_space.py +++ b/src/omlt/neuralnet/layers/full_space.py @@ -1,7 +1,6 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from omlt.base import OmltVar from omlt.neuralnet.activations import NON_INCREASING_ACTIVATIONS from omlt.neuralnet.layer import ConvLayer2D, PoolingLayer2D @@ -84,7 +83,7 @@ def full_space_gnn_layer(net_block, net, layer_block, layer): """ input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer) - input_layer_block.zbar = OmltVar( + input_layer_block.zbar = pyo.Var( pyo.Set(initialize=layer.input_indexes), pyo.Set(initialize=range(layer.N)), initialize=0, @@ -283,7 +282,7 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer): for kernel_index, _ in layer.kernel_index_with_input_indexes(0, 0, 0) ) ) - layer_block.q_maxpool = OmltVar( + layer_block.q_maxpool = pyo.Var( layer.output_indexes, layer_block._kernel_indexes, within=pyo.Binary ) layer_block._q_sum_maxpool = pyo.Constraint(layer.output_indexes) diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py index b38377de..1430332a 100644 --- a/src/omlt/neuralnet/layers/partition_based.py +++ b/src/omlt/neuralnet/layers/partition_based.py @@ -2,8 +2,6 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from omlt.base import OmltVar - def default_partition_split_func(w, n): r"""Default function to partition weights in :math:`w` into :math:`n` partitions. @@ -86,8 +84,8 @@ def output_node_block(b, *output_index): splits = split_func(weights) num_splits = len(splits) - b.sig = OmltVar(domain=pyo.Binary) - b.z2 = OmltVar(range(num_splits)) + b.sig = pyo.Var(domain=pyo.Binary) + b.z2 = pyo.Var(range(num_splits)) mapper = layer.input_index_mapper @@ -111,7 +109,6 @@ def output_node_block(b, *output_index): expr += prev_layer_block.z[input_index] * w lb, ub = compute_bounds_on_expr(expr) - if lb is None: msg = "Expression is unbounded below." 
raise ValueError(msg) @@ -123,12 +120,10 @@ def output_node_block(b, *output_index): z2.setlb(min(0, lb)) z2.setub(max(0, ub)) - b.eq_16_lb.add(b.sig * lb <= expr - z2) - b.eq_16_ub.add(b.sig * ub >= expr - z2) - - minus_sig = 1 - b.sig - b.eq_17_lb.add(minus_sig * lb <= z2) - b.eq_17_ub.add(minus_sig * ub >= z2) + b.eq_16_lb.add(expr - z2 >= b.sig * lb) + b.eq_16_ub.add(expr - z2 <= b.sig * ub) + b.eq_17_lb.add(z2 >= (1 - b.sig) * lb) + b.eq_17_ub.add(z2 <= (1 - b.sig) * ub) # compute dense layer expression to compute bounds expr = 0.0 @@ -162,11 +157,9 @@ def output_node_block(b, *output_index): b.eq_13 = pyo.Constraint(expr=eq_13_expr <= 0) b.eq_14 = pyo.Constraint( - expr=sum(b.z2[s] for s in range(num_splits)) - + bias * (1 - b.sig)._expression - >= 0 + expr=sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig) >= 0 ) b.eq_15 = pyo.Constraint( expr=layer_block.z[output_index] - == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig)._expression + == sum(b.z2[s] for s in range(num_splits)) + bias * (1 - b.sig) ) diff --git a/src/omlt/neuralnet/nn_formulation.py b/src/omlt/neuralnet/nn_formulation.py index 6d5fcf8f..8e835d23 100644 --- a/src/omlt/neuralnet/nn_formulation.py +++ b/src/omlt/neuralnet/nn_formulation.py @@ -1,6 +1,5 @@ import pyomo.environ as pyo -from omlt.base import OmltVar from omlt.formulation import _PyomoFormulation, _setup_scaled_inputs_outputs from omlt.neuralnet.activations import ( ACTIVATION_FUNCTION_MAP as _DEFAULT_ACTIVATION_FUNCTIONS, @@ -162,7 +161,7 @@ def _build_neural_network_formulation( @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, initialize=0) + b.z = pyo.Var(net_layer.output_indexes, initialize=0) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -171,7 +170,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, initialize=0) + b.zhat = pyo.Var(net_layer.output_indexes, initialize=0) return b @@ -491,7 +490,7 @@ def _build_formulation(self): @block.Block(block.layers) def layer(b, layer_id): net_layer = net.layer(layer_id) - b.z = OmltVar(net_layer.output_indexes, initialize=0) + b.z = pyo.Var(net_layer.output_indexes, initialize=0) if isinstance(net_layer, InputLayer): for index in net_layer.output_indexes: input_var = block.scaled_inputs[index] @@ -500,7 +499,7 @@ def layer(b, layer_id): z_var.setub(input_var.ub) else: # add zhat only to non input layers - b.zhat = OmltVar(net_layer.output_indexes, initialize=0) + b.zhat = pyo.Var(net_layer.output_indexes, initialize=0) return b diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py index a9459883..315bb176 100644 --- a/tests/neuralnet/test_nn_formulation.py +++ b/tests/neuralnet/test_nn_formulation.py @@ -516,7 +516,6 @@ def test_partition_based_unbounded_below(): m.neural_net_block = OmltBlock() net, y = two_node_network(None, -2.0) test_layer = list(net.layers)[2] - test_layer_id = id(test_layer) prev_layer_id = id(list(net.layers)[1]) formulation = ReluPartitionFormulation(net) @@ -529,11 +528,7 @@ def test_partition_based_unbounded_below(): expected_msg = "Expression is unbounded below." 
with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( - m.neural_net_block, - net, - m.neural_net_block.layer[test_layer_id], - test_layer, - split_func, + m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) @@ -542,7 +537,6 @@ def test_partition_based_unbounded_above(): m.neural_net_block = OmltBlock() net, y = two_node_network(None, -2.0) test_layer = list(net.layers)[2] - test_layer_id = id(test_layer) prev_layer_id = id(list(net.layers)[1]) formulation = ReluPartitionFormulation(net) @@ -555,11 +549,7 @@ def test_partition_based_unbounded_above(): expected_msg = "Expression is unbounded above." with pytest.raises(ValueError, match=expected_msg): partition_based_dense_relu_layer( - m.neural_net_block, - net, - m.neural_net_block.layer[test_layer_id], - test_layer, - split_func, + m.neural_net_block, net, m.neural_net_block, test_layer, split_func ) diff --git a/tests/test_block.py b/tests/test_block.py index 153e0f78..9711345c 100644 --- a/tests/test_block.py +++ b/tests/test_block.py @@ -1,8 +1,6 @@ import pyomo.environ as pyo import pytest from omlt import OmltBlock -from omlt.base import OmltVar -from omlt.dependencies import julia_available INPUTS_LENGTH = 3 OUTPUTS_LENGTH = 2 @@ -45,37 +43,6 @@ def test_block(): formulation = DummyFormulation() m.b.build_formulation(formulation) - assert m.b._OmltBlockData__formulation is formulation - assert [k for k in m.b.inputs] == ["A", "C", "D"] - assert [k for k in m.b.outputs] == [(0, 0), (0, 1), (1, 0), (1, 1)] - - -@pytest.mark.skipif( - not julia_available, reason="Test only valid when Julia is available" -) -def test_jump_block(): - m = pyo.ConcreteModel() - m.b = OmltBlock() - m.b.set_format("jump") - - with pytest.raises(ValueError) as excinfo: - m.b.x = OmltVar(initialize=(2, 7), format="jump") - expected_msg = "Initial value for JuMP variables must be an int or float, but was provided." - - assert str(excinfo.value) == expected_msg - - m.b.y = OmltVar(initialize=2, format="jump") - assert m.b.y.value == 2 - assert m.b.y.name == "y" - m.b.y.lb = 0 - m.b.y.ub = 5 - assert m.b.y.lb == 0 - assert m.b.y.ub == 5 - - formulation = dummy_formulation() - - m.b.build_formulation(formulation, format="jump") - assert m.b._OmltBlockData__formulation is formulation assert list(m.b.inputs) == ["A", "C", "D"] assert list(m.b.outputs) == [(0, 0), (0, 1), (1, 0), (1, 1)] diff --git a/tests/test_var.py b/tests/test_var.py deleted file mode 100644 index 1639c480..00000000 --- a/tests/test_var.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest - -import pyomo.environ as pyo -from omlt.base import OmltVar -from omlt.dependencies import julia_available - - -def _test_scalar_var(format): - v = OmltVar(format=format, initialize=2, domain=pyo.Integers) - assert v.is_indexed() is False - assert v.ctype == pyo.ScalarVar - - v.construct() - - v.value = 3 - assert v.value == 3 - - v.bounds = (0, 5) - assert v.lb == 0 - assert v.ub == 5 - assert v.bounds == (0, 5) - - -def test_scalar_pyomo(): - _test_scalar_var("pyomo") - - -@pytest.mark.skipif( - not julia_available, reason="Test only valid when Julia is available" -) -def test_scalar_jump(): - _test_scalar_var("jump")
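A closing note on the dependency gating these tests rely on: flags like `julia_available` and `onnx_available` come from Pyomo's `attempt_import`, which returns a module proxy and an availability indicator and defers the real import until either is first used. A minimal sketch of the pattern as it appears in src/omlt/dependencies.py; the test body stands in for the deleted `_test_scalar_var("jump")` call:

import pytest
from pyomo.common.dependencies import attempt_import

# Nothing is imported here; the proxy resolves lazily, so an optional
# extra that is absent costs nothing unless a test actually needs it.
julia, julia_available = attempt_import("juliacall")

@pytest.mark.skipif(
    not julia_available, reason="Test only valid when Julia is available"
)
def test_scalar_jump():
    # Evaluating the indicator here triggers (or reuses) the deferred import.
    assert julia_available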