From 5499b87a6c759cc79933b8f86a0c24a681523d07 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Tue, 16 Apr 2024 14:54:17 +0200 Subject: [PATCH 01/19] Make creation information a mixin --- ixmp4/data/db/iamc/variable/model.py | 2 -- ixmp4/data/db/model/model.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/ixmp4/data/db/iamc/variable/model.py b/ixmp4/data/db/iamc/variable/model.py index b08d4f5f..69a11256 100644 --- a/ixmp4/data/db/iamc/variable/model.py +++ b/ixmp4/data/db/iamc/variable/model.py @@ -1,7 +1,5 @@ from typing import ClassVar -from sqlalchemy.orm import Mapped as Mapped - from ixmp4.data import types from ixmp4.data.abstract import iamc as abstract from ixmp4.data.db import mixins diff --git a/ixmp4/data/db/model/model.py b/ixmp4/data/db/model/model.py index 19ac3d86..91c486d6 100644 --- a/ixmp4/data/db/model/model.py +++ b/ixmp4/data/db/model/model.py @@ -1,7 +1,5 @@ from typing import ClassVar -from sqlalchemy.orm import Mapped as Mapped - from ixmp4.data import abstract, types from ixmp4.data.db import mixins From a2a63ffae306498f494b5c2fcf598c1ee3809afb Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:09:54 +0200 Subject: [PATCH 02/19] Make name column a mixin --- ixmp4/data/db/iamc/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ixmp4/data/db/iamc/base.py b/ixmp4/data/db/iamc/base.py index 98b56581..65ec5567 100644 --- a/ixmp4/data/db/iamc/base.py +++ b/ixmp4/data/db/iamc/base.py @@ -7,6 +7,7 @@ Deleter, Enumerator, Lister, + NameMixin, Retriever, Selecter, Tabulator, From 90b71059d391f4be59c795d62a627c55355fddbd Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:11:00 +0200 Subject: [PATCH 03/19] Make optimization columns mixins * Covers: * run__id, data, name, uniqueness of name together with run__id * Adapts tests since default order of columns changes --- ixmp4/data/db/optimization/base.py | 3 +++ ixmp4/data/db/optimization/indexset/model.py | 2 +- ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 7 ++++++- tests/core/test_table.py | 6 ++++++ tests/data/test_optimization_table.py | 6 ++++++ 6 files changed, 23 insertions(+), 3 deletions(-) diff --git a/ixmp4/data/db/optimization/base.py b/ixmp4/data/db/optimization/base.py index 7c3cc084..924a12e8 100644 --- a/ixmp4/data/db/optimization/base.py +++ b/ixmp4/data/db/optimization/base.py @@ -12,7 +12,10 @@ Deleter, Enumerator, Lister, + OptimizationDataMixin, + OptimizationNameMixin, Retriever, + RunIDMixin, Selecter, Tabulator, ) diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py index 896692a4..f7d3f4a7 100644 --- a/ixmp4/data/db/optimization/indexset/model.py +++ b/ixmp4/data/db/optimization/indexset/model.py @@ -10,7 +10,7 @@ from .. import base -class IndexSet(base.BaseModel): +class IndexSet(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.IndexSet.NotFound NotUnique: ClassVar = abstract.IndexSet.NotUnique DataInvalid: ClassVar = OptimizationDataValidationError diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index c364f807..2f719468 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. 
import base -class Scalar(base.BaseModel): +class Scalar(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index ea99cd11..352bd5cb 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -11,7 +11,12 @@ from .. import Column, base, utils -class Table(base.BaseModel): +class Table( + base.BaseModel, + base.OptimizationDataMixin, + base.RunIDMixin, + base.UniqueNameRunIDMixin, +): # NOTE: These might be mixin-able, but would require some abstraction NotFound: ClassVar = abstract.Table.NotFound NotUnique: ClassVar = abstract.Table.NotUnique diff --git a/tests/core/test_table.py b/tests/core/test_table.py index 0fd081c9..d54363bc 100644 --- a/tests/core/test_table.py +++ b/tests/core/test_table.py @@ -21,8 +21,11 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, + table.name, table.created_at, table.created_by, + table.data, + table.run_id, ] for table in tables ], @@ -31,8 +34,11 @@ def df_from_list(tables: list[Table]): "data", "name", "id", + "name", "created_at", "created_by", + "data", + "run__id", ], ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index 4eaef13e..0f4a49ba 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,8 +19,11 @@ def df_from_list(tables: list): table.data, table.name, table.id, + table.name, table.created_at, table.created_by, + table.data, + table.run__id, ] for table in tables ], @@ -29,8 +32,11 @@ def df_from_list(tables: list): "data", "name", "id", + "name", "created_at", "created_by", + "data", + "run__id", ], ) From 7b8e389cc44bc5ef3214948004eb7660ff30301c Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 18 Apr 2024 11:46:49 +0200 Subject: [PATCH 04/19] Inherit mixin requirements directly --- ixmp4/data/db/optimization/indexset/model.py | 2 +- ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 1 - tests/core/test_table.py | 4 ++-- tests/data/test_optimization_table.py | 4 ++-- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py index f7d3f4a7..9bb0f41d 100644 --- a/ixmp4/data/db/optimization/indexset/model.py +++ b/ixmp4/data/db/optimization/indexset/model.py @@ -10,7 +10,7 @@ from .. import base -class IndexSet(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): +class IndexSet(base.BaseModel, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.IndexSet.NotFound NotUnique: ClassVar = abstract.IndexSet.NotUnique DataInvalid: ClassVar = OptimizationDataValidationError diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index 2f719468..3dac531e 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. 
import base -class Scalar(base.BaseModel, base.RunIDMixin, base.UniqueNameRunIDMixin): +class Scalar(base.BaseModel, base.UniqueNameRunIDMixin): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index 352bd5cb..654f5dea 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -14,7 +14,6 @@ class Table( base.BaseModel, base.OptimizationDataMixin, - base.RunIDMixin, base.UniqueNameRunIDMixin, ): # NOTE: These might be mixin-able, but would require some abstraction diff --git a/tests/core/test_table.py b/tests/core/test_table.py index d54363bc..ab82877c 100644 --- a/tests/core/test_table.py +++ b/tests/core/test_table.py @@ -21,10 +21,10 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, + table.data, table.name, table.created_at, table.created_by, - table.data, table.run_id, ] for table in tables @@ -34,10 +34,10 @@ def df_from_list(tables: list[Table]): "data", "name", "id", + "data", "name", "created_at", "created_by", - "data", "run__id", ], ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index 0f4a49ba..132cae47 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,10 +19,10 @@ def df_from_list(tables: list): table.data, table.name, table.id, + table.data, table.name, table.created_at, table.created_by, - table.data, table.run__id, ] for table in tables @@ -32,10 +32,10 @@ def df_from_list(tables: list): "data", "name", "id", + "data", "name", "created_at", "created_by", - "data", "run__id", ], ) From 437424168577f30d763cabb1cc33973767c6238f Mon Sep 17 00:00:00 2001 From: Fridolin Glatter <83776373+glatterf42@users.noreply.github.com> Date: Fri, 28 Jun 2024 10:45:38 +0200 Subject: [PATCH 05/19] Include optimization parameter basis (#79) * Make Column generic enough for multiple parents * Introduce optimization.Parameter * Add tests for add_data * Enable remaining parameter tests (#86) * Enable remaining parameter tests * Include optimization parameter api layer (#89) * Bump several dependency versions * Let api/column handle both tables and parameters * Make api-layer tests pass * Include optimization parameter core layer (#90) * Enable parameter core layer and test it * Fix things after rebase * Ensure all intended changes survive the rebase * Adapt data validation function for parameters * Allow tests to pass again --- ixmp4/__init__.py | 8 +- ixmp4/core/__init__.py | 1 + ixmp4/core/optimization/data.py | 3 + ixmp4/core/optimization/parameter.py | 131 +++++++ ixmp4/data/abstract/__init__.py | 3 +- ixmp4/data/abstract/optimization/__init__.py | 1 + ixmp4/data/abstract/optimization/column.py | 4 +- ixmp4/data/abstract/optimization/parameter.py | 202 ++++++++++ ixmp4/data/api/__init__.py | 6 +- ixmp4/data/api/optimization/__init__.py | 1 + ixmp4/data/api/optimization/column.py | 3 +- ixmp4/data/api/optimization/parameter.py | 83 ++++ ixmp4/data/backend/api.py | 2 + ixmp4/data/backend/base.py | 2 + ixmp4/data/backend/db.py | 3 + ixmp4/data/db/__init__.py | 2 + ixmp4/data/db/filters/__init__.py | 1 + .../data/db/filters/optimizationparameter.py | 17 + ixmp4/data/db/iamc/base.py | 1 - ixmp4/data/db/optimization/__init__.py | 1 + ixmp4/data/db/optimization/base.py | 3 - ixmp4/data/db/optimization/column/model.py | 13 +- 
.../data/db/optimization/column/repository.py | 36 +- ixmp4/data/db/optimization/indexset/model.py | 2 +- .../db/optimization/parameter/__init__.py | 2 + ixmp4/data/db/optimization/parameter/docs.py | 8 + .../data/db/optimization/parameter/filter.py | 19 + ixmp4/data/db/optimization/parameter/model.py | 37 ++ .../db/optimization/parameter/repository.py | 175 +++++++++ ixmp4/data/db/optimization/scalar/model.py | 2 +- ixmp4/data/db/optimization/table/model.py | 6 +- .../data/db/optimization/table/repository.py | 4 +- ixmp4/server/rest/__init__.py | 12 +- ixmp4/server/rest/docs.py | 24 ++ ixmp4/server/rest/optimization/__init__.py | 2 +- ixmp4/server/rest/optimization/parameter.py | 79 ++++ tests/core/test_optimization_parameter.py | 323 ++++++++++++++++ tests/core/test_table.py | 6 - tests/data/test_docs.py | 69 ++++ tests/data/test_optimization_parameter.py | 366 ++++++++++++++++++ tests/data/test_optimization_table.py | 6 - 41 files changed, 1609 insertions(+), 60 deletions(-) create mode 100644 ixmp4/core/optimization/parameter.py create mode 100644 ixmp4/data/abstract/optimization/parameter.py create mode 100644 ixmp4/data/api/optimization/parameter.py create mode 100644 ixmp4/data/db/filters/optimizationparameter.py create mode 100644 ixmp4/data/db/optimization/parameter/__init__.py create mode 100644 ixmp4/data/db/optimization/parameter/docs.py create mode 100644 ixmp4/data/db/optimization/parameter/filter.py create mode 100644 ixmp4/data/db/optimization/parameter/model.py create mode 100644 ixmp4/data/db/optimization/parameter/repository.py create mode 100644 ixmp4/server/rest/optimization/parameter.py create mode 100644 tests/core/test_optimization_parameter.py create mode 100644 tests/data/test_optimization_parameter.py diff --git a/ixmp4/__init__.py b/ixmp4/__init__.py index 32a69aa4..2bed434d 100644 --- a/ixmp4/__init__.py +++ b/ixmp4/__init__.py @@ -1,16 +1,16 @@ -# flake8: noqa import importlib.metadata +from ixmp4.core import IndexSet as IndexSet from ixmp4.core import Model as Model +from ixmp4.core import Parameter as Parameter from ixmp4.core import Platform as Platform from ixmp4.core import Region as Region from ixmp4.core import Run as Run +from ixmp4.core import Scalar as Scalar from ixmp4.core import Scenario as Scenario +from ixmp4.core import Table as Table from ixmp4.core import Unit as Unit from ixmp4.core import Variable as Variable -from ixmp4.core import IndexSet as IndexSet -from ixmp4.core import Scalar as Scalar -from ixmp4.core import Table as Table from ixmp4.core.exceptions import InconsistentIamcType as InconsistentIamcType from ixmp4.core.exceptions import IxmpError as IxmpError from ixmp4.core.exceptions import NotFound as NotFound diff --git a/ixmp4/core/__init__.py b/ixmp4/core/__init__.py index 4d5d8078..3ee40f84 100644 --- a/ixmp4/core/__init__.py +++ b/ixmp4/core/__init__.py @@ -4,6 +4,7 @@ from .optimization.indexset import IndexSet as IndexSet from .optimization.scalar import Scalar as Scalar from .optimization.table import Table as Table +from .optimization.parameter import Parameter as Parameter from .platform import Platform as Platform from .region import Region as Region from .run import Run as Run diff --git a/ixmp4/core/optimization/data.py b/ixmp4/core/optimization/data.py index 1bf5a295..3d085e78 100644 --- a/ixmp4/core/optimization/data.py +++ b/ixmp4/core/optimization/data.py @@ -2,6 +2,7 @@ from ..base import BaseFacade from .indexset import IndexSetRepository +from .parameter import ParameterRepository from .scalar import 
ScalarRepository from .table import TableRepository @@ -11,11 +12,13 @@ class OptimizationData(BaseFacade): IndexSet, Table, Variable, etc.""" indexsets: IndexSetRepository + parameters: ParameterRepository scalars: ScalarRepository tables: TableRepository def __init__(self, *args, run: Run, **kwargs) -> None: super().__init__(*args, **kwargs) self.indexsets = IndexSetRepository(_backend=self.backend, _run=run) + self.parameters = ParameterRepository(_backend=self.backend, _run=run) self.scalars = ScalarRepository(_backend=self.backend, _run=run) self.tables = TableRepository(_backend=self.backend, _run=run) diff --git a/ixmp4/core/optimization/parameter.py b/ixmp4/core/optimization/parameter.py new file mode 100644 index 00000000..6612be41 --- /dev/null +++ b/ixmp4/core/optimization/parameter.py @@ -0,0 +1,131 @@ +from datetime import datetime +from typing import Any, ClassVar, Iterable + +import pandas as pd + +from ixmp4.core.base import BaseFacade, BaseModelFacade +from ixmp4.data.abstract import Docs as DocsModel +from ixmp4.data.abstract import Parameter as ParameterModel +from ixmp4.data.abstract import Run +from ixmp4.data.abstract.optimization import Column + + +class Parameter(BaseModelFacade): + _model: ParameterModel + NotFound: ClassVar = ParameterModel.NotFound + NotUnique: ClassVar = ParameterModel.NotUnique + + @property + def id(self) -> int: + return self._model.id + + @property + def name(self) -> str: + return self._model.name + + @property + def run_id(self) -> int: + return self._model.run__id + + @property + def data(self) -> dict[str, Any]: + return self._model.data + + def add(self, data: dict[str, Any] | pd.DataFrame) -> None: + """Adds data to an existing Parameter.""" + self.backend.optimization.parameters.add_data( + parameter_id=self._model.id, data=data + ) + self._model.data = self.backend.optimization.parameters.get( + run_id=self._model.run__id, name=self._model.name + ).data + + @property + def values(self) -> list: + return self._model.data.get("values", []) + + @property + def units(self) -> list: + return self._model.data.get("units", []) + + @property + def constrained_to_indexsets(self) -> list[str]: + return [column.indexset.name for column in self._model.columns] + + @property + def columns(self) -> list[Column]: + return self._model.columns + + @property + def created_at(self) -> datetime | None: + return self._model.created_at + + @property + def created_by(self) -> str | None: + return self._model.created_by + + @property + def docs(self): + try: + return self.backend.optimization.parameters.docs.get(self.id).description + except DocsModel.NotFound: + return None + + @docs.setter + def docs(self, description): + if description is None: + self.backend.optimization.parameters.docs.delete(self.id) + else: + self.backend.optimization.parameters.docs.set(self.id, description) + + @docs.deleter + def docs(self): + try: + self.backend.optimization.parameters.docs.delete(self.id) + # TODO: silently failing + except DocsModel.NotFound: + return None + + def __str__(self) -> str: + return f"" + + +class ParameterRepository(BaseFacade): + _run: Run + + def __init__(self, _run: Run, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._run = _run + + def create( + self, + name: str, + constrained_to_indexsets: list[str], + column_names: list[str] | None = None, + ) -> Parameter: + model = self.backend.optimization.parameters.create( + name=name, + run_id=self._run.id, + constrained_to_indexsets=constrained_to_indexsets, + 
column_names=column_names, + ) + return Parameter(_backend=self.backend, _model=model) + + def get(self, name: str) -> Parameter: + model = self.backend.optimization.parameters.get(run_id=self._run.id, name=name) + return Parameter(_backend=self.backend, _model=model) + + def list(self, name: str | None = None) -> Iterable[Parameter]: + parameters = self.backend.optimization.parameters.list( + run_id=self._run.id, name=name + ) + return [ + Parameter( + _backend=self.backend, + _model=i, + ) + for i in parameters + ] + + def tabulate(self, name: str | None = None) -> pd.DataFrame: + return self.backend.optimization.parameters.tabulate(name=name) diff --git a/ixmp4/data/abstract/__init__.py b/ixmp4/data/abstract/__init__.py index cfcf7b61..acc388d1 100644 --- a/ixmp4/data/abstract/__init__.py +++ b/ixmp4/data/abstract/__init__.py @@ -2,7 +2,6 @@ This module holds a shared datastructure and interface for normalization between the database and api data models and repositories. """ -# flake8: noqa from .base import ( BaseMeta, @@ -32,6 +31,8 @@ from .optimization import ( IndexSet, IndexSetRepository, + Parameter, + ParameterRepository, Scalar, ScalarRepository, Table, diff --git a/ixmp4/data/abstract/optimization/__init__.py b/ixmp4/data/abstract/optimization/__init__.py index 03d8e505..cd322e09 100644 --- a/ixmp4/data/abstract/optimization/__init__.py +++ b/ixmp4/data/abstract/optimization/__init__.py @@ -1,4 +1,5 @@ from .column import Column from .indexset import IndexSet, IndexSetRepository +from .parameter import Parameter, ParameterRepository from .scalar import Scalar, ScalarRepository from .table import Table, TableRepository diff --git a/ixmp4/data/abstract/optimization/column.py b/ixmp4/data/abstract/optimization/column.py index 856fc116..029d2361 100644 --- a/ixmp4/data/abstract/optimization/column.py +++ b/ixmp4/data/abstract/optimization/column.py @@ -14,8 +14,10 @@ class Column(base.BaseModel, Protocol): """Unique name of the Column.""" dtype: types.String """Type of the Column's data.""" - table__id: types.Integer + table__id: types.Mapped[int | None] """Foreign unique integer id of a Table.""" + parameter__id: types.Mapped[int | None] + """Foreign unique integer id of a Parameter.""" indexset: types.Mapped[IndexSet] """Associated IndexSet.""" constrained_to_indexset: types.Integer diff --git a/ixmp4/data/abstract/optimization/parameter.py b/ixmp4/data/abstract/optimization/parameter.py new file mode 100644 index 00000000..01e1cacc --- /dev/null +++ b/ixmp4/data/abstract/optimization/parameter.py @@ -0,0 +1,202 @@ +from typing import Any, Iterable, Protocol + +import pandas as pd + +from ixmp4.data import types + +from .. import base +from ..docs import DocsRepository +from .column import Column + + +class Parameter(base.BaseModel, Protocol): + """Parameter data model.""" + + name: types.String + """Unique name of the Parameter.""" + data: types.JsonDict + """Data stored in the Parameter.""" + columns: types.Mapped[list[Column]] + """Data specifying this Parameter's Columns.""" + + run__id: types.Integer + "Foreign unique integer id of a run." + + created_at: types.DateTime + "Creation date/time. TODO" + created_by: types.String + "Creator. 
TODO" + + def __str__(self) -> str: + return f"" + + +class ParameterRepository( + base.Creator, + base.Retriever, + base.Enumerator, + Protocol, +): + docs: DocsRepository + + def create( + self, + run_id: int, + name: str, + constrained_to_indexsets: list[str], + column_names: list[str] | None = None, + ) -> Parameter: + """Creates a Parameter. + + Each column of the Parameter needs to be constrained to an existing + :class:ixmp4.data.abstract.optimization.IndexSet. These are specified by name + and per default, these will be the column names. They can be overwritten by + specifying `column_names`, which needs to specify a unique name for each column. + + Parameters + ---------- + run_id : int + The id of the :class:`ixmp4.data.abstract.Run` for which this Parameter is + defined. + name : str + The unique name of the Parameter. + constrained_to_indexsets : list[str] + List of :class:`ixmp4.data.abstract.optimization.IndexSet` names that define + the allowed contents of the Parameter's columns. + column_names: list[str] | None = None + Optional list of names to use as column names. If given, overwrites the + names inferred from `constrained_to_indexsets`. + + Raises + ------ + :class:`ixmp4.data.abstract.optimization.Parameter.NotUnique`: + If the Parameter with `name` already exists for the Run with `run_id`. + ValueError + If `column_names` are not unique or not enough names are given. + + Returns + ------- + :class:`ixmp4.data.abstract.optimization.Parameter`: + The created Parameter. + """ + ... + + def get(self, run_id: int, name: str) -> Parameter: + """Retrieves a Parameter. + + Parameters + ---------- + run_id : int + The id of the :class:`ixmp4.data.abstract.Run` for which this Parameter is + defined. + name : str + The name of the Parameter. + + Raises + ------ + :class:`ixmp4.data.abstract.optimization.Parameter.NotFound`: + If the Parameter with `name` does not exist. + + Returns + ------- + :class:`ixmp4.data.abstract.optimization.Parameter`: + The retrieved Parameter. + """ + ... + + def get_by_id(self, id: int) -> Parameter: + """Retrieves a Parameter by its id. + + Parameters + ---------- + id : int + Unique integer id. + + Raises + ------ + :class:`ixmp4.data.abstract.optimization.Parameter.NotFound`. + If the Parameter with `id` does not exist. + + Returns + ------- + :class:`ixmp4.data.abstract.optimization.Parameter`: + The retrieved Parameter. + """ + ... + + def list(self, *, name: str | None = None, **kwargs) -> Iterable[Parameter]: + r"""Lists Parameters by specified criteria. + + Parameters + ---------- + name : str + The name of a Parameter. If supplied only one result will be returned. + # TODO: Update kwargs + \*\*kwargs: any + More filter parameters as specified in + `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + + Returns + ------- + Iterable[:class:`ixmp4.data.abstract.optimization.Parameter`]: + List of Parameters. + """ + ... + + def tabulate(self, *, name: str | None = None, **kwargs) -> pd.DataFrame: + r"""Tabulate Parameters by specified criteria. + + Parameters + ---------- + name : str + The name of a Parameter. If supplied only one result will be returned. + # TODO: Update kwargs + \*\*kwargs: any + More filter parameters as specified in + `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + + Returns + ------- + :class:`pandas.DataFrame`: + A data frame with the columns: + - id + - name + - data + - run__id + - created_at + - created_by + """ + ... 
+ + # TODO Once present, state how to check which IndexSets are linked and which values + # they permit + def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> None: + r"""Adds data to a Parameter. + + The data will be validated with the linked constrained + :class:`ixmp4.data.abstract.optimization.IndexSet`s. For that, `data.keys()` + must correspond to the names of the Parameter's columns. Each column can only + contain values that are in the linked `IndexSet.elements`. Each row of entries + must be unique. No values can be missing, `None`, or `NaN`. If `data.keys()` + contains names already present in `Parameter.data`, existing values will be + overwritten. + + Parameters + ---------- + parameter_id : int + The id of the :class:`ixmp4.data.abstract.optimization.Parameter`. + data : dict[str, Any] | pandas.DataFrame + The data to be added. + + Raises + ------ + ValueError: + - If values are missing, `None`, or `NaN` + - If values are not allowed based on constraints to `Indexset`s + - If rows are not unique + + Returns + ------- + None + """ + ... diff --git a/ixmp4/data/api/__init__.py b/ixmp4/data/api/__init__.py index c9db71be..eb23f968 100644 --- a/ixmp4/data/api/__init__.py +++ b/ixmp4/data/api/__init__.py @@ -1,8 +1,6 @@ -# flake8: noqa - from .base import BaseModel, DataFrame from .docs import Docs, DocsRepository -from .iamc import ( # Measurand,; MeasurandRepository,; AnnualDataPoint,; SubAnnualDataPoint,; CategoricalDataPoint, +from .iamc import ( # Measurand,; MeasurandRepository,; AnnualDataPoint,; SubAnnualDataPoint,; CategoricalDataPoint, # noqa: E501 DataPoint, DataPointRepository, TimeSeries, @@ -15,6 +13,8 @@ from .optimization import ( IndexSet, IndexSetRepository, + Parameter, + ParameterRepository, Scalar, ScalarRepository, Table, diff --git a/ixmp4/data/api/optimization/__init__.py b/ixmp4/data/api/optimization/__init__.py index 020571bd..6cf12292 100644 --- a/ixmp4/data/api/optimization/__init__.py +++ b/ixmp4/data/api/optimization/__init__.py @@ -1,3 +1,4 @@ from .indexset import IndexSet, IndexSetRepository +from .parameter import Parameter, ParameterRepository from .scalar import Scalar, ScalarRepository from .table import Table, TableRepository diff --git a/ixmp4/data/api/optimization/column.py b/ixmp4/data/api/optimization/column.py index 229bb5cf..582c2314 100644 --- a/ixmp4/data/api/optimization/column.py +++ b/ixmp4/data/api/optimization/column.py @@ -14,7 +14,8 @@ class Column(base.BaseModel): id: int name: str dtype: str - table__id: int + table__id: int | None + parameter__id: int | None indexset: IndexSet constrained_to_indexset: int unique: bool diff --git a/ixmp4/data/api/optimization/parameter.py b/ixmp4/data/api/optimization/parameter.py new file mode 100644 index 00000000..32b21cc1 --- /dev/null +++ b/ixmp4/data/api/optimization/parameter.py @@ -0,0 +1,83 @@ +from datetime import datetime +from typing import Any, ClassVar, Iterable + +import pandas as pd + +from ixmp4.data import abstract + +from .. 
import base +from ..docs import Docs, DocsRepository +from .column import Column + + +class Parameter(base.BaseModel): + NotFound: ClassVar = abstract.Parameter.NotFound + NotUnique: ClassVar = abstract.Parameter.NotUnique + DeletionPrevented: ClassVar = abstract.Parameter.DeletionPrevented + + id: int + name: str + data: dict[str, Any] + columns: list["Column"] + run__id: int + + created_at: datetime | None + created_by: str | None + + +class ParameterDocsRepository(DocsRepository): + model_class = Docs + prefix = "docs/optimization/parameters/" + + +class ParameterRepository( + base.Creator[Parameter], + base.Retriever[Parameter], + base.Enumerator[Parameter], + abstract.ParameterRepository, +): + model_class = Parameter + prefix = "optimization/parameters/" + + def __init__(self, backend, *args, **kwargs) -> None: + super().__init__(backend, *args, **kwargs) + self.docs = ParameterDocsRepository(backend) + + def create( + self, + run_id: int, + name: str, + constrained_to_indexsets: list[str], + column_names: list[str] | None = None, + ) -> Parameter: + return super().create( + name=name, + run_id=run_id, + constrained_to_indexsets=constrained_to_indexsets, + column_names=column_names, + ) + + def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> None: + if isinstance(data, pd.DataFrame): + # data will always contains str, not only Hashable + data: dict[str, Any] = data.to_dict(orient="list") # type: ignore + kwargs = {"data": data} + self._request( + method="PATCH", path=self.prefix + str(parameter_id) + "/data/", json=kwargs + ) + + def get(self, run_id: int, name: str) -> Parameter: + return super().get(run_id=run_id, name=name) + + def get_by_id(self, id: int) -> Parameter: + res = self._get_by_id(id) + return Parameter(**res) + + def list(self, *args, **kwargs) -> Iterable[Parameter]: + return super().list(*args, **kwargs) + + def tabulate(self, *args, **kwargs) -> pd.DataFrame: + return super().tabulate(*args, **kwargs) + + def enumerate(self, *args, **kwargs) -> Iterable[Parameter] | pd.DataFrame: + return super().enumerate(*args, **kwargs) diff --git a/ixmp4/data/backend/api.py b/ixmp4/data/backend/api.py index d03f7178..386e9572 100644 --- a/ixmp4/data/backend/api.py +++ b/ixmp4/data/backend/api.py @@ -13,6 +13,7 @@ DataPointRepository, IndexSetRepository, ModelRepository, + ParameterRepository, RunMetaEntryRepository, RunRepository, ScalarRepository, @@ -113,6 +114,7 @@ def create_repositories(self): self.meta = RunMetaEntryRepository(self) self.models = ModelRepository(self) self.optimization.indexsets = IndexSetRepository(self) + self.optimization.parameters = ParameterRepository(self) self.optimization.scalars = ScalarRepository(self) self.optimization.tables = TableRepository(self) self.regions = RegionRepository(self) diff --git a/ixmp4/data/backend/base.py b/ixmp4/data/backend/base.py index 5cfbfcdc..87a9384a 100644 --- a/ixmp4/data/backend/base.py +++ b/ixmp4/data/backend/base.py @@ -3,6 +3,7 @@ DataPointRepository, IndexSetRepository, ModelRepository, + ParameterRepository, RegionRepository, RunMetaEntryRepository, RunRepository, @@ -23,6 +24,7 @@ class IamcSubobject(object): class OptimizationSubobject(object): indexsets: IndexSetRepository + parameters: ParameterRepository scalars: ScalarRepository tables: TableRepository diff --git a/ixmp4/data/backend/db.py b/ixmp4/data/backend/db.py index ac68a394..bd65eb8a 100644 --- a/ixmp4/data/backend/db.py +++ b/ixmp4/data/backend/db.py @@ -16,6 +16,7 @@ DataPointRepository, IndexSetRepository, 
ModelRepository, + ParameterRepository, RegionRepository, RunMetaEntryRepository, RunRepository, @@ -51,6 +52,7 @@ class IamcSubobject(BaseIamcSubobject): class OptimizationSubobject(BaseOptimizationSubobject): indexsets: IndexSetRepository + parameters: ParameterRepository scalars: ScalarRepository tables: TableRepository @@ -96,6 +98,7 @@ def make_repositories(self): self.meta = RunMetaEntryRepository(self) self.models = ModelRepository(self) self.optimization.indexsets = IndexSetRepository(self) + self.optimization.parameters = ParameterRepository(self) self.optimization.scalars = ScalarRepository(self) self.optimization.tables = TableRepository(self) self.regions = RegionRepository(self) diff --git a/ixmp4/data/db/__init__.py b/ixmp4/data/db/__init__.py index 9cb53d87..4d7827ae 100644 --- a/ixmp4/data/db/__init__.py +++ b/ixmp4/data/db/__init__.py @@ -22,6 +22,8 @@ ColumnRepository, IndexSet, IndexSetRepository, + Parameter, + ParameterRepository, Scalar, ScalarRepository, Table, diff --git a/ixmp4/data/db/filters/__init__.py b/ixmp4/data/db/filters/__init__.py index d0a5585d..92819c74 100644 --- a/ixmp4/data/db/filters/__init__.py +++ b/ixmp4/data/db/filters/__init__.py @@ -2,6 +2,7 @@ from .model import ModelFilter from .optimizationcolumn import OptimizationColumnFilter from .optimizationindexset import OptimizationIndexSetFilter +from .optimizationparameter import OptimizationParameterFilter from .optimizationscalar import OptimizationScalarFilter from .optimizationtable import OptimizationTableFilter from .region import RegionFilter diff --git a/ixmp4/data/db/filters/optimizationparameter.py b/ixmp4/data/db/filters/optimizationparameter.py new file mode 100644 index 00000000..5fe142a6 --- /dev/null +++ b/ixmp4/data/db/filters/optimizationparameter.py @@ -0,0 +1,17 @@ +from typing import ClassVar + +from ixmp4.db import filters + +from .. 
import Parameter, Run + + +class OptimizationParameterFilter(filters.BaseFilter, metaclass=filters.FilterMeta): + id: filters.Id + name: filters.String + run__id: filters.Integer = filters.Field(None, alias="run_id") + + sqla_model: ClassVar[type] = Parameter + + def join(self, exc, **kwargs): + exc = exc.join(Run, onclause=Parameter.run__id == Run.id) + return exc diff --git a/ixmp4/data/db/iamc/base.py b/ixmp4/data/db/iamc/base.py index 65ec5567..98b56581 100644 --- a/ixmp4/data/db/iamc/base.py +++ b/ixmp4/data/db/iamc/base.py @@ -7,7 +7,6 @@ Deleter, Enumerator, Lister, - NameMixin, Retriever, Selecter, Tabulator, diff --git a/ixmp4/data/db/optimization/__init__.py b/ixmp4/data/db/optimization/__init__.py index 7bbc8fc6..cf9d594b 100644 --- a/ixmp4/data/db/optimization/__init__.py +++ b/ixmp4/data/db/optimization/__init__.py @@ -1,4 +1,5 @@ from .column import Column, ColumnRepository from .indexset import IndexSet, IndexSetRepository +from .parameter import Parameter, ParameterRepository from .scalar import Scalar, ScalarRepository from .table import Table, TableRepository diff --git a/ixmp4/data/db/optimization/base.py b/ixmp4/data/db/optimization/base.py index 924a12e8..7c3cc084 100644 --- a/ixmp4/data/db/optimization/base.py +++ b/ixmp4/data/db/optimization/base.py @@ -12,10 +12,7 @@ Deleter, Enumerator, Lister, - OptimizationDataMixin, - OptimizationNameMixin, Retriever, - RunIDMixin, Selecter, Tabulator, ) diff --git a/ixmp4/data/db/optimization/column/model.py b/ixmp4/data/db/optimization/column/model.py index 2ee1a072..d1551013 100644 --- a/ixmp4/data/db/optimization/column/model.py +++ b/ixmp4/data/db/optimization/column/model.py @@ -1,7 +1,5 @@ from typing import ClassVar -from sqlalchemy import UniqueConstraint - from ixmp4 import db from ixmp4.data import types from ixmp4.data.abstract import optimization as abstract @@ -20,15 +18,18 @@ class Column(base.BaseModel): db.String(255), nullable=False, unique=False ) # pandas dtype - table__id: types.Mapped[int] = db.Column( - db.Integer, db.ForeignKey("optimization_table.id"), index=True + table__id: types.Mapped[int | None] = db.Column( + db.Integer, db.ForeignKey("optimization_table.id"), nullable=True + ) + parameter__id: types.Mapped[int | None] = db.Column( + db.Integer, db.ForeignKey("optimization_parameter.id"), nullable=True ) indexset: types.Mapped[IndexSet] = db.relationship(single_parent=True) - constrained_to_indexset: types.Mapped[int] = db.Column( + constrained_to_indexset: types.Integer = db.Column( db.Integer, db.ForeignKey("optimization_indexset.id"), index=True ) # Currently not in use: unique: types.Boolean = db.Column(db.Boolean, default=True) - __table_args__ = (UniqueConstraint("name", "table__id"),) + __table_args__ = (db.UniqueConstraint("name", "table__id"),) diff --git a/ixmp4/data/db/optimization/column/repository.py b/ixmp4/data/db/optimization/column/repository.py index 6669d462..f151e540 100644 --- a/ixmp4/data/db/optimization/column/repository.py +++ b/ixmp4/data/db/optimization/column/repository.py @@ -21,17 +21,19 @@ def __init__(self, *args, **kwargs) -> None: def add( self, - table_id: int, name: str, - dtype: str, constrained_to_indexset: str, + dtype: str, + parameter_id: int, + table_id: int, unique: bool, ) -> Column: column = Column( - table__id=table_id, name=name, - dtype=dtype, constrained_to_indexset=constrained_to_indexset, + dtype=dtype, + parameter__id=parameter_id, + table__id=table_id, unique=unique, ) self.session.add(column) @@ -40,27 +42,32 @@ def add( @guard("edit") def 
create(
         self,
-        table_id: int,
         name: str,
-        dtype: str,
         constrained_to_indexset: int,
-        unique: bool,
+        dtype: str,
+        parameter_id: int | None = None,
+        table_id: int | None = None,
+        unique: bool = True,
         **kwargs,
     ) -> Column:
         """Creates a Column.
 
         Parameters
         ----------
-        table_id : int
-            The unique integer id of the :class:`ixmp4.data.abstract.optimization.Table`
-            this Column belongs to.
         name : str
             The unique name of the Column.
-        dtype : str
-            The pandas-inferred type of the Column's data.
         constrained_to_indexset : int
             The id of an :class:`ixmp4.data.abstract.optimization.IndexSet`, which must
             contain all values used as entries in this Column.
+        dtype : str
+            The pandas-inferred type of the Column's data.
+        parameter_id : int | None, default None
+            The unique integer id of the
+            :class:`ixmp4.data.abstract.optimization.Parameter` this Column belongs to,
+            if it belongs to a `Parameter`.
+        table_id : int | None, default None
+            The unique integer id of the :class:`ixmp4.data.abstract.optimization.Table`
+            this Column belongs to, if it belongs to a `Table`.
         unique : bool
             A bool to determine whether entries in this Column should be considered for
             evaluating uniqueness of keys. Defaults to True.
@@ -77,10 +84,11 @@ def create(
             The created Column.
         """
         return super().create(
-            table_id=table_id,
             name=name,
-            dtype=dtype,
             constrained_to_indexset=constrained_to_indexset,
+            dtype=dtype,
+            parameter_id=parameter_id,
+            table_id=table_id,
             unique=unique,
             **kwargs,
         )
diff --git a/ixmp4/data/db/optimization/indexset/model.py b/ixmp4/data/db/optimization/indexset/model.py
index 9bb0f41d..896692a4 100644
--- a/ixmp4/data/db/optimization/indexset/model.py
+++ b/ixmp4/data/db/optimization/indexset/model.py
@@ -10,7 +10,7 @@
 from .. import base
 
 
-class IndexSet(base.BaseModel, base.UniqueNameRunIDMixin):
+class IndexSet(base.BaseModel):
     NotFound: ClassVar = abstract.IndexSet.NotFound
     NotUnique: ClassVar = abstract.IndexSet.NotUnique
     DataInvalid: ClassVar = OptimizationDataValidationError
diff --git a/ixmp4/data/db/optimization/parameter/__init__.py b/ixmp4/data/db/optimization/parameter/__init__.py
new file mode 100644
index 00000000..01df8d91
--- /dev/null
+++ b/ixmp4/data/db/optimization/parameter/__init__.py
@@ -0,0 +1,2 @@
+from .model import Parameter
+from .repository import ParameterRepository
diff --git a/ixmp4/data/db/optimization/parameter/docs.py b/ixmp4/data/db/optimization/parameter/docs.py
new file mode 100644
index 00000000..db1cb774
--- /dev/null
+++ b/ixmp4/data/db/optimization/parameter/docs.py
@@ -0,0 +1,8 @@
+from ixmp4.data.db.docs import BaseDocsRepository, docs_model
+
+from .model import Parameter
+
+
+class ParameterDocsRepository(BaseDocsRepository):
+    model_class = docs_model(Parameter)  # ParameterDocs
+    dimension_model_class = Parameter
diff --git a/ixmp4/data/db/optimization/parameter/filter.py b/ixmp4/data/db/optimization/parameter/filter.py
new file mode 100644
index 00000000..cbd913bb
--- /dev/null
+++ b/ixmp4/data/db/optimization/parameter/filter.py
@@ -0,0 +1,19 @@
+from ixmp4.data.db import filters as base
+from ixmp4.data.db.run import Run
+from ixmp4.db import filters, utils
+
+from .model import Parameter
+
+
+class RunFilter(base.RunFilter, metaclass=filters.FilterMeta):
+    def join(self, exc, **kwargs):
+        if not utils.is_joined(exc, Run):
+            exc = exc.join(Run, onclause=Parameter.run__id == Run.id)
+        return exc
+
+
+class OptimizationParameterFilter(
+    base.OptimizationParameterFilter, metaclass=filters.FilterMeta
+):
+    def join(self, exc, session=None):
+        return exc
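
Usage note for PATCH 05: the Parameter layer introduced above is exercised end-to-end by the tests added later in this series. The following minimal sketch of the resulting core-facade workflow is inferred from those tests; the platform, model, scenario, indexset, and unit names are illustrative assumptions, not part of this changeset.

import ixmp4

# Assumes a locally configured platform named "example"; any configured
# platform name works here.
platform = ixmp4.Platform("example")
run = platform.runs.create("Model", "Scenario")

# Parameter columns must be constrained to existing IndexSets, and the
# "units" entries must reference Units that already exist on the platform.
indexset = run.optimization.indexsets.create("Indexset")
indexset.add(elements=["foo", "bar"])
unit = platform.units.create("Unit")

parameter = run.optimization.parameters.create(
    "Parameter", constrained_to_indexsets=["Indexset"]
)
parameter.add(
    data={
        "Indexset": ["foo", "bar"],
        "values": [1.0, 2.0],
        "units": [unit.name, unit.name],
    }
)
print(parameter.data)  # {"Indexset": [...], "values": [...], "units": [...]}
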
diff --git a/ixmp4/data/db/optimization/parameter/model.py b/ixmp4/data/db/optimization/parameter/model.py new file mode 100644 index 00000000..3199675d --- /dev/null +++ b/ixmp4/data/db/optimization/parameter/model.py @@ -0,0 +1,37 @@ +import copy +from typing import Any, ClassVar + +from sqlalchemy.orm import validates + +from ixmp4 import db +from ixmp4.data import types +from ixmp4.data.abstract import optimization as abstract + +from .. import Column, base, utils + + +class Parameter(base.BaseModel): + # NOTE: These might be mixin-able, but would require some abstraction + NotFound: ClassVar = abstract.Parameter.NotFound + NotUnique: ClassVar = abstract.Parameter.NotUnique + DeletionPrevented: ClassVar = abstract.Parameter.DeletionPrevented + + # constrained_to_indexsets: ClassVar[list[str] | None] = None + + run__id: types.RunId + columns: types.Mapped[list["Column"]] = db.relationship() + data: types.JsonDict = db.Column(db.JsonType, nullable=False, default={}) + + @validates("data") + def validate_data(self, key, data: dict[str, Any]): + data_to_validate = copy.deepcopy(data) + del data_to_validate["values"] + del data_to_validate["units"] + _ = utils.validate_data( + key=key, + data=data_to_validate, + columns=self.columns, + ) + return data + + __table_args__ = (db.UniqueConstraint("name", "run__id"),) diff --git a/ixmp4/data/db/optimization/parameter/repository.py b/ixmp4/data/db/optimization/parameter/repository.py new file mode 100644 index 00000000..735194fe --- /dev/null +++ b/ixmp4/data/db/optimization/parameter/repository.py @@ -0,0 +1,175 @@ +from typing import Any, Iterable + +import pandas as pd + +from ixmp4 import db +from ixmp4.data.abstract import optimization as abstract +from ixmp4.data.auth.decorators import guard +from ixmp4.data.db.unit import Unit + +from .. import ColumnRepository, base +from .docs import ParameterDocsRepository +from .model import Parameter + + +class ParameterRepository( + base.Creator[Parameter], + base.Retriever[Parameter], + base.Enumerator[Parameter], + abstract.ParameterRepository, +): + model_class = Parameter + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.docs = ParameterDocsRepository(*args, **kwargs) + self.columns = ColumnRepository(*args, **kwargs) + + from .filter import OptimizationParameterFilter + + self.filter_class = OptimizationParameterFilter + + def _add_column( + self, + run_id: int, + parameter_id: int, + column_name: str, + indexset_name: str, + **kwargs, + ) -> None: + r"""Adds a Column to a Parameter. + + Parameters + ---------- + run_id : int + The id of the :class:`ixmp4.data.abstract.Run` for which the + :class:`ixmp4.data.abstract.optimization.Parameter` is defined. + parameter_id : int + The id of the :class:`ixmp4.data.abstract.optimization.Parameter`. + column_name : str + The name of the Column, which must be unique in connection with the names of + :class:`ixmp4.data.abstract.Run` and + :class:`ixmp4.data.abstract.optimization.Parameter`. + indexset_name : str + The name of the :class:`ixmp4.data.abstract.optimization.IndexSet` the + Column will be linked to. + \*\*kwargs: any + Keyword arguments to be passed to + :func:`ixmp4.data.abstract.optimization.Column.create`. 
+        """
+        indexset = self.backend.optimization.indexsets.get(
+            run_id=run_id, name=indexset_name
+        )
+        self.columns.create(
+            name=column_name,
+            constrained_to_indexset=indexset.id,
+            dtype=pd.Series(indexset.elements).dtype.name,
+            parameter_id=parameter_id,
+            unique=True,
+            **kwargs,
+        )
+
+    def add(
+        self,
+        run_id: int,
+        name: str,
+    ) -> Parameter:
+        parameter = Parameter(name=name, run__id=run_id)
+        parameter.set_creation_info(auth_context=self.backend.auth_context)
+        self.session.add(parameter)
+
+        return parameter
+
+    @guard("view")
+    def get(self, run_id: int, name: str) -> Parameter:
+        exc = db.select(Parameter).where(
+            (Parameter.name == name) & (Parameter.run__id == run_id)
+        )
+        try:
+            return self.session.execute(exc).scalar_one()
+        except db.NoResultFound:
+            raise Parameter.NotFound
+
+    @guard("view")
+    def get_by_id(self, id: int) -> Parameter:
+        obj = self.session.get(self.model_class, id)
+
+        if obj is None:
+            raise Parameter.NotFound(id=id)
+
+        return obj
+
+    @guard("edit")
+    def create(
+        self,
+        run_id: int,
+        name: str,
+        constrained_to_indexsets: list[str],
+        column_names: list[str] | None = None,
+        **kwargs,
+    ) -> Parameter:
+        # Wrap a single name in a list to avoid enumerate() splitting strings to letters
+        if isinstance(constrained_to_indexsets, str):
+            constrained_to_indexsets = [constrained_to_indexsets]
+        if column_names and len(column_names) != len(constrained_to_indexsets):
+            raise ValueError(
+                "`constrained_to_indexsets` and `column_names` not equal in length! "
+                "Please provide the same number of entries for both!"
+            )
+        # TODO: activate something like this if each column must be indexed by a unique
+        # indexset
+        # if len(constrained_to_indexsets) != len(set(constrained_to_indexsets)):
+        #     raise ValueError("Each dimension must be constrained to a unique indexset!") # noqa
+        if column_names and len(column_names) != len(set(column_names)):
+            raise ValueError("The given `column_names` are not unique!")
+
+        parameter = super().create(
+            run_id=run_id,
+            name=name,
+            **kwargs,
+        )
+        for i, name in enumerate(constrained_to_indexsets):
+            self._add_column(
+                run_id=run_id,
+                parameter_id=parameter.id,
+                column_name=column_names[i] if column_names else name,
+                indexset_name=name,
+            )
+
+        return parameter
+
+    @guard("view")
+    def list(self, *args, **kwargs) -> Iterable[Parameter]:
+        return super().list(*args, **kwargs)
+
+    @guard("view")
+    def tabulate(self, *args, **kwargs) -> pd.DataFrame:
+        return super().tabulate(*args, **kwargs)
+
+    @guard("edit")
+    def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> None:
+        if isinstance(data, dict):
+            data = pd.DataFrame.from_dict(data=data)
+        parameter = self.get_by_id(id=parameter_id)
+
+        missing_columns = set(["values", "units"]) - set(data.columns)
+        assert (
+            not missing_columns
+        ), f"Parameter.data must include the column(s): {', '.join(missing_columns)}!"
+
+        # Can use a set for now, need full column if we care about order
+        for unit_name in set(data["units"]):
+            try:
+                self.backend.units.get(name=unit_name)
+            except Unit.NotFound as e:
+                # TODO Add a helpful hint on how to check defined Units
+                raise Unit.NotFound(
+                    message=f"'{unit_name}' is not defined for this Platform!"
+ ) from e + + parameter.data = pd.concat( + [pd.DataFrame.from_dict(parameter.data), data] + ).to_dict(orient="list") + + self.session.add(parameter) + self.session.commit() diff --git a/ixmp4/data/db/optimization/scalar/model.py b/ixmp4/data/db/optimization/scalar/model.py index 3dac531e..c364f807 100644 --- a/ixmp4/data/db/optimization/scalar/model.py +++ b/ixmp4/data/db/optimization/scalar/model.py @@ -8,7 +8,7 @@ from .. import base -class Scalar(base.BaseModel, base.UniqueNameRunIDMixin): +class Scalar(base.BaseModel): NotFound: ClassVar = abstract.Scalar.NotFound NotUnique: ClassVar = abstract.Scalar.NotUnique DeletionPrevented: ClassVar = abstract.Scalar.DeletionPrevented diff --git a/ixmp4/data/db/optimization/table/model.py b/ixmp4/data/db/optimization/table/model.py index 654f5dea..ea99cd11 100644 --- a/ixmp4/data/db/optimization/table/model.py +++ b/ixmp4/data/db/optimization/table/model.py @@ -11,11 +11,7 @@ from .. import Column, base, utils -class Table( - base.BaseModel, - base.OptimizationDataMixin, - base.UniqueNameRunIDMixin, -): +class Table(base.BaseModel): # NOTE: These might be mixin-able, but would require some abstraction NotFound: ClassVar = abstract.Table.NotFound NotUnique: ClassVar = abstract.Table.NotUnique diff --git a/ixmp4/data/db/optimization/table/repository.py b/ixmp4/data/db/optimization/table/repository.py index 17738a3d..6edf0bf5 100644 --- a/ixmp4/data/db/optimization/table/repository.py +++ b/ixmp4/data/db/optimization/table/repository.py @@ -63,10 +63,10 @@ def _add_column( run_id=run_id, name=indexset_name ) self.columns.create( - table_id=table_id, name=column_name, - dtype=pd.Series(indexset.elements).dtype.name, constrained_to_indexset=indexset.id, + dtype=pd.Series(indexset.elements).dtype.name, + table_id=table_id, unique=True, **kwargs, ) diff --git a/ixmp4/server/rest/__init__.py b/ixmp4/server/rest/__init__.py index 2664dcad..44d19d3d 100644 --- a/ixmp4/server/rest/__init__.py +++ b/ixmp4/server/rest/__init__.py @@ -17,7 +17,10 @@ from .iamc import unit as iamc_unit from .iamc import variable as iamc_variable from .middleware import RequestSizeLoggerMiddleware, RequestTimeLoggerMiddleware -from .optimization import indexset, scalar, table +from .optimization import indexset as optimization_indexset +from .optimization import parameter as optimization_parameter +from .optimization import scalar as optimization_scalar +from .optimization import table as optimization_table v1 = FastAPI( servers=[{"url": "/v1", "description": "v1"}], @@ -45,14 +48,15 @@ v1.include_router(iamc_region.router, prefix="/iamc") v1.include_router(iamc_unit.router, prefix="/iamc") v1.include_router(iamc_variable.router, prefix="/iamc") -v1.include_router(indexset.router, prefix="/optimization") v1.include_router(meta.router) v1.include_router(model.router) +v1.include_router(optimization_indexset.router, prefix="/optimization") +v1.include_router(optimization_parameter.router, prefix="/optimization") +v1.include_router(optimization_scalar.router, prefix="/optimization") +v1.include_router(optimization_table.router, prefix="/optimization") v1.include_router(region.router) v1.include_router(run.router) -v1.include_router(scalar.router, prefix="/optimization") v1.include_router(scenario.router) -v1.include_router(table.router, prefix="/optimization") v1.include_router(timeseries.router, prefix="/iamc") v1.include_router(unit.router) diff --git a/ixmp4/server/rest/docs.py b/ixmp4/server/rest/docs.py index b533d012..9d276da1 100644 --- a/ixmp4/server/rest/docs.py +++ 
b/ixmp4/server/rest/docs.py @@ -237,3 +237,27 @@ def delete_tables( backend: Backend = Depends(deps.get_backend), ): return backend.optimization.tables.docs.delete(dimension_id) + + +@router.get("/optimization/parameters/", response_model=list[api.Docs]) +def list_parameters( + dimension_id: int | None = Query(None), + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.docs.list(dimension_id=dimension_id) + + +@router.post("/optimization/parameters/", response_model=api.Docs) +def set_parameters( + docs: DocsInput, + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.docs.set(**docs.model_dump()) + + +@router.delete("/optimization/parameters/{dimension_id}/") +def delete_parameters( + dimension_id: int = Path(), + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.docs.delete(dimension_id) diff --git a/ixmp4/server/rest/optimization/__init__.py b/ixmp4/server/rest/optimization/__init__.py index 30e4170b..05c699a3 100644 --- a/ixmp4/server/rest/optimization/__init__.py +++ b/ixmp4/server/rest/optimization/__init__.py @@ -1 +1 @@ -from . import indexset, scalar, table +from . import indexset, parameter, scalar, table diff --git a/ixmp4/server/rest/optimization/parameter.py b/ixmp4/server/rest/optimization/parameter.py new file mode 100644 index 00000000..3f8993b3 --- /dev/null +++ b/ixmp4/server/rest/optimization/parameter.py @@ -0,0 +1,79 @@ +from typing import Any + +from fastapi import APIRouter, Body, Depends, Query + +from ixmp4.data import api +from ixmp4.data.backend.db import SqlAlchemyBackend as Backend +from ixmp4.data.db.optimization.parameter.filter import OptimizationParameterFilter + +from .. import deps +from ..base import BaseModel, EnumerationOutput, Pagination +from ..decorators import autodoc + +router: APIRouter = APIRouter( + prefix="/parameters", + tags=["optimization", "parameters"], +) + + +class ParameterCreateInput(BaseModel): + run_id: int + name: str + constrained_to_indexsets: list[str] + column_names: list[str] | None + + +class DataInput(BaseModel): + data: dict[str, Any] + + +@autodoc +@router.get("/{id}/", response_model=api.Parameter) +def get_by_id( + id: int, + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.get_by_id(id) + + +@autodoc +@router.patch("/", response_model=EnumerationOutput[api.Parameter]) +def query( + filter: OptimizationParameterFilter = Body( + OptimizationParameterFilter(id=None, name=None) + ), + table: bool = Query(False), + pagination: Pagination = Depends(), + backend: Backend = Depends(deps.get_backend), +): + return EnumerationOutput( + results=backend.optimization.parameters.paginate( + _filter=filter, + limit=pagination.limit, + offset=pagination.offset, + table=bool(table), + ), + total=backend.optimization.parameters.count(_filter=filter), + pagination=pagination, + ) + + +@autodoc +@router.patch("/{parameter_id}/data/") +def add_data( + parameter_id: int, + data: DataInput, + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.add_data( + parameter_id=parameter_id, **data.model_dump() + ) + + +@autodoc +@router.post("/", response_model=api.Parameter) +def create( + parameter: ParameterCreateInput, + backend: Backend = Depends(deps.get_backend), +): + return backend.optimization.parameters.create(**parameter.model_dump()) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py new file 
mode 100644 index 00000000..63a3da5d --- /dev/null +++ b/tests/core/test_optimization_parameter.py @@ -0,0 +1,323 @@ +import pandas as pd +import pytest + +from ixmp4 import Parameter, Platform + +from ..utils import all_platforms + + +def df_from_list(parameters: list): + return pd.DataFrame( + [ + [ + parameter.run_id, + parameter.data, + parameter.name, + parameter.id, + parameter.created_at, + parameter.created_by, + ] + for parameter in parameters + ], + columns=[ + "run__id", + "data", + "name", + "id", + "created_at", + "created_by", + ], + ) + + +@all_platforms +class TestDataOptimizationParameter: + def test_create_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + + # Test normal creation + indexset_1 = run.optimization.indexsets.create("Indexset") + parameter = run.optimization.parameters.create( + name="Parameter", + constrained_to_indexsets=["Indexset"], + ) + + assert parameter.run_id == run.id + assert parameter.name == "Parameter" + assert parameter.data == {} # JsonDict type currently requires a dict, not None + assert parameter.columns[0].name == "Indexset" + assert parameter.constrained_to_indexsets == [indexset_1.name] + assert parameter.values == [] + assert parameter.units == [] + + # Test duplicate name raises + with pytest.raises(Parameter.NotUnique): + _ = run.optimization.parameters.create( + "Parameter", constrained_to_indexsets=["Indexset"] + ) + + # Test mismatch in constrained_to_indexsets and column_names raises + with pytest.raises(ValueError, match="not equal in length"): + _ = run.optimization.parameters.create( + "Parameter 2", + constrained_to_indexsets=["Indexset"], + column_names=["Dimension 1", "Dimension 2"], + ) + + # Test columns_names are used for names if given + parameter_2 = run.optimization.parameters.create( + "Parameter 2", + constrained_to_indexsets=[indexset_1.name], + column_names=["Column 1"], + ) + assert parameter_2.columns[0].name == "Column 1" + + # Test duplicate column_names raise + with pytest.raises(ValueError, match="`column_names` are not unique"): + _ = run.optimization.parameters.create( + name="Parameter 3", + constrained_to_indexsets=[indexset_1.name, indexset_1.name], + column_names=["Column 1", "Column 1"], + ) + + # Test column.dtype is registered correctly + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=2024) + parameter_3 = run.optimization.parameters.create( + "Parameter 5", + constrained_to_indexsets=["Indexset", indexset_2.name], + ) + # If indexset doesn't have elements, a generic dtype is registered + assert parameter_3.columns[0].dtype == "object" + assert parameter_3.columns[1].dtype == "int64" + + def test_get_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + _ = run.optimization.parameters.create( + name="Parameter", constrained_to_indexsets=["Indexset"] + ) + parameter = run.optimization.parameters.get(name="Parameter") + assert parameter.run_id == run.id + assert parameter.id == 1 + assert parameter.name == "Parameter" + assert parameter.data == {} + assert parameter.values == [] + assert parameter.units == [] + assert parameter.columns[0].name == indexset.name + assert parameter.constrained_to_indexsets == [indexset.name] + + with pytest.raises(Parameter.NotFound): + _ = 
run.optimization.parameters.get("Parameter 2") + + def test_parameter_add_data(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + unit = test_mp.units.create("Unit") + indexset_1 = run.optimization.indexsets.create("Indexset") + indexset_1.add(elements=["foo", "bar", ""]) + indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset_2.add(elements=[1, 2, 3]) + # pandas can only convert dicts to dataframes if the values are lists + # or if index is given. But maybe using read_json instead of from_dict + # can remedy this. Or maybe we want to catch the resulting + # "ValueError: If using all scalar values, you must pass an index" and + # reraise a custom informative error? + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "values": [3.14], + "units": [unit.name], + } + parameter = run.optimization.parameters.create( + "Parameter", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + parameter.add(data=test_data_1) + assert parameter.data == test_data_1 + assert parameter.values == test_data_1["values"] + assert parameter.units == test_data_1["units"] + + parameter_2 = run.optimization.parameters.create( + name="Parameter 2", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): values!" + ): + parameter_2.add( + pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "units": [unit.name], + } + ), + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): units!" + ): + parameter_2.add( + data=pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "values": [""], + } + ), + ) + + # By converting data to pd.DataFrame, we automatically enforce equal length + # of new columns, raises All arrays must be of the same length otherwise: + with pytest.raises(ValueError, match="All arrays must be of the same length"): + parameter_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "values": [1, 2], + "units": [unit.name], + }, + ) + + with pytest.raises(ValueError, match="contains duplicate rows"): + parameter_2.add( + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "values": [1, 2], + "units": [unit.name, unit.name], + }, + ) + + # Test that order is conserved + test_data_2 = { + "Indexset": ["", "", "foo", "foo", "bar", "bar"], + "Indexset 2": [3, 1, 2, 1, 2, 3], + "values": [6, 5, 4, 3, 2, 1], + "units": [unit.name] * 6, + } + parameter_2.add(test_data_2) + assert parameter_2.data == test_data_2 + assert parameter_2.values == test_data_2["values"] + assert parameter_2.units == test_data_2["units"] + + # Test order is conserved with varying types and upon later addition of data + parameter_3 = run.optimization.parameters.create( + name="Parameter 3", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + column_names=["Column 1", "Column 2"], + ) + unit_2 = test_mp.units.create("Unit 2") + unit_3 = test_mp.units.create("Unit 3") + + test_data_3 = { + "Column 1": ["bar", "foo", ""], + "Column 2": [2, 3, 1], + "values": ["3", 2.0, 1], + "units": [unit_3.name, unit_2.name, unit.name], + } + parameter_3.add(data=test_data_3) + assert parameter_3.data == test_data_3 + assert parameter_3.values == test_data_3["values"] + assert parameter_3.units == test_data_3["units"] + + test_data_4 = { + "Column 1": ["foo", "", "bar"], + "Column 2": [2, 3, 1], + "values": [3.14, 2, "1"], + "units": 
[unit_2.name, unit.name, unit_3.name], + } + parameter_3.add(data=test_data_4) + test_data_5 = test_data_3.copy() + for key, value in test_data_4.items(): + test_data_5[key].extend(value) + assert parameter_3.data == test_data_5 + assert parameter_3.values == test_data_5["values"] + assert parameter_3.units == test_data_5["units"] + + def test_list_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, list() lists scalars for `default` version runs: + run.set_as_default() + _ = run.optimization.indexsets.create("Indexset") + _ = run.optimization.indexsets.create("Indexset 2") + parameter = run.optimization.parameters.create( + "Parameter", constrained_to_indexsets=["Indexset"] + ) + parameter_2 = run.optimization.parameters.create( + "Parameter 2", constrained_to_indexsets=["Indexset 2"] + ) + expected_ids = [parameter.id, parameter_2.id] + list_ids = [parameter.id for parameter in run.optimization.parameters.list()] + assert not (set(expected_ids) ^ set(list_ids)) + + # Test retrieving just one result by providing a name + expected_id = [parameter.id] + list_id = [ + parameter.id + for parameter in run.optimization.parameters.list(name="Parameter") + ] + assert not (set(expected_id) ^ set(list_id)) + + def test_tabulate_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + # Per default, tabulate() lists scalars for `default` version runs: + run.set_as_default() + indexset = run.optimization.indexsets.create("Indexset") + indexset_2 = run.optimization.indexsets.create("Indexset 2") + parameter = run.optimization.parameters.create( + name="Parameter", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + parameter_2 = run.optimization.parameters.create( + name="Parameter 2", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + pd.testing.assert_frame_equal( + df_from_list([parameter_2]), + run.optimization.parameters.tabulate(name="Parameter 2"), + ) + + unit = test_mp.units.create("Unit") + unit_2 = test_mp.units.create("Unit 2") + indexset.add(elements=["foo", "bar"]) + indexset_2.add(elements=[1, 2, 3]) + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "values": ["value"], + "units": [unit.name], + } + parameter.add(data=test_data_1) + + test_data_2 = { + "Indexset 2": [2, 3], + "Indexset": ["foo", "bar"], + "values": [1, "value"], + "units": [unit.name, unit_2.name], + } + parameter_2.add(data=test_data_2) + pd.testing.assert_frame_equal( + df_from_list([parameter, parameter_2]), + run.optimization.parameters.tabulate(), + ) + + def test_parameter_docs(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.runs.create("Model", "Scenario") + indexset = run.optimization.indexsets.create("Indexset") + parameter_1 = run.optimization.parameters.create( + "Parameter 1", constrained_to_indexsets=[indexset.name] + ) + docs = "Documentation of Parameter 1" + parameter_1.docs = docs + assert parameter_1.docs == docs + + parameter_1.docs = None + assert parameter_1.docs is None diff --git a/tests/core/test_table.py b/tests/core/test_table.py index ab82877c..0fd081c9 100644 --- a/tests/core/test_table.py +++ b/tests/core/test_table.py @@ -21,11 +21,8 @@ def df_from_list(tables: list[Table]): table.data, table.name, table.id, - table.data, - table.name, table.created_at, table.created_by, - 
table.run_id, ] for table in tables ], @@ -34,11 +31,8 @@ def df_from_list(tables: list[Table]): "data", "name", "id", - "data", - "name", "created_at", "created_by", - "run__id", ], ) diff --git a/tests/data/test_docs.py b/tests/data/test_docs.py index 32002dec..bff26656 100644 --- a/tests/data/test_docs.py +++ b/tests/data/test_docs.py @@ -365,3 +365,72 @@ def test_delete_tabledocs(self, platform: ixmp4.Platform): with pytest.raises(Docs.NotFound): platform.backend.optimization.tables.docs.get(table.id) + + def test_get_and_set_parameterdocs(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + _ = platform.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + parameter = platform.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + docs_parameter = platform.backend.optimization.parameters.docs.set( + parameter.id, "Description of test Parameter" + ) + docs_parameter1 = platform.backend.optimization.parameters.docs.get( + parameter.id + ) + + assert docs_parameter == docs_parameter1 + + def test_change_empty_parameterdocs(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + _ = platform.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + parameter = platform.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + + with pytest.raises(Docs.NotFound): + platform.backend.optimization.parameters.docs.get(parameter.id) + + docs_parameter1 = platform.backend.optimization.parameters.docs.set( + parameter.id, "Description of test Parameter" + ) + + assert ( + platform.backend.optimization.parameters.docs.get(parameter.id) + == docs_parameter1 + ) + + docs_parameter2 = platform.backend.optimization.parameters.docs.set( + parameter.id, "Different description of test Parameter" + ) + + assert ( + platform.backend.optimization.parameters.docs.get(parameter.id) + == docs_parameter2 + ) + + def test_delete_parameterdocs(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + _ = platform.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + parameter = platform.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + docs_parameter = platform.backend.optimization.parameters.docs.set( + parameter.id, "Description of test Parameter" + ) + + assert ( + platform.backend.optimization.parameters.docs.get(parameter.id) + == docs_parameter + ) + + platform.backend.optimization.parameters.docs.delete(parameter.id) + + with pytest.raises(Docs.NotFound): + platform.backend.optimization.parameters.docs.get(parameter.id) diff --git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py new file mode 100644 index 00000000..260539f6 --- /dev/null +++ b/tests/data/test_optimization_parameter.py @@ -0,0 +1,366 @@ +import pandas as pd +import pytest + +from ixmp4 import Parameter, Platform + +from ..utils import all_platforms + + +def df_from_list(parameters: list): + return pd.DataFrame( + [ + [ + parameter.run__id, + parameter.data, + parameter.name, + parameter.id, + parameter.created_at, + parameter.created_by, + ] + for parameter in parameters + ], + columns=[ + "run__id", + "data", + "name", + "id", + "created_at", + "created_by", + ], + ) + + +@all_platforms +class TestDataOptimizationParameter: + def 
test_create_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.backend.runs.create("Model", "Scenario") + + # Test normal creation + indexset_1 = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + parameter = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter", + constrained_to_indexsets=["Indexset"], + ) + + assert parameter.run__id == run.id + assert parameter.name == "Parameter" + assert parameter.data == {} # JsonDict type currently requires a dict, not None + assert parameter.columns[0].name == "Indexset" + assert parameter.columns[0].constrained_to_indexset == indexset_1.id + + # Test duplicate name raises + with pytest.raises(Parameter.NotUnique): + _ = test_mp.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + + # Test mismatch in constrained_to_indexsets and column_names raises + with pytest.raises(ValueError, match="not equal in length"): + _ = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 2", + constrained_to_indexsets=["Indexset"], + column_names=["Dimension 1", "Dimension 2"], + ) + + # Test columns_names are used for names if given + parameter_2 = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 2", + constrained_to_indexsets=[indexset_1.name], + column_names=["Column 1"], + ) + assert parameter_2.columns[0].name == "Column 1" + + # Test duplicate column_names raise + with pytest.raises(ValueError, match="`column_names` are not unique"): + _ = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 3", + constrained_to_indexsets=[indexset_1.name, indexset_1.name], + column_names=["Column 1", "Column 1"], + ) + + # Test column.dtype is registered correctly + indexset_2 = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset 2" + ) + test_mp.backend.optimization.indexsets.add_elements( + indexset_2.id, elements=2024 + ) + indexset_2 = test_mp.backend.optimization.indexsets.get(run.id, indexset_2.name) + parameter_3 = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 5", + constrained_to_indexsets=["Indexset", indexset_2.name], + ) + # If indexset doesn't have elements, a generic dtype is registered + assert parameter_3.columns[0].dtype == "object" + assert parameter_3.columns[1].dtype == "int64" + + def test_get_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.backend.runs.create("Model", "Scenario") + _ = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + parameter = test_mp.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + assert parameter == test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter" + ) + + with pytest.raises(Parameter.NotFound): + _ = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 2" + ) + + def test_parameter_add_data(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.backend.runs.create("Model", "Scenario") + unit = test_mp.backend.units.create("Unit") + indexset_1 = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + test_mp.backend.optimization.indexsets.add_elements( + 
indexset_id=indexset_1.id, elements=["foo", "bar", ""] + ) + indexset_2 = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset 2" + ) + test_mp.backend.optimization.indexsets.add_elements( + indexset_id=indexset_2.id, elements=[1, 2, 3] + ) + # pandas can only convert dicts to dataframes if the values are lists + # or if index is given. But maybe using read_json instead of from_dict + # can remedy this. Or maybe we want to catch the resulting + # "ValueError: If using all scalar values, you must pass an index" and + # reraise a custom informative error? + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "values": [3.14], + "units": [unit.name], + } + parameter = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter.id, data=test_data_1 + ) + + parameter = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter" + ) + assert parameter.data == test_data_1 + + parameter_2 = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 2", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): values!" + ): + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, + data=pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "units": [unit.name], + } + ), + ) + + with pytest.raises( + AssertionError, match=r"must include the column\(s\): units!" + ): + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, + data=pd.DataFrame( + { + "Indexset": [None], + "Indexset 2": [2], + "values": [""], + } + ), + ) + + # By converting data to pd.DataFrame, we automatically enforce equal length + # of new columns, raises All arrays must be of the same length otherwise: + with pytest.raises(ValueError, match="All arrays must be of the same length"): + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "values": [1, 2], + "units": [unit.name], + }, + ) + + with pytest.raises(ValueError, match="contains duplicate rows"): + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, + data={ + "Indexset": ["foo", "foo"], + "Indexset 2": [2, 2], + "values": [1, 2], + "units": [unit.name, unit.name], + }, + ) + + # Test that order is conserved + test_data_2 = { + "Indexset": ["", "", "foo", "foo", "bar", "bar"], + "Indexset 2": [3, 1, 2, 1, 2, 3], + "values": [6, 5, 4, 3, 2, 1], + "units": [unit.name] * 6, + } + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, data=test_data_2 + ) + parameter_2 = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 2" + ) + assert parameter_2.data == test_data_2 + + # Test order is conserved with varying types and upon later addition of data + parameter_3 = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 3", + constrained_to_indexsets=[indexset_1.name, indexset_2.name], + column_names=["Column 1", "Column 2"], + ) + unit_2 = test_mp.backend.units.create("Unit 2") + unit_3 = test_mp.backend.units.create("Unit 3") + + test_data_3 = { + "Column 1": ["bar", "foo", ""], + "Column 2": [2, 3, 1], + "values": ["3", 2.0, 1], + "units": [unit_3.name, unit_2.name, unit.name], + } + 
test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_3.id, data=test_data_3 + ) + parameter_3 = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 3" + ) + assert parameter_3.data == test_data_3 + + test_data_4 = { + "Column 1": ["foo", "", "bar"], + "Column 2": [2, 3, 1], + "values": [3.14, 2, "1"], + "units": [unit_2.name, unit.name, unit_3.name], + } + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_3.id, data=test_data_4 + ) + parameter_3 = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 3" + ) + test_data_5 = test_data_3.copy() + for key, value in test_data_4.items(): + test_data_5[key].extend(value) + assert parameter_3.data == test_data_5 + + def test_list_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.backend.runs.create("Model", "Scenario") + # Per default, list() lists scalars for `default` version runs: + test_mp.backend.runs.set_as_default_version(run.id) + _ = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + _ = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset 2" + ) + parameter = test_mp.backend.optimization.parameters.create( + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + ) + parameter_2 = test_mp.backend.optimization.parameters.create( + run_id=run.id, name="Parameter 2", constrained_to_indexsets=["Indexset 2"] + ) + assert [ + parameter, + parameter_2, + ] == test_mp.backend.optimization.parameters.list() + + assert [parameter] == test_mp.backend.optimization.parameters.list( + name="Parameter" + ) + + def test_tabulate_parameter(self, test_mp, request): + test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore + run = test_mp.backend.runs.create("Model", "Scenario") + # Per default, tabulate() lists scalars for `default` version runs: + test_mp.backend.runs.set_as_default_version(run.id) + indexset = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset" + ) + indexset_2 = test_mp.backend.optimization.indexsets.create( + run_id=run.id, name="Indexset 2" + ) + parameter = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + parameter_2 = test_mp.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 2", + constrained_to_indexsets=["Indexset", "Indexset 2"], + ) + pd.testing.assert_frame_equal( + df_from_list([parameter_2]), + test_mp.backend.optimization.parameters.tabulate(name="Parameter 2"), + ) + + unit = test_mp.backend.units.create("Unit") + unit_2 = test_mp.backend.units.create("Unit 2") + test_mp.backend.optimization.indexsets.add_elements( + indexset_id=indexset.id, elements=["foo", "bar"] + ) + test_mp.backend.optimization.indexsets.add_elements( + indexset_id=indexset_2.id, elements=[1, 2, 3] + ) + test_data_1 = { + "Indexset": ["foo"], + "Indexset 2": [1], + "values": ["value"], + "units": [unit.name], + } + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter.id, data=test_data_1 + ) + parameter = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter" + ) + + test_data_2 = { + "Indexset 2": [2, 3], + "Indexset": ["foo", "bar"], + "values": [1, "value"], + "units": [unit.name, unit_2.name], + } + test_mp.backend.optimization.parameters.add_data( + parameter_id=parameter_2.id, data=test_data_2 + ) + 
parameter_2 = test_mp.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 2" + ) + pd.testing.assert_frame_equal( + df_from_list([parameter, parameter_2]), + test_mp.backend.optimization.parameters.tabulate(), + ) diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index 132cae47..4eaef13e 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -19,11 +19,8 @@ def df_from_list(tables: list): table.data, table.name, table.id, - table.data, - table.name, table.created_at, table.created_by, - table.run__id, ] for table in tables ], @@ -32,11 +29,8 @@ def df_from_list(tables: list): "data", "name", "id", - "data", - "name", "created_at", "created_by", - "run__id", ], ) From 02e3f4311f88db31e864bbc90b6028a9d6984258 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 3 Jul 2024 13:44:39 +0200 Subject: [PATCH 06/19] Fix references to DB filters in docs --- ixmp4/data/abstract/optimization/indexset.py | 4 ++-- ixmp4/data/abstract/optimization/parameter.py | 4 ++-- ixmp4/data/abstract/optimization/scalar.py | 4 ++-- ixmp4/data/abstract/optimization/table.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ixmp4/data/abstract/optimization/indexset.py b/ixmp4/data/abstract/optimization/indexset.py index a03b99a0..ccf4988f 100644 --- a/ixmp4/data/abstract/optimization/indexset.py +++ b/ixmp4/data/abstract/optimization/indexset.py @@ -93,7 +93,7 @@ def list(self, *, name: str | None = None, **kwargs) -> list[IndexSet]: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.indexset.filter.OptimizationIndexSetFilter`. Returns ------- @@ -112,7 +112,7 @@ def tabulate(self, *, name: str | None = None, **kwargs) -> pd.DataFrame: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.indexset.filter.OptimizationIndexSetFilter`. Returns ------- diff --git a/ixmp4/data/abstract/optimization/parameter.py b/ixmp4/data/abstract/optimization/parameter.py index 01e1cacc..a4742b0a 100644 --- a/ixmp4/data/abstract/optimization/parameter.py +++ b/ixmp4/data/abstract/optimization/parameter.py @@ -134,7 +134,7 @@ def list(self, *, name: str | None = None, **kwargs) -> Iterable[Parameter]: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.parameter.filter.OptimizationParameterFilter`. Returns ------- @@ -153,7 +153,7 @@ def tabulate(self, *, name: str | None = None, **kwargs) -> pd.DataFrame: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.parameter.filter.OptimizationParameterFilter`. Returns ------- diff --git a/ixmp4/data/abstract/optimization/scalar.py b/ixmp4/data/abstract/optimization/scalar.py index 190f2e18..e332d168 100644 --- a/ixmp4/data/abstract/optimization/scalar.py +++ b/ixmp4/data/abstract/optimization/scalar.py @@ -142,7 +142,7 @@ def list(self, *, name: str | None = None, **kwargs) -> Iterable[Scalar]: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.scalar.filter.OptimizationScalarFilter`. 
Returns ------- @@ -161,7 +161,7 @@ def tabulate(self, *, name: str | None = None, **kwargs) -> pd.DataFrame: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.scalar.filter.OptimizationScalarFilter`. Returns ------- diff --git a/ixmp4/data/abstract/optimization/table.py b/ixmp4/data/abstract/optimization/table.py index 9814f755..6aff32fc 100644 --- a/ixmp4/data/abstract/optimization/table.py +++ b/ixmp4/data/abstract/optimization/table.py @@ -134,7 +134,7 @@ def list(self, *, name: str | None = None, **kwargs) -> Iterable[Table]: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.table.filter.OptimizationTableFilter`. Returns ------- @@ -153,7 +153,7 @@ def tabulate(self, *, name: str | None = None, **kwargs) -> pd.DataFrame: # TODO: Update kwargs \*\*kwargs: any More filter parameters as specified in - `ixmp4.data.db.iamc.variable.filters.VariableFilter`. + `ixmp4.data.db.optimization.table.filter.OptimizationTableFilter`. Returns ------- From 33d186e39a37c55f1337a508995c45606e1e3af5 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Wed, 3 Jul 2024 13:45:14 +0200 Subject: [PATCH 07/19] Streamline naming in tests --- tests/core/{test_indexset.py => test_optimization_indexset.py} | 0 tests/core/test_optimization_parameter.py | 2 +- tests/core/{test_scalar.py => test_optimization_scalar.py} | 0 tests/core/{test_table.py => test_optimization_table.py} | 0 4 files changed, 1 insertion(+), 1 deletion(-) rename tests/core/{test_indexset.py => test_optimization_indexset.py} (100%) rename tests/core/{test_scalar.py => test_optimization_scalar.py} (100%) rename tests/core/{test_table.py => test_optimization_table.py} (100%) diff --git a/tests/core/test_indexset.py b/tests/core/test_optimization_indexset.py similarity index 100% rename from tests/core/test_indexset.py rename to tests/core/test_optimization_indexset.py diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index 63a3da5d..1438deea 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -31,7 +31,7 @@ def df_from_list(parameters: list): @all_platforms -class TestDataOptimizationParameter: +class TestCoreParameter: def test_create_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") diff --git a/tests/core/test_scalar.py b/tests/core/test_optimization_scalar.py similarity index 100% rename from tests/core/test_scalar.py rename to tests/core/test_optimization_scalar.py diff --git a/tests/core/test_table.py b/tests/core/test_optimization_table.py similarity index 100% rename from tests/core/test_table.py rename to tests/core/test_optimization_table.py From 6910d9e9a25306992ad6012eb2070fcdf1d1e4ab Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 5 Aug 2024 10:14:52 +0200 Subject: [PATCH 08/19] Fix and test parameter list and tabulate for specific runs --- ixmp4/core/optimization/parameter.py | 4 ++- tests/core/test_optimization_parameter.py | 33 +++++++++++++++--- tests/data/test_optimization_parameter.py | 42 ++++++++++++++++++++--- 3 files changed, 68 insertions(+), 11 deletions(-) diff --git a/ixmp4/core/optimization/parameter.py b/ixmp4/core/optimization/parameter.py index 6612be41..32c07295 100644 --- 
a/ixmp4/core/optimization/parameter.py +++ b/ixmp4/core/optimization/parameter.py @@ -128,4 +128,6 @@ def list(self, name: str | None = None) -> Iterable[Parameter]: ] def tabulate(self, name: str | None = None) -> pd.DataFrame: - return self.backend.optimization.parameters.tabulate(name=name) + return self.backend.optimization.parameters.tabulate( + run_id=self._run.id, name=name + ) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index 1438deea..40b90cdc 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -1,7 +1,7 @@ import pandas as pd import pytest -from ixmp4 import Parameter, Platform +from ixmp4.core import Parameter, Platform from ..utils import all_platforms @@ -242,8 +242,6 @@ def test_parameter_add_data(self, test_mp, request): def test_list_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - # Per default, list() lists scalars for `default` version runs: - run.set_as_default() _ = run.optimization.indexsets.create("Indexset") _ = run.optimization.indexsets.create("Indexset 2") parameter = run.optimization.parameters.create( @@ -264,11 +262,22 @@ def test_list_parameter(self, test_mp, request): ] assert not (set(expected_id) ^ set(list_id)) + # Test that only Parameters belonging to a Run are listed + run_2 = test_mp.runs.create("Model", "Scenario") + indexset = run_2.optimization.indexsets.create("Indexset") + parameter_3 = run_2.optimization.parameters.create( + "Parameter", constrained_to_indexsets=[indexset.name] + ) + parameter_4 = run_2.optimization.parameters.create( + "Parameter 2", constrained_to_indexsets=[indexset.name] + ) + expected_ids = [parameter_3.id, parameter_4.id] + list_ids = [parameter.id for parameter in run_2.optimization.parameters.list()] + assert not (set(expected_ids) ^ set(list_ids)) + def test_tabulate_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - # Per default, tabulate() lists scalars for `default` version runs: - run.set_as_default() indexset = run.optimization.indexsets.create("Indexset") indexset_2 = run.optimization.indexsets.create("Indexset 2") parameter = run.optimization.parameters.create( @@ -308,6 +317,20 @@ def test_tabulate_parameter(self, test_mp, request): run.optimization.parameters.tabulate(), ) + # Test that only Parameters belonging to a Run are listed + run_2 = test_mp.runs.create("Model", "Scenario") + indexset = run_2.optimization.indexsets.create("Indexset") + parameter_3 = run_2.optimization.parameters.create( + "Parameter", constrained_to_indexsets=[indexset.name] + ) + parameter_4 = run_2.optimization.parameters.create( + "Parameter 2", constrained_to_indexsets=[indexset.name] + ) + pd.testing.assert_frame_equal( + df_from_list([parameter_3, parameter_4]), + run_2.optimization.parameters.tabulate(), + ) + def test_parameter_docs(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") diff --git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py index 260539f6..8545f487 100644 --- a/tests/data/test_optimization_parameter.py +++ b/tests/data/test_optimization_parameter.py @@ -1,7 +1,7 @@ import pandas as pd import pytest -from ixmp4 import Parameter, Platform +from ixmp4.core import Parameter, 
Platform from ..utils import all_platforms @@ -278,8 +278,6 @@ def test_parameter_add_data(self, test_mp, request): def test_list_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") - # Per default, list() lists scalars for `default` version runs: - test_mp.backend.runs.set_as_default_version(run.id) _ = test_mp.backend.optimization.indexsets.create( run_id=run.id, name="Indexset" ) @@ -301,11 +299,27 @@ def test_list_parameter(self, test_mp, request): name="Parameter" ) + # Test listing of Parameters belonging to specific Run + run_2 = test_mp.backend.runs.create("Model", "Scenario") + indexset = test_mp.backend.optimization.indexsets.create( + run_id=run_2.id, name="Indexset" + ) + parameter_3 = test_mp.backend.optimization.parameters.create( + run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] + ) + parameter_4 = test_mp.backend.optimization.parameters.create( + run_id=run_2.id, + name="Parameter 2", + constrained_to_indexsets=[indexset.name], + ) + assert [ + parameter_3, + parameter_4, + ] == test_mp.backend.optimization.parameters.list(run_id=run_2.id) + def test_tabulate_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") - # Per default, tabulate() lists scalars for `default` version runs: - test_mp.backend.runs.set_as_default_version(run.id) indexset = test_mp.backend.optimization.indexsets.create( run_id=run.id, name="Indexset" ) @@ -364,3 +378,21 @@ def test_tabulate_parameter(self, test_mp, request): df_from_list([parameter, parameter_2]), test_mp.backend.optimization.parameters.tabulate(), ) + + # Test tabulation of Parameters belonging to specific Run + run_2 = test_mp.backend.runs.create("Model", "Scenario") + indexset = test_mp.backend.optimization.indexsets.create( + run_id=run_2.id, name="Indexset" + ) + parameter_3 = test_mp.backend.optimization.parameters.create( + run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] + ) + parameter_4 = test_mp.backend.optimization.parameters.create( + run_id=run_2.id, + name="Parameter 2", + constrained_to_indexsets=[indexset.name], + ) + pd.testing.assert_frame_equal( + df_from_list([parameter_3, parameter_4]), + test_mp.backend.optimization.parameters.tabulate(run_id=run_2.id), + ) From 247b06c58b215318eefe959e3e16b8601b61e626 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 5 Aug 2024 10:15:34 +0200 Subject: [PATCH 09/19] Include Run-side of relationship --- ixmp4/data/db/run/model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ixmp4/data/db/run/model.py b/ixmp4/data/db/run/model.py index c34dc3ab..bc1906b4 100644 --- a/ixmp4/data/db/run/model.py +++ b/ixmp4/data/db/run/model.py @@ -4,6 +4,7 @@ from ixmp4.data import abstract, types from ixmp4.data.db.model.model import Model from ixmp4.data.db.optimization.indexset import IndexSet +from ixmp4.data.db.optimization.parameter import Parameter from ixmp4.data.db.optimization.scalar import Scalar from ixmp4.data.db.optimization.table import Table from ixmp4.data.db.scenario.model import Scenario @@ -41,6 +42,7 @@ class Run(base.BaseModel, mixins.HasUpdateInfo): ) indexsets: types.Mapped[list["IndexSet"]] = db.relationship() + parameters: types.Mapped[list["Parameter"]] = db.relationship() scalars: types.Mapped[list["Scalar"]] = db.relationship() tables: types.Mapped[list["Table"]] = db.relationship() From 
2d02e4faec53619896373c470c0103579d576269 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 8 Aug 2024 14:18:35 +0200 Subject: [PATCH 10/19] Make indexset-creation a test utility --- tests/core/test_optimization_parameter.py | 105 +++++++++++--------- tests/data/test_optimization_parameter.py | 116 ++++++++++------------ 2 files changed, 109 insertions(+), 112 deletions(-) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index 40b90cdc..1ae9cefb 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -1,9 +1,9 @@ import pandas as pd import pytest -from ixmp4.core import Parameter, Platform +from ixmp4.core import IndexSet, Parameter, Platform -from ..utils import all_platforms +from ..utils import all_platforms, create_indexsets_for_run def df_from_list(parameters: list): @@ -37,38 +37,41 @@ def test_create_parameter(self, test_mp, request): run = test_mp.runs.create("Model", "Scenario") # Test normal creation - indexset_1 = run.optimization.indexsets.create("Indexset") + indexset, indexset_2 = tuple( + IndexSet(_backend=test_mp.backend, _model=model) + for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + ) parameter = run.optimization.parameters.create( name="Parameter", - constrained_to_indexsets=["Indexset"], + constrained_to_indexsets=[indexset.name], ) assert parameter.run_id == run.id assert parameter.name == "Parameter" assert parameter.data == {} # JsonDict type currently requires a dict, not None - assert parameter.columns[0].name == "Indexset" - assert parameter.constrained_to_indexsets == [indexset_1.name] + assert parameter.columns[0].name == indexset.name + assert parameter.constrained_to_indexsets == [indexset.name] assert parameter.values == [] assert parameter.units == [] # Test duplicate name raises with pytest.raises(Parameter.NotUnique): _ = run.optimization.parameters.create( - "Parameter", constrained_to_indexsets=["Indexset"] + "Parameter", constrained_to_indexsets=[indexset.name] ) # Test mismatch in constrained_to_indexsets and column_names raises with pytest.raises(ValueError, match="not equal in length"): _ = run.optimization.parameters.create( "Parameter 2", - constrained_to_indexsets=["Indexset"], + constrained_to_indexsets=[indexset.name], column_names=["Dimension 1", "Dimension 2"], ) # Test columns_names are used for names if given parameter_2 = run.optimization.parameters.create( "Parameter 2", - constrained_to_indexsets=[indexset_1.name], + constrained_to_indexsets=[indexset.name], column_names=["Column 1"], ) assert parameter_2.columns[0].name == "Column 1" @@ -77,16 +80,15 @@ def test_create_parameter(self, test_mp, request): with pytest.raises(ValueError, match="`column_names` are not unique"): _ = run.optimization.parameters.create( name="Parameter 3", - constrained_to_indexsets=[indexset_1.name, indexset_1.name], + constrained_to_indexsets=[indexset.name, indexset.name], column_names=["Column 1", "Column 1"], ) # Test column.dtype is registered correctly - indexset_2 = run.optimization.indexsets.create("Indexset 2") indexset_2.add(elements=2024) parameter_3 = run.optimization.parameters.create( "Parameter 5", - constrained_to_indexsets=["Indexset", indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) # If indexset doesn't have elements, a generic dtype is registered assert parameter_3.columns[0].dtype == "object" @@ -95,9 +97,11 @@ def test_create_parameter(self, test_mp, request): def 
test_get_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run.id, amount=1 + ) _ = run.optimization.parameters.create( - name="Parameter", constrained_to_indexsets=["Indexset"] + name="Parameter", constrained_to_indexsets=[indexset.name] ) parameter = run.optimization.parameters.get(name="Parameter") assert parameter.run_id == run.id @@ -116,9 +120,11 @@ def test_parameter_add_data(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") unit = test_mp.units.create("Unit") - indexset_1 = run.optimization.indexsets.create("Indexset") - indexset_1.add(elements=["foo", "bar", ""]) - indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset, indexset_2 = tuple( + IndexSet(_backend=test_mp.backend, _model=model) + for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + ) + indexset.add(elements=["foo", "bar", ""]) indexset_2.add(elements=[1, 2, 3]) # pandas can only convert dicts to dataframes if the values are lists # or if index is given. But maybe using read_json instead of from_dict @@ -126,14 +132,14 @@ def test_parameter_add_data(self, test_mp, request): # "ValueError: If using all scalar values, you must pass an index" and # reraise a custom informative error? test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], + indexset.name: ["foo"], + indexset_2.name: [1], "values": [3.14], "units": [unit.name], } parameter = run.optimization.parameters.create( "Parameter", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) parameter.add(data=test_data_1) assert parameter.data == test_data_1 @@ -142,7 +148,7 @@ def test_parameter_add_data(self, test_mp, request): parameter_2 = run.optimization.parameters.create( name="Parameter 2", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) with pytest.raises( @@ -151,8 +157,8 @@ def test_parameter_add_data(self, test_mp, request): parameter_2.add( pd.DataFrame( { - "Indexset": [None], - "Indexset 2": [2], + indexset.name: [None], + indexset_2.name: [2], "units": [unit.name], } ), @@ -164,8 +170,8 @@ def test_parameter_add_data(self, test_mp, request): parameter_2.add( data=pd.DataFrame( { - "Indexset": [None], - "Indexset 2": [2], + indexset.name: [None], + indexset_2.name: [2], "values": [""], } ), @@ -176,8 +182,8 @@ def test_parameter_add_data(self, test_mp, request): with pytest.raises(ValueError, match="All arrays must be of the same length"): parameter_2.add( data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], + indexset.name: ["foo", "foo"], + indexset_2.name: [2, 2], "values": [1, 2], "units": [unit.name], }, @@ -186,8 +192,8 @@ def test_parameter_add_data(self, test_mp, request): with pytest.raises(ValueError, match="contains duplicate rows"): parameter_2.add( data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], + indexset.name: ["foo", "foo"], + indexset_2.name: [2, 2], "values": [1, 2], "units": [unit.name, unit.name], }, @@ -195,8 +201,8 @@ def test_parameter_add_data(self, test_mp, request): # Test that order is conserved test_data_2 = { - "Indexset": ["", "", "foo", "foo", "bar", "bar"], - "Indexset 2": [3, 1, 2, 1, 2, 3], + 
indexset.name: ["", "", "foo", "foo", "bar", "bar"], + indexset_2.name: [3, 1, 2, 1, 2, 3], "values": [6, 5, 4, 3, 2, 1], "units": [unit.name] * 6, } @@ -208,7 +214,7 @@ def test_parameter_add_data(self, test_mp, request): # Test order is conserved with varying types and upon later addition of data parameter_3 = run.optimization.parameters.create( name="Parameter 3", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], column_names=["Column 1", "Column 2"], ) unit_2 = test_mp.units.create("Unit 2") @@ -242,10 +248,9 @@ def test_parameter_add_data(self, test_mp, request): def test_list_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - _ = run.optimization.indexsets.create("Indexset") - _ = run.optimization.indexsets.create("Indexset 2") + create_indexsets_for_run(platform=test_mp, run_id=run.id) parameter = run.optimization.parameters.create( - "Parameter", constrained_to_indexsets=["Indexset"] + "Parameter", constrained_to_indexsets=["Indexset 1"] ) parameter_2 = run.optimization.parameters.create( "Parameter 2", constrained_to_indexsets=["Indexset 2"] @@ -264,7 +269,9 @@ def test_list_parameter(self, test_mp, request): # Test that only Parameters belonging to a Run are listed run_2 = test_mp.runs.create("Model", "Scenario") - indexset = run_2.optimization.indexsets.create("Indexset") + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, amount=1 + ) parameter_3 = run_2.optimization.parameters.create( "Parameter", constrained_to_indexsets=[indexset.name] ) @@ -278,15 +285,17 @@ def test_list_parameter(self, test_mp, request): def test_tabulate_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") - indexset_2 = run.optimization.indexsets.create("Indexset 2") + indexset, indexset_2 = tuple( + IndexSet(_backend=test_mp.backend, _model=model) + for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + ) parameter = run.optimization.parameters.create( name="Parameter", - constrained_to_indexsets=["Indexset", "Indexset 2"], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) parameter_2 = run.optimization.parameters.create( name="Parameter 2", - constrained_to_indexsets=["Indexset", "Indexset 2"], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) pd.testing.assert_frame_equal( df_from_list([parameter_2]), @@ -298,16 +307,16 @@ def test_tabulate_parameter(self, test_mp, request): indexset.add(elements=["foo", "bar"]) indexset_2.add(elements=[1, 2, 3]) test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], + indexset.name: ["foo"], + indexset_2.name: [1], "values": ["value"], "units": [unit.name], } parameter.add(data=test_data_1) test_data_2 = { - "Indexset 2": [2, 3], - "Indexset": ["foo", "bar"], + indexset_2.name: [2, 3], + indexset.name: ["foo", "bar"], "values": [1, "value"], "units": [unit.name, unit_2.name], } @@ -319,7 +328,9 @@ def test_tabulate_parameter(self, test_mp, request): # Test that only Parameters belonging to a Run are listed run_2 = test_mp.runs.create("Model", "Scenario") - indexset = run_2.optimization.indexsets.create("Indexset") + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, amount=1 + ) parameter_3 = run_2.optimization.parameters.create( 
"Parameter", constrained_to_indexsets=[indexset.name] ) @@ -334,7 +345,9 @@ def test_tabulate_parameter(self, test_mp, request): def test_parameter_docs(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") - indexset = run.optimization.indexsets.create("Indexset") + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run.id, amount=1 + ) parameter_1 = run.optimization.parameters.create( "Parameter 1", constrained_to_indexsets=[indexset.name] ) diff --git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py index 8545f487..eeb50cf2 100644 --- a/tests/data/test_optimization_parameter.py +++ b/tests/data/test_optimization_parameter.py @@ -1,9 +1,10 @@ import pandas as pd import pytest -from ixmp4.core import Parameter, Platform +from ixmp4.core import Platform +from ixmp4.data.abstract import Parameter -from ..utils import all_platforms +from ..utils import all_platforms, create_indexsets_for_run def df_from_list(parameters: list): @@ -37,25 +38,25 @@ def test_create_parameter(self, test_mp, request): run = test_mp.backend.runs.create("Model", "Scenario") # Test normal creation - indexset_1 = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset" - ) + indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) parameter = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter", - constrained_to_indexsets=["Indexset"], + constrained_to_indexsets=[indexset.name], ) assert parameter.run__id == run.id assert parameter.name == "Parameter" assert parameter.data == {} # JsonDict type currently requires a dict, not None - assert parameter.columns[0].name == "Indexset" - assert parameter.columns[0].constrained_to_indexset == indexset_1.id + assert parameter.columns[0].name == indexset.name + assert parameter.columns[0].constrained_to_indexset == indexset.id # Test duplicate name raises with pytest.raises(Parameter.NotUnique): _ = test_mp.backend.optimization.parameters.create( - run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + run_id=run.id, + name="Parameter", + constrained_to_indexsets=[indexset.name], ) # Test mismatch in constrained_to_indexsets and column_names raises @@ -63,7 +64,7 @@ def test_create_parameter(self, test_mp, request): _ = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", - constrained_to_indexsets=["Indexset"], + constrained_to_indexsets=[indexset.name], column_names=["Dimension 1", "Dimension 2"], ) @@ -71,7 +72,7 @@ def test_create_parameter(self, test_mp, request): parameter_2 = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", - constrained_to_indexsets=[indexset_1.name], + constrained_to_indexsets=[indexset.name], column_names=["Column 1"], ) assert parameter_2.columns[0].name == "Column 1" @@ -81,14 +82,11 @@ def test_create_parameter(self, test_mp, request): _ = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 3", - constrained_to_indexsets=[indexset_1.name, indexset_1.name], + constrained_to_indexsets=[indexset.name, indexset.name], column_names=["Column 1", "Column 1"], ) # Test column.dtype is registered correctly - indexset_2 = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset 2" - ) test_mp.backend.optimization.indexsets.add_elements( indexset_2.id, elements=2024 ) @@ -96,7 +94,7 @@ def test_create_parameter(self, 
test_mp, request): parameter_3 = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 5", - constrained_to_indexsets=["Indexset", indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) # If indexset doesn't have elements, a generic dtype is registered assert parameter_3.columns[0].dtype == "object" @@ -105,11 +103,9 @@ def test_create_parameter(self, test_mp, request): def test_get_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") - _ = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset" - ) + create_indexsets_for_run(platform=test_mp, run_id=run.id, amount=1) parameter = test_mp.backend.optimization.parameters.create( - run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset 1"] ) assert parameter == test_mp.backend.optimization.parameters.get( run_id=run.id, name="Parameter" @@ -124,14 +120,9 @@ def test_parameter_add_data(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") unit = test_mp.backend.units.create("Unit") - indexset_1 = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset" - ) + indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) test_mp.backend.optimization.indexsets.add_elements( - indexset_id=indexset_1.id, elements=["foo", "bar", ""] - ) - indexset_2 = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset 2" + indexset_id=indexset.id, elements=["foo", "bar", ""] ) test_mp.backend.optimization.indexsets.add_elements( indexset_id=indexset_2.id, elements=[1, 2, 3] @@ -142,15 +133,15 @@ def test_parameter_add_data(self, test_mp, request): # "ValueError: If using all scalar values, you must pass an index" and # reraise a custom informative error? 
test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], + indexset.name: ["foo"], + indexset_2.name: [1], "values": [3.14], "units": [unit.name], } parameter = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) test_mp.backend.optimization.parameters.add_data( parameter_id=parameter.id, data=test_data_1 @@ -164,7 +155,7 @@ def test_parameter_add_data(self, test_mp, request): parameter_2 = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) with pytest.raises( @@ -174,8 +165,8 @@ def test_parameter_add_data(self, test_mp, request): parameter_id=parameter_2.id, data=pd.DataFrame( { - "Indexset": [None], - "Indexset 2": [2], + indexset.name: [None], + indexset_2.name: [2], "units": [unit.name], } ), @@ -188,8 +179,8 @@ def test_parameter_add_data(self, test_mp, request): parameter_id=parameter_2.id, data=pd.DataFrame( { - "Indexset": [None], - "Indexset 2": [2], + indexset.name: [None], + indexset_2.name: [2], "values": [""], } ), @@ -201,8 +192,8 @@ def test_parameter_add_data(self, test_mp, request): test_mp.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], + indexset.name: ["foo", "foo"], + indexset_2.name: [2, 2], "values": [1, 2], "units": [unit.name], }, @@ -212,8 +203,8 @@ def test_parameter_add_data(self, test_mp, request): test_mp.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={ - "Indexset": ["foo", "foo"], - "Indexset 2": [2, 2], + indexset.name: ["foo", "foo"], + indexset_2.name: [2, 2], "values": [1, 2], "units": [unit.name, unit.name], }, @@ -221,8 +212,8 @@ def test_parameter_add_data(self, test_mp, request): # Test that order is conserved test_data_2 = { - "Indexset": ["", "", "foo", "foo", "bar", "bar"], - "Indexset 2": [3, 1, 2, 1, 2, 3], + indexset.name: ["", "", "foo", "foo", "bar", "bar"], + indexset_2.name: [3, 1, 2, 1, 2, 3], "values": [6, 5, 4, 3, 2, 1], "units": [unit.name] * 6, } @@ -238,7 +229,7 @@ def test_parameter_add_data(self, test_mp, request): parameter_3 = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 3", - constrained_to_indexsets=[indexset_1.name, indexset_2.name], + constrained_to_indexsets=[indexset.name, indexset_2.name], column_names=["Column 1", "Column 2"], ) unit_2 = test_mp.backend.units.create("Unit 2") @@ -278,17 +269,14 @@ def test_parameter_add_data(self, test_mp, request): def test_list_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") - _ = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset" - ) - _ = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset 2" - ) + indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) parameter = test_mp.backend.optimization.parameters.create( - run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset"] + run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name] ) parameter_2 = test_mp.backend.optimization.parameters.create( - run_id=run.id, name="Parameter 2", constrained_to_indexsets=["Indexset 2"] + run_id=run.id, + name="Parameter 2", + 
constrained_to_indexsets=[indexset_2.name], ) assert [ parameter, @@ -301,9 +289,10 @@ def test_list_parameter(self, test_mp, request): # Test listing of Parameters belonging to specific Run run_2 = test_mp.backend.runs.create("Model", "Scenario") - indexset = test_mp.backend.optimization.indexsets.create( - run_id=run_2.id, name="Indexset" + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, amount=1 ) + parameter_3 = test_mp.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] ) @@ -320,21 +309,16 @@ def test_list_parameter(self, test_mp, request): def test_tabulate_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.backend.runs.create("Model", "Scenario") - indexset = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset" - ) - indexset_2 = test_mp.backend.optimization.indexsets.create( - run_id=run.id, name="Indexset 2" - ) + indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) parameter = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter", - constrained_to_indexsets=["Indexset", "Indexset 2"], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) parameter_2 = test_mp.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", - constrained_to_indexsets=["Indexset", "Indexset 2"], + constrained_to_indexsets=[indexset.name, indexset_2.name], ) pd.testing.assert_frame_equal( df_from_list([parameter_2]), @@ -350,8 +334,8 @@ def test_tabulate_parameter(self, test_mp, request): indexset_id=indexset_2.id, elements=[1, 2, 3] ) test_data_1 = { - "Indexset": ["foo"], - "Indexset 2": [1], + indexset.name: ["foo"], + indexset_2.name: [1], "values": ["value"], "units": [unit.name], } @@ -363,8 +347,8 @@ def test_tabulate_parameter(self, test_mp, request): ) test_data_2 = { - "Indexset 2": [2, 3], - "Indexset": ["foo", "bar"], + indexset_2.name: [2, 3], + indexset.name: ["foo", "bar"], "values": [1, "value"], "units": [unit.name, unit_2.name], } @@ -381,8 +365,8 @@ def test_tabulate_parameter(self, test_mp, request): # Test tabulation of Parameters belonging to specific Run run_2 = test_mp.backend.runs.create("Model", "Scenario") - indexset = test_mp.backend.optimization.indexsets.create( - run_id=run_2.id, name="Indexset" + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, amount=1 ) parameter_3 = test_mp.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] From 87dd6456d3eedaacfe75eac8c6c686d8437f3b55 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 9 Aug 2024 09:03:14 +0200 Subject: [PATCH 11/19] Make new tests more efficient --- tests/core/test_optimization_parameter.py | 47 ++++++++--------------- 1 file changed, 16 insertions(+), 31 deletions(-) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index 1ae9cefb..16fc42a3 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -255,6 +255,14 @@ def test_list_parameter(self, test_mp, request): parameter_2 = run.optimization.parameters.create( "Parameter 2", constrained_to_indexsets=["Indexset 2"] ) + # Create new run to test listing parameters for specific run + run_2 = test_mp.runs.create("Model", "Scenario") + (indexset,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, 
amount=1 + ) + run_2.optimization.parameters.create( + "Parameter", constrained_to_indexsets=[indexset.name] + ) expected_ids = [parameter.id, parameter_2.id] list_ids = [parameter.id for parameter in run.optimization.parameters.list()] assert not (set(expected_ids) ^ set(list_ids)) @@ -267,21 +275,6 @@ def test_list_parameter(self, test_mp, request): ] assert not (set(expected_id) ^ set(list_id)) - # Test that only Parameters belonging to a Run are listed - run_2 = test_mp.runs.create("Model", "Scenario") - (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 - ) - parameter_3 = run_2.optimization.parameters.create( - "Parameter", constrained_to_indexsets=[indexset.name] - ) - parameter_4 = run_2.optimization.parameters.create( - "Parameter 2", constrained_to_indexsets=[indexset.name] - ) - expected_ids = [parameter_3.id, parameter_4.id] - list_ids = [parameter.id for parameter in run_2.optimization.parameters.list()] - assert not (set(expected_ids) ^ set(list_ids)) - def test_tabulate_parameter(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") @@ -297,6 +290,14 @@ def test_tabulate_parameter(self, test_mp, request): name="Parameter 2", constrained_to_indexsets=[indexset.name, indexset_2.name], ) + # Create new run to test listing parameters for specific run + run_2 = test_mp.runs.create("Model", "Scenario") + (indexset_3,) = create_indexsets_for_run( + platform=test_mp, run_id=run_2.id, amount=1 + ) + run_2.optimization.parameters.create( + "Parameter", constrained_to_indexsets=[indexset_3.name] + ) pd.testing.assert_frame_equal( df_from_list([parameter_2]), run.optimization.parameters.tabulate(name="Parameter 2"), @@ -326,22 +327,6 @@ def test_tabulate_parameter(self, test_mp, request): run.optimization.parameters.tabulate(), ) - # Test that only Parameters belonging to a Run are listed - run_2 = test_mp.runs.create("Model", "Scenario") - (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 - ) - parameter_3 = run_2.optimization.parameters.create( - "Parameter", constrained_to_indexsets=[indexset.name] - ) - parameter_4 = run_2.optimization.parameters.create( - "Parameter 2", constrained_to_indexsets=[indexset.name] - ) - pd.testing.assert_frame_equal( - df_from_list([parameter_3, parameter_4]), - run_2.optimization.parameters.tabulate(), - ) - def test_parameter_docs(self, test_mp, request): test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore run = test_mp.runs.create("Model", "Scenario") From 94b337e17b6aad70bba206de9065e692c79bcd26 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 22 Aug 2024 15:46:15 +0200 Subject: [PATCH 12/19] Incorporate changes from #110 --- tests/core/test_optimization_parameter.py | 80 +++++------ tests/data/test_optimization_parameter.py | 166 +++++++++++----------- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index 16fc42a3..c763e979 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -1,9 +1,12 @@ +from typing import Any + import pandas as pd import pytest -from ixmp4.core import IndexSet, Parameter, Platform +import ixmp4 +from ixmp4.core import IndexSet, Parameter -from ..utils import all_platforms, create_indexsets_for_run +from ..utils import create_indexsets_for_run def df_from_list(parameters: list): @@ -30,16 
+33,14 @@ def df_from_list(parameters: list): ) -@all_platforms class TestCoreParameter: - def test_create_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") + def test_create_parameter(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") # Test normal creation indexset, indexset_2 = tuple( - IndexSet(_backend=test_mp.backend, _model=model) - for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + IndexSet(_backend=platform.backend, _model=model) + for model in create_indexsets_for_run(platform=platform, run_id=run.id) ) parameter = run.optimization.parameters.create( name="Parameter", @@ -94,11 +95,10 @@ def test_create_parameter(self, test_mp, request): assert parameter_3.columns[0].dtype == "object" assert parameter_3.columns[1].dtype == "int64" - def test_get_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") + def test_get_parameter(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run.id, amount=1 + platform=platform, run_id=run.id, amount=1 ) _ = run.optimization.parameters.create( name="Parameter", constrained_to_indexsets=[indexset.name] @@ -116,13 +116,12 @@ def test_get_parameter(self, test_mp, request): with pytest.raises(Parameter.NotFound): _ = run.optimization.parameters.get("Parameter 2") - def test_parameter_add_data(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - unit = test_mp.units.create("Unit") + def test_parameter_add_data(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") + unit = platform.units.create("Unit") indexset, indexset_2 = tuple( - IndexSet(_backend=test_mp.backend, _model=model) - for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + IndexSet(_backend=platform.backend, _model=model) + for model in create_indexsets_for_run(platform=platform, run_id=run.id) ) indexset.add(elements=["foo", "bar", ""]) indexset_2.add(elements=[1, 2, 3]) @@ -217,10 +216,10 @@ def test_parameter_add_data(self, test_mp, request): constrained_to_indexsets=[indexset.name, indexset_2.name], column_names=["Column 1", "Column 2"], ) - unit_2 = test_mp.units.create("Unit 2") - unit_3 = test_mp.units.create("Unit 3") + unit_2 = platform.units.create("Unit 2") + unit_3 = platform.units.create("Unit 3") - test_data_3 = { + test_data_3: dict[str, list[Any]] = { "Column 1": ["bar", "foo", ""], "Column 2": [2, 3, 1], "values": ["3", 2.0, 1], @@ -231,24 +230,23 @@ def test_parameter_add_data(self, test_mp, request): assert parameter_3.values == test_data_3["values"] assert parameter_3.units == test_data_3["units"] - test_data_4 = { + test_data_4: dict[str, list[Any]] = { "Column 1": ["foo", "", "bar"], "Column 2": [2, 3, 1], "values": [3.14, 2, "1"], "units": [unit_2.name, unit.name, unit_3.name], } parameter_3.add(data=test_data_4) - test_data_5 = test_data_3.copy() + test_data_5: dict[str, list[Any]] = test_data_3.copy() for key, value in test_data_4.items(): test_data_5[key].extend(value) assert parameter_3.data == test_data_5 assert parameter_3.values == test_data_5["values"] assert parameter_3.units == test_data_5["units"] - def test_list_parameter(self, test_mp, request): - test_mp: Platform = 
request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") - create_indexsets_for_run(platform=test_mp, run_id=run.id) + def test_list_parameter(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") + create_indexsets_for_run(platform=platform, run_id=run.id) parameter = run.optimization.parameters.create( "Parameter", constrained_to_indexsets=["Indexset 1"] ) @@ -256,9 +254,9 @@ def test_list_parameter(self, test_mp, request): "Parameter 2", constrained_to_indexsets=["Indexset 2"] ) # Create new run to test listing parameters for specific run - run_2 = test_mp.runs.create("Model", "Scenario") + run_2 = platform.runs.create("Model", "Scenario") (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 + platform=platform, run_id=run_2.id, amount=1 ) run_2.optimization.parameters.create( "Parameter", constrained_to_indexsets=[indexset.name] @@ -275,12 +273,11 @@ def test_list_parameter(self, test_mp, request): ] assert not (set(expected_id) ^ set(list_id)) - def test_tabulate_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") + def test_tabulate_parameter(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") indexset, indexset_2 = tuple( - IndexSet(_backend=test_mp.backend, _model=model) - for model in create_indexsets_for_run(platform=test_mp, run_id=run.id) + IndexSet(_backend=platform.backend, _model=model) + for model in create_indexsets_for_run(platform=platform, run_id=run.id) ) parameter = run.optimization.parameters.create( name="Parameter", @@ -291,9 +288,9 @@ def test_tabulate_parameter(self, test_mp, request): constrained_to_indexsets=[indexset.name, indexset_2.name], ) # Create new run to test listing parameters for specific run - run_2 = test_mp.runs.create("Model", "Scenario") + run_2 = platform.runs.create("Model", "Scenario") (indexset_3,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 + platform=platform, run_id=run_2.id, amount=1 ) run_2.optimization.parameters.create( "Parameter", constrained_to_indexsets=[indexset_3.name] @@ -303,8 +300,8 @@ def test_tabulate_parameter(self, test_mp, request): run.optimization.parameters.tabulate(name="Parameter 2"), ) - unit = test_mp.units.create("Unit") - unit_2 = test_mp.units.create("Unit 2") + unit = platform.units.create("Unit") + unit_2 = platform.units.create("Unit 2") indexset.add(elements=["foo", "bar"]) indexset_2.add(elements=[1, 2, 3]) test_data_1 = { @@ -327,11 +324,10 @@ def test_tabulate_parameter(self, test_mp, request): run.optimization.parameters.tabulate(), ) - def test_parameter_docs(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.runs.create("Model", "Scenario") + def test_parameter_docs(self, platform: ixmp4.Platform): + run = platform.runs.create("Model", "Scenario") (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run.id, amount=1 + platform=platform, run_id=run.id, amount=1 ) parameter_1 = run.optimization.parameters.create( "Parameter 1", constrained_to_indexsets=[indexset.name] diff --git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py index eeb50cf2..adbe5dbf 100644 --- a/tests/data/test_optimization_parameter.py +++ b/tests/data/test_optimization_parameter.py @@ -1,10 +1,10 @@ import pandas as pd import pytest -from ixmp4.core import 
Platform +import ixmp4 from ixmp4.data.abstract import Parameter -from ..utils import all_platforms, create_indexsets_for_run +from ..utils import create_indexsets_for_run def df_from_list(parameters: list): @@ -31,15 +31,15 @@ def df_from_list(parameters: list): ) -@all_platforms class TestDataOptimizationParameter: - def test_create_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.backend.runs.create("Model", "Scenario") + def test_create_parameter(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") # Test normal creation - indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) - parameter = test_mp.backend.optimization.parameters.create( + indexset, indexset_2 = create_indexsets_for_run( + platform=platform, run_id=run.id + ) + parameter = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name], @@ -53,7 +53,7 @@ def test_create_parameter(self, test_mp, request): # Test duplicate name raises with pytest.raises(Parameter.NotUnique): - _ = test_mp.backend.optimization.parameters.create( + _ = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name], @@ -61,7 +61,7 @@ def test_create_parameter(self, test_mp, request): # Test mismatch in constrained_to_indexsets and column_names raises with pytest.raises(ValueError, match="not equal in length"): - _ = test_mp.backend.optimization.parameters.create( + _ = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", constrained_to_indexsets=[indexset.name], @@ -69,7 +69,7 @@ def test_create_parameter(self, test_mp, request): ) # Test columns_names are used for names if given - parameter_2 = test_mp.backend.optimization.parameters.create( + parameter_2 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", constrained_to_indexsets=[indexset.name], @@ -79,7 +79,7 @@ def test_create_parameter(self, test_mp, request): # Test duplicate column_names raise with pytest.raises(ValueError, match="`column_names` are not unique"): - _ = test_mp.backend.optimization.parameters.create( + _ = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 3", constrained_to_indexsets=[indexset.name, indexset.name], @@ -87,11 +87,13 @@ def test_create_parameter(self, test_mp, request): ) # Test column.dtype is registered correctly - test_mp.backend.optimization.indexsets.add_elements( + platform.backend.optimization.indexsets.add_elements( indexset_2.id, elements=2024 ) - indexset_2 = test_mp.backend.optimization.indexsets.get(run.id, indexset_2.name) - parameter_3 = test_mp.backend.optimization.parameters.create( + indexset_2 = platform.backend.optimization.indexsets.get( + run.id, indexset_2.name + ) + parameter_3 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 5", constrained_to_indexsets=[indexset.name, indexset_2.name], @@ -100,31 +102,31 @@ def test_create_parameter(self, test_mp, request): assert parameter_3.columns[0].dtype == "object" assert parameter_3.columns[1].dtype == "int64" - def test_get_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.backend.runs.create("Model", "Scenario") - create_indexsets_for_run(platform=test_mp, run_id=run.id, amount=1) - parameter = 
test_mp.backend.optimization.parameters.create( + def test_get_parameter(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + create_indexsets_for_run(platform=platform, run_id=run.id, amount=1) + parameter = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=["Indexset 1"] ) - assert parameter == test_mp.backend.optimization.parameters.get( + assert parameter == platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter" ) with pytest.raises(Parameter.NotFound): - _ = test_mp.backend.optimization.parameters.get( + _ = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter 2" ) - def test_parameter_add_data(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.backend.runs.create("Model", "Scenario") - unit = test_mp.backend.units.create("Unit") - indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) - test_mp.backend.optimization.indexsets.add_elements( + def test_parameter_add_data(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + unit = platform.backend.units.create("Unit") + indexset, indexset_2 = create_indexsets_for_run( + platform=platform, run_id=run.id + ) + platform.backend.optimization.indexsets.add_elements( indexset_id=indexset.id, elements=["foo", "bar", ""] ) - test_mp.backend.optimization.indexsets.add_elements( + platform.backend.optimization.indexsets.add_elements( indexset_id=indexset_2.id, elements=[1, 2, 3] ) # pandas can only convert dicts to dataframes if the values are lists @@ -138,21 +140,21 @@ def test_parameter_add_data(self, test_mp, request): "values": [3.14], "units": [unit.name], } - parameter = test_mp.backend.optimization.parameters.create( + parameter = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name, indexset_2.name], ) - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter.id, data=test_data_1 ) - parameter = test_mp.backend.optimization.parameters.get( + parameter = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter" ) assert parameter.data == test_data_1 - parameter_2 = test_mp.backend.optimization.parameters.create( + parameter_2 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", constrained_to_indexsets=[indexset.name, indexset_2.name], @@ -161,7 +163,7 @@ def test_parameter_add_data(self, test_mp, request): with pytest.raises( AssertionError, match=r"must include the column\(s\): values!" ): - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data=pd.DataFrame( { @@ -175,7 +177,7 @@ def test_parameter_add_data(self, test_mp, request): with pytest.raises( AssertionError, match=r"must include the column\(s\): units!" 
): - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data=pd.DataFrame( { @@ -189,7 +191,7 @@ def test_parameter_add_data(self, test_mp, request): # By converting data to pd.DataFrame, we automatically enforce equal length # of new columns, raises All arrays must be of the same length otherwise: with pytest.raises(ValueError, match="All arrays must be of the same length"): - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={ indexset.name: ["foo", "foo"], @@ -200,7 +202,7 @@ def test_parameter_add_data(self, test_mp, request): ) with pytest.raises(ValueError, match="contains duplicate rows"): - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={ indexset.name: ["foo", "foo"], @@ -217,23 +219,23 @@ def test_parameter_add_data(self, test_mp, request): "values": [6, 5, 4, 3, 2, 1], "units": [unit.name] * 6, } - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data=test_data_2 ) - parameter_2 = test_mp.backend.optimization.parameters.get( + parameter_2 = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter 2" ) assert parameter_2.data == test_data_2 # Test order is conserved with varying types and upon later addition of data - parameter_3 = test_mp.backend.optimization.parameters.create( + parameter_3 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 3", constrained_to_indexsets=[indexset.name, indexset_2.name], column_names=["Column 1", "Column 2"], ) - unit_2 = test_mp.backend.units.create("Unit 2") - unit_3 = test_mp.backend.units.create("Unit 3") + unit_2 = platform.backend.units.create("Unit 2") + unit_3 = platform.backend.units.create("Unit 3") test_data_3 = { "Column 1": ["bar", "foo", ""], @@ -241,10 +243,10 @@ def test_parameter_add_data(self, test_mp, request): "values": ["3", 2.0, 1], "units": [unit_3.name, unit_2.name, unit.name], } - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_3.id, data=test_data_3 ) - parameter_3 = test_mp.backend.optimization.parameters.get( + parameter_3 = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter 3" ) assert parameter_3.data == test_data_3 @@ -255,25 +257,26 @@ def test_parameter_add_data(self, test_mp, request): "values": [3.14, 2, "1"], "units": [unit_2.name, unit.name, unit_3.name], } - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_3.id, data=test_data_4 ) - parameter_3 = test_mp.backend.optimization.parameters.get( + parameter_3 = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter 3" ) test_data_5 = test_data_3.copy() for key, value in test_data_4.items(): - test_data_5[key].extend(value) + test_data_5[key].extend(value) # type: ignore assert parameter_3.data == test_data_5 - def test_list_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.backend.runs.create("Model", "Scenario") - indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) - parameter = test_mp.backend.optimization.parameters.create( + def test_list_parameter(self, platform: 
ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + indexset, indexset_2 = create_indexsets_for_run( + platform=platform, run_id=run.id + ) + parameter = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name] ) - parameter_2 = test_mp.backend.optimization.parameters.create( + parameter_2 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", constrained_to_indexsets=[indexset_2.name], @@ -281,22 +284,22 @@ def test_list_parameter(self, test_mp, request): assert [ parameter, parameter_2, - ] == test_mp.backend.optimization.parameters.list() + ] == platform.backend.optimization.parameters.list() - assert [parameter] == test_mp.backend.optimization.parameters.list( + assert [parameter] == platform.backend.optimization.parameters.list( name="Parameter" ) # Test listing of Parameters belonging to specific Run - run_2 = test_mp.backend.runs.create("Model", "Scenario") + run_2 = platform.backend.runs.create("Model", "Scenario") (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 + platform=platform, run_id=run_2.id, amount=1 ) - parameter_3 = test_mp.backend.optimization.parameters.create( + parameter_3 = platform.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] ) - parameter_4 = test_mp.backend.optimization.parameters.create( + parameter_4 = platform.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter 2", constrained_to_indexsets=[indexset.name], @@ -304,33 +307,34 @@ def test_list_parameter(self, test_mp, request): assert [ parameter_3, parameter_4, - ] == test_mp.backend.optimization.parameters.list(run_id=run_2.id) + ] == platform.backend.optimization.parameters.list(run_id=run_2.id) - def test_tabulate_parameter(self, test_mp, request): - test_mp: Platform = request.getfixturevalue(test_mp) # type: ignore - run = test_mp.backend.runs.create("Model", "Scenario") - indexset, indexset_2 = create_indexsets_for_run(platform=test_mp, run_id=run.id) - parameter = test_mp.backend.optimization.parameters.create( + def test_tabulate_parameter(self, platform: ixmp4.Platform): + run = platform.backend.runs.create("Model", "Scenario") + indexset, indexset_2 = create_indexsets_for_run( + platform=platform, run_id=run.id + ) + parameter = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter", constrained_to_indexsets=[indexset.name, indexset_2.name], ) - parameter_2 = test_mp.backend.optimization.parameters.create( + parameter_2 = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", constrained_to_indexsets=[indexset.name, indexset_2.name], ) pd.testing.assert_frame_equal( df_from_list([parameter_2]), - test_mp.backend.optimization.parameters.tabulate(name="Parameter 2"), + platform.backend.optimization.parameters.tabulate(name="Parameter 2"), ) - unit = test_mp.backend.units.create("Unit") - unit_2 = test_mp.backend.units.create("Unit 2") - test_mp.backend.optimization.indexsets.add_elements( + unit = platform.backend.units.create("Unit") + unit_2 = platform.backend.units.create("Unit 2") + platform.backend.optimization.indexsets.add_elements( indexset_id=indexset.id, elements=["foo", "bar"] ) - test_mp.backend.optimization.indexsets.add_elements( + platform.backend.optimization.indexsets.add_elements( indexset_id=indexset_2.id, elements=[1, 2, 3] ) test_data_1 = { @@ -339,10 +343,10 @@ def 
test_tabulate_parameter(self, test_mp, request): "values": ["value"], "units": [unit.name], } - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter.id, data=test_data_1 ) - parameter = test_mp.backend.optimization.parameters.get( + parameter = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter" ) @@ -352,31 +356,31 @@ def test_tabulate_parameter(self, test_mp, request): "values": [1, "value"], "units": [unit.name, unit_2.name], } - test_mp.backend.optimization.parameters.add_data( + platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data=test_data_2 ) - parameter_2 = test_mp.backend.optimization.parameters.get( + parameter_2 = platform.backend.optimization.parameters.get( run_id=run.id, name="Parameter 2" ) pd.testing.assert_frame_equal( df_from_list([parameter, parameter_2]), - test_mp.backend.optimization.parameters.tabulate(), + platform.backend.optimization.parameters.tabulate(), ) # Test tabulation of Parameters belonging to specific Run - run_2 = test_mp.backend.runs.create("Model", "Scenario") + run_2 = platform.backend.runs.create("Model", "Scenario") (indexset,) = create_indexsets_for_run( - platform=test_mp, run_id=run_2.id, amount=1 + platform=platform, run_id=run_2.id, amount=1 ) - parameter_3 = test_mp.backend.optimization.parameters.create( + parameter_3 = platform.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter", constrained_to_indexsets=[indexset.name] ) - parameter_4 = test_mp.backend.optimization.parameters.create( + parameter_4 = platform.backend.optimization.parameters.create( run_id=run_2.id, name="Parameter 2", constrained_to_indexsets=[indexset.name], ) pd.testing.assert_frame_equal( df_from_list([parameter_3, parameter_4]), - test_mp.backend.optimization.parameters.tabulate(run_id=run_2.id), + platform.backend.optimization.parameters.tabulate(run_id=run_2.id), ) From 8e737c67c1c57694e2717ab94fcacf0b4767f729 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Tue, 27 Aug 2024 09:02:15 +0200 Subject: [PATCH 13/19] Use pandas for updated add_data behaviour --- .../db/optimization/parameter/repository.py | 8 ++- tests/core/test_optimization_parameter.py | 67 +++++++++--------- tests/data/test_optimization_parameter.py | 68 ++++++++++--------- 3 files changed, 78 insertions(+), 65 deletions(-) diff --git a/ixmp4/data/db/optimization/parameter/repository.py b/ixmp4/data/db/optimization/parameter/repository.py index 735194fe..a0391e78 100644 --- a/ixmp4/data/db/optimization/parameter/repository.py +++ b/ixmp4/data/db/optimization/parameter/repository.py @@ -167,8 +167,12 @@ def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> No message=f"'{unit_name}' is not defined for this Platform!" 
) from e - parameter.data = pd.concat( - [pd.DataFrame.from_dict(parameter.data), data] + index_list = [column.name for column in parameter.columns] + existing_data = pd.DataFrame(parameter.data) + if not existing_data.empty: + existing_data.set_index(index_list, inplace=True) + parameter.data = ( + data.set_index(index_list).combine_first(existing_data).reset_index() ).to_dict(orient="list") self.session.add(parameter) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index c763e979..f13bdf4a 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -1,12 +1,10 @@ -from typing import Any - import pandas as pd import pytest import ixmp4 from ixmp4.core import IndexSet, Parameter -from ..utils import create_indexsets_for_run +from ..utils import assert_unordered_equality, create_indexsets_for_run def df_from_list(parameters: list): @@ -210,39 +208,44 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): assert parameter_2.values == test_data_2["values"] assert parameter_2.units == test_data_2["units"] - # Test order is conserved with varying types and upon later addition of data - parameter_3 = run.optimization.parameters.create( - name="Parameter 3", + unit_2 = platform.backend.units.create("Unit 2") + + # Test updating of existing keys + parameter_4 = platform.backend.optimization.parameters.create( + run_id=run.id, + name="Parameter 4", constrained_to_indexsets=[indexset.name, indexset_2.name], - column_names=["Column 1", "Column 2"], ) - unit_2 = platform.units.create("Unit 2") - unit_3 = platform.units.create("Unit 3") - - test_data_3: dict[str, list[Any]] = { - "Column 1": ["bar", "foo", ""], - "Column 2": [2, 3, 1], - "values": ["3", 2.0, 1], - "units": [unit_3.name, unit_2.name, unit.name], + test_data_6 = { + indexset.name: ["foo", "foo", "bar", "bar"], + indexset_2.name: [1, 3, 1, 2], + "values": [1, "2", 2.3, "4"], + "units": [unit.name] * 4, } - parameter_3.add(data=test_data_3) - assert parameter_3.data == test_data_3 - assert parameter_3.values == test_data_3["values"] - assert parameter_3.units == test_data_3["units"] - - test_data_4: dict[str, list[Any]] = { - "Column 1": ["foo", "", "bar"], - "Column 2": [2, 3, 1], - "values": [3.14, 2, "1"], - "units": [unit_2.name, unit.name, unit_3.name], + platform.backend.optimization.parameters.add_data( + parameter_id=parameter_4.id, data=test_data_6 + ) + test_data_7 = { + indexset.name: ["foo", "foo", "bar", "bar", "bar"], + indexset_2.name: [1, 2, 3, 2, 1], + "values": [1, 2.3, 3, 4, "5"], + "units": [unit.name] * 2 + [unit_2.name] * 3, } - parameter_3.add(data=test_data_4) - test_data_5: dict[str, list[Any]] = test_data_3.copy() - for key, value in test_data_4.items(): - test_data_5[key].extend(value) - assert parameter_3.data == test_data_5 - assert parameter_3.values == test_data_5["values"] - assert parameter_3.units == test_data_5["units"] + platform.backend.optimization.parameters.add_data( + parameter_id=parameter_4.id, data=test_data_7 + ) + parameter_4 = platform.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 4" + ) + expected = ( + pd.DataFrame(test_data_7) + .set_index([indexset.name, indexset_2.name]) + .combine_first( + pd.DataFrame(test_data_6).set_index([indexset.name, indexset_2.name]) + ) + .reset_index() + ) + assert_unordered_equality(expected, pd.DataFrame(parameter_4.data)) def test_list_parameter(self, platform: ixmp4.Platform): run = platform.runs.create("Model", "Scenario") diff 
--git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py index adbe5dbf..7365da34 100644 --- a/tests/data/test_optimization_parameter.py +++ b/tests/data/test_optimization_parameter.py @@ -4,7 +4,7 @@ import ixmp4 from ixmp4.data.abstract import Parameter -from ..utils import create_indexsets_for_run +from ..utils import assert_unordered_equality, create_indexsets_for_run def df_from_list(parameters: list): @@ -227,46 +227,52 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): ) assert parameter_2.data == test_data_2 - # Test order is conserved with varying types and upon later addition of data - parameter_3 = platform.backend.optimization.parameters.create( + # TODO With the current update method (using pandas), order is not conserved. + # Is that a bad thing, though? Because order is based on the indexsets, which + # shouldn't be too bad. + # It seems a little inconsistent though, at the moment: when there's no data + # before, add_data will combine_first() with empty df as other, which doesn't + # change anything, so reset_index() restores order. But if other is not empty, + # order is not restored after combination. And how would it be? All new in place + # or appended? + unit_2 = platform.backend.units.create("Unit 2") + + # Test updating of existing keys + parameter_4 = platform.backend.optimization.parameters.create( run_id=run.id, - name="Parameter 3", + name="Parameter 4", constrained_to_indexsets=[indexset.name, indexset_2.name], - column_names=["Column 1", "Column 2"], ) - unit_2 = platform.backend.units.create("Unit 2") - unit_3 = platform.backend.units.create("Unit 3") - - test_data_3 = { - "Column 1": ["bar", "foo", ""], - "Column 2": [2, 3, 1], - "values": ["3", 2.0, 1], - "units": [unit_3.name, unit_2.name, unit.name], + test_data_6 = { + indexset.name: ["foo", "foo", "bar", "bar"], + indexset_2.name: [1, 3, 1, 2], + "values": [1, "2", 2.3, "4"], + "units": [unit.name] * 4, } platform.backend.optimization.parameters.add_data( - parameter_id=parameter_3.id, data=test_data_3 - ) - parameter_3 = platform.backend.optimization.parameters.get( - run_id=run.id, name="Parameter 3" + parameter_id=parameter_4.id, data=test_data_6 ) - assert parameter_3.data == test_data_3 - - test_data_4 = { - "Column 1": ["foo", "", "bar"], - "Column 2": [2, 3, 1], - "values": [3.14, 2, "1"], - "units": [unit_2.name, unit.name, unit_3.name], + test_data_7 = { + indexset.name: ["foo", "foo", "bar", "bar", "bar"], + indexset_2.name: [1, 2, 3, 2, 1], + "values": [1, 2.3, 3, 4, "5"], + "units": [unit.name] * 2 + [unit_2.name] * 3, } platform.backend.optimization.parameters.add_data( - parameter_id=parameter_3.id, data=test_data_4 + parameter_id=parameter_4.id, data=test_data_7 ) - parameter_3 = platform.backend.optimization.parameters.get( - run_id=run.id, name="Parameter 3" + parameter_4 = platform.backend.optimization.parameters.get( + run_id=run.id, name="Parameter 4" + ) + expected = ( + pd.DataFrame(test_data_7) + .set_index([indexset.name, indexset_2.name]) + .combine_first( + pd.DataFrame(test_data_6).set_index([indexset.name, indexset_2.name]) + ) + .reset_index() ) - test_data_5 = test_data_3.copy() - for key, value in test_data_4.items(): - test_data_5[key].extend(value) # type: ignore - assert parameter_3.data == test_data_5 + assert_unordered_equality(expected, pd.DataFrame(parameter_4.data)) def test_list_parameter(self, platform: ixmp4.Platform): run = platform.backend.runs.create("Model", "Scenario") From 
52a07ed31df1ac57eb3989ce7cacd1906d120eae Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Thu, 29 Aug 2024 10:07:47 +0200 Subject: [PATCH 14/19] Remove superfluous session.add() for parameter --- ixmp4/data/db/optimization/parameter/repository.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ixmp4/data/db/optimization/parameter/repository.py b/ixmp4/data/db/optimization/parameter/repository.py index a0391e78..f4cbb85f 100644 --- a/ixmp4/data/db/optimization/parameter/repository.py +++ b/ixmp4/data/db/optimization/parameter/repository.py @@ -175,5 +175,4 @@ def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> No data.set_index(index_list).combine_first(existing_data).reset_index() ).to_dict(orient="list") - self.session.add(parameter) self.session.commit() From 5b488abb522d23fa6bb98b437c1a29c1cc261464 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 30 Aug 2024 13:08:17 +0200 Subject: [PATCH 15/19] Use core layer in core test --- tests/core/test_optimization_parameter.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index f13bdf4a..e0f92768 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -208,11 +208,10 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): assert parameter_2.values == test_data_2["values"] assert parameter_2.units == test_data_2["units"] - unit_2 = platform.backend.units.create("Unit 2") + unit_2 = platform.units.create("Unit 2") # Test updating of existing keys - parameter_4 = platform.backend.optimization.parameters.create( - run_id=run.id, + parameter_4 = run.optimization.parameters.create( name="Parameter 4", constrained_to_indexsets=[indexset.name, indexset_2.name], ) @@ -222,21 +221,14 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): "values": [1, "2", 2.3, "4"], "units": [unit.name] * 4, } - platform.backend.optimization.parameters.add_data( - parameter_id=parameter_4.id, data=test_data_6 - ) + parameter_4.add(data=test_data_6) test_data_7 = { indexset.name: ["foo", "foo", "bar", "bar", "bar"], indexset_2.name: [1, 2, 3, 2, 1], "values": [1, 2.3, 3, 4, "5"], "units": [unit.name] * 2 + [unit_2.name] * 3, } - platform.backend.optimization.parameters.add_data( - parameter_id=parameter_4.id, data=test_data_7 - ) - parameter_4 = platform.backend.optimization.parameters.get( - run_id=run.id, name="Parameter 4" - ) + parameter_4.add(data=test_data_7) expected = ( pd.DataFrame(test_data_7) .set_index([indexset.name, indexset_2.name]) From ce5ff142afceb5533db2319a99e31380ee820c51 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Fri, 30 Aug 2024 13:09:11 +0200 Subject: [PATCH 16/19] Raise minimum pandas version to enable add_data upsert --- .github/workflows/pytest.yaml | 4 ++-- poetry.lock | 2 +- pyproject.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index a844053d..e8cb5c5b 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -39,12 +39,12 @@ jobs: postgres-version: "16" backend: "sqlite,rest-sqlite" pandas-version: "2.1.3" - # pandas 2.0.0 + # pandas 2.1.0 - python-version: "3.10" with-pyarrow: true postgres-version: "16" backend: "sqlite,rest-sqlite" - pandas-version: "2.0.0" + pandas-version: "2.1.0" name: py${{ matrix.python-version }} | backend=${{ matrix.backend }} | with-pyarrow=${{ 
matrix.with-pyarrow }} | pgsql=${{ matrix.postgres-version }} | pandas=${{ matrix.pandas-version }} runs-on: ubuntu-latest diff --git a/poetry.lock b/poetry.lock index ec65d6ba..f516b2c5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3719,4 +3719,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.10, <3.13" -content-hash = "48a2f4520df959aa58b32454831d31d271320b1bbd65945d369ba844b9ea3b85" +content-hash = "99b50a6c19e87ad973082c0a08edf2a6bce5fcce2d22bfb13627ab191ed5b9db" diff --git a/pyproject.toml b/pyproject.toml index e23f22d9..86f99914 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ fastapi = ">=0.100.0" httpx = { extras = ["http2"], version = ">=0.25.0" } openpyxl = ">=3.0.9" # remove legacy-handling in timeseries- and meta-repositories when dropping pandas < 2.2 -pandas = ">=2.0.0" +pandas = ">=2.1.0" pandera = ">=0.17.0" pydantic = ">=2.3.0" python = ">=3.10, <3.13" From b41eb288ea4ede5bdd6003e38c541bc4dcd759dd Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 30 Sep 2024 11:15:09 +0200 Subject: [PATCH 17/19] Generalize UsageError for more optimization items --- ixmp4/core/exceptions.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ixmp4/core/exceptions.py b/ixmp4/core/exceptions.py index 6c64a12b..6c43fbf0 100644 --- a/ixmp4/core/exceptions.py +++ b/ixmp4/core/exceptions.py @@ -186,9 +186,6 @@ class OptimizationDataValidationError(IxmpError): http_error_name = "optimization_data_validation_error" -# == Optimization.Table == - - -class OptimizationTableUsageError(IxmpError): +class OptimizationItemUsageError(IxmpError): http_status_code = 422 - http_error_name = "optimization_table_usage_error" + http_error_name = "optimization_item_usage_error" From f160e9cbcdee34f0bb7679e6fe279853a3bab332 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 30 Sep 2024 11:15:39 +0200 Subject: [PATCH 18/19] Use generalized UsageError for Table --- ixmp4/data/db/optimization/table/repository.py | 10 +++++++--- tests/core/test_optimization_table.py | 6 +++--- tests/data/test_optimization_table.py | 6 +++--- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ixmp4/data/db/optimization/table/repository.py b/ixmp4/data/db/optimization/table/repository.py index 6edf0bf5..eef57e9f 100644 --- a/ixmp4/data/db/optimization/table/repository.py +++ b/ixmp4/data/db/optimization/table/repository.py @@ -3,7 +3,7 @@ import pandas as pd from ixmp4 import db -from ixmp4.core.exceptions import OptimizationTableUsageError +from ixmp4.core.exceptions import OptimizationItemUsageError from ixmp4.data.abstract import optimization as abstract from ixmp4.data.auth.decorators import guard @@ -20,7 +20,7 @@ class TableRepository( ): model_class = Table - UsageError = OptimizationTableUsageError + UsageError = OptimizationItemUsageError def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) @@ -112,6 +112,7 @@ def create( constrained_to_indexsets = list(constrained_to_indexsets) if column_names and len(column_names) != len(constrained_to_indexsets): raise self.UsageError( + f"While processing Table {name}: \n" "`constrained_to_indexsets` and `column_names` not equal in length! " "Please provide the same number of entries for both!" 
) @@ -120,7 +121,10 @@ def create( # if len(constrained_to_indexsets) != len(set(constrained_to_indexsets)): # raise self.UsageError("Each dimension must be constrained to a unique indexset!") # noqa if column_names and len(column_names) != len(set(column_names)): - raise self.UsageError("The given `column_names` are not unique!") + raise self.UsageError( + f"While processing Table {name}: \n" + "The given `column_names` are not unique!" + ) table = super().create( run_id=run_id, diff --git a/tests/core/test_optimization_table.py b/tests/core/test_optimization_table.py index 0fd081c9..57110950 100644 --- a/tests/core/test_optimization_table.py +++ b/tests/core/test_optimization_table.py @@ -5,7 +5,7 @@ from ixmp4.core import IndexSet, Table from ixmp4.core.exceptions import ( OptimizationDataValidationError, - OptimizationTableUsageError, + OptimizationItemUsageError, ) from ..utils import create_indexsets_for_run @@ -64,7 +64,7 @@ def test_create_table(self, platform: ixmp4.Platform): ) # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(OptimizationTableUsageError, match="not equal in length"): + with pytest.raises(OptimizationItemUsageError, match="not equal in length"): _ = run.optimization.tables.create( name="Table 2", constrained_to_indexsets=[indexset.name], @@ -81,7 +81,7 @@ def test_create_table(self, platform: ixmp4.Platform): # Test duplicate column_names raise with pytest.raises( - OptimizationTableUsageError, match="`column_names` are not unique" + OptimizationItemUsageError, match="`column_names` are not unique" ): _ = run.optimization.tables.create( name="Table 3", diff --git a/tests/data/test_optimization_table.py b/tests/data/test_optimization_table.py index 4eaef13e..f4643da7 100644 --- a/tests/data/test_optimization_table.py +++ b/tests/data/test_optimization_table.py @@ -5,7 +5,7 @@ from ixmp4 import Table from ixmp4.core.exceptions import ( OptimizationDataValidationError, - OptimizationTableUsageError, + OptimizationItemUsageError, ) from ..utils import create_indexsets_for_run @@ -60,7 +60,7 @@ def test_create_table(self, platform: ixmp4.Platform): ) # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(OptimizationTableUsageError, match="not equal in length"): + with pytest.raises(OptimizationItemUsageError, match="not equal in length"): _ = platform.backend.optimization.tables.create( run_id=run.id, name="Table 2", @@ -79,7 +79,7 @@ def test_create_table(self, platform: ixmp4.Platform): # Test duplicate column_names raise with pytest.raises( - OptimizationTableUsageError, match="`column_names` are not unique" + OptimizationItemUsageError, match="`column_names` are not unique" ): _ = platform.backend.optimization.tables.create( run_id=run.id, From a9e5ac8682babe6e8970b1ca63450edc2e3aa174 Mon Sep 17 00:00:00 2001 From: Fridolin Glatter Date: Mon, 30 Sep 2024 11:16:28 +0200 Subject: [PATCH 19/19] Use own errors for Parameter --- ixmp4/data/db/optimization/parameter/model.py | 4 ++- .../db/optimization/parameter/repository.py | 27 ++++++++++++++----- tests/core/test_optimization_parameter.py | 23 +++++++++++----- tests/data/test_optimization_parameter.py | 23 +++++++++++----- 4 files changed, 57 insertions(+), 20 deletions(-) diff --git a/ixmp4/data/db/optimization/parameter/model.py b/ixmp4/data/db/optimization/parameter/model.py index 3199675d..bb052ea7 100644 --- a/ixmp4/data/db/optimization/parameter/model.py +++ b/ixmp4/data/db/optimization/parameter/model.py @@ -4,6 +4,7 @@ from 
sqlalchemy.orm import validates from ixmp4 import db +from ixmp4.core.exceptions import OptimizationDataValidationError from ixmp4.data import types from ixmp4.data.abstract import optimization as abstract @@ -14,6 +15,7 @@ class Parameter(base.BaseModel): # NOTE: These might be mixin-able, but would require some abstraction NotFound: ClassVar = abstract.Parameter.NotFound NotUnique: ClassVar = abstract.Parameter.NotUnique + DataInvalid: ClassVar = OptimizationDataValidationError DeletionPrevented: ClassVar = abstract.Parameter.DeletionPrevented # constrained_to_indexsets: ClassVar[list[str] | None] = None @@ -28,7 +30,7 @@ def validate_data(self, key, data: dict[str, Any]): del data_to_validate["values"] del data_to_validate["units"] _ = utils.validate_data( - key=key, + host=self, data=data_to_validate, columns=self.columns, ) diff --git a/ixmp4/data/db/optimization/parameter/repository.py b/ixmp4/data/db/optimization/parameter/repository.py index f4cbb85f..699cfcf4 100644 --- a/ixmp4/data/db/optimization/parameter/repository.py +++ b/ixmp4/data/db/optimization/parameter/repository.py @@ -3,6 +3,7 @@ import pandas as pd from ixmp4 import db +from ixmp4.core.exceptions import OptimizationItemUsageError from ixmp4.data.abstract import optimization as abstract from ixmp4.data.auth.decorators import guard from ixmp4.data.db.unit import Unit @@ -20,6 +21,8 @@ class ParameterRepository( ): model_class = Parameter + UsageError = OptimizationItemUsageError + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.docs = ParameterDocsRepository(*args, **kwargs) @@ -112,16 +115,20 @@ def create( if isinstance(constrained_to_indexsets, str): constrained_to_indexsets = list(constrained_to_indexsets) if column_names and len(column_names) != len(constrained_to_indexsets): - raise ValueError( + raise self.UsageError( + f"While processing Parameter {name}: \n" "`constrained_to_indexsets` and `column_names` not equal in length! " "Please provide the same number of entries for both!" ) # TODO: activate something like this if each column must be indexed by a unique # indexset # if len(constrained_to_indexsets) != len(set(constrained_to_indexsets)): - # raise ValueError("Each dimension must be constrained to a unique indexset!") # noqa + # raise self.UsageError("Each dimension must be constrained to a unique indexset!") # noqa if column_names and len(column_names) != len(set(column_names)): - raise ValueError("The given `column_names` are not unique!") + raise self.UsageError( + f"While processing Parameter {name}: \n" + "The given `column_names` are not unique!" + ) parameter = super().create( run_id=run_id, @@ -149,13 +156,19 @@ def tabulate(self, *args, **kwargs) -> pd.DataFrame: @guard("edit") def add_data(self, parameter_id: int, data: dict[str, Any] | pd.DataFrame) -> None: if isinstance(data, dict): - data = pd.DataFrame.from_dict(data=data) + try: + data = pd.DataFrame.from_dict(data=data) + except ValueError as e: + raise Parameter.DataInvalid(str(e)) from e + parameter = self.get_by_id(id=parameter_id) missing_columns = set(["values", "units"]) - set(data.columns) - assert ( - not missing_columns - ), f"Parameter.data must include the column(s): {', '.join(missing_columns)}!" + if missing_columns: + raise OptimizationItemUsageError( + "Parameter.data must include the column(s): " + f"{', '.join(missing_columns)}!" 
+ ) # Can use a set for now, need full column if we care about order for unit_name in set(data["units"]): diff --git a/tests/core/test_optimization_parameter.py b/tests/core/test_optimization_parameter.py index e0f92768..8602ddf1 100644 --- a/tests/core/test_optimization_parameter.py +++ b/tests/core/test_optimization_parameter.py @@ -3,6 +3,10 @@ import ixmp4 from ixmp4.core import IndexSet, Parameter +from ixmp4.core.exceptions import ( + OptimizationDataValidationError, + OptimizationItemUsageError, +) from ..utils import assert_unordered_equality, create_indexsets_for_run @@ -60,7 +64,7 @@ def test_create_parameter(self, platform: ixmp4.Platform): ) # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(ValueError, match="not equal in length"): + with pytest.raises(OptimizationItemUsageError, match="not equal in length"): _ = run.optimization.parameters.create( "Parameter 2", constrained_to_indexsets=[indexset.name], @@ -76,7 +80,9 @@ def test_create_parameter(self, platform: ixmp4.Platform): assert parameter_2.columns[0].name == "Column 1" # Test duplicate column_names raise - with pytest.raises(ValueError, match="`column_names` are not unique"): + with pytest.raises( + OptimizationItemUsageError, match="`column_names` are not unique" + ): _ = run.optimization.parameters.create( name="Parameter 3", constrained_to_indexsets=[indexset.name, indexset.name], @@ -149,7 +155,7 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): ) with pytest.raises( - AssertionError, match=r"must include the column\(s\): values!" + OptimizationItemUsageError, match=r"must include the column\(s\): values!" ): parameter_2.add( pd.DataFrame( @@ -162,7 +168,7 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): ) with pytest.raises( - AssertionError, match=r"must include the column\(s\): units!" + OptimizationItemUsageError, match=r"must include the column\(s\): units!" 
): parameter_2.add( data=pd.DataFrame( @@ -176,7 +182,10 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): # By converting data to pd.DataFrame, we automatically enforce equal length # of new columns, raises All arrays must be of the same length otherwise: - with pytest.raises(ValueError, match="All arrays must be of the same length"): + with pytest.raises( + OptimizationDataValidationError, + match="All arrays must be of the same length", + ): parameter_2.add( data={ indexset.name: ["foo", "foo"], @@ -186,7 +195,9 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): }, ) - with pytest.raises(ValueError, match="contains duplicate rows"): + with pytest.raises( + OptimizationDataValidationError, match="contains duplicate rows" + ): parameter_2.add( data={ indexset.name: ["foo", "foo"], diff --git a/tests/data/test_optimization_parameter.py b/tests/data/test_optimization_parameter.py index 7365da34..dac17e86 100644 --- a/tests/data/test_optimization_parameter.py +++ b/tests/data/test_optimization_parameter.py @@ -2,6 +2,10 @@ import pytest import ixmp4 +from ixmp4.core.exceptions import ( + OptimizationDataValidationError, + OptimizationItemUsageError, +) from ixmp4.data.abstract import Parameter from ..utils import assert_unordered_equality, create_indexsets_for_run @@ -60,7 +64,7 @@ def test_create_parameter(self, platform: ixmp4.Platform): ) # Test mismatch in constrained_to_indexsets and column_names raises - with pytest.raises(ValueError, match="not equal in length"): + with pytest.raises(OptimizationItemUsageError, match="not equal in length"): _ = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 2", @@ -78,7 +82,9 @@ def test_create_parameter(self, platform: ixmp4.Platform): assert parameter_2.columns[0].name == "Column 1" # Test duplicate column_names raise - with pytest.raises(ValueError, match="`column_names` are not unique"): + with pytest.raises( + OptimizationItemUsageError, match="`column_names` are not unique" + ): _ = platform.backend.optimization.parameters.create( run_id=run.id, name="Parameter 3", @@ -161,7 +167,7 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): ) with pytest.raises( - AssertionError, match=r"must include the column\(s\): values!" + OptimizationItemUsageError, match=r"must include the column\(s\): values!" ): platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, @@ -175,7 +181,7 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): ) with pytest.raises( - AssertionError, match=r"must include the column\(s\): units!" + OptimizationItemUsageError, match=r"must include the column\(s\): units!" 
): platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, @@ -190,7 +196,10 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): # By converting data to pd.DataFrame, we automatically enforce equal length # of new columns, raises All arrays must be of the same length otherwise: - with pytest.raises(ValueError, match="All arrays must be of the same length"): + with pytest.raises( + OptimizationDataValidationError, + match="All arrays must be of the same length", + ): platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={ @@ -201,7 +210,9 @@ def test_parameter_add_data(self, platform: ixmp4.Platform): }, ) - with pytest.raises(ValueError, match="contains duplicate rows"): + with pytest.raises( + OptimizationDataValidationError, match="contains duplicate rows" + ): platform.backend.optimization.parameters.add_data( parameter_id=parameter_2.id, data={