From f8d94d7034a5ffe49375d0ba1bbd7830879c3d27 Mon Sep 17 00:00:00 2001 From: Dilan Pathirana <59329744+dilpath@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:29:14 +0100 Subject: [PATCH] Create standard/schema for a `Model` (#130) * refactor `Model` with mkstd * remove constraint column (constraints aren't implemented yet) * update test cases 1-8 expected models * refactor to remove `PetabMixin` * predecessor_model now always set to virtual or real model; update candidate_space.py * model subspace: require explicit parameter definitions; implement `can_fix_all` * fix 0009 expected yaml * add schema; add to RTD --------- Co-authored-by: Daniel Weindl --- doc/conf.py | 2 +- doc/problem_definition.rst | 53 +- doc/standard/make_schemas.py | 3 + doc/standard/model.yaml | 67 + doc/test_suite.rst | 12 +- petab_select/candidate_space.py | 128 +- petab_select/cli.py | 2 +- petab_select/constants.py | 209 +-- petab_select/model.py | 1222 +++++++---------- petab_select/model_subspace.py | 133 +- petab_select/models.py | 86 +- petab_select/petab.py | 105 +- petab_select/plot.py | 42 +- petab_select/ui.py | 28 +- pyproject.toml | 3 +- test/analyze/input/models.yaml | 8 +- test/analyze/test_analyze.py | 18 +- .../test_files/predecessor_model.yaml | 47 +- test/candidate_space/test_famos.py | 23 +- test/cli/input/model.yaml | 16 +- test/cli/input/models.yaml | 26 +- test/cli/test_cli.py | 1 - test/model/input/model.yaml | 25 +- test/pypesto/generate_expected_models.py | 27 +- test_cases/0001/expected.yaml | 20 +- test_cases/0002/expected.yaml | 20 +- test_cases/0003/expected.yaml | 20 +- test_cases/0004/expected.yaml | 20 +- test_cases/0005/expected.yaml | 20 +- test_cases/0006/expected.yaml | 20 +- test_cases/0007/expected.yaml | 18 +- test_cases/0008/expected.yaml | 16 +- test_cases/0009/expected.yaml | 58 +- test_cases/0009/predecessor_model.yaml | 4 +- 34 files changed, 1179 insertions(+), 1323 deletions(-) create mode 100644 doc/standard/make_schemas.py create mode 100644 doc/standard/model.yaml diff --git a/doc/conf.py b/doc/conf.py index 93865e8e..e21a5944 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -58,7 +58,7 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = "sphinx_rtd_theme" -# html_static_path = ['_static'] +html_static_path = ["standard"] html_logo = "logo/logo-wide.svg" diff --git a/doc/problem_definition.rst b/doc/problem_definition.rst index eb05699d..9545e300 100644 --- a/doc/problem_definition.rst +++ b/doc/problem_definition.rst @@ -7,7 +7,9 @@ Model selection problems for PEtab Select are defined by the following files: #. a specification of the model space, and #. (optionally) a specification of the initial candidate model. -The different file formats are described below. +The different file formats are described below. Each file format is a YAML file +and comes with a YAML-formatted JSON schema, such that these files can be +easily worked with independently of the PEtab Select library. 1. Selection problem -------------------- @@ -116,28 +118,41 @@ can be specified like selected model. Here, the format for a single model is shown. Multiple models can be specified -as a YAML list of the same format. The only required key is the ``petab_yaml``, -as a model requires a PEtab problem. Other keys are required in different +as a YAML list of the same format. Some optional keys are required in different contexts (for example, model comparison will require ``criteria``). +Brief format description +^^^^^^^^^^^^^^^^^^^^^^^^ + + .. 
code-block:: yaml

-    criteria: # dict[string, float] (optional). Criterion ID => criterion value.
-    estimated_parameters: # dict[string, float] (optional). Parameter ID => parameter value.
-    model_hash: # string (optional).
-    model_id: # string (optional).
-    model_subspace_id: # string (optional).
-    model_subspace_indices: # string (optional).
-    parameters: # dict[string, float] (optional). Parameter ID => parameter value or "estimate".
-    petab_yaml: # string.
-    predecessor_model_hash: # string (optional).
+    model_subspace_id: # str (required).
+    model_subspace_indices: # list[int] (required).
+    criteria: # dict[str, float] (optional). Criterion ID => criterion value.
+    model_hash: # str (optional).
+    model_subspace_petab_yaml: # str (required).
+    estimated_parameters: # dict[str, float] (optional). Parameter ID => parameter value.
+    iteration: # int (optional).
+    model_id: # str (optional).
+    parameters: # dict[str, float | int | "estimate"] (required). Parameter ID => parameter value or "estimate".
+    predecessor_model_hash: # str (optional).

-- ``criteria``: The value of the criterion by which model selection was performed, at least. Optionally, other criterion values too.
-- ``estimated_parameters``: Parameter estimates, not only of parameters specified to be estimated in a model space file, but also parameters specified to be estimated in the original PEtab problem of the model.
-- ``model_hash``: The model hash, generated by the PEtab Select library.
-- ``model_id``: The model ID.
 - ``model_subspace_id``: Same as in the model space files.
 - ``model_subspace_indices``: The indices that locate this model in its model subspace.
-- ``parameters``: The parameters from the problem (either values or ``'estimate'``) (a specific combination from a model space file, but uncalibrated).
-- ``petab_yaml``: Same as in model space files.
-- ``predecessor_model_hash``: The hash of the model that preceded this model during the model selection process.
+- ``criteria``: At least the value of the criterion by which model selection was performed. Optionally, other criterion values too.
+- ``model_hash``: The model hash, generated by the PEtab Select library. The format is ``[MODEL_SUBSPACE_ID]-[MODEL_SUBSPACE_INDICES_HASH]``. If all parameters in the model are defined like ``0;estimate``, then the hash is a string of ``1`` and ``0``, for parameters that are estimated or not, respectively.
+- ``model_subspace_petab_yaml``: Same as in model space files (see ``petab_yaml``).
+- ``estimated_parameters``: Parameter estimates, including all estimated parameters that are not in the model selection problem; i.e., parameters that are set to be estimated in the model subspace PEtab problem but don't appear in the column header of the model space file.
+- ``iteration``: The iteration of model selection in which this model appeared.
+- ``model_id``: The model ID.
+- ``parameters``: The parameter combination from the model space file that defines this model (either values or ``"estimate"``). Not the calibrated values, which are in ``estimated_parameters``!
+- ``predecessor_model_hash``: The hash of the model that preceded this model during the model selection process. Will be ``virtual_initial_model-`` if the model had no predecessor model.
+
+Schema
+^^^^^^
+
+The format is provided as a `YAML-formatted JSON schema <_static/model.yaml>`_, which enables easy validation with various third-party tools.
+
+..
literalinclude:: standard/model.yaml + :language: yaml diff --git a/doc/standard/make_schemas.py b/doc/standard/make_schemas.py new file mode 100644 index 00000000..8e371a11 --- /dev/null +++ b/doc/standard/make_schemas.py @@ -0,0 +1,3 @@ +from petab_select.model import ModelStandard + +ModelStandard.save_schema("model.yaml") diff --git a/doc/standard/model.yaml b/doc/standard/model.yaml new file mode 100644 index 00000000..a49a042d --- /dev/null +++ b/doc/standard/model.yaml @@ -0,0 +1,67 @@ +$defs: + ModelHash: + type: string +description: "A model.\n\nSee :class:`ModelBase` for the standardized attributes.\ + \ Additional\nattributes are available in ``Model`` to improve usability.\n\nAttributes:\n\ + \ _model_subspace_petab_problem:\n The PEtab problem of the model subspace\ + \ of this model.\n If not provided, this is reconstructed from\n :attr:`model_subspace_petab_yaml`." +properties: + model_subspace_id: + title: Model Subspace Id + type: string + model_subspace_indices: + items: + type: integer + title: Model Subspace Indices + type: array + criteria: + additionalProperties: + type: number + title: Criteria + type: object + model_hash: + $ref: '#/$defs/ModelHash' + default: null + model_subspace_petab_yaml: + anyOf: + - format: path + type: string + - type: 'null' + title: Model Subspace Petab Yaml + estimated_parameters: + anyOf: + - additionalProperties: + type: number + type: object + - type: 'null' + default: null + title: Estimated Parameters + iteration: + anyOf: + - type: integer + - type: 'null' + default: null + title: Iteration + model_id: + default: null + title: Model Id + type: string + parameters: + additionalProperties: + anyOf: + - type: number + - type: integer + - const: estimate + type: string + title: Parameters + type: object + predecessor_model_hash: + $ref: '#/$defs/ModelHash' + default: null +required: +- model_subspace_id +- model_subspace_indices +- model_subspace_petab_yaml +- parameters +title: Model +type: object diff --git a/doc/test_suite.rst b/doc/test_suite.rst index 9b9aa443..4684963a 100644 --- a/doc/test_suite.rst +++ b/doc/test_suite.rst @@ -15,7 +15,6 @@ the model format. - Method - Model space files - Compressed format - - Constraints files - Predecessor (initial) models files * - 0001 - (all) @@ -23,34 +22,29 @@ the model format. - 1 - - - - * - 0002 [#f1]_ - AIC - forward - 1 - - - - * - 0003 - BIC - - all + - brute force - 1 - Yes - - - * - 0004 - AICc - backward - 1 - - - 1 - * - 0005 - AIC - forward - 1 - - - - 1 * - 0006 - AIC @@ -58,27 +52,23 @@ the model format. - 1 - - - - * - 0007 [#f2]_ - AIC - forward - 1 - - - - * - 0008 [#f2]_ - AICc - backward - 1 - - - - * - 0009 [#f3]_ - AICc - FAMoS - 1 - Yes - - - Yes .. [#f1] Model ``M1_0`` differs from ``M1_1`` in three parameters, but only 1 additional estimated parameter. The effect of this on model selection criteria needs to be clarified. Test case 0006 is a duplicate of 0002 that doesn't have this issue. 
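As a rough illustration of the new standard in use -- a minimal sketch, assuming the package-level ``Model`` import and placeholder file names, and using only the ``ModelStandard``/``Model`` calls introduced in this patch:

.. code-block:: python

    from pathlib import Path

    from petab_select import Model
    from petab_select.model import ModelStandard

    # Regenerate the YAML-formatted JSON schema for single models.
    ModelStandard.save_schema("model_schema.yaml")

    # Load a single-model YAML file. Relative paths inside the file are
    # resolved against the file's directory.
    model = Model.from_yaml(Path("model.yaml"))

    # The human-readable hash encodes the model subspace ID and the
    # location of the model within that subspace.
    print(model.hash)

    # Write the model back to disk; paths are stored relative to the
    # output file's directory.
    model.to_yaml("model_copy.yaml")
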
diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py index 03dd2f78..a9a3be39 100644 --- a/petab_select/candidate_space.py +++ b/petab_select/candidate_space.py @@ -20,14 +20,20 @@ PREDECESSOR_MODEL, PREVIOUS_METHODS, TYPE_PATH, - VIRTUAL_INITIAL_MODEL, VIRTUAL_INITIAL_MODEL_METHODS, Criterion, Method, ) from .handlers import TYPE_LIMIT, LimitHandler -from .model import Model, ModelHash, default_compare +from .model import ( + VIRTUAL_INITIAL_MODEL, + VIRTUAL_INITIAL_MODEL_HASH, + Model, + ModelHash, + default_compare, +) from .models import Models +from .petab import get_petab_parameters __all__ = [ "BackwardCandidateSpace", @@ -159,11 +165,7 @@ def set_iteration_user_calibrated_models( iteration_user_calibrated_models = Models() for model in self.models: if ( - ( - user_model := user_calibrated_models.get( - model.get_hash(), None - ) - ) + (user_model := user_calibrated_models.get(model.hash, None)) is not None ) and ( user_model.get_criterion( @@ -171,18 +173,14 @@ def set_iteration_user_calibrated_models( ) is not None ): - logging.info( - f"Using user-supplied result for: {model.get_hash()}" - ) + logging.info(f"Using user-supplied result for: {model.hash}") user_model_copy = copy.deepcopy(user_model) user_model_copy.predecessor_model_hash = ( - self.predecessor_model.get_hash() - if isinstance(self.predecessor_model, Model) - else self.predecessor_model + self.predecessor_model.hash + ) + iteration_user_calibrated_models[user_model_copy.hash] = ( + user_model_copy ) - iteration_user_calibrated_models[ - user_model_copy.get_hash() - ] = user_model_copy else: iteration_uncalibrated_models.append(model) self.iteration_user_calibrated_models = ( @@ -345,11 +343,7 @@ def accept( distance: The distance of the model from the predecessor model. """ - model.predecessor_model_hash = ( - self.predecessor_model.get_hash() - if isinstance(self.predecessor_model, Model) - else self.predecessor_model - ) + model.predecessor_model_hash = self.predecessor_model.hash self.models.append(model) self.distances.append(distance) self.set_excluded_hashes(model, extend=True) @@ -376,7 +370,7 @@ def excluded( ``True`` if the ``model`` is excluded, otherwise ``False``. """ if isinstance(model_hash, Model): - model_hash = model_hash.get_hash() + model_hash = model_hash.hash return model_hash in self.get_excluded_hashes() @abc.abstractmethod @@ -417,7 +411,7 @@ def consider(self, model: Model | None) -> bool: return False if self.excluded(model): warnings.warn( - f"Model `{model.get_hash()}` has been previously excluded " + f"Model `{model.hash}` has been previously excluded " "from the candidate space so is skipped here.", RuntimeWarning, stacklevel=2, @@ -435,19 +429,14 @@ def reset_accepted(self) -> None: self.models = Models() self.distances = [] - def set_predecessor_model(self, predecessor_model: Model | str | None): + def set_predecessor_model(self, predecessor_model: Model | None): """Set the predecessor model. See class attributes for arguments. """ + if predecessor_model is None: + predecessor_model = VIRTUAL_INITIAL_MODEL self.predecessor_model = predecessor_model - if ( - self.predecessor_model == VIRTUAL_INITIAL_MODEL - and self.method not in VIRTUAL_INITIAL_MODEL_METHODS - ): - raise ValueError( - f"A virtual initial model was requested for a method ({self.method}) that does not support them." 
- ) def get_predecessor_model(self) -> str | Model: """Get the predecessor model.""" @@ -472,7 +461,7 @@ def set_excluded_hashes( excluded_hashes = set() for potential_hash in hashes: if isinstance(potential_hash, Model): - potential_hash = potential_hash.get_hash() + potential_hash = potential_hash.hash excluded_hashes.add(potential_hash) if extend: @@ -531,7 +520,7 @@ def wrapper(): def reset( self, - predecessor_model: Model | str | None | None = None, + predecessor_model: Model | None = None, # FIXME change `Any` to some `TYPE_MODEL_HASH` (e.g. union of str/int/float) excluded_hashes: list[ModelHash] | None = None, limit: TYPE_LIMIT = None, @@ -592,18 +581,24 @@ def distances_in_estimated_parameters( model0 = self.predecessor_model model1 = model - if model0 != VIRTUAL_INITIAL_MODEL and not model1.petab_yaml.samefile( - model0.petab_yaml + if ( + model0.hash != VIRTUAL_INITIAL_MODEL_HASH + and not model1.model_subspace_petab_yaml.samefile( + model0.model_subspace_petab_yaml + ) ): + # FIXME raise NotImplementedError( - "Computation of distances between different PEtab problems is " - "currently not supported. This error is also raised if the same " - "PEtab problem is read from YAML files in different locations." + "Computing distances between models that have different " + "model subspace PEtab problems is currently not supported. " + "This check is based on the PEtab YAML file location." ) # All parameters from the PEtab problem are used in the computation. - if model0 == VIRTUAL_INITIAL_MODEL: - parameter_ids = list(model1.petab_parameters) + if model0.hash == VIRTUAL_INITIAL_MODEL_HASH: + parameter_ids = list( + get_petab_parameters(model1._model_subspace_petab_problem) + ) if self.method == Method.FORWARD: parameters0 = np.array([0 for _ in parameter_ids]) elif self.method == Method.BACKWARD: @@ -615,21 +610,12 @@ def distances_in_estimated_parameters( "developers." ) else: - parameter_ids = list(model0.petab_parameters) + parameter_ids = list( + get_petab_parameters(model0._model_subspace_petab_problem) + ) parameters0 = np.array( model0.get_parameter_values(parameter_ids=parameter_ids) ) - # FIXME need to take superset of all parameters amongst all PEtab problems - # in all model subspaces to get an accurate comparable distance. Currently - # only reasonable when working with a single PEtab problem for all models - # in all subspaces. - if model0.petab_yaml.resolve() != model1.petab_yaml.resolve(): - raise ValueError( - "Computing the distance between different models that " - 'have different "base" PEtab problems is not yet ' - f"supported. First base PEtab problem: {model0.petab_yaml}." - f" Second base PEtab problem: {model1.petab_yaml}." - ) parameters1 = np.array( model1.get_parameter_values(parameter_ids=parameter_ids) ) @@ -722,7 +708,7 @@ def is_plausible(self, model: Model) -> bool: # A model is plausible if the number of estimated parameters strictly # increases (or decreases, if `self.direction == -1`), and no # previously estimated parameters become fixed. 
- if self.predecessor_model == VIRTUAL_INITIAL_MODEL or ( + if self.predecessor_model.hash == VIRTUAL_INITIAL_MODEL.hash or ( n_steps > 0 and distances["l1"] == n_steps ): return True @@ -882,7 +868,7 @@ class FamosCandidateSpace(CandidateSpace): def __init__( self, *args, - predecessor_model: Model | str | None | None = None, + predecessor_model: Model | None = None, critical_parameter_sets: list = [], swap_parameter_sets: list = [], method_scheme: dict[tuple, str] = None, @@ -914,10 +900,10 @@ def __init__( predecessor_model = VIRTUAL_INITIAL_MODEL if ( - predecessor_model == VIRTUAL_INITIAL_MODEL + predecessor_model.hash == VIRTUAL_INITIAL_MODEL.hash and critical_parameter_sets ) or ( - predecessor_model != VIRTUAL_INITIAL_MODEL + predecessor_model.hash != VIRTUAL_INITIAL_MODEL.hash and not self.check_critical(predecessor_model) ): raise ValueError( @@ -925,7 +911,7 @@ def __init__( ) if ( - predecessor_model == VIRTUAL_INITIAL_MODEL + predecessor_model.hash == VIRTUAL_INITIAL_MODEL.hash and self.initial_method not in VIRTUAL_INITIAL_MODEL_METHODS ): raise ValueError( @@ -976,11 +962,7 @@ def __init__( ), Method.LATERAL: LateralCandidateSpace( *args, - predecessor_model=( - predecessor_model - if predecessor_model != VIRTUAL_INITIAL_MODEL - else None - ), + predecessor_model=predecessor_model, max_steps=1, **kwargs, ), @@ -1097,7 +1079,8 @@ def update_from_iteration_calibrated_models( go_into_switch_method = True for model in iteration_calibrated_models: if ( - self.best_model_of_current_run == VIRTUAL_INITIAL_MODEL + self.best_model_of_current_run.hash + == VIRTUAL_INITIAL_MODEL_HASH or default_compare( model0=self.best_model_of_current_run, model1=model, @@ -1183,9 +1166,9 @@ def check_swap(self, model: Model) -> bool: return True predecessor_estimated_parameters_ids = set( - self.predecessor_model.get_estimated_parameter_ids_all() + self.predecessor_model.get_estimated_parameter_ids() ) - estimated_parameters_ids = set(model.get_estimated_parameter_ids_all()) + estimated_parameters_ids = set(model.get_estimated_parameter_ids()) swapped_parameters_ids = estimated_parameters_ids.symmetric_difference( predecessor_estimated_parameters_ids @@ -1198,7 +1181,7 @@ def check_swap(self, model: Model) -> bool: def check_critical(self, model: Model) -> bool: """Check if the model contains all necessary critical parameters""" - estimated_parameters_ids = set(model.get_estimated_parameter_ids_all()) + estimated_parameters_ids = set(model.get_estimated_parameter_ids()) for critical_set in self.critical_parameter_sets: if not estimated_parameters_ids.intersection(set(critical_set)): return False @@ -1303,6 +1286,9 @@ def jump_to_most_distant( # critical parameter from each critical parameter set if not self.check_critical(predecessor_model): for critical_set in self.critical_parameter_sets: + # FIXME is this a good idea? probably better to request + # the model from the model subspace, rather than editing + # the parameters... predecessor_model.parameters[critical_set[0]] = ESTIMATE # self.update_method(self.initial_method) @@ -1341,7 +1327,11 @@ def get_most_distant( most_distant_indices = [] # FIXME for multiple PEtab problems? 
- parameter_ids = self.best_models[0].petab_parameters + parameter_ids = list( + get_petab_parameters( + self.best_models[0]._model_subspace_petab_problem + ) + ) for model in self.best_models: model_estimated_parameters = np.array( @@ -1392,7 +1382,7 @@ def get_most_distant( ) most_distant_model = Model( - petab_yaml=model.petab_yaml, + model_subspace_petab_yaml=model.model_subspace_petab_yaml, model_subspace_id=model.model_subspace_id, model_subspace_indices=most_distant_indices, parameters=most_distant_parameters, @@ -1413,7 +1403,6 @@ class LateralCandidateSpace(CandidateSpace): def __init__( self, *args, - predecessor_model: Model | None, max_steps: int = None, **kwargs, ): @@ -1425,7 +1414,6 @@ def __init__( super().__init__( *args, method=Method.LATERAL, - predecessor_model=predecessor_model, **kwargs, ) self.max_steps = max_steps diff --git a/petab_select/cli.py b/petab_select/cli.py index 37f83551..d0def393 100644 --- a/petab_select/cli.py +++ b/petab_select/cli.py @@ -177,7 +177,7 @@ def start_iteration( excluded_model_hashes += f.read().split("\n") excluded_hashes = [ - excluded_model.get_hash() for excluded_model in excluded_models + excluded_model.hash for excluded_model in excluded_models ] excluded_hashes += [ ModelHash.from_hash(hash_str) for hash_str in excluded_model_hashes diff --git a/petab_select/constants.py b/petab_select/constants.py index 2946aeb5..c25f6cfa 100644 --- a/petab_select/constants.py +++ b/petab_select/constants.py @@ -8,46 +8,56 @@ from pathlib import Path from typing import Literal -# Zero-indexed column/row indices -MODEL_ID_COLUMN = 0 -PETAB_YAML_COLUMN = 1 -# It is assumed that all columns after PARAMETER_DEFINITIONS_START contain -# parameter IDs. -PARAMETER_DEFINITIONS_START = 2 -HEADER_ROW = 0 +# Checked -PARAMETER_VALUE_DELIMITER = ";" -CODE_DELIMITER = "-" -ESTIMATE = "estimate" -PETAB_ESTIMATE_FALSE = 0 -PETAB_ESTIMATE_TRUE = 1 +# Criteria +CRITERIA = "criteria" +CRITERION = "criterion" -# TYPING_PATH = Union[str, Path] -TYPE_PATH = str | Path -# Model space file columns -# TODO ensure none of these occur twice in the column header (this would -# suggest that a parameter has a conflicting name) -# MODEL_ID = 'modelId' # TODO already defined, reorganize constants -# YAML = 'YAML' # FIXME +class Criterion(str, Enum): + """String literals for model selection criteria.""" + + #: The Akaike information criterion. + AIC = "AIC" + #: The corrected Akaike information criterion. + AICC = "AICc" + #: The Bayesian information criterion. + BIC = "BIC" + #: The likelihood. + LH = "LH" + #: The log-likelihood. + LLH = "LLH" + #: The negative log-likelihood. + NLLH = "NLLH" + #: The sum of squared residuals. + SSR = "SSR" + + +# Model +ESTIMATED_PARAMETERS = "estimated_parameters" +ITERATION = "iteration" MODEL_ID = "model_id" MODEL_SUBSPACE_ID = "model_subspace_id" MODEL_SUBSPACE_INDICES = "model_subspace_indices" -MODEL_CODE = "model_code" +PARAMETERS = "parameters" +MODEL_SUBSPACE_PETAB_YAML = "model_subspace_petab_yaml" +MODEL_SUBSPACE_PETAB_PROBLEM = "_model_subspace_petab_problem" +PETAB_YAML = "petab_yaml" +ROOT_PATH = "root_path" +ESTIMATE = "estimate" + +PETAB_PROBLEM = "petab_problem" + +# Model hash MODEL_HASH = "model_hash" -MODEL_HASHES = "model_hashes" MODEL_HASH_DELIMITER = "-" +MODEL_SUBSPACE_INDICES_HASH = "model_subspace_indices_hash" MODEL_SUBSPACE_INDICES_HASH_DELIMITER = "." 
MODEL_SUBSPACE_INDICES_HASH_MAP = ( # [0-9]+[A-Z]+[a-z] string.digits + string.ascii_uppercase + string.ascii_lowercase ) -PETAB_HASH_DIGEST_SIZE = None -# If `predecessor_model_hash` is defined for a model, it is the ID of the model that the -# current model was/is to be compared to. This is part of the result and is -# only (optionally) set by the PEtab calibration tool. It is not defined by the -# PEtab Select model selection problem (but may be subsequently stored in the -# PEtab Select model report format. PREDECESSOR_MODEL_HASH = "predecessor_model_hash" ITERATION = "iteration" PETAB_PROBLEM = "petab_problem" @@ -57,61 +67,24 @@ # MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_ID, PETAB_YAML] MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_SUBSPACE_ID, PETAB_YAML] -# COMPARED_MODEL_ID = 'compared_'+MODEL_ID -YAML_FILENAME = "yaml" - -# DISTANCES = { -# FORWARD: { -# 'l1': 1, -# 'size': 1, -# }, -# BACKWARD: { -# 'l1': 1, -# 'size': -1, -# }, -# LATERAL: { -# 'l1': 2, -# 'size': 0, -# }, -# } - -CRITERIA = "criteria" - -PARAMETERS = "parameters" -# PARAMETER_ESTIMATE = 'parameter_estimate' -ESTIMATED_PARAMETERS = "estimated_parameters" +# PEtab +PETAB_ESTIMATE_TRUE = 1 -# Problem keys -CRITERION = "criterion" -METHOD = "method" -VERSION = "version" +# Problem MODEL_SPACE_FILES = "model_space_files" -PROBLEM_ID = "problem_id" PROBLEM = "problem" +PROBLEM_ID = "problem_id" +VERSION = "version" +# Candidate space CANDIDATE_SPACE = "candidate_space" CANDIDATE_SPACE_ARGUMENTS = "candidate_space_arguments" +METHOD = "method" METHOD_SCHEME = "method_scheme" -PREVIOUS_METHODS = "previous_methods" NEXT_METHOD = "next_method" +PREVIOUS_METHODS = "previous_methods" PREDECESSOR_MODEL = "predecessor_model" -MODEL = "model" -MODELS = "models" -UNCALIBRATED_MODELS = "uncalibrated_models" -TERMINATE = "terminate" - -# Parameters can be fixed to a value, or estimated if indicated with the string -# `ESTIMATE`. -TYPE_PARAMETER = float | int | Literal[ESTIMATE] -TYPE_PARAMETER_OPTIONS = list[TYPE_PARAMETER] -# Parameter ID -> parameter value mapping. -TYPE_PARAMETER_DICT = dict[str, TYPE_PARAMETER] -# Parameter ID -> multiple possible parameter values. -TYPE_PARAMETER_OPTIONS_DICT = dict[str, TYPE_PARAMETER_OPTIONS] - -TYPE_CRITERION = float - class Method(str, Enum): """String literals for model selection methods.""" @@ -130,24 +103,13 @@ class Method(str, Enum): MOST_DISTANT = "most_distant" -class Criterion(str, Enum): - """String literals for model selection criteria.""" - - #: The Akaike information criterion. - AIC = "AIC" - #: The corrected Akaike information criterion. - AICC = "AICc" - #: The Bayesian information criterion. - BIC = "BIC" - #: The likelihood. - LH = "LH" - #: The log-likelihood. - LLH = "LLH" - #: The negative log-likelihood. - NLLH = "NLLH" - #: The sum of squared residuals. - SSR = "SSR" +# Typing +TYPE_PATH = str | Path +# UI +MODELS = "models" +UNCALIBRATED_MODELS = "uncalibrated_models" +TERMINATE = "terminate" #: Methods that move through model space by taking steps away from some model. STEPWISE_METHODS = [ @@ -163,7 +125,8 @@ class Criterion(str, Enum): ] #: Virtual initial models can be used to initialize some initial model methods. -VIRTUAL_INITIAL_MODEL = "virtual_initial_model" +# FIXME replace by real "dummy" model object +# VIRTUAL_INITIAL_MODEL = "virtual_initial_model" #: Methods that are compatible with a virtual initial model. 
VIRTUAL_INITIAL_MODEL_METHODS = [ Method.BACKWARD, @@ -177,3 +140,69 @@ class Criterion(str, Enum): if not x.startswith("_") and x not in ("sys", "Enum", "Path", "Dict", "List", "Literal", "Union") ] + + +# Unchecked +MODEL = "model" + +# Zero-indexed column/row indices +MODEL_ID_COLUMN = 0 +PETAB_YAML_COLUMN = 1 +# It is assumed that all columns after PARAMETER_DEFINITIONS_START contain +# parameter IDs. +PARAMETER_DEFINITIONS_START = 2 +HEADER_ROW = 0 + +PARAMETER_VALUE_DELIMITER = ";" +CODE_DELIMITER = "-" +PETAB_ESTIMATE_FALSE = 0 + +# TYPING_PATH = Union[str, Path] + +# Model space file columns +# TODO ensure none of these occur twice in the column header (this would +# suggest that a parameter has a conflicting name) +# MODEL_ID = 'modelId' # TODO already defined, reorganize constants +# YAML = 'YAML' # FIXME +MODEL_CODE = "model_code" +MODEL_HASHES = "model_hashes" +PETAB_HASH_DIGEST_SIZE = None +# If `predecessor_model_hash` is defined for a model, it is the ID of the model that the +# current model was/is to be compared to. This is part of the result and is +# only (optionally) set by the PEtab calibration tool. It is not defined by the +# PEtab Select model selection problem (but may be subsequently stored in the +# PEtab Select model report format. +HASH = "hash" + +# MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_ID, PETAB_YAML] +MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_SUBSPACE_ID, PETAB_YAML] + +# COMPARED_MODEL_ID = 'compared_'+MODEL_ID +YAML_FILENAME = "yaml" + +# DISTANCES = { +# FORWARD: { +# 'l1': 1, +# 'size': 1, +# }, +# BACKWARD: { +# 'l1': 1, +# 'size': -1, +# }, +# LATERAL: { +# 'l1': 2, +# 'size': 0, +# }, +# } + + +# Parameters can be fixed to a value, or estimated if indicated with the string +# `ESTIMATE`. +TYPE_PARAMETER = float | int | Literal[ESTIMATE] +TYPE_PARAMETER_OPTIONS = list[TYPE_PARAMETER] +# Parameter ID -> parameter value mapping. +TYPE_PARAMETER_DICT = dict[str, TYPE_PARAMETER] +# Parameter ID -> multiple possible parameter values. 
+TYPE_PARAMETER_OPTIONS_DICT = dict[str, TYPE_PARAMETER_OPTIONS] + +TYPE_CRITERION = float diff --git a/petab_select/model.py b/petab_select/model.py index 81d73145..ae92df8a 100644 --- a/petab_select/model.py +++ b/petab_select/model.py @@ -2,17 +2,18 @@ from __future__ import annotations +import copy import warnings from os.path import relpath from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, ClassVar, Literal +import mkstd import petab.v1 as petab -import yaml -from more_itertools import one -from petab.v1.C import ESTIMATE, NOMINAL_VALUE +from petab.v1.C import NOMINAL_VALUE from .constants import ( + ESTIMATE, CRITERIA, ESTIMATED_PARAMETERS, ITERATION, @@ -21,470 +22,559 @@ MODEL_ID, MODEL_SUBSPACE_ID, MODEL_SUBSPACE_INDICES, + MODEL_SUBSPACE_INDICES_HASH, MODEL_SUBSPACE_INDICES_HASH_DELIMITER, MODEL_SUBSPACE_INDICES_HASH_MAP, - PARAMETERS, - PETAB_ESTIMATE_TRUE, + MODEL_SUBSPACE_PETAB_YAML, PETAB_PROBLEM, PETAB_YAML, - PREDECESSOR_MODEL_HASH, - TYPE_CRITERION, + ROOT_PATH, TYPE_PARAMETER, - TYPE_PATH, - VIRTUAL_INITIAL_MODEL, Criterion, ) from .criteria import CriterionComputer from .misc import ( parameter_string_to_value, ) -from .petab import PetabMixin +from .petab import get_petab_parameters if TYPE_CHECKING: from .problem import Problem + +from pydantic import ( + BaseModel, + PrivateAttr, + ValidationInfo, + ValidatorFunctionWrapHandler, +) + __all__ = [ "Model", "default_compare", "ModelHash", - "VIRTUAL_INITIAL_MODEL_HASH", + "VIRTUAL_INITIAL_MODEL", ] +from pydantic import ( + Field, + field_serializer, + field_validator, + model_serializer, + model_validator, +) -class Model(PetabMixin): - """A (possibly uncalibrated) model. - NB: some of these attribute names correspond to constants defined in the - `constants.py` file, to facilitate loading models from/saving models to - disk (see the `Model.saved_attributes` class attribute). +class ModelHash(BaseModel): + """The model hash. + + The model hash is designed to be human-readable and able to be converted + back into the corresponding model. Currently, if two models from two + different model subspaces are actually the same PEtab problem, they will + still have different model hashes. Attributes: - converters_load: - Functions to convert attributes from YAML to :class:`Model`. - converters_save: - Functions to convert attributes from :class:`Model` to YAML. - criteria: - The criteria values of the calibrated model (e.g. AIC). - iteration: - The iteration of the model selection algorithm where this model was - identified. - model_id: - The model ID. - petab_yaml: - The path to the PEtab problem YAML file. - parameters: - Parameter values that will overwrite the PEtab problem definition, - or change parameters to be estimated. - estimated_parameters: - Parameter estimates from a model calibration tool, for parameters - that are specified as estimated in the PEtab problem or PEtab - Select model YAML. These are untransformed values (i.e., not on - log scale). - saved_attributes: - Attributes that will be saved to disk by the :meth:`Model.to_yaml` - method. + model_subspace_id: + The ID of the model subspace of the model. Unique up to a single + PEtab Select problem model space. + model_subspace_indices_hash: + A hash of the location of the model in its model + subspace. Unique up to a single model subspace. 
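+
+    For example (with a hypothetical model subspace ID ``M``), the model at
+    indices ``[0, 1]`` of that subspace is hashed to ``M-01``:
+    ``hash_model_subspace_indices`` maps the indices to ``01``, which is
+    then joined to the subspace ID with ``-``.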
""" - saved_attributes = ( - MODEL_ID, - MODEL_SUBSPACE_ID, - MODEL_SUBSPACE_INDICES, - MODEL_HASH, - PREDECESSOR_MODEL_HASH, - PETAB_YAML, - PARAMETERS, - ESTIMATED_PARAMETERS, - CRITERIA, - ITERATION, - ) - converters_load = { - MODEL_ID: lambda x: x, - MODEL_SUBSPACE_ID: lambda x: x, - MODEL_SUBSPACE_INDICES: lambda x: [] if not x else x, - MODEL_HASH: lambda x: x, - PREDECESSOR_MODEL_HASH: lambda x: x, - PETAB_YAML: lambda x: x, - PARAMETERS: lambda x: x, - ESTIMATED_PARAMETERS: lambda x: x, - CRITERIA: lambda x: { - # `criterion_id_value` is the ID of the criterion in the enum `Criterion`. - Criterion(criterion_id_value): float(criterion_value) - for criterion_id_value, criterion_value in x.items() - }, - ITERATION: lambda x: int(x) if x is not None else x, - } - converters_save = { - MODEL_ID: lambda x: str(x), - MODEL_SUBSPACE_ID: lambda x: str(x), - MODEL_SUBSPACE_INDICES: lambda x: [int(xi) for xi in x], - MODEL_HASH: lambda x: str(x), - PREDECESSOR_MODEL_HASH: lambda x: str(x) if x is not None else x, - PETAB_YAML: lambda x: str(x), - PARAMETERS: lambda x: {str(k): v for k, v in x.items()}, - # FIXME handle with a `set_estimated_parameters` method instead? - # to avoid `float` cast here. Reason for cast is because e.g. pyPESTO - # can provide type `np.float64`, which causes issues when writing to - # YAML. - # ESTIMATED_PARAMETERS: lambda x: x, - ESTIMATED_PARAMETERS: lambda x: { - str(id): float(value) for id, value in x.items() - }, - CRITERIA: lambda x: { - criterion_id.value: float(criterion_value) - for criterion_id, criterion_value in x.items() - }, - ITERATION: lambda x: int(x) if x is not None else None, - } + model_subspace_id: str + model_subspace_indices_hash: str - def __init__( - self, - petab_yaml: TYPE_PATH, - model_subspace_id: str = None, - model_id: str = None, - model_subspace_indices: list[int] = None, - predecessor_model_hash: str = None, - parameters: dict[str, int | float] = None, - estimated_parameters: dict[str, int | float] = None, - criteria: dict[str, float] = None, - iteration: int = None, - # Optionally provided to reduce repeated parsing of `petab_yaml`. - petab_problem: petab.Problem | None = None, - model_hash: Any | None = None, - ): - self.model_id = model_id - self.model_subspace_id = model_subspace_id - self.model_subspace_indices = model_subspace_indices - # TODO clean parameters, ensure single float or str (`ESTIMATE`) type - self.parameters = parameters - self.estimated_parameters = estimated_parameters - self.criteria = criteria - self.iteration = iteration + @model_validator(mode="wrap") + def _check_kwargs( + kwargs: dict[str, str | list[int]] | ModelHash, + handler: ValidatorFunctionWrapHandler, + info: ValidationInfo, + ) -> ModelHash: + """Handle `ModelHash` creation from different sources. + + See documentation of Pydantic wrap validators. 
+ """ + if isinstance(kwargs, ModelHash): + return kwargs - self.predecessor_model_hash = predecessor_model_hash - if self.predecessor_model_hash is not None: - self.predecessor_model_hash = ModelHash.from_hash( - self.predecessor_model_hash + if isinstance(kwargs, dict): + kwargs[MODEL_SUBSPACE_INDICES_HASH] = ( + ModelHash.hash_model_subspace_indices( + kwargs[MODEL_SUBSPACE_INDICES] + ) ) + del kwargs[MODEL_SUBSPACE_INDICES] + + if isinstance(kwargs, str): + kwargs = ModelHash.kwargs_from_str(hash_str=kwargs) + + expected_model_hash = None + if MODEL_HASH in kwargs: + expected_model_hash = kwargs[MODEL_HASH] + if isinstance(expected_model_hash, str): + expected_model_hash = ModelHash.from_str(expected_model_hash) + del kwargs[MODEL_HASH] + + model_hash = handler(kwargs) + + if expected_model_hash is not None: + if model_hash != expected_model_hash: + warnings.warn( + "The provided model hash is inconsistent with its model " + "subspace and model subspace indices. Old hash: " + f"`{expected_model_hash}`. New hash: `{model_hash}`.", + stacklevel=2, + ) - if self.parameters is None: - self.parameters = {} - if self.estimated_parameters is None: - self.estimated_parameters = {} - if self.criteria is None: - self.criteria = {} + return model_hash - super().__init__(petab_yaml=petab_yaml, petab_problem=petab_problem) + @model_serializer() + def _serialize(self) -> str: + return str(self) - self.model_hash = None - self.get_hash() - if model_hash is not None: - model_hash = ModelHash.from_hash(model_hash) - if self.model_hash != model_hash: - raise ValueError( - "The supplied model hash does not match the computed " - "model hash." - ) + @staticmethod + def kwargs_from_str(hash_str: str) -> dict[str, str]: + """Convert a model hash string into constructor kwargs.""" + return dict( + zip( + [MODEL_SUBSPACE_ID, MODEL_SUBSPACE_INDICES_HASH], + hash_str.split(MODEL_HASH_DELIMITER), + strict=True, + ) + ) + + @staticmethod + def hash_model_subspace_indices(model_subspace_indices: list[int]) -> str: + """Hash the location of a model in its subspace. + + Args: + model_subspace_indices: + The location (indices) of the model in its subspace. + + Returns: + The hash. + """ + if not model_subspace_indices: + return "" + if max(model_subspace_indices) < len(MODEL_SUBSPACE_INDICES_HASH_MAP): + return "".join( + MODEL_SUBSPACE_INDICES_HASH_MAP[index] + for index in model_subspace_indices + ) + return MODEL_SUBSPACE_INDICES_HASH_DELIMITER.join( + str(i) for i in model_subspace_indices + ) + + def unhash_model_subspace_indices(self) -> list[int]: + """Get the location of a model in its subspace. + + Returns: + The location, as indices of the subspace. + """ + if ( + MODEL_SUBSPACE_INDICES_HASH_DELIMITER + not in self.model_subspace_indices_hash + ): + return [ + MODEL_SUBSPACE_INDICES_HASH_MAP.index(s) + for s in self.model_subspace_indices_hash + ] + return [ + int(s) + for s in self.model_subspace_indices_hash.split( + MODEL_SUBSPACE_INDICES_HASH_DELIMITER + ) + ] + + def get_model(self, problem: Problem) -> Model: + """Get the model that a hash corresponds to. + + Args: + problem: + The :class:`Problem` that will be used to look up the model. + + Returns: + The model. + """ + return problem.model_space.model_subspaces[ + self.model_subspace_id + ].indices_to_model(self.unhash_model_subspace_indices()) + + def __hash__(self) -> str: + """Not the model hash! 
Use `Model.hash` instead.""" + return hash(str(self)) + + def __eq__(self, other_hash: str | ModelHash) -> bool: + """Check whether two model hashes are equivalent.""" + return str(self) == str(other_hash) + + def __str__(self) -> str: + """Convert the hash to a string.""" + return MODEL_HASH_DELIMITER.join( + [self.model_subspace_id, self.model_subspace_indices_hash] + ) + + def __repr__(self) -> str: + """Convert the hash to a string representation.""" + return str(self) + + +class VirtualModelBase(BaseModel): + """Sufficient information for the virtual initial model.""" + + model_subspace_id: str + """The ID of the subspace that this model belongs to.""" + model_subspace_indices: list[int] + """The location of this model in its subspace.""" + criteria: dict[Criterion, float] = Field(default_factory=dict) + """The criterion values of the calibrated model (e.g. AIC).""" + model_hash: ModelHash = Field(default=None) + """The model hash (treat as read-only after initialization).""" + + @model_validator(mode="after") + def _check_hash(self: ModelBase) -> ModelBase: + """Validate the model hash.""" + kwargs = { + MODEL_SUBSPACE_ID: self.model_subspace_id, + MODEL_SUBSPACE_INDICES: self.model_subspace_indices, + } + if self.model_hash is not None: + kwargs[MODEL_HASH] = self.model_hash + self.model_hash = ModelHash.model_validate(kwargs) + + return self + + @field_validator("criteria", mode="after") + @classmethod + def _fix_criteria_typing( + cls, criteria: dict[str | Criterion, float] + ) -> dict[Criterion, float]: + """Fix criteria typing.""" + criteria = { + ( + criterion + if isinstance(criterion, Criterion) + else Criterion[criterion] + ): value + for criterion, value in criteria.items() + } + return criteria + + @field_serializer("criteria") + def _serialize_criteria( + self, criteria: dict[Criterion, float] + ) -> dict[str, float]: + """Serialize criteria.""" + criteria = { + criterion.value: value for criterion, value in criteria.items() + } + return criteria + + @property + def hash(self) -> ModelHash: + """Get the model hash.""" + return self.model_hash + + def __hash__(self) -> None: + """Use ``Model.hash`` instead.""" + raise NotImplementedError("Use `Model.hash` instead.") + + # def __eq__(self, other_model: Model | _VirtualInitialModel) -> bool: + # """Check whether two model hashes are equivalent.""" + # return self.hash == other.hash + + +class ModelBase(VirtualModelBase): + """Definition of the standardized model. + + :class:`Model` is extended with additional helper methods -- use that + instead of ``ModelBase``. + """ + + # TODO would use `FilePath` here (and remove `None` as an option), + # but then need to handle the + # `VIRTUAL_INITIAL_MODEL` dummy path differently. + model_subspace_petab_yaml: Path | None + """The location of the base PEtab problem for the model subspace. + + N.B.: Not the PEtab problem for this model specifically! + Use :meth:`Model.to_petab` to get the model-specific PEtab + problem. + """ + estimated_parameters: dict[str, float] | None = Field(default=None) + """The parameter estimates of the calibrated model (always unscaled).""" + iteration: int | None = Field(default=None) + """The iteration of model selection that calibrated this model.""" + model_id: str = Field(default=None) + """The model ID.""" + parameters: dict[str, float | int | Literal[ESTIMATE]] + """PEtab problem parameters overrides for this model. + + For example, fixes parameters to certain values, or sets them to be + estimated. 
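+
+    A hypothetical example that fixes ``k1`` to ``0.1`` and estimates
+    ``k2``: ``{"k1": 0.1, "k2": "estimate"}``.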
+ """ + predecessor_model_hash: ModelHash = Field(default=None) + """The predecessor model hash.""" + + PATH_ATTRIBUTES: ClassVar[list[str]] = [ + MODEL_SUBSPACE_PETAB_YAML, + ] + + @model_validator(mode="wrap") + def _fix_relative_paths( + data: dict[str, Any] | ModelBase, + handler: ValidatorFunctionWrapHandler, + info: ValidationInfo, + ) -> ModelBase: + if isinstance(data, ModelBase): + return data + model = handler(data) + + root_path = None + if ROOT_PATH in data: + root_path = data.pop(ROOT_PATH) + if root_path is None: + return model + + model.resolve_paths(root_path=root_path) + return model + + @model_validator(mode="after") + def _fix_id(self: ModelBase) -> ModelBase: + """Fix a missing ID by setting it to the hash.""" if self.model_id is None: - self.model_id = self.get_hash() + self.model_id = str(self.hash) + return self - self.criterion_computer = CriterionComputer(self) + @model_validator(mode="after") + def _fix_predecessor_model_hash(self: ModelBase) -> ModelBase: + """Fix missing predecessor model hashes. - def set_criterion(self, criterion: Criterion, value: float) -> None: - """Set a criterion value for the model. + Sets them to ``VIRTUAL_INITIAL_MODEL.hash``. + """ + if self.predecessor_model_hash is None: + self.predecessor_model_hash = VIRTUAL_INITIAL_MODEL.hash + self.predecessor_model_hash = ModelHash.model_validate( + self.predecessor_model_hash + ) + return self + + def to_yaml( + self, + yaml_path: str | Path, + ) -> None: + """Save a model to a YAML file. + + All paths will be made relative to the ``yaml_path`` directory. Args: - criterion: - The criterion (e.g. ``petab_select.constants.Criterion.AIC``). - value: - The criterion value for the (presumably calibrated) model. + yaml_path: + The model YAML file location. """ - if criterion in self.criteria: - warnings.warn( - "Overwriting saved criterion value. " - f"Criterion: {criterion}. Value: {self.get_criterion(criterion)}.", - stacklevel=2, + root_path = Path(yaml_path).parent + + model = copy.deepcopy(self) + model.set_relative_paths(root_path=root_path) + ModelStandard.save_data(data=model, filename=yaml_path) + + def set_relative_paths(self, root_path: str | Path) -> None: + """Change all paths to be relative to ``root_path``.""" + root_path = Path(root_path).resolve() + for path_attribute in self.PATH_ATTRIBUTES: + setattr( + self, + path_attribute, + relpath( + getattr(self, path_attribute).resolve(), + start=root_path, + ), ) - # FIXME debug why value is overwritten during test case 0002. - if False: - print( - "Overwriting saved criterion value. " - f"Criterion: {criterion}. Value: {self.get_criterion(criterion)}." - ) - breakpoint() - self.criteria[criterion] = value + + def resolve_paths(self, root_path: str | Path) -> None: + """Resolve all paths to be relative to ``root_path``.""" + root_path = Path(root_path).resolve() + for path_attribute in self.PATH_ATTRIBUTES: + setattr( + self, + path_attribute, + (root_path / getattr(self, path_attribute)).resolve(), + ) + + +class Model(ModelBase): + """A model. + + See :class:`ModelBase` for the standardized attributes. Additional + attributes are available in ``Model`` to improve usability. + + Attributes: + _model_subspace_petab_problem: + The PEtab problem of the model subspace of this model. + If not provided, this is reconstructed from + :attr:`model_subspace_petab_yaml`. 
+ """ + + _model_subspace_petab_problem: petab.Problem = PrivateAttr(default=None) + + @model_validator(mode="after") + def _fix_petab_problem(self: Model) -> Model: + """Fix a missing PEtab problem by loading it from disk.""" + if ( + self._model_subspace_petab_problem is None + and self.model_subspace_petab_yaml is not None + ): + self._model_subspace_petab_problem = petab.Problem.from_yaml( + self.model_subspace_petab_yaml + ) + return self + + def model_post_init(self, __context: Any) -> None: + """Add additional instance attributes.""" + self._criterion_computer = CriterionComputer(self) def has_criterion(self, criterion: Criterion) -> bool: - """Check whether the model provides a value for a criterion. + """Check whether a value for a criterion has been set.""" + return self.criteria.get(criterion) is not None - Args: - criterion: - The criterion (e.g. `petab_select.constants.Criterion.AIC`). - """ - # TODO also `and self.criteria[id] is not None`? - return criterion in self.criteria + def set_criterion(self, criterion: Criterion, value: float) -> None: + """Set a criterion value.""" + if self.has_criterion(criterion=criterion): + warnings.warn( + f"Overwriting saved criterion value. Criterion: {criterion}. " + f"Value: `{self.get_criterion(criterion)}`.", + stacklevel=2, + ) + self.criteria[criterion] = float(value) def get_criterion( self, criterion: Criterion, compute: bool = True, raise_on_failure: bool = True, - ) -> TYPE_CRITERION | None: + ) -> float | None: """Get a criterion value for the model. Args: criterion: - The ID of the criterion (e.g. ``petab_select.constants.Criterion.AIC``). + The criterion. compute: - Whether to try to compute the criterion value based on other model - attributes. For example, if the ``'AIC'`` criterion is requested, this - can be computed from a predetermined model likelihood and its - number of estimated parameters. + Whether to attempt computing the criterion value. For example, + the AIC can be computed if the likelihood is available. raise_on_failure: - Whether to raise a `ValueError` if the criterion could not be - computed. If `False`, `None` is returned. + Whether to raise a ``ValueError`` if the criterion could not be + computed. If ``False``, ``None`` is returned. Returns: - The criterion value, or `None` if it is not available. - TODO check for previous use of this method before `.get` was used + The criterion value, or ``None`` if it is not available. """ - if criterion not in self.criteria and compute: + if not self.has_criterion(criterion=criterion) and compute: self.compute_criterion( criterion=criterion, raise_on_failure=raise_on_failure, ) - # value = self.criterion_computer(criterion=id) - # self.set_criterion(id=id, value=value) - return self.criteria.get(criterion, None) def compute_criterion( self, criterion: Criterion, raise_on_failure: bool = True, - ) -> TYPE_CRITERION: + ) -> float: """Compute a criterion value for the model. - The value will also be stored, which will overwrite any previously stored value - for the criterion. + The value will also be stored, which will overwrite any previously + stored value for the criterion. Args: criterion: - The ID of the criterion - (e.g. :obj:`petab_select.constants.Criterion.AIC`). + The criterion. raise_on_failure: - Whether to raise a `ValueError` if the criterion could not be - computed. If `False`, `None` is returned. + Whether to raise a ``ValueError`` if the criterion could not be + computed. If ``False``, ``None`` is returned. Returns: The criterion value. 
""" + criterion_value = None try: - criterion_value = self.criterion_computer(criterion) + criterion_value = self._criterion_computer(criterion) self.set_criterion(criterion, criterion_value) - result = criterion_value except ValueError as err: if raise_on_failure: raise ValueError( - f"Insufficient information to compute criterion `{criterion}`." + "Insufficient information to compute criterion " + f"`{criterion}`." ) from err - result = None - return result + return criterion_value def set_estimated_parameters( self, estimated_parameters: dict[str, float], scaled: bool = False, ) -> None: - """Set the estimated parameters. + """Set parameter estimates. Args: estimated_parameters: The estimated parameters. scaled: - Whether the ``estimated_parameters`` values are on the scale - defined in the PEtab problem (``True``), or untransformed - (``False``). + Whether the parameter estimates are on the scale defined in the + PEtab problem (``True``), or unscaled (``False``). """ if scaled: - estimated_parameters = self.petab_problem.unscale_parameters( - estimated_parameters - ) - self.estimated_parameters = estimated_parameters - - @staticmethod - def from_dict( - model_dict: dict[str, Any], - base_path: TYPE_PATH = None, - petab_problem: petab.Problem = None, - ) -> Model: - """Generate a model from a dictionary of attributes. - - Args: - model_dict: - A dictionary of attributes. The keys are attribute - names, the values are the corresponding attribute values for - the model. Required attributes are the required arguments of - the :meth:`Model.__init__` method. - base_path: - The path that any relative paths in the model are relative to - (e.g. the path to the PEtab problem YAML file - :meth:`Model.petab_yaml` may be relative). - petab_problem: - Optionally provide the PEtab problem, to avoid loading it multiple - times. - NB: This may cause issues if multiple models write to the same PEtab - problem in memory. - - Returns: - A model instance, initialized with the provided attributes. - """ - unknown_attributes = set(model_dict).difference(Model.converters_load) - if unknown_attributes: - warnings.warn( - "Ignoring unknown attributes: " - + ", ".join(unknown_attributes), - stacklevel=2, - ) - - if base_path is not None: - model_dict[PETAB_YAML] = base_path / model_dict[PETAB_YAML] - - model_dict = { - attribute: Model.converters_load[attribute](value) - for attribute, value in model_dict.items() - if attribute in Model.converters_load - } - model_dict[PETAB_PROBLEM] = petab_problem - return Model(**model_dict) - - @staticmethod - def from_yaml(model_yaml: TYPE_PATH) -> Model: - """Generate a model from a PEtab Select model YAML file. - - Args: - model_yaml: - The path to the PEtab Select model YAML file. - - Returns: - A model instance, initialized with the provided attributes. - """ - with open(str(model_yaml)) as f: - model_dict = yaml.safe_load(f) - # TODO check that the hash is reproducible - if isinstance(model_dict, list): - try: - model_dict = one(model_dict) - except ValueError: - if len(model_dict) <= 1: - raise - raise ValueError( - "The provided YAML file contains a list with greater than " - "one element. Use the `Models.from_yaml` or provide a " - "YAML file with only one model specified." 
+ estimated_parameters = ( + self._model_subspace_petab_problem.unscale_parameters( + estimated_parameters ) - - return Model.from_dict(model_dict, base_path=Path(model_yaml).parent) - - def to_dict( - self, - resolve_paths: bool = True, - paths_relative_to: str | Path = None, - ) -> dict[str, Any]: - """Generate a dictionary from the attributes of a :class:`Model` instance. - - Args: - resolve_paths: - Whether to resolve relative paths into absolute paths. - paths_relative_to: - If not ``None``, paths will be converted to be relative to this path. - Takes priority over ``resolve_paths``. - - Returns: - A dictionary of attributes. The keys are attribute - names, the values are the corresponding attribute values for - the model. Required attributes are the required arguments of - the :meth:`Model.__init__` method. - """ - model_dict = {} - for attribute in self.saved_attributes: - model_dict[attribute] = self.converters_save[attribute]( - getattr(self, attribute) ) - # TODO test - if resolve_paths: - if model_dict[PETAB_YAML]: - model_dict[PETAB_YAML] = str( - Path(model_dict[PETAB_YAML]).resolve() - ) - if paths_relative_to is not None: - if model_dict[PETAB_YAML]: - model_dict[PETAB_YAML] = relpath( - Path(model_dict[PETAB_YAML]).resolve(), - Path(paths_relative_to).resolve(), - ) - return model_dict - - def to_yaml(self, petab_yaml: TYPE_PATH, *args, **kwargs) -> None: - """Generate a PEtab Select model YAML file from a :class:`Model` instance. - - Parameters: - petab_yaml: - The location where the PEtab Select model YAML file will be - saved. - args, kwargs: - Additional arguments are passed to ``self.to_dict``. - """ - # FIXME change `getattr(self, PETAB_YAML)` to be relative to - # destination? - # kind of fixed, as the path will be resolved in `to_dict`. - with open(petab_yaml, "w") as f: - yaml.dump(self.to_dict(*args, **kwargs), f) - # yaml.dump(self.to_dict(), str(petab_yaml)) + self.estimated_parameters = estimated_parameters def to_petab( self, - output_path: TYPE_PATH = None, + output_path: str | Path = None, set_estimated_parameters: bool | None = None, - ) -> dict[str, petab.Problem | TYPE_PATH]: - """Generate a PEtab problem. + ) -> dict[str, petab.Problem | str | Path]: + """Generate the PEtab problem for this model. Args: output_path: - The directory where PEtab files will be written to disk. If not - specified, the PEtab files will not be written to disk. + If specified, the PEtab tables will be written to disk, inside + this directory. set_estimated_parameters: - Whether to set the nominal value of estimated parameters to their - estimates. If parameter estimates are available, this - will default to `True`. + Whether to implement ``Model.estimated_parameters`` as the + nominal values of the PEtab problem parameter table. + Defaults to ``True`` if ``Model.estimated_parameters`` is set. Returns: - A 2-tuple. The first value is a PEtab problem that can be used - with a PEtab-compatible tool for calibration of this model. If - ``output_path`` is not ``None``, the second value is the path to a - PEtab YAML file that can be used to load the PEtab problem (the - first value) into any PEtab-compatible tool. + The PEtab problem. Also returns the path of the PEtab problem YAML + file, if ``output_path`` is specified. """ - # TODO could use `copy.deepcopy(self.petab_problem)` from PetabMixin? 
- petab_problem = petab.Problem.from_yaml(str(self.petab_yaml)) + petab_problem = petab.Problem.from_yaml(self.model_subspace_petab_yaml) if set_estimated_parameters is None and self.estimated_parameters: set_estimated_parameters = True + if set_estimated_parameters: + required_estimates = { + parameter_id + for parameter_id, value in self.parameters.items() + if value == ESTIMATE + } + missing_estimates = required_estimates.difference( + self.estimated_parameters + ) + if missing_estimates: + raise ValueError( + "Try again with `set_estimated_parameters=False`, because " + "some parameter estimates are missing. Missing estimates for: " + f"`{missing_estimates}`." + ) + for parameter_id, parameter_value in self.parameters.items(): # If the parameter is to be estimated. if parameter_value == ESTIMATE: petab_problem.parameter_df.loc[parameter_id, ESTIMATE] = 1 - if set_estimated_parameters: - if parameter_id not in self.estimated_parameters: - raise ValueError( - "Not all estimated parameters are available " - "in `model.estimated_parameters`. Hence, the " - "estimated parameter vector cannot be set as " - "the nominal value in the PEtab problem. " - "Try calling this method with " - "`set_estimated_parameters=False`." - ) petab_problem.parameter_df.loc[ parameter_id, NOMINAL_VALUE ] = self.estimated_parameters[parameter_id] @@ -494,7 +584,6 @@ def to_petab( petab_problem.parameter_df.loc[parameter_id, NOMINAL_VALUE] = ( parameter_string_to_value(parameter_value) ) - # parameter_value petab_yaml = None if output_path is not None: @@ -509,94 +598,43 @@ def to_petab( PETAB_YAML: petab_yaml, } - def get_hash(self) -> str: - """Get the model hash. - - See the documentation for :class:`ModelHash` for more information. - - This is not implemented as ``__hash__`` because Python automatically - truncates values in a system-dependent manner, which reduces - interoperability - ( https://docs.python.org/3/reference/datamodel.html#object.__hash__ ). - - Returns: - The hash. - """ - if self.model_hash is None: - self.model_hash = ModelHash.from_model(model=self) - return self.model_hash - - def __hash__(self) -> None: - """Use `Model.get_hash` instead.""" - raise NotImplementedError("Use `Model.get_hash() instead.`") - - def __str__(self): - """Get a print-ready string representation of the model. - - Returns: - The print-ready string representation, in TSV format. - """ + def __str__(self) -> str: + """Printable model summary.""" parameter_ids = "\t".join(self.parameters.keys()) parameter_values = "\t".join(str(v) for v in self.parameters.values()) - header = "\t".join([MODEL_ID, PETAB_YAML, parameter_ids]) + header = "\t".join( + [MODEL_ID, MODEL_SUBSPACE_PETAB_YAML, parameter_ids] + ) data = "\t".join( - [self.model_id, str(self.petab_yaml), parameter_values] + [ + self.model_id, + str(self.model_subspace_petab_yaml), + parameter_values, + ] ) - # header = f'{MODEL_ID}\t{PETAB_YAML}\t{parameter_ids}' - # data = f'{self.model_id}\t{self.petab_yaml}\t{parameter_values}' return f"{header}\n{data}" def __repr__(self) -> str: - """The model hash.""" - return f'' + """The model hash. - def get_mle(self) -> dict[str, float]: - """Get the maximum likelihood estimate of the model.""" - """ - FIXME(dilpath) - # Check if original PEtab problem or PEtab Select model has estimated - # parameters. e.g. can use some of `self.to_petab` to get the parameter - # df and see if any are estimated. 
-        if not self.has_estimated_parameters:
-            warn('The MLE for this model contains no estimated parameters.')
-        if not all([
-            parameter_id in getattr(self, ESTIMATED_PARAMETERS)
-            for parameter_id in self.get_estimated_parameter_ids_all()
-        ]):
-            warn('Not all estimated parameters have estimates stored.')
-        petab_problem = petab.Problem.from_yaml(str(self.petab_yaml))
-        return {
-            parameter_id: (
-                getattr(self, ESTIMATED_PARAMETERS).get(
-                    # Return estimated parameter from `petab_select.Model`
-                    # if possible.
-                    parameter_id,
-                    # Else return nominal value from PEtab parameter table.
-                    petab_problem.parameter_df.loc[
-                        parameter_id, NOMINAL_VALUE
-                    ],
-                )
-            )
-            for parameter_id in petab_problem.parameter_df.index
-        }
-        # TODO rewrite to construct return dict in a for loop, for more
-        # informative error message as soon as a "should-be-estimated"
-        # parameter has not estimate available in `self.estimated_parameters`.
+        The hash can be used to reconstruct the model (see
+        :meth:`ModelHash.get_model`).
         """
-        # TODO
-        pass
+        return f''

-    def get_estimated_parameter_ids_all(self) -> list[str]:
-        estimated_parameter_ids = []
+    def get_estimated_parameter_ids(self, full: bool = True) -> list[str]:
+        """Get estimated parameter IDs.

-        # Add all estimated parameters in the PEtab problem.
-        petab_problem = petab.Problem.from_yaml(str(self.petab_yaml))
-        for parameter_id in petab_problem.parameter_df.index:
-            if (
-                petab_problem.parameter_df.loc[parameter_id, ESTIMATE]
-                == PETAB_ESTIMATE_TRUE
-            ):
-                estimated_parameter_ids.append(parameter_id)
+        Args:
+            full:
+                Whether to provide all IDs, including additional parameters
+                that are not part of the model selection problem but estimated.
+        """
+        estimated_parameter_ids = []
+        if full:
+            estimated_parameter_ids = (
+                self._model_subspace_petab_problem.x_free_ids
+            )

         # Add additional estimated parameters, and collect fixed parameters,
         # in this model's parameterization.
@@ -616,7 +654,6 @@ def get_estimated_parameter_ids_all(self) -> list[str]:
             for parameter_id in estimated_parameter_ids
             if parameter_id not in fixed_parameter_ids
         ]
-
         return estimated_parameter_ids

     def get_parameter_values(
@@ -627,28 +664,41 @@ def get_parameter_values(

         Includes ``ESTIMATE`` for parameters that should be estimated.

-        The ordering is by ``parameter_ids`` if supplied, else
-        ``self.petab_parameters``.
-
         Args:
             parameter_ids:
                 The IDs of parameters that values will be returned for. Order
-                is maintained.
+                is maintained. Defaults to the model subspace PEtab problem
+                parameters (including those not part of the model selection
+                problem).

         Returns:
             The values of parameters.
         """
+        nominal_values = get_petab_parameters(
+            self._model_subspace_petab_problem
+        )
         if parameter_ids is None:
-            parameter_ids = list(self.petab_parameters)
+            parameter_ids = list(nominal_values)
         return [
-            self.parameters.get(
-                parameter_id,
-                # Default to PEtab problem.
-                self.petab_parameters[parameter_id],
-            )
+            self.parameters.get(parameter_id, nominal_values[parameter_id])
             for parameter_id in parameter_ids
         ]

+    @staticmethod
+    def from_yaml(
+        yaml_path: str | Path,
+    ) -> Model:
+        """Load a model from a YAML file.
+
+        Args:
+            yaml_path:
+                The model YAML file location.
+        """
+        model = ModelStandard.load_data(
+            filename=yaml_path, root_path=Path(yaml_path).parent
+        )
+        return model
+

 def default_compare(
     model0: Model,
@@ -669,12 +719,12 @@ def default_compare(
         criterion:
             The criterion.
         criterion_threshold:
-            The value by which the new model must improve on the original
-            model.
Should be non-negative, regardless of the criterion.
+            The non-negative value by which the new model must improve on the
+            original model.

     Returns:
-        ``True` if ``model1`` has a better criterion value than ``model0``, else
-        ``False``.
+        ``True`` if ``model1`` has a better criterion value than ``model0``,
+        else ``False``.
     """
     if not model1.has_criterion(criterion):
         warnings.warn(
@@ -683,7 +733,7 @@ def default_compare(
             stacklevel=2,
         )
         return False
-    if model0 == VIRTUAL_INITIAL_MODEL or model0 is None:
+    if model0 is None or model0.hash == VIRTUAL_INITIAL_MODEL_HASH:
         return True
     if criterion_threshold < 0:
         warnings.warn(
@@ -715,282 +765,14 @@ def default_compare(
         raise NotImplementedError(f"Unknown criterion: {criterion}.")


-class ModelHash(str):
-    """A class to handle model hash functionality.
-
-    The model hash is designed to be human-readable and able to be converted
-    back into the corresponding model. Currently, if two models from two
-    different model subspaces are actually the same PEtab problem, they will
-    still have different model hashes.
-
-    Attributes:
-        model_subspace_id:
-            The ID of the model subspace of the model. Unique up to a single
-            PEtab Select problem model space.
-        model_subspace_indices_hash:
-            A hash of the location of the model in its model
-            subspace. Unique up to a single model subspace.
-    """
-
-    # FIXME petab problem--specific hashes that are cross-platform?
-    """
-    The model hash is designed to be: human-readable; able to be converted
-    back into the corresponding model, and unique up to the same PEtab
-    problem and parameters.
-
-    Consider two different models in different model subspaces, with
-    `ModelHash`s `model_hash0` and `model_hash1`, respectively. Assume that
-    these two models end up encoding the same PEtab problem (e.g. they set the
-    same parameters to be estimated).
-    The string representation will be different,
-    `str(model_hash0) != str(model_hash1)`, but their hashes will pass the
-    equality check: `model_hash0 == model_hash1` and
-    `hash(model_hash0) == hash(model_hash1)`.
-
-    This means that different models in different model subspaces that end up
-    being the same PEtab problem will have different human-readable hashes,
-    but if these models arise during model selection, then only one of them
-    will be calibrated.
-
-    The PEtab hash size is computed automatically as the smallest size that
-    ensures a collision probability of less than $2^{-64}$.
-    N.B.: this assumes only one model subspace, and only 2 options for each
-    parameter (e.g. `0` and `estimate`). You can manually set the size with
-    :const:`petab_select.constants.PETAB_HASH_DIGEST_SIZE`.
-
-    petab_hash:
-        A hash that is unique up to the same PEtab problem, which is
-        determined by: the PEtab problem YAML file location, nominal
-        parameter values, and parameters set to be estimated. This means
-        that different models may have the same `unique_petab_hash`,
-        because they are the same estimation problem.
- """ - - def __init__( - self, - model_subspace_id: str, - model_subspace_indices_hash: str, - # petab_hash: str, - ): - self.model_subspace_id = model_subspace_id - self.model_subspace_indices_hash = model_subspace_indices_hash - # self.petab_hash = petab_hash - - def __new__( - cls, - model_subspace_id: str, - model_subspace_indices_hash: str, - # petab_hash: str, - ): - hash_str = MODEL_HASH_DELIMITER.join( - [ - model_subspace_id, - model_subspace_indices_hash, - # petab_hash, - ] - ) - instance = super().__new__(cls, hash_str) - return instance - - def __getnewargs_ex__(self): - return ( - (), - { - "model_subspace_id": self.model_subspace_id, - "model_subspace_indices_hash": self.model_subspace_indices_hash, - # 'petab_hash': self.petab_hash, - }, - ) - - def __copy__(self): - return ModelHash( - model_subspace_id=self.model_subspace_id, - model_subspace_indices_hash=self.model_subspace_indices_hash, - # petab_hash=self.petab_hash, - ) - - def __deepcopy__(self, memo): - return self.__copy__() - - # @staticmethod - # def get_petab_hash(model: Model) -> str: - # """Get a hash that is unique up to the same estimation problem. - - # See :attr:`petab_hash` for more information. - - # Args: - # model: - # The model. - - # Returns: - # The unique PEtab hash. - # """ - # digest_size = PETAB_HASH_DIGEST_SIZE - # if digest_size is None: - # petab_info_bits = len(model.model_subspace_indices) - # # Ensure <2^{-64} probability of collision - # petab_info_bits += 64 - # # Convert to bytes, round up. - # digest_size = int(petab_info_bits / 8) + 1 - - # petab_yaml = str(model.petab_yaml.resolve()) - # model_parameter_df = model.to_petab(set_estimated_parameters=False)[ - # PETAB_PROBLEM - # ].parameter_df - # nominal_parameter_hash = hash_parameter_dict( - # model_parameter_df[NOMINAL_VALUE].to_dict() - # ) - # estimate_parameter_hash = hash_parameter_dict( - # model_parameter_df[ESTIMATE].to_dict() - # ) - # return hash_str( - # petab_yaml + estimate_parameter_hash + nominal_parameter_hash, - # digest_size=digest_size, - # ) - - @staticmethod - def from_hash(model_hash: str | ModelHash) -> ModelHash: - """Reconstruct a :class:`ModelHash` object. - - Args: - model_hash: - The model hash. - - Returns: - The :class:`ModelHash` object. - """ - if isinstance(model_hash, ModelHash): - return model_hash - - if model_hash == VIRTUAL_INITIAL_MODEL: - return ModelHash( - model_subspace_id=VIRTUAL_INITIAL_MODEL, - model_subspace_indices_hash="", - # petab_hash=VIRTUAL_INITIAL_MODEL, - ) - - ( - model_subspace_id, - model_subspace_indices_hash, - # petab_hash, - ) = model_hash.split(MODEL_HASH_DELIMITER) - return ModelHash( - model_subspace_id=model_subspace_id, - model_subspace_indices_hash=model_subspace_indices_hash, - # petab_hash=petab_hash, - ) - - @staticmethod - def from_model(model: Model) -> ModelHash: - """Create a hash for a model. - - Args: - model: - The model. - - Returns: - The model hash. - """ - model_subspace_id = "" - model_subspace_indices_hash = "" - if model.model_subspace_id is not None: - model_subspace_id = model.model_subspace_id - model_subspace_indices_hash = ( - ModelHash.hash_model_subspace_indices( - model.model_subspace_indices - ) - ) - - return ModelHash( - model_subspace_id=model_subspace_id, - model_subspace_indices_hash=model_subspace_indices_hash, - # petab_hash=ModelHash.get_petab_hash(model=model), - ) - - @staticmethod - def hash_model_subspace_indices(model_subspace_indices: list[int]) -> str: - """Hash the location of a model in its subspace. 
- - Args: - model_subspace_indices: - The location (indices) of the model in its subspace. - - Returns: - The hash. - """ - try: - return "".join( - MODEL_SUBSPACE_INDICES_HASH_MAP[index] - for index in model_subspace_indices - ) - except KeyError: - return MODEL_SUBSPACE_INDICES_HASH_DELIMITER.join( - str(i) for i in model_subspace_indices - ) - - def unhash_model_subspace_indices(self) -> list[int]: - """Get the location of a model in its subspace. - - Returns: - The location, as indices of the subspace. - """ - if ( - MODEL_SUBSPACE_INDICES_HASH_DELIMITER - in self.model_subspace_indices_hash - ): - return [ - int(s) - for s in self.model_subspace_indices_hash.split( - MODEL_SUBSPACE_INDICES_HASH_DELIMITER - ) - ] - else: - return [ - MODEL_SUBSPACE_INDICES_HASH_MAP.index(s) - for s in self.model_subspace_indices_hash - ] - - def get_model(self, petab_select_problem: Problem) -> Model: - """Get the model that a hash corresponds to. - - Args: - petab_select_problem: - The PEtab Select problem. The model will be found in its model - space. - - Returns: - The model. - """ - # if self.petab_hash == VIRTUAL_INITIAL_MODEL: - # return self.petab_hash - - return petab_select_problem.model_space.model_subspaces[ - self.model_subspace_id - ].indices_to_model(self.unhash_model_subspace_indices()) - - def __hash__(self) -> str: - """The PEtab hash. - - N.B.: this is not the model hash! As the equality between two models - is determined by their PEtab hash only, this method only returns the - PEtab hash. However, the model hash is the full string with the - human-readable elements as well. :func:`ModelHash.from_hash` does not - accept the PEtab hash as input, rather the full string. - """ - return hash(str(self)) - - def __eq__(self, other_hash: str | ModelHash) -> bool: - """Check whether two model hashes are equivalent. - - Returns: - Whether the two hashes correspond to equivalent PEtab problems. - """ - # petab_hash = other_hash - # # Check whether the PEtab hash needs to be extracted - # if MODEL_HASH_DELIMITER in other_hash: - # petab_hash = ModelHash.from_hash(other_hash).petab_hash - # return self.petab_hash == petab_hash - return str(self) == str(other_hash) +VIRTUAL_INITIAL_MODEL = VirtualModelBase.model_validate( + { + "model_subspace_id": "virtual_initial_model", + "model_subspace_indices": [], + } +) +# TODO deprecate, use `VIRTUAL_INITIAL_MODEL.hash` instead +VIRTUAL_INITIAL_MODEL_HASH = VIRTUAL_INITIAL_MODEL.hash -VIRTUAL_INITIAL_MODEL_HASH = ModelHash.from_hash(VIRTUAL_INITIAL_MODEL) +ModelStandard = mkstd.YamlStandard(model=Model) diff --git a/petab_select/model_subspace.py b/petab_select/model_subspace.py index 1f077996..1f62bd75 100644 --- a/petab_select/model_subspace.py +++ b/petab_select/model_subspace.py @@ -7,6 +7,7 @@ import numpy as np import pandas as pd +import petab.v1 as petab from more_itertools import powerset from .candidate_space import CandidateSpace @@ -20,19 +21,18 @@ TYPE_PARAMETER_OPTIONS, TYPE_PARAMETER_OPTIONS_DICT, TYPE_PATH, - VIRTUAL_INITIAL_MODEL, Method, ) from .misc import parameter_string_to_value -from .model import Model -from .petab import PetabMixin +from .model import VIRTUAL_INITIAL_MODEL, Model +from .petab import get_petab_parameters __all__ = [ "ModelSubspace", ] -class ModelSubspace(PetabMixin): +class ModelSubspace: """Efficient representation of exponentially large model subspaces. Attributes: @@ -42,37 +42,38 @@ class ModelSubspace(PetabMixin): The location of the PEtab problem YAML file. parameters: The key is the ID of the parameter. 
The value is a list of values - that the parameter can take (including `ESTIMATE`). + that the parameter can take (including ``ESTIMATE``). exclusions: Hashes of models that have been previously submitted to a candidate space for consideration (:meth:`CandidateSpace.consider`). """ - """ - FIXME(dilpath) - #history: - # A history of all models that have been accepted by the candidate - # space. Models are represented as indices (see e.g. - # `ModelSubspace.parameters_to_indices`). - """ - def __init__( self, model_subspace_id: str, - petab_yaml: str, + petab_yaml: str | Path, parameters: TYPE_PARAMETER_OPTIONS_DICT, exclusions: list[Any] | None | None = None, ): self.model_subspace_id = model_subspace_id + self.petab_yaml = Path(petab_yaml) self.parameters = parameters - # TODO switch from mixin to attribute - super().__init__(petab_yaml=petab_yaml, parameters_as_lists=True) - self.exclusions = set() if exclusions is not None: self.exclusions = set(exclusions) + self.petab_problem = petab.Problem.from_yaml(self.petab_yaml) + + for parameter_id, parameter_value in self.parameters.items(): + if not parameter_value: + raise ValueError( + f"The parameter `{parameter_id}` is in the definition " + "of this model subspace. However, its value is empty. " + f"Please specify either its fixed value or `'{ESTIMATE}'` " + "(e.g. in the model space table)." + ) + def check_compatibility_stepwise_method( self, candidate_space: CandidateSpace, @@ -91,9 +92,15 @@ def check_compatibility_stepwise_method( """ if candidate_space.method not in STEPWISE_METHODS: return True - if candidate_space.predecessor_model != VIRTUAL_INITIAL_MODEL and ( - str(candidate_space.predecessor_model.petab_yaml.resolve()) - != str(self.petab_yaml.resolve()) + if ( + candidate_space.predecessor_model.hash + != VIRTUAL_INITIAL_MODEL.hash + and ( + str( + candidate_space.predecessor_model.model_subspace_petab_yaml.resolve() + ) + != str(self.petab_yaml.resolve()) + ) ): warnings.warn( "The supplied candidate space is initialized with a model " @@ -101,10 +108,9 @@ def check_compatibility_stepwise_method( "This is currently not supported for stepwise methods " "(e.g. forward or backward). " f"This model subspace: `{self.model_subspace_id}`. " - "This model subspace PEtab YAML: " - f"`{self.petab_yaml}`. " + f"This model subspace PEtab YAML: `{self.petab_yaml}`. " "The candidate space PEtab YAML: " - f"`{candidate_space.predecessor_model.petab_yaml}`.", + f"`{candidate_space.predecessor_model.model_subspace_petab_yaml}`.", stacklevel=2, ) return False @@ -238,29 +244,37 @@ def continue_searching( # Compute parameter sets that are useful for finding minimal forward or backward # moves in the subspace. # Parameters that are currently estimated in the predecessor model. - if candidate_space.predecessor_model == VIRTUAL_INITIAL_MODEL: + if ( + candidate_space.predecessor_model.hash + == VIRTUAL_INITIAL_MODEL.hash + ): if candidate_space.method == Method.FORWARD: - old_estimated_all = set() - old_fixed_all = set(self.parameters) + old_estimated_all = self.must_estimate_all + old_fixed_all = self.can_fix_all elif candidate_space.method == Method.BACKWARD: - old_estimated_all = set(self.parameters) - old_fixed_all = set() + old_estimated_all = self.can_estimate_all + old_fixed_all = self.must_fix_all + elif candidate_space.method == Method.BRUTE_FORCE: + # doesn't matter what these are set to + old_estimated_all = self.must_estimate_all + old_fixed_all = self.must_fix_all else: # Should already be handled elsewhere (e.g. 
# `self.check_compatibility_stepwise_method`). raise NotImplementedError( - f"The default parameter set for a candidate space with the virtual initial model and method {candidate_space.method} is not implemented. Please report if this is desired." + "The virtual initial model and method " + f"{candidate_space.method} is not implemented. " + "Please report at https://github.com/PEtab-dev/petab_select/issues if this is desired." ) else: - old_estimated_all = set() - old_fixed_all = set() - if isinstance(candidate_space.predecessor_model, Model): - old_estimated_all = candidate_space.predecessor_model.get_estimated_parameter_ids_all() - old_fixed_all = [ - parameter_id - for parameter_id in self.parameters_all - if parameter_id not in old_estimated_all - ] + old_estimated_all = ( + candidate_space.predecessor_model.get_estimated_parameter_ids() + ) + old_fixed_all = [ + parameter_id + for parameter_id in self.parameters_all + if parameter_id not in old_estimated_all + ] # Parameters that are fixed in the candidate space # predecessor model but are necessarily estimated in this subspace. @@ -307,7 +321,8 @@ def continue_searching( # there are no valid "forward" moves. if ( not new_can_estimate_all - and candidate_space.predecessor_model != VIRTUAL_INITIAL_MODEL + and candidate_space.predecessor_model.hash + != VIRTUAL_INITIAL_MODEL.hash ): return # There are estimated parameters in the predecessor model that @@ -318,7 +333,8 @@ def continue_searching( # parameters. if ( new_must_estimate_all - or candidate_space.predecessor_model == VIRTUAL_INITIAL_MODEL + or candidate_space.predecessor_model.hash + == VIRTUAL_INITIAL_MODEL.hash ): # Consider minimal models that have all necessarily-estimated # parameters. @@ -397,7 +413,8 @@ def continue_searching( # are no valid "backward" moves. if ( not new_can_fix_all - and candidate_space.predecessor_model != VIRTUAL_INITIAL_MODEL + and candidate_space.predecessor_model.hash + != VIRTUAL_INITIAL_MODEL.hash ): return # There are fixed parameters in the predecessor model that must be estimated @@ -408,7 +425,8 @@ def continue_searching( # parameters. if ( new_must_fix_all - or candidate_space.predecessor_model == VIRTUAL_INITIAL_MODEL + or candidate_space.predecessor_model.hash + == VIRTUAL_INITIAL_MODEL.hash ): # Consider minimal models that have all necessarily-fixed # parameters. @@ -508,7 +526,8 @@ def continue_searching( if ( # `and` is redundant with the "equal number" check above. (new_must_estimate_all and new_must_fix_all) - or candidate_space.predecessor_model == VIRTUAL_INITIAL_MODEL + or candidate_space.predecessor_model.hash + == VIRTUAL_INITIAL_MODEL.hash ): # Consider all models that have the required estimated and # fixed parameters. @@ -654,7 +673,7 @@ def exclude_model(self, model: Model) -> None: model: The model that will be excluded. """ - self.exclude_model_hash(model_hash=model.get_hash()) + self.exclude_model_hash(model_hash=model.hash) def exclude_models(self, models: Iterable[Model]) -> None: """Exclude models from the model subspace. @@ -674,7 +693,7 @@ def excluded( model: Model, ) -> bool: """Whether a model is excluded.""" - return model.get_hash() in self.exclusions + return model.hash in self.exclusions def reset_exclusions( self, @@ -744,11 +763,11 @@ def indices_to_model(self, indices: list[int]) -> Model | None: ``None``, if the model is excluded from the subspace. 
""" model = Model( - petab_yaml=self.petab_yaml, model_subspace_id=self.model_subspace_id, model_subspace_indices=indices, + model_subspace_petab_yaml=self.petab_yaml, parameters=self.indices_to_parameters(indices), - petab_problem=self.petab_problem, + _model_subspace_petab_problem=self.petab_problem, ) if self.excluded(model): return None @@ -828,7 +847,10 @@ def parameters_all(self) -> TYPE_PARAMETER_DICT: Parameter values in the PEtab problem are overwritten by the model subspace values. """ - return {**self.petab_parameters, **self.parameters} + return { + **get_petab_parameters(self.petab_problem, as_lists=True), + **self.parameters, + } @property def can_fix(self) -> list[str]: @@ -840,10 +862,15 @@ def can_fix(self) -> list[str]: return [ parameter_id for parameter_id, parameter_values in self.parameters.items() - # If the possible parameter values are not only `ESTIMATE`, then - # it is assumed there is a fixed possible parameter value. - # TODO explicitly check for a lack of `ValueError` when cast to - # float? + if parameter_values != [ESTIMATE] + ] + + @property + def can_fix_all(self) -> list[str]: + """All arameters that can be fixed, according to the subspace.""" + return [ + parameter_id + for parameter_id, parameter_values in self.parameters_all.items() if parameter_values != [ESTIMATE] ] @@ -909,7 +936,7 @@ def must_estimate_all(self) -> list[str]: """All parameters that must be estimated in this subspace.""" must_estimate_petab = [ parameter_id - for parameter_id in self.petab_parameter_ids_estimated + for parameter_id in self.petab_problem.x_free_ids if parameter_id not in self.parameters ] return [*must_estimate_petab, *self.must_estimate] diff --git a/petab_select/models.py b/petab_select/models.py index 03996adb..6e770d35 100644 --- a/petab_select/models.py +++ b/petab_select/models.py @@ -16,13 +16,16 @@ ITERATION, MODEL_HASH, MODEL_ID, + MODEL_SUBSPACE_PETAB_PROBLEM, PREDECESSOR_MODEL_HASH, + ROOT_PATH, TYPE_PATH, Criterion, ) from .model import ( Model, ModelHash, + VirtualModelBase, ) if TYPE_CHECKING: @@ -107,6 +110,8 @@ def __contains__(self, item: ModelLike) -> bool: return item in self._models case ModelHash() | str(): return item in self._hashes + case VirtualModelBase(): + return False case _: raise TypeError(f"Unexpected type: `{type(item)}`.") @@ -176,7 +181,7 @@ def __setitem__(self, key: ModelIndex, item: ModelLike) -> None: if key < len(self): self._models[key] = item - self._hashes[key] = item.get_hash() + self._hashes[key] = item.hash else: # Key doesn't exist, e.g., instead of # models[1] = model1 @@ -199,17 +204,17 @@ def _update(self, index: int, item: ModelLike) -> None: A model or a model hash. """ model = self._model_like_to_model(item) - if model.get_hash() in self: + if model.hash in self: warnings.warn( ( - f"A model with hash `{model.get_hash()}` already exists " + f"A model with hash `{model.hash}` already exists " "in this collection of models. The previous model will be " "overwritten." 
), RuntimeWarning, stacklevel=2, ) - self[model.get_hash()] = model + self[model.hash] = model else: self._models.insert(index, None) self._hashes.insert(index, None) @@ -285,14 +290,14 @@ def insert(self, index: int, item: ModelLike): # def remove(self, item: ModelLike): # # Re-use __delitem__ logic # if isinstance(item, Model): - # item = item.get_hash() + # item = item.hash # del self[item] # skipped clear, copy, count def index(self, item: ModelLike, *args) -> int: if isinstance(item, Model): - item = item.get_hash() + item = item.hash return self._hashes.index(item, *args) # skipped reverse, sort @@ -369,7 +374,9 @@ def from_yaml( models_yaml: The path to the PEtab Select list of model YAML file. petab_problem: - See :meth:`Model.from_dict`. + Provide a preloaded copy of the PEtab problem. Note: + all models should share the same PEtab problem if this is + provided. problem: The PEtab Select problem. @@ -381,25 +388,20 @@ def from_yaml( if not model_dict_list: # Empty file models = [] - elif not isinstance(model_dict_list, list): + elif isinstance(model_dict_list, dict): # File contains a single model - models = [ - Model.from_dict( - model_dict_list, - base_path=Path(models_yaml).parent, - petab_problem=petab_problem, - ) - ] - else: - # File contains a list of models - models = [ - Model.from_dict( - model_dict, - base_path=Path(models_yaml).parent, - petab_problem=petab_problem, - ) - for model_dict in model_dict_list - ] + model_dict_list = [model_dict_list] + + models = [ + Model.model_validate( + { + **model_dict, + ROOT_PATH: Path(models_yaml).parent, + MODEL_SUBSPACE_PETAB_PROBLEM: petab_problem, + } + ) + for model_dict in model_dict_list + ] return Models(models=models, problem=problem) @@ -541,25 +543,7 @@ def models_from_yaml_list( allow_single_model: bool = True, problem: Problem = None, ) -> Models: - """Generate a model from a PEtab Select list of model YAML file. - - Deprecated. Use `petab_select.Models.from_yaml` instead. - - Args: - model_list_yaml: - The path to the PEtab Select list of model YAML file. - petab_problem: - See :meth:`Model.from_dict`. - allow_single_model: - Given a YAML file that contains a single model directly (not in - a 1-element list), if ``True`` then the single model will be read in, - else a ``ValueError`` will be raised. - problem: - The PEtab Select problem. - - Returns: - The models. - """ + """Deprecated. Use `petab_select.Models.from_yaml` instead.""" warnings.warn( ( "Use `petab_select.Models.from_yaml` instead. " @@ -580,19 +564,7 @@ def models_to_yaml_list( output_yaml: TYPE_PATH, relative_paths: bool = True, ) -> None: - """Generate a YAML listing of models. - - Deprecated. Use `petab_select.Models.to_yaml` instead. - - Args: - models: - The models. - output_yaml: - The location where the YAML will be saved. - relative_paths: - Whether to rewrite the paths in each model (e.g. the path to the - model's PEtab problem) relative to the `output_yaml` location. - """ + """Deprecated. 
Use `petab_select.Models.to_yaml` instead.""" warnings.warn( "Use `petab_select.Models.to_yaml` instead.", DeprecationWarning, diff --git a/petab_select/petab.py b/petab_select/petab.py index 8d370c8e..792e6ddf 100644 --- a/petab_select/petab.py +++ b/petab_select/petab.py @@ -1,91 +1,32 @@ -from pathlib import Path +"""Helper methods for working with PEtab problems.""" -import petab.v1 as petab -from more_itertools import one -from petab.v1.C import ESTIMATE, NOMINAL_VALUE +from typing import Literal -from .constants import PETAB_ESTIMATE_FALSE, TYPE_PARAMETER_DICT, TYPE_PATH +import numpy as np +import petab.v1 as petab +from petab.v1.C import ESTIMATE +__all__ = ["get_petab_parameters"] -class PetabMixin: - """Useful things for classes that contain a PEtab problem. - All attributes/methods are prefixed with `petab_`. +def get_petab_parameters( + petab_problem: petab.Problem, as_lists: bool = False +) -> dict[str, float | Literal[ESTIMATE] | list[float | Literal[ESTIMATE]]]: + """Convert PEtab problem parameters to the format in model space files. - Attributes: - petab_yaml: - The location of the PEtab problem YAML file. + Args: petab_problem: The PEtab problem. - petab_parameters: - The parameters from the PEtab parameters table, where keys are - parameter IDs, and values are either :obj:`ESTIMATE` if the - parameter is set to be estimated, else the nominal value. - """ - - def __init__( - self, - petab_yaml: TYPE_PATH | None = None, - petab_problem: petab.Problem | None = None, - parameters_as_lists: bool = False, - ): - if petab_yaml is None and petab_problem is None: - raise ValueError( - "Please supply at least one of either the location of the " - "PEtab problem YAML file, or an instance of the PEtab problem." - ) - self.petab_yaml = petab_yaml - if self.petab_yaml is not None: - self.petab_yaml = Path(self.petab_yaml) - - self.petab_problem = petab_problem - if self.petab_problem is None: - self.petab_problem = petab.Problem.from_yaml(str(petab_yaml)) - - self.petab_parameters = { - parameter_id: ( - row[NOMINAL_VALUE] - if row[ESTIMATE] == PETAB_ESTIMATE_FALSE - else ESTIMATE - ) - for parameter_id, row in self.petab_problem.parameter_df.iterrows() - } - if parameters_as_lists: - self.petab_parameters = { - k: [v] for k, v in self.petab_parameters.items() - } + as_lists: + Each value will be provided inside a list object, similar to the + format for multiple values for a parameter in a model subspace. - @property - def petab_parameter_ids_estimated(self) -> list[str]: - """Get the IDs of all estimated parameters. - - Returns: - The parameter IDs. - """ - return [ - parameter_id - for parameter_id, parameter_value in self.petab_parameters.items() - if parameter_value == ESTIMATE - ] - - @property - def petab_parameter_ids_fixed(self) -> list[str]: - """Get the IDs of all fixed parameters. - - Returns: - The parameter IDs. - """ - estimated = self.petab_parameter_ids_estimated - return [ - parameter_id - for parameter_id in self.petab_parameters - if parameter_id not in estimated - ] - - @property - def petab_parameters_singular(self) -> TYPE_PARAMETER_DICT: - """TODO deprecate and remove?""" - return { - parameter_id: one(parameter_value) - for parameter_id, parameter_value in self.petab_parameters - } + Returns: + Keys are parameter IDs, values are the nominal values for fixed + parameters, or :const:`ESTIMATE` for estimated parameters. 
+ """ + values = np.array(petab_problem.x_nominal, dtype=object) + values[petab_problem.x_free_indices] = ESTIMATE + if as_lists: + values = [[v] for v in values] + return dict(zip(petab_problem.x_ids, values, strict=True)) diff --git a/petab_select/plot.py b/petab_select/plot.py index 859c6a33..e485ba07 100644 --- a/petab_select/plot.py +++ b/petab_select/plot.py @@ -56,7 +56,7 @@ def upset( index = np.argsort(values) values = values[index] labels = [ - model.get_estimated_parameter_ids_all() + model.get_estimated_parameter_ids() for model in np.array(models)[index] ] @@ -122,7 +122,7 @@ def line_best_by_iteration( [best_by_iteration[iteration] for iteration in iterations] ) iteration_labels = [ - str(iteration) + f"\n({labels.get(model.get_hash(), model.model_id)})" + str(iteration) + f"\n({labels.get(model.hash, model.model_id)})" for iteration, model in zip(iterations, best_models, strict=True) ] @@ -208,9 +208,9 @@ def graph_history( if labels is None: labels = { - model.get_hash(): model.model_id + model.hash: model.model_id + ( - f"\n{criterion_values[model.get_hash()]:.2f}" + f"\n{criterion_values[model.hash]:.2f}" if criterion is not None else "" ) @@ -230,7 +230,7 @@ def graph_history( if predecessor_model_hash in models: predecessor_model = models[predecessor_model_hash] from_ = labels.get( - predecessor_model.get_hash(), + predecessor_model.hash, predecessor_model.model_id, ) else: @@ -239,7 +239,7 @@ def graph_history( "not yet implemented." ) from_ = "None" - to = labels.get(model.get_hash(), model.model_id) + to = labels.get(model.hash, model.model_id) edges.append((from_, to)) G.add_edges_from(edges) @@ -312,13 +312,13 @@ def bar_criterion_vs_models( bar_kwargs = {} if labels is None: - labels = {model.get_hash(): model.model_id for model in models} + labels = {model.hash: model.model_id for model in models} if ax is None: _, ax = plt.subplots() bar_model_labels = [ - labels.get(model.get_hash(), model.model_id) for model in models + labels.get(model.hash, model.model_id) for model in models ] criterion_values = models.get_criterion( criterion=criterion, relative=relative @@ -385,7 +385,7 @@ def scatter_criterion_vs_n_estimated( The plot axes. 
""" labels = { - model.get_hash(): labels.get(model.model_id, model.model_id) + model.hash: labels.get(model.model_id, model.model_id) for model in models } @@ -405,7 +405,7 @@ def scatter_criterion_vs_n_estimated( n_estimated = [] for model in models: - n_estimated.append(len(model.get_estimated_parameter_ids_all())) + n_estimated.append(len(model.get_estimated_parameter_ids())) criterion_values = models.get_criterion( criterion=criterion, relative=relative @@ -495,36 +495,34 @@ def graph_iteration_layers( if draw_networkx_kwargs is None: draw_networkx_kwargs = default_draw_networkx_kwargs - ancestry = { - model.get_hash(): model.predecessor_model_hash for model in models - } + ancestry = {model.hash: model.predecessor_model_hash for model in models} ancestry_as_set = {k: {v} for k, v in ancestry.items()} ordering = [ - [model.get_hash() for model in iteration_models] + [model.hash for model in iteration_models] for iteration_models in group_by_iteration(models).values() ] if VIRTUAL_INITIAL_MODEL_HASH in ancestry.values(): ordering.insert(0, [VIRTUAL_INITIAL_MODEL_HASH]) model_estimated_parameters = { - model.get_hash(): set(model.estimated_parameters) for model in models + model.hash: set(model.estimated_parameters) for model in models } model_criterion_values = models.get_criterion( criterion=criterion, relative=relative, as_dict=True ) model_parameter_diffs = { - model.get_hash(): ( + model.hash: ( (set(), set()) if model.predecessor_model_hash not in model_estimated_parameters else ( - model_estimated_parameters[model.get_hash()].difference( + model_estimated_parameters[model.hash].difference( model_estimated_parameters[model.predecessor_model_hash] ), model_estimated_parameters[ model.predecessor_model_hash - ].difference(model_estimated_parameters[model.get_hash()]), + ].difference(model_estimated_parameters[model.hash]), ) ) for model in models @@ -534,9 +532,9 @@ def graph_iteration_layers( labels = ( labels | { - model.get_hash(): model.model_id + model.hash: model.model_id for model in models - if model.get_hash() not in labels + if model.hash not in labels } | { ModelHash.from_hash( @@ -670,8 +668,8 @@ def __getitem__(self, key): # selected_hashes = set(ancestry.values()) # selected_models = {} # for model in models: - # if model.get_hash() in selected_hashes: - # selected_models[model.get_hash()] = model + # if model.hash in selected_hashes: + # selected_models[model.hash] = model # selected_parameters = { # model_hash: sorted(model.estimated_parameters) diff --git a/petab_select/ui.py b/petab_select/ui.py index 720a319c..34abc14a 100644 --- a/petab_select/ui.py +++ b/petab_select/ui.py @@ -15,11 +15,10 @@ TERMINATE, TYPE_PATH, UNCALIBRATED_MODELS, - VIRTUAL_INITIAL_MODEL, Criterion, Method, ) -from .model import Model, ModelHash, default_compare +from .model import VIRTUAL_INITIAL_MODEL, Model, ModelHash, default_compare from .models import Models from .problem import Problem @@ -145,10 +144,7 @@ def start_iteration( predecessor_model = candidate_space.previous_predecessor_model # If the predecessor model has not yet been calibrated, then calibrate it. 
- if ( - predecessor_model is not None - and predecessor_model != VIRTUAL_INITIAL_MODEL - ): + if predecessor_model.hash != VIRTUAL_INITIAL_MODEL.hash: if ( predecessor_model.get_criterion( criterion, @@ -202,8 +198,7 @@ def start_iteration( ): return start_iteration_result(candidate_space=candidate_space) - if predecessor_model is not None: - candidate_space.reset(predecessor_model) + candidate_space.reset(predecessor_model) # FIXME store exclusions in candidate space only problem.model_space.exclude_model_hashes(model_hashes=excluded_hashes) @@ -388,7 +383,7 @@ def write_summary_tsv( previous_predecessor_parameter_ids = set() if isinstance(previous_predecessor_model, Model): previous_predecessor_parameter_ids = set( - previous_predecessor_model.get_estimated_parameter_ids_all() + previous_predecessor_model.get_estimated_parameter_ids() ) if predecessor_model is None: @@ -397,7 +392,7 @@ def write_summary_tsv( predecessor_criterion = None if isinstance(predecessor_model, Model): predecessor_parameter_ids = set( - predecessor_model.get_estimated_parameter_ids_all() + predecessor_model.get_estimated_parameter_ids() ) predecessor_criterion = predecessor_model.get_criterion( problem.criterion @@ -412,7 +407,7 @@ def write_summary_tsv( diff_candidates_parameter_ids = [] for candidate_model in candidate_space.models: candidate_parameter_ids = set( - candidate_model.get_estimated_parameter_ids_all() + candidate_model.get_estimated_parameter_ids() ) diff_candidates_parameter_ids.append( list( @@ -423,14 +418,13 @@ def write_summary_tsv( ) # FIXME remove once MostDistantCandidateSpace exists... + # which might be difficult to implement because the most + # distant is a hypothetical model, which is then used to find a + # real model in its neighborhood of the model space method = candidate_space.method - if ( - isinstance(candidate_space, FamosCandidateSpace) - and isinstance(candidate_space.predecessor_model, Model) - and candidate_space.predecessor_model.predecessor_model_hash is None - ): + if isinstance(candidate_space, FamosCandidateSpace): with open(candidate_space.summary_tsv) as f: - if sum(1 for _ in f) > 1: + if f.readlines()[-1].startswith("Jumped"): method = Method.MOST_DISTANT candidate_space.write_summary_tsv( diff --git a/pyproject.toml b/pyproject.toml index 77e1d38c..12d1afaf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ dependencies = [ "pyyaml>=6.0.2", "click>=8.1.7", "dill>=0.3.9", + "mkstd>=0.0.5", ] [project.optional-dependencies] plot = [ @@ -37,7 +38,7 @@ test = [ "amici >= 0.11.25", "fides >= 0.7.5", # "pypesto > 0.2.13", - "pypesto @ git+https://github.com/ICB-DCM/pyPESTO.git@select_class_models#egg=pypesto", + "pypesto @ git+https://github.com/ICB-DCM/pyPESTO.git@select_mkstd#egg=pypesto", "tox >= 3.12.4", ] doc = [ diff --git a/test/analyze/input/models.yaml b/test/analyze/input/models.yaml index 264e1154..3730b6fc 100644 --- a/test/analyze/input/models.yaml +++ b/test/analyze/input/models.yaml @@ -14,7 +14,7 @@ estimated_parameters: k2: 0.15 k3: 0.0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml predecessor_model_hash: dummy_p0-0 - criteria: AIC: 4 @@ -29,7 +29,7 @@ k1: estimate k2: estimate k3: 0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml predecessor_model_hash: virtual_initial_model- - criteria: AIC: 3 @@ -47,7 +47,7 @@ 
estimated_parameters: k2: 0.15 k3: 0.0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml predecessor_model_hash: virtual_initial_model- - criteria: AIC: 2 @@ -62,5 +62,5 @@ k1: estimate k2: estimate k3: 0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml predecessor_model_hash: virtual_initial_model- diff --git a/test/analyze/test_analyze.py b/test/analyze/test_analyze.py index 32169a85..f37e6013 100644 --- a/test/analyze/test_analyze.py +++ b/test/analyze/test_analyze.py @@ -5,7 +5,6 @@ from petab_select import ( VIRTUAL_INITIAL_MODEL, Criterion, - ModelHash, Models, analyze, ) @@ -13,7 +12,6 @@ base_dir = Path(__file__).parent DUMMY_HASH = "dummy_p0-0" -VIRTUAL_HASH = ModelHash.from_hash(VIRTUAL_INITIAL_MODEL) @pytest.fixture @@ -26,15 +24,15 @@ def test_group_by_predecessor_model(models: Models) -> None: groups = analyze.group_by_predecessor_model(models) # Expected groups assert len(groups) == 2 - assert VIRTUAL_HASH in groups + assert VIRTUAL_INITIAL_MODEL.hash in groups assert DUMMY_HASH in groups # Expected group members assert len(groups[DUMMY_HASH]) == 1 assert "M-011" in groups[DUMMY_HASH] - assert len(groups[VIRTUAL_HASH]) == 3 - assert "M-110" in groups[VIRTUAL_HASH] - assert "M2-011" in groups[VIRTUAL_HASH] - assert "M2-110" in groups[VIRTUAL_HASH] + assert len(groups[VIRTUAL_INITIAL_MODEL.hash]) == 3 + assert "M-110" in groups[VIRTUAL_INITIAL_MODEL.hash] + assert "M2-011" in groups[VIRTUAL_INITIAL_MODEL.hash] + assert "M2-110" in groups[VIRTUAL_INITIAL_MODEL.hash] def test_group_by_iteration(models: Models) -> None: @@ -64,9 +62,9 @@ def test_get_best_by_iteration(models: Models) -> None: assert 2 in groups assert 5 in groups # Expected best models - assert groups[1].get_hash() == "M2-011" - assert groups[2].get_hash() == "M2-110" - assert groups[5].get_hash() == "M-110" + assert groups[1].hash == "M2-011" + assert groups[2].hash == "M2-110" + assert groups[5].hash == "M-110" def test_relative_criterion_values(models: Models) -> None: diff --git a/test/candidate_space/input/famos_synthetic/test_files/predecessor_model.yaml b/test/candidate_space/input/famos_synthetic/test_files/predecessor_model.yaml index f0442820..3a3aad43 100644 --- a/test/candidate_space/input/famos_synthetic/test_files/predecessor_model.yaml +++ b/test/candidate_space/input/famos_synthetic/test_files/predecessor_model.yaml @@ -1,8 +1,28 @@ +model_subspace_id: model_subspace_1 +model_subspace_indices: +- 1 +- 1 +- 0 +- 0 +- 1 +- 1 +- 0 +- 1 +- 1 +- 1 +- 0 +- 0 +- 0 +- 1 +- 1 +- 1 criteria: AIC: 30330.782621349786 AICc: 30332.80096997364 BIC: 30358.657538777607 NLLH: 15155.391310674893 +model_hash: model_subspace_1-1100110111000111 +model_subspace_petab_yaml: ../petab/FAMoS_2019_problem.yaml estimated_parameters: mu_AB: 0.09706971737957297 mu_AD: -0.6055359156893474 @@ -14,26 +34,23 @@ estimated_parameters: mu_DC: -1.1619119214640863 ro_A: -1.6431508614147425 ro_B: 2.9912966824709097 -model_hash: null -model_id: M_1100110111000111 -model_subspace_id: model_subspace_1 -model_subspace_indices: null +iteration: null +model_id: model_subspace_1-1100110111000111 parameters: + ro_A: estimate + ro_B: estimate + ro_C: 0 + ro_D: 0 mu_AB: estimate + mu_BA: estimate mu_AC: 0 + mu_CA: estimate mu_AD: estimate - mu_BA: estimate + mu_DA: estimate mu_BC: 0 - mu_BD: 0 - mu_CA: estimate mu_CB: 0 - mu_CD: estimate 
- mu_DA: estimate + mu_BD: 0 mu_DB: estimate + mu_CD: estimate mu_DC: estimate - ro_A: estimate - ro_B: estimate - ro_C: 0 - ro_D: 0 -petab_yaml: ../petab/FAMoS_2019_problem.yaml -predecessor_model_hash: null +predecessor_model_hash: virtual_initial_model- diff --git a/test/candidate_space/test_famos.py b/test/candidate_space/test_famos.py index f4ad33e1..5036dc69 100644 --- a/test/candidate_space/test_famos.py +++ b/test/candidate_space/test_famos.py @@ -5,7 +5,7 @@ from more_itertools import one import petab_select -from petab_select import Method, Models +from petab_select import Method, ModelHash, Models from petab_select.constants import ( CANDIDATE_SPACE, MODEL_HASH, @@ -35,7 +35,7 @@ def expected_criterion_values(input_path): sep="\t", ).set_index(MODEL_HASH) return { - petab_select.model.ModelHash.from_hash(k): v + ModelHash.model_validate(k): v for k, v in calibration_results[Criterion.AICC].items() } @@ -93,7 +93,7 @@ def calibrate( ) -> None: model.set_criterion( criterion=petab_select_problem.criterion, - value=expected_criterion_values[model.get_hash()], + value=expected_criterion_values[model.hash], ) def parse_summary_to_progress_list(summary_tsv: str) -> tuple[Method, set]: @@ -129,6 +129,7 @@ def parse_summary_to_progress_list(summary_tsv: str) -> tuple[Method, set]: all_calibrated_models = Models() candidate_space = petab_select_problem.new_candidate_space() + expected_repeated_model_hash0 = candidate_space.predecessor_model.hash candidate_space.summary_tsv.unlink(missing_ok=True) candidate_space._setup_summary_tsv() @@ -147,7 +148,7 @@ def parse_summary_to_progress_list(summary_tsv: str) -> tuple[Method, set]: calibrated_models = Models() for candidate_model in iteration[UNCALIBRATED_MODELS]: calibrate(candidate_model) - calibrated_models[candidate_model.get_hash()] = candidate_model + calibrated_models[candidate_model.hash] = candidate_model # Finalize iteration iteration_results = petab_select.ui.end_iteration( @@ -162,14 +163,20 @@ def parse_summary_to_progress_list(summary_tsv: str) -> tuple[Method, set]: raise StopIteration("No valid models found.") # A model is encountered twice and therefore skipped. - expected_repeated_model_hash = petab_select_problem.get_model( + expected_repeated_model_hash1 = petab_select_problem.get_model( model_subspace_id=one( petab_select_problem.model_space.model_subspaces ), model_subspace_indices=[int(s) for s in "0001011010010010"], - ).get_hash() - assert len(warning_record) == 1 - assert expected_repeated_model_hash in warning_record[0].message.args[0] + ).hash + # The predecessor model is also re-encountered. 
+ assert len(warning_record) == 2 + assert ( + str(expected_repeated_model_hash0) in warning_record[0].message.args[0] + ) + assert ( + str(expected_repeated_model_hash1) in warning_record[1].message.args[0] + ) progress_list = parse_summary_to_progress_list(candidate_space.summary_tsv) diff --git a/test/cli/input/model.yaml b/test/cli/input/model.yaml index dcaaa5a2..7cda4c4a 100644 --- a/test/cli/input/model.yaml +++ b/test/cli/input/model.yaml @@ -1,10 +1,16 @@ -- criteria: {} +- model_subspace_id: M + model_subspace_indices: + - 0 + - 1 + - 1 + criteria: {} + model_hash: M-011 + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + estimated_parameters: + k2: 0.15 + k3: 0.0 model_id: model parameters: k1: 0.2 k2: estimate k3: estimate - estimated_parameters: - k2: 0.15 - k3: 0.0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml diff --git a/test/cli/input/models.yaml b/test/cli/input/models.yaml index 06aa3933..b9d12b8e 100644 --- a/test/cli/input/models.yaml +++ b/test/cli/input/models.yaml @@ -1,4 +1,14 @@ -- criteria: {} +- model_subspace_id: M + model_subspace_indices: + - 0 + - 1 + - 1 + criteria: {} + model_hash: M-011 + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml + estimated_parameters: + k2: 0.15 + k3: 0.0 model_id: model_1 model_subspace_id: M model_subspace_indices: @@ -9,11 +19,14 @@ k1: 0.2 k2: estimate k3: estimate - estimated_parameters: - k2: 0.15 - k3: 0.0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml -- criteria: {} +- model_subspace_id: M + model_subspace_indices: + - 1 + - 1 + - 0 + criteria: {} + model_hash: M-110 + model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml model_id: model_2 model_subspace_id: M model_subspace_indices: @@ -24,4 +37,3 @@ k1: estimate k2: estimate k3: 0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml diff --git a/test/cli/test_cli.py b/test/cli/test_cli.py index 0a4dc34d..ccf015ea 100644 --- a/test/cli/test_cli.py +++ b/test/cli/test_cli.py @@ -55,7 +55,6 @@ def test_model_to_petab( ], ) - print(result.stdout) # The new PEtab problem YAML file is output to stdout correctly. 
assert ( result.stdout == f'{base_dir / "output" / "model" / "problem.yaml"}\n' diff --git a/test/model/input/model.yaml b/test/model/input/model.yaml index dcaaa5a2..233861de 100644 --- a/test/model/input/model.yaml +++ b/test/model/input/model.yaml @@ -1,10 +1,15 @@ -- criteria: {} - model_id: model - parameters: - k1: 0.2 - k2: estimate - k3: estimate - estimated_parameters: - k2: 0.15 - k3: 0.0 - petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml +model_subspace_id: M +model_subspace_indices: +- 0 +- 1 +- 1 +criteria: {} +model_subspace_petab_yaml: ../../../doc/examples/model_selection/petab_problem.yaml +model_id: model +parameters: + k1: 0.2 + k2: estimate + k3: estimate +estimated_parameters: + k2: 0.15 + k3: 0.0 diff --git a/test/pypesto/generate_expected_models.py b/test/pypesto/generate_expected_models.py index 912748ff..ca6e8b09 100644 --- a/test/pypesto/generate_expected_models.py +++ b/test/pypesto/generate_expected_models.py @@ -13,8 +13,9 @@ # Set to `[]` to test all test_cases = [ - #'0004', - #'0008', + #'0001', + # "0003", + "0009", ] # Do not use computationally-expensive test cases in CI @@ -41,6 +42,12 @@ def objective_customizer(obj): obj.amici_solver.setRelativeTolerance(1e-12) +model_problem_options = { + "minimize_options": minimize_options, + "objective_customizer": objective_customizer, +} + + for test_case_path in test_cases_path.glob("*"): if test_cases and test_case_path.stem not in test_cases: continue @@ -67,19 +74,17 @@ def objective_customizer(obj): ) # Run the selection process until "exhausted". - pypesto_select_problem.select_to_completion( - minimize_options=minimize_options, - objective_customizer=objective_customizer, - ) + pypesto_select_problem.select_to_completion(**model_problem_options) # Get the best model - best_model = petab_select_problem.get_best( + best_model = petab_select.analyze.get_best( models=pypesto_select_problem.calibrated_models, + criterion=petab_select_problem.criterion, ) # Generate the expected model. 
- best_model.to_yaml(expected_model_yaml, paths_relative_to=test_case_path) + best_model.to_yaml(expected_model_yaml) - pypesto_select_problem.calibrated_models.to_yaml( - f"all_models_{test_case_path.stem}.yaml" - ) + # pypesto_select_problem.calibrated_models.to_yaml( + # output_yaml="all_models.yaml", + # ) diff --git a/test_cases/0001/expected.yaml b/test_cases/0001/expected.yaml index 25c97f14..a230aa28 100644 --- a/test_cases/0001/expected.yaml +++ b/test_cases/0001/expected.yaml @@ -1,19 +1,19 @@ -criteria: - AIC: -6.1754055040468785 - NLLH: -4.087702752023439 -estimated_parameters: - sigma_x2: 0.12242920616053495 -iteration: 1 -model_hash: M1_1-000 -model_id: M1_1-000 model_subspace_id: M1_1 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: -4.087702752023436 + AIC: -6.175405504046871 +model_hash: M1_1-000 +model_subspace_petab_yaml: petab/petab_problem.yaml +estimated_parameters: + sigma_x2: 0.12242920313036142 +iteration: 1 +model_id: M1_1-000 parameters: k1: 0.2 k2: 0.1 k3: 0 -petab_yaml: petab/petab_problem.yaml -predecessor_model_hash: null +predecessor_model_hash: virtual_initial_model- diff --git a/test_cases/0002/expected.yaml b/test_cases/0002/expected.yaml index 57811a85..510c60ce 100644 --- a/test_cases/0002/expected.yaml +++ b/test_cases/0002/expected.yaml @@ -1,20 +1,20 @@ -criteria: - AIC: -4.705325991177407 - NLLH: -4.3526629955887035 -estimated_parameters: - k1: 0.20160877932991236 - sigma_x2: 0.11714038666761385 -iteration: 2 -model_hash: M1_3-000 -model_id: M1_3-000 model_subspace_id: M1_3 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: -4.352662995581719 + AIC: -4.705325991163438 +model_hash: M1_3-000 +model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml +estimated_parameters: + k1: 0.2016087813530968 + sigma_x2: 0.11714041764571122 +iteration: 2 +model_id: M1_3-000 parameters: k1: estimate k2: 0.1 k3: 0 -petab_yaml: ../0001/petab/petab_problem.yaml predecessor_model_hash: M1_0-000 diff --git a/test_cases/0003/expected.yaml b/test_cases/0003/expected.yaml index a0366cfb..218cba26 100644 --- a/test_cases/0003/expected.yaml +++ b/test_cases/0003/expected.yaml @@ -1,19 +1,19 @@ -criteria: - BIC: -6.383646034818824 - NLLH: -4.087702752023439 -estimated_parameters: - sigma_x2: 0.12242920723808924 -iteration: 1 -model_hash: M1-110 -model_id: M1-110 model_subspace_id: M1 model_subspace_indices: - 1 - 1 - 0 +criteria: + NLLH: -4.0877027520227704 + BIC: -6.383646034817486 +model_hash: M1-110 +model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml +estimated_parameters: + sigma_x2: 0.12242924701706556 +iteration: 1 +model_id: M1-110 parameters: k1: 0.2 k2: 0.1 k3: 0 -petab_yaml: ../0001/petab/petab_problem.yaml -predecessor_model_hash: null +predecessor_model_hash: virtual_initial_model- diff --git a/test_cases/0004/expected.yaml b/test_cases/0004/expected.yaml index 24f8ae41..8f220f09 100644 --- a/test_cases/0004/expected.yaml +++ b/test_cases/0004/expected.yaml @@ -1,20 +1,20 @@ -criteria: - AICc: -0.7053259911583094 - NLLH: -4.352662995579155 -estimated_parameters: - k1: 0.2016087783781175 - sigma_x2: 0.11714035262205941 -iteration: 3 -model_hash: M1_3-000 -model_id: M1_3-000 model_subspace_id: M1_3 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: -4.352662995594862 + AICc: -0.7053259911897243 +model_hash: M1_3-000 +model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml +estimated_parameters: + k1: 0.20160877986376358 + sigma_x2: 0.11714041204425464 +iteration: 3 +model_id: M1_3-000 parameters: k1: estimate k2: 0.1 k3: 0 -petab_yaml: 
../0001/petab/petab_problem.yaml predecessor_model_hash: M1_6-000 diff --git a/test_cases/0005/expected.yaml b/test_cases/0005/expected.yaml index c30365a8..35949e30 100644 --- a/test_cases/0005/expected.yaml +++ b/test_cases/0005/expected.yaml @@ -1,20 +1,20 @@ -criteria: - AIC: -4.705325991200599 - NLLH: -4.3526629956003 -estimated_parameters: - k1: 0.2016087798698859 - sigma_x2: 0.11714036476432785 -iteration: 2 -model_hash: M1_3-000 -model_id: M1_3-000 model_subspace_id: M1_3 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: -4.352662995589992 + AIC: -4.7053259911799845 +model_hash: M1_3-000 +model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml +estimated_parameters: + k1: 0.20160877971477925 + sigma_x2: 0.11714036509532029 +iteration: 2 +model_id: M1_3-000 parameters: k1: estimate k2: 0.1 k3: 0 -petab_yaml: ../0001/petab/petab_problem.yaml predecessor_model_hash: M1_0-000 diff --git a/test_cases/0006/expected.yaml b/test_cases/0006/expected.yaml index c8e92c9c..4a05253a 100644 --- a/test_cases/0006/expected.yaml +++ b/test_cases/0006/expected.yaml @@ -1,19 +1,19 @@ -criteria: - AIC: -6.1754055040468785 - NLLH: -4.087702752023439 -estimated_parameters: - sigma_x2: 0.12242920606535417 -iteration: 1 -model_hash: M1_0-000 -model_id: M1_0-000 model_subspace_id: M1_0 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: -4.087702752023439 + AIC: -6.1754055040468785 +model_hash: M1_0-000 +model_subspace_petab_yaml: ../0001/petab/petab_problem.yaml +estimated_parameters: + sigma_x2: 0.12242920634250658 +iteration: 1 +model_id: M1_0-000 parameters: k1: 0.2 k2: 0.1 k3: 0 -petab_yaml: ../0001/petab/petab_problem.yaml -predecessor_model_hash: virtual_initial_model +predecessor_model_hash: virtual_initial_model- diff --git a/test_cases/0007/expected.yaml b/test_cases/0007/expected.yaml index 4efd158a..f8d17428 100644 --- a/test_cases/0007/expected.yaml +++ b/test_cases/0007/expected.yaml @@ -1,18 +1,18 @@ -criteria: - AIC: 11.117195861535194 - NLLH: 5.558597930767597 -estimated_parameters: {} -iteration: 1 -model_hash: M1_0-000 -model_id: M1_0-000 model_subspace_id: M1_0 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: 5.558597930767597 + AIC: 11.117195861535194 +model_hash: M1_0-000 +model_subspace_petab_yaml: petab/petab_problem.yaml +estimated_parameters: {} +iteration: 1 +model_id: M1_0-000 parameters: k1: 0.2 k2: 0.1 k3: 0 -petab_yaml: petab/petab_problem.yaml -predecessor_model_hash: virtual_initial_model +predecessor_model_hash: virtual_initial_model- diff --git a/test_cases/0008/expected.yaml b/test_cases/0008/expected.yaml index 6162ff4c..715ec176 100644 --- a/test_cases/0008/expected.yaml +++ b/test_cases/0008/expected.yaml @@ -1,18 +1,18 @@ -criteria: - AICc: 11.117195861535194 - NLLH: 5.558597930767597 -estimated_parameters: {} -iteration: 4 -model_hash: M1_0-000 -model_id: M1_0-000 model_subspace_id: M1_0 model_subspace_indices: - 0 - 0 - 0 +criteria: + NLLH: 5.558597930767597 + AICc: 11.117195861535194 +model_hash: M1_0-000 +model_subspace_petab_yaml: ../0007/petab/petab_problem.yaml +estimated_parameters: {} +iteration: 4 +model_id: M1_0-000 parameters: k1: 0.2 k2: 0.1 k3: 0 -petab_yaml: ../0007/petab/petab_problem.yaml predecessor_model_hash: M1_3-000 diff --git a/test_cases/0009/expected.yaml b/test_cases/0009/expected.yaml index 6abbaa99..58bb09fa 100644 --- a/test_cases/0009/expected.yaml +++ b/test_cases/0009/expected.yaml @@ -1,18 +1,3 @@ -criteria: - AICc: -1708.1109924658595 - NLLH: -862.351792529226 -estimated_parameters: - a_0ac_k08: 0.4085141271467614 - 
a_b: 0.06675812072340812 - a_k05_k05k12: 30.88819982704895 - a_k05k12_k05k08k12: 4.872706275493909 - a_k08k12k16_4ac: 53.80184925213997 - a_k12_k05k12: 8.267871339049703 - a_k12k16_k08k12k16: 33.03793450182137 - a_k16_k12k16: 10.42455614921354 -iteration: 11 -model_hash: M-01000100001000010010000000010001 -model_id: M-01000100001000010010000000010001 model_subspace_id: M model_subspace_indices: - 0 @@ -47,6 +32,22 @@ model_subspace_indices: - 0 - 0 - 1 +criteria: + NLLH: -862.3517925313981 + AICc: -1708.1109924702037 +model_hash: M-01000100001000010010000000010001 +model_subspace_petab_yaml: petab/petab_problem.yaml +estimated_parameters: + a_0ac_k08: 0.40850355273291267 + a_k05_k05k12: 30.888150959586138 + a_k12_k05k12: 8.267845459216893 + a_k16_k12k16: 10.424629099941777 + a_k05k12_k05k08k12: 4.872747603868694 + a_k12k16_k08k12k16: 33.03769174387633 + a_k08k12k16_4ac: 53.80106471593421 + a_b: 0.06675819571287103 +iteration: 11 +model_id: M-01000100001000010010000000010001 parameters: a_0ac_k05: 1 a_0ac_k08: estimate @@ -55,30 +56,29 @@ parameters: a_k05_k05k08: 1 a_k05_k05k12: estimate a_k05_k05k16: 1 + a_k08_k05k08: 1 + a_k08_k08k12: 1 + a_k08_k08k16: 1 + a_k12_k05k12: estimate + a_k12_k08k12: 1 + a_k12_k12k16: 1 + a_k16_k05k16: 1 + a_k16_k08k16: 1 + a_k16_k12k16: estimate a_k05k08_k05k08k12: 1 a_k05k08_k05k08k16: 1 - a_k05k08k12_4ac: 1 - a_k05k08k16_4ac: 1 a_k05k12_k05k08k12: estimate a_k05k12_k05k12k16: 1 - a_k05k12k16_4ac: 1 a_k05k16_k05k08k16: 1 a_k05k16_k05k12k16: 1 - a_k08_k05k08: 1 - a_k08_k08k12: 1 - a_k08_k08k16: 1 a_k08k12_k05k08k12: 1 a_k08k12_k08k12k16: 1 - a_k08k12k16_4ac: estimate a_k08k16_k05k08k16: 1 a_k08k16_k08k12k16: 1 - a_k12_k05k12: estimate - a_k12_k08k12: 1 - a_k12_k12k16: 1 a_k12k16_k05k12k16: 1 a_k12k16_k08k12k16: estimate - a_k16_k05k16: 1 - a_k16_k08k16: 1 - a_k16_k12k16: estimate -petab_yaml: petab/petab_problem.yaml + a_k05k08k12_4ac: 1 + a_k05k08k16_4ac: 1 + a_k05k12k16_4ac: 1 + a_k08k12k16_4ac: estimate predecessor_model_hash: M-01000100001010010010000000010001 diff --git a/test_cases/0009/predecessor_model.yaml b/test_cases/0009/predecessor_model.yaml index 4471224b..581fa453 100644 --- a/test_cases/0009/predecessor_model.yaml +++ b/test_cases/0009/predecessor_model.yaml @@ -70,5 +70,5 @@ parameters: a_k16_k05k16: 1 a_k16_k08k16: 1 a_k16_k12k16: estimate -petab_yaml: petab/petab_problem.yaml -predecessor_model_hash: null +model_subspace_petab_yaml: petab/petab_problem.yaml +predecessor_model_hash: virtual_initial_model-
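
Illustrative usage sketch (not part of the diff above): a minimal example of loading a model written in the refactored YAML format and exporting its PEtab problem with the API introduced in this patch. The file path and output directory are hypothetical placeholders; `PETAB_PROBLEM` and `PETAB_YAML` are assumed to be the constants used as keys in the dictionary returned by `Model.to_petab`.

    from pathlib import Path

    from petab_select import Model
    from petab_select.constants import PETAB_PROBLEM, PETAB_YAML

    # Hypothetical path to a model file in the new format
    # (cf. test/model/input/model.yaml above).
    model = Model.from_yaml(Path("model.yaml"))
    print(model.hash)  # e.g. `M-011` (model subspace ID + subspace indices hash)

    # Export the PEtab problem; if `Model.estimated_parameters` is set, the
    # estimates are written to the nominal values of the parameter table by default.
    result = model.to_petab(output_path=Path("petab_output"))
    petab_problem = result[PETAB_PROBLEM]
    petab_yaml_path = result[PETAB_YAML]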