From 703f480c193641e06f3a57abfbe756f03966f671 Mon Sep 17 00:00:00 2001 From: Eneko Martin Martinez Date: Mon, 13 Sep 2021 11:45:21 +0200 Subject: [PATCH 1/8] Add support for FORECAST, ELMCOUNT, :NA: --- .../supported_vensim_functions.rst | 74 ++-- pysd/_version.py | 2 +- pysd/py_backend/builder.py | 117 ++++- pysd/py_backend/functions.py | 50 +++ pysd/py_backend/vensim/vensim2py.py | 404 ++++++++++-------- tests/integration_test_vensim_pathway.py | 4 + tests/unit_test_functions.py | 33 ++ tests/unit_test_pysd.py | 23 + tests/unit_test_vensim2py.py | 87 ++++ 9 files changed, 564 insertions(+), 230 deletions(-) diff --git a/docs/development/supported_vensim_functions.rst b/docs/development/supported_vensim_functions.rst index 7e792228..60aa4dd6 100644 --- a/docs/development/supported_vensim_functions.rst +++ b/docs/development/supported_vensim_functions.rst @@ -1,63 +1,75 @@ +------------------------------+------------------------------+ | Vensim | Python Translation | +==============================+==============================+ -| COS | np.cos | +| ABS | abs | +------------------------------+------------------------------+ -| EXP | np.exp | +| INTEGER | int | +------------------------------+------------------------------+ | MIN | min | +------------------------------+------------------------------+ -| <= | <= | +| MAX | max | +------------------------------+------------------------------+ -| STEP | functions.step | +| = | == | +------------------------------+------------------------------+ -| PULSE | functions.pulse | +| < | < | +------------------------------+------------------------------+ -| POISSON | np.random.poisson | +| > | > | +------------------------------+------------------------------+ -| EXPRND | np.random.exponential | +| >= | >= | +------------------------------+------------------------------+ -| SIN | np.sin | +| <= | <= | +------------------------------+------------------------------+ -| >= | >= | +| ^ | \** | +------------------------------+------------------------------+ -| IF THEN ELSE | functions.if_then_else | +| SQRT | np.sqrt | ++------------------------------+------------------------------+ +| EXP | np.exp | +------------------------------+------------------------------+ | LN | np.log | +------------------------------+------------------------------+ -| PULSE TRAIN | functions.pulse_train | +| PI | np.pi | +------------------------------+------------------------------+ -| RAMP | functions.ramp | +| SIN | np.sin | +------------------------------+------------------------------+ -| INTEGER | int | +| COS | np.cos | +------------------------------+------------------------------+ | TAN | np.tan | +------------------------------+------------------------------+ -| PI | np.pi | -+------------------------------+------------------------------+ -| = | == | +| ARCSIN | np.arcsin | +------------------------------+------------------------------+ -| < | < | +| ARCCOS | np.arccos | +------------------------------+------------------------------+ -| > | > | +| ARCTAN | np.arctan | +------------------------------+------------------------------+ | MODULO | np.mod | +------------------------------+------------------------------+ -| ARCSIN | np.arcsin | +| ELMCOUNT | len | +------------------------------+------------------------------+ -| ABS | abs | +| IF THEN ELSE | functions.if_then_else | +------------------------------+------------------------------+ -| ^ | \** | +| PULSE TRAIN | functions.pulse_train | ++------------------------------+------------------------------+ +| RAMP | 
functions.ramp | ++------------------------------+------------------------------+ +| INVERT MATRIX | functions.invert_matrix | ++------------------------------+------------------------------+ +| VMIN | functions.vmin | ++------------------------------+------------------------------+ +| VMAX | functions.vmax | ++------------------------------+------------------------------+ +| SUM | functions.sum | ++------------------------------+------------------------------+ +| PROD | functions.prod | +------------------------------+------------------------------+ | LOGNORMAL | np.random.lognormal | +------------------------------+------------------------------+ -| MAX | max | +| STEP | functions.step | +------------------------------+------------------------------+ -| SQRT | np.sqrt | +| PULSE | functions.pulse | +------------------------------+------------------------------+ -| ARCTAN | np.arctan | +| EXPRND | np.random.exponential | +------------------------------+------------------------------+ -| ARCCOS | np.arccos | +| POISSON | np.random.poisson | +------------------------------+------------------------------+ | RANDOM NORMAL | functions.bounded_normal | +------------------------------+------------------------------+ @@ -71,6 +83,8 @@ +------------------------------+------------------------------+ | DELAY FIXED | functions.DelayFixed | +------------------------------+------------------------------+ +| FORECAST | functions.Forecast | ++------------------------------+------------------------------+ | SAMPLE IF TRUE | functions.SampleIfTrue | +------------------------------+------------------------------+ | SMOOTH3 | functions.Smooth | @@ -85,16 +99,6 @@ +------------------------------+------------------------------+ | ZIDZ | functions.XIDZ | +------------------------------+------------------------------+ -| VMIN | functions.vmin | -+------------------------------+------------------------------+ -| VMAX | functions.vmax | -+------------------------------+------------------------------+ -| SUM | functions.sum | -+------------------------------+------------------------------+ -| PROD | functions.prod | -+------------------------------+------------------------------+ -| INVERT MATRIX | functions.invert_matrix | -+------------------------------+------------------------------+ | GET XLS DATA | external.ExtData | +------------------------------+------------------------------+ | GET DIRECT DATA | external.ExtData | diff --git a/pysd/_version.py b/pysd/_version.py index fcfdf383..f84c53b0 100644 --- a/pysd/_version.py +++ b/pysd/_version.py @@ -1 +1 @@ -__version__ = "1.10.0" +__version__ = "1.11.0" diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py index fdcc46bc..a7208925 100644 --- a/pysd/py_backend/builder.py +++ b/pysd/py_backend/builder.py @@ -790,7 +790,7 @@ def add_stock(identifier, expression, initial_condition, subs, merge_subs): merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + merging with other objects. Returns ------- @@ -920,7 +920,7 @@ def add_delay(identifier, delay_input, delay_time, initial_value, order, merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + merging with other objects. Returns ------- @@ -1119,7 +1119,7 @@ def add_n_delay(identifier, delay_input, delay_time, initial_value, order, merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + merging with other objects. 
Returns ------- @@ -1211,6 +1211,103 @@ def add_n_delay(identifier, delay_input, delay_time, initial_value, order, return "%s()" % py_name, new_structure +def add_forecast(identifier, forecast_input, average_time, horizon, + subs, merge_subs): + """ + Constructs Forecast object. + + Parameters + ---------- + identifier: str + The python-safe name of the forecast. + + forecast_input: str + Input of the forecast. + + average_time: str + Average time of the forecast. + + horizon: str + Horizon for the forecast. + + subs: list of strings + List of strings of subscript indices that correspond to the + list of expressions, and collectively define the shape of the output. + + merge_subs: list of strings + List of the final subscript range of the python array after + merging with other objects. + + Returns + ------- + reference: str + Reference to the forecast object `__call__` method, which will return + the output of the forecast process. + + new_structure: list + List of element construction dictionaries for the builder to assemble. + + """ + Imports.add("functions", "Forecast") + + new_structure = [] + py_name = "_forecast_%s" % identifier + + if len(subs) == 0: + stateful_py_expr = "Forecast(lambda: %s, lambda: %s,"\ + " lambda: %s, '%s')" % ( + forecast_input, average_time, + horizon, py_name) + + else: + # only need to re-dimension init as xarray will take care of other + stateful_py_expr = "Forecast(_forecast_input_%s, lambda: %s,"\ + " lambda: %s, '%s')" % ( + identifier, average_time, + horizon, py_name) + + # following elements not specified in the model file, but must exist + # create the delay initialization element + new_structure.append( + { + "py_name": "_forecast_input_%s" % identifier, + "parent_name": identifier, + "real_name": "Implicit", + "kind": "setup", # not specified in the model file, but must + # exist + "py_expr": forecast_input, + "subs": subs, + "merge_subs": merge_subs, + "doc": "Provides input for %s function" + % identifier, + "unit": "See docs for %s" % identifier, + "lims": "None", + "eqn": "None", + "arguments": "", + } + ) + + new_structure.append( + { + "py_name": py_name, + "parent_name": identifier, + "real_name": "Forecast of %s" % forecast_input, + "doc": "Forecast average time: %s \n Horizon %s" + % (average_time, horizon), + "py_expr": stateful_py_expr, + "unit": "None", + "lims": "None", + "eqn": "None", + "subs": "", + "merge_subs": None, + "kind": "stateful", + "arguments": "", + } + ) + + return "%s()" % py_name, new_structure + + def add_sample_if_true(identifier, condition, actual_value, initial_value, subs, merge_subs): """ @@ -1239,7 +1336,7 @@ def add_sample_if_true(identifier, condition, actual_value, initial_value, merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + merging with other objects. Returns ------- @@ -1338,7 +1435,7 @@ def add_n_smooth(identifier, smooth_input, smooth_time, initial_value, order, merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + . Returns ------- @@ -1448,7 +1545,7 @@ def add_n_trend(identifier, trend_input, average_time, initial_trend, Average time of the trend. trend_initial: str - This is used to initialize the trend . + This is used to initialize the trend. 
subs: list of strings List of strings of subscript indices that correspond to the @@ -1456,7 +1553,7 @@ def add_n_trend(identifier, trend_input, average_time, initial_trend, merge_subs: list of strings List of the final subscript range of the python array after - merging with other objects + merging with other objects. Returns ------- @@ -1940,8 +2037,11 @@ def build_function_call(function_def, user_arguments): delayed runtime evaluation in the method call "time", - provide access to current instance of time object - "scope" - provide access to current instance of + "scope", - provide access to current instance of scope object (instance of Macro object) + "subs_range_to_list" + - provides the list of subscripts in a given + subscript range ] user_arguments: list of arguments provided from model. @@ -2000,6 +2100,7 @@ def build_function_call(function_def, user_arguments): "lambda": "lambda: " + user_argument, "time": "__data['time']", "scope": "__data['scope']", + "subs_range_to_list": f"_subscript_dict['{user_argument}']" }[parameter_type] ) diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index 527cd521..1c3db062 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -355,6 +355,56 @@ def export(self): 'pipe': self.pipe}} +class Forecast(DynamicStateful): + """ + Implements FORECAST function + """ + def __init__(self, forecast_input, average_time, horizon, py_name): + """ + + Parameters + ---------- + forecast_input: function + average_time: function + horizon: function + py_name: str + Python name to identify the object + """ + + super().__init__() + self.horizon = horizon + self.average_time = average_time + self.input = forecast_input + self.py_name = py_name + + def initialize(self, init_val=None): + + # self.state = AV in the vensim docs + if init_val is None: + self.state = self.input() + else: + self.state = init_val + + if isinstance(self.state, xr.DataArray): + self.shape_info = {'dims': self.state.dims, + 'coords': self.state.coords} + + def __call__(self): + return self.input() * ( + 1 + zidz(self.input() - self.state, + self.average_time() * self.state + )*self.horizon() + ) + + def ddt(self): + return (self.input() - self.state) / self.average_time() + + def export(self): + return {self.py_name: { + 'state': self.state, + 'shape_info': self.shape_info}} + + class Smooth(DynamicStateful): """ Implements SMOOTH function diff --git a/pysd/py_backend/vensim/vensim2py.py b/pysd/py_backend/vensim/vensim2py.py index f3c5bdb3..05009c40 100644 --- a/pysd/py_backend/vensim/vensim2py.py +++ b/pysd/py_backend/vensim/vensim2py.py @@ -4,7 +4,6 @@ knowledge of vensim syntax should be here. 
""" -from inspect import cleandoc import os import re import warnings @@ -645,6 +644,12 @@ def parse_units(units_str): "exprnd": {"name": "np.random.exponential", "module": "numpy"}, "random 0 1": {"name": "random_0_1", "module": "functions"}, "random uniform": {"name": "random_uniform", "module": "functions"}, + "elmcount": { + "name": "len", + "parameters": [ + {"name": "subs_range", "type": "subs_range_to_list"}, + ] + }, "if then else": { "name": "if_then_else", "parameters": [ @@ -718,19 +723,32 @@ def parse_units(units_str): ], "module": "functions"}, # TODO functions/stateful objects to be added - # https://github.com/JamesPHoughton/pysd/issues/154 - "forecast": { + "get time value": { "name": "not_implemented_function", "module": "functions", - "original_name": "FORECAST", + "original_name": "GET TIME VALUE", }, - "get time value": { + # https://github.com/JamesPHoughton/pysd/issues/263 + "allocate by priority": { "name": "not_implemented_function", "module": "functions", - "original_name": "GET TIME VALUE", + "original_name": "ALLOCATE BY PRIORITY", + }, + # https://github.com/JamesPHoughton/pysd/issues/266 + "vector select": { + "name": "not_implemented_function", + "module": "functions", + "original_name": "VECTOR SELECT", + }, + # https://github.com/JamesPHoughton/pysd/issues/265 + "shift if true": { + "name": "not_implemented_function", + "module": "functions", + "original_name": "SHIFT IF TRUE", }, } + # list of fuctions that accept a dimension to apply over vectorial_funcs = ["sum", "prod", "vmax", "vmin"] @@ -774,180 +792,189 @@ def parse_units(units_str): } builders = { - "integ": lambda element, subscript_dict, merge_subs, args: - builder.add_stock( - identifier=element["py_name"], - expression=args[0], - initial_condition=args[1], - subs=element["subs"], - merge_subs=merge_subs - ), - "delay1": lambda element, subscript_dict, merge_subs, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[0], - order="1", - subs=element["subs"], - merge_subs=merge_subs - ), - "delay1i": lambda element, subscript_dict, merge_subs, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order="1", - subs=element["subs"], - merge_subs=merge_subs - ), - "delay3": lambda element, subscript_dict, merge_subs, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[0], - order="3", - subs=element["subs"], - merge_subs=merge_subs - ), - "delay3i": lambda element, subscript_dict, merge_subs, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order="3", - subs=element["subs"], - merge_subs=merge_subs - ), - "delay fixed": lambda element, subscript_dict, merge_subs, args: - builder.add_delay_f( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2] - ), - "delay n": lambda element, subscript_dict, merge_subs, args: - builder.add_n_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order=args[3], - subs=element["subs"], - merge_subs=merge_subs - ), - "sample if true": lambda element, subscript_dict, merge_subs, args: - builder.add_sample_if_true( - identifier=element["py_name"], - condition=args[0], - actual_value=args[1], - initial_value=args[2], - subs=element["subs"], - merge_subs=merge_subs - ), 
- "smooth": lambda element, subscript_dict, merge_subs, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[0], - order="1", - subs=element["subs"], - merge_subs=merge_subs - ), - "smoothi": lambda element, subscript_dict, merge_subs, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order="1", - subs=element["subs"], - merge_subs=merge_subs - ), - "smooth3": lambda element, subscript_dict, merge_subs, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[0], - order="3", - subs=element["subs"], - merge_subs=merge_subs - ), - "smooth3i": lambda element, subscript_dict, merge_subs, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order="3", - subs=element["subs"], - merge_subs=merge_subs - ), - "smooth n": lambda element, subscript_dict, merge_subs, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order=args[3], - subs=element["subs"], - merge_subs=merge_subs - ), - "trend": lambda element, subscript_dict, merge_subs, args: - builder.add_n_trend( - identifier=element["py_name"], - trend_input=args[0], - average_time=args[1], - initial_trend=args[2], - subs=element["subs"], - merge_subs=merge_subs - ), - "get xls data": lambda element, subscript_dict, merge_subs, args: - builder.add_ext_data( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - time_row_or_col=args[2], - cell=args[3], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=merge_subs, - keyword=element["keyword"], - ), - "get xls constants": lambda element, subscript_dict, merge_subs, args: - builder.add_ext_constant( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - cell=args[2], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=merge_subs, - ), - "get xls lookups": lambda element, subscript_dict, merge_subs, args: - builder.add_ext_lookup( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - x_row_or_col=args[2], - cell=args[3], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=merge_subs, - ), - "initial": lambda element, subscript_dict, merge_subs, args: - builder.add_initial( - identifier=element["py_name"], - value=args[0]), - "a function of": lambda element, subscript_dict, merge_subs, args: - builder.add_incomplete( - element["real_name"], args - ), + "integ": lambda element, subscript_dict, args: + builder.add_stock( + identifier=element["py_name"], + expression=args[0], + initial_condition=args[1], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "delay1": lambda element, subscript_dict, args: + builder.add_delay( + identifier=element["py_name"], + delay_input=args[0], + delay_time=args[1], + initial_value=args[0], + order="1", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "delay1i": lambda element, subscript_dict, args: + builder.add_delay( + identifier=element["py_name"], + delay_input=args[0], + delay_time=args[1], + initial_value=args[2], + order="1", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "delay3": lambda element, subscript_dict, args: + builder.add_delay( + identifier=element["py_name"], + delay_input=args[0], + 
delay_time=args[1], + initial_value=args[0], + order="3", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "delay3i": lambda element, subscript_dict, args: + builder.add_delay( + identifier=element["py_name"], + delay_input=args[0], + delay_time=args[1], + initial_value=args[2], + order="3", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "delay fixed": lambda element, subscript_dict, args: + builder.add_delay_f( + identifier=element["py_name"], + delay_input=args[0], + delay_time=args[1], + initial_value=args[2] + ), + "delay n": lambda element, subscript_dict, args: + builder.add_n_delay( + identifier=element["py_name"], + delay_input=args[0], + delay_time=args[1], + initial_value=args[2], + order=args[3], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "forecast": lambda element, subscript_dict, args: + builder.add_forecast( + identifier=element["py_name"], + forecast_input=args[0], + average_time=args[1], + horizon=args[2], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "sample if true": lambda element, subscript_dict, args: + builder.add_sample_if_true( + identifier=element["py_name"], + condition=args[0], + actual_value=args[1], + initial_value=args[2], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "smooth": lambda element, subscript_dict, args: + builder.add_n_smooth( + identifier=element["py_name"], + smooth_input=args[0], + smooth_time=args[1], + initial_value=args[0], + order="1", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "smoothi": lambda element, subscript_dict, args: + builder.add_n_smooth( + identifier=element["py_name"], + smooth_input=args[0], + smooth_time=args[1], + initial_value=args[2], + order="1", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "smooth3": lambda element, subscript_dict, args: + builder.add_n_smooth( + identifier=element["py_name"], + smooth_input=args[0], + smooth_time=args[1], + initial_value=args[0], + order="3", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "smooth3i": lambda element, subscript_dict, args: + builder.add_n_smooth( + identifier=element["py_name"], + smooth_input=args[0], + smooth_time=args[1], + initial_value=args[2], + order="3", + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "smooth n": lambda element, subscript_dict, args: + builder.add_n_smooth( + identifier=element["py_name"], + smooth_input=args[0], + smooth_time=args[1], + initial_value=args[2], + order=args[3], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "trend": lambda element, subscript_dict, args: + builder.add_n_trend( + identifier=element["py_name"], + trend_input=args[0], + average_time=args[1], + initial_trend=args[2], + subs=element["subs"], + merge_subs=element["merge_subs"] + ), + "get xls data": lambda element, subscript_dict, args: + builder.add_ext_data( + identifier=element["py_name"], + file_name=args[0], + tab=args[1], + time_row_or_col=args[2], + cell=args[3], + subs=element["subs"], + subscript_dict=subscript_dict, + merge_subs=element["merge_subs"], + keyword=element["keyword"], + ), + "get xls constants": lambda element, subscript_dict, args: + builder.add_ext_constant( + identifier=element["py_name"], + file_name=args[0], + tab=args[1], + cell=args[2], + subs=element["subs"], + subscript_dict=subscript_dict, + merge_subs=element["merge_subs"], + ), + "get xls lookups": lambda element, subscript_dict, args: + builder.add_ext_lookup( + identifier=element["py_name"], 
+ file_name=args[0], + tab=args[1], + x_row_or_col=args[2], + cell=args[3], + subs=element["subs"], + subscript_dict=subscript_dict, + merge_subs=element["merge_subs"], + ), + "initial": lambda element, subscript_dict, args: + builder.add_initial( + identifier=element["py_name"], + value=args[0]), + "a function of": lambda element, subscript_dict, args: + builder.add_incomplete( + element["real_name"], args + ), } # direct and xls methods are identically implemented in PySD @@ -1048,7 +1075,7 @@ def parse_general_expression(element, namespace={}, subscript_dict={}, expression_grammar = _include_common_grammar( r""" expr_type = array / expr / empty - expr = _ pre_oper? _ (lookup_with_def / build_call / macro_call / call / lookup_call / parens / number / string / reference) _ (in_oper _ expr)? + expr = _ pre_oper? _ (lookup_with_def / build_call / macro_call / call / lookup_call / parens / number / string / reference / nan) _ (in_oper _ expr)? subs_expr = subs _ in_oper _ subs logical_expr = logical_in_expr / logical_pre_expr / logical_parens / subs_expr @@ -1060,10 +1087,11 @@ def parse_general_expression(element, namespace={}, subscript_dict={}, lookup_call = lookup_call_subs _ parens lookup_call_subs = (id _ subscript_list) / id # check first for subscript + nan = ":NA:" number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?" range = _ "[" ~r"[^\]]*" "]" _ "," - arguments = ((logical_expr / expr) _ ","? _)* + arguments = ((logical_expr / (subs_range !(_ id)) / expr) _ ","? _)* parens = "(" _ expr _ ")" logical_parens = "(" _ logical_expr _ ")" @@ -1082,6 +1110,7 @@ def parse_general_expression(element, namespace={}, subscript_dict={}, subs = ~r"(%(subs)s)"IU # subscript names and elements (if none, use # non-printable character) + subs_range = ~r"(%(subs_range)s)"IU # subscript names func = ~r"(%(funcs)s)"IU # functions (case insensitive) in_oper = ~r"(%(in_ops)s)"IU # infix operators (case insensitive) pre_oper = ~r"(%(pre_ops)s)"IU # prefix operators (case insensitive) @@ -1100,6 +1129,7 @@ def parse_general_expression(element, namespace={}, subscript_dict={}, # finding a partial keyword 'subs': '|'.join(reversed(sorted(sub_names_list + sub_elems_list, key=len))), + 'subs_range': '|'.join(reversed(sorted(sub_names_list,key=len))), 'funcs': '|'.join(reversed(sorted(functions.keys(), key=len))), 'in_ops': '|'.join(reversed(sorted(in_ops_list, key=len))), 'pre_ops': '|'.join(reversed(sorted(pre_ops_list, key=len))), @@ -1380,8 +1410,7 @@ def visit_build_call(self, n, vc): self.kind = "component" builder_name = vc[0].strip().lower() name, structure = builders[builder_name]( - element, subs_dict, element["merge_subs"], - vc[4]) + element, subs_dict, vc[4]) self.new_structure += structure @@ -1419,6 +1448,10 @@ def visit__(self, n, vc): """Handles whitespace characters""" return "" + def visit_nan(self, n, vc): + builder.Imports.add("numpy") + return "np.nan" + def visit_empty(self, n, vc): return "None" @@ -1501,8 +1534,7 @@ def visit_excelLookup(self, n, vc): if sub in subscript_dict }) trans, structure = builders["get xls lookups"]( - element, subs_dict, - element["merge_subs"], arglist + element, subs_dict, arglist ) self.translation = trans diff --git a/tests/integration_test_vensim_pathway.py b/tests/integration_test_vensim_pathway.py index f044f812..78c76d2f 100644 --- a/tests/integration_test_vensim_pathway.py +++ b/tests/integration_test_vensim_pathway.py @@ -91,6 +91,10 @@ def test_exponentiation(self): output, canon = runner(test_models + '/exponentiation/exponentiation.mdl') 
assert_frames_close(output, canon, rtol=rtol) + def test_forecast(self): + output, canon = runner(test_models + '/forecast/test_forecast.mdl') + assert_frames_close(output, canon, rtol=rtol) + def test_function_capitalization(self): output, canon = runner(test_models + '/function_capitalization/test_function_capitalization.mdl') assert_frames_close(output, canon, rtol=rtol) diff --git a/tests/unit_test_functions.py b/tests/unit_test_functions.py index b82c3140..bf83ff9f 100644 --- a/tests/unit_test_functions.py +++ b/tests/unit_test_functions.py @@ -504,6 +504,39 @@ def test_delay_order(self): self.assertIn("Casting delay order from 1.500000 to 2", str(wu[0].message)) + def test_forecast(self): + import pysd + + input_val = 5 + + def input(): + return input_val + + frcst = pysd.functions.Forecast(forecast_input=input, + average_time=lambda: 3, + horizon=lambda: 10, + py_name='forecast') + + frcst.initialize() + self.assertEqual(frcst(), input_val) + + frcst.state = frcst.state + 0.1*frcst.ddt() + input_val = 20 + self.assertEqual(frcst(), 220) + + frcst.state = frcst.state + 0.1*frcst.ddt() + input_val = 35.5 + self.assertEqual( + frcst(), + input_val*(1+(input_val-frcst.state)/(3*frcst.state)*10)) + + input_val = 7 + init_val = 6 + frcst.initialize(init_val) + self.assertEqual( + frcst(), + input_val*(1+(input_val-init_val)/(3*init_val)*10)) + def test_initial(self): import pysd a = 1 diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index c602bb88..cf6e3785 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -568,6 +568,29 @@ def test_run_export_import(self): assert_frames_close(stocks2, stocks) + # forecast + test_trend = os.path.join( + _root, + 'test-models/tests/forecast/' + + 'test_forecast.mdl') + model = pysd.read_vensim(test_trend) + stocks = model.run(return_timestamps=50, flatten_output=True) + model.initialize() + model.run(return_timestamps=20) + model.export('frcst20.pic') + stocks2 = model.run(initial_condition='frcst20.pic', + return_timestamps=50, + flatten_output=True) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 20).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('frcst20.pic') + + assert_frames_close(stocks2, stocks) + # smooth test_smooth = os.path.join( _root, diff --git a/tests/unit_test_vensim2py.py b/tests/unit_test_vensim2py.py index 82bff28f..df8d8633 100644 --- a/tests/unit_test_vensim2py.py +++ b/tests/unit_test_vensim2py.py @@ -468,6 +468,15 @@ def test_number_parsing(self): res = parse_general_expression({"expr": "-1.3e-10"}) self.assertEqual(res[0]["py_expr"], "-1.3e-10") + def test_nan_parsing(self): + from pysd.py_backend.vensim.vensim2py import parse_general_expression + from pysd.py_backend.builder import Imports + + self.assertFalse(Imports._numpy) + res = parse_general_expression({'expr': ':NA:'}) + self.assertEqual(res[0]['py_expr'], 'np.nan') + self.assertTrue(Imports._numpy) + def test_stock_construction_function_no_subscripts(self): """ stock construction should create a stateful variable and reference it """ @@ -519,6 +528,35 @@ def time_step(): # check the reference to that variable self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") + def test_forecast_construction_function_no_subscripts(self): + """ Tests translation of 'forecast' + + This translation should 
create a new stateful object to hold the + forecast elements, and then pass back a reference to that value + """ + from pysd.py_backend.vensim.vensim2py import parse_general_expression + from pysd.py_backend.functions import Forecast + + res = parse_general_expression( + { + "expr": "FORECAST(Variable, AverageTime, Horizon)", + "py_name": "test_forecast", + "subs": [], + "merge_subs": [] + }, + {"Variable": "variable", "AverageTime": "averagetime", + "Horizon": "horizon"}, + elements_subs_dict={"test_forecast": []}, + ) + + # check stateful object creation + self.assertEqual(res[1][0]["kind"], "stateful") + a = eval(res[1][0]["py_expr"]) + self.assertIsInstance(a, Forecast) + + # check the reference to that variable + self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") + def test_smooth_construction_function_no_subscripts(self): """ Tests translation of 'smooth' @@ -874,6 +912,28 @@ def test_invert_matrix(self): self.assertEqual(res[0]["py_expr"], "invert_matrix(a())") + def test_subscript_elmcount(self): + from pysd.py_backend.vensim.vensim2py import parse_general_expression + + res = parse_general_expression( + { + "expr": "ELMCOUNT(dim1)", + "real_name": "A", + "py_name": "a", + "merge_subs": [] + }, + { + "A": "a", + }, + subscript_dict={ + "dim1": ["a", "b", "c"], "dim2": ["a", "b", "c"] + } + ) + + self.assertIn( + "len(_subscript_dict['dim1'])", + res[0]["py_expr"], ) + def test_subscript_logicals(self): from pysd.py_backend.vensim.vensim2py import parse_general_expression @@ -899,6 +959,33 @@ def test_subscript_logicals(self): "{'dim2': _subscript_dict['dim2']},'dim2')", res[0]["py_expr"], ) + def test_ref_with_subscript_prefix(self): + from pysd.py_backend.vensim.vensim2py import parse_general_expression + + # When parsing functions arguments first the subscript ranges are + # parsed and later the general id is used, however, the if a reference + # to a var starts with a subscript range name this could make the + # parser crash + res = parse_general_expression( + { + "expr": "ABS(Upper var)", + "real_name": "A", + "eqn": "A = ABS(Upper var)", + "py_name": "a", + "merge_subs": [] + }, + { + "Upper var": "upper_var", + }, + subscript_dict={ + "upper": ["a", "b", "c"] + } + ) + + self.assertIn( + "abs(upper_var())", + res[0]["py_expr"], ) + def test_incomplete_expression(self): from pysd.py_backend.vensim.vensim2py import parse_general_expression from warnings import catch_warnings From ce5f524a9c53124860705bc539551dac815bcb90 Mon Sep 17 00:00:00 2001 From: Eneko Martin Martinez Date: Wed, 15 Sep 2021 16:39:09 +0200 Subject: [PATCH 2/8] Keep missing values when interpolation is not possible in external objects --- pysd/py_backend/external.py | 54 +++++++++++---- tests/data/input.xlsx | Bin 15013 -> 15023 bytes tests/unit_test_external.py | 132 +++++++++++++++++++++++++++++++++--- 3 files changed, 163 insertions(+), 23 deletions(-) diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index 10035aed..52e73d63 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -388,6 +388,14 @@ def _initialize_data(self, element_type): valid_values = ~np.isnan(series) series = series[valid_values] data = data[valid_values] + if all(np.isnan(series)): + raise ValueError( + self.py_name + "\n" + + "Dimension given in:\n" + + self._file_sheet + + "\t{}:\t{}\n".format(series_across, self.x_row_or_col) + + " has length 0" + ) if self.missing == "warning": warnings.warn( self.py_name + "\n" @@ -419,7 +427,6 @@ def _initialize_data(self, element_type): 
series_across, self.x_row_or_col) + " has repeated values") - # Check for missing values in data if np.any(np.isnan(data)) and self.missing != "keep": if series_across == "name": @@ -430,13 +437,19 @@ def _initialize_data(self, element_type): if self.missing == "warning": # Fill missing values with the chosen interpolation method # what Vensim does during running for DATA + if self.interp != "raw": + interpolate_message =\ + " the corresponding value will be filled "\ + + "with the interpolation method of the object." + else: + interpolate_message = "" + warnings.warn( self.py_name + "\n" + "Data value missing or non-valid in:\n" + self._file_sheet + "\t{}:\t{}\n".format(cell_type, self.cell) - + " the corresponding value will be filled " - + "with the interpolation method of the object.\n\n" + + interpolate_message + "\n\n" ) elif self.missing == "raise": raise ValueError( @@ -446,7 +459,8 @@ def _initialize_data(self, element_type): + "\t{}:\t{}\n".format(cell_type, self.cell) ) # fill values - self._fill_missing(series, data) + if self.interp != "raw": + self._fill_missing(series, data) reshape_dims = tuple([len(series)] + utils.compute_shape(self.coords)) @@ -483,17 +497,29 @@ def _fill_missing(self, series, data): """ # if data is 2dims we need to interpolate datanan = np.isnan(data) + keeping_nan = False if len(data.shape) == 1: - data[datanan] = self._interpolate_missing( - series[datanan], - series[~datanan], - data[~datanan]) + if not np.all(datanan): + data[datanan] = self._interpolate_missing( + series[datanan], + series[~datanan], + data[~datanan]) + else: + keeping_nan = True else: for i, nanlist in enumerate(list(datanan.transpose())): - data[nanlist, i] = self._interpolate_missing( - series[nanlist], - series[~nanlist], - data[~nanlist][:, i]) + if not np.all(nanlist): + data[nanlist, i] = self._interpolate_missing( + series[nanlist], + series[~nanlist], + data[~nanlist][:, i]) + else: + keeping_nan = True + + if keeping_nan: + warnings.warn( + "Not able to interpolate some values..." 
+ " keeping them as missing.\n") def _interpolate_missing(self, x, xr, yr): """ @@ -516,9 +542,7 @@ def _interpolate_missing(self, x, xr, yr): """ y = np.empty_like(x, dtype=float) for i, value in enumerate(x): - if self.interp == "raw": - y[i] = np.nan - elif value >= xr[-1]: + if value >= xr[-1]: y[i] = yr[-1] elif value <= xr[0]: y[i] = yr[0] diff --git a/tests/data/input.xlsx b/tests/data/input.xlsx index 46a5695f579bff86d2cdbc5478304975f4fedbaa..0b2d91d3edffc6dc77a7c50fc09bdb05b18bff17 100644 GIT binary patch delta 11786 zcmaL71ymhd@;!{ZTX1)7aDsbq2=4Cgo&X)(_2TXx+?@cy0|fU#fZ#5{|0Hkb`@J`7 z=DTZMt$p_CQ>VLX*X=5DuXaaMl7oi9fj~e&fOxcJQLROzf`s|~0n7!-0(5_bh}^@# zQd~N=;8)=8G&9phv#|@0e=EJT_}nG+22MO+9(4!GRA!DhHpaj=W=*&#{7gEiFw=WT zs6I|qYlv#(SnzT}vwKx4L@iZU$r=M0xQYT!#ZE3n08%zJsftPKHC%2) zNOjW6zKHLDKZrf2OBwGhDKs}STkfRd@QNsUhM2GB=QY2?8du(p>vmq3pi^9))2Rgf zh`2s-`GA^q67O)Ok<~C2BtYuo#x$-HStrEwgqjTNwoD(5d`m`onr$s1dPbGRDg99g zg#jlFUD=}1F3;+{by=6H^7?9ZHw~)_BMm~6+3=^D8Q!<&5n1novyQv6h#U$tongC+ zKm>al_)kaD+>g-!=^s zQv~(Gh#OG+V1q~srPcwCvi6BIZ8yWPSn$I4;pPKwIe$TVI2 z&~e8TZd<*g6}N8{aXF)P4y@&BTBoXM27jG`%5ty@6?&$9)k`V0Aoy%oFr)YJ&D#V7 z*{$k6dAFzs)qbntU7`SvDXeqDO73l(Oz2+YU)uO;n2;qy42JAHx65~oB5}W1Nd2oN z5kcJt=cs#pzA{+B#$XQ%?It0+1iGsdwZ-Qdn3z9&O-L7$V3`FbXUoVX>;P%s4az#5NWL#6K^^dJ))FsK2g7 z3Is-A)Mfb#7TotoZzP;o+9Z+0(l`vkCL^C88B?~Y)fYwkmEIEckVDW9_}y_poIDcw zAAsV5sr;aT{oa50Ob8+OVs5bzwOtbPGlKJ)8KY#wM1>ob$*^ zd?k}y_u{&xcTASxO56CuX@|->TUjd1XR8~dhV`= zOB1_!t)R`PK?h!T#-q8l;{NP=%V8RJ#i#A8U)e-3aj;5GqgbHxu6v1E!X5u8=U2&> z$kAPWp?zZ^OVt@q&6ppxMEERn8bzi3ZCyIycEVgfBkgCa1FjYoIcZvWNrue>@VrhG z0NF8(4ERo$e{jlW3hk*(ZB9n#f*qrFY?M!yiMbkgiV4`cI+6I$`9vus!=p#a^6JN9 zMoJ`4yxc;eKEv&u@n!sK=zb-lHPw^!uMh6mjVkv2?F@7+3I+SD29X+R{eB6qW$vx3 zKP=Cx!e^41D4OSPgS%Vw>Yx*c4t4l^ zN`>9>Efu?SzoT`o_Ola2TQNzoD`B~}J496Ka5 z*pvXN-VzKYgb5?i{7uWyeotiOH;Lk0*o*-2-lFfyrwlMSVvVNp)$j&e zwrxW=oJf4IfdMkT`9`Wv^es37NJ7CM#k^K)hq$3x&QbNvkd`2Eczu}HZ-Ayu&=*IS zrHa0CQ<(NBR#vgjC|0)3JND0?!}iRBqA{COFwrE(n5p!lQVk`&-28ozAcIefsTQaG z!GiWb^P12#AL)}~r|So+{SsyEm)c3*Q=vqWd$N6?0!9525j%XsDDkXva17!uA_@=_ zp~&>G3tRT*?mzMqCl-Eejst*m(OHKN7qOU^%c71!;h3E68bH`8zh(*d=cDLT2kWPo zm3>~0d6^+<7zF1BlkaxcD;<7IP#@5<<+gedaawRHt8P!n8} z8FWPrq8BxmK$Mf!QvIBdrIA!q8RSj_0vVel%E{?B(!6nV4i|Mtd<#7#ih_aN4^=Bl zjPa@;`b-omF3vqEePObZ7H)tREP{v2G^g%uBA$|sKa(xOi9ym2!6pjAAnJ#778SrC zqlQ4#)`Sd0)rT}`z&S$z8{y#qLv1!7yIo4;g28905Q$)-J59zr+W$5#FA!CK&*&26 zY3S5UxJV|~@!Nkn7ImV9$!$2viOyJZ!Z0+GkE!H%VW^uQTgXYn(6K%SRUU{;ga}P> zyr1)dnG$isfbIuiYDMfY;QL|EM10~PsLk|4t=xCT8)OyT{yT%syo@@@U19lW8QlNo z4W%Q#vhu)YQ6n;#H)2L~Fim1cEHH1xjksZ&#Epbu-n=!Ef@ykdr1O5ruU}Iby&FJ>f`7yWN; zSLX?hnOHLCFz9MR?Mc$Bus7g-hI22z$2;149BdNLxO?cjO{uW>KFu>Kp-k)jsMhf5 zY$QZVK@o5tn>r}7&lusTM)#c|Ql?#(Fpw(ya*>{5sj%qSQ3=L1z&}Z!1OiKa6t<$L zn_54(aAiys-^ZLxkLuu{kF&fS*W#IQ>!88g*W9zQRd!1;=hG0j6#Mn_KTM$?nv^0+ zzi+#qckUe|+X8bNt8c!Lr5E0?#OhB`6%lNf*_~iV-aL$k{8T)*-tJvsZ!~7Q{bbk;7$^Wup-9lk^L<*k!*uD2l)l}M)TO7BA0|G8 z#RSgWY0r-R77B^i2Z~2E_qBLGE`BDm#CB)YJ&-BM!6TsE#-P$8KtQas|EDAXQy@@* zYcSveIoFIKJowYcN1Q~w`ZhMG#pI)S&ZLb;H^#3kWo`T*9X#Az0uhe{MPbzUc^BN` zHh5<7TEb`wt}$Ds?P!tfUFKz%BO{C(w=Vg53UpYb^8)YAQ?|EaS$0C(!_{V$Nu?Mj z7G2J>)WPc-&)OYl=eCXJmC5}O-i?`d2W?(J+_p`pxwe}AS-|$@P)LflT4c(0_UW!`rhkshatMGZ3F(vu)#CG_7Y>##Xjh8ZkiS3J`%{kVGdB-3+N)1fmI3f7F| z!DO%-VNSBE&-76v6}zhXEd^3)n#(Wh)$DIG7-(DM3if&aB?UP0+*|p-TRs(^&ETbB zIDR_zf98mhe)sUaE%mD5<%}4}=YZE`&;u-4gIBS+{GAgduTV`iVhAq-A6}?CS zXF&tfM|F8`-rEi4HJftAPXO-K`Fqu({pu#_ z&4n_P@t)P3*haSHn<)Bg3T#T`-(mpwLZxbF`qr(ul)FSAyl$aVrj4i4AUyUOT{)(J zIAQ!I{Cbf|PQr&juPU7B@IEH*(;!YupM||fIK3MF*mM-GH{E`J6Vm`J%IsC=cMd-b!fJ;V74>RC+@k7Gb(p>06_=el&8NRLxsSW^)qRdlNFR6 
zzjb+^+UrtG-?JwtbXsIbIH{j7^EoRB?&F!6g;WWJ)HECg95m{SCb}^N7qu~Lt33W8 zjc;xv{546Un%01Ln-oYZ85kXteAg$DbKDo*j{D%O>0xqKK)59+2~%#gA!)g}TEw?0 zaKj0hJRn3Vv|M>(D zlT2V&jyFO1$865F$nRT}xNKkM0to?e@#Uo1cLrbNBx~;Yo!7ZTbD1 zE4^a5CwhzpZjk)oyH*}O*~rY$x+jA5&2NE1)9GW@9Qq#wlt!;}ZHL@Ha!r8ge8eb* zYZvy56w`3iSGuErjxN~7iA*y0S+95WFU)G6{@Eoc5*VPqyMLLi=`ShzyYdnAhI{7E zq^(7L-Sn?{JpAT0!f&7MxISiAE zJmf2Ei2oY96zy{I6D5`!To1cKjt8OlV4h$zFb&;YfTCg=coJ=l_=QN-s8n9uNPs;-!WI* Opg?{a*y=aGTmFA}yAzH8 diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index a3cf641b..2ec7fb45 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -204,12 +204,6 @@ def test_fill_missing(self): interp = np.array([1., 1., 1., 3., 3.5, 4., 5., 6., 7., 8., 8., 8.]) - ext.interp = "raw" - datac = data.copy() - ext._fill_missing(series, datac) - self.assertTrue(np.all(data[~np.isnan(data)] - == datac[~np.isnan(datac)])) - ext.interp = "hold backward" datac = data.copy() ext._fill_missing(series, datac) @@ -1889,6 +1883,76 @@ def test_non_existent_cellrange_name_in_sheet_pyxl(self): # Following test are for ExtData class only # as the initialization of ExtLookup uses the same function + def test_data_interp_h1dm_row(self): + """ + Test for warning 1d horizontal series interpolation when series + has missing or NaN data + """ + import pysd + from warnings import catch_warnings + + file_name = "data/input.xlsx" + sheet = "Horizontal missing" + time_row_or_col = "time_missing" + cell = "len_0" + coords = {} + interp = None + py_name = "test_data_interp_h1dm_row" + + pysd.external.External.missing = "warning" + + data = pysd.external.ExtData(file_name=file_name, + sheet=sheet, + time_row_or_col=time_row_or_col, + root=_root, + cell=cell, + coords=coords, + interp=interp, + py_name=py_name) + + with catch_warnings(record=True) as ws: + data.initialize() + wu = [w for w in ws if issubclass(w.category, UserWarning)] + self.assertTrue("Not able to interpolate" in str(wu[-1].message)) + + self.assertTrue(all(np.isnan(data.data.values))) + + def test_data_interp_h1dm_row2(self): + """ + Test for warning 1d horizontal series interpolation when series + has missing or NaN data + """ + import pysd + from warnings import catch_warnings + + file_name = "data/input.xlsx" + sheet = "Horizontal missing" + time_row_or_col = "4" + cell = "C9" + coords = {"dim": ["B", "C", "D"]} + interp = None + py_name = "test_data_interp_h1dm_row2" + + pysd.external.External.missing = "warning" + + data = pysd.external.ExtData(file_name=file_name, + sheet=sheet, + time_row_or_col=time_row_or_col, + root=_root, + cell=cell, + coords=coords, + interp=interp, + py_name=py_name) + + with catch_warnings(record=True) as ws: + data.initialize() + wu = [w for w in ws if issubclass(w.category, UserWarning)] + self.assertTrue("Not able to interpolate" in str(wu[-1].message)) + + self.assertFalse(any(np.isnan(data.data.loc[:, "B"].values))) + self.assertFalse(any(np.isnan(data.data.loc[:, "C"].values))) + self.assertTrue(all(np.isnan(data.data.loc[:, "D"].values))) + def test_data_interp_h1dm(self): """ Test for warning 1d horizontal series interpolation when series @@ -1921,7 +1985,7 @@ def test_data_interp_h1dm(self): # use only user warnings wu = [w for w in ws if issubclass(w.category, UserWarning)] self.assertEqual(len(wu), 1) - self.assertTrue("missing" in str(wu[0].message)) + self.assertIn("missing", str(wu[0].message)) with catch_warnings(record=True) as ws: for x, y in zip(_exp.xpts, _exp.interp_1d): @@ -2287,6 +2351,9 @@ def 
test_data_interp_hn3dmd(self): self.assertTrue(np.all( ["missing" in str(w.message) for w in wu] )) + self.assertTrue(np.all( + ["will be filled" in str(w.message) for w in wu] + )) with catch_warnings(record=True) as ws: for x, y in zip(_exp.xpts, _exp.interp_3d): @@ -2300,6 +2367,54 @@ def test_data_interp_hn3dmd(self): self.assertTrue("extrapolating data above the maximum value" + " of the time" in str(wu[1].message)) + def test_data_interp_hn3dmd_raw(self): + """ + Test for warning 1d horizontal series interpolation when series + has missing or NaN data + """ + import pysd + from warnings import catch_warnings + + file_name = "data/input.xlsx" + sheet = "Horizontal missing" + time_row_or_col = "time" + cell_1 = "data_2d" + cell_2 = "data_2db" + coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} + coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + interp = "raw" + py_name = "test_data_interp_hn3dmd_raw" + + pysd.external.External.missing = "warning" + + data = pysd.external.ExtData(file_name=file_name, + sheet=sheet, + time_row_or_col=time_row_or_col, + root=_root, + cell=cell_1, + interp=interp, + coords=coords_1, + py_name=py_name) + + data.add(file_name=file_name, + sheet=sheet, + time_row_or_col=time_row_or_col, + cell=cell_2, + interp=interp, + coords=coords_2) + + with catch_warnings(record=True) as ws: + data.initialize() + # use only user warnings + wu = [w for w in ws if issubclass(w.category, UserWarning)] + self.assertEqual(len(wu), 2) + self.assertTrue(np.all( + ["missing" in str(w.message) for w in wu] + )) + self.assertTrue(np.all( + ["will be filled" not in str(w.message) for w in wu] + )) + def test_lookup_hn3dmd_raise(self): """ Test for error 3d horizontal series interpolation with missing data @@ -2592,6 +2707,7 @@ def test_data_interp_hn1d0(self): coords = {} interp = None py_name = "test_data_interp_h1d0" + pysd.external.External.missing = "warning" data = pysd.external.ExtData(file_name=file_name, sheet=sheet, @@ -2728,7 +2844,7 @@ def test_data_raw_hnnm(self): cell = "C12" coords = {} interp = None - py_name = "test_data_interp_hnnnm" + py_name = "test_data_interp_hnnm" data = pysd.external.ExtData(file_name=file_name, sheet=sheet, From 471679edec87739229b20240c13d94c5c87ffd6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Tue, 21 Sep 2021 13:22:41 +0200 Subject: [PATCH 3/8] allow multiple separators --- docs/advanced_usage.rst | 4 +- pysd/py_backend/builder.py | 102 +++++------- pysd/py_backend/utils.py | 50 ++++++ pysd/py_backend/vensim/vensim2py.py | 56 ++++--- pysd/pysd.py | 8 +- .../test_split_model_sub_subviews.mdl | 150 ++++++++++++++++++ tests/unit_test_cli.py | 12 +- tests/unit_test_pysd.py | 132 +++++++++++++-- 8 files changed, 404 insertions(+), 110 deletions(-) create mode 100644 tests/more-tests/split_model/test_split_model_sub_subviews.mdl diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 04f56341..c4f1789a 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -67,9 +67,9 @@ In a Vensim model with three separate views (e.g. `view_1`, `view_2` and `view_3 .. note :: Often, modelers wish to organise views further. To that end, a common practice is to include a particular character in the View name to indicate that what comes after it is the name of the subview. For instance, we could name one view as `ENERGY.Supply` and another one as `ENERGY.Demand`. 
- In that particular case, setting the `subview_sep` kwarg equal to `"."`, as in the code below, would name the translated views as `demand.py` and `supply.py` and place them inside the `ENERGY` folder:: + In that particular case, setting the `subview_sep` kwarg equal to `["."]`, as in the code below, would name the translated views as `demand.py` and `supply.py` and place them inside the `ENERGY` folder:: - read_vensim("many_views_model.mdl", split_views=True, subview_sep=".") + read_vensim("many_views_model.mdl", split_views=True, subview_sep=["."]) If macros are present, they will be self-contained in files named as the macro itself. The macro inner variables will be placed inside the module that corresponds with the view in which they were defined. diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py index a7208925..331a7d04 100644 --- a/pysd/py_backend/builder.py +++ b/pysd/py_backend/builder.py @@ -152,9 +152,9 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, main_filename: str The name of the file to write the main module of the model to. - elements_per_module: dict - Contains the names of the modules as keys and the variables in - each specific module inside a list as values. + elements_per_view: dict + Contains the names of the modules and submodules as keys and the + variables in each specific module inside a list as values. """ root_dir = os.path.dirname(main_filename) @@ -163,53 +163,52 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, # create modules directory if it does not exist os.makedirs(modules_dir, exist_ok=True) - # check if there are subviews or only main views - subviews = all(isinstance(n, dict) for n in elements_per_view.values()) - - all_views = elements_per_view.keys() - # creating the rest of files per module (this needs to be run before the - # main module, as it updates the import_modules) - processed_elements = [] - for view_name in all_views: - view_elems = [] - if not subviews: # only main views + def process_views_tree(view_name, + view_content, + working_directory, + processed_elements): + """ + Creates a directory tree based on the elements_per_view dictionary. + If it's the final view, it creates a file, if not, it creates a folder. 
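Concretely, the recursion mirrors the nested dictionary produced by the translator: keys whose value is another dictionary become directories, while keys whose value is a list of element names become module files. A minimal sketch of the expected layout, using purely illustrative view and variable names rather than ones from any particular model::

    elements_per_view = {
        "view_1": {
            "submodule_1": ["stock", "rate1"],
            "submodule_2": ["another_var"],
        },
        "view_2": ["variablex"],
    }

    # walking this tree from the modules directory would yield
    #   view_1/submodule_1.py   <- stock, rate1
    #   view_1/submodule_2.py   <- another_var
    #   view_2.py               <- variablex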
+ """ + if isinstance(view_content, list): # will become a module + subview_elems = [] for element in elements: - if element.get("py_name", None) in \ - elements_per_view[view_name] or \ - element.get("parent_name", None) in \ - elements_per_view[view_name]: - view_elems.append(element) + if element.get("py_name") in view_content or \ + element.get("parent_name", None) in view_content: + subview_elems.append(element) - _build_separate_module(view_elems, subscript_dict, view_name, - modules_dir) + _build_separate_module(subview_elems, subscript_dict, + view_name, working_directory) + processed_elements += subview_elems - else: - # create subdirectory - view_dir = os.path.join(modules_dir, view_name) - os.makedirs(view_dir, exist_ok=True) + else: # the current view has subviews + working_directory = os.path.join(working_directory, view_name) + os.makedirs(working_directory, exist_ok=True) - for subview_name in elements_per_view[view_name].keys(): - subview_elems = [] - for element in elements: - if element.get("py_name", None) in \ - elements_per_view[view_name][subview_name] or \ - element.get("parent_name", None) in \ - elements_per_view[view_name][subview_name]: - subview_elems.append(element) + for subview_name, subview_content in view_content.items(): + process_views_tree(subview_name, + subview_content, + working_directory, + processed_elements) - _build_separate_module(subview_elems, subscript_dict, - subview_name, view_dir) - view_elems += subview_elems + processed_elements = [] + for view_name, view_content in elements_per_view.items(): + process_views_tree(view_name, + view_content, + modules_dir, + processed_elements) - processed_elements += view_elems + # move back to the main folder + os.chdir(os.path.dirname(modules_dir)) # the unprocessed will go in the main file unprocessed_elements = [ element for element in elements if element not in processed_elements ] + # building main file using the build function - _build_main_module(unprocessed_elements, subscript_dict, - main_filename, subviews) + _build_main_module(unprocessed_elements, subscript_dict, main_filename) # create json file for the modules and corresponding model elements with open(os.path.join(modules_dir, "_modules.json"), "w") as outfile: @@ -228,7 +227,7 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, json.dump(subscript_dict, outfile, indent=4, sort_keys=True) -def _build_main_module(elements, subscript_dict, file_name, subviews): +def _build_main_module(elements, subscript_dict, file_name): """ Constructs and writes the python representation of the main model module, when the split_views=True in the read_vensim function. @@ -252,10 +251,6 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): file_name: str Path of the file where the main module will be stored. - subviews: bool - True or false depending on whether the views are split in subviews or - not. 
- Returns ------- None or text: None or str @@ -277,7 +272,7 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): funcs = _generate_functions(elements, subscript_dict) Imports.add("utils", "load_model_data") - Imports.add("utils", "open_module") + Imports.add("utils", "load_modules") # import of needed functions and packages text, root = Imports.get_header(os.path.basename(file_name), @@ -302,21 +297,10 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): text += _get_control_vars(control_vars) - if not subviews: - text += textwrap.dedent(""" - # load modules from the modules_%(outfile)s directory - for module in _modules: - exec(open_module(_root, "%(outfile)s", module)) - - """ % { - "outfile": os.path.basename(file_name).split(".")[0], - }) - else: - text += textwrap.dedent(""" - # load submodules from subdirs in modules_%(outfile)s directory - for mod_name, mod_submods in _modules.items(): - for submod_name in mod_submods.keys(): - exec(open_module(_root, "%(outfile)s", mod_name, submod_name)) + text += textwrap.dedent(""" + # load modules from modules_%(outfile)s directory + for module_name, module_content in _modules.items(): + exec(load_modules(module_name, module_content, _root, "%(outfile)s")) """ % { "outfile": os.path.basename(file_name).split(".")[0], diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index dd63866f..e3590476 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -8,6 +8,7 @@ import warnings import keyword import json +from collections.abc import Mapping import regex as re import progressbar @@ -873,6 +874,12 @@ def open_module(root_dir, model_name, module, submodule=None): str: Model file content. """ + + warnings.warn( + "open_module function will be deprecated from release 2.0. Use " + + "load_modules instead.", + FutureWarning + ) if not submodule: rel_file_path = module + ".py" else: @@ -882,6 +889,24 @@ def open_module(root_dir, model_name, module, submodule=None): os.path.join(root_dir, "modules_" + model_name, rel_file_path)).read() +def load_modules(module_name, module_content, root_dir, model_name, + work_dir=None, submodules=[]): + if not work_dir: + work_dir = os.path.join(root_dir, "modules_" + model_name) + + if isinstance(module_content, list): + submodules.append( + open(os.path.join(work_dir, module_name + ".py"), "r").read()) + else: + work_dir = os.path.join(work_dir, module_name) + for submod_name, submod_content in module_content.items(): + load_modules( + submod_name, submod_content, root_dir, model_name, + work_dir=work_dir, submodules=submodules) + + return "\n\n".join(submodules) + + def clean_file_names(*args): """ Removes special characters and makes clean file names @@ -905,6 +930,31 @@ def clean_file_names(*args): return clean +def merge_nested_dicts(original_dict, dict_to_merge): + """ + Merge dictionaries recursively, preserving common keys. + + Parameters + ---------- + original_dict: dict + Dictionary onto which the merge is executed. + + dict_to_merge: dict + Dictionary to be merged to the original_dict. 
+ + Returns + ------- + None + """ + + for k, v in dict_to_merge.items(): + if (k in original_dict and isinstance(original_dict[k], dict) + and isinstance(dict_to_merge[k], Mapping)): + merge_nested_dicts(original_dict[k], dict_to_merge[k]) + else: + original_dict[k] = dict_to_merge[k] + + class ProgressBar: """ Progress bar for integration diff --git a/pysd/py_backend/vensim/vensim2py.py b/pysd/py_backend/vensim/vensim2py.py index 05009c40..464d4302 100644 --- a/pysd/py_backend/vensim/vensim2py.py +++ b/pysd/py_backend/vensim/vensim2py.py @@ -559,8 +559,9 @@ def visit_view_name(self, n, vc): def visit_var_definition(self, n, vc): if int(vc[10]) % 2 != 0: # not a shadow variable - self.view_or_var["variable_name"] = self.namespace.get(vc[4], - "") + self.view_or_var["variable_name"] = \ + self.namespace.get(vc[4], "") or \ + self.namespace.get(vc[4].replace(" ", "_"), "") def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text or "" @@ -1698,8 +1699,8 @@ def _classify_elements_by_module(sketch, namespace, subview_sep): Translation from original model element names (keys) to python safe function identifiers (values). - subview_sep: str - Character used to split view names into view + subview + subview_sep: list + Characters used to split view names into view + subview (e.g. if a view is named ENERGY.Demand and suview_sep is set to ".", then the Demand subview would be placed inside the ENERGY directory) @@ -1735,27 +1736,34 @@ def _classify_elements_by_module(sketch, namespace, subview_sep): # split into subviews, if subview_sep is provided views_dict = {} - - if subview_sep and any(filter(lambda x: subview_sep in x, - non_empty_views.keys())): - for name, elements in non_empty_views.items(): - # split and clean view/subview names as they are not yet safe - view_subview = name.split(subview_sep) - - if len(view_subview) == 2: - view, subview = utils.clean_file_names(*view_subview) - else: - view = utils.clean_file_names(*view_subview)[0] - subview = "" - - if view.upper() not in views_dict.keys(): - views_dict[view.upper()] = {} - if not subview: - views_dict[view.upper()][view.lower()] = elements - else: - views_dict[view.upper()][subview.lower()] = elements + if subview_sep and any( + sep in view for sep in subview_sep for view in non_empty_views): + escaped_separators = list(map(lambda x: re.escape(x), subview_sep)) + for full_name, values in non_empty_views.items(): + # split the full view name using the separator and make the + # individual parts safe file or directory names + clean_view_parts = utils.clean_file_names( + *re.split( + "|".join(escaped_separators), + full_name)) + # creating a nested dict for each view.subview + # (e.g. 
{view_name: {subview_name: [values]}}) + nested_dict = values + + for item in reversed(clean_view_parts): + + nested_dict = {item: nested_dict} + # merging the new nested_dict into the views_dict, preserving + # repeated keys + utils.merge_nested_dicts(views_dict, nested_dict) + + # view names do not have separators or separator characters not provided else: - # clean file names + if subview_sep and not any( + sep in view for sep in subview_sep for view in non_empty_views): + warnings.warn("The given subview separators were not matched in " + + "any view name.") + for view_name, elements in non_empty_views.items(): views_dict[utils.clean_file_names(view_name)[0]] = elements diff --git a/pysd/pysd.py b/pysd/pysd.py index f6f21ee6..04cde2c9 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -91,10 +91,10 @@ def read_vensim(mdl_file, initialize=True, missing_values="warning", **kwargs: (optional) Additional keyword arguments. - subview_sep:(str) - Character used to separate views and subviews. If provided, - and split_views=True, each submodule will be placed inside the - folder of the parent view. + subview_sep:(list) + Characters used to separate views and subviews (e.g. [",", "."]). + If provided, and split_views=True, each submodule will be placed + inside the directory of the parent view. Returns diff --git a/tests/more-tests/split_model/test_split_model_sub_subviews.mdl b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl new file mode 100644 index 00000000..07bafdaa --- /dev/null +++ b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl @@ -0,0 +1,150 @@ +{UTF-8} +var tolo= + 55+great var + ~ + ~ | + +great var= + 5 + ~ + ~ | + +interesting var 1= + "variable-x"+1 + ~ + ~ | + +interesting var 2= + interesting var 1*5 + ~ + ~ | + +another var= + 3*Stock + ~ + ~ | + +"rate-1"= + "var-n" + ~ + ~ | + +"var-n"= + 5 + ~ + ~ | + +"variable-x"= + 6*another var + ~ + ~ | + +Stock= INTEG ( + "rate-1", + 1) + ~ + ~ | + +******************************************************** + .Control +********************************************************~ + Simulation Control Parameters + | + +FINAL TIME = 100 + ~ Month + ~ The final time for the simulation. + | + +INITIAL TIME = 0 + ~ Month + ~ The initial time for the simulation. + | + +SAVEPER = + TIME STEP + ~ Month [0,?] + ~ The frequency with which output is stored. + | + +TIME STEP = 1 + ~ Month [0,?] + ~ The time step for the simulation. 
+ | + +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1.Submodule 1 +$255-128-0,0,Times New Roman|12||0-0-0|0-0-0|0-192-192|-1--1--1|-1--1--1|96,96,100,0 +10,1,Stock,497,237,40,20,3,3,0,0,0,0,0,0 +12,2,48,297,243,10,8,0,3,0,0,-1,0,0,0 +1,3,5,1,4,0,0,22,0,0,0,-1--1--1,,1|(422,243)| +1,4,5,2,100,0,0,22,0,0,0,-1--1--1,,1|(341,243)| +11,5,48,382,243,6,8,34,3,0,0,1,0,0,0 +10,6,"rate-1",382,262,21,11,40,3,0,0,-1,0,0,0 +12,7,0,1141,258,150,150,3,12,0,0,1,0,0,0 +Stock +10,8,"var-n",207,367,18,11,8,3,0,0,0,0,0,0 +1,9,8,6,0,0,0,0,0,128,0,-1--1--1,,1|(288,318)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1.Submodule 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,another var,89,168,36,11,8,3,0,0,0,0,0,0 +10,2,Stock,334,243,29,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(221,209)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,"variable-x",191,176,32,11,8,3,0,0,0,0,0,0 +10,2,another var,223,395,45,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +12,3,0,461,148,43,11,8,7,0,0,-1,0,0,0 +This is view 2 +1,4,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(208,292)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 1-Sview 1 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,"variable-x",260,232,41,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +10,2,interesting var 1,490,221,50,11,8,3,0,0,0,0,0,0 +1,3,1,2,1,0,0,0,0,128,0,-1--1--1,,1|(269,233)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 1-Sview 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,interesting var 1,249,173,59,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +10,2,interesting var 2,484,153,50,11,8,3,0,0,0,0,0,0 +1,3,1,2,0,0,0,0,0,128,0,-1--1--1,,1|(363,163)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.subview 2-sview 3 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,great var,291,291,29,11,8,3,0,0,0,0,0,0 +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 2-sview 4 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,var tolo,162,186,25,11,8,3,0,0,0,0,0,0 +10,2,great var,128,103,38,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|0||128-128-128 +1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(141,138)| +///---\\\ +:L<%^E!@ +9:Current +15:0,0,0,0,0,0 +19:100,6 +27:0, +34:0, +4:Time +5:var tolo +35:Date +36:YYYY-MM-DD +37:2000 +38:1 +39:1 +40:2 +41:0 +42:1 +24:0 +25:100 +26:100 diff --git a/tests/unit_test_cli.py b/tests/unit_test_cli.py index be0400a7..2d3ca1b1 100644 --- a/tests/unit_test_cli.py +++ b/tests/unit_test_cli.py @@ -259,7 +259,7 @@ def test_read_vensim_split_model_subviews(self): model_split = pysd.read_vensim( root_dir 
+ model_name + ".mdl", split_views=True, - subview_sep="." + subview_sep=["."] ) namespace_filename = "_namespace_" + model_name + ".json" @@ -273,19 +273,17 @@ def test_read_vensim_split_model_subviews(self): self.assertEqual(out.returncode, 0) # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_2")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) # check creation of module files self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_1.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_2.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_2/" + - "view_2.py")) + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) # check that the results of the split model are the same than those # without splitting diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index cf6e3785..b3710cf7 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -261,7 +261,7 @@ def test_read_vensim_split_model_subviews(self): model_name = "test_split_model_subviews" model_split = pysd.read_vensim( root_dir + model_name + ".mdl", split_views=True, - subview_sep="." + subview_sep=["."] ) namespace_filename = "_namespace_" + model_name + ".json" @@ -269,19 +269,17 @@ def test_read_vensim_split_model_subviews(self): modules_dirname = "modules_" + model_name # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_2")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) # check creation of module files self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_1.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_2.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_2/" + - "view_2.py")) + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) with open(root_dir + model_name + ".py", 'r') as file: file_content = file.read() @@ -324,23 +322,61 @@ def test_read_vensim_split_model_subviews(self): # remove newly created modules folder shutil.rmtree(root_dir + modules_dirname) - def test_read_vensim_split_model_with_macro(self): + def test_read_vensim_split_model_several_subviews(self): import pysd from pysd.tools.benchmarking import assert_frames_close - root_dir = more_tests + "/split_model_with_macro/" + root_dir = os.path.join(_root, "more-tests/split_model/") - model_name = "test_split_model_with_macro" + model_name = "test_split_model_sub_subviews" model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True + root_dir + model_name + ".mdl", split_views=True, + subview_sep=[".", "-"] ) namespace_filename = "_namespace_" + model_name + ".json" subscript_dict_filename = "_subscripts_" + model_name + ".json" modules_dirname = "modules_" + model_name - # check that the results of the split model are the same - # than those without splitting + # check that the modules folders were created + self.assertTrue(os.path.isdir(root_dir + modules_dirname + 
"/view_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_2")) + # check creation of module files + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_1.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_1.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_3.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_4.py")) + + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() + + # assert that the functions are not defined in the main file + self.assertNotIn("def another_var()", file_content) + self.assertNotIn("def rate1()", file_content) + self.assertNotIn("def varn()", file_content) + self.assertNotIn("def variablex()", file_content) + self.assertNotIn("def stock()", file_content) + self.assertNotIn("def interesting_var_2()", file_content) + self.assertNotIn("def great_var()", file_content) + + # check that the results of the split model are the same than those + # without splitting model_non_split = pysd.read_vensim( root_dir + model_name + ".mdl", split_views=False ) @@ -348,6 +384,53 @@ def test_read_vensim_split_model_with_macro(self): result_split = model_split.run() result_non_split = model_non_split.run() + # results of a split model are the same that those of the regular + # model (un-split) + assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() + + # assert that the functions are in the main file for regular trans + self.assertIn("def another_var()", file_content) + self.assertIn("def rate1()", file_content) + self.assertIn("def varn()", file_content) + self.assertIn("def variablex()", file_content) + self.assertIn("def stock()", file_content) + self.assertIn("def interesting_var_2()", file_content) + self.assertIn("def great_var()", file_content) + + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) + + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) + + def test_read_vensim_split_model_with_macro(self): + import pysd + from pysd.tools.benchmarking import assert_frames_close + + root_dir = more_tests + "/split_model_with_macro/" + + model_name = "test_split_model_with_macro" + model_non_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=False + ) + + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_dirname = "modules_" + model_name + + # running split model + result_non_split = model_non_split.run() + + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True + ) + result_split = model_split.run() + # results of a split model are the same that those of the regular model 
assert_frames_close(result_split, result_non_split, atol=0, rtol=0) @@ -375,7 +458,28 @@ def test_read_vensim_split_model_warning(self): self.assertTrue( "Only a single view with no subviews was detected" in str( wu[0].message) - ) # check that warning references the stock + ) + + def test_read_vensim_split_model_non_matching_separator_warning(self): + import pysd + # setting the split_views=True when the model has a single + # view should generate a warning + + root_dir = os.path.join(_root, "more-tests/split_model/") + + model_name = "test_split_model_sub_subviews" + + with catch_warnings(record=True) as ws: + pysd.read_vensim(root_dir + model_name + ".mdl", split_views=True, + subview_sep=["a"]) + + wu = [w for w in ws if issubclass(w.category, UserWarning)] + + self.assertEqual(len(wu), 1) + self.assertTrue( + "The given subview separators were not matched in" in str( + wu[0].message) + ) def test_run_includes_last_value(self): import pysd From a5d3cab0141f6b7f960a4a7d20fe25b3b256f303 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Tue, 21 Sep 2021 13:22:41 +0200 Subject: [PATCH 4/8] allow multiple separators --- docs/advanced_usage.rst | 4 +- pysd/py_backend/builder.py | 101 +++++------- pysd/py_backend/utils.py | 50 ++++++ pysd/py_backend/vensim/vensim2py.py | 56 ++++--- pysd/pysd.py | 8 +- .../test_split_model_sub_subviews.mdl | 150 ++++++++++++++++++ tests/unit_test_cli.py | 12 +- tests/unit_test_pysd.py | 132 +++++++++++++-- 8 files changed, 402 insertions(+), 111 deletions(-) create mode 100644 tests/more-tests/split_model/test_split_model_sub_subviews.mdl diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 04f56341..c4f1789a 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -67,9 +67,9 @@ In a Vensim model with three separate views (e.g. `view_1`, `view_2` and `view_3 .. note :: Often, modelers wish to organise views further. To that end, a common practice is to include a particular character in the View name to indicate that what comes after it is the name of the subview. For instance, we could name one view as `ENERGY.Supply` and another one as `ENERGY.Demand`. - In that particular case, setting the `subview_sep` kwarg equal to `"."`, as in the code below, would name the translated views as `demand.py` and `supply.py` and place them inside the `ENERGY` folder:: + In that particular case, setting the `subview_sep` kwarg equal to `["."]`, as in the code below, would name the translated views as `demand.py` and `supply.py` and place them inside the `ENERGY` folder:: - read_vensim("many_views_model.mdl", split_views=True, subview_sep=".") + read_vensim("many_views_model.mdl", split_views=True, subview_sep=["."]) If macros are present, they will be self-contained in files named as the macro itself. The macro inner variables will be placed inside the module that corresponds with the view in which they were defined. diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py index a7208925..04fcf7b7 100644 --- a/pysd/py_backend/builder.py +++ b/pysd/py_backend/builder.py @@ -152,9 +152,9 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, main_filename: str The name of the file to write the main module of the model to. - elements_per_module: dict - Contains the names of the modules as keys and the variables in - each specific module inside a list as values. 
+ elements_per_view: dict + Contains the names of the modules and submodules as keys and the + variables in each specific module inside a list as values. """ root_dir = os.path.dirname(main_filename) @@ -163,53 +163,49 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, # create modules directory if it does not exist os.makedirs(modules_dir, exist_ok=True) - # check if there are subviews or only main views - subviews = all(isinstance(n, dict) for n in elements_per_view.values()) - - all_views = elements_per_view.keys() - # creating the rest of files per module (this needs to be run before the - # main module, as it updates the import_modules) - processed_elements = [] - for view_name in all_views: - view_elems = [] - if not subviews: # only main views + def process_views_tree(view_name, + view_content, + working_directory, + processed_elements): + """ + Creates a directory tree based on the elements_per_view dictionary. + If it's the final view, it creates a file, if not, it creates a folder. + """ + if isinstance(view_content, list): # will become a module + subview_elems = [] for element in elements: - if element.get("py_name", None) in \ - elements_per_view[view_name] or \ - element.get("parent_name", None) in \ - elements_per_view[view_name]: - view_elems.append(element) + if element.get("py_name") in view_content or \ + element.get("parent_name", None) in view_content: + subview_elems.append(element) - _build_separate_module(view_elems, subscript_dict, view_name, - modules_dir) + _build_separate_module(subview_elems, subscript_dict, + view_name, working_directory) + processed_elements += subview_elems - else: - # create subdirectory - view_dir = os.path.join(modules_dir, view_name) - os.makedirs(view_dir, exist_ok=True) - - for subview_name in elements_per_view[view_name].keys(): - subview_elems = [] - for element in elements: - if element.get("py_name", None) in \ - elements_per_view[view_name][subview_name] or \ - element.get("parent_name", None) in \ - elements_per_view[view_name][subview_name]: - subview_elems.append(element) + else: # the current view has subviews + working_directory = os.path.join(working_directory, view_name) + os.makedirs(working_directory, exist_ok=True) - _build_separate_module(subview_elems, subscript_dict, - subview_name, view_dir) - view_elems += subview_elems + for subview_name, subview_content in view_content.items(): + process_views_tree(subview_name, + subview_content, + working_directory, + processed_elements) - processed_elements += view_elems + processed_elements = [] + for view_name, view_content in elements_per_view.items(): + process_views_tree(view_name, + view_content, + modules_dir, + processed_elements) # the unprocessed will go in the main file unprocessed_elements = [ element for element in elements if element not in processed_elements ] + # building main file using the build function - _build_main_module(unprocessed_elements, subscript_dict, - main_filename, subviews) + _build_main_module(unprocessed_elements, subscript_dict, main_filename) # create json file for the modules and corresponding model elements with open(os.path.join(modules_dir, "_modules.json"), "w") as outfile: @@ -228,7 +224,7 @@ def build_modular_model(elements, subscript_dict, namespace, main_filename, json.dump(subscript_dict, outfile, indent=4, sort_keys=True) -def _build_main_module(elements, subscript_dict, file_name, subviews): +def _build_main_module(elements, subscript_dict, file_name): """ Constructs and writes the python 
representation of the main model module, when the split_views=True in the read_vensim function. @@ -252,10 +248,6 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): file_name: str Path of the file where the main module will be stored. - subviews: bool - True or false depending on whether the views are split in subviews or - not. - Returns ------- None or text: None or str @@ -277,7 +269,7 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): funcs = _generate_functions(elements, subscript_dict) Imports.add("utils", "load_model_data") - Imports.add("utils", "open_module") + Imports.add("utils", "load_modules") # import of needed functions and packages text, root = Imports.get_header(os.path.basename(file_name), @@ -302,21 +294,10 @@ def _build_main_module(elements, subscript_dict, file_name, subviews): text += _get_control_vars(control_vars) - if not subviews: - text += textwrap.dedent(""" - # load modules from the modules_%(outfile)s directory - for module in _modules: - exec(open_module(_root, "%(outfile)s", module)) - - """ % { - "outfile": os.path.basename(file_name).split(".")[0], - }) - else: - text += textwrap.dedent(""" - # load submodules from subdirs in modules_%(outfile)s directory - for mod_name, mod_submods in _modules.items(): - for submod_name in mod_submods.keys(): - exec(open_module(_root, "%(outfile)s", mod_name, submod_name)) + text += textwrap.dedent(""" + # load modules from modules_%(outfile)s directory + for module_name, module_content in _modules.items(): + exec(load_modules(module_name, module_content, _root, "%(outfile)s")) """ % { "outfile": os.path.basename(file_name).split(".")[0], diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index dd63866f..e3590476 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -8,6 +8,7 @@ import warnings import keyword import json +from collections.abc import Mapping import regex as re import progressbar @@ -873,6 +874,12 @@ def open_module(root_dir, model_name, module, submodule=None): str: Model file content. """ + + warnings.warn( + "open_module function will be deprecated from release 2.0. Use " + + "load_modules instead.", + FutureWarning + ) if not submodule: rel_file_path = module + ".py" else: @@ -882,6 +889,24 @@ def open_module(root_dir, model_name, module, submodule=None): os.path.join(root_dir, "modules_" + model_name, rel_file_path)).read() +def load_modules(module_name, module_content, root_dir, model_name, + work_dir=None, submodules=[]): + if not work_dir: + work_dir = os.path.join(root_dir, "modules_" + model_name) + + if isinstance(module_content, list): + submodules.append( + open(os.path.join(work_dir, module_name + ".py"), "r").read()) + else: + work_dir = os.path.join(work_dir, module_name) + for submod_name, submod_content in module_content.items(): + load_modules( + submod_name, submod_content, root_dir, model_name, + work_dir=work_dir, submodules=submodules) + + return "\n\n".join(submodules) + + def clean_file_names(*args): """ Removes special characters and makes clean file names @@ -905,6 +930,31 @@ def clean_file_names(*args): return clean +def merge_nested_dicts(original_dict, dict_to_merge): + """ + Merge dictionaries recursively, preserving common keys. + + Parameters + ---------- + original_dict: dict + Dictionary onto which the merge is executed. + + dict_to_merge: dict + Dictionary to be merged to the original_dict. 
+ + Returns + ------- + None + """ + + for k, v in dict_to_merge.items(): + if (k in original_dict and isinstance(original_dict[k], dict) + and isinstance(dict_to_merge[k], Mapping)): + merge_nested_dicts(original_dict[k], dict_to_merge[k]) + else: + original_dict[k] = dict_to_merge[k] + + class ProgressBar: """ Progress bar for integration diff --git a/pysd/py_backend/vensim/vensim2py.py b/pysd/py_backend/vensim/vensim2py.py index 05009c40..464d4302 100644 --- a/pysd/py_backend/vensim/vensim2py.py +++ b/pysd/py_backend/vensim/vensim2py.py @@ -559,8 +559,9 @@ def visit_view_name(self, n, vc): def visit_var_definition(self, n, vc): if int(vc[10]) % 2 != 0: # not a shadow variable - self.view_or_var["variable_name"] = self.namespace.get(vc[4], - "") + self.view_or_var["variable_name"] = \ + self.namespace.get(vc[4], "") or \ + self.namespace.get(vc[4].replace(" ", "_"), "") def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text or "" @@ -1698,8 +1699,8 @@ def _classify_elements_by_module(sketch, namespace, subview_sep): Translation from original model element names (keys) to python safe function identifiers (values). - subview_sep: str - Character used to split view names into view + subview + subview_sep: list + Characters used to split view names into view + subview (e.g. if a view is named ENERGY.Demand and suview_sep is set to ".", then the Demand subview would be placed inside the ENERGY directory) @@ -1735,27 +1736,34 @@ def _classify_elements_by_module(sketch, namespace, subview_sep): # split into subviews, if subview_sep is provided views_dict = {} - - if subview_sep and any(filter(lambda x: subview_sep in x, - non_empty_views.keys())): - for name, elements in non_empty_views.items(): - # split and clean view/subview names as they are not yet safe - view_subview = name.split(subview_sep) - - if len(view_subview) == 2: - view, subview = utils.clean_file_names(*view_subview) - else: - view = utils.clean_file_names(*view_subview)[0] - subview = "" - - if view.upper() not in views_dict.keys(): - views_dict[view.upper()] = {} - if not subview: - views_dict[view.upper()][view.lower()] = elements - else: - views_dict[view.upper()][subview.lower()] = elements + if subview_sep and any( + sep in view for sep in subview_sep for view in non_empty_views): + escaped_separators = list(map(lambda x: re.escape(x), subview_sep)) + for full_name, values in non_empty_views.items(): + # split the full view name using the separator and make the + # individual parts safe file or directory names + clean_view_parts = utils.clean_file_names( + *re.split( + "|".join(escaped_separators), + full_name)) + # creating a nested dict for each view.subview + # (e.g. 
{view_name: {subview_name: [values]}}) + nested_dict = values + + for item in reversed(clean_view_parts): + + nested_dict = {item: nested_dict} + # merging the new nested_dict into the views_dict, preserving + # repeated keys + utils.merge_nested_dicts(views_dict, nested_dict) + + # view names do not have separators or separator characters not provided else: - # clean file names + if subview_sep and not any( + sep in view for sep in subview_sep for view in non_empty_views): + warnings.warn("The given subview separators were not matched in " + + "any view name.") + for view_name, elements in non_empty_views.items(): views_dict[utils.clean_file_names(view_name)[0]] = elements diff --git a/pysd/pysd.py b/pysd/pysd.py index f6f21ee6..04cde2c9 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -91,10 +91,10 @@ def read_vensim(mdl_file, initialize=True, missing_values="warning", **kwargs: (optional) Additional keyword arguments. - subview_sep:(str) - Character used to separate views and subviews. If provided, - and split_views=True, each submodule will be placed inside the - folder of the parent view. + subview_sep:(list) + Characters used to separate views and subviews (e.g. [",", "."]). + If provided, and split_views=True, each submodule will be placed + inside the directory of the parent view. Returns diff --git a/tests/more-tests/split_model/test_split_model_sub_subviews.mdl b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl new file mode 100644 index 00000000..07bafdaa --- /dev/null +++ b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl @@ -0,0 +1,150 @@ +{UTF-8} +var tolo= + 55+great var + ~ + ~ | + +great var= + 5 + ~ + ~ | + +interesting var 1= + "variable-x"+1 + ~ + ~ | + +interesting var 2= + interesting var 1*5 + ~ + ~ | + +another var= + 3*Stock + ~ + ~ | + +"rate-1"= + "var-n" + ~ + ~ | + +"var-n"= + 5 + ~ + ~ | + +"variable-x"= + 6*another var + ~ + ~ | + +Stock= INTEG ( + "rate-1", + 1) + ~ + ~ | + +******************************************************** + .Control +********************************************************~ + Simulation Control Parameters + | + +FINAL TIME = 100 + ~ Month + ~ The final time for the simulation. + | + +INITIAL TIME = 0 + ~ Month + ~ The initial time for the simulation. + | + +SAVEPER = + TIME STEP + ~ Month [0,?] + ~ The frequency with which output is stored. + | + +TIME STEP = 1 + ~ Month [0,?] + ~ The time step for the simulation. 
+ | + +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1.Submodule 1 +$255-128-0,0,Times New Roman|12||0-0-0|0-0-0|0-192-192|-1--1--1|-1--1--1|96,96,100,0 +10,1,Stock,497,237,40,20,3,3,0,0,0,0,0,0 +12,2,48,297,243,10,8,0,3,0,0,-1,0,0,0 +1,3,5,1,4,0,0,22,0,0,0,-1--1--1,,1|(422,243)| +1,4,5,2,100,0,0,22,0,0,0,-1--1--1,,1|(341,243)| +11,5,48,382,243,6,8,34,3,0,0,1,0,0,0 +10,6,"rate-1",382,262,21,11,40,3,0,0,-1,0,0,0 +12,7,0,1141,258,150,150,3,12,0,0,1,0,0,0 +Stock +10,8,"var-n",207,367,18,11,8,3,0,0,0,0,0,0 +1,9,8,6,0,0,0,0,0,128,0,-1--1--1,,1|(288,318)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1.Submodule 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,another var,89,168,36,11,8,3,0,0,0,0,0,0 +10,2,Stock,334,243,29,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(221,209)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,"variable-x",191,176,32,11,8,3,0,0,0,0,0,0 +10,2,another var,223,395,45,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +12,3,0,461,148,43,11,8,7,0,0,-1,0,0,0 +This is view 2 +1,4,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(208,292)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 1-Sview 1 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,"variable-x",260,232,41,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +10,2,interesting var 1,490,221,50,11,8,3,0,0,0,0,0,0 +1,3,1,2,1,0,0,0,0,128,0,-1--1--1,,1|(269,233)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 1-Sview 2 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,interesting var 1,249,173,59,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|12||128-128-128 +10,2,interesting var 2,484,153,50,11,8,3,0,0,0,0,0,0 +1,3,1,2,0,0,0,0,0,128,0,-1--1--1,,1|(363,163)| +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.subview 2-sview 3 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,great var,291,291,29,11,8,3,0,0,0,0,0,0 +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 3.Subview 2-sview 4 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,var tolo,162,186,25,11,8,3,0,0,0,0,0,0 +10,2,great var,128,103,38,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|0||128-128-128 +1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(141,138)| +///---\\\ +:L<%^E!@ +9:Current +15:0,0,0,0,0,0 +19:100,6 +27:0, +34:0, +4:Time +5:var tolo +35:Date +36:YYYY-MM-DD +37:2000 +38:1 +39:1 +40:2 +41:0 +42:1 +24:0 +25:100 +26:100 diff --git a/tests/unit_test_cli.py b/tests/unit_test_cli.py index be0400a7..2d3ca1b1 100644 --- a/tests/unit_test_cli.py +++ b/tests/unit_test_cli.py @@ -259,7 +259,7 @@ def test_read_vensim_split_model_subviews(self): model_split = pysd.read_vensim( root_dir 
+ model_name + ".mdl", split_views=True, - subview_sep="." + subview_sep=["."] ) namespace_filename = "_namespace_" + model_name + ".json" @@ -273,19 +273,17 @@ def test_read_vensim_split_model_subviews(self): self.assertEqual(out.returncode, 0) # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_2")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) # check creation of module files self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_1.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_2.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_2/" + - "view_2.py")) + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) # check that the results of the split model are the same than those # without splitting diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index cf6e3785..b3710cf7 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -261,7 +261,7 @@ def test_read_vensim_split_model_subviews(self): model_name = "test_split_model_subviews" model_split = pysd.read_vensim( root_dir + model_name + ".mdl", split_views=True, - subview_sep="." + subview_sep=["."] ) namespace_filename = "_namespace_" + model_name + ".json" @@ -269,19 +269,17 @@ def test_read_vensim_split_model_subviews(self): modules_dirname = "modules_" + model_name # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/VIEW_2")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) # check creation of module files self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_1.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_1/" + + os.path.isfile(root_dir + modules_dirname + "/view_1/" + "submodule_2.py")) self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/VIEW_2/" + - "view_2.py")) + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) with open(root_dir + model_name + ".py", 'r') as file: file_content = file.read() @@ -324,23 +322,61 @@ def test_read_vensim_split_model_subviews(self): # remove newly created modules folder shutil.rmtree(root_dir + modules_dirname) - def test_read_vensim_split_model_with_macro(self): + def test_read_vensim_split_model_several_subviews(self): import pysd from pysd.tools.benchmarking import assert_frames_close - root_dir = more_tests + "/split_model_with_macro/" + root_dir = os.path.join(_root, "more-tests/split_model/") - model_name = "test_split_model_with_macro" + model_name = "test_split_model_sub_subviews" model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True + root_dir + model_name + ".mdl", split_views=True, + subview_sep=[".", "-"] ) namespace_filename = "_namespace_" + model_name + ".json" subscript_dict_filename = "_subscripts_" + model_name + ".json" modules_dirname = "modules_" + model_name - # check that the results of the split model are the same - # than those without splitting + # check that the modules folders were created + self.assertTrue(os.path.isdir(root_dir + modules_dirname + 
"/view_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_2")) + # check creation of module files + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_1.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_1.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_3.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_4.py")) + + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() + + # assert that the functions are not defined in the main file + self.assertNotIn("def another_var()", file_content) + self.assertNotIn("def rate1()", file_content) + self.assertNotIn("def varn()", file_content) + self.assertNotIn("def variablex()", file_content) + self.assertNotIn("def stock()", file_content) + self.assertNotIn("def interesting_var_2()", file_content) + self.assertNotIn("def great_var()", file_content) + + # check that the results of the split model are the same than those + # without splitting model_non_split = pysd.read_vensim( root_dir + model_name + ".mdl", split_views=False ) @@ -348,6 +384,53 @@ def test_read_vensim_split_model_with_macro(self): result_split = model_split.run() result_non_split = model_non_split.run() + # results of a split model are the same that those of the regular + # model (un-split) + assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() + + # assert that the functions are in the main file for regular trans + self.assertIn("def another_var()", file_content) + self.assertIn("def rate1()", file_content) + self.assertIn("def varn()", file_content) + self.assertIn("def variablex()", file_content) + self.assertIn("def stock()", file_content) + self.assertIn("def interesting_var_2()", file_content) + self.assertIn("def great_var()", file_content) + + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) + + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) + + def test_read_vensim_split_model_with_macro(self): + import pysd + from pysd.tools.benchmarking import assert_frames_close + + root_dir = more_tests + "/split_model_with_macro/" + + model_name = "test_split_model_with_macro" + model_non_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=False + ) + + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_dirname = "modules_" + model_name + + # running split model + result_non_split = model_non_split.run() + + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True + ) + result_split = model_split.run() + # results of a split model are the same that those of the regular model 
assert_frames_close(result_split, result_non_split, atol=0, rtol=0) @@ -375,7 +458,28 @@ def test_read_vensim_split_model_warning(self): self.assertTrue( "Only a single view with no subviews was detected" in str( wu[0].message) - ) # check that warning references the stock + ) + + def test_read_vensim_split_model_non_matching_separator_warning(self): + import pysd + # setting the split_views=True when the model has a single + # view should generate a warning + + root_dir = os.path.join(_root, "more-tests/split_model/") + + model_name = "test_split_model_sub_subviews" + + with catch_warnings(record=True) as ws: + pysd.read_vensim(root_dir + model_name + ".mdl", split_views=True, + subview_sep=["a"]) + + wu = [w for w in ws if issubclass(w.category, UserWarning)] + + self.assertEqual(len(wu), 1) + self.assertTrue( + "The given subview separators were not matched in" in str( + wu[0].message) + ) def test_run_includes_last_value(self): import pysd From 3d8e2d685d13372e0b3b175a1fd27ba149f199a6 Mon Sep 17 00:00:00 2001 From: Eneko Martin Martinez Date: Mon, 27 Sep 2021 13:40:55 +0200 Subject: [PATCH 5/8] Correct memory bug --- pysd/py_backend/builder.py | 3 +- pysd/py_backend/utils.py | 5 +- pysd/py_backend/vensim/vensim2py.py | 4 +- tests/unit_test_pysd.py | 3148 ++++++++++++++------------- 4 files changed, 1583 insertions(+), 1577 deletions(-) diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py index 04fcf7b7..eb1fbf64 100644 --- a/pysd/py_backend/builder.py +++ b/pysd/py_backend/builder.py @@ -297,7 +297,8 @@ def _build_main_module(elements, subscript_dict, file_name): text += textwrap.dedent(""" # load modules from modules_%(outfile)s directory for module_name, module_content in _modules.items(): - exec(load_modules(module_name, module_content, _root, "%(outfile)s")) + exec(load_modules(module_name, module_content, _root, + "%(outfile)s", submodules=[])) """ % { "outfile": os.path.basename(file_name).split(".")[0], diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index e3590476..12fc8b46 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -891,12 +891,13 @@ def open_module(root_dir, model_name, module, submodule=None): def load_modules(module_name, module_content, root_dir, model_name, work_dir=None, submodules=[]): + # TODO: document if not work_dir: work_dir = os.path.join(root_dir, "modules_" + model_name) if isinstance(module_content, list): - submodules.append( - open(os.path.join(work_dir, module_name + ".py"), "r").read()) + with open(os.path.join(work_dir, module_name + ".py"), "r") as file: + submodules.append(file.read()) else: work_dir = os.path.join(work_dir, module_name) for submod_name, submod_content in module_content.items(): diff --git a/pysd/py_backend/vensim/vensim2py.py b/pysd/py_backend/vensim/vensim2py.py index 464d4302..d658c6e7 100644 --- a/pysd/py_backend/vensim/vensim2py.py +++ b/pysd/py_backend/vensim/vensim2py.py @@ -1861,7 +1861,9 @@ def translate_vensim(mdl_file, split_views, **kwargs): else: # separate macro elements into their own files section["py_name"] = utils.make_python_identifier( section["name"])[0] - section["file_name"] = out_dir + "/" + section["py_name"] + ".py" + section["file_name"] = os.path.join( + out_dir, + section["py_name"] + ".py") macro_list = [s for s in file_sections if s["name"] != "_main_"] diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index b3710cf7..d7a3d991 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -93,741 +93,684 @@ def 
test_run_ignore_missing(self): # errors for missing values pysd.load(model_py, missing_values="raise") - def test_read_vensim_split_model(self): + def test_run_includes_last_value(self): import pysd - from pysd.tools.benchmarking import assert_frames_close - - root_dir = more_tests + "/split_model/" - - model_name = "test_split_model" - model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True - ) - - namespace_filename = "_namespace_" + model_name + ".json" - subscript_dict_filename = "_subscripts_" + model_name + ".json" - modules_filename = "_modules.json" - modules_dirname = "modules_" + model_name - # check that _namespace and _subscript_dict json files where created - self.assertTrue(os.path.isfile(root_dir + namespace_filename)) - self.assertTrue(os.path.isfile(root_dir + subscript_dict_filename)) + model = pysd.read_vensim(test_model) + res = model.run() + self.assertEqual(res.index[-1], model.components.final_time()) - # check that the main model file was created - self.assertTrue(os.path.isfile(root_dir + model_name + ".py")) + def test_run_build_timeseries(self): + import pysd - # check that the modules folder was created - self.assertTrue(os.path.isdir(root_dir + modules_dirname)) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + modules_filename) - ) + model = pysd.read_vensim(test_model) - # check creation of module files - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + "view_1.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + "view2.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + "view_3.py")) + model.components.initial_time = lambda: 3 + model.components.final_time = lambda: 7 + model.components.time_step = lambda: 1 + model.initialize() - # check dictionaries - self.assertIn("Stock", model_split.components._namespace.keys()) - self.assertIn("view2", model_split.components._modules.keys()) - self.assertIsInstance(model_split.components._subscript_dict, dict) + res = model.run() - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + actual = list(res.index) + expected = [3.0, 4.0, 5.0, 6.0, 7.0] + self.assertSequenceEqual(actual, expected) - # assert that the functions are not defined in the main file - self.assertNotIn("def another_var()", file_content) - self.assertNotIn("def rate1()", file_content) - self.assertNotIn("def varn()", file_content) - self.assertNotIn("def variablex()", file_content) - self.assertNotIn("def stock()", file_content) + def test_run_progress(self): + import pysd - # check that the results of the split model are the same than those - # without splitting - model_non_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=False - ) + # same as test_run but with progressbar + model = pysd.read_vensim(test_model) + stocks = model.run(progress=True) + self.assertTrue(isinstance(stocks, pd.DataFrame)) + self.assertTrue("Teacup Temperature" in stocks.columns.values) + self.assertGreater(len(stocks), 3) + self.assertTrue(stocks.notnull().all().all()) - result_split = model_split.run() - result_non_split = model_non_split.run() + def test_run_return_timestamps(self): + """Addresses https://github.com/JamesPHoughton/pysd/issues/17""" + import pysd - # results of a split model are the same that those of the regular - # model (un-split) - assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + model = pysd.read_vensim(test_model) + timestamps = np.random.rand(5).cumsum() + 
stocks = model.run(return_timestamps=timestamps) + self.assertTrue((stocks.index.values == timestamps).all()) - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + stocks = model.run(return_timestamps=5) + self.assertEqual(stocks.index[0], 5) - # assert that the functions are in the main file for regular trans - self.assertIn("def another_var()", file_content) - self.assertIn("def rate1()", file_content) - self.assertIn("def varn()", file_content) - self.assertIn("def variablex()", file_content) - self.assertIn("def stock()", file_content) + timestamps = ['A', 'B'] + with self.assertRaises(TypeError): + model.run(return_timestamps=timestamps) - # remove newly created files - os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) - os.remove(root_dir + subscript_dict_filename) + def test_run_return_timestamps_past_final_time(self): + """ If the user enters a timestamp that is longer than the euler + timeseries that is defined by the normal model file, should + extend the euler series to the largest timestamp""" + import pysd - # remove newly created modules folder - shutil.rmtree(root_dir + modules_dirname) + model = pysd.read_vensim(test_model) + return_timestamps = list(range(0, 100, 10)) + stocks = model.run(return_timestamps=return_timestamps) + self.assertSequenceEqual(return_timestamps, list(stocks.index)) - def test_read_vensim_split_model_vensim_8_2_1(self): + def test_return_timestamps_with_range(self): + """ + Tests that return timestamps may receive a 'range'. + It will be cast to a numpy array in the end... + """ import pysd - from pysd.tools.benchmarking import assert_frames_close - root_dir = os.path.join(_root, "more-tests/split_model_vensim_8_2_1/") + model = pysd.read_vensim(test_model) + return_timestamps = range(0, 100, 10) + stocks = model.run(return_timestamps=return_timestamps) + self.assertSequenceEqual(return_timestamps, list(stocks.index)) - model_name = "test_split_model_vensim_8_2_1" - model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True, subview_sep="." 
- ) + def test_run_return_columns_original_names(self): + """Addresses https://github.com/JamesPHoughton/pysd/issues/26 + - Also checks that columns are returned in the correct order""" + import pysd - namespace_filename = "_namespace_" + model_name + ".json" - subscript_dict_filename = "_subscripts_" + model_name + ".json" - modules_filename = "_modules.json" - modules_dirname = "modules_" + model_name + model = pysd.read_vensim(test_model) + return_columns = ["Room Temperature", "Teacup Temperature"] + result = model.run(return_columns=return_columns) + self.assertEqual(set(result.columns), set(return_columns)) - # check that _namespace and _subscript_dict json files where created - self.assertTrue(os.path.isfile(root_dir + namespace_filename)) - self.assertTrue(os.path.isfile(root_dir + subscript_dict_filename)) + def test_run_return_columns_step(self): + """ + Return only cache 'step' variables + """ + import pysd + model = pysd.read_vensim(test_model) + result = model.run(return_columns='step') + self.assertEqual( + set(result.columns), + {'Teacup Temperature', 'SAVEPER', 'Heat Loss to Room'}) - # check that the main model file was created - self.assertTrue(os.path.isfile(root_dir + model_name + ".py")) + def test_run_reload(self): + """ Addresses https://github.com/JamesPHoughton/pysd/issues/99""" + import pysd - # check that the modules folder was created - self.assertTrue(os.path.isdir(root_dir + modules_dirname)) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + modules_filename) - ) + model = pysd.read_vensim(test_model) + result0 = model.run() + result1 = model.run(params={"Room Temperature": 1000}) + result2 = model.run() + result3 = model.run(reload=True) - # check creation of module files - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + "teacup.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/" + "cream.py")) + self.assertTrue((result0 == result3).all().all()) + self.assertFalse((result0 == result1).all().all()) + self.assertTrue((result1 == result2).all().all()) - # check dictionaries - self.assertIn("Cream Temperature", - model_split.components._namespace.keys()) - self.assertIn("cream", model_split.components._modules.keys()) - self.assertIsInstance(model_split.components._subscript_dict, dict) + def test_run_return_columns_pysafe_names(self): + """Addresses https://github.com/JamesPHoughton/pysd/issues/26""" + import pysd - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + model = pysd.read_vensim(test_model) + return_columns = ["room_temperature", "teacup_temperature"] + result = model.run(return_columns=return_columns) + self.assertEqual(set(result.columns), set(return_columns)) - # assert that the functions are not defined in the main file - self.assertNotIn("def teacup_temperature()", file_content) - self.assertNotIn("def cream_temperature()", file_content) + def test_run_export_import(self): + import pysd + from pysd.tools.benchmarking import assert_frames_close - # check that the results of the split model are the same than those - # without splitting - model_non_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=False - ) + with catch_warnings(): + simplefilter("ignore") + model = pysd.read_vensim(test_model) + stocks = model.run(return_timestamps=[0, 10, 20, 30]) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks['FINAL TIME'] == 30).all().all()) - result_split = model_split.run() - result_non_split = 
model_non_split.run() + model.initialize() + stocks1 = model.run(return_timestamps=[0, 10], final_time=12) + self.assertTrue((stocks1['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks1['FINAL TIME'] == 12).all().all()) + model.export('teacup12.pic') + model.initialize() + stocks2 = model.run(initial_condition='teacup12.pic', + return_timestamps=[20, 30]) + self.assertTrue((stocks2['INITIAL TIME'] == 12).all().all()) + self.assertTrue((stocks2['FINAL TIME'] == 30).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks1.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks1.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('teacup12.pic') - # results of a split model are the same that those of the regular - # model (un-split) - assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + assert_frames_close(stocks1, stocks.loc[[0, 10]]) + assert_frames_close(stocks2, stocks.loc[[20, 30]]) - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + # delays + test_delays = os.path.join( + _root, + 'test-models/tests/delays/test_delays.mdl') + model = pysd.read_vensim(test_delays) + stocks = model.run(return_timestamps=20) + model.initialize() + model.run(return_timestamps=[], final_time=7) + model.export('delays7.pic') + stocks2 = model.run(initial_condition='delays7.pic', + return_timestamps=20) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('delays7.pic') - # assert that the functions are in the main file for regular trans - self.assertIn("def teacup_temperature()", file_content) - self.assertIn("def cream_temperature()", file_content) + assert_frames_close(stocks2, stocks) - # remove newly created files - os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) - os.remove(root_dir + subscript_dict_filename) + # delay fixed + test_delayf = os.path.join( + _root, + 'test-models/tests/delay_fixed/test_delay_fixed.mdl') + model = pysd.read_vensim(test_delayf) + stocks = model.run(return_timestamps=20) + model.initialize() + model.run(return_timestamps=7) + model.export('delayf7.pic') + stocks2 = model.run(initial_condition='delayf7.pic', + return_timestamps=20) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('delayf7.pic') - # remove newly created modules folder - shutil.rmtree(root_dir + modules_dirname) + assert_frames_close(stocks2, stocks) - def test_read_vensim_split_model_subviews(self): - import pysd - from pysd.tools.benchmarking import assert_frames_close + # forecast + test_trend = os.path.join( + _root, + 'test-models/tests/forecast/' + + 'test_forecast.mdl') + model = pysd.read_vensim(test_trend) + stocks = model.run(return_timestamps=50, flatten_output=True) + model.initialize() + model.run(return_timestamps=20) + model.export('frcst20.pic') + stocks2 = 
model.run(initial_condition='frcst20.pic', + return_timestamps=50, + flatten_output=True) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 20).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('frcst20.pic') - root_dir = os.path.join(_root, "more-tests/split_model/") + assert_frames_close(stocks2, stocks) - model_name = "test_split_model_subviews" - model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True, - subview_sep=["."] - ) + # smooth + test_smooth = os.path.join( + _root, + 'test-models/tests/subscripted_smooth/' + + 'test_subscripted_smooth.mdl') + model = pysd.read_vensim(test_smooth) + stocks = model.run(return_timestamps=20, flatten_output=True) + model.initialize() + model.run(return_timestamps=7) + model.export('smooth7.pic') + stocks2 = model.run(initial_condition='smooth7.pic', + return_timestamps=20, + flatten_output=True) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('smooth7.pic') - namespace_filename = "_namespace_" + model_name + ".json" - subscript_dict_filename = "_subscripts_" + model_name + ".json" - modules_dirname = "modules_" + model_name + assert_frames_close(stocks2, stocks) - # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) + # trend + test_trend = os.path.join( + _root, + 'test-models/tests/subscripted_trend/' + + 'test_subscripted_trend.mdl') + model = pysd.read_vensim(test_trend) + stocks = model.run(return_timestamps=20, flatten_output=True) + model.initialize() + model.run(return_timestamps=7) + model.export('trend7.pic') + stocks2 = model.run(initial_condition='trend7.pic', + return_timestamps=20, + flatten_output=True) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('trend7.pic') - # check creation of module files - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_1/" + - "submodule_1.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_1/" + - "submodule_2.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_2.py")) + assert_frames_close(stocks2, stocks) - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + # initial + test_initial = os.path.join( + _root, 'test-models/tests/initial_function/test_initial.mdl') + model = pysd.read_vensim(test_initial) + stocks = model.run(return_timestamps=20) + model.initialize() + model.run(return_timestamps=7) + model.export('initial7.pic') + stocks2 = model.run(initial_condition='initial7.pic', + return_timestamps=20) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + 
stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('initial7.pic') - # assert that the functions are not defined in the main file - self.assertNotIn("def another_var()", file_content) - self.assertNotIn("def rate1()", file_content) - self.assertNotIn("def varn()", file_content) - self.assertNotIn("def variablex()", file_content) - self.assertNotIn("def stock()", file_content) + assert_frames_close(stocks2, stocks) - # check that the results of the split model are the same than those - # without splitting - model_non_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=False - ) + # sample if true + test_sample_if_true = os.path.join( + _root, + 'test-models/tests/sample_if_true/test_sample_if_true.mdl') + model = pysd.read_vensim(test_sample_if_true) + stocks = model.run(return_timestamps=20, flatten_output=True) + model.initialize() + model.run(return_timestamps=7) + model.export('sample_if_true7.pic') + stocks2 = model.run(initial_condition='sample_if_true7.pic', + return_timestamps=20, + flatten_output=True) + self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) + self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) + stocks.drop('INITIAL TIME', axis=1, inplace=True) + stocks2.drop('INITIAL TIME', axis=1, inplace=True) + stocks.drop('FINAL TIME', axis=1, inplace=True) + stocks2.drop('FINAL TIME', axis=1, inplace=True) + os.remove('sample_if_true7.pic') - result_split = model_split.run() - result_non_split = model_non_split.run() + assert_frames_close(stocks2, stocks) - # results of a split model are the same that those of the regular - # model (un-split) - assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + def test_initial_conditions_tuple_pysafe_names(self): + import pysd - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + model = pysd.read_vensim(test_model) + stocks = model.run( + initial_condition=(3000, {"teacup_temperature": 33}), + return_timestamps=list(range(3000, 3010)) + ) - # assert that the functions are in the main file for regular trans - self.assertIn("def another_var()", file_content) - self.assertIn("def rate1()", file_content) - self.assertIn("def varn()", file_content) - self.assertIn("def variablex()", file_content) - self.assertIn("def stock()", file_content) + self.assertEqual(stocks["Teacup Temperature"].iloc[0], 33) - # remove newly created files - os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) - os.remove(root_dir + subscript_dict_filename) + def test_initial_conditions_tuple_original_names(self): + """ Responds to https://github.com/JamesPHoughton/pysd/issues/77""" + import pysd - # remove newly created modules folder - shutil.rmtree(root_dir + modules_dirname) + model = pysd.read_vensim(test_model) + stocks = model.run( + initial_condition=(3000, {"Teacup Temperature": 33}), + return_timestamps=list(range(3000, 3010)), + ) + self.assertEqual(stocks.index[0], 3000) + self.assertEqual(stocks["Teacup Temperature"].iloc[0], 33) - def test_read_vensim_split_model_several_subviews(self): + def test_initial_conditions_current(self): import pysd - from pysd.tools.benchmarking import assert_frames_close - - root_dir = os.path.join(_root, "more-tests/split_model/") - model_name = "test_split_model_sub_subviews" - model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True, - subview_sep=[".", "-"] + model = 
pysd.read_vensim(test_model) + stocks1 = model.run(return_timestamps=list(range(0, 31))) + stocks2 = model.run( + initial_condition="current", return_timestamps=list(range(30, 45)) + ) + self.assertEqual( + stocks1["Teacup Temperature"].iloc[-1], + stocks2["Teacup Temperature"].iloc[0], ) - namespace_filename = "_namespace_" + model_name + ".json" - subscript_dict_filename = "_subscripts_" + model_name + ".json" - modules_dirname = "modules_" + model_name + def test_initial_condition_bad_value(self): + import pysd - # check that the modules folders were created - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + - "/subview_1")) - self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + - "/subview_2")) - # check creation of module files - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_2.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_1/" + - "submodule_1.py")) - self.assertTrue( - os.path.isfile(root_dir + modules_dirname + "/view_1/" + - "submodule_2.py")) - self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + - "/subview_1" + "/sview_1.py")) - self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + - "/subview_1" + "/sview_2.py")) - self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + - "/subview_2" + "/sview_3.py")) - self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + - "/subview_2" + "/sview_4.py")) + model = pysd.read_vensim(test_model) - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + with self.assertRaises(FileNotFoundError): + model.run(initial_condition="bad value") - # assert that the functions are not defined in the main file - self.assertNotIn("def another_var()", file_content) - self.assertNotIn("def rate1()", file_content) - self.assertNotIn("def varn()", file_content) - self.assertNotIn("def variablex()", file_content) - self.assertNotIn("def stock()", file_content) - self.assertNotIn("def interesting_var_2()", file_content) - self.assertNotIn("def great_var()", file_content) - - # check that the results of the split model are the same than those - # without splitting - model_non_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=False - ) - - result_split = model_split.run() - result_non_split = model_non_split.run() - - # results of a split model are the same that those of the regular - # model (un-split) - assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + def test_initial_conditions_subscripted_value_with_constant(self): + import pysd - with open(root_dir + model_name + ".py", 'r') as file: - file_content = file.read() + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 5], [5, 5], [5, 5]], coords, dims) - # assert that the functions are in the main file for regular trans - self.assertIn("def another_var()", file_content) - self.assertIn("def rate1()", file_content) - self.assertIn("def varn()", file_content) - self.assertIn("def variablex()", file_content) - self.assertIn("def stock()", file_content) - self.assertIn("def interesting_var_2()", file_content) - self.assertIn("def great_var()", file_content) + 
model = pysd.read_vensim(test_model_subs) - # remove newly created files - os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) - os.remove(root_dir + subscript_dict_filename) + with catch_warnings(record=True) as ws: + res = model.run(initial_condition=(5, {'initial_values': 5}), + return_columns=['Initial Values'], + return_timestamps=list(range(5, 10))) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "a constant value with initial_conditions will be deprecated", + str(wf[0].message)) - # remove newly created modules folder - shutil.rmtree(root_dir + modules_dirname) + self.assertTrue(output.equals(res['Initial Values'].iloc[0])) + self.assertEqual(res.index[0], 5) - def test_read_vensim_split_model_with_macro(self): + def test_initial_conditions_subscripted_value_with_partial_xarray(self): import pysd - from pysd.tools.benchmarking import assert_frames_close - root_dir = more_tests + "/split_model_with_macro/" + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) + input_val = xr.DataArray( + [5, 3], + {'Second Dimension Subscript': ['Column 1', 'Column 2']}, + ['Second Dimension Subscript']) - model_name = "test_split_model_with_macro" - model_non_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=False - ) + model = pysd.read_vensim(test_model_subs) + with catch_warnings(record=True) as ws: + res = model.run(initial_condition=(5, + {'Initial Values': input_val}), + return_columns=['Initial Values'], + return_timestamps=list(range(5, 10))) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "a constant value with initial_conditions will be deprecated", + str(wf[0].message)) - namespace_filename = "_namespace_" + model_name + ".json" - subscript_dict_filename = "_subscripts_" + model_name + ".json" - modules_dirname = "modules_" + model_name + self.assertTrue(output.equals(res['Initial Values'].iloc[0])) + self.assertEqual(res.index[0], 5) - # running split model - result_non_split = model_non_split.run() + def test_initial_conditions_subscripted_value_with_xarray(self): + import pysd - model_split = pysd.read_vensim( - root_dir + model_name + ".mdl", split_views=True - ) - result_split = model_split.run() + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - # results of a split model are the same that those of the regular model - assert_frames_close(result_split, result_non_split, atol=0, rtol=0) + model = pysd.read_vensim(test_model_subs) - # remove newly created files - os.remove(root_dir + model_name + ".py") - os.remove(root_dir + "expression_macro.py") - os.remove(root_dir + namespace_filename) - os.remove(root_dir + subscript_dict_filename) + with catch_warnings(record=True) as ws: + res = model.run(initial_condition=(5, {'initial_values': output}), + return_columns=['Initial Values'], + return_timestamps=list(range(5, 10))) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, 
FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "a constant value with initial_conditions will be deprecated", + str(wf[0].message)) - # remove newly created modules folder - shutil.rmtree(root_dir + modules_dirname) + self.assertTrue(output.equals(res['Initial Values'].iloc[0])) + self.assertEqual(res.index[0], 5) - def test_read_vensim_split_model_warning(self): + def test_initial_conditions_subscripted_value_with_numpy_error(self): import pysd - # setting the split_views=True when the model has a single - # view should generate a warning - with catch_warnings(record=True) as ws: - pysd.read_vensim( - test_model, split_views=True - ) # set stock value using params - wu = [w for w in ws if issubclass(w.category, UserWarning)] + input_ = np.array([[5, 3], [4, 8], [9, 3]]) - self.assertEqual(len(wu), 1) - self.assertTrue( - "Only a single view with no subviews was detected" in str( - wu[0].message) - ) + model = pysd.read_vensim(test_model_subs) - def test_read_vensim_split_model_non_matching_separator_warning(self): + with self.assertRaises(TypeError): + model.run(initial_condition=(5, {'initial_values': input_}), + return_columns=['Initial Values'], + return_timestamps=list(range(5, 10))) + + def test_set_constant_parameter(self): + """ In response to: + re: https://github.com/JamesPHoughton/pysd/issues/5""" import pysd - # setting the split_views=True when the model has a single - # view should generate a warning - root_dir = os.path.join(_root, "more-tests/split_model/") + model = pysd.read_vensim(test_model) + model.set_components({"room_temperature": 20}) + self.assertEqual(model.components.room_temperature(), 20) - model_name = "test_split_model_sub_subviews" + model.run(params={"room_temperature": 70}) + self.assertEqual(model.components.room_temperature(), 70) - with catch_warnings(record=True) as ws: - pysd.read_vensim(root_dir + model_name + ".mdl", split_views=True, - subview_sep=["a"]) + with self.assertRaises(NameError): + model.set_components({'not_a_var': 20}) - wu = [w for w in ws if issubclass(w.category, UserWarning)] + def test_set_timeseries_parameter(self): + import pysd - self.assertEqual(len(wu), 1) - self.assertTrue( - "The given subview separators were not matched in" in str( - wu[0].message) + model = pysd.read_vensim(test_model) + timeseries = list(range(30)) + temp_timeseries = pd.Series( + index=timeseries, + data=(50 + np.random.rand(len(timeseries)).cumsum()) + ) + res = model.run( + params={"room_temperature": temp_timeseries}, + return_columns=["room_temperature"], + return_timestamps=timeseries, ) + self.assertTrue((res["room_temperature"] == temp_timeseries).all()) - def test_run_includes_last_value(self): + def test_set_component_with_real_name(self): import pysd model = pysd.read_vensim(test_model) - res = model.run() - self.assertEqual(res.index[-1], model.components.final_time()) + model.set_components({"Room Temperature": 20}) + self.assertEqual(model.components.room_temperature(), 20) - def test_run_build_timeseries(self): + model.run(params={"Room Temperature": 70}) + self.assertEqual(model.components.room_temperature(), 70) + + def test_set_components_warnings(self): + """Addresses https://github.com/JamesPHoughton/pysd/issues/80""" import pysd model = pysd.read_vensim(test_model) + with catch_warnings(record=True) as w: + simplefilter("always") + model.set_components( + {"Teacup Temperature": 20, "Characteristic Time": 15} + ) # set stock value using params + self.assertEqual(len(w), 1) + self.assertTrue( + "Teacup Temperature" 
in str(w[0].message) + ) # check that warning references the stock - model.components.initial_time = lambda: 3 - model.components.final_time = lambda: 7 - model.components.time_step = lambda: 1 - model.initialize() - - res = model.run() - - actual = list(res.index) - expected = [3.0, 4.0, 5.0, 6.0, 7.0] - self.assertSequenceEqual(actual, expected) + def test_set_components_with_function(self): + def test_func(): + return 5 - def test_run_progress(self): import pysd - # same as test_run but with progressbar model = pysd.read_vensim(test_model) - stocks = model.run(progress=True) - self.assertTrue(isinstance(stocks, pd.DataFrame)) - self.assertTrue("Teacup Temperature" in stocks.columns.values) - self.assertGreater(len(stocks), 3) - self.assertTrue(stocks.notnull().all().all()) + model.set_components({"Room Temperature": test_func}) + res = model.run(return_columns=["Room Temperature"]) + self.assertEqual(test_func(), res["Room Temperature"].iloc[0]) - def test_run_return_timestamps(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/17""" + def test_set_subscripted_value_with_constant(self): import pysd - model = pysd.read_vensim(test_model) - timestamps = np.random.rand(5).cumsum() - stocks = model.run(return_timestamps=timestamps) - self.assertTrue((stocks.index.values == timestamps).all()) - - stocks = model.run(return_timestamps=5) - self.assertEqual(stocks.index[0], 5) + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 5], [5, 5], [5, 5]], coords, dims) - timestamps = ['A', 'B'] - with self.assertRaises(TypeError): - model.run(return_timestamps=timestamps) + model = pysd.read_vensim(test_model_subs) + model.set_components({"initial_values": 5, "final_time": 10}) + res = model.run(return_columns=["Initial Values"]) + self.assertTrue(output.equals(res["Initial Values"].iloc[0])) - def test_run_return_timestamps_past_final_time(self): - """ If the user enters a timestamp that is longer than the euler - timeseries that is defined by the normal model file, should - extend the euler series to the largest timestamp""" + def test_set_subscripted_value_with_partial_xarray(self): import pysd - model = pysd.read_vensim(test_model) - return_timestamps = list(range(0, 100, 10)) - stocks = model.run(return_timestamps=return_timestamps) - self.assertSequenceEqual(return_timestamps, list(stocks.index)) - - def test_return_timestamps_with_range(self): - """ - Tests that return timestamps may receive a 'range'. - It will be cast to a numpy array in the end... 
- """ - import pysd - - model = pysd.read_vensim(test_model) - return_timestamps = range(0, 100, 10) - stocks = model.run(return_timestamps=return_timestamps) - self.assertSequenceEqual(return_timestamps, list(stocks.index)) - - def test_run_return_columns_original_names(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/26 - - Also checks that columns are returned in the correct order""" - import pysd - - model = pysd.read_vensim(test_model) - return_columns = ["Room Temperature", "Teacup Temperature"] - result = model.run(return_columns=return_columns) - self.assertEqual(set(result.columns), set(return_columns)) + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) + input_val = xr.DataArray( + [5, 3], + {"Second Dimension Subscript": ["Column 1", "Column 2"]}, + ["Second Dimension Subscript"], + ) - def test_run_return_columns_step(self): - """ - Return only cache 'step' variables - """ - import pysd - model = pysd.read_vensim(test_model) - result = model.run(return_columns='step') - self.assertEqual( - set(result.columns), - {'Teacup Temperature', 'SAVEPER', 'Heat Loss to Room'}) + model = pysd.read_vensim(test_model_subs) + model.set_components({"Initial Values": input_val, "final_time": 10}) + res = model.run(return_columns=["Initial Values"]) + self.assertTrue(output.equals(res["Initial Values"].iloc[0])) - def test_run_reload(self): - """ Addresses https://github.com/JamesPHoughton/pysd/issues/99""" + def test_set_subscripted_value_with_xarray(self): import pysd - model = pysd.read_vensim(test_model) - result0 = model.run() - result1 = model.run(params={"Room Temperature": 1000}) - result2 = model.run() - result3 = model.run(reload=True) + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - self.assertTrue((result0 == result3).all().all()) - self.assertFalse((result0 == result1).all().all()) - self.assertTrue((result1 == result2).all().all()) + model = pysd.read_vensim(test_model_subs) + model.set_components({"initial_values": output, "final_time": 10}) + res = model.run(return_columns=["Initial Values"]) + self.assertTrue(output.equals(res["Initial Values"].iloc[0])) - def test_run_return_columns_pysafe_names(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/26""" + def test_set_constant_parameter_lookup(self): import pysd - model = pysd.read_vensim(test_model) - return_columns = ["room_temperature", "teacup_temperature"] - result = model.run(return_columns=return_columns) - self.assertEqual(set(result.columns), set(return_columns)) - - def test_run_export_import(self): - import pysd - from pysd.tools.benchmarking import assert_frames_close + model = pysd.read_vensim(test_model_look) with catch_warnings(): + # avoid warnings related to extrapolation simplefilter("ignore") - model = pysd.read_vensim(test_model) - stocks = model.run(return_timestamps=[0, 10, 20, 30]) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks['FINAL TIME'] == 30).all().all()) - - model.initialize() - stocks1 = model.run(return_timestamps=[0, 10], final_time=12) - self.assertTrue((stocks1['INITIAL TIME'] 
== 0).all().all()) - self.assertTrue((stocks1['FINAL TIME'] == 12).all().all()) - model.export('teacup12.pic') - model.initialize() - stocks2 = model.run(initial_condition='teacup12.pic', - return_timestamps=[20, 30]) - self.assertTrue((stocks2['INITIAL TIME'] == 12).all().all()) - self.assertTrue((stocks2['FINAL TIME'] == 30).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks1.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks1.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('teacup12.pic') - - assert_frames_close(stocks1, stocks.loc[[0, 10]]) - assert_frames_close(stocks2, stocks.loc[[20, 30]]) - - # delays - test_delays = os.path.join( - _root, - 'test-models/tests/delays/test_delays.mdl') - model = pysd.read_vensim(test_delays) - stocks = model.run(return_timestamps=20) - model.initialize() - model.run(return_timestamps=[], final_time=7) - model.export('delays7.pic') - stocks2 = model.run(initial_condition='delays7.pic', - return_timestamps=20) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('delays7.pic') - - assert_frames_close(stocks2, stocks) - - # delay fixed - test_delayf = os.path.join( - _root, - 'test-models/tests/delay_fixed/test_delay_fixed.mdl') - model = pysd.read_vensim(test_delayf) - stocks = model.run(return_timestamps=20) - model.initialize() - model.run(return_timestamps=7) - model.export('delayf7.pic') - stocks2 = model.run(initial_condition='delayf7.pic', - return_timestamps=20) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('delayf7.pic') - - assert_frames_close(stocks2, stocks) - - # forecast - test_trend = os.path.join( - _root, - 'test-models/tests/forecast/' - + 'test_forecast.mdl') - model = pysd.read_vensim(test_trend) - stocks = model.run(return_timestamps=50, flatten_output=True) - model.initialize() - model.run(return_timestamps=20) - model.export('frcst20.pic') - stocks2 = model.run(initial_condition='frcst20.pic', - return_timestamps=50, - flatten_output=True) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 20).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('frcst20.pic') - - assert_frames_close(stocks2, stocks) + model.set_components({"lookup_1d": 20}) + for i in range(100): + self.assertEqual(model.components.lookup_1d(i), 20) - # smooth - test_smooth = os.path.join( - _root, - 'test-models/tests/subscripted_smooth/' - + 'test_subscripted_smooth.mdl') - model = pysd.read_vensim(test_smooth) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.initialize() - model.run(return_timestamps=7) - model.export('smooth7.pic') - stocks2 = 
model.run(initial_condition='smooth7.pic', - return_timestamps=20, - flatten_output=True) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('smooth7.pic') + model.run(params={"lookup_1d": 70}) + for i in range(100): + self.assertEqual(model.components.lookup_1d(i), 70) - assert_frames_close(stocks2, stocks) + model.set_components({"lookup_2d": 20}) + for i in range(100): + self.assertTrue( + model.components.lookup_2d(i).equals( + xr.DataArray(20, {"Rows": ["Row1", "Row2"]}, ["Rows"]) + ) + ) - # trend - test_trend = os.path.join( - _root, - 'test-models/tests/subscripted_trend/' - + 'test_subscripted_trend.mdl') - model = pysd.read_vensim(test_trend) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.initialize() - model.run(return_timestamps=7) - model.export('trend7.pic') - stocks2 = model.run(initial_condition='trend7.pic', - return_timestamps=20, - flatten_output=True) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('trend7.pic') + model.run(params={"lookup_2d": 70}) + for i in range(100): + self.assertTrue( + model.components.lookup_2d(i).equals( + xr.DataArray(70, {"Rows": ["Row1", "Row2"]}, ["Rows"]) + ) + ) - assert_frames_close(stocks2, stocks) + xr1 = xr.DataArray([-10, 50], {"Rows": ["Row1", "Row2"]}, ["Rows"]) + model.set_components({"lookup_2d": xr1}) + for i in range(100): + self.assertTrue(model.components.lookup_2d(i).equals(xr1)) - # initial - test_initial = os.path.join( - _root, 'test-models/tests/initial_function/test_initial.mdl') - model = pysd.read_vensim(test_initial) - stocks = model.run(return_timestamps=20) - model.initialize() - model.run(return_timestamps=7) - model.export('initial7.pic') - stocks2 = model.run(initial_condition='initial7.pic', - return_timestamps=20) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('initial7.pic') + xr2 = xr.DataArray([-100, 500], {"Rows": ["Row1", "Row2"]}, + ["Rows"]) + model.run(params={"lookup_2d": xr2}) + for i in range(100): + self.assertTrue(model.components.lookup_2d(i).equals(xr2)) - assert_frames_close(stocks2, stocks) + def test_set_timeseries_parameter_lookup(self): + import pysd - # sample if true - test_sample_if_true = os.path.join( - _root, - 'test-models/tests/sample_if_true/test_sample_if_true.mdl') - model = pysd.read_vensim(test_sample_if_true) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.initialize() - model.run(return_timestamps=7) - model.export('sample_if_true7.pic') - stocks2 = model.run(initial_condition='sample_if_true7.pic', - return_timestamps=20, - flatten_output=True) - self.assertTrue((stocks['INITIAL TIME'] == 0).all().all()) - self.assertTrue((stocks2['INITIAL TIME'] == 7).all().all()) - 
stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - os.remove('sample_if_true7.pic') + model = pysd.read_vensim(test_model_look) + timeseries = list(range(30)) - assert_frames_close(stocks2, stocks) + with catch_warnings(): + # avoid warnings related to extrapolation + simplefilter("ignore") + temp_timeseries = pd.Series( + index=timeseries, data=(50 + + np.random.rand(len(timeseries) + ).cumsum()) + ) - def test_initial_conditions_tuple_pysafe_names(self): - import pysd + res = model.run( + params={"lookup_1d": temp_timeseries}, + return_columns=["lookup_1d_time"], + return_timestamps=timeseries, + ) - model = pysd.read_vensim(test_model) - stocks = model.run( - initial_condition=(3000, {"teacup_temperature": 33}), - return_timestamps=list(range(3000, 3010)) - ) + self.assertTrue((res["lookup_1d_time"] == temp_timeseries).all()) - self.assertEqual(stocks["Teacup Temperature"].iloc[0], 33) + res = model.run( + params={"lookup_2d": temp_timeseries}, + return_columns=["lookup_2d_time"], + return_timestamps=timeseries, + ) - def test_initial_conditions_tuple_original_names(self): - """ Responds to https://github.com/JamesPHoughton/pysd/issues/77""" - import pysd + self.assertTrue( + all( + [ + a.equals(xr.DataArray(b, {"Rows": ["Row1", "Row2"]}, + ["Rows"])) + for a, b in zip(res["lookup_2d_time"].values, + temp_timeseries) + ] + ) + ) - model = pysd.read_vensim(test_model) - stocks = model.run( - initial_condition=(3000, {"Teacup Temperature": 33}), - return_timestamps=list(range(3000, 3010)), - ) - self.assertEqual(stocks.index[0], 3000) - self.assertEqual(stocks["Teacup Temperature"].iloc[0], 33) + temp_timeseries2 = pd.Series( + index=timeseries, + data=[ + xr.DataArray([50 + x, 20 - y], {"Rows": ["Row1", "Row2"]}, + ["Rows"]) + for x, y in zip( + np.random.rand(len(timeseries)).cumsum(), + np.random.rand(len(timeseries)).cumsum(), + ) + ], + ) - def test_initial_conditions_current(self): - import pysd + res = model.run( + params={"lookup_2d": temp_timeseries2}, + return_columns=["lookup_2d_time"], + return_timestamps=timeseries, + ) - model = pysd.read_vensim(test_model) - stocks1 = model.run(return_timestamps=list(range(0, 31))) - stocks2 = model.run( - initial_condition="current", return_timestamps=list(range(30, 45)) - ) - self.assertEqual( - stocks1["Teacup Temperature"].iloc[-1], - stocks2["Teacup Temperature"].iloc[0], - ) + self.assertTrue( + all( + [ + a.equals(b) + for a, b in zip(res["lookup_2d_time"].values, + temp_timeseries2) + ] + ) + ) - def test_initial_condition_bad_value(self): + def test_set_subscripted_value_with_numpy_error(self): import pysd - model = pysd.read_vensim(test_model) + input_ = np.array([[5, 3], [4, 8], [9, 3]]) - with self.assertRaises(FileNotFoundError): - model.run(initial_condition="bad value") + model = pysd.read_vensim(test_model_subs) + with self.assertRaises(TypeError): + model.set_components({"initial_values": input_, "final_time": 10}) - def test_initial_conditions_subscripted_value_with_constant(self): + def test_set_subscripted_timeseries_parameter_with_constant(self): import pysd coords = { @@ -835,25 +778,28 @@ def test_initial_conditions_subscripted_value_with_constant(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 5], [5, 5], [5, 5]], coords, dims) model = 
pysd.read_vensim(test_model_subs) + timeseries = list(range(10)) + val_series = [50 + rd for rd in np.random.rand(len(timeseries) + ).cumsum()] + xr_series = [xr.DataArray(val, coords, dims) for val in val_series] - with catch_warnings(record=True) as ws: - res = model.run(initial_condition=(5, {'initial_values': 5}), - return_columns=['Initial Values'], - return_timestamps=list(range(5, 10))) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "a constant value with initial_conditions will be deprecated", - str(wf[0].message)) + temp_timeseries = pd.Series(index=timeseries, data=val_series) + res = model.run( + params={"initial_values": temp_timeseries, "final_time": 10}, + return_columns=["initial_values"], + return_timestamps=timeseries, + ) - self.assertTrue(output.equals(res['Initial Values'].iloc[0])) - self.assertEqual(res.index[0], 5) + self.assertTrue( + np.all( + [r.equals(t) for r, t in zip(res["initial_values"].values, + xr_series)] + ) + ) - def test_initial_conditions_subscripted_value_with_partial_xarray(self): + def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): import pysd coords = { @@ -861,29 +807,30 @@ def test_initial_conditions_subscripted_value_with_partial_xarray(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) + out_b = xr.DataArray([[0, 0], [0, 0], [0, 0]], coords, dims) input_val = xr.DataArray( [5, 3], - {'Second Dimension Subscript': ['Column 1', 'Column 2']}, - ['Second Dimension Subscript']) + {"Second Dimension Subscript": ["Column 1", "Column 2"]}, + ["Second Dimension Subscript"], + ) model = pysd.read_vensim(test_model_subs) - with catch_warnings(record=True) as ws: - res = model.run(initial_condition=(5, - {'Initial Values': input_val}), - return_columns=['Initial Values'], - return_timestamps=list(range(5, 10))) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "a constant value with initial_conditions will be deprecated", - str(wf[0].message)) - - self.assertTrue(output.equals(res['Initial Values'].iloc[0])) - self.assertEqual(res.index[0], 5) + timeseries = list(range(10)) + val_series = [input_val + rd for rd in np.random.rand(len(timeseries) + ).cumsum()] + temp_timeseries = pd.Series(index=timeseries, data=val_series) + out_series = [out_b + val for val in val_series] + model.set_components({"initial_values": temp_timeseries, + "final_time": 10}) + res = model.run(return_columns=["initial_values"]) + self.assertTrue( + np.all( + [r.equals(t) for r, t in zip(res["initial_values"].values, + out_series)] + ) + ) - def test_initial_conditions_subscripted_value_with_xarray(self): + def test_set_subscripted_timeseries_parameter_with_xarray(self): import pysd coords = { @@ -891,274 +838,353 @@ def test_initial_conditions_subscripted_value_with_xarray(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - model = pysd.read_vensim(test_model_subs) + init_val = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - with catch_warnings(record=True) as ws: - res = model.run(initial_condition=(5, {'initial_values': output}), - return_columns=['Initial Values'], - 
return_timestamps=list(range(5, 10))) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "a constant value with initial_conditions will be deprecated", - str(wf[0].message)) + model = pysd.read_vensim(test_model_subs) + timeseries = list(range(10)) + temp_timeseries = pd.Series( + index=timeseries, + data=[init_val + rd for rd in np.random.rand(len(timeseries) + ).cumsum()], + ) + res = model.run( + params={"initial_values": temp_timeseries, "final_time": 10}, + return_columns=["initial_values"], + return_timestamps=timeseries, + ) - self.assertTrue(output.equals(res['Initial Values'].iloc[0])) - self.assertEqual(res.index[0], 5) + self.assertTrue( + np.all( + [ + r.equals(t) + for r, t in zip( + res["initial_values"].values, temp_timeseries.values + ) + ] + ) + ) - def test_initial_conditions_subscripted_value_with_numpy_error(self): + def test_docs(self): + """ Test that the model prints some documentation """ import pysd - input_ = np.array([[5, 3], [4, 8], [9, 3]]) + model = pysd.read_vensim(test_model) + self.assertIsInstance(str(model), str) # tests string conversion of + # model - model = pysd.read_vensim(test_model_subs) + doc = model.doc() + self.assertIsInstance(doc, pd.DataFrame) + self.assertSetEqual( + { + "Characteristic Time", + "Teacup Temperature", + "FINAL TIME", + "Heat Loss to Room", + "INITIAL TIME", + "Room Temperature", + "SAVEPER", + "TIME STEP", + }, + set(doc["Real Name"].values), + ) - with self.assertRaises(TypeError): - model.run(initial_condition=(5, {'initial_values': input_}), - return_columns=['Initial Values'], - return_timestamps=list(range(5, 10))) + self.assertEqual( + doc[doc["Real Name"] == "Heat Loss to Room"]["Unit"].values[0], + "Degrees Fahrenheit/Minute", + ) + self.assertEqual( + doc[doc["Real Name"] == "Teacup Temperature"]["Py Name"].values[0], + "teacup_temperature", + ) + self.assertEqual( + doc[doc["Real Name"] == "INITIAL TIME"]["Comment"].values[0], + "The initial time for the simulation.", + ) + self.assertEqual( + doc[doc["Real Name"] == "Characteristic Time"]["Type"].values[0], + "constant" + ) + self.assertEqual( + doc[doc["Real Name"] == "Teacup Temperature"]["Lims"].values[0], + "(32.0, 212.0)", + ) - def test_set_constant_parameter(self): - """ In response to: - re: https://github.com/JamesPHoughton/pysd/issues/5""" + def test_docs_multiline_eqn(self): + """ Test that the model prints some documentation """ import pysd - model = pysd.read_vensim(test_model) - model.set_components({"room_temperature": 20}) - self.assertEqual(model.components.room_temperature(), 20) + path2model = os.path.join( + _root, + "test-models/tests/multiple_lines_def/" + + "test_multiple_lines_def.mdl") + model = pysd.read_vensim(path2model) + + doc = model.doc() + + self.assertEqual(doc[doc["Real Name"] == "price"]["Unit"].values[0], + "euros/kg") + self.assertEqual(doc[doc["Real Name"] == "price"]["Py Name"].values[0], + "price") + self.assertEqual( + doc[doc["Real Name"] == "price"]["Subs"].values[0], "['fruits']" + ) + self.assertEqual(doc[doc["Real Name"] == "price"]["Eqn"].values[0], + "1.2; .; .; .; 1.4") + + def test_stepwise_cache(self): + run_history = [] + result_history = [] + + global time + time = lambda: 0 # for testing cache function + from pysd import cache + + cache.time = time() + + @cache.step + def upstream(run_hist, res_hist): + run_hist.append("U") + return "up" + + def downstream(run_hist, res_hist): + run_hist.append("D") + 
result_history.append(upstream(run_hist, res_hist)) + return "down" + + # initially neither function has a chache value + self.assertFalse("upstream" in cache.data["step"]) + self.assertFalse("downstream" in cache.data["step"]) + + # when the functions are called, + # the cache is instantiated in the upstream (cached) function + result_history.append(downstream(run_history, result_history)) + self.assertTrue("upstream" in cache.data["step"]) + self.assertFalse("upstream" in cache.data["run"]) + self.assertFalse("downstream" in cache.data["step"]) + self.assertEqual(cache.time, 0) + self.assertListEqual(run_history, ["D", "U"]) + self.assertListEqual(result_history, ["up", "down"]) - model.run(params={"room_temperature": 70}) - self.assertEqual(model.components.room_temperature(), 70) + # cleaning only run cache shouldn't affect the step cache + cache.clean("run") + self.assertTrue("upstream" in cache.data["step"]) - with self.assertRaises(NameError): - model.set_components({'not_a_var': 20}) + # at the second call, the uncached function is run, + # but the cached upstream function returns its prior value + result_history.append(downstream(run_history, result_history)) + self.assertEqual(cache.time, 0) + self.assertListEqual(run_history, ["D", "U", "D"]) + self.assertListEqual(result_history, ["up", "down", "up", "down"]) - def test_set_timeseries_parameter(self): - import pysd + # when the time is reset, both functions are run again. + time = lambda: 2 + cache.reset(time()) - model = pysd.read_vensim(test_model) - timeseries = list(range(30)) - temp_timeseries = pd.Series( - index=timeseries, - data=(50 + np.random.rand(len(timeseries)).cumsum()) - ) - res = model.run( - params={"room_temperature": temp_timeseries}, - return_columns=["room_temperature"], - return_timestamps=timeseries, - ) - self.assertTrue((res["room_temperature"] == temp_timeseries).all()) + result_history.append(downstream(run_history, result_history)) + self.assertEqual(cache.time, 2) + self.assertListEqual(run_history, ["D", "U", "D", "D", "U"]) + self.assertListEqual(result_history, ["up", "down", "up", "down", + "up", "down"]) - def test_set_component_with_real_name(self): - import pysd + def test_runwise_cache(self): + # Checks backward compatibility, must be changed to @cache.run when + # deprecated + run_history = [] + result_history = [] - model = pysd.read_vensim(test_model) - model.set_components({"Room Temperature": 20}) - self.assertEqual(model.components.room_temperature(), 20) + global time + time = lambda: 0 # for testing cache function + from pysd import cache - model.run(params={"Room Temperature": 70}) - self.assertEqual(model.components.room_temperature(), 70) + cache.time = time() - def test_set_components_warnings(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/80""" - import pysd + @cache.run + def upstream(run_hist, res_hist): + run_hist.append("U") + return "up" - model = pysd.read_vensim(test_model) - with catch_warnings(record=True) as w: - simplefilter("always") - model.set_components( - {"Teacup Temperature": 20, "Characteristic Time": 15} - ) # set stock value using params - self.assertEqual(len(w), 1) - self.assertTrue( - "Teacup Temperature" in str(w[0].message) - ) # check that warning references the stock + def downstream(run_hist, res_hist): + run_hist.append("D") + result_history.append(upstream(run_hist, res_hist)) + return "down" - def test_set_components_with_function(self): - def test_func(): - return 5 + # initially neither function has a chache value + 
self.assertFalse("upstream" in cache.data["run"]) + self.assertFalse("downstream" in cache.data["run"]) - import pysd + # when the functions are called, + # the cache is instantiated in the upstream (cached) function + result_history.append(downstream(run_history, result_history)) + self.assertEqual(cache.time, 0) + self.assertTrue("upstream" in cache.data["run"]) + self.assertFalse("upstream" in cache.data["step"]) + self.assertFalse("downstream" in cache.data["run"]) + self.assertListEqual(run_history, ["D", "U"]) + self.assertListEqual(result_history, ["up", "down"]) - model = pysd.read_vensim(test_model) - model.set_components({"Room Temperature": test_func}) - res = model.run(return_columns=["Room Temperature"]) - self.assertEqual(test_func(), res["Room Temperature"].iloc[0]) + # cleaning only step cache shouldn't affect the step cache + cache.clean("step") + self.assertTrue("upstream" in cache.data["run"]) - def test_set_subscripted_value_with_constant(self): - import pysd + # at the second call, the uncached function is run, + # but the cached upstream function returns its prior value + result_history.append(downstream(run_history, result_history)) + self.assertEqual(cache.time, 0) + self.assertListEqual(run_history, ["D", "U", "D"]) + self.assertListEqual(result_history, ["up", "down", "up", "down"]) - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 5], [5, 5], [5, 5]], coords, dims) + # when the time is reset, this has no impact on the upstream cache. + time = lambda: 2 + cache.reset(time()) - model = pysd.read_vensim(test_model_subs) - model.set_components({"initial_values": 5, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) - self.assertTrue(output.equals(res["Initial Values"].iloc[0])) + result_history.append(downstream(run_history, result_history)) + self.assertEqual(cache.time, 2) + self.assertListEqual(run_history, ["D", "U", "D", "D"]) + self.assertListEqual(result_history, ["up", "down", "up", "down", + "up", "down"]) - def test_set_subscripted_value_with_partial_xarray(self): + def test_initialize(self): import pysd - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) - input_val = xr.DataArray( - [5, 3], - {"Second Dimension Subscript": ["Column 1", "Column 2"]}, - ["Second Dimension Subscript"], - ) - - model = pysd.read_vensim(test_model_subs) - model.set_components({"Initial Values": input_val, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) - self.assertTrue(output.equals(res["Initial Values"].iloc[0])) + model = pysd.read_vensim(test_model) + initial_temp = model.components.teacup_temperature() + model.run() + final_temp = model.components.teacup_temperature() + model.initialize() + reset_temp = model.components.teacup_temperature() + self.assertNotEqual(initial_temp, final_temp) + self.assertEqual(initial_temp, reset_temp) - def test_set_subscripted_value_with_xarray(self): + def test_initialize_order(self): import pysd + model = pysd.load(more_tests + "/initialization_order/" + "test_initialization_order.py") - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension 
Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) + if model._stateful_elements[0].py_name.endswith('stock_a'): + # we want to have stock b first always + model._stateful_elements.reverse() - model = pysd.read_vensim(test_model_subs) - model.set_components({"initial_values": output, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) - self.assertTrue(output.equals(res["Initial Values"].iloc[0])) + self.assertEqual(model.components.stock_b(), 42) + self.assertEqual(model.components.stock_a(), 42) + model.components.initial_parameter = lambda: 1 + model.initialize() + self.assertEqual(model.components.stock_b(), 1) + self.assertEqual(model.components.stock_a(), 1) - def test_set_constant_parameter_lookup(self): + def test_set_state(self): import pysd - model = pysd.read_vensim(test_model_look) - - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - model.set_components({"lookup_1d": 20}) - for i in range(100): - self.assertEqual(model.components.lookup_1d(i), 20) + model = pysd.read_vensim(test_model) - model.run(params={"lookup_1d": 70}) - for i in range(100): - self.assertEqual(model.components.lookup_1d(i), 70) + initial_temp = model.components.teacup_temperature() - model.set_components({"lookup_2d": 20}) - for i in range(100): - self.assertTrue( - model.components.lookup_2d(i).equals( - xr.DataArray(20, {"Rows": ["Row1", "Row2"]}, ["Rows"]) - ) - ) + new_time = np.random.rand() - model.run(params={"lookup_2d": 70}) - for i in range(100): - self.assertTrue( - model.components.lookup_2d(i).equals( - xr.DataArray(70, {"Rows": ["Row1", "Row2"]}, ["Rows"]) - ) - ) + with catch_warnings(record=True) as ws: + # Test that we can set with real names + model.set_state(new_time, {'Teacup Temperature': 500}) + self.assertNotEqual(initial_temp, 500) + self.assertEqual(model.components.teacup_temperature(), 500) + self.assertEqual(model.components.time(), new_time) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "set_state will be deprecated, use set_initial_value instead.", + str(wf[0].message)) - xr1 = xr.DataArray([-10, 50], {"Rows": ["Row1", "Row2"]}, ["Rows"]) - model.set_components({"lookup_2d": xr1}) - for i in range(100): - self.assertTrue(model.components.lookup_2d(i).equals(xr1)) + with catch_warnings(record=True) as ws: + # Test setting with pysafe names + model.set_state(new_time + 1, {'teacup_temperature': 202}) + self.assertEqual(model.components.teacup_temperature(), 202) + self.assertEqual(model.components.time(), new_time + 1) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "set_state will be deprecated, use set_initial_value instead.", + str(wf[0].message)) - xr2 = xr.DataArray([-100, 500], {"Rows": ["Row1", "Row2"]}, - ["Rows"]) - model.run(params={"lookup_2d": xr2}) - for i in range(100): - self.assertTrue(model.components.lookup_2d(i).equals(xr2)) + with catch_warnings(record=True) as ws: + # Test setting with stateful object name + model.set_state(new_time + 2, {'_integ_teacup_temperature': 302}) + self.assertEqual(model.components.teacup_temperature(), 302) + self.assertEqual(model.components.time(), new_time + 2) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + 
self.assertEqual(len(wf), 1) + self.assertIn( + "set_state will be deprecated, use set_initial_value instead.", + str(wf[0].message)) - def test_set_timeseries_parameter_lookup(self): + def test_set_initial_value(self): import pysd + model = pysd.read_vensim(test_model) - model = pysd.read_vensim(test_model_look) - timeseries = list(range(30)) + initial_temp = model.components.teacup_temperature() + + new_time = np.random.rand() + + # Test that we can set with real names + model.set_initial_value(new_time, {'Teacup Temperature': 500}) + self.assertNotEqual(initial_temp, 500) + self.assertEqual(model.components.teacup_temperature(), 500) + self.assertEqual(model.components.time(), new_time) - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - temp_timeseries = pd.Series( - index=timeseries, data=(50 + - np.random.rand(len(timeseries) - ).cumsum()) - ) + # Test setting with pysafe names + model.set_initial_value(new_time + 1, {'teacup_temperature': 202}) + self.assertEqual(model.components.teacup_temperature(), 202) + self.assertEqual(model.components.time(), new_time + 1) - res = model.run( - params={"lookup_1d": temp_timeseries}, - return_columns=["lookup_1d_time"], - return_timestamps=timeseries, - ) + # Test setting with stateful object name + model.set_initial_value(new_time + 2, + {'_integ_teacup_temperature': 302}) + self.assertEqual(model.components.teacup_temperature(), 302) + self.assertEqual(model.components.time(), new_time + 2) - self.assertTrue((res["lookup_1d_time"] == temp_timeseries).all()) + with self.assertRaises(NameError): + model.set_initial_value(new_time, {'not_a_var': 500}) - res = model.run( - params={"lookup_2d": temp_timeseries}, - return_columns=["lookup_2d_time"], - return_timestamps=timeseries, - ) + def test_set_initial_value_lookup(self): + import pysd - self.assertTrue( - all( - [ - a.equals(xr.DataArray(b, {"Rows": ["Row1", "Row2"]}, - ["Rows"])) - for a, b in zip(res["lookup_2d_time"].values, - temp_timeseries) - ] - ) - ) + model = pysd.read_vensim(test_model_look) - temp_timeseries2 = pd.Series( - index=timeseries, - data=[ - xr.DataArray([50 + x, 20 - y], {"Rows": ["Row1", "Row2"]}, - ["Rows"]) - for x, y in zip( - np.random.rand(len(timeseries)).cumsum(), - np.random.rand(len(timeseries)).cumsum(), - ) - ], - ) + new_time = np.random.rand() - res = model.run( - params={"lookup_2d": temp_timeseries2}, - return_columns=["lookup_2d_time"], - return_timestamps=timeseries, - ) + # Test that we can set with real names + with catch_warnings(record=True) as ws: + model.set_initial_value(new_time, {'lookup 1d': 500}) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "a constant value with initial_conditions will be deprecated", + str(wf[0].message)) - self.assertTrue( - all( - [ - a.equals(b) - for a, b in zip(res["lookup_2d_time"].values, - temp_timeseries2) - ] - ) - ) + self.assertEqual(model.components.lookup_1d(0), 500) + self.assertEqual(model.components.lookup_1d(100), 500) - def test_set_subscripted_value_with_numpy_error(self): - import pysd + with catch_warnings(record=True) as ws: + model.set_initial_value(new_time, {'lookup 2d': 520}) + # use only future warnings + wf = [w for w in ws if issubclass(w.category, FutureWarning)] + self.assertEqual(len(wf), 1) + self.assertIn( + "a constant value with initial_conditions will be deprecated", + str(wf[0].message)) - input_ = np.array([[5, 3], [4, 8], [9, 3]]) + expected = 
xr.DataArray(520, {"Rows": ["Row1", "Row2"]}, ["Rows"]) + self.assertTrue(model.components.lookup_2d(0).equals(expected)) + self.assertTrue(model.components.lookup_2d(100).equals(expected)) - model = pysd.read_vensim(test_model_subs) - with self.assertRaises(TypeError): - model.set_components({"initial_values": input_, "final_time": 10}) + with catch_warnings(): + # avoid warnings related to extrapolation + simplefilter("ignore") + model.run() - def test_set_subscripted_timeseries_parameter_with_constant(self): + def test_set_initial_value_subscripted_value_with_constant(self): import pysd coords = { @@ -1166,28 +1192,34 @@ def test_set_subscripted_timeseries_parameter_with_constant(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output_b = xr.DataArray([[0, 0], [0, 0], [0, 0]], coords, dims) + + new_time = np.random.rand() model = pysd.read_vensim(test_model_subs) - timeseries = list(range(10)) - val_series = [50 + rd for rd in np.random.rand(len(timeseries) - ).cumsum()] - xr_series = [xr.DataArray(val, coords, dims) for val in val_series] + initial_stock = model.components.stock_a() - temp_timeseries = pd.Series(index=timeseries, data=val_series) - res = model.run( - params={"initial_values": temp_timeseries, "final_time": 10}, - return_columns=["initial_values"], - return_timestamps=timeseries, - ) + # Test that we can set with real names + model.set_initial_value(new_time, {'Stock A': 500}) + self.assertFalse(initial_stock.equals(output_b + 500)) + self.assertTrue(model.components.stock_a().equals(output_b + 500)) - self.assertTrue( - np.all( - [r.equals(t) for r, t in zip(res["initial_values"].values, - xr_series)] + # Test setting with pysafe names + model.set_initial_value(new_time + 1, {'stock_a': 202}) + self.assertTrue(model.components.stock_a().equals(output_b + 202)) + + # Test setting with stateful object name + model.set_initial_value(new_time + 2, {'_integ_stock_a': 302}) + self.assertTrue(model.components.stock_a().equals(output_b + 302)) + + # Test error when coords are not a subset + with self.assertRaises(ValueError): + model.set_initial_value( + new_time + 2, + {'_integ_stock_a': xr.DataArray(302, {'D': ['A', 'B']}, ['D'])} ) - ) - def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): + def test_set_initial_value_subscripted_value_with_partial_xarray(self): import pysd coords = { @@ -1195,355 +1227,219 @@ def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - out_b = xr.DataArray([[0, 0], [0, 0], [0, 0]], coords, dims) - input_val = xr.DataArray( + output1 = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) + input_val1 = xr.DataArray( [5, 3], {"Second Dimension Subscript": ["Column 1", "Column 2"]}, ["Second Dimension Subscript"], - ) - - model = pysd.read_vensim(test_model_subs) - timeseries = list(range(10)) - val_series = [input_val + rd for rd in np.random.rand(len(timeseries) - ).cumsum()] - temp_timeseries = pd.Series(index=timeseries, data=val_series) - out_series = [out_b + val for val in val_series] - model.set_components({"initial_values": temp_timeseries, - "final_time": 10}) - res = model.run(return_columns=["initial_values"]) - self.assertTrue( - np.all( - [r.equals(t) for r, t in zip(res["initial_values"].values, - out_series)] - ) - ) - - def test_set_subscripted_timeseries_parameter_with_xarray(self): - 
import pysd - - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - - init_val = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - - model = pysd.read_vensim(test_model_subs) - timeseries = list(range(10)) - temp_timeseries = pd.Series( - index=timeseries, - data=[init_val + rd for rd in np.random.rand(len(timeseries) - ).cumsum()], - ) - res = model.run( - params={"initial_values": temp_timeseries, "final_time": 10}, - return_columns=["initial_values"], - return_timestamps=timeseries, - ) - - self.assertTrue( - np.all( - [ - r.equals(t) - for r, t in zip( - res["initial_values"].values, temp_timeseries.values - ) - ] - ) - ) - - def test_docs(self): - """ Test that the model prints some documentation """ - import pysd - - model = pysd.read_vensim(test_model) - self.assertIsInstance(str(model), str) # tests string conversion of - # model - - doc = model.doc() - self.assertIsInstance(doc, pd.DataFrame) - self.assertSetEqual( - { - "Characteristic Time", - "Teacup Temperature", - "FINAL TIME", - "Heat Loss to Room", - "INITIAL TIME", - "Room Temperature", - "SAVEPER", - "TIME STEP", - }, - set(doc["Real Name"].values), - ) - - self.assertEqual( - doc[doc["Real Name"] == "Heat Loss to Room"]["Unit"].values[0], - "Degrees Fahrenheit/Minute", - ) - self.assertEqual( - doc[doc["Real Name"] == "Teacup Temperature"]["Py Name"].values[0], - "teacup_temperature", - ) - self.assertEqual( - doc[doc["Real Name"] == "INITIAL TIME"]["Comment"].values[0], - "The initial time for the simulation.", - ) - self.assertEqual( - doc[doc["Real Name"] == "Characteristic Time"]["Type"].values[0], - "constant" - ) - self.assertEqual( - doc[doc["Real Name"] == "Teacup Temperature"]["Lims"].values[0], - "(32.0, 212.0)", - ) - - def test_docs_multiline_eqn(self): - """ Test that the model prints some documentation """ - import pysd - - path2model = os.path.join( - _root, - "test-models/tests/multiple_lines_def/" + - "test_multiple_lines_def.mdl") - model = pysd.read_vensim(path2model) - - doc = model.doc() + ) - self.assertEqual(doc[doc["Real Name"] == "price"]["Unit"].values[0], - "euros/kg") - self.assertEqual(doc[doc["Real Name"] == "price"]["Py Name"].values[0], - "price") - self.assertEqual( - doc[doc["Real Name"] == "price"]["Subs"].values[0], "['fruits']" + output2 = xr.DataArray([[55, 33], [55, 33], [55, 33]], coords, dims) + input_val2 = xr.DataArray( + [55, 33], + {"Second Dimension Subscript": ["Column 1", "Column 2"]}, + ["Second Dimension Subscript"], ) - self.assertEqual(doc[doc["Real Name"] == "price"]["Eqn"].values[0], - "1.2; .; .; .; 1.4") - def test_stepwise_cache(self): - run_history = [] - result_history = [] + output3 = xr.DataArray([[40, 20], [40, 20], [40, 20]], coords, dims) + input_val3 = xr.DataArray( + [40, 20], + {"Second Dimension Subscript": ["Column 1", "Column 2"]}, + ["Second Dimension Subscript"], + ) - global time - time = lambda: 0 # for testing cache function - from pysd import cache + new_time = np.random.rand() - cache.time = time() + model = pysd.read_vensim(test_model_subs) + initial_stock = model.components.stock_a() - @cache.step - def upstream(run_hist, res_hist): - run_hist.append("U") - return "up" + # Test that we can set with real names + model.set_initial_value(new_time, {'Stock A': input_val1}) + self.assertFalse(initial_stock.equals(output1)) + self.assertTrue(model.components.stock_a().equals(output1)) - 
def downstream(run_hist, res_hist): - run_hist.append("D") - result_history.append(upstream(run_hist, res_hist)) - return "down" + # Test setting with pysafe names + model.set_initial_value(new_time + 1, {'stock_a': input_val2}) + self.assertTrue(model.components.stock_a().equals(output2)) - # initially neither function has a chache value - self.assertFalse("upstream" in cache.data["step"]) - self.assertFalse("downstream" in cache.data["step"]) + # Test setting with stateful object name + model.set_initial_value(new_time + 2, {'_integ_stock_a': input_val3}) + self.assertTrue(model.components.stock_a().equals(output3)) - # when the functions are called, - # the cache is instantiated in the upstream (cached) function - result_history.append(downstream(run_history, result_history)) - self.assertTrue("upstream" in cache.data["step"]) - self.assertFalse("upstream" in cache.data["run"]) - self.assertFalse("downstream" in cache.data["step"]) - self.assertEqual(cache.time, 0) - self.assertListEqual(run_history, ["D", "U"]) - self.assertListEqual(result_history, ["up", "down"]) + def test_set_initial_value_subscripted_value_with_xarray(self): + import pysd - # cleaning only run cache shouldn't affect the step cache - cache.clean("run") - self.assertTrue("upstream" in cache.data["step"]) + coords = { + "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], + "Second Dimension Subscript": ["Column 1", "Column 2"], + } + dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + output1 = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) + output2 = xr.DataArray([[53, 43], [84, 80], [29, 63]], coords, dims) + output3 = xr.DataArray([[54, 32], [40, 87], [93, 93]], coords, dims) - # at the second call, the uncached function is run, - # but the cached upstream function returns its prior value - result_history.append(downstream(run_history, result_history)) - self.assertEqual(cache.time, 0) - self.assertListEqual(run_history, ["D", "U", "D"]) - self.assertListEqual(result_history, ["up", "down", "up", "down"]) + new_time = np.random.rand() - # when the time is reset, both functions are run again. 
- time = lambda: 2 - cache.reset(time()) + model = pysd.read_vensim(test_model_subs) + initial_stock = model.components.stock_a() - result_history.append(downstream(run_history, result_history)) - self.assertEqual(cache.time, 2) - self.assertListEqual(run_history, ["D", "U", "D", "D", "U"]) - self.assertListEqual(result_history, ["up", "down", "up", "down", - "up", "down"]) + # Test that we can set with real names + model.set_initial_value(new_time, {'Stock A': output1}) + self.assertFalse(initial_stock.equals(output1)) + self.assertTrue(model.components.stock_a().equals(output1)) - def test_runwise_cache(self): - # Checks backward compatibility, must be changed to @cache.run when - # deprecated - run_history = [] - result_history = [] + # Test setting with pysafe names + model.set_initial_value(new_time + 1, {'stock_a': output2}) + self.assertTrue(model.components.stock_a().equals(output2)) - global time - time = lambda: 0 # for testing cache function - from pysd import cache + # Test setting with stateful object name + model.set_initial_value(new_time + 2, {'_integ_stock_a': output3}) + self.assertTrue(model.components.stock_a().equals(output3)) - cache.time = time() + def test_set_initial_value_subscripted_value_with_numpy_error(self): + import pysd - @cache.run - def upstream(run_hist, res_hist): - run_hist.append("U") - return "up" + input1 = np.array([[5, 3], [4, 8], [9, 3]]) + input2 = np.array([[53, 43], [84, 80], [29, 63]]) + input3 = np.array([[54, 32], [40, 87], [93, 93]]) - def downstream(run_hist, res_hist): - run_hist.append("D") - result_history.append(upstream(run_hist, res_hist)) - return "down" + new_time = np.random.rand() - # initially neither function has a chache value - self.assertFalse("upstream" in cache.data["run"]) - self.assertFalse("downstream" in cache.data["run"]) + model = pysd.read_vensim(test_model_subs) - # when the functions are called, - # the cache is instantiated in the upstream (cached) function - result_history.append(downstream(run_history, result_history)) - self.assertEqual(cache.time, 0) - self.assertTrue("upstream" in cache.data["run"]) - self.assertFalse("upstream" in cache.data["step"]) - self.assertFalse("downstream" in cache.data["run"]) - self.assertListEqual(run_history, ["D", "U"]) - self.assertListEqual(result_history, ["up", "down"]) + # Test that we can set with real names + with self.assertRaises(TypeError): + model.set_initial_value(new_time, {'Stock A': input1}) - # cleaning only step cache shouldn't affect the step cache - cache.clean("step") - self.assertTrue("upstream" in cache.data["run"]) + # Test setting with pysafe names + with self.assertRaises(TypeError): + model.set_initial_value(new_time + 1, {'stock_a': input2}) - # at the second call, the uncached function is run, - # but the cached upstream function returns its prior value - result_history.append(downstream(run_history, result_history)) - self.assertEqual(cache.time, 0) - self.assertListEqual(run_history, ["D", "U", "D"]) - self.assertListEqual(result_history, ["up", "down", "up", "down"]) + # Test setting with stateful object name + with self.assertRaises(TypeError): + model.set_initial_value(new_time + 2, {'_integ_stock_a': input3}) - # when the time is reset, this has no impact on the upstream cache. 
- time = lambda: 2 - cache.reset(time()) + def test_replace_element(self): + import pysd - result_history.append(downstream(run_history, result_history)) - self.assertEqual(cache.time, 2) - self.assertListEqual(run_history, ["D", "U", "D", "D"]) - self.assertListEqual(result_history, ["up", "down", "up", "down", - "up", "down"]) + model = pysd.read_vensim(test_model) + stocks1 = model.run() + model.components.characteristic_time = lambda: 3 + stocks2 = model.run() + self.assertGreater( + stocks1["Teacup Temperature"].loc[10], + stocks2["Teacup Temperature"].loc[10] + ) - def test_initialize(self): + def test_set_initial_condition_origin_full(self): import pysd model = pysd.read_vensim(test_model) initial_temp = model.components.teacup_temperature() - model.run() - final_temp = model.components.teacup_temperature() - model.initialize() - reset_temp = model.components.teacup_temperature() - self.assertNotEqual(initial_temp, final_temp) - self.assertEqual(initial_temp, reset_temp) + initial_time = model.components.time() - def test_initialize_order(self): - import pysd - model = pysd.load(more_tests + "/initialization_order/" - "test_initialization_order.py") + new_state = {"Teacup Temperature": 500} + new_time = 10 - if model._stateful_elements[0].py_name.endswith('stock_a'): - # we want to have stock b first always - model._stateful_elements.reverse() + model.set_initial_condition((new_time, new_state)) + set_temp = model.components.teacup_temperature() + set_time = model.components.time() - self.assertEqual(model.components.stock_b(), 42) - self.assertEqual(model.components.stock_a(), 42) - model.components.initial_parameter = lambda: 1 - model.initialize() - self.assertEqual(model.components.stock_b(), 1) - self.assertEqual(model.components.stock_a(), 1) + self.assertNotEqual( + set_temp, + initial_temp, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(set_temp, 500) - def test_set_state(self): + self.assertNotEqual( + initial_time, + new_time, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(new_time, set_time) + + model.set_initial_condition("original") + set_temp = model.components.teacup_temperature() + set_time = model.components.time() + + self.assertEqual(initial_temp, set_temp) + self.assertEqual(initial_time, set_time) + + def test_set_initial_condition_origin_short(self): import pysd model = pysd.read_vensim(test_model) - initial_temp = model.components.teacup_temperature() + initial_time = model.components.time() - new_time = np.random.rand() + new_state = {"Teacup Temperature": 500} + new_time = 10 - with catch_warnings(record=True) as ws: - # Test that we can set with real names - model.set_state(new_time, {'Teacup Temperature': 500}) - self.assertNotEqual(initial_temp, 500) - self.assertEqual(model.components.teacup_temperature(), 500) - self.assertEqual(model.components.time(), new_time) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "set_state will be deprecated, use set_initial_value instead.", - str(wf[0].message)) + model.set_initial_condition((new_time, new_state)) + set_temp = model.components.teacup_temperature() + set_time = model.components.time() - with catch_warnings(record=True) as ws: - # Test setting with pysafe names - model.set_state(new_time + 1, {'teacup_temperature': 202}) - self.assertEqual(model.components.teacup_temperature(), 202) - self.assertEqual(model.components.time(), new_time + 1) - # 
use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "set_state will be deprecated, use set_initial_value instead.", - str(wf[0].message)) + self.assertNotEqual( + set_temp, + initial_temp, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(set_temp, 500) - with catch_warnings(record=True) as ws: - # Test setting with stateful object name - model.set_state(new_time + 2, {'_integ_teacup_temperature': 302}) - self.assertEqual(model.components.teacup_temperature(), 302) - self.assertEqual(model.components.time(), new_time + 2) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "set_state will be deprecated, use set_initial_value instead.", - str(wf[0].message)) + self.assertNotEqual( + initial_time, + new_time, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(new_time, set_time) - def test_set_initial_value(self): + model.set_initial_condition("o") + set_temp = model.components.teacup_temperature() + set_time = model.components.time() + + self.assertEqual(initial_temp, set_temp) + self.assertEqual(initial_time, set_time) + + def test_set_initial_condition_for_stock_component(self): import pysd - model = pysd.read_vensim(test_model) + model = pysd.read_vensim(test_model) initial_temp = model.components.teacup_temperature() + initial_time = model.components.time() - new_time = np.random.rand() - - # Test that we can set with real names - model.set_initial_value(new_time, {'Teacup Temperature': 500}) - self.assertNotEqual(initial_temp, 500) - self.assertEqual(model.components.teacup_temperature(), 500) - self.assertEqual(model.components.time(), new_time) + new_state = {"Teacup Temperature": 500} + new_time = 10 - # Test setting with pysafe names - model.set_initial_value(new_time + 1, {'teacup_temperature': 202}) - self.assertEqual(model.components.teacup_temperature(), 202) - self.assertEqual(model.components.time(), new_time + 1) + model.set_initial_condition((new_time, new_state)) + set_temp = model.components.teacup_temperature() + set_time = model.components.time() - # Test setting with stateful object name - model.set_initial_value(new_time + 2, - {'_integ_teacup_temperature': 302}) - self.assertEqual(model.components.teacup_temperature(), 302) - self.assertEqual(model.components.time(), new_time + 2) + self.assertNotEqual( + set_temp, + initial_temp, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(set_temp, 500) - with self.assertRaises(NameError): - model.set_initial_value(new_time, {'not_a_var': 500}) + self.assertNotEqual( + initial_time, 10, "Test definition is wrong, please change" + + " configuration" + ) + self.assertEqual(set_time, 10) - def test_set_initial_value_lookup(self): + def test_set_initial_condition_for_constant_component(self): import pysd - model = pysd.read_vensim(test_model_look) + model = pysd.read_vensim(test_model) + initial_temp = model.components.teacup_temperature() + initial_time = model.components.time() - new_time = np.random.rand() + new_state = {"Room Temperature": 100} + new_time = 10 - # Test that we can set with real names with catch_warnings(record=True) as ws: - model.set_initial_value(new_time, {'lookup 1d': 500}) + model.set_initial_condition((new_time, new_state)) # use only future warnings wf = [w for w in ws if issubclass(w.category, FutureWarning)] 
self.assertEqual(len(wf), 1) @@ -1551,28 +1447,42 @@ def test_set_initial_value_lookup(self): "a constant value with initial_conditions will be deprecated", str(wf[0].message)) - self.assertEqual(model.components.lookup_1d(0), 500) - self.assertEqual(model.components.lookup_1d(100), 500) + set_temp = model.components.room_temperature() + set_time = model.components.time() - with catch_warnings(record=True) as ws: - model.set_initial_value(new_time, {'lookup 2d': 520}) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "a constant value with initial_conditions will be deprecated", - str(wf[0].message)) + self.assertNotEqual( + set_temp, + initial_temp, + "Test definition is wrong, please change configuration", + ) + self.assertEqual(set_temp, 100) - expected = xr.DataArray(520, {"Rows": ["Row1", "Row2"]}, ["Rows"]) - self.assertTrue(model.components.lookup_2d(0).equals(expected)) - self.assertTrue(model.components.lookup_2d(100).equals(expected)) + self.assertNotEqual( + initial_time, 10, "Test definition is wrong, please change " + + "configuration" + ) + self.assertEqual(set_time, 10) - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - model.run() + def test_get_args(self): + import pysd - def test_set_initial_value_subscripted_value_with_constant(self): + model = pysd.read_vensim(test_model) + model2 = pysd.read_vensim(test_model_look) + + self.assertEqual(model.get_args('Room Temperature'), []) + self.assertEqual(model.get_args('room_temperature'), []) + self.assertEqual(model.get_args('teacup_temperature'), []) + self.assertEqual(model.get_args('_integ_teacup_temperature'), []) + + self.assertEqual(model2.get_args('lookup 1d'), ['x']) + self.assertEqual(model2.get_args('lookup_1d'), ['x']) + self.assertEqual(model2.get_args('lookup 2d'), ['x']) + self.assertEqual(model2.get_args('lookup_2d'), ['x']) + + with self.assertRaises(NameError): + model.get_args('not_a_var') + + def test_get_coords(self): import pysd coords = { @@ -1580,540 +1490,632 @@ def test_set_initial_value_subscripted_value_with_constant(self): "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output_b = xr.DataArray([[0, 0], [0, 0], [0, 0]], coords, dims) - new_time = np.random.rand() + coords_dims = (coords, dims) - model = pysd.read_vensim(test_model_subs) - initial_stock = model.components.stock_a() + model = pysd.read_vensim(test_model) + model2 = pysd.read_vensim(test_model_subs) - # Test that we can set with real names - model.set_initial_value(new_time, {'Stock A': 500}) - self.assertFalse(initial_stock.equals(output_b + 500)) - self.assertTrue(model.components.stock_a().equals(output_b + 500)) + self.assertIsNone(model.get_coords("Room Temperature")) + self.assertIsNone(model.get_coords("room_temperature")) + self.assertIsNone(model.get_coords("teacup_temperature")) + self.assertIsNone(model.get_coords("_integ_teacup_temperature")) - # Test setting with pysafe names - model.set_initial_value(new_time + 1, {'stock_a': 202}) - self.assertTrue(model.components.stock_a().equals(output_b + 202)) + self.assertEqual(model2.get_coords("Initial Values"), coords_dims) + self.assertEqual(model2.get_coords("initial_values"), coords_dims) + self.assertEqual(model2.get_coords("Stock A"), coords_dims) + self.assertEqual(model2.get_coords("stock_a"), coords_dims) + self.assertEqual(model2.get_coords("_integ_stock_a"), 
coords_dims) - # Test setting with stateful object name - model.set_initial_value(new_time + 2, {'_integ_stock_a': 302}) - self.assertTrue(model.components.stock_a().equals(output_b + 302)) + with self.assertRaises(NameError): + model.get_coords('not_a_var') - # Test error when coords are not a subset - with self.assertRaises(ValueError): - model.set_initial_value( - new_time + 2, - {'_integ_stock_a': xr.DataArray(302, {'D': ['A', 'B']}, ['D'])} - ) + def test__build_euler_timeseries(self): + import pysd - def test_set_initial_value_subscripted_value_with_partial_xarray(self): + model = pysd.read_vensim(test_model) + model.components.initial_time = lambda: 3 + model.components.final_time = lambda: 50 + model.components.time_step = lambda: 1 + model.initialize() + + actual = list(model._build_euler_timeseries(return_timestamps=[10])) + expected = range(3, 11, 1) + self.assertSequenceEqual(actual, expected) + + actual = list(model._build_euler_timeseries(return_timestamps=[10], + final_time=50)) + expected = range(3, 51, 1) + self.assertSequenceEqual(actual, expected) + + def test__integrate(self): import pysd - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output1 = xr.DataArray([[5, 3], [5, 3], [5, 3]], coords, dims) - input_val1 = xr.DataArray( - [5, 3], - {"Second Dimension Subscript": ["Column 1", "Column 2"]}, - ["Second Dimension Subscript"], - ) + # Todo: think through a stronger test here... + model = pysd.read_vensim(test_model) + model.progress = False + res = model._integrate(time_steps=list(range(5)), + capture_elements=['teacup_temperature'], + return_timestamps=list(range(0, 5, 2))) + self.assertIsInstance(res, pd.DataFrame) + self.assertIn('teacup_temperature', res) + self.assertTrue(all(res.index.values == list(range(0, 5, 2)))) - output2 = xr.DataArray([[55, 33], [55, 33], [55, 33]], coords, dims) - input_val2 = xr.DataArray( - [55, 33], - {"Second Dimension Subscript": ["Column 1", "Column 2"]}, - ["Second Dimension Subscript"], - ) + def test_default_returns_with_construction_functions(self): + """ + If the run function is called with no arguments, should still be able + to get default return functions. 
- output3 = xr.DataArray([[40, 20], [40, 20], [40, 20]], coords, dims) - input_val3 = xr.DataArray( - [40, 20], - {"Second Dimension Subscript": ["Column 1", "Column 2"]}, - ["Second Dimension Subscript"], + """ + import pysd + + model = pysd.read_vensim(os.path.join( + _root, "test-models/tests/delays/test_delays.mdl")) + ret = model.run() + self.assertTrue( + { + "Initial Value", + "Input", + "Order Variable", + "Output Delay1", + "Output Delay1I", + "Output Delay3", + } + <= set(ret.columns.values) ) - new_time = np.random.rand() + def test_default_returns_with_lookups(self): + """ + Addresses https://github.com/JamesPHoughton/pysd/issues/114 + The default settings should skip model elements with no particular + return value + """ + import pysd - model = pysd.read_vensim(test_model_subs) - initial_stock = model.components.stock_a() + model = pysd.read_vensim(os.path.join( + _root, "test-models/tests/lookups/test_lookups.mdl")) + ret = model.run() + self.assertTrue( + {"accumulation", "rate", "lookup function call"} <= + set(ret.columns.values) + ) - # Test that we can set with real names - model.set_initial_value(new_time, {'Stock A': input_val1}) - self.assertFalse(initial_stock.equals(output1)) - self.assertTrue(model.components.stock_a().equals(output1)) + def test_py_model_file(self): + """Addresses https://github.com/JamesPHoughton/pysd/issues/86""" + import pysd + + model = pysd.read_vensim(test_model) + self.assertEqual(model.py_model_file, + test_model.replace(".mdl", ".py")) - # Test setting with pysafe names - model.set_initial_value(new_time + 1, {'stock_a': input_val2}) - self.assertTrue(model.components.stock_a().equals(output2)) + def test_mdl_file(self): + """Relates to https://github.com/JamesPHoughton/pysd/issues/86""" + import pysd - # Test setting with stateful object name - model.set_initial_value(new_time + 2, {'_integ_stock_a': input_val3}) - self.assertTrue(model.components.stock_a().equals(output3)) + model = pysd.read_vensim(test_model) + self.assertEqual(model.mdl_file, test_model) - def test_set_initial_value_subscripted_value_with_xarray(self): + @unittest.skip("infinite loop") + def test_incomplete_model(self): import pysd - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - output1 = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - output2 = xr.DataArray([[53, 43], [84, 80], [29, 63]], coords, dims) - output3 = xr.DataArray([[54, 32], [40, 87], [93, 93]], coords, dims) + with catch_warnings(record=True) as w: + simplefilter("always") + model = pysd.read_vensim(os.path.join( + _root, + "test-models/tests/incomplete_equations/" + + "test_incomplete_model.mdl" + )) + self.assertTrue(any([warn.category == SyntaxWarning for warn in w])) - new_time = np.random.rand() + with catch_warnings(record=True) as w: + model.run() + self.assertEqual(len(w), 1) - model = pysd.read_vensim(test_model_subs) - initial_stock = model.components.stock_a() - # Test that we can set with real names - model.set_initial_value(new_time, {'Stock A': output1}) - self.assertFalse(initial_stock.equals(output1)) - self.assertTrue(model.components.stock_a().equals(output1)) +class TestModelInteraction(unittest.TestCase): + """ The tests in this class test pysd's interaction with itself + and other modules. 
""" - # Test setting with pysafe names - model.set_initial_value(new_time + 1, {'stock_a': output2}) - self.assertTrue(model.components.stock_a().equals(output2)) + def test_multiple_load(self): + """ + Test that we can load and run multiple models at the same time, + and that the models don't interact with each other. This can + happen if we arent careful about class attributes vs instance + attributes - # Test setting with stateful object name - model.set_initial_value(new_time + 2, {'_integ_stock_a': output3}) - self.assertTrue(model.components.stock_a().equals(output3)) + This test responds to issue: + https://github.com/JamesPHoughton/pysd/issues/23 - def test_set_initial_value_subscripted_value_with_numpy_error(self): + """ import pysd - input1 = np.array([[5, 3], [4, 8], [9, 3]]) - input2 = np.array([[53, 43], [84, 80], [29, 63]]) - input3 = np.array([[54, 32], [40, 87], [93, 93]]) - - new_time = np.random.rand() + model_1 = pysd.read_vensim(os.path.join( + _root, "test-models/samples/teacup/teacup.mdl")) + model_2 = pysd.read_vensim(os.path.join( + _root, "test-models/samples/SIR/SIR.mdl")) - model = pysd.read_vensim(test_model_subs) + self.assertNotIn("teacup_temperature", dir(model_2.components)) + self.assertIn("susceptible", dir(model_2.components)) - # Test that we can set with real names - with self.assertRaises(TypeError): - model.set_initial_value(new_time, {'Stock A': input1}) + self.assertNotIn("susceptible", dir(model_1.components)) + self.assertIn("teacup_temperature", dir(model_1.components)) - # Test setting with pysafe names - with self.assertRaises(TypeError): - model.set_initial_value(new_time + 1, {'stock_a': input2}) + def test_no_crosstalk(self): + """ + Need to check that if we instantiate two copies of the same model, + changes to one copy do not influence the other copy. - # Test setting with stateful object name - with self.assertRaises(TypeError): - model.set_initial_value(new_time + 2, {'_integ_stock_a': input3}) + Checks for issue: https://github.com/JamesPHoughton/pysd/issues/108 + that time is not shared between the two models - def test_replace_element(self): + """ + # Todo: this test could be made more comprehensive import pysd - model = pysd.read_vensim(test_model) - stocks1 = model.run() - model.components.characteristic_time = lambda: 3 - stocks2 = model.run() - self.assertGreater( - stocks1["Teacup Temperature"].loc[10], - stocks2["Teacup Temperature"].loc[10] - ) + model_1 = pysd.read_vensim(os.path.join( + _root, "test-models/samples/teacup/teacup.mdl")) + model_2 = pysd.read_vensim(os.path.join( + _root, "test-models/samples/SIR/SIR.mdl")) - def test_set_initial_condition_origin_full(self): + model_1.components.initial_time = lambda: 10 + self.assertNotEqual(model_2.components.initial_time, 10) + + # check that the model time is not shared between the two objects + model_1.run() + self.assertNotEqual(model_1.time(), model_2.time()) + + def test_restart_cache(self): + """ + Test that when we cache a model variable at the 'run' time, + if the variable is changed and the model re-run, the cache updates + to the new variable, instead of maintaining the old one. 
+ """ import pysd model = pysd.read_vensim(test_model) - initial_temp = model.components.teacup_temperature() - initial_time = model.components.time() + model.run() + old = model.components.room_temperature() + model.set_components({"Room Temperature": 345}) + new = model.components.room_temperature() + model.run() + self.assertEqual(new, 345) + self.assertNotEqual(old, new) - new_state = {"Teacup Temperature": 500} - new_time = 10 + def test_circular_reference(self): + import pysd - model.set_initial_condition((new_time, new_state)) - set_temp = model.components.teacup_temperature() - set_time = model.components.time() + with self.assertRaises(ValueError) as err: + pysd.load( + more_tests + + "/circular_reference/test_circular_reference.py") - self.assertNotEqual( - set_temp, - initial_temp, - "Test definition is wrong, please change configuration", + self.assertIn("_integ_integ", str(err.exception)) + self.assertIn("_delay_delay", str(err.exception)) + self.assertIn( + "Unresolvable Reference: " + + "Probable circular initialization...\n" + + "Not able to initialize the " + + "following objects:", + str(err.exception), ) - self.assertEqual(set_temp, 500) - self.assertNotEqual( - initial_time, - new_time, - "Test definition is wrong, please change configuration", + def test_not_able_to_update_stateful_object(self): + import pysd + + integ = pysd.functions.Integ( + lambda: xr.DataArray([1, 2], {"Dim": ["A", "B"]}, ["Dim"]), + lambda: xr.DataArray(0, {"Dim": ["A", "B"]}, ["Dim"]), + "my_integ_object", ) - self.assertEqual(new_time, set_time) - model.set_initial_condition("original") - set_temp = model.components.teacup_temperature() - set_time = model.components.time() + integ.initialize() - self.assertEqual(initial_temp, set_temp) - self.assertEqual(initial_time, set_time) + with self.assertRaises(ValueError) as err: + integ.update(np.array([[1, 2], [3, 4]])) - def test_set_initial_condition_origin_short(self): + self.assertIn( + "Could not update the value of my_integ_object", str(err.exception) + ) + + +class TestMultiRun(unittest.TestCase): + def test_delay_reinitializes(self): import pysd - model = pysd.read_vensim(test_model) - initial_temp = model.components.teacup_temperature() - initial_time = model.components.time() + model = pysd.read_vensim(os.path.join( + _root, + "test-models/tests/delays/test_delays.mdl")) + res1 = model.run() + res2 = model.run() + self.assertTrue(all(res1 == res2)) - new_state = {"Teacup Temperature": 500} - new_time = 10 - model.set_initial_condition((new_time, new_state)) - set_temp = model.components.teacup_temperature() - set_time = model.components.time() +class TestSplitViews(unittest.TestCase): + def test_read_vensim_split_model(self): + import pysd + from pysd.tools.benchmarking import assert_frames_close - self.assertNotEqual( - set_temp, - initial_temp, - "Test definition is wrong, please change configuration", + root_dir = more_tests + "/split_model/" + + model_name = "test_split_model" + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True ) - self.assertEqual(set_temp, 500) - self.assertNotEqual( - initial_time, - new_time, - "Test definition is wrong, please change configuration", + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_filename = "_modules.json" + modules_dirname = "modules_" + model_name + + # check that _namespace and _subscript_dict json files where created + self.assertTrue(os.path.isfile(root_dir + 
namespace_filename))
+        self.assertTrue(os.path.isfile(root_dir + subscript_dict_filename))
+
+        # check that the main model file was created
+        self.assertTrue(os.path.isfile(root_dir + model_name + ".py"))
+
+        # check that the modules folder was created
+        self.assertTrue(os.path.isdir(root_dir + modules_dirname))
+        self.assertTrue(
+            os.path.isfile(root_dir + modules_dirname + "/" + modules_filename)
         )
-        self.assertEqual(new_time, set_time)

-        model.set_initial_condition("o")
-        set_temp = model.components.teacup_temperature()
-        set_time = model.components.time()
+        # check creation of module files
+        self.assertTrue(
+            os.path.isfile(root_dir + modules_dirname + "/" + "view_1.py"))
+        self.assertTrue(
+            os.path.isfile(root_dir + modules_dirname + "/" + "view2.py"))
+        self.assertTrue(
+            os.path.isfile(root_dir + modules_dirname + "/" + "view_3.py"))

-        self.assertEqual(initial_temp, set_temp)
-        self.assertEqual(initial_time, set_time)
+        # check dictionaries
+        self.assertIn("Stock", model_split.components._namespace.keys())
+        self.assertIn("view2", model_split.components._modules.keys())
+        self.assertIsInstance(model_split.components._subscript_dict, dict)
+
+        with open(root_dir + model_name + ".py", 'r') as file:
+            file_content = file.read()
+
+        # assert that the functions are not defined in the main file
+        self.assertNotIn("def another_var()", file_content)
+        self.assertNotIn("def rate1()", file_content)
+        self.assertNotIn("def varn()", file_content)
+        self.assertNotIn("def variablex()", file_content)
+        self.assertNotIn("def stock()", file_content)
+
+        # check that the results of the split model are the same as those
+        # without splitting
+        model_non_split = pysd.read_vensim(
+            root_dir + model_name + ".mdl", split_views=False
         )

-    def test_set_initial_condition_for_stock_component(self):
-        import pysd
+        result_split = model_split.run()
+        result_non_split = model_non_split.run()

-        model = pysd.read_vensim(test_model)
-        initial_temp = model.components.teacup_temperature()
-        initial_time = model.components.time()
+        # results of a split model are the same as those of the regular
+        # model (un-split)
+        assert_frames_close(result_split, result_non_split, atol=0, rtol=0)

-        new_state = {"Teacup Temperature": 500}
-        new_time = 10
+        with open(root_dir + model_name + ".py", 'r') as file:
+            file_content = file.read()

-        model.set_initial_condition((new_time, new_state))
-        set_temp = model.components.teacup_temperature()
-        set_time = model.components.time()
+        # assert that the functions are in the main file for regular trans
+        self.assertIn("def another_var()", file_content)
+        self.assertIn("def rate1()", file_content)
+        self.assertIn("def varn()", file_content)
+        self.assertIn("def variablex()", file_content)
+        self.assertIn("def stock()", file_content)

-        self.assertNotEqual(
-            set_temp,
-            initial_temp,
-            "Test definition is wrong, please change configuration",
-        )
-        self.assertEqual(set_temp, 500)
+        # remove newly created files
+        os.remove(root_dir + model_name + ".py")
+        os.remove(root_dir + namespace_filename)
+        os.remove(root_dir + subscript_dict_filename)

-        self.assertNotEqual(
-            initial_time, 10, "Test definition is wrong, please change" +
-            " configuration"
-        )
-        self.assertEqual(set_time, 10)
+        # remove newly created modules folder
+        shutil.rmtree(root_dir + modules_dirname)

-    def test_set_initial_condition_for_constant_component(self):
+    def test_read_vensim_split_model_vensim_8_2_1(self):
         import pysd
+        from pysd.tools.benchmarking import assert_frames_close

-        model = 
pysd.read_vensim(test_model) - initial_temp = model.components.teacup_temperature() - initial_time = model.components.time() + root_dir = os.path.join(_root, "more-tests/split_model_vensim_8_2_1/") - new_state = {"Room Temperature": 100} - new_time = 10 + model_name = "test_split_model_vensim_8_2_1" + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True, subview_sep="." + ) - with catch_warnings(record=True) as ws: - model.set_initial_condition((new_time, new_state)) - # use only future warnings - wf = [w for w in ws if issubclass(w.category, FutureWarning)] - self.assertEqual(len(wf), 1) - self.assertIn( - "a constant value with initial_conditions will be deprecated", - str(wf[0].message)) + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_filename = "_modules.json" + modules_dirname = "modules_" + model_name - set_temp = model.components.room_temperature() - set_time = model.components.time() + # check that _namespace and _subscript_dict json files where created + self.assertTrue(os.path.isfile(root_dir + namespace_filename)) + self.assertTrue(os.path.isfile(root_dir + subscript_dict_filename)) - self.assertNotEqual( - set_temp, - initial_temp, - "Test definition is wrong, please change configuration", - ) - self.assertEqual(set_temp, 100) + # check that the main model file was created + self.assertTrue(os.path.isfile(root_dir + model_name + ".py")) - self.assertNotEqual( - initial_time, 10, "Test definition is wrong, please change " + - "configuration" + # check that the modules folder was created + self.assertTrue(os.path.isdir(root_dir + modules_dirname)) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/" + modules_filename) ) - self.assertEqual(set_time, 10) - - def test_get_args(self): - import pysd - model = pysd.read_vensim(test_model) - model2 = pysd.read_vensim(test_model_look) + # check creation of module files + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/" + "teacup.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/" + "cream.py")) - self.assertEqual(model.get_args('Room Temperature'), []) - self.assertEqual(model.get_args('room_temperature'), []) - self.assertEqual(model.get_args('teacup_temperature'), []) - self.assertEqual(model.get_args('_integ_teacup_temperature'), []) + # check dictionaries + self.assertIn("Cream Temperature", + model_split.components._namespace.keys()) + self.assertIn("cream", model_split.components._modules.keys()) + self.assertIsInstance(model_split.components._subscript_dict, dict) - self.assertEqual(model2.get_args('lookup 1d'), ['x']) - self.assertEqual(model2.get_args('lookup_1d'), ['x']) - self.assertEqual(model2.get_args('lookup 2d'), ['x']) - self.assertEqual(model2.get_args('lookup_2d'), ['x']) + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() - with self.assertRaises(NameError): - model.get_args('not_a_var') + # assert that the functions are not defined in the main file + self.assertNotIn("def teacup_temperature()", file_content) + self.assertNotIn("def cream_temperature()", file_content) - def test_get_coords(self): - import pysd + # check that the results of the split model are the same than those + # without splitting + model_non_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=False + ) - coords = { - "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], - "Second Dimension Subscript": ["Column 
1", "Column 2"], - } - dims = ["One Dimensional Subscript", "Second Dimension Subscript"] + result_split = model_split.run() + result_non_split = model_non_split.run() - coords_dims = (coords, dims) + # results of a split model are the same that those of the regular + # model (un-split) + assert_frames_close(result_split, result_non_split, atol=0, rtol=0) - model = pysd.read_vensim(test_model) - model2 = pysd.read_vensim(test_model_subs) + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() - self.assertIsNone(model.get_coords("Room Temperature")) - self.assertIsNone(model.get_coords("room_temperature")) - self.assertIsNone(model.get_coords("teacup_temperature")) - self.assertIsNone(model.get_coords("_integ_teacup_temperature")) + # assert that the functions are in the main file for regular trans + self.assertIn("def teacup_temperature()", file_content) + self.assertIn("def cream_temperature()", file_content) - self.assertEqual(model2.get_coords("Initial Values"), coords_dims) - self.assertEqual(model2.get_coords("initial_values"), coords_dims) - self.assertEqual(model2.get_coords("Stock A"), coords_dims) - self.assertEqual(model2.get_coords("stock_a"), coords_dims) - self.assertEqual(model2.get_coords("_integ_stock_a"), coords_dims) + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) - with self.assertRaises(NameError): - model.get_coords('not_a_var') + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) - def test__build_euler_timeseries(self): + def test_read_vensim_split_model_subviews(self): import pysd + from pysd.tools.benchmarking import assert_frames_close - model = pysd.read_vensim(test_model) - model.components.initial_time = lambda: 3 - model.components.final_time = lambda: 50 - model.components.time_step = lambda: 1 - model.initialize() + root_dir = os.path.join(_root, "more-tests/split_model/") - actual = list(model._build_euler_timeseries(return_timestamps=[10])) - expected = range(3, 11, 1) - self.assertSequenceEqual(actual, expected) + model_name = "test_split_model_subviews" + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True, + subview_sep=["."] + ) - actual = list(model._build_euler_timeseries(return_timestamps=[10], - final_time=50)) - expected = range(3, 51, 1) - self.assertSequenceEqual(actual, expected) + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_dirname = "modules_" + model_name - def test__integrate(self): - import pysd + # check that the modules folders were created + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) - # Todo: think through a stronger test here... 
- model = pysd.read_vensim(test_model) - model.progress = False - res = model._integrate(time_steps=list(range(5)), - capture_elements=['teacup_temperature'], - return_timestamps=list(range(0, 5, 2))) - self.assertIsInstance(res, pd.DataFrame) - self.assertIn('teacup_temperature', res) - self.assertTrue(all(res.index.values == list(range(0, 5, 2)))) + # check creation of module files + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_1.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_2.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) - def test_default_returns_with_construction_functions(self): - """ - If the run function is called with no arguments, should still be able - to get default return functions. + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() - """ - import pysd + # assert that the functions are not defined in the main file + self.assertNotIn("def another_var()", file_content) + self.assertNotIn("def rate1()", file_content) + self.assertNotIn("def varn()", file_content) + self.assertNotIn("def variablex()", file_content) + self.assertNotIn("def stock()", file_content) - model = pysd.read_vensim(os.path.join( - _root, "test-models/tests/delays/test_delays.mdl")) - ret = model.run() - self.assertTrue( - { - "Initial Value", - "Input", - "Order Variable", - "Output Delay1", - "Output Delay1I", - "Output Delay3", - } - <= set(ret.columns.values) + # check that the results of the split model are the same than those + # without splitting + model_non_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=False ) - def test_default_returns_with_lookups(self): - """ - Addresses https://github.com/JamesPHoughton/pysd/issues/114 - The default settings should skip model elements with no particular - return value - """ - import pysd - - model = pysd.read_vensim(os.path.join( - _root, "test-models/tests/lookups/test_lookups.mdl")) - ret = model.run() - self.assertTrue( - {"accumulation", "rate", "lookup function call"} <= - set(ret.columns.values) - ) + result_split = model_split.run() + result_non_split = model_non_split.run() - def test_py_model_file(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/86""" - import pysd + # results of a split model are the same that those of the regular + # model (un-split) + assert_frames_close(result_split, result_non_split, atol=0, rtol=0) - model = pysd.read_vensim(test_model) - self.assertEqual(model.py_model_file, - test_model.replace(".mdl", ".py")) + with open(root_dir + model_name + ".py", 'r') as file: + file_content = file.read() - def test_mdl_file(self): - """Relates to https://github.com/JamesPHoughton/pysd/issues/86""" - import pysd + # assert that the functions are in the main file for regular trans + self.assertIn("def another_var()", file_content) + self.assertIn("def rate1()", file_content) + self.assertIn("def varn()", file_content) + self.assertIn("def variablex()", file_content) + self.assertIn("def stock()", file_content) - model = pysd.read_vensim(test_model) - self.assertEqual(model.mdl_file, test_model) + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) - @unittest.skip("infinite loop") - def test_incomplete_model(self): + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) + + def 
test_read_vensim_split_model_several_subviews(self): import pysd + from pysd.tools.benchmarking import assert_frames_close - with catch_warnings(record=True) as w: - simplefilter("always") - model = pysd.read_vensim(os.path.join( - _root, - "test-models/tests/incomplete_equations/" - + "test_incomplete_model.mdl" - )) - self.assertTrue(any([warn.category == SyntaxWarning for warn in w])) + root_dir = os.path.join(_root, "more-tests/split_model/") - with catch_warnings(record=True) as w: - model.run() - self.assertEqual(len(w), 1) + model_name = "test_split_model_sub_subviews" + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True, + subview_sep=[".", "-"] + ) + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_dirname = "modules_" + model_name -class TestModelInteraction(unittest.TestCase): - """ The tests in this class test pysd's interaction with itself - and other modules. """ + # check that the modules folders were created + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_1")) + self.assertTrue(os.path.isdir(root_dir + modules_dirname + "/view_3" + + "/subview_2")) + # check creation of module files + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_2.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_1.py")) + self.assertTrue( + os.path.isfile(root_dir + modules_dirname + "/view_1/" + + "submodule_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_1.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_1" + "/sview_2.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_3.py")) + self.assertTrue(os.path.isfile(root_dir + modules_dirname + "/view_3" + + "/subview_2" + "/sview_4.py")) - def test_multiple_load(self): - """ - Test that we can load and run multiple models at the same time, - and that the models don't interact with each other. 
This can
-        happen if we arent careful about class attributes vs instance
-        attributes
+        with open(root_dir + model_name + ".py", 'r') as file:
+            file_content = file.read()

-        This test responds to issue:
-        https://github.com/JamesPHoughton/pysd/issues/23
+        # assert that the functions are not defined in the main file
+        self.assertNotIn("def another_var()", file_content)
+        self.assertNotIn("def rate1()", file_content)
+        self.assertNotIn("def varn()", file_content)
+        self.assertNotIn("def variablex()", file_content)
+        self.assertNotIn("def stock()", file_content)
+        self.assertNotIn("def interesting_var_2()", file_content)
+        self.assertNotIn("def great_var()", file_content)

-        """
-        import pysd
+        # check that the results of the split model are the same as those
+        # without splitting
+        model_non_split = pysd.read_vensim(
+            root_dir + model_name + ".mdl", split_views=False
+        )

-        model_1 = pysd.read_vensim(os.path.join(
-            _root, "test-models/samples/teacup/teacup.mdl"))
-        model_2 = pysd.read_vensim(os.path.join(
-            _root, "test-models/samples/SIR/SIR.mdl"))
+        result_split = model_split.run()
+        result_non_split = model_non_split.run()

-        self.assertNotIn("teacup_temperature", dir(model_2.components))
-        self.assertIn("susceptible", dir(model_2.components))
+        # results of a split model are the same as those of the regular
+        # model (un-split)
+        assert_frames_close(result_split, result_non_split, atol=0, rtol=0)

-        self.assertNotIn("susceptible", dir(model_1.components))
-        self.assertIn("teacup_temperature", dir(model_1.components))
+        with open(root_dir + model_name + ".py", 'r') as file:
+            file_content = file.read()

-    def test_no_crosstalk(self):
-        """
-        Need to check that if we instantiate two copies of the same model,
-        changes to one copy do not influence the other copy.
+ # assert that the functions are in the main file for regular trans + self.assertIn("def another_var()", file_content) + self.assertIn("def rate1()", file_content) + self.assertIn("def varn()", file_content) + self.assertIn("def variablex()", file_content) + self.assertIn("def stock()", file_content) + self.assertIn("def interesting_var_2()", file_content) + self.assertIn("def great_var()", file_content) - Checks for issue: https://github.com/JamesPHoughton/pysd/issues/108 - that time is not shared between the two models + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) - """ - # Todo: this test could be made more comprehensive + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) + + def test_read_vensim_split_model_with_macro(self): import pysd + from pysd.tools.benchmarking import assert_frames_close - model_1 = pysd.read_vensim(os.path.join( - _root, "test-models/samples/teacup/teacup.mdl")) - model_2 = pysd.read_vensim(os.path.join( - _root, "test-models/samples/SIR/SIR.mdl")) + root_dir = more_tests + "/split_model_with_macro/" - model_1.components.initial_time = lambda: 10 - self.assertNotEqual(model_2.components.initial_time, 10) + model_name = "test_split_model_with_macro" + model_non_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=False + ) - # check that the model time is not shared between the two objects - model_1.run() - self.assertNotEqual(model_1.time(), model_2.time()) + namespace_filename = "_namespace_" + model_name + ".json" + subscript_dict_filename = "_subscripts_" + model_name + ".json" + modules_dirname = "modules_" + model_name - def test_restart_cache(self): - """ - Test that when we cache a model variable at the 'run' time, - if the variable is changed and the model re-run, the cache updates - to the new variable, instead of maintaining the old one. 
- """ - import pysd + # running split model + result_non_split = model_non_split.run() - model = pysd.read_vensim(test_model) - model.run() - old = model.components.room_temperature() - model.set_components({"Room Temperature": 345}) - new = model.components.room_temperature() - model.run() - self.assertEqual(new, 345) - self.assertNotEqual(old, new) + model_split = pysd.read_vensim( + root_dir + model_name + ".mdl", split_views=True + ) + result_split = model_split.run() - def test_circular_reference(self): - import pysd + # results of a split model are the same that those of the regular model + assert_frames_close(result_split, result_non_split, atol=0, rtol=0) - with self.assertRaises(ValueError) as err: - pysd.load( - more_tests - + "/circular_reference/test_circular_reference.py") + # remove newly created files + os.remove(root_dir + model_name + ".py") + os.remove(root_dir + "expression_macro.py") + os.remove(root_dir + namespace_filename) + os.remove(root_dir + subscript_dict_filename) - self.assertIn("_integ_integ", str(err.exception)) - self.assertIn("_delay_delay", str(err.exception)) - self.assertIn( - "Unresolvable Reference: " - + "Probable circular initialization...\n" - + "Not able to initialize the " - + "following objects:", - str(err.exception), - ) + # remove newly created modules folder + shutil.rmtree(root_dir + modules_dirname) - def test_not_able_to_update_stateful_object(self): + def test_read_vensim_split_model_warning(self): import pysd + # setting the split_views=True when the model has a single + # view should generate a warning + with catch_warnings(record=True) as ws: + pysd.read_vensim( + test_model, split_views=True + ) # set stock value using params - integ = pysd.functions.Integ( - lambda: xr.DataArray([1, 2], {"Dim": ["A", "B"]}, ["Dim"]), - lambda: xr.DataArray(0, {"Dim": ["A", "B"]}, ["Dim"]), - "my_integ_object", + wu = [w for w in ws if issubclass(w.category, UserWarning)] + + self.assertEqual(len(wu), 1) + self.assertTrue( + "Only a single view with no subviews was detected" in str( + wu[0].message) ) - integ.initialize() + def test_read_vensim_split_model_non_matching_separator_warning(self): + import pysd + # setting the split_views=True when the model has a single + # view should generate a warning - with self.assertRaises(ValueError) as err: - integ.update(np.array([[1, 2], [3, 4]])) + root_dir = os.path.join(_root, "more-tests/split_model/") - self.assertIn( - "Could not update the value of my_integ_object", str(err.exception) - ) + model_name = "test_split_model_sub_subviews" + with catch_warnings(record=True) as ws: + pysd.read_vensim(root_dir + model_name + ".mdl", split_views=True, + subview_sep=["a"]) -class TestMultiRun(unittest.TestCase): - def test_delay_reinitializes(self): - import pysd + wu = [w for w in ws if issubclass(w.category, UserWarning)] - model = pysd.read_vensim(os.path.join( - _root, - "test-models/tests/delays/test_delays.mdl")) - res1 = model.run() - res2 = model.run() - self.assertTrue(all(res1 == res2)) + self.assertEqual(len(wu), 1) + self.assertTrue( + "The given subview separators were not matched in" in str( + wu[0].message) + ) From 9c36e2c143aa957661323e3c770d83ed389d0933 Mon Sep 17 00:00:00 2001 From: Eneko Martin Martinez Date: Mon, 27 Sep 2021 13:58:32 +0200 Subject: [PATCH 6/8] Clean load_modules --- pysd/py_backend/builder.py | 5 ++--- pysd/py_backend/utils.py | 11 ++++------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py index 
eb1fbf64..89ce584e 100644
--- a/pysd/py_backend/builder.py
+++ b/pysd/py_backend/builder.py
@@ -296,13 +296,12 @@ def _build_main_module(elements, subscript_dict, file_name):
     text += textwrap.dedent("""
     # load modules from modules_%(outfile)s directory
-    for module_name, module_content in _modules.items():
-        exec(load_modules(module_name, module_content, _root,
-                          "%(outfile)s", submodules=[]))
+    exec(load_modules("modules_%(outfile)s", _modules, _root, []))

     """ % {
         "outfile": os.path.basename(file_name).split(".")[0],
     })
+    print(text)

     text += funcs
     text = black.format_file_contents(text, fast=True, mode=black.FileMode())

diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py
index 12fc8b46..5947bf15 100644
--- a/pysd/py_backend/utils.py
+++ b/pysd/py_backend/utils.py
@@ -889,21 +889,18 @@ def open_module(root_dir, model_name, module, submodule=None):
         os.path.join(root_dir, "modules_" + model_name, rel_file_path)).read()


-def load_modules(module_name, module_content, root_dir, model_name,
-                 work_dir=None, submodules=[]):
+def load_modules(module_name, module_content, work_dir, submodules):
     # TODO: document
-    if not work_dir:
-        work_dir = os.path.join(root_dir, "modules_" + model_name)

     if isinstance(module_content, list):
         with open(os.path.join(work_dir, module_name + ".py"), "r") as file:
             submodules.append(file.read())
     else:
-        work_dir = os.path.join(work_dir, module_name)
         for submod_name, submod_content in module_content.items():
             load_modules(
-                submod_name, submod_content, root_dir, model_name,
-                work_dir=work_dir, submodules=submodules)
+                submod_name, submod_content,
+                os.path.join(work_dir, module_name),
+                submodules)

     return "\n\n".join(submodules)


From 2a702754a41e0fcfdf1f7426b51b77bfd17b3e68 Mon Sep 17 00:00:00 2001
From: Eneko Martin Martinez
Date: Tue, 28 Sep 2021 08:56:28 +0200
Subject: [PATCH 7/8] Add macro objects to their corresponding views

---
 pysd/py_backend/builder.py | 20 ++++++++++---------
 pysd/py_backend/utils.py | 31 ++++++++++++++++++++++++++++-
 pysd/py_backend/vensim/vensim2py.py | 2 +-
 3 files changed, 42 insertions(+), 11 deletions(-)

diff --git a/pysd/py_backend/builder.py b/pysd/py_backend/builder.py
index 89ce584e..90d43020 100644
--- a/pysd/py_backend/builder.py
+++ b/pysd/py_backend/builder.py
@@ -301,7 +301,6 @@ def _build_main_module(elements, subscript_dict, file_name):
     """ % {
         "outfile": os.path.basename(file_name).split(".")[0],
     })
-    print(text)

     text += funcs
     text = black.format_file_contents(text, fast=True, mode=black.FileMode())

@@ -1916,12 +1915,15 @@ def add_ext_lookup(identifier, file_name, tab, x_row_or_col, cell,
     return "%s(x)" % external["py_name"], [external]


-def add_macro(macro_name, filename, arg_names, arg_vals):
+def add_macro(identifier, macro_name, filename, arg_names, arg_vals):
     """
-    Constructs a stateful object instantiating a 'Macro'
+    Constructs a stateful object instantiating a 'Macro'.

     Parameters
     ----------
+    identifier: str
+        The python-safe name of the element that calls the macro.
+
     macro_name: str
        Python safe name for macro.

@@ -1934,16 +1936,16 @@ def add_macro(macro_name, filename, arg_names, arg_vals):
     Returns
     -------
     reference: str
-        reference to the Initial object `__call__` method,
-        which will return the first calculated value of `initial_input`
+        Reference to the Macro object `__call__` method,
+        which will return the value of the macro.
+
+    new_structure: list
+        List of element construction dictionaries for the builder to assemble.

     """
     Imports.add("functions", "Macro")

-    py_name = "_macro_" + macro_name + "_" + "_".join(
-        [utils.make_python_identifier(f)[0] for f in arg_vals])
+    py_name = "_macro_" + macro_name + "_" + identifier

     func_args = "{ %s }" % ", ".join(
         ["'%s': lambda: %s" % (key, val) for key, val in zip(arg_names,
@@ -1951,7 +1953,7 @@

     stateful = {
         "py_name": py_name,
-        "parent_name": macro_name,
+        "parent_name": identifier,
        "real_name": "Macro Instantiation of " + macro_name,
        "doc": "Instantiates the Macro",
        "py_expr": "Macro('%s', %s, '%s',"

diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py
index 5947bf15..45b87881 100644
--- a/pysd/py_backend/utils.py
+++ b/pysd/py_backend/utils.py
@@ -890,8 +890,37 @@ def open_module(root_dir, model_name, module, submodule=None):


 def load_modules(module_name, module_content, work_dir, submodules):
-    # TODO: document
+    """
+    Used to load model modules from the main model file, when
+    split_views=True in the read_vensim function. It iterates over the
+    different levels of the nested dictionary that describes which model
+    variables belong to each module/submodule.
+
+    Parameters
+    ----------
+    module_name: str
+        Name of the module to load.
+
+    module_content: dict or list
+        Content of the module. If it is a dictionary, the module has
+        submodules; if it is a list, that particular module/submodule
+        is a final one.
+
+    work_dir: str
+        Path to the module file.
+
+    submodules: list
+        This list is updated at every recursive call, and each element
+        corresponds to the string representation of a module/submodule
+        that has been read.
+
+    Returns
+    -------
+    str:
+        String representation of the modules/submodules, to be executed
+        in the main model file.
+
+    """
     if isinstance(module_content, list):
         with open(os.path.join(work_dir, module_name + ".py"), "r") as file:
             submodules.append(file.read())

diff --git a/pysd/py_backend/vensim/vensim2py.py b/pysd/py_backend/vensim/vensim2py.py
index d658c6e7..f4eb1a5f 100644
--- a/pysd/py_backend/vensim/vensim2py.py
+++ b/pysd/py_backend/vensim/vensim2py.py
@@ -1435,7 +1435,7 @@ def visit_macro_call(self, n, vc):
         macro = [x for x in macro_list if x["py_name"] == py_name][
             0
         ]  # should match once
-        name, structure = builder.add_macro(
+        name, structure = builder.add_macro(element["py_name"],
             macro["py_name"], macro["file_name"], macro["params"], arglist
         )
         self.new_structure += structure

From 7f853f99e8db87401dd07a63b20ab3ef4160c390 Mon Sep 17 00:00:00 2001
From: Eneko Martin Martinez
Date: Tue, 28 Sep 2021 09:27:09 +0200
Subject: [PATCH 8/8] Add with block to open_module

---
 pysd/py_backend/utils.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py
index 45b87881..6c007e2b 100644
--- a/pysd/py_backend/utils.py
+++ b/pysd/py_backend/utils.py
@@ -850,8 +850,9 @@ def load_model_data(root_dir, model_name):
     return namespace, subscripts, modules


-def open_module(root_dir, model_name, module, submodule=None):
+def open_module(root_dir, model_name, module, submodule=None):  # pragma: no cover
     """
+    This function will be deprecated from release 2.0.
     Used to load model modules from the main model file, when
     split_views=True in the read_vensim function.
@@ -873,11 +874,11 @@ def open_module(root_dir, model_name, module, submodule=None): ------- str: Model file content. - """ + """ warnings.warn( "open_module function will be deprecated from release 2.0. Use " - + "load_modules instead.", + + "load_modules instead or translate the model again.", FutureWarning ) if not submodule: @@ -885,8 +886,9 @@ def open_module(root_dir, model_name, module, submodule=None): else: rel_file_path = os.path.join(module, submodule + ".py") - return open( - os.path.join(root_dir, "modules_" + model_name, rel_file_path)).read() + with open(os.path.join(root_dir, "modules_" + model_name, rel_file_path), + "r") as mod: + return mod.read() def load_modules(module_name, module_content, work_dir, submodules): @@ -922,8 +924,8 @@ def load_modules(module_name, module_content, work_dir, submodules): """ if isinstance(module_content, list): - with open(os.path.join(work_dir, module_name + ".py"), "r") as file: - submodules.append(file.read()) + with open(os.path.join(work_dir, module_name + ".py"), "r") as mod: + submodules.append(mod.read()) else: for submod_name, submod_content in module_content.items(): load_modules(