diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index da65bcb8..4a69d6fa 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -1,5 +1,6 @@ import warnings import re +from pathlib import Path import numpy as np import xarray as xr @@ -19,6 +20,8 @@ def read(cls, file_name, encoding=None): """ Read the columns from the data file or return the previously read ones """ + if isinstance(file_name, str): + file_name = Path(file_name) if file_name in cls._files: return cls._files[file_name] else: @@ -50,7 +53,7 @@ def read_file(cls, file_name, encoding=None): out = cls.read_line(file_name, encoding) if out is None: raise ValueError( - f"\nNot able to read '{file_name}'. " + f"\nNot able to read '{str(file_name)}'. " + "Only '.csv', '.tab' files are accepted.") transpose = False @@ -64,7 +67,7 @@ def read_file(cls, file_name, encoding=None): return out, transpose else: raise ValueError( - f"Invalid file format '{file_name}'... varible names " + f"Invalid file format '{str(file_name)}'... varible names " "should appear in the first row or in the first column...") @classmethod @@ -72,13 +75,13 @@ def read_line(cls, file_name, encoding=None): """ Read the firts row and return a set of it. """ - if file_name.lower().endswith(".tab"): + if file_name.suffix.lower() == ".tab": return set(pd.read_table(file_name, nrows=0, encoding=encoding, dtype=str, header=0).iloc[:, 1:]) - elif file_name.lower().endswith(".csv"): + elif file_name.suffix.lower() == ".csv": return set(pd.read_csv(file_name, nrows=0, encoding=encoding, @@ -92,12 +95,12 @@ def read_row(cls, file_name, encoding=None): """ Read the firts column and return a set of it. """ - if file_name.lower().endswith(".tab"): + if file_name.suffix.lower() == ".tab": return set(pd.read_table(file_name, usecols=[0], encoding=encoding, dtype=str).iloc[:, 0].to_list()) - elif file_name.lower().endswith(".csv"): + elif file_name.suffix.lower() == ".csv": return set(pd.read_csv(file_name, usecols=[0], encoding=encoding, @@ -236,7 +239,7 @@ def load_data(self, file_names): Resulting data array with the time in the first dimension. 
""" - if isinstance(file_names, str): + if isinstance(file_names, (str, Path)): file_names = [file_names] for file_name in file_names: @@ -248,7 +251,7 @@ def load_data(self, file_names): raise ValueError( f"_data_{self.py_name}\n" f"Data for {self.real_name} not found in " - f"{', '.join(file_names)}") + f"{', '.join([str(file_name) for file_name in file_names])}") def _load_data(self, file_name): """ diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index 485ac4e5..097874c0 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -6,6 +6,7 @@ import os import json +from pathlib import Path from chardet.universaldetector import UniversalDetector import regex as re @@ -452,13 +453,16 @@ def load_outputs(file_name, transpose=False, columns=None, encoding=None): """ read_func = {'.csv': pd.read_csv, '.tab': pd.read_table} + if isinstance(file_name, str): + file_name = Path(file_name) + if columns: columns = set(columns) if not transpose: columns.add("Time") for end, func in read_func.items(): - if file_name.lower().endswith(end): + if file_name.suffix.lower() == end: if transpose: out = func(file_name, encoding=encoding, diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py index 15a0e516..0e101fe0 100644 --- a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py +++ b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py @@ -10,7 +10,7 @@ @pytest.mark.parametrize( "model_path,subview_sep,variables,modules,n_deps,dep_vars", [ - ( + ( # split_views Path("more-tests/split_model/test_split_model.mdl"), [], ["stock"], @@ -18,7 +18,7 @@ (6, 1, 2, 0, 1), {"rate1": 4, "initial_stock": 2, "initial_stock_correction": 0} ), - ( + ( # split_subviews Path("more-tests/split_model/test_split_model_subviews.mdl"), ["."], [], @@ -28,7 +28,7 @@ } ), - ( + ( # split_sub_subviews Path("more-tests/split_model/test_split_model_sub_subviews.mdl"), [".", "-"], ["variablex"], @@ -37,6 +37,7 @@ {"another_var": 5, "look_up_definition": 3} ) ], + ids=["split_views", "split_subviews", "split_sub_subviews"] ) class TestSubmodel: """Submodel selecting class""" @@ -174,7 +175,7 @@ def test_select_submodel(self, model, variables, modules, @pytest.mark.parametrize( "model_path,split_views,module,raise_type,error_message", [ - ( + ( # module_not_found Path("more-tests/split_model/test_split_model.mdl"), True, "view_4", @@ -182,7 +183,7 @@ def test_select_submodel(self, model, variables, modules, "Module or submodule 'view_4' not found..." 
), - ( + ( # not_modularized_model Path("more-tests/split_model/test_split_model.mdl"), False, "view_1", @@ -191,6 +192,7 @@ def test_select_submodel(self, model, variables, modules, ) ], + ids=["module_not_found", "not_modularized_model"] ) class TestGetVarsInModuleErrors: @pytest.fixture diff --git a/tests/pytest_translation/vensim2py/pytest_split_views.py b/tests/pytest_translation/vensim2py/pytest_split_views.py index c8019981..1267bfe1 100644 --- a/tests/pytest_translation/vensim2py/pytest_split_views.py +++ b/tests/pytest_translation/vensim2py/pytest_split_views.py @@ -11,7 +11,7 @@ "model_path,subview_sep,modules,macros,original_vars,py_vars," + "stateful_objs", [ - ( + ( # split_views Path("more-tests/split_model/test_split_model.mdl"), [], ["view_1", "view2", "view_3"], @@ -20,7 +20,7 @@ ["another_var", "rate1", "varn", "variablex", "stock"], ["_integ_stock"] ), - ( + ( # split_subviews Path("more-tests/split_model/test_split_model_subviews.mdl"), ["."], ["view_1/submodule_1", "view_1/submodule_2", "view_2"], @@ -29,7 +29,7 @@ ["another_var", "rate1", "varn", "variablex", "stock"], ["_integ_stock"] ), - ( + ( # split_sub_subviews Path("more-tests/split_model/test_split_model_sub_subviews.mdl"), [".", "-"], [ @@ -43,7 +43,7 @@ "interesting_var_2", "great_var"], ["_integ_stock"] ), - ( + ( # split_macro Path("more-tests/split_model_with_macro/" + "test_split_model_with_macro.mdl"), [".", "-"], @@ -53,7 +53,7 @@ ["new_var"], ["_macro_macro_output"] ), - ( + ( # split_vensim_8_2_1 Path("more-tests/split_model_vensim_8_2_1/" + "test_split_model_vensim_8_2_1.mdl"), [], @@ -64,6 +64,8 @@ ["integ_teacup_temperature", "integ_cream_temperature"] ) ], + ids=["split_views", "split_subviews", "split_sub_subviews", "split_macro", + "split_vensim_8_2_1"] ) class TestSplitViews: """ @@ -152,18 +154,19 @@ def test_read_vensim_split_model(self, model_file, subview_sep, @pytest.mark.parametrize( "model_path,subview_sep,warning_message", [ - ( + ( # warning_noviews Path("test-models/samples/teacup/teacup.mdl"), [], "Only a single view with no subviews was detected. The model" + " will be built in a single file." ), - ( + ( # not_match_separator Path("more-tests/split_model/test_split_model_sub_subviews.mdl"), ["a"], "The given subview separators were not matched in any view name." 
), ], + ids=["warning_noviews", "not_match_separator"] ) class TestSplitViewsWarnings: """ diff --git a/tests/pytest_types/data/pytest_columns.py b/tests/pytest_types/data/pytest_columns.py new file mode 100644 index 00000000..2e6d81a5 --- /dev/null +++ b/tests/pytest_types/data/pytest_columns.py @@ -0,0 +1,137 @@ +import pytest +import itertools + +from pysd.py_backend.data import Columns + + +class TestColumns: + @pytest.fixture(scope="class") + def out_teacup(self, _root): + return _root.joinpath("data/out_teacup.csv") + + @pytest.fixture(scope="class") + def out_teacup_transposed(self, _root): + return _root.joinpath("data/out_teacup_transposed.csv") + + def test_clean_columns(self, out_teacup): + # test the singleton works well for laizy loading + Columns.clean() + assert Columns._files == {} + Columns.read(out_teacup) + assert Columns._files != {} + assert out_teacup in Columns._files + Columns.clean() + assert Columns._files == {} + + def test_transposed_frame(self, out_teacup, out_teacup_transposed): + # test loading transposed frames + cols1, trans1 = Columns.get_columns(out_teacup) + cols2, trans2 = Columns.get_columns(out_teacup_transposed) + Columns.clean() + + assert cols1 == cols2 + assert not trans1 + assert trans2 + + def test_get_columns(self, out_teacup, out_teacup_transposed): + # test getting specific columns by name + cols0, trans0 = Columns.get_columns(out_teacup) + + cols1, trans1 = Columns.get_columns( + out_teacup, + vars=["Room Temperature", "Teacup Temperature"]) + + cols2, trans2 = Columns.get_columns( + out_teacup_transposed, + vars=["Heat Loss to Room"]) + + cols3 = Columns.get_columns( + out_teacup_transposed, + vars=["No column"])[0] + + Columns.clean() + + assert cols1.issubset(cols0) + assert cols1 == set(["Room Temperature", "Teacup Temperature"]) + + assert cols2.issubset(cols0) + assert cols2 == set(["Heat Loss to Room"]) + + assert cols3 == set() + + assert not trans0 + assert not trans1 + assert trans2 + + def test_get_columns_subscripted(self, _root): + # test get subscripted columns + data_file = _root.joinpath( + "test-models/tests/subscript_3d_arrays_widthwise/output.tab" + ) + + data_file2 = _root.joinpath( + "test-models/tests/subscript_2d_arrays/output.tab" + ) + + subsd = { + "d3": ["Depth 1", "Depth 2"], + "d2": ["Column 1", "Column 2"], + "d1": ["Entry 1", "Entry 2", "Entry 3"] + } + + cols1 = Columns.get_columns( + data_file, + vars=["Three Dimensional Constant"])[0] + + expected = { + "Three Dimensional Constant[" + ",".join(el) + "]" + for el in itertools.product(subsd["d1"], subsd["d2"], subsd["d3"]) + } + + assert cols1 == expected + + cols2 = Columns.get_columns( + data_file2, + vars=["Rate A", "Stock A"])[0] + + subs = list(itertools.product(subsd["d1"], subsd["d2"])) + expected = { + "Rate A[" + ",".join(el) + "]" + for el in subs + } + + expected.update({ + "Stock A[" + ",".join(el) + "]" + for el in subs + }) + + assert cols2 == expected + + +@pytest.mark.parametrize( + "file,raise_type,error_message", + [ + ( # invalid_file_type + "more-tests/not_vensim/test_not_vensim.txt", + ValueError, + "Not able to read '%s'" + ), + ( # invalid_file_format + "data/out_teacup_no_head.csv", + ValueError, + "Invalid file format '%s'... varible names should appear" + + " in the first row or in the first column..." 
+ ) + ], + ids=["invalid_file_type", "invalid_file_format"] +) +class TestColumnsErrors: + # Test errors associated with Columns class + + @pytest.fixture + def file_path(self, _root, file): + return _root.joinpath(file) + + def test_columns_errors(self, file_path, raise_type, error_message): + with pytest.raises(raise_type, match=error_message % str(file_path)): + Columns.read_file(file_path) diff --git a/tests/pytest_types/data/pytest_data.py b/tests/pytest_types/data/pytest_data.py new file mode 100644 index 00000000..4aebd8d1 --- /dev/null +++ b/tests/pytest_types/data/pytest_data.py @@ -0,0 +1,56 @@ +import pytest + +import xarray as xr + +from pysd.py_backend.data import Data + + +@pytest.mark.parametrize( + "value,interp,raise_type,error_message", + [ + ( # not_loaded_data + None, + "interpolate", + ValueError, + "Trying to interpolate data variable before loading the data..." + ), + # test that try/except block on call doesn't catch errors differents + # than data = None + ( # try_except_1 + 3, + None, + TypeError, + "'int' object is not subscriptable" + ), + ( # try_except_2 + xr.DataArray([10, 20], {'dim1': [0, 1]}, ['dim1']), + None, + KeyError, + "'time'" + ), + ( # try_except_3 + xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), + None, + AttributeError, + "'Data' object has no attribute 'is_float'" + ) + ], + ids=["not_loaded_data", "try_except_1", "try_except_2", "try_except_3"] +) +@pytest.mark.filterwarnings("ignore") +class TestDataErrors(): + # Test errors associated with Data class + # Several Data cases are tested in unit_test_external while some other + # are tested indirectly in unit_test_pysd and integration_test_vensim + + @pytest.fixture + def data(self, value, interp): + obj = Data() + obj.data = value + obj.interp = interp + obj.py_name = "data" + return obj + + def test_data_errors(self, data, raise_type, error_message): + with pytest.raises(raise_type, match=error_message): + data(1.5) diff --git a/tests/pytest_types/data/pytest_data_with_model.py b/tests/pytest_types/data/pytest_data_with_model.py new file mode 100644 index 00000000..d6c74851 --- /dev/null +++ b/tests/pytest_types/data/pytest_data_with_model.py @@ -0,0 +1,139 @@ +import pytest +import shutil + +import numpy as np +import pandas as pd + +from pysd.tools.benchmarking import assert_frames_close +from pysd import read_vensim, load + + +@pytest.fixture(scope="module") +def data_folder(_root): + return _root.joinpath("more-tests/data_model/") + + +@pytest.fixture(scope="module") +def data_model(data_folder): + return data_folder.joinpath("test_data_model.mdl") + + +@pytest.fixture +def data_files(data_files_short, data_folder): + if isinstance(data_files_short, str): + return data_folder.joinpath(data_files_short) + elif isinstance(data_files_short, list): + return [data_folder.joinpath(df) for df in data_files_short] + else: + return { + data_folder.joinpath(df): value + for df, value in data_files_short.items() + } + + +times = np.arange(11) + + +@pytest.mark.parametrize( + "data_files_short,expected", + [ + ( # one_file + "data1.tab", + pd.DataFrame( + index=times, + data={'var1': times, "var2": 2*times, "var3": 3*times} + ) + ), + ( # two_files + ["data3.tab", + "data1.tab"], + pd.DataFrame( + index=times, + data={'var1': -times, "var2": -2*times, "var3": 3*times} + ) + + ), + ( # transposed_file + ["data2.tab"], + pd.DataFrame( + index=times, + data={'var1': times-5, "var2": 2*times-5, "var3": 3*times-5} + ) + + ), + ( # dict_file + {"data2.tab": ["\"data-3\""], + "data1.tab": ["data_1", 
"Data 2"]}, + pd.DataFrame( + index=times, + data={'var1': times, "var2": 2*times, "var3": 3*times-5} + ) + ) + + ], + ids=["one_file", "two_files", "transposed_file", "dict_file"] +) +class TestPySDData: + + @pytest.fixture + def model(self, data_model, data_files, shared_tmpdir): + # translated file + file = shared_tmpdir.joinpath(data_model.with_suffix(".py").name) + if file.is_file(): + # load already translated file + return load(file, data_files) + else: + # copy mdl file to tmp_dir and translate it + file = shared_tmpdir.joinpath(data_model.name) + shutil.copy(data_model, file) + return read_vensim(file, data_files) + + def test_get_data_and_run(self, model, expected): + assert_frames_close( + model.run(return_columns=["var1", "var2", "var3"]), + expected) + + +class TestPySDDataErrors: + def model(self, data_model, data_files, shared_tmpdir): + # translated file + file = shared_tmpdir.joinpath(data_model.with_suffix(".py").name) + if file.is_file(): + # load already translated file + return load(file, data_files) + else: + # copy mdl file to tmp_dir and translate it + file = shared_tmpdir.joinpath(data_model.name) + shutil.copy(data_model, file) + return read_vensim(file, data_files) + + def test_run_error(self, data_model, shared_tmpdir): + model = self.model(data_model, [], shared_tmpdir) + error_message = "Trying to interpolate data variable before loading"\ + + " the data..." + + with pytest.raises(ValueError, match=error_message): + model.run(return_columns=["var1", "var2", "var3"]) + + @pytest.mark.parametrize( + "data_files_short,raise_type,error_message", + [ + ( # missing_data + "data3.tab", + ValueError, + "Data for \"data-3\" not found in %s" + ), + ( # data_variable_not_found_from_dict_file + {"data1.tab": ["non-existing-var"]}, + ValueError, + "'non-existing-var' not found as model data variable" + ), + ], + ids=["missing_data", "data_variable_not_found_from_dict_file"] + ) + def test_loading_error(self, data_model, data_files, raise_type, + error_message, shared_tmpdir): + + with pytest.raises(raise_type, match=error_message % (data_files)): + self.model( + data_model, data_files, shared_tmpdir) diff --git a/tests/unit_test_data.py b/tests/unit_test_data.py deleted file mode 100644 index 2b1f07d0..00000000 --- a/tests/unit_test_data.py +++ /dev/null @@ -1,197 +0,0 @@ -import os -import itertools -import unittest - -import xarray as xr - -_root = os.path.dirname(__file__) - - -class TestColumns(unittest.TestCase): - def test_clean_columns(self): - from pysd.py_backend.data import Columns - Columns.clean() - self.assertEqual(Columns._files, {}) - Columns.read( - os.path.join(_root, "data/out_teacup.csv")) - self.assertNotEqual(Columns._files, {}) - self.assertIn(os.path.join(_root, "data/out_teacup.csv"), - Columns._files) - Columns.clean() - self.assertEqual(Columns._files, {}) - - def test_non_valid_outputs(self): - from pysd.py_backend.data import Columns - - with self.assertRaises(ValueError) as err: - Columns.read_file( - os.path.join( - _root, - "more-tests/not_vensim/test_not_vensim.txt")) - - self.assertIn( - "Not able to read '", - str(err.exception)) - self.assertIn( - "more-tests/not_vensim/test_not_vensim.txt'.", - str(err.exception)) - - def test_non_valid_file_format(self): - from pysd.py_backend.data import Columns - - file_name = os.path.join(_root, "data/out_teacup_no_head.csv") - with self.assertRaises(ValueError) as err: - Columns.read_file(file_name) - - self.assertIn( - f"Invalid file format '{file_name}'... 
varible names " - + "should appear in the first row or in the first column...", - str(err.exception)) - - def test_transposed_frame(self): - from pysd.py_backend.data import Columns - - cols1, trans1 = Columns.get_columns( - os.path.join(_root, "data/out_teacup.csv")) - cols2, trans2 = Columns.get_columns( - os.path.join(_root, "data/out_teacup_transposed.csv")) - Columns.clean() - - self.assertEqual(cols1, cols2) - self.assertFalse(trans1) - self.assertTrue(trans2) - - def test_get_columns(self): - from pysd.py_backend.data import Columns - - cols0, trans0 = Columns.get_columns( - os.path.join(_root, "data/out_teacup.csv")) - - cols1, trans1 = Columns.get_columns( - os.path.join(_root, "data/out_teacup.csv"), - vars=["Room Temperature", "Teacup Temperature"]) - - cols2, trans2 = Columns.get_columns( - os.path.join(_root, "data/out_teacup_transposed.csv"), - vars=["Heat Loss to Room"]) - - cols3 = Columns.get_columns( - os.path.join(_root, "data/out_teacup_transposed.csv"), - vars=["No column"])[0] - - Columns.clean() - - self.assertTrue(cols1.issubset(cols0)) - self.assertEqual( - cols1, - set(["Room Temperature", "Teacup Temperature"])) - - self.assertTrue(cols2.issubset(cols0)) - self.assertEqual( - cols2, - set(["Heat Loss to Room"])) - - self.assertEqual(cols3, set()) - - self.assertFalse(trans0) - self.assertFalse(trans1) - self.assertTrue(trans2) - - def test_get_columns_subscripted(self): - from pysd.py_backend.data import Columns - - data_file = os.path.join( - _root, - "test-models/tests/subscript_3d_arrays_widthwise/output.tab" - ) - - data_file2 = os.path.join( - _root, - "test-models/tests/subscript_2d_arrays/output.tab" - ) - - subsd = { - "d3": ["Depth 1", "Depth 2"], - "d2": ["Column 1", "Column 2"], - "d1": ["Entry 1", "Entry 2", "Entry 3"] - } - - cols1 = Columns.get_columns( - data_file, - vars=["Three Dimensional Constant"])[0] - - expected = { - "Three Dimensional Constant[" + ",".join(el) + "]" - for el in itertools.product(subsd["d1"], subsd["d2"], subsd["d3"]) - } - - self.assertEqual(cols1, expected) - - cols2 = Columns.get_columns( - data_file2, - vars=["Rate A", "Stock A"])[0] - - subs = list(itertools.product(subsd["d1"], subsd["d2"])) - expected = { - "Rate A[" + ",".join(el) + "]" - for el in subs - } - - expected.update({ - "Stock A[" + ",".join(el) + "]" - for el in subs - }) - - self.assertEqual(cols2, expected) - - -class TestData(unittest.TestCase): - # Several Data cases are tested in unit_test_external while some other - # are tested indirectly in unit_test_pysd and integration_test_vensim - - def test_no_data_error(self): - from pysd.py_backend.data import Data - obj = Data() - obj.data = None - obj.interp = "interpolate" - obj.py_name = "data" - with self.assertRaises(ValueError) as err: - obj(1.5) - - self.assertIn( - "Trying to interpolate data variable before loading the data...", - str(err.exception)) - - def test_invalid_data_regular_error(self): - # test that try/except block on call doesn't catch errors differents - # than data = None - from pysd.py_backend.data import Data - - obj = Data() - - obj.data = 3 - - with self.assertRaises(TypeError) as err: - obj(1.5) - - self.assertIn( - "'int' object is not subscriptable", - str(err.exception)) - - obj.data = xr.DataArray([10, 20], {'dim1': [0, 1]}, ['dim1']) - - with self.assertRaises(KeyError) as err: - obj(1.5) - - self.assertIn( - "'time'", - str(err.exception)) - - obj.data = xr.DataArray([10, 20], {'time': [0, 1]}, ['time']) - - with self.assertRaises(AttributeError) as err: - obj(1.5) - - 
self.assertIn( - "'Data' object has no attribute 'interp'", - str(err.exception)) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 3a1c9a28..3d9799ed 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -1579,103 +1579,6 @@ def test_change_constant_pipe(self): test_model_constant_pipe.replace(".mdl", ".py")) -class TestDataReading(unittest.TestCase): - data_folder = os.path.join(_root, "more-tests/data_model/") - data_model = os.path.join(data_folder, "test_data_model.mdl") - - def test_no_data_files_provided(self): - from pysd import read_vensim - model = read_vensim(self.data_model) - - with self.assertRaises(ValueError) as err: - model.run(return_columns=["var1", "var2", "var3"]) - - self.assertIn("Trying to interpolate data variable before loading" - " the data...", str(err.exception)) - - def test_missing_data(self): - from pysd import read_vensim - - with self.assertRaises(ValueError) as err: - read_vensim( - self.data_model, data_files=self.data_folder+"data3.tab") - - self.assertIn( - "Data for \"data-3\" not found in " - + self.data_folder + "data3.tab", - str(err.exception)) - - def test_get_data_variable_not_found_from_dict_file(self): - from pysd import read_vensim - - with self.assertRaises(ValueError) as err: - read_vensim( - self.data_model, - data_files={ - self.data_folder+"data1.tab": ["non-existing-var"]}) - - self.assertIn( - "'non-existing-var' not found as model data variable", - str(err.exception)) - - def test_get_data_from_one_file(self): - from pysd import read_vensim - - model = read_vensim( - self.data_model, data_files=self.data_folder+"data1.tab") - out = model.run(return_columns=["var1", "var2", "var3"]) - times = np.arange(11) - expected = pd.DataFrame( - index=times, - data={'var1': times, "var2": 2*times, "var3": 3*times}) - - assert_frames_close(out, expected) - - def test_get_data_from_two_file(self): - from pysd import read_vensim - - model = read_vensim( - self.data_model, - data_files=[self.data_folder+"data3.tab", - self.data_folder+"data1.tab"]) - out = model.run(return_columns=["var1", "var2", "var3"]) - times = np.arange(11) - expected = pd.DataFrame( - index=times, - data={'var1': -times, "var2": -2*times, "var3": 3*times}) - - assert_frames_close(out, expected) - - def test_get_data_from_transposed_file(self): - from pysd import read_vensim - - model = read_vensim( - self.data_model, - data_files=[self.data_folder+"data2.tab"]) - out = model.run(return_columns=["var1", "var2", "var3"]) - times = np.arange(11) - expected = pd.DataFrame( - index=times, - data={'var1': times-5, "var2": 2*times-5, "var3": 3*times-5}) - - assert_frames_close(out, expected) - - def test_get_data_from_dict_file(self): - from pysd import read_vensim - - model = read_vensim( - self.data_model, - data_files={self.data_folder+"data2.tab": ["\"data-3\""], - self.data_folder+"data1.tab": ["data_1", "Data 2"]}) - out = model.run(return_columns=["var1", "var2", "var3"]) - times = np.arange(11) - expected = pd.DataFrame( - index=times, - data={'var1': times, "var2": 2*times, "var3": 3*times-5}) - - assert_frames_close(out, expected) - - class TestExportImport(unittest.TestCase): def test_run_export_import_integ(self): from pysd import read_vensim
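A minimal usage sketch of the pathlib support added above (illustrative, not part of the patch; the file names are the ones used by the new tests and the relative paths assume the tests/ directory as working directory):

    from pathlib import Path

    from pysd import read_vensim
    from pysd.py_backend.utils import load_outputs

    data_dir = Path("more-tests/data_model")

    # data_files now accepts str or Path (single value, list or dict);
    # str inputs are converted to Path internally
    model = read_vensim(
        data_dir / "test_data_model.mdl",
        data_files=[data_dir / "data3.tab", data_dir / "data1.tab"],
    )
    result = model.run(return_columns=["var1", "var2", "var3"])

    # load_outputs now resolves the file type from Path.suffix
    # instead of str.endswith, so Path and str behave identically
    stocks = load_outputs(Path("data/out_teacup.csv"))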
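The ids added to the parametrized cases (split_views, split_subviews, split_macro, ...) also show up in the pytest node names, so a single case can be selected from the command line with the -k option, e.g. pytest pytest_split_views.py -k split_macro, instead of relying on the positional case number.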