diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml new file mode 100644 index 00000000..e425613b --- /dev/null +++ b/.github/workflows/build-docs.yml @@ -0,0 +1,27 @@ + +# Build html docs. Any warning building the docs will produce an error. + +name: Build docs + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.7' + - name: Install dependencies + run: | + pip install . + pip install -r docs/requirements.txt + - name: Test build html + run: | + cd docs + make html SPHINXOPTS="-W" diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml new file mode 100644 index 00000000..62174758 --- /dev/null +++ b/.github/workflows/link-check.yml @@ -0,0 +1,18 @@ +name: Link check + +on: + push: + pull_request: + +jobs: + linkChecker: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Link Checker + uses: lycheeverse/lychee-action@v1.5.1 + with: + fail: true + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 6cb92be8..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,3 +0,0 @@ -..under construction.. - -We are super welcoming of contributions, here's how: diff --git a/LICENSE b/LICENSE index 13c0d569..d4977050 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2017 James Houghton +Copyright (c) 2013-2022 PySD contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/MANIFEST.in b/MANIFEST.in index 248b7e48..d1c65b15 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,5 @@ include requirements.txt include README.md -include docs/images/PySD_Logo* include LICENSE +include docs/images/PySD_Logo* graft pysd/translators/*/parsing_grammars diff --git a/README.md b/README.md index 444df8bc..27428f2f 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,18 @@ PySD ==== - -[![Coverage Status](https://coveralls.io/repos/github/JamesPHoughton/pysd/badge.svg?branch=master)](https://coveralls.io/github/JamesPHoughton/pysd?branch=master) +[![Maintained](https://img.shields.io/badge/Maintained-Yes-brightgreen.svg)](https://github.com/SDXorg/pysd/pulse) +[![Coverage Status](https://coveralls.io/repos/github/SDXorg/pysd/badge.svg?branch=master)](https://coveralls.io/github/SDXorg/pysd?branch=master) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/pysd/badges/version.svg)](https://anaconda.org/conda-forge/pysd) [![PyPI version](https://badge.fury.io/py/pysd.svg)](https://badge.fury.io/py/pysd) [![PyPI status](https://img.shields.io/pypi/status/pysd.svg)](https://pypi.python.org/pypi/pysd/) [![Py version](https://img.shields.io/pypi/pyversions/pysd.svg)](https://pypi.python.org/pypi/pysd/) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5654824.svg)](https://doi.org/10.5281/zenodo.5654824) +[![Contributions](https://img.shields.io/badge/contributions-welcome-blue.svg)](https://pysd.readthedocs.io/en/latest/development/development_index.html) [![Docs](https://readthedocs.org/projects/pysd/badge/?version=latest)](https://pysd.readthedocs.io/en/latest/?badge=latest) -![PySD 
Logo](https://raw.githubusercontent.com/JamesPHoughton/pysd/5cc4fe5dc65e6b5140a00e87a1be9d261570ee8d/docs/images/PySD_Logo_letters.svg?style=centerme) +![PySD Logo](https://raw.githubusercontent.com/SDXorg/pysd/5cc4fe5dc65e6b5140a00e87a1be9d261570ee8d/docs/images/PySD_Logo_letters.svg?style=centerme) -This project is a simple library for running [System Dynamics](http://en.wikipedia.org/wiki/System_dynamics) models in python, with the purpose of improving integration of *Big Data* and *Machine Learning* into the SD workflow. +This project is a library for running [System Dynamics](http://en.wikipedia.org/wiki/System_dynamics) models in Python, with the purpose of improving integration of *Big Data* and *Machine Learning* into the SD workflow. **The current version needs to run at least Python 3.7.** @@ -22,13 +23,13 @@ See the [project documentation](http://pysd.readthedocs.org/) for information ab - [Installation](http://pysd.readthedocs.org/en/latest/installation.html) - [Getting Started](http://pysd.readthedocs.org/en/latest/getting_started.html) -For standard methods for data analysis with SD models, see the [PySD Cookbook](https://github.com/JamesPHoughton/PySD-Cookbook), containing (for example): +For standard methods for data analysis with SD models, see the [PySD Cookbook](https://github.com/SDXorg/PySD-Cookbook), containing (for example): -- [Model Fitting](http://nbviewer.ipython.org/github/JamesPHoughton/PySD-Cookbook/blob/master/2_1_Fitting_with_Optimization.ipynb) -- [Surrogating model components with machine learning regressions](http://nbviewer.ipython.org/github/JamesPHoughton/PySD-Cookbook/blob/master/6_1_Surrogating_with_regression.ipynb) -- [Multi-Scale geographic comparison of model predictions](http://nbviewer.ipython.org/github/JamesPHoughton/PySD-Cookbook/blob/master/Exploring%20models%20across%20geographic%20scales.ipynb) +- [Model Fitting](http://nbviewer.ipython.org/github/SDXorg/PySD-Cookbook/blob/master/source/analyses/fitting/Fitting_with_Optimization.ipynb) +- [Surrogating model components with machine learning regressions](http://nbviewer.ipython.org/github/SDXorg/PySD-Cookbook/blob/master/source/analyses/surrogating_functions/Surrogating_with_regression.ipynb) +- [Multi-Scale geographic comparison of model predictions](http://nbviewer.ipython.org/github/SDXorg/PySD-Cookbook/blob/master/source/analyses/geo/Exploring_models_across_geographic_scales.ipynb) -If you use PySD in any published work, consider citing the [PySD Introductory Paper](https://github.com/JamesPHoughton/pysd/blob/master/docs/PySD%20Intro%20Paper%20Preprint.pdf): +If you use PySD in any published work, consider citing the [PySD Introductory Paper](https://github.com/SDXorg/pysd/blob/master/docs/PySD%20Intro%20Paper%20Preprint.pdf): >Houghton, James; Siegel, Michael. "Advanced data analytics for system dynamics models using PySD." *Proceedings of the 33rd International Conference of the System Dynamics Society.* 2015. @@ -50,19 +51,17 @@ If you'd like to work with this repository directly, you'll need to use a recurs The command should be something like: ```shell -git clone --recursive https://github.com/JamesPHoughton/pysd.git +git clone --recursive https://github.com/SDXorg/pysd.git ``` ### Extensions You can use PySD in [R](https://www.r-project.org/) via the [PySD2R](https://github.com/JimDuggan/pysd2r) package, also available on [cran](https://CRAN.R-project.org/package=pysd2r). 
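### Basic usage

As a minimal sketch of the workflow described in the documentation (the model file and variable names below are placeholders):

```python
import pysd

model = pysd.read_vensim("Teacup.mdl")  # translate the Vensim model to Python
results = model.run()                   # simulate; returns a pandas DataFrame
print(results["Teacup Temperature"])    # hypothetical variable from the model
```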
-### Contributors +### Contributing -Many people have contributed to developing this project - by -[submitting code](https://github.com/JamesPHoughton/pysd/graphs/contributors), bug reports, and advice. +PySD is currently a community-maintained project; any contribution is welcome. -Special thanks to the [sdCloud.io](http://sdcloud.io) development team, who have -made great contributions to XMILE support, and for integrating PySD into their cloud-based model simulation environment. +Many people have contributed to developing this project - by [submitting code](https://github.com/SDXorg/pysd/graphs/contributors), bug reports, and advice. Main historic changes in PySD are described in the [About PySD section](https://pysd.readthedocs.io/en/latest/about.html). The [Developer Documentation](https://pysd.readthedocs.io/en/latest/development/development_index.html) may help new developers get started. -Extra special thanks to [@enekomartinmartinez](https://github.com/enekomartinmartinez) for dramatically pushing forward subscript capabilities (and many other attributes). +The code for this package is available at: https://github.com/SDXorg/pysd diff --git a/docs/about.rst b/docs/about.rst index 05fa7636..6c7f0af0 100644 --- a/docs/about.rst +++ b/docs/about.rst @@ -1,6 +1,21 @@ About the Project ================= +PySD was created in 2014 by `James P Houghton `_ to translate Vensim models to Python. The original goal for translating SD models into Python was to be able to take advantage of all the tools available in Python and thus to extend what is possible using Vensim. + +Since the creation of the library, many people have contributed to the project by reporting and fixing bugs and adding new features. These contributions are listed in the `contributions section of the GitHub repository `_. + +Some of the big changes that have allowed PySD to get to its current state are the development of an XMILE to Python translator in 2017 by `Alex Prey `_ and the restructuring of the translation and model building through an Abstract Syntax by `Eneko Martin-Martinez `_ in 2022. + +Other notable contributions before release 3.0.0 include: + +- `Julien Malard-Adam `_ added Unicode support for the Vensim parser. +- The `sdCloud.io `_ development team made great contributions to improve XMILE support and integrated PySD into their cloud-based model simulation environment. +- `Eneko Martin-Martinez `_ pushed forward the subscript capabilities for both Vensim and XMILE, added support for several Vensim functions, and improved performance. +- `Roger Samsó `_ included a parser for the Vensim sketch and added the option to split a Vensim model per view, based on the sketch information. + +The changes made since release 3.0.0 are tracked in the :doc:`whats_new` section. + Motivation: The (coming of) age of Big Data ------------------------------------------- @@ -28,5 +43,3 @@ A third category of tools imports the models created by traditional tools to per The central paradigm of PySD is that it is more efficient to bring the mature capabilities of system dynamics into an environment in use for active development in data science, than to attempt to bring each new development in inference and machine learning into the system dynamics enclave. PySD reads a model file – the product of a modeling program such as Vensim or Stella/iThink – and cross compiles it into Python, providing a simulation engine that can run these models natively in the Python environment.
It is not a substitute for these tools, and cannot be used to replace a visual model construction environment. - - diff --git a/docs/command_line_usage.rst b/docs/command_line_usage.rst index 385a44e7..b959ee8a 100644 --- a/docs/command_line_usage.rst +++ b/docs/command_line_usage.rst @@ -26,11 +26,11 @@ In order to set the output file path, the *-o/--output-file* argument can be used: python -m pysd -o my_output_file.csv Teacup.mdl .. note:: - The output file can be a *.csv* or *.tab*. + The output file format may be *.csv*, *.tab* or *.nc*. .. note:: - If *-o/--output-file* is not given, the output will be saved in a file - that starts with the model file name followed by a time stamp to avoid + If *-o/--output-file* is not given, the output will be saved in a *.tab* + file that starts with the model file name followed by a time stamp to avoid overwriting files.
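For instance, to run the model above and write the results directly to a netCDF file (a sketch that assumes the optional netCDF4 dependency is installed)::

    python -m pysd -o results.nc Teacup.mdl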
Activate progress bar diff --git a/docs/conf.py b/docs/conf.py index 1fdb00fb..f4a26a0d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -58,8 +58,8 @@ ] extlinks = { - "issue": ("https://github.com/JamesPHoughton/pysd/issues/%s", "issue #%s"), - "pull": ("https://github.com/JamesPHoughton/pysd/pull/%s", "PR #%s"), + "issue": ("https://github.com/SDXorg/pysd/issues/%s", "issue #%s"), + "pull": ("https://github.com/SDXorg/pysd/pull/%s", "PR #%s"), } # Add any paths that contain templates here, relative to this directory. @@ -74,8 +74,8 @@ # General information about the project. project = 'PySD' -copyright = '2016, James Houghton' -author = 'James Houghton' +copyright = '2022, PySD contributors' +author = 'PySD contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -128,7 +128,7 @@ # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'PySD.tex', 'PySD Documentation', - 'James Houghton', 'manual'), + 'PySD contributors', 'manual'), ] # -- Options for manual page output --------------------------------------- diff --git a/docs/development/guidelines.rst b/docs/development/guidelines.rst index e1f884f8..0df3b399 100644 --- a/docs/development/guidelines.rst +++ b/docs/development/guidelines.rst @@ -25,7 +25,7 @@ of the `/tests/` directory. In order to run all the tests :py:mod:`pytest` should be used. A `Makefile` is provided to run the tests more easily with :py:mod:`pytest`; check -`tests/README `_ +`tests/README `_ for more information. These tests run quickly and should be executed when any changes are made to ensure diff --git a/docs/development/pathway.rst b/docs/development/pathway.rst index 0580fef2..e09187ff 100644 --- a/docs/development/pathway.rst +++ b/docs/development/pathway.rst @@ -2,7 +2,7 @@ PySD Development Pathway ======================== High priority features, bugs, and other elements of active effort are listed on the `github issue -tracker. `_ To get involved see :doc:`guidelines`. +tracker. `_ To get involved see :doc:`guidelines`. High Priority diff --git a/docs/generate_tables.py b/docs/generate_tables.py index 29fa20ca..38b6b8dd 100644 --- a/docs/generate_tables.py +++ b/docs/generate_tables.py @@ -1,6 +1,7 @@ -import pandas as pd from pathlib import Path +import pandas as pd + def generate(table, columns, output): """Generate markdown table.""" diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 865ecbd1..bfb739bc 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -1,6 +1,14 @@ Getting Started =============== +.. note:: + A cookbook of simple recipes for advanced data analytics using PySD is available at: + http://pysd-cookbook.readthedocs.org/ + + The cookbook includes models, sample data, and code in the form of IPython notebooks that demonstrate a variety of data integration and analysis tasks. + These models can be executed on your local machine, and modified to suit your particular analysis requirements. + + Importing a model and getting started ------------------------------------- To begin, we must first load the PySD module, and use it to import a model file:: @@ -156,6 +164,21 @@ The subscripted variables, in general, will be returned as :py:class:`xarray.Dat >>> model.run(flatten_output=True) + +Storing simulation results in a file +------------------------------------ +Simulation results can be stored as *.csv*, *.tab* or *.nc* (netCDF4) files by defining the desired output file path in the `output_file` argument when calling the :py:meth:`.run` method:: + + >>> model.run(output_file="results.tab") + +If the `output_file` is not set, the :py:meth:`.run` method will return a :py:class:`pandas.DataFrame`. + +For most cases, the *.tab* file format is the safest choice. It is preferable to the *.csv* format when the model includes subscripted variables. The *.nc* format is recommended for large models, and when the user wants to keep metadata such as variable units and descriptions. + +.. warning:: + *.nc* files require the :py:mod:`netCDF4` library, which is an optional dependency and thus not installed automatically with the package. We recommend using :py:mod:`netCDF4` 1.6.0 or above; however, it will also work with :py:mod:`netCDF4` 1.5.0 or above. + +
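Saved results can be loaded back with standard tools; the following is an illustrative sketch rather than PySD API (the file names reuse the examples above, and the `Time` index label follows PySD's usual output convention)::

    >>> import pandas as pd
    >>> result_df = pd.read_table("results.tab", index_col="Time")

    >>> import xarray as xr
    >>> result_ds = xr.open_dataset("results.nc")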
Setting parameter values ------------------------ In some situations we may want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:meth:`.run` method gives us a convenient method in the `params` keyword argument. @@ -174,7 +197,7 @@ If the parameter value to change is a subscripted variable (vector, matrix...), >>> model.run(params={'Subscripted var': 0}) -A partial :py:class:`xarray.DataArray` can be used. For example a new variable with ‘dim2’ but not ‘dim2’. In that case, the result will be repeated in the remaining dimensions:: +A partial :py:class:`xarray.DataArray` can be used. For example, a new variable with ‘dim2’ but not ‘dim1’. In that case, the result will be repeated in the remaining dimensions:: >>> import xarray as xr >>> new_value = xr.DataArray([1, 5], {'dim2': [1, 2]}, ['dim2']) diff --git a/docs/index.rst b/docs/index.rst index 99d7d54d..0201a1a5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,20 +4,25 @@ PySD |made-with-sphinx-doc| |DOI| +|Maintained| |PyPI license| |conda package| |PyPI package| |PyPI status| |PyPI pyversions| +|Contributions| .. |made-with-sphinx-doc| image:: https://img.shields.io/badge/Made%20with-Sphinx-1f425f.svg :target: https://www.sphinx-doc.org/ +.. |Maintained| image:: https://img.shields.io/badge/Maintained-Yes-brightgreen.svg + :target: https://github.com/SDXorg/pysd/pulse + .. |docs| image:: https://readthedocs.org/projects/pysd/badge/?version=latest :target: https://pysd.readthedocs.io/en/latest/?badge=latest .. |PyPI license| image:: https://img.shields.io/pypi/l/sdqc.svg - :target: https://github.com/JamesPHoughton/pysd/blob/master/LICENSE + :target: https://github.com/SDXorg/pysd/blob/master/LICENSE +.. |PyPI package| image:: https://badge.fury.io/py/pysd.svg :target: https://badge.fury.io/py/pysd @@ -34,6 +39,9 @@ PySD .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.5654824.svg :target: https://doi.org/10.5281/zenodo.5654824 +.. |Contributions| image:: https://img.shields.io/badge/contributions-welcome-blue.svg + :target: https://pysd.readthedocs.io/en/latest/development/development_index.html + This project is a simple library for running System Dynamics models in Python, with the purpose of improving integration of Big Data and Machine Learning into the SD workflow. PySD translates :doc:`Vensim ` or @@ -63,16 +71,19 @@ The cookbook includes models, sample data, and code in the form of iPython noteb Contributing ^^^^^^^^^^^^ -The code for this package is available at: https://github.com/JamesPHoughton/pysd +|Contributions| + +PySD is currently a community-maintained project; any contribution is welcome. + +The code for this package is available at: https://github.com/SDXorg/pysd -If you find a bug, or are interested in a particular feature, see :doc:`reporting bugs <../reporting_bugs>`. +If you find any bug or are interested in a particular feature, see :doc:`reporting bugs <../reporting_bugs>`. -If you are interested in contributing to the development of PySD, see the :doc:`developer documentation <../development/development_index>` -listed above. +If you are interested in contributing to the development of PySD, see the :doc:`developer documentation <../development/development_index>` listed above. Citing ^^^^^^ -If you use PySD in any published work, consider citing the `PySD Introductory Paper `_:: +If you use PySD in any published work, consider citing the `PySD Introductory Paper `_:: Houghton, James; Siegel, Michael. "Advanced data analytics for system dynamics models using PySD." *Proceedings of the 33rd International Conference of the System Dynamics Society.* 2015. diff --git a/docs/installation.rst b/docs/installation.rst index f32a5027..6f5cf8ed 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -23,9 +23,9 @@ To install from source, clone the project with git: .. code-block:: bash - git clone https://github.com/JamesPHoughton/pysd.git + git clone https://github.com/SDXorg/pysd.git -or download the latest version from the project repository: https://github.com/JamesPHoughton/pysd +or download the latest version from the project repository: https://github.com/SDXorg/pysd In the source directory use the command: @@ -66,6 +66,10 @@ In order to plot model outputs as shown in :doc:`Getting started <../getting_sta * Matplotlib +In order to be able to export data to netCDF (*.nc*) files: + +* netCDF4 +
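The optional netCDF dependency can be installed with pip, for example:

.. code-block:: bash

    pip install netCDF4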
These Python libraries bring additional data analytics capabilities to the analysis of SD models: @@ -82,5 +86,5 @@ These modules can be installed using pip with a syntax similar to the above. Additional Resources -------------------- -The `PySD Cookbook `_ contains recipes that can help you get set up with PySD. +The `PySD Cookbook `_ contains recipes that can help you get set up with PySD. diff --git a/docs/reporting_bugs.rst b/docs/reporting_bugs.rst index 860a4c67..4ef4d384 100644 --- a/docs/reporting_bugs.rst +++ b/docs/reporting_bugs.rst @@ -3,7 +3,7 @@ Reporting bugs Before reporting any bug, please make sure that you are using the latest version of PySD. You can get the version number by running `python -m pysd -v` on the command line. -All bugs must be reported in the project's `issue tracker on github `_. +All bugs must be reported in the project's `issue tracker on github `_. .. note:: Not all the features and functions are implemented. If you are in trouble while translating or running a Vensim or Xmile model check the :ref:`Vensim supported functions ` or :ref:`Xmile supported functions ` and consider that when opening a new issue. diff --git a/docs/requirements.txt b/docs/requirements.txt index b97418bc..563a2af5 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,3 +4,5 @@ sphinx==4.2.0 sphinx_rtd_theme==1.0.0 readthedocs-sphinx-search==0.1.1 jinja2==3.0.0 +mock +pandas \ No newline at end of file diff --git a/docs/whats_new.rst b/docs/whats_new.rst index f59c4729..d13f6b2c 100644 --- a/docs/whats_new.rst +++ b/docs/whats_new.rst @@ -1,5 +1,50 @@ What's New ========== +v3.7.0 (2022/09/19) +------------------- + +New Features +~~~~~~~~~~~~ +- Simulation results can now be stored as netCDF4 files. (`@rogersamso `_) +- The CLI also accepts netCDF4 file paths after the -o argument. (`@rogersamso `_) + +Breaking changes +~~~~~~~~~~~~~~~~ + +Deprecations +~~~~~~~~~~~~ + +Bug fixes +~~~~~~~~~ +- Fix bug when a WITH LOOKUPS argument has subscripts. (`@enekomartinmartinez `_) +- Fix bug when exporting csv files with multiple subscripted variables. (`@rogersamso `_) +- Fix bug of missing dimensions in variables defined with only some of the subscripts of a range (:issue:`364`). (`@enekomartinmartinez `_) +- Fix bug when running a model with variable final time or time step and progressbar (:issue:`361`). (`@enekomartinmartinez `_) + +Documentation +~~~~~~~~~~~~~ +- Add the `Storing simulation results in a file` section in the :doc:`getting_started` page. (`@rogersamso `_) +- Include cookbook information in the :doc:`getting_started` page. (`@enekomartinmartinez `_) +- Include an introduction to the main historical changes in the :doc:`about` page. (`@enekomartinmartinez `_) + +Performance +~~~~~~~~~~~ +- Exporting outputs as netCDF4 is much faster than exporting a pandas DataFrame, especially for large models. (`@rogersamso `_) + +Internal Changes +~~~~~~~~~~~~~~~~ +- Make PySD work with :py:mod:`parsimonious` 0.10.0. (`@enekomartinmartinez `_) +- Add netCDF4 dependency for tests. (`@rogersamso `_) +- Improve the warning message when replacing a stock with a parameter. (`@enekomartinmartinez `_) +- Include more pytest parametrizations in some tests and make them translate the models in temporary directories. (`@enekomartinmartinez `_) +- Include lychee-action in the GHA workflow to check the links. (`@enekomartinmartinez `_) +- Update License. (`@enekomartinmartinez `_) +- Include `Maintained? Yes` and `Contributions welcome` badges. (`@enekomartinmartinez `_) +- Update links to the new repository location. (`@enekomartinmartinez `_) +- Reduce the relative precision used to compute the saving times and final time from 1e-10 to 1e-5. (`@enekomartinmartinez `_) +- Add convergence tests for the Euler integration method. (`@enekomartinmartinez `_) +- Include a docs build check in the GHA workflow to avoid sphinx warnings. (`@enekomartinmartinez `_) v3.6.1 (2022/09/05) ------------------- @@ -23,15 +68,15 @@ Performance Internal Changes ~~~~~~~~~~~~~~~~ -- Set :py:mod:`parsimonius` requirement to 0.9.0 to avoid a breaking-change in the newest version. Pending to update PySD to run it with :py:mod:`parsimonious` 0.10.0. +- Set :py:mod:`parsimonious` requirement to 0.9.0 to avoid a breaking change in the newest version. An update of PySD to run with :py:mod:`parsimonious` 0.10.0 is pending.
(`@enekomartinmartinez `_) v3.6.0 (2022/08/31) ------------------- New Features ~~~~~~~~~~~~ -- Include warning messages when a variable is defined in more than one view, when a control variable appears in a view or when a variable doesn't appear in any view as a `workbench variable` (:issue:`357`). -- Force variables in a module to be saved alphabetically for being able to compare differences between versions (only for the models that are split by views). +- Include warning messages when a variable is defined in more than one view, when a control variable appears in a view, or when a variable doesn't appear in any view as a `workbench variable` (:issue:`357`). (`@enekomartinmartinez `_) +- Force variables in a module to be saved alphabetically so that differences between versions can be compared (only for models that are split by views). (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ @@ -41,7 +86,7 @@ Deprecations Bug fixes ~~~~~~~~~ -- Classify control variables in the main file always (:issue:`357`). +- Always classify control variables in the main file (:issue:`357`). (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ @@ -51,7 +96,7 @@ Performance Internal Changes ~~~~~~~~~~~~~~~~ -- Include :py:class:`pysd.translators.structures.abstract_model.AbstractControlElement` child of :py:class:`pysd.translators.structures.abstract_model.AbstractElement` to differentiate the control variables. +- Include :py:class:`pysd.translators.structures.abstract_model.AbstractControlElement`, a child of :py:class:`pysd.translators.structures.abstract_model.AbstractElement`, to differentiate the control variables. (`@enekomartinmartinez `_) v3.5.2 (2022/08/15) ------------------- New Features ~~~~~~~~~~~~ Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ -- Make sketch's `font_size` optional. +- Make the sketch's `font_size` optional. (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ @@ -94,16 +139,16 @@ Deprecations Bug fixes ~~~~~~~~~ -- Fix bug generated when :EXCEPT: keyword is used with subscript subranges (:issue:`352`). -- Fix bug of precision error for :py:func:`pysd.py_backend.allocation.allocate_by_priority` (:issue:`353`). -- Fix bug of constant cache assignment. +- Fix bug generated when :EXCEPT: keyword is used with subscript subranges (:issue:`352`). (`@enekomartinmartinez `_) +- Fix bug of precision error for :py:func:`pysd.py_backend.allocation.allocate_by_priority` (:issue:`353`). (`@enekomartinmartinez `_) +- Fix bug of constant cache assignment. (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ Performance ~~~~~~~~~~~ -- Improve the performance of reading :py:class:`pysd.py_backend.external.External` data with cellrange names by loading the data in memory with :py:mod:`pandas`. As recommended by :py:mod:`openpyxl` developers, this is a possible way of improving performance to avoid parsing all rows up each time for getting the data (`issue 1867 in openpyxl `_). +- Improve the performance of reading :py:class:`pysd.py_backend.external.External` data with cellrange names by loading the data in memory with :py:mod:`pandas`. As recommended by the :py:mod:`openpyxl` developers, this is a possible way of improving performance, as it avoids parsing all rows each time the data is retrieved (`issue 1867 in openpyxl `_). (`@enekomartinmartinez `_) Internal Changes ~~~~~~~~~~~~~~~~ @@ -113,7 +158,7 @@ v3.5.0 (2022/07/25) ------------------- New Features ~~~~~~~~~~~~ -- Add support for subscripted arguments in :py:func:`pysd.py_backend.functions.ramp` and :py:func:`pysd.py_backend.functions.step` (:issue:`344`).
+- Add support for subscripted arguments in :py:func:`pysd.py_backend.functions.ramp` and :py:func:`pysd.py_backend.functions.step` (:issue:`344`). (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ -- Fix bug related to the order of elements in 1D GET expressions (:issue:`343`). -- Fix bug in request 0 values in allocate by priority (:issue:`345`). -- Fix a numerical error in starting time of step and ramp. +- Fix bug related to the order of elements in 1D GET expressions (:issue:`343`). (`@enekomartinmartinez `_) +- Fix bug when requesting 0 values in allocate by priority (:issue:`345`). (`@enekomartinmartinez `_) +- Fix a numerical error in the starting time of step and ramp. (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ -- Include new PySD logo. +- Include the new PySD logo. (`@enekomartinmartinez `_) Performance ~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ -- Ignore 'distutils Version classes are deprecated. Use packaging.version instead' error in tests as it is an internal error of `xarray`. -- Add a warning message when a subscript range is duplicated in a variable reference. +- Ignore 'distutils Version classes are deprecated. Use packaging.version instead' error in tests, as it is an internal error of `xarray`. (`@enekomartinmartinez `_) +- Add a warning message when a subscript range is duplicated in a variable reference. (`@enekomartinmartinez `_) v3.4.0 (2022/06/29) ------------------- New Features ~~~~~~~~~~~~ -- Add support for Vensim's `ALLOCATE AVAILABLE `_ (:py:func:`pysd.py_backend.allocation.allocate_available`) function (:issue:`339`). Integer allocation cases have not been implemented neither the fixed quantity and constant elasticity curve priority functions. +- Add support for Vensim's `ALLOCATE AVAILABLE `_ (:py:func:`pysd.py_backend.allocation.allocate_available`) function (:issue:`339`). Integer allocation cases have not been implemented, nor have the fixed quantity and constant elasticity curve priority functions. (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ Documentation ~~~~~~~~~~~~~ -- Improve the documentation of the :py:mod:`pysd.py_backend.allocation` module. +- Improve the documentation of the :py:mod:`pysd.py_backend.allocation` module. (`@enekomartinmartinez `_) Performance ~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ -- Add a class to manage priority profiles so it can be also used by the `many-to-many allocation `_. +- Add a class to manage priority profiles so it can also be used by the `many-to-many allocation `_. (`@enekomartinmartinez `_) v3.3.0 (2022/06/22) ------------------- New Features ~~~~~~~~~~~~ -- Add support for Vensim's `ALLOCATE BY PRIORITY `_ (:py:func:`pysd.py_backend.allocation.allocate_by_priority`) function (:issue:`263`). +- Add support for Vensim's `ALLOCATE BY PRIORITY `_ (:py:func:`pysd.py_backend.allocation.allocate_by_priority`) function (:issue:`263`). (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ -- Fix bug when using subranges to define a bigger range (:issue:`335`). +- Fix bug when using subranges to define a bigger range (:issue:`335`). (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ Performance ~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ -- Improve error messages for :class:`pysd.py_backend.External` objects. +- Improve error messages for :class:`pysd.py_backend.External` objects.
(`@enekomartinmartinez `_) v3.2.0 (2022/06/10) ------------------- New Features ~~~~~~~~~~~~ -- Add support for Vensim's `GET TIME VALUE `_ (:py:func:`pysd.py_backend.functions.get_time_value`) function (:issue:`332`). Not all cases have been implemented. -- Add support for Vensim's `VECTOR SELECT `_ (:py:func:`pysd.py_backend.functions.vector_select`) function (:issue:`266`). +- Add support for Vensim's `GET TIME VALUE `_ (:py:func:`pysd.py_backend.functions.get_time_value`) function (:issue:`332`). Not all cases have been implemented. (`@enekomartinmartinez `_) +- Add support for Vensim's `VECTOR SELECT `_ (:py:func:`pysd.py_backend.functions.vector_select`) function (:issue:`266`). (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ @@ -228,9 +273,9 @@ v3.1.0 (2022/06/02) New Features ~~~~~~~~~~~~ -- Add support for Vensim's `VECTOR SORT ORDER `_ (:py:func:`pysd.py_backend.functions.vector_sort_order`) function (:issue:`326`). -- Add support for Vensim's `VECTOR RANK `_ (:py:func:`pysd.py_backend.functions.vector_rank`) function (:issue:`326`). -- Add support for Vensim's `VECTOR REORDER `_ (:py:func:`pysd.py_backend.functions.vector_reorder`) function (:issue:`326`). +- Add support for Vensim's `VECTOR SORT ORDER `_ (:py:func:`pysd.py_backend.functions.vector_sort_order`) function (:issue:`326`). (`@enekomartinmartinez `_) +- Add support for Vensim's `VECTOR RANK `_ (:py:func:`pysd.py_backend.functions.vector_rank`) function (:issue:`326`). (`@enekomartinmartinez `_) +- Add support for Vensim's `VECTOR REORDER `_ (:py:func:`pysd.py_backend.functions.vector_reorder`) function (:issue:`326`). (`@enekomartinmartinez `_) Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ Documentation ~~~~~~~~~~~~~ -- Add the section :doc:`/development/adding_functions` with examples for developers. +- Add the section :doc:`/development/adding_functions` with examples for developers. (`@enekomartinmartinez `_) Performance ~~~~~~~~~~~ @@ -269,7 +314,7 @@ Deprecations Bug fixes ~~~~~~~~~ -- Simplify subscripts dictionaries for :py:class:`pysd.py_backend.data.TabData` objects. +- Simplify subscripts dictionaries for :py:class:`pysd.py_backend.data.TabData` objects. (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ Performance ~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ -- Add Python 3.10 to CI pipeline and include it in the supported versions list. -- Correct LICENSE file extension in the `setup.py`. -- Move from `importlib`'s :py:func:`load_module` to :py:func:`exec_module`. -- Remove warnings related to :py:data:`set` usage. -- Move all the missing test to :py:mod:`pytest`. -- Remove warning messages from test and make test fail if there is any warning. +- Add Python 3.10 to CI pipeline and include it in the supported versions list. (`@enekomartinmartinez `_) +- Correct LICENSE file extension in the `setup.py`. (`@enekomartinmartinez `_) +- Move from `importlib`'s :py:func:`load_module` to :py:func:`exec_module`. (`@enekomartinmartinez `_) +- Remove warnings related to :py:data:`set` usage. (`@enekomartinmartinez `_) +- Move all the missing tests to :py:mod:`pytest`. (`@enekomartinmartinez `_) +- Remove warning messages from tests and make tests fail if there is any warning. (`@enekomartinmartinez `_) v3.0.0 (2022/05/23) ------------------- New Features ~~~~~~~~~~~~ -- The new :doc:`Abstract Model Representation ` translation and building workflow will allow to add new output languages in the future.
-- Added new properties to the :py:class:`pysd.py_backend.model.Macro` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. -- Cleaner Python models: +- The new :doc:`Abstract Model Representation ` translation and building workflow will allow adding new output languages in the future. (`@enekomartinmartinez `_) +- Added new properties to the :py:class:`pysd.py_backend.model.Macro` to make some information more accessible: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. (`@enekomartinmartinez `_) +- Cleaner Python models: (`@enekomartinmartinez `_) - :py:data:`_namespace` and :py:data:`_dependencies` dictionaries have been removed from the file. - Variables' original names and dependencies metadata are now given through the :py:meth:`pysd.py_backend.components.Component.add` decorator, instead of having them in the docstring (see the sketch below). - Merging of variable equations is now done using the coordinates to a pre-allocated array, instead of using the `magic` function :py:data:`pysd.py_backend.utils.xrmerge()`.
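For illustration, a variable in a translated Python model now looks roughly like this (a hypothetical sketch based on the generated-model format; the exact decorator arguments depend on the model being translated)::

    @component.add(
        name="Teacup Temperature",      # original model name (illustrative)
        units="Degrees",
        comp_type="Stateful",
        comp_subtype="Integ",
        depends_on={"_integ_teacup_temperature": 1},
    )
    def teacup_temperature():
        return _integ_teacup_temperature()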
@@ -306,48 +351,48 @@ Breaking changes ~~~~~~~~~~~~~~~~ -- Set the argument :py:data:`flatten_output` from :py:meth:`.run` to :py:data:`True` by default. Previously it was set to :py:data:`False` by default. -- Move the docstring of the model to a property, :py:attr:`.doc`. Thus, it is not callable anymore. -- Allow the function :py:func:`pysd.py_backend.functions.pulse` to also perform the operations performed by :py:data:`pysd.py_backend.functions.pulse_train()` and :py:data:`pysd.py_backend.functions.pulse_magnitude()`. -- Change first argument of :py:func:`pysd.py_backend.functions.active_initial`, now it is the `stage of the model` and not the `time`. -- Simplify the function :py:data:`pysd.py_backend.utils.rearrange()` orienting it to perform simple rearrange cases for user interaction. -- Move :py:data:`pysd.py_backend.statefuls.Model` and :py:data:`pysd.py_backend.statefuls.Macro` to :py:class:`pysd.py_backend.model.Model` and :py:class:`pysd.py_backend.model.Macro`, respectively. -- Manage all kinds of lookups with the :py:class:`pysd.py_backend.lookups.Lookups` class. -- Include a second optional argument to lookups functions to set the final coordinates when a subscripted variable is passed as an argument. +- Set the argument :py:data:`flatten_output` from :py:meth:`.run` to :py:data:`True` by default. Previously it was set to :py:data:`False` by default. (`@enekomartinmartinez `_) +- Move the docstring of the model to a property, :py:attr:`.doc`. Thus, it is not callable anymore. (`@enekomartinmartinez `_) +- Allow the function :py:func:`pysd.py_backend.functions.pulse` to also perform the operations performed by :py:data:`pysd.py_backend.functions.pulse_train()` and :py:data:`pysd.py_backend.functions.pulse_magnitude()`. (`@enekomartinmartinez `_) +- Change first argument of :py:func:`pysd.py_backend.functions.active_initial`, now it is the `stage of the model` and not the `time`. (`@enekomartinmartinez `_) +- Simplify the function :py:data:`pysd.py_backend.utils.rearrange()` orienting it to perform simple rearrange cases for user interaction. (`@enekomartinmartinez `_) +- Move :py:data:`pysd.py_backend.statefuls.Model` and :py:data:`pysd.py_backend.statefuls.Macro` to :py:class:`pysd.py_backend.model.Model` and :py:class:`pysd.py_backend.model.Macro`, respectively. (`@enekomartinmartinez `_) +- Manage all kinds of lookups with the :py:class:`pysd.py_backend.lookups.Lookups` class. (`@enekomartinmartinez `_) +- Include a second optional argument to lookups functions to set the final coordinates when a subscripted variable is passed as an argument. (`@enekomartinmartinez `_) Deprecations ~~~~~~~~~~~~ -- Remove :py:data:`pysd.py_backend.utils.xrmerge()`, :py:data:`pysd.py_backend.functions.pulse_train()`, :py:data:`pysd.py_backend.functions.pulse_magnitude()`, :py:data:`pysd.py_backend.functions.lookup()`, :py:data:`pysd.py_backend.functions.lookup_discrete()`, :py:data:`pysd.py_backend.functions.lookup_extrapolation()`, :py:data:`pysd.py_backend.functions.logical_and()`, :py:data:`pysd.py_backend.functions.logical_or()`, :py:data:`pysd.py_backend.functions.bounded_normal()`, :py:data:`pysd.py_backend.functions.log()`. -- Remove old translation and building files (:py:data:`pysd.translation`). +- Remove :py:data:`pysd.py_backend.utils.xrmerge()`, :py:data:`pysd.py_backend.functions.pulse_train()`, :py:data:`pysd.py_backend.functions.pulse_magnitude()`, :py:data:`pysd.py_backend.functions.lookup()`, :py:data:`pysd.py_backend.functions.lookup_discrete()`, :py:data:`pysd.py_backend.functions.lookup_extrapolation()`, :py:data:`pysd.py_backend.functions.logical_and()`, :py:data:`pysd.py_backend.functions.logical_or()`, :py:data:`pysd.py_backend.functions.bounded_normal()`, :py:data:`pysd.py_backend.functions.log()`. (`@enekomartinmartinez `_) +- Remove old translation and building files (:py:data:`pysd.translation`). (`@enekomartinmartinez `_) Bug fixes ~~~~~~~~~ -- Generate the documentation of the model when loading it to avoid lossing information when replacing a variable value (:issue:`310`, :pull:`312`). -- Make random functions return arrays of the same shape as the variable, to avoid repeating values over a dimension (:issue:`309`, :pull:`312`). -- Fix bug when Vensim's :MACRO: definition is not at the top of the model file (:issue:`306`, :pull:`312`). -- Make builder identify the subscripts using a main range and subrange to allow using subscripts as numeric values as Vensim does (:issue:`296`, :issue:`301`, :pull:`312`). -- Fix bug of missmatching of functions and lookups names (:issue:`116`, :pull:`312`). -- Parse Xmile models case insensitively and ignoring the new lines characters (:issue:`203`, :issue:`253`, :pull:`312`). -- Add support for Vensim's `\:EXCEPT\: keyword `_ (:issue:`168`, :issue:`253`, :pull:`312`). -- Add spport for Xmile's FORCST and SAFEDIV functions (:issue:`154`, :pull:`312`). -- Add subscripts support for Xmile (:issue:`289`, :pull:`312`). -- Fix numeric error bug when using :py:data:`return_timestamps` and time step with non-integer values. +- Generate the documentation of the model when loading it to avoid losing information when replacing a variable value (:issue:`310`, :pull:`312`). (`@enekomartinmartinez `_) +- Make random functions return arrays of the same shape as the variable, to avoid repeating values over a dimension (:issue:`309`, :pull:`312`). (`@enekomartinmartinez `_) +- Fix bug when Vensim's :MACRO: definition is not at the top of the model file (:issue:`306`, :pull:`312`). (`@enekomartinmartinez `_) +- Make builder identify the subscripts using a main range and subrange to allow using subscripts as numeric values, as Vensim does (:issue:`296`, :issue:`301`, :pull:`312`). (`@enekomartinmartinez `_) +- Fix bug of mismatching of function and lookup names (:issue:`116`, :pull:`312`).
(`@enekomartinmartinez `_) +- Parse Xmile models case-insensitively, ignoring newline characters (:issue:`203`, :issue:`253`, :pull:`312`). (`@enekomartinmartinez `_) +- Add support for Vensim's `\:EXCEPT\: keyword `_ (:issue:`168`, :issue:`253`, :pull:`312`). (`@enekomartinmartinez `_) +- Add support for Xmile's FORCST and SAFEDIV functions (:issue:`154`, :pull:`312`). (`@enekomartinmartinez `_) +- Add subscripts support for Xmile (:issue:`289`, :pull:`312`). (`@enekomartinmartinez `_) +- Fix numeric error bug when using :py:data:`return_timestamps` and a time step with non-integer values. (`@enekomartinmartinez `_) Documentation ~~~~~~~~~~~~~ -- Review the whole documentation, refract it, and describe the new features. +- Review the whole documentation, refactor it, and describe the new features. (`@enekomartinmartinez `_) Performance ~~~~~~~~~~~ -- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`pysd.py_backend.utils.xrmerge()`. -- The arranging and subseting of arrays is now done inplace instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. -- The grammars for Parsimonious are only compiled once per translation. +- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`pysd.py_backend.utils.xrmerge()`. (`@enekomartinmartinez `_) +- The arranging and subsetting of arrays is now done in place instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. (`@enekomartinmartinez `_) +- The grammars for Parsimonious are only compiled once per translation. (`@enekomartinmartinez `_) Internal Changes ~~~~~~~~~~~~~~~~ -- The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `. +- The translation and the building of models have been completely reworked to use the :doc:`Abstract Model Representation `.
(`@enekomartinmartinez `_) diff --git a/pysd/_version.py b/pysd/_version.py index b202327a..46f67e7f 100644 --- a/pysd/_version.py +++ b/pysd/_version.py @@ -1 +1 @@ -__version__ = "3.6.1" +__version__ = "3.7.0" diff --git a/pysd/builders/python/python_expressions_builder.py b/pysd/builders/python/python_expressions_builder.py index 7308a2a5..e4df5a51 100644 --- a/pysd/builders/python/python_expressions_builder.py +++ b/pysd/builders/python/python_expressions_builder.py @@ -1712,15 +1712,15 @@ def build(self, arguments: dict) -> Union[BuildAST, None]: arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_hardcodedlookup") - - arguments["final_subs"] = self.element.subs_dict + arguments["final_subs"] = "%(final_subs)s" self.element.objects["hardcoded_lookups"] = { "name": arguments["name"], "expression": "%(name)s = HardcodedLookups(%(x)s, %(y)s, " "%(subscripts)s, '%(interp)s', " "%(final_subs)s, '%(name)s')" - % arguments + % arguments, + "final_subs": self.element.subs_dict } return BuildAST( @@ -1769,11 +1769,21 @@ def build(self, arguments: dict) -> BuildAST: separator=",", threshold=len(self.lookups.y) ) - return BuildAST( - expression="np.interp(%(value)s, %(x)s, %(y)s)" % arguments, - calls=arguments["value"].calls, - subscripts=arguments["value"].subscripts, - order=0) + if arguments["value"].subscripts: + subs = arguments["value"].subscripts + expression = "np.interp(%(value)s, %(x)s, %(y)s)" % arguments + return BuildAST( + expression="xr.DataArray(%s, %s, %s)" % ( + expression, subs, list(subs)), + calls=arguments["value"].calls, + subscripts=subs, + order=0) + else: + return BuildAST( + expression="np.interp(%(value)s, %(x)s, %(y)s)" % arguments, + calls=arguments["value"].calls, + subscripts={}, + order=0) class ReferenceBuilder(StructureBuilder): diff --git a/pysd/builders/python/python_model_builder.py b/pysd/builders/python/python_model_builder.py index 4a20e21a..637571f6 100644 --- a/pysd/builders/python/python_model_builder.py +++ b/pysd/builders/python/python_model_builder.py @@ -639,6 +639,11 @@ def build_element(self) -> None: else: self.pre_expression = "" # NUMPY: reshape to the final shape if needed + # include full subscript range for objects defined with a + # partial range (issue #363) + for value in self.objects.values(): + if value["expression"] is not None and "final_subs" in value: + value["final_subs"] = self.subs_dict # expressions[0]["expr"].reshape(self.section.subscripts, {}) if not expressions[0]["expr"].subscripts and self.subscripts: # Updimension the return value to an array diff --git a/pysd/cli/main.py b/pysd/cli/main.py index 29601b52..fb28940e 100644 --- a/pysd/cli/main.py +++ b/pysd/cli/main.py @@ -2,17 +2,15 @@ import os from pathlib import Path -from csv import QUOTE_NONE from datetime import datetime -from .parser import parser - import pysd from pysd.translators.vensim.vensim_utils import supported_extensions as\ vensim_extensions from pysd.translators.xmile.xmile_utils import supported_extensions as\ xmile_extensions +from .parser import parser def main(args): """ @@ -41,12 +39,17 @@ def main(args): model.initialize() - output = model.run(**create_configuration(model, options)) + if not options.output_file: + options.output_file = os.path.splitext(os.path.basename( + options.model_file + ))[0]\ + + datetime.now().strftime("_output_%Y_%m_%d-%H_%M_%S_%f.tab") + + model.run(**create_configuration(model, options)) if options.export_file: model.export(options.export_file) - save(output, options) 
print("\nFinished!") sys.exit() @@ -133,45 +136,11 @@ def create_configuration(model, options): "time_step": options.time_step, "saveper": options.saveper, "flatten_output": True, # need to return totally flat DF - "return_timestamps": options.return_timestamps # given or None + "return_timestamps": options.return_timestamps, # given or None, + "output_file": options.output_file } if options.import_file: conf_dict["initial_condition"] = options.import_file return conf_dict - - -def save(output, options): - """ - Saves models output. - - Paramters - --------- - output: pandas.DataFrame - - options: argparse.Namespace - - Returns - ------- - None - - """ - if options.output_file: - output_file = options.output_file - else: - output_file = os.path.splitext(os.path.basename( - options.model_file - ))[0]\ - + datetime.now().strftime("_output_%Y_%m_%d-%H_%M_%S_%f.tab") - - if output_file.endswith(".tab"): - sep = "\t" - else: - sep = "," - - # QUOTE_NONE used to print the csv/tab files af vensim does with special - # characterse, e.g.: "my-var"[Dimension] - output.to_csv(output_file, sep, index_label="Time", quoting=QUOTE_NONE) - - print(f"Data saved in '{output_file}'") diff --git a/pysd/cli/parser.py b/pysd/cli/parser.py index dbe6e012..b22bb677 100644 --- a/pysd/cli/parser.py +++ b/pysd/cli/parser.py @@ -29,10 +29,10 @@ def check_output(string): - Checks that out put file ends with .tab or .csv + Checks that the output file ends with .tab, .csv or .nc """ - if not string.endswith('.tab') and not string.endswith('.csv'): + if not string.endswith(('.tab', '.csv', '.nc')): parser.error( f'when parsing {string}' - '\nThe output file name must be .tab or .csv...') + '\nThe output file name must be .tab, .csv or .nc...') return string diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index 20fac9f3..140ac9b2 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -123,7 +123,7 @@ def _set_component(self, name, value): class Time(object): - rprec = 1e-10 # relative precission for final time and saving time + rprec = 1e-5 # relative precision for final time and saving time def __init__(self): self._time = None @@ -183,7 +183,7 @@ def in_return(self): prec = self.time_step() * self.rprec if self.return_timestamps is not None: - # this allows managing float precission error + # this allows managing float precision error if self.next_return is None: return False if np.isclose(self._time, self.next_return, prec): @@ -207,7 +207,7 @@ def in_return(self): return time_delay % save_per < prec or -time_delay % save_per < prec def round(self): - """ Return rounded time to outputs to avoid float precission error""" + """ Return rounded time to outputs to avoid float precision error""" return np.round( self._time, -int(np.log10(self.time_step()*self.rprec))) diff --git a/pysd/py_backend/model.py b/pysd/py_backend/model.py index 60a35a0a..1a13e9f4 100644 --- a/pysd/py_backend/model.py +++ b/pysd/py_backend/model.py @@ -8,12 +8,15 @@ import warnings import inspect import pickle +from pathlib import Path from typing import Union import numpy as np import xarray as xr import pandas as pd +from pysd._version import __version__ + from .
import utils from .statefuls import DynamicStateful, Stateful from .external import External, Excels @@ -21,8 +24,7 @@ from .data import TabData from .lookups import HardcodedLookups from .components import Components, Time - -from pysd._version import __version__ +from .output import ModelOutput class Macro(DynamicStateful): @@ -483,7 +485,7 @@ def export(self, file_name): Parameters ---------- - file_name: str + file_name: str or pathlib.Path Name of the file to export the values. """ @@ -510,7 +512,7 @@ def import_pickle(self, file_name): Parameters ---------- - file_name: str + file_name: str or pathlib.Path Name of the file to import the values from. """ @@ -709,7 +711,6 @@ def set_components(self, params, new=False): >>> br = pandas.Series(index=range(30), values=np.sin(range(30)) >>> model.set_components({'birth_rate': br}) - """ # TODO: allow the params argument to take a pandas dataframe, where # column names are variable names. However some variables may be @@ -780,8 +781,8 @@ def set_components(self, params, new=False): # this won't handle other statefuls... if '_integ_' + func_name in dir(self.components): - warnings.warn("Replacing the equation of stock" - + "{} with params".format(key), + warnings.warn("Replacing the equation of stock " + "'{}' with params...".format(key), stacklevel=2) new_function.__name__ = func_name @@ -1023,6 +1024,7 @@ def __init__(self, py_model_file, data_files, initialize, missing_values): self.time.set_control_vars(**self.components._control_vars) self.data_files = data_files self.missing_values = missing_values + self.progress = None if initialize: self.initialize() @@ -1035,7 +1037,7 @@ def initialize(self): def run(self, params=None, return_columns=None, return_timestamps=None, initial_condition='original', final_time=None, time_step=None, saveper=None, reload=False, progress=False, flatten_output=True, - cache_output=True): + cache_output=True, output_file=None): """ Simulate the model's behavior over time. Return a pandas dataframe with timestamps as rows, @@ -1098,6 +1100,8 @@ def run(self, params=None, return_columns=None, return_timestamps=None, If True, once the output dataframe has been formatted will split the xarrays in new columns following Vensim's naming to make a totally flat output. Default is True. + This argument will be ignored when passing a netCDF4 file + path in the output_file argument. cache_output: bool (optional) If True, the number of calls of outputs variables will be increased @@ -1106,6 +1110,11 @@ def run(self, params=None, return_columns=None, return_timestamps=None, recommended to activate this feature, if time step << saveper it is recommended to deactivate it. Default is True. + output_file: str, pathlib.Path or None (optional) + Path of the file in which to save simulation results. + Currently, csv, tab and nc (netCDF4) files are supported. 
+ + Examples -------- >>> model.run(params={'exogenous_constant': 42}) >>> model.run(params={'exogenous_variable': timeseries_input}) >>> model.run(return_timestamps=[1, 2, 3, 4, 10]) >>> model.run(return_timestamps=10) >>> model.run(return_timestamps=np.linspace(1, 10, 20)) + >>> model.run(output_file="results.nc") + See Also -------- @@ -1123,8 +1134,6 @@ if reload: self.reload() - self.progress = progress - self.time.add_return_timestamps(return_timestamps) if self.time.return_timestamps is not None and not final_time: # if not final time given the model will end in the list @@ -1145,6 +1154,18 @@ self.set_initial_condition(initial_condition) + # set progressbar + if progress and (self.cache_type["final_time"] == "step" or + self.cache_type["time_step"] == "step"): + warnings.warn( + "The progressbar is not compatible with dynamic " + "final time or time step. Both variables must be " + "constants to display progress." + ) + progress = False + + self.progress = progress + if return_columns is None or isinstance(return_columns, str): return_columns = self._default_return_columns(return_columns) @@ -1154,14 +1175,32 @@ # create a dictionary splitting run cached and others capture_elements = self._split_capture_elements(capture_elements) + # include outputs in cache if needed self._dependencies["OUTPUTS"] = { element: 1 for element in capture_elements["step"] } + if cache_output: # update the cache type taking into account the outputs self._assign_cache_type() + # check validity of output_file. This could be done inside the + # ModelOutput class, but it feels too late + if output_file: + if not isinstance(output_file, (str, Path)): + raise TypeError( + "Paths must be strings or pathlib Path objects.") + + if isinstance(output_file, str): + output_file = Path(output_file) + + file_extension = output_file.suffix + + if file_extension not in ModelOutput.valid_output_files: + raise ValueError( + f"Unsupported output file format {file_extension}") + # add constant cache to those variables that are constants self._add_constant_cache() @@ -1170,16 +1209,19 @@ # need to clean cache to remove the values from active_initial self.clean_caches() - res = self._integrate(capture_elements['step']) + # instantiating output object + output = ModelOutput(self, capture_elements['step'], output_file) + + self._integrate(output) del self._dependencies["OUTPUTS"] - self._add_run_elements(res, capture_elements['run']) - self._remove_constant_cache() + output.add_run_elements(self, capture_elements['run']) - return_df = utils.make_flat_df(res, return_addresses, flatten_output) + self._remove_constant_cache() - return return_df + return output.postprocess( + return_addresses=return_addresses, flatten=flatten_output) def select_submodel(self, vars=[], modules=[], exogenous_components={}): """ @@ -1593,7 +1635,7 @@ Parameters ---------- - initial_condition : str or (float, dict) + initial_condition : str or (float, dict) or pathlib.Path The starting time, and the state of the system (the values of all the stocks) at that starting time. 'original' or 'o' uses model-file specified initial condition.
'current' or 'c' uses @@ -1615,19 +1657,21 @@ def set_initial_condition(self, initial_condition): model.set_initial_value() """ + if isinstance(initial_condition, str)\ + and initial_condition.lower() not in ["original", "o", + "current", "c"]: + initial_condition = Path(initial_condition) if isinstance(initial_condition, tuple): self.initialize() self.set_initial_value(*initial_condition) + elif isinstance(initial_condition, Path): + self.import_pickle(initial_condition) elif isinstance(initial_condition, str): if initial_condition.lower() in ["original", "o"]: self.time.set_control_vars( initial_time=self.components._control_vars["initial_time"]) self.initialize() - elif initial_condition.lower() in ["current", "c"]: - pass - else: - self.import_pickle(initial_condition) else: raise TypeError( "Invalid initial conditions. " @@ -1647,26 +1691,19 @@ def _euler_step(self, dt): """ self.state = self.state + self.ddt() * dt - def _integrate(self, capture_elements): + def _integrate(self, out_obj): """ - Performs euler integration. + Performs euler integration and writes results to the out_obj. Parameters ---------- - capture_elements: set - Which model elements to capture - uses pysafe names. + out_obj: pysd.ModelOutput Returns ------- - outputs: pandas.DataFrame - Output capture_elements data. + None """ - # necessary to have always a non-xaray object for appending objects - # to the DataFrame time will always be a model element and not saved - # TODO: find a better way of saving outputs - capture_elements.add("time") - outputs = pd.DataFrame(columns=capture_elements) if self.progress: # initialize progress bar @@ -1677,11 +1714,11 @@ def _integrate(self, capture_elements): # when None is used the update will do nothing progressbar = utils.ProgressBar(None) + # performs the time stepping while self.time.in_bounds(): if self.time.in_return(): - outputs.at[self.time.round()] = [ - getattr(self.components, key)() - for key in capture_elements] + out_obj.update(self) + self._euler_step(self.time.time_step()) self.time.update(self.time()+self.time.time_step()) self.clean_caches() @@ -1690,33 +1727,6 @@ def _integrate(self, capture_elements): # need to add one more time step, because we run only the state # updates in the previous loop and thus may be one short. if self.time.in_return(): - outputs.at[self.time.round()] = [getattr(self.components, key)() - for key in capture_elements] + out_obj.update(self) progressbar.finish() - - # delete time column as it was created only for avoiding errors - # of appending data. See previous TODO. - del outputs["time"] - return outputs - - def _add_run_elements(self, df, capture_elements): - """ - Adds constant elements to a dataframe. - - Parameters - ---------- - df: pandas.DataFrame - Dataframe to add elements. - - capture_elements: list - List of constant elements - - Returns - ------- - None - - """ - nt = len(df.index.values) - for element in capture_elements: - df[element] = [getattr(self.components, element)()] * nt diff --git a/pysd/py_backend/output.py b/pysd/py_backend/output.py new file mode 100644 index 00000000..8997ec41 --- /dev/null +++ b/pysd/py_backend/output.py @@ -0,0 +1,606 @@ +""" +ModelOutput class is used to build different output objects based on +user input. For now, available output types are pandas DataFrame or +netCDF4 Dataset. +The OutputHandlerInterface class is an interface for the creation of handlers +for other output object types. 
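The handler dispatch described above is a small chain-of-responsibility: each handler either claims the requested file type or defers to the next one. A stripped-down sketch of the idea (the class names here are invented for illustration, not the shipped handlers):

```python
from pathlib import Path

class Handler:
    """Base link of the chain: claim the file or defer to the next link."""
    suffixes = ()

    def __init__(self, next_=None):
        self._next = next_

    def handle(self, out_file):
        if out_file is None or Path(out_file).suffix in self.suffixes:
            return self  # this handler claims the output
        if self._next is None:
            raise ValueError(f"Unsupported output file {out_file}")
        return self._next.handle(out_file)  # pass it down the chain

class CSVLike(Handler):
    suffixes = (".csv", ".tab")

class NetCDF(Handler):
    suffixes = (".nc",)

chain = CSVLike(NetCDF())
assert isinstance(chain.handle("results.nc"), NetCDF)
assert isinstance(chain.handle(None), CSVLike)  # DataFrame-style default
```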
+""" +import abc +import time as t + +from csv import QUOTE_NONE + +import regex as re + +import numpy as np +import xarray as xr +import pandas as pd + +from pysd._version import __version__ + +from . utils import xrsplit + + +class ModelOutput(): + """ + Handles different types of outputs by dispatchinging the tasks + to adequate object handlers. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + out_file: str or pathlib.Path + Path to the file where the results will be written. + + """ + valid_output_files = [".nc", ".csv", ".tab"] + + def __init__(self, model, capture_elements, out_file=None): + + # Add any other handlers that you write here, in the order you + # want them to run (DataFrameHandler runs first) + self.handler = DataFrameHandler(DatasetHandler(None)).handle(out_file) + + capture_elements.add("time") + self.capture_elements = capture_elements + + self.initialize(model) + + def initialize(self, model): + """ Delegating the creation of the results object and its elements to + the appropriate handler.""" + self.handler.initialize(model, self.capture_elements) + + def update(self, model): + """ Delegating the update of the results object and its elements to the + appropriate handler.""" + self.handler.update(model, self.capture_elements) + + def postprocess(self, **kwargs): + """ Delegating the postprocessing of the results object to the + appropriate handler.""" + return self.handler.postprocess(**kwargs) + + def add_run_elements(self, model, run_elements): + """ Delegating the addition of results with run cache in the output + object to the appropriate handler.""" + self.handler.add_run_elements(model, run_elements) + + +class OutputHandlerInterface(metaclass=abc.ABCMeta): + """ + Interface for the creation of different output handlers. + """ + def __init__(self, next=None): + self._next = next + + def handle(self, out_file): + """ + If the concrete handler can write on the output file type passed by the + user, it returns the handler itself, else it goes to the next handler. + + Parameters + ---------- + out_file: str or pathlib.Path + Path to the file where the results will be written. + + Returns + ------- + handler + + """ + handler = self.process_output(out_file) + + if handler is not None: # the handler can write the out_file type. + return handler + else: + return self._next.handle(out_file) + + @classmethod + def __subclasshook__(cls, subclass): + return (hasattr(subclass, 'process_output') and + callable(subclass.process_output) and + hasattr(subclass, 'initialize') and + callable(subclass.initialize) and + hasattr(subclass, 'update') and + callable(subclass.update) and + hasattr(subclass, 'postprocess') and + callable(subclass.postprocess) and + hasattr(subclass, 'add_run_elements') and + callable(subclass.add_run_elements) or + NotImplemented) + + @abc.abstractmethod + def process_output(self, out_file): + """ + If concrete handler can process out_file, returns it, else returns + None. + """ + raise NotImplementedError + + @abc.abstractmethod + def initialize(self, model, capture_elements): + """ + Create the results object and its elements based on capture_elemetns. + """ + raise NotImplementedError + + @abc.abstractmethod + def update(self, model, capture_elements): + """ + Update the results object at each iteration at which resutls are + stored. 
+ """ + raise NotImplementedError + + @abc.abstractmethod + def postprocess(self, **kwargs): + """ + Perform different tasks at the time of returning the results object. + """ + raise NotImplementedError + + @abc.abstractmethod + def add_run_elements(self, model, capture_elements): + """ + Add elements with run cache to the results object. + """ + raise NotImplementedError + + +class DatasetHandler(OutputHandlerInterface): + """ + Manages simulation results stored as netCDF4 Dataset. + """ + + def __init__(self, next): + super().__init__(next) + self.out_file = None + self.ds = None + self._step = 0 + self.nc = __import__("netCDF4") + + @property + def step(self): + """ + Used as time index for the output Dataset. Increases by one at each + iteration. + """ + return self._step + + def __update_step(self): + """ + Increases the _step attribute by 1 at each model iteration. + """ + self._step = self.step + 1 + + def process_output(self, out_file): + """ + If out_file can be handled by this concrete handler, it returns the + handler instance, else it returns None. + + Parameters + ---------- + out_file: str or pathlib.Path + Path to the file where the results will be written. + + Returns + ------- + None or DatasetHandler instance + + """ + if out_file: + if out_file.suffix == ".nc": + self.out_file = out_file + return self + + def initialize(self, model, capture_elements): + """ + Creates a netCDF4 Dataset and adds model dimensions and variables + present in the capture elements to it. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + None + + """ + self.ds = self.nc.Dataset(self.out_file, "w") + + # defining global attributes + self.ds.description = "Results for simulation run on " \ + f"{t.ctime(t.time())} using PySD version {__version__}" + self.ds.model_file = model.py_model_file or model.mdl_file + self.ds.timestep = f"{model.time.time_step()}" if model.cache_type[ + "time_step"] == "run" else "Variable" + self.ds.initial_time = f"{model.time.initial_time()}" + self.ds.final_time = f"{model.time.final_time()}" if model.cache_type[ + "final_time"] == "run" else "Variable" + + # creating variables for all model dimensions + for dim_name, coords in model.subscripts.items(): + coords = np.array(coords) + # create dimension + self.ds.createDimension(dim_name, len(coords)) + # length of the longest string in the coords + max_str_len = len(max(coords, key=len)) + # create variable for the dimension + var = self.ds.createVariable( + dim_name, f"S{max_str_len}", (dim_name,)) + # assigning coords to dimension + var[:] = coords + + # creating the time dimension as unlimited + self.ds.createDimension("time", None) + # creating variables + self.__create_ds_vars(model, capture_elements) + + def update(self, model, capture_elements): + """ + Writes values of cache step variables from the capture_elements set + in the netCDF4 Dataset. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + None + + """ + for key in capture_elements: + comp = model[key] + if isinstance(comp, xr.DataArray): + self.ds[key][self.step, :] = comp.values + else: + self.ds[key][self.step] = comp + + self.__update_step() + + def __update_run_elements(self, model, capture_elements): + """ + Writes values of cache run elements from the cature_elements set + in the netCDF4 Dataset. 
+ Cache run elements do not have the time dimension. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + None + + """ + for key in capture_elements: + comp = model[key] + if isinstance(comp, xr.DataArray): + self.ds[key][:] = comp.values + else: + self.ds[key][:] = comp + + def postprocess(self, **kwargs): + """ + Closes netCDF4 Dataset. + + Returns + ------- + None + """ + self.ds.close() + print(f"Results stored in {self.out_file}") + + def add_run_elements(self, model, capture_elements): + """ + Adds constant elements to netCDF4 Dataset. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: list + List of constant elements + + Returns + ------- + None + + """ + # creating variables in capture_elements + self.__create_ds_vars(model, capture_elements, time_dim=False) + self.__update_run_elements(model, capture_elements) + + def __create_ds_vars(self, model, capture_elements, time_dim=True): + """ + Create new variables in a netCDF4 Dataset from the capture_elements. + Data is zlib compressed by default for netCDF4 1.6.0 and above. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + time_dim: bool + Whether to add time as the first dimension for the variable. + + Returns + ------- + None + + """ + kwargs = dict() + + # compare the version numerically, so that e.g. "1.10" > "1.6" + if tuple(int(v) for v in self.nc.__version__.split(".")[:2]) >= (1, 6): + kwargs["compression"] = "zlib" + + for key in capture_elements: + comp = model[key] + + dims = tuple() + if isinstance(comp, xr.DataArray): + dims = tuple(comp.dims) + if time_dim: + dims = ("time",) + dims + + var = self.ds.createVariable(key, "f8", dims, **kwargs) + # adding metadata for each var from the model.doc + for col in model.doc.columns: + if col in ["Subscripts", "Limits"]: + # pass those that cannot be saved as attributes + continue + var.setncattr( + col, + model.doc.loc[model.doc["Py Name"] == key, col].values[0] + or "Missing" + ) + + +class DataFrameHandler(OutputHandlerInterface): + """ + Manages simulation results stored as pandas DataFrame. + """ + def __init__(self, next): + super().__init__(next) + self.ds = None + self.out_file = None + + def process_output(self, out_file): + """ + If this handler can process out_file, it returns the handler + instance, else None. + DataFrameHandler handles outputs to be saved as *.csv or *.tab files, + and is the default handler when no output file is passed by the user. + + Parameters + ---------- + out_file: str or pathlib.Path + Path to the file where the results will be written. + + Returns + ------- + None or DataFrameHandler instance + + """ + self.out_file = out_file + + if not out_file: + return self + + if out_file.suffix in [".csv", ".tab"]: + return self + + def initialize(self, model, capture_elements): + """ + Creates a pandas DataFrame and adds model variables as columns. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + None + + """ + self.ds = pd.DataFrame(columns=capture_elements) + + def update(self, model, capture_elements): + """ + Add a row to the results pandas DataFrame with the values of the + variables listed in capture_elements. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names.
+ + Returns + ------- + None + + """ + self.ds.at[model.time.round()] = [ + getattr(model.components, key)() + for key in capture_elements] + + def postprocess(self, **kwargs): + """ + Delete time column from the pandas DataFrame and flatten xarrays if + required. + + Returns + ------- + ds: pandas.DataFrame + Simulation results stored as a pandas DataFrame. + + """ + # delete the time column as it was created only to avoid errors + # when appending data + del self.ds["time"] + + # enforce flattening if df is to be saved to csv or tab file + flatten = True if self.out_file else kwargs.get("flatten", None) + + df = DataFrameHandler.make_flat_df( + self.ds, kwargs["return_addresses"], flatten + ) + if self.out_file: + self.__save_to_file(df) + + return df + + def __save_to_file(self, output): + """ + Saves the model's output. + + Parameters + ---------- + output: pandas.DataFrame + + Returns + ------- + None + + """ + if self.out_file.suffix == ".tab": + sep = "\t" + else: + sep = "," + output.columns = [col.replace(",", ";") for col in output.columns] + + # QUOTE_NONE used to print the csv/tab files as vensim does with + # special characters, e.g.: "my-var"[Dimension] + output.to_csv( + self.out_file, sep, index_label="Time", quoting=QUOTE_NONE) + + print(f"Data saved in '{self.out_file}'") + + def add_run_elements(self, model, capture_elements): + """ + Adds constant elements to a dataframe. + + Parameters + ---------- + model: pysd.Model + PySD Model object + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + None + + """ + nx = len(self.ds.index) + for element in capture_elements: + self.ds[element] = [getattr(model.components, element)()] * nx + + @staticmethod + def make_flat_df(df, return_addresses, flatten=False): + """ + Takes a dataframe from the outputs of the integration processes, + renames the columns as the given return_addresses and splits xarrays + if needed. + + Parameters + ---------- + df: pandas.DataFrame + Dataframe to process. + + return_addresses: dict + Keys will be column names of the resulting dataframe, and are what + the user passed in as 'return_columns'. Values are a tuple: + (py_name, {coords dictionary}) which tells us where to look for the + value to put in that specific column. + + flatten: bool (optional) + If True, once the output dataframe has been formatted, the + xarrays will be split into new columns following Vensim's naming + to make a totally flat output. Default is False. + + Returns + ------- + new_df: pandas.DataFrame + Formatted dataframe. + + """ + new_df = {} + for real_name, (pyname, address) in return_addresses.items(): + if address: + # subset the specific address + values = [x.loc[address] for x in df[pyname].values] + else: + # get the full column + values = df[pyname].to_list() + + is_dataarray = len(values) != 0 and isinstance( + values[0], xr.DataArray) + + if is_dataarray and values[0].size == 1: + # some elements are returned as 0-d arrays, convert + # them to float + values = [float(x) for x in values] + is_dataarray = False + + if flatten and is_dataarray: + DataFrameHandler.__add_flat(new_df, real_name, values) + else: + new_df[real_name] = values + + return pd.DataFrame(index=df.index, data=new_df) + + @staticmethod + def __add_flat(savedict, name, values): + """ + Add float lists from a list of xarrays to a provided dictionary. + + Parameters + ---------- + savedict: dict + Dictionary to save the data on.
+ + name: str + The base name of the variable to save the data. + + values: list + List of xarrays to convert to split in floats. + + Returns + ------- + None + + """ + # remove subscripts from name if given + name = re.sub(r'\[.*\]', '', name) + dims = values[0].dims + + # split values in xarray.DataArray + lval = [xrsplit(val) for val in values] + for i, ar in enumerate(lval[0]): + vals = [float(v[i]) for v in lval] + subs = '[' + ','.join([str(ar.coords[dim].values) + for dim in dims]) + ']' + savedict[name+subs] = vals diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index b31ebdae..6b8eb97c 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -9,7 +9,6 @@ from pathlib import Path from chardet.universaldetector import UniversalDetector -import regex as re import progressbar import numpy as np import xarray as xr @@ -104,92 +103,6 @@ def get_return_elements(return_columns, namespace): return list(capture_elements), return_addresses -def make_flat_df(df, return_addresses, flatten=False): - """ - Takes a dataframe from the outputs of the integration processes, - renames the columns as the given return_adresses and splits xarrays - if needed. - - Parameters - ---------- - df: Pandas.DataFrame - Output from the integration. - - return_addresses: dict - Keys will be column names of the resulting dataframe, and are what the - user passed in as 'return_columns'. Values are a tuple: - (py_name, {coords dictionary}) which tells us where to look for the - value to put in that specific column. - - flatten: bool (optional) - If True, once the output dataframe has been formatted will - split the xarrays in new columns following vensim's naming - to make a totally flat output. Default is False. - - Returns - ------- - new_df: pandas.DataFrame - Formatted dataframe. - - """ - new_df = {} - for real_name, (pyname, address) in return_addresses.items(): - if address: - # subset the specific address - values = [x.loc[address] for x in df[pyname].values] - else: - # get the full column - values = df[pyname].to_list() - - is_dataarray = len(values) != 0 and isinstance(values[0], xr.DataArray) - - if is_dataarray and values[0].size == 1: - # some elements are returned as 0-d arrays, convert - # them to float - values = [float(x) for x in values] - is_dataarray = False - - if flatten and is_dataarray: - _add_flat(new_df, real_name, values) - else: - new_df[real_name] = values - - return pd.DataFrame(index=df.index, data=new_df) - - -def _add_flat(savedict, name, values): - """ - Add float lists from a list of xarrays to a provided dictionary. - - Parameters - ---------- - savedict: dict - Dictionary to save the data on. - - name: str - The base name of the variable to save the data. - - values: list - List of xarrays to convert to split in floats. - - Returns - ------- - None - - """ - # remove subscripts from name if given - name = re.sub(r'\[.*\]', '', name) - dims = values[0].dims - - # split values in xarray.DataArray - lval = [xrsplit(val) for val in values] - for i, ar in enumerate(lval[0]): - vals = [float(v[i]) for v in lval] - subs = '[' + ','.join([str(ar.coords[dim].values) - for dim in dims]) + ']' - savedict[name+subs] = vals - - def compute_shape(coords, reshape_len=None, py_name=""): """ Computes the 'shape' of a coords dictionary. 
diff --git a/pysd/pysd.py b/pysd/pysd.py index 21b6f93d..fc032d6b 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -13,14 +13,14 @@ if sys.version_info[:2] < (3, 7): # pragma: no cover raise RuntimeError( "\n\n" - + "Your Python version is not longer supported by PySD.\n" + + "Your Python version is no longer supported by PySD.\n" + "The current version needs to run at least Python 3.7." + " You are running:\n\tPython " + sys.version + "." + "\nPlease update your Python version or use the last " + " supported version:\n\t" - + "https://github.com/JamesPHoughton/pysd/releases/tag/LastPy2" + + "https://github.com/SDXorg/pysd/releases/tag/LastPy2" ) diff --git a/pysd/translators/vensim/parsing_grammars/element_object.peg b/pysd/translators/vensim/parsing_grammars/element_object.peg index 49256520..b0af013a 100644 --- a/pysd/translators/vensim/parsing_grammars/element_object.peg +++ b/pysd/translators/vensim/parsing_grammars/element_object.peg @@ -29,7 +29,7 @@ subscript_copy = name _ "<->" _ name_mapping # Subscript mapping subscript_mapping_list = "->" _ subscript_mapping _ ("," _ subscript_mapping _)* -subscript_mapping = (_ name_mapping _) / (_ "(" _ name_mapping _ ":" _ index_list _")" ) +subscript_mapping = (_ name_mapping _) / (_ "(" _ name_mapping _ ":" _ index_list _ ")" ) name_mapping = basic_id / escape_group # Subscript except match diff --git a/requirements.txt b/requirements.txt index dfa1b357..e0947fec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ pandas -parsimonious==0.9.0 +parsimonious xarray xlrd lxml diff --git a/setup.py b/setup.py index be4253d6..7f9371e4 100755 --- a/setup.py +++ b/setup.py @@ -7,10 +7,9 @@ name='pysd', version=__version__, python_requires='>=3.7', - author='James Houghton', - author_email='james.p.houghton@gmail.com', + author='PySD contributors', packages=find_packages(exclude=['docs', 'tests', 'dist', 'build']), - url='https://github.com/JamesPHoughton/pysd', + url='https://github.com/SDXorg/pysd', license='LICENSE', description='System Dynamics Modeling in Python', long_description=open('README.md').read(), diff --git a/tests/conftest.py b/tests/conftest.py index b4fd902b..0019e9ad 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,13 @@ -import pytest +import shutil from pathlib import Path +import pytest +from pysd import read_vensim, read_xmile, load +from pysd.translators.vensim.vensim_utils import supported_extensions as\ vensim_extensions +from pysd.translators.xmile.xmile_utils import supported_extensions as\ xmile_extensions + @pytest.fixture(scope="session") def _root(): @@ -18,3 +25,24 @@ def _test_models(_root): def shared_tmpdir(tmpdir_factory): # shared temporary directory for each class return Path(tmpdir_factory.mktemp("shared")) + + +@pytest.fixture +def model(_root, tmp_path, model_path): + """ + Copy the model to tmp_path and translate it + """ + assert (_root / model_path).exists(), "The model doesn't exist" + + target = tmp_path / model_path.parent.name + new_path = target / model_path.name + shutil.copytree(_root / model_path.parent, target) + + if model_path.suffix.lower() in vensim_extensions: + return read_vensim(new_path) + elif model_path.suffix.lower() in xmile_extensions: + return read_xmile(new_path) + elif model_path.suffix.lower() == ".py": + return load(new_path) + else: + raise ValueError("Invalid model") diff --git a/tests/pytest.ini b/tests/pytest.ini index b7a740f8..ac62136e 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -3,4 +3,4 @@ python_files = pytest_*/**/*.py
pytest_*/*.py filterwarnings = error ignore:Creating an ndarray from ragged nested sequences - ignore:distutils Version classes are deprecated. Use packaging.version instead \ No newline at end of file + ignore:`np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here. diff --git a/tests/pytest_integration/pytest_integration_euler.py b/tests/pytest_integration/pytest_integration_euler.py new file mode 100644 index 00000000..33ede4c4 --- /dev/null +++ b/tests/pytest_integration/pytest_integration_euler.py @@ -0,0 +1,211 @@ +from pathlib import Path + +import pytest +import numpy as np +import pandas as pd + + +def harmonic_position(t, x0, k, m): + """ + Position for the simple harmonic oscillator + 'test-models/samples/simple_harmonic_oscillator + /simple_harmonic_oscillator.mdl' + """ + return x0*np.cos(np.sqrt(k/m)*t) + + +def harmonic_speed(t, x0, k, m): + """ + Speed for the simple harmonic oscillator + 'test-models/samples/simple_harmonic_oscillator + /simple_harmonic_oscillator.mdl' + """ + return - x0*np.sqrt(k/m)*np.sin(np.sqrt(k/m)*t) + + +@pytest.mark.parametrize( + "model_path,f,f2,stocks,arguments,integration_frame,ts_log,ts_rmse", + [ + # model_path: model path: pathlib.Path object + # f: stocks analytical solutions: tuple of funcs + # f2: stocks analytical solutions second derivative: tuple of funcs + # stocks: stock names in the model: tuple of strings + # arguments: arguments of fs and f2s in the model: tuple of strings + # integration_frame: minimum and maximum time to find solutions: tuple + # ts_log: logarithmic range of time steps in base 10: tuple + # ts_rmse: sorted time steps for the RMSE test: iterable + ( + Path("test-models/samples/teacup/teacup.mdl"), + (lambda t, T0, TR, ct: TR + (T0-TR)*np.exp(-t/ct),), + (lambda t, T0, TR, ct: (T0-TR)*np.exp(-t/ct)/ct**2,), + ("Teacup Temperature",), + ("Teacup Temperature", "Room Temperature", "Characteristic Time"), + (0, 40), + (1, -5), + [10, 5, 1, 0.5, 0.1, 0.05, 0.01] + ), + ( + Path("test-models/samples/simple_harmonic_oscillator/" + "simple_harmonic_oscillator.mdl"), + ( + harmonic_position, + harmonic_speed + ), + ( + lambda t, x0, k, m: -k/m*harmonic_position(t, x0, k, m), + lambda t, x0, k, m: -k/m*harmonic_speed(t, x0, k, m) + ), + ("position", "speed"), + ("initial position", "elastic constant", "mass"), + (0, 40), + (-1, -5), + [10, 5, 1, 0.5, 0.1, 0.05, 0.01] + ) + ], + ids=["teacup", "harmonic"] +) +class TestEulerConvergence: + """ + Tests for Euler integration method convergence. + """ + # Number of points to compute the tests + n_points_lte = 30 + + def test_local_truncation_error(self, model, f, f2, stocks, arguments, + integration_frame, ts_log, ts_rmse): + """ + Test the local truncation error (LTE). + LTE = y_1 - y(t_0+h) = 0.5*h**2*y''(x) for x in [t_0, t_0+h] + + where y_1 = y(t_0) + h*f(t_0, y(t_0)) and y(t) is the analytical + solution. + + Generates n_points_lte points in the given integration frame and + tests the convergence with logarithmically uniformly spaced + time steps. + + Parameters + ---------- + model: pysd.py_backend.model.Model + The model to integrate. + f: tuple of functions + The functions of the analytical solution of each stock. + f2: tuple of functions + The second derivative of the functions of the analytical + solution of each stock. + stocks: tuple of strings + The name of the stocks.
+ arguments: tuple of strings + The necessary argument names to evaluate f's and f2's. + Note that all the functions must take the same arguments + and in the same order. + integration_frame: tuple + Initial time of the model (usually 0) and maximum time to + generate a value to test the LTE. + ts_log: tuple + log in base 10 of the interval of time steps to generate. I.e., + the first point will be evaluated with time_step = 10**ts_log[0] + and the last one with 10**ts_log[1]. + ts_rmse: iterable + Not used. + + """ + # Generate starting points to compute LTE + t0s = np.random.uniform(*integration_frame, self.n_points_lte) + # Generate time steps + hs = 10**np.linspace(*ts_log, self.n_points_lte) + # Get model values before making any change + model_values = [model[var] for var in arguments] + + for t0, h in zip(t0s, hs): + # Reload model + model.reload() + # Get start value(s) + x0s = [ + func(t0, *model_values) + for func in f + ] + # Get expected value(s) + x_expect = np.array([ + func(t0 + h, *model_values) + for func in f + ]) + # Get error bound (error = 0.5h²*f''(x) for x in [t0, t0+h]) + # The 0.5 factor is replaced by 2 to avoid problems with local + # maxima. We assume error < 2h²*max(f''(x)) for x in [t0, t0+h] + error = 2*h**2*np.array([ + max(func(np.linspace(t0, t0+h, 1000), *model_values)) + for func in f2 + ]) + # Run the model from (t0, x0s) to t0+h + ic = t0, {stock: x0 for stock, x0 in zip(stocks, x0s)} + x_euler = model.run( + initial_condition=ic, + time_step=h, + return_columns=stocks, + return_timestamps=t0+h + ).values[0] + + # Check the error is within the expected bound + assert np.all(np.abs(x_expect - x_euler) <= np.abs(error)),\ + f"The LTE is bigger than the expected one ({ic}) h={h},"\ + f"\n{np.abs(x_expect - x_euler)} !<= {np.abs(error)}, " + + def test_root_mean_square_error(self, model, f, f2, stocks, arguments, + integration_frame, ts_log, ts_rmse): + """ + Test the root-mean-square error (RMSE). + RMSE = SQRT(MEAN((y_i-y(t_0+h*i))^2)) + + Integrates the given model with different time steps and checks + that the RMSE decreases when the time step decreases. + + Parameters + ---------- + model: pysd.py_backend.model.Model + The model to integrate. + f: tuple of functions + The functions of the analytical solution of each stock. + f2: tuple of functions + Not used. + stocks: tuple of strings + The name of the stocks. + arguments: tuple of strings + The necessary argument names to evaluate f's and f2's. + Note that all the functions must take the same arguments + and in the same order. + integration_frame: tuple + Not used. + ts_log: tuple + Not used. + ts_rmse: iterable + Time steps to compute the root mean square error over the + whole integration. It should be sorted from biggest to + smallest.
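To see the property this test relies on in isolation, here is a self-contained numeric sketch (independent of PySD and of these fixtures) of the Euler RMSE decreasing monotonically with the step size, using teacup-like parameters:

```python
import numpy as np

def euler(f, x0, t):
    """Explicit Euler integration of dx/dt = f(x) on the grid t."""
    x = np.empty_like(t)
    x[0] = x0
    for i in range(len(t) - 1):
        x[i + 1] = x[i] + f(x[i]) * (t[i + 1] - t[i])
    return x

T0, TR, ct = 180.0, 70.0, 10.0               # teacup-like parameters
f = lambda temp: -(temp - TR) / ct           # cooling rate
exact = lambda t: TR + (T0 - TR) * np.exp(-t / ct)

rmse = []
for h in [10, 5, 1, 0.5, 0.1]:               # sorted from biggest to smallest
    t = np.arange(0, 40 + h, h)
    rmse.append(np.sqrt(np.mean((euler(f, T0, t) - exact(t)) ** 2)))

assert np.all(np.diff(rmse) < 0)             # RMSE shrinks with the step
```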
+ + """ + # Get model values before making any change + model_values = [model[var] for var in arguments] + + rmse = [] + for h in ts_rmse: + # Reload model + model.reload() + # Run the model from (t0, x0s) to t0+h + x_euler = model.run( + time_step=h, + saveper=h, + return_columns=stocks + ) + # Expected values + expected_values = pd.DataFrame( + index=x_euler.index, + data={ + stock: func(x_euler.index, *model_values) + for stock, func in zip(stocks, f) + } + ) + # Compute the RMSE for each stock + rmse.append(np.sqrt(((x_euler-expected_values)**2).mean())) + + # Assert that the RMSE decreases for all stocks while + # decreasing the time step + assert np.all(np.diff(rmse, axis=0) < 0) diff --git a/tests/pytest_integration/pytest_integration_vensim_pathway.py b/tests/pytest_integration/pytest_integration_vensim_pathway.py index ce8e28d3..71c3fb29 100644 --- a/tests/pytest_integration/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/pytest_integration_vensim_pathway.py @@ -314,6 +314,10 @@ "folder": "parentheses", "file": "test_parens.mdl" }, + "partial_range_definitions": { + "folder": "partial_range_definitions", + "file": "test_partial_range_definitions.mdl" + }, "reference_capitalization": { "folder": "reference_capitalization", "file": "test_reference_capitalization.mdl" @@ -546,6 +550,10 @@ "folder": "vector_select", "file": "test_vector_select.mdl" }, + "with_lookup": { + "folder": "with_lookup", + "file": "test_with_lookup.mdl" + }, "xidz_zidz": { "folder": "xidz_zidz", "file": "xidz_zidz.mdl" diff --git a/tests/pytest_pysd/pytest_cli.py b/tests/pytest_pysd/pytest_cli.py index 636074ab..7770d247 100644 --- a/tests/pytest_pysd/pytest_cli.py +++ b/tests/pytest_pysd/pytest_cli.py @@ -100,7 +100,7 @@ def test_read_not_valid_output(self, _root): stderr = out.stderr.decode(encoding_stderr) assert out.returncode != 0 assert f"PySD: error: when parsing {out_xls_file}" in stderr - assert "The output file name must be .tab or .csv..." in stderr + assert "The output file name must be .tab, .csv or .nc..." in stderr def test_read_not_valid_time_stamps(self): diff --git a/tests/pytest_pysd/pytest_output.py b/tests/pytest_pysd/pytest_output.py new file mode 100644 index 00000000..de45856f --- /dev/null +++ b/tests/pytest_pysd/pytest_output.py @@ -0,0 +1,514 @@ +from pathlib import Path + +import pytest +import numpy as np +import pandas as pd +import xarray as xr +import netCDF4 as nc + +from pysd.tools.benchmarking import assert_frames_close +from pysd.py_backend.output import OutputHandlerInterface, DatasetHandler, \ + DataFrameHandler, ModelOutput + + +test_model_look = Path( + "test-models/tests/get_lookups_subscripted_args/" + "test_get_lookups_subscripted_args.mdl" +) +test_model_constants = Path( + "test-models/tests/get_constants_subranges/" + "test_get_constants_subranges.mdl" +) +test_model_numeric_coords = Path( + "test-models/tests/subscript_1d_arrays/" + "test_subscript_1d_arrays.mdl" +) +test_variable_step = Path( + "test-models/tests/control_vars/" + "test_control_vars.mdl" +) +test_partial_definitions = Path( + "test-models/tests/partial_range_definitions/" + "test_partial_range_definitions.mdl" +) + + +class TestOutput(): + + def test_output_handler_interface(self): + # when the class does not inherit from OutputHandlerInterface, it must + # implement all the interface to be a subclass of + # OutputHandlerInterface. + # Add any additional Handler here. 
+ assert issubclass(DatasetHandler, OutputHandlerInterface) + assert issubclass(DataFrameHandler, OutputHandlerInterface) + + class ThatFollowsInterface: + """ + This class does not inherit from OutputHandlerInterface, but it + overrides all its methods (it follows the interface). + """ + def process_output(self, out_file): + pass + + def initialize(self, model, capture_elements): + pass + + def update(self, model, capture_elements): + pass + + def postprocess(self, **kwargs): + pass + + def add_run_elements(self, capture_elements): + pass + + # even though it does not inherit from OutputHandlerInterface, it is + # considered a subclass, because it follows the interface + assert issubclass(ThatFollowsInterface, OutputHandlerInterface) + + class IncompleteHandler: + """ + Class that does not follow the full interface + (add_run_elements is missing). + """ + def initialize(self, model, capture_elements): + pass + + def update(self, model, capture_elements): + pass + + def postprocess(self, **kwargs): + pass + + # It does not inherit from OutputHandlerInterface and does not fulfill + # its interface + assert not issubclass(IncompleteHandler, OutputHandlerInterface) + + class EmptyHandler(OutputHandlerInterface): + """ + When the class DOES inherit from OutputHandlerInterface, but does + not override all its abstract methods, then it cannot be + instantiated + """ + pass + + # it is a subclass because it inherits from it + assert issubclass(EmptyHandler, OutputHandlerInterface) + + # it cannot be instantiated because it does not override all abstract + # methods + with pytest.raises(TypeError): + EmptyHandler() + + # calling methods that are not overridden raises NotImplementedError + # this should never happen, because these methods are instance methods, + # therefore the class needs to be instantiated first + with pytest.raises(NotImplementedError): + EmptyHandler.initialize(EmptyHandler, "model", "capture") + + with pytest.raises(NotImplementedError): + EmptyHandler.process_output(EmptyHandler, "out_file") + + with pytest.raises(NotImplementedError): + EmptyHandler.update(EmptyHandler, "model", "capture") + + with pytest.raises(NotImplementedError): + EmptyHandler.postprocess(EmptyHandler) + + with pytest.raises(NotImplementedError): + EmptyHandler.add_run_elements( + EmptyHandler, "model", "capture") + + @pytest.mark.parametrize("model_path", [test_model_look]) + def test_invalid_output_file(self, model): + error_message = "Paths must be strings or pathlib Path objects."
+ with pytest.raises(TypeError, match=error_message): + model.run(output_file=1234) + + error_message = "Unsupported output file format .txt" + with pytest.raises(ValueError, match=error_message): + model.run(output_file="file.txt") + + @pytest.mark.parametrize( + "model_path,dims,values", + [ + ( + test_model_look, + { + "Rows": 2, + "Dim": 2, + "time": 61 + }, + { + "lookup_1d_time": (("time",), None), + "d2d": (("time", "Rows", "Dim"), None), + "initial_time": (tuple(), 0), + "final_time": (tuple(), 30), + "saveper": (tuple(), 0.5), + "time_step": (tuple(), 0.5) + } + + ), + ( + test_model_constants, + { + "dim1": 5, + "dim1a": 2, + "dim1c": 3, + 'time': 2 + }, + { + "constant": ( + ("dim1",), + np.array([0., 0., 1., 15., 50.]) + ) + } + ), + ( + test_model_numeric_coords, + { + "One Dimensional Subscript": 3, + 'time': 101 + }, + { + "rate_a": ( + ("One Dimensional Subscript",), + np.array([0.01, 0.02, 0.03])), + "stock_a": ( + ("time", "One Dimensional Subscript"), + np.array([ + np.arange(0, 1.0001, 0.01), + np.arange(0, 2.0001, 0.02), + np.arange(0, 3.0001, 0.03)], + dtype=float).transpose()), + "time": (("time",), np.arange(0.0, 101.0, 1.0)) + } + ), + ( + test_variable_step, + { + "time": 25 + }, + { + "final_time": ( + ("time",), + np.array([ + 10., 10., 10., 10., 10., 10., + 50., 50., 50., 50., 50., 50., + 50., 50., 50., 50., 50., 50., + 50., 50., 50., 50., 50., 50., 50. + ])), + "initial_time": ( + ("time",), + np.array([ + 0., 0., 0., 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, + 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2 + ])), + "time_step": ( + ("time",), + np.array([ + 1., 1., 1., 1., 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 + ])), + "saveper": ( + ("time",), + np.array([ + 1., 1., 1., 1., 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 5., 5., + 5., 5., 5., 5., 5., 5., 5.])) + } + ), + ( + test_partial_definitions, + { + "my range": 5, + "time": 11 + }, + { + "partial_data": (("time", "my range"), None), + "partial_constants": (("my range",), None) + } + ) + ], + ids=["lookups", "constants", "numeric_coords", "variable_step", + "partial_definitions"] + ) + @pytest.mark.filterwarnings("ignore") + def test_output_nc(self, tmp_path, model, dims, values): + + out_file = tmp_path.joinpath("results.nc") + + model.run(output_file=out_file) + + with nc.Dataset(out_file, "r") as ds: + assert ds.ncattrs() == [ + 'description', 'model_file', 'timestep', 'initial_time', + 'final_time'] + assert list(ds.dimensions) == list(dims) + # dimensions are stored as variables + for dim, n in dims.items(): + # check dimension size + assert ds[dim].size == n + assert dim in ds.variables.keys() + # check dimension type + if dim != "time": + assert ds[dim].dtype in ["S1", str] + else: + assert ds[dim].dtype == float + + for var, (dim, val) in values.items(): + # check variable dimensions + assert ds[var].dimensions == dim + if val is not None: + # check variable values if given + assert np.all(np.isclose(ds[var][:].data, val)) + + # Check variable attributes + doc = model.doc + doc.set_index("Py Name", drop=False, inplace=True) + doc.drop(columns=["Subscripts", "Limits"], inplace=True) + + for var in doc["Py Name"]: + if doc.loc[var, "Type"] == "Lookup": + continue + for key in doc.columns: + assert getattr(ds[var], key) == (doc.loc[var, key] + or "Missing") + + @pytest.mark.parametrize( + "model_path,fmt,sep", + [ + (test_model_look, "csv", ","), + (test_model_look, "tab", "\t")]) + 
@pytest.mark.filterwarnings("ignore") + def test_output_csv(self, fmt, sep, capsys, model, tmp_path): + out_file = tmp_path.joinpath("results." + fmt) + + model.run(output_file=out_file) + + captured = capsys.readouterr() # capture stdout + assert f"Data saved in '{out_file}'" in captured.out + + df = pd.read_csv(out_file, sep=sep) + + assert df["Time"].iloc[-1] == model["final_time"] + assert df["Time"].iloc[0] == model["initial_time"] + assert df.shape == (61, 51) + assert not df.isnull().values.any() + assert "lookup 3d time[B;Row1]" in df.columns or \ + "lookup 3d time[B,Row1]" in df.columns + + @pytest.mark.parametrize("model_path", [test_model_look]) + def test_dataset_handler_step_setter(self, tmp_path, model): + capture_elements = set() + results = tmp_path.joinpath("results.nc") + output = ModelOutput(model, capture_elements, results) + + # Dataset handler step cannot be modified from the outside + with pytest.raises(AttributeError): + output.handler.step = 5 + + with pytest.raises(AttributeError): + output.handler.__update_step() + + assert output.handler.step == 0 + + def test_make_flat_df(self): + + df = pd.DataFrame(index=[1], columns=['elem1']) + df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2'])] + + expected = pd.DataFrame(index=[1], data={'Elem1[B,F]': 6.}) + + return_addresses = { + 'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})} + + actual = DataFrameHandler.make_flat_df(df, return_addresses) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) + + def test_make_flat_df_0dxarray(self): + + df = pd.DataFrame(index=[1], columns=['elem1']) + df.at[1] = [xr.DataArray(5)] + + expected = pd.DataFrame(index=[1], data={'Elem1': 5.}) + + return_addresses = {'Elem1': ('elem1', {})} + + actual = DataFrameHandler.make_flat_df( + df, return_addresses, flatten=True) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) + + def test_make_flat_df_nosubs(self): + + df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) + df.at[1] = [25, 13] + + expected = pd.DataFrame(index=[1], columns=['Elem1', 'Elem2']) + expected.at[1] = [25, 13] + + return_addresses = {'Elem1': ('elem1', {}), + 'Elem2': ('elem2', {})} + + actual = DataFrameHandler.make_flat_df(df, return_addresses) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + assert all(actual['Elem1'] == expected['Elem1']) + assert all(actual['Elem2'] == expected['Elem2']) + + def test_make_flat_df_return_array(self): + """ There could be cases where we want to + return a whole section of an array - ie, by passing in only part of + the simulation dictionary. 
in this case, we can't force to float...""" + + df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) + df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2']), + xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2'])] + + expected = pd.DataFrame(index=[1], columns=['Elem1[A, Dim2]', 'Elem2']) + expected.at[1] = [xr.DataArray([[1, 2, 3]], + {'Dim1': ['A'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2']), + xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2'])] + + return_addresses = { + 'Elem1[A, Dim2]': ('elem1', {'Dim1': ['A'], + 'Dim2': ['D', 'E', 'F']}), + 'Elem2': ('elem2', {})} + + actual = DataFrameHandler.make_flat_df(df, return_addresses) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + # need to assert one by one as they are xarrays + assert actual.loc[1, 'Elem1[A, Dim2]'].equals( + expected.loc[1, 'Elem1[A, Dim2]']) + assert actual.loc[1, 'Elem2'].equals(expected.loc[1, 'Elem2']) + + def test_make_flat_df_flatten(self): + + df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) + df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2']), + xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2'])] + + expected = pd.DataFrame(index=[1], columns=[ + 'Elem1[A,D]', + 'Elem1[A,E]', + 'Elem1[A,F]', + 'Elem2[A,D]', + 'Elem2[A,E]', + 'Elem2[A,F]', + 'Elem2[B,D]', + 'Elem2[B,E]', + 'Elem2[B,F]', + 'Elem2[C,D]', + 'Elem2[C,E]', + 'Elem2[C,F]']) + + expected.at[1] = [1, 2, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + return_addresses = { + 'Elem1[A,Dim2]': ('elem1', {'Dim1': ['A'], + 'Dim2': ['D', 'E', 'F']}), + 'Elem2': ('elem2', {})} + + actual = DataFrameHandler.make_flat_df( + df, return_addresses, flatten=True) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + # need to assert one by one as they are xarrays + for col in set(expected.columns): + assert actual.loc[:, col].values == expected.loc[:, col].values + + def test_make_flat_df_flatten_transposed(self): + + df = pd.DataFrame(index=[1], columns=['elem2']) + df.at[1] = [ + xr.DataArray( + [[1, 4, 7], [2, 5, 8], [3, 6, 9]], + {'Dim2': ['D', 'E', 'F'], 'Dim1': ['A', 'B', 'C']}, + ['Dim2', 'Dim1'] + ).transpose("Dim1", "Dim2") + ] + + expected = pd.DataFrame(index=[1], columns=[ + 'Elem2[A,D]', + 'Elem2[A,E]', + 'Elem2[A,F]', + 'Elem2[B,D]', + 'Elem2[B,E]', + 'Elem2[B,F]', + 'Elem2[C,D]', + 'Elem2[C,E]', + 'Elem2[C,F]']) + + expected.at[1] = [1, 2, 3, 4, 5, 6, 7, 8, 9] + + return_addresses = { + 'Elem2': ('elem2', {})} + + actual = DataFrameHandler.make_flat_df( + df, return_addresses, flatten=True) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + # need to assert one by one as they are xarrays + for col in set(expected.columns): + assert actual.loc[:, col].values == expected.loc[:, col].values + + def test_make_flat_df_times(self): + + df = pd.DataFrame(index=[1, 2], columns=['elem1']) + df['elem1'] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + {'Dim1': ['A', 'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2']), + xr.DataArray([[2, 4, 6], [8, 10, 12], [14, 16, 19]], + {'Dim1': ['A', 
'B', 'C'], + 'Dim2': ['D', 'E', 'F']}, + dims=['Dim1', 'Dim2'])] + + expected = pd.DataFrame([{'Elem1[B,F]': 6}, {'Elem1[B,F]': 12}]) + expected.index = [1, 2] + + return_addresses = {'Elem1[B,F]': ('elem1', {'Dim1': ['B'], + 'Dim2': ['F']})} + actual = DataFrameHandler.make_flat_df(df, return_addresses) + + # check all columns are in the DataFrame + assert set(actual.columns) == set(expected.columns) + assert set(actual.index) == set(expected.index) + assert all(actual['Elem1[B,F]'] == expected['Elem1[B,F]']) diff --git a/tests/pytest_pysd/pytest_pysd.py b/tests/pytest_pysd/pytest_pysd.py index 079723a1..f87fecaa 100644 --- a/tests/pytest_pysd/pytest_pysd.py +++ b/tests/pytest_pysd/pytest_pysd.py @@ -1,10 +1,10 @@ from pathlib import Path -from warnings import simplefilter, catch_warnings import pytest import pandas as pd import numpy as np import xarray as xr +import netCDF4 as nc from pysd.tools.benchmarking import assert_frames_close @@ -13,18 +13,17 @@ # TODO replace test paths by fixtures and translate and run the models # in temporal directories -_root = Path(__file__).parent.parent - -test_model = _root.joinpath("test-models/samples/teacup/teacup.mdl") -test_model_subs = _root.joinpath( +test_model = Path("test-models/samples/teacup/teacup.mdl") +test_model_subs = Path( "test-models/tests/subscript_2d_arrays/test_subscript_2d_arrays.mdl") -test_model_look = _root.joinpath( +test_model_look = Path( "test-models/tests/get_lookups_subscripted_args/" + "test_get_lookups_subscripted_args.mdl") -test_model_data = _root.joinpath( +test_model_data = Path( "test-models/tests/get_data_args_3d_xls/test_get_data_args_3d_xls.mdl") -more_tests = _root.joinpath("more-tests/") + +more_tests = Path("more-tests") test_model_constant_pipe = more_tests.joinpath( "constant_pipeline/test_constant_pipeline.mdl") @@ -32,8 +31,8 @@ class TestPySD(): - def test_run(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run(self, model): stocks = model.run() # return a dataframe assert isinstance(stocks, pd.DataFrame) @@ -44,7 +43,7 @@ def test_run(self): # there are no null values in the set assert stocks.notnull().all().all() - def test_run_ignore_missing(self): + def test_run_ignore_missing(self, _root): model_mdl = _root.joinpath( 'test-models/tests/get_with_missing_values_xlsx/' + 'test_get_with_missing_values_xlsx.mdl') @@ -71,32 +70,46 @@ def test_run_ignore_missing(self): # errors for missing values pysd.load(model_py, missing_values="raise") - def test_run_includes_last_value(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_includes_last_value(self, model): res = model.run() assert res.index[-1] == model.components.final_time() - def test_run_build_timeseries(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_build_timeseries(self, model): res = model.run(final_time=7, time_step=2, initial_condition=(3, {})) actual = list(res.index) expected = [3.0, 5.0, 7.0] assert actual == expected - def test_run_progress(self): + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_progress(self, model): # same as test_run but with progressbar - model = pysd.read_vensim(test_model) stocks = model.run(progress=True) assert isinstance(stocks, pd.DataFrame) assert "Teacup Temperature" in stocks.columns.values assert len(stocks) > 3 assert stocks.notnull().all().all() - def test_run_return_timestamps(self): - 
"""Addresses https://github.com/JamesPHoughton/pysd/issues/17""" + @pytest.mark.parametrize( + "model_path", + [Path("test-models/tests/control_vars/test_control_vars.mdl")]) + def test_run_progress_dynamic(self, model): + # same as test_run but with progressbar + warn_message = r"The progressbar is not compatible with dynamic "\ + r"final time or time step\. Both variables must be "\ + r"constants to prompt progress\." + with pytest.warns(UserWarning, match=warn_message): + stocks = model.run(progress=True) + assert isinstance(stocks, pd.DataFrame) + for var in ["FINAL TIME", "TIME STEP"]: + # assert that control variables have change + assert len(np.unique(stocks[var].values)) > 1 - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_return_timestamps(self, model): + """Addresses https://github.com/SDXorg/pysd/issues/17""" timestamps = np.random.randint(1, 5, 5).cumsum() stocks = model.run(return_timestamps=timestamps) assert (stocks.index.values == timestamps).all() @@ -145,51 +158,50 @@ def test_run_return_timestamps(self): assert 0.95 not in stocks.index assert 0.55 not in stocks.index - def test_run_return_timestamps_past_final_time(self): - """ If the user enters a timestamp that is longer than the euler + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_return_timestamps_past_final_time(self, model): + """ + If the user enters a timestamp that is longer than the euler timeseries that is defined by the normal model file, should - extend the euler series to the largest timestamp""" - - model = pysd.read_vensim(test_model) + extend the euler series to the largest timestamp + """ return_timestamps = list(range(0, 100, 10)) stocks = model.run(return_timestamps=return_timestamps) assert return_timestamps == list(stocks.index) - def test_return_timestamps_with_range(self): + @pytest.mark.parametrize("model_path", [test_model]) + def test_return_timestamps_with_range(self, model): """ Tests that return timestamps may receive a 'range'. It will be cast to a numpy array in the end... 
""" - - model = pysd.read_vensim(test_model) return_timestamps = range(0, 31, 10) stocks = model.run(return_timestamps=return_timestamps) assert list(return_timestamps) == list(stocks.index) - def test_run_return_columns_original_names(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/26 - - Also checks that columns are returned in the correct order""" - - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_return_columns_original_names(self, model): + """ + Addresses https://github.com/SDXorg/pysd/issues/26 + - Also checks that columns are returned in the correct order + """ return_columns = ["Room Temperature", "Teacup Temperature"] result = model.run(return_columns=return_columns) assert set(result.columns) == set(return_columns) - def test_run_return_columns_step(self): + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_return_columns_step(self, model): """ Return only cache 'step' variables """ - model = pysd.read_vensim(test_model) result = model.run(return_columns='step') assert set(result.columns)\ == {'Teacup Temperature', 'Heat Loss to Room'} - def test_run_reload(self): - """ Addresses https://github.com/JamesPHoughton/pysd/issues/99""" - - model = pysd.read_vensim(test_model) - + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_reload(self, model): + """Addresses https://github.com/SDXorg/pysd/issues/99""" result0 = model.run() result1 = model.run(params={"Room Temperature": 1000}) result2 = model.run() @@ -199,24 +211,23 @@ def test_run_reload(self): assert not (result0 == result1).all().all() assert (result1 == result2).all().all() - def test_run_return_columns_pysafe_names(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/26""" - - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_run_return_columns_pysafe_names(self, model): + """Addresses https://github.com/SDXorg/pysd/issues/26""" return_columns = ["room_temperature", "teacup_temperature"] result = model.run(return_columns=return_columns) assert set(result.columns) == set(return_columns) - def test_initial_conditions_invalid(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_initial_conditions_invalid(self, model): error_message = r"Invalid initial conditions\. "\ r"Check documentation for valid entries or use "\ r"'help\(model\.set_initial_condition\)'\." 
with pytest.raises(TypeError, match=error_message): model.run(initial_condition=["this is not valid"]) - def test_initial_conditions_tuple_pysafe_names(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_initial_conditions_tuple_pysafe_names(self, model): stocks = model.run( initial_condition=(3000, {"teacup_temperature": 33}), return_timestamps=list(range(3000, 3010)) @@ -224,10 +235,9 @@ def test_initial_conditions_tuple_pysafe_names(self): assert stocks["Teacup Temperature"].iloc[0] == 33 - def test_initial_conditions_tuple_original_names(self): - """ Responds to https://github.com/JamesPHoughton/pysd/issues/77""" - - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_initial_conditions_tuple_original_names(self, model): + """ Responds to https://github.com/SDXorg/pysd/issues/77""" stocks = model.run( initial_condition=(3000, {"Teacup Temperature": 33}), return_timestamps=list(range(3000, 3010)), @@ -235,8 +245,8 @@ def test_initial_conditions_tuple_original_names(self): assert stocks.index[0] == 3000 assert stocks["Teacup Temperature"].iloc[0] == 33 - def test_initial_conditions_current(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_initial_conditions_current(self, model): stocks1 = model.run(return_timestamps=list(range(0, 31))) stocks2 = model.run( initial_condition="current", return_timestamps=list(range(30, 45)) @@ -244,27 +254,23 @@ def test_initial_conditions_current(self): assert stocks1["Teacup Temperature"].iloc[-1]\ == stocks2["Teacup Temperature"].iloc[0] - def test_initial_condition_bad_value(self): - model = pysd.read_vensim(test_model) - + @pytest.mark.parametrize("model_path", [test_model]) + def test_initial_condition_bad_value(self, model): with pytest.raises(FileNotFoundError): model.run(initial_condition="bad value") - def test_initial_conditions_subscripted_value_with_numpy_error(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_initial_conditions_subscripted_value_with_numpy_error(self, + model): input_ = np.array([[5, 3], [4, 8], [9, 3]]) - - model = pysd.read_vensim(test_model_subs) - with pytest.raises(TypeError): model.run(initial_condition=(5, {'stock_a': input_}), return_columns=['stock_a'], return_timestamps=list(range(5, 10))) - def test_set_constant_parameter(self): - """ In response to: - re: https://github.com/JamesPHoughton/pysd/issues/5""" - - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_constant_parameter(self, model): + """Responds to https://github.com/SDXorg/pysd/issues/5""" model.set_components({"room_temperature": 20}) assert model.components.room_temperature() == 20 @@ -274,8 +280,8 @@ def test_set_constant_parameter(self): with pytest.raises(NameError): model.set_components({'not_a_var': 20}) - def test_set_constant_parameter_inline(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_constant_parameter_inline(self, model): model.components.room_temperature = 20 assert model.components.room_temperature() == 20 @@ -285,8 +291,8 @@ def test_set_constant_parameter_inline(self): with pytest.raises(NameError): model.components.not_a_var = 20 - def test_set_timeseries_parameter(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_timeseries_parameter(self, model): 
timeseries = list(range(30)) temp_timeseries = pd.Series( index=timeseries, @@ -299,8 +305,8 @@ def test_set_timeseries_parameter(self): ) assert (res["room_temperature"] == temp_timeseries).all() - def test_set_timeseries_parameter_inline(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_timeseries_parameter_inline(self, model): timeseries = list(range(30)) temp_timeseries = pd.Series( index=timeseries, @@ -313,37 +319,35 @@ def test_set_timeseries_parameter_inline(self): ) assert (res["room_temperature"] == temp_timeseries).all() - def test_set_component_with_real_name(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_component_with_real_name(self, model): model.set_components({"Room Temperature": 20}) assert model.components.room_temperature() == 20 model.run(params={"Room Temperature": 70}) assert model.components.room_temperature() == 70 - def test_set_components_warnings(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/80""" - - model = pysd.read_vensim(test_model) - with catch_warnings(record=True) as w: - simplefilter("always") + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_components_warnings(self, model): + """Addresses https://github.com/SDXorg/pysd/issues/80""" + warn_message = r"Replacing the equation of stock "\ + r"'Teacup Temperature' with params\.\.\." + with pytest.warns(UserWarning, match=warn_message): model.set_components( {"Teacup Temperature": 20, "Characteristic Time": 15} ) # set stock value using params - # check that warning references the stock - assert "Teacup Temperature" in str(w[0].message) - - def test_set_components_with_function(self): + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_components_with_function(self, model): def test_func(): return 5 - model = pysd.read_vensim(test_model) model.set_components({"Room Temperature": test_func}) res = model.run(return_columns=["Room Temperature"]) assert test_func() == res["Room Temperature"].iloc[0] - def test_set_subscripted_value_with_constant(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_value_with_constant(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -351,13 +355,13 @@ def test_set_subscripted_value_with_constant(self): dims = ["One Dimensional Subscript", "Second Dimension Subscript"] output = xr.DataArray([[5, 5], [5, 5], [5, 5]], coords, dims) - model = pysd.read_vensim(test_model_subs) model.set_components({"initial_values": 5, "final_time": 10}) res = model.run( return_columns=["Initial Values"], flatten_output=False) assert output.equals(res["Initial Values"].iloc[0]) - def test_set_subscripted_value_with_partial_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_value_with_partial_xarray(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -370,13 +374,13 @@ def test_set_subscripted_value_with_partial_xarray(self): ["Second Dimension Subscript"], ) - model = pysd.read_vensim(test_model_subs) model.set_components({"Initial Values": input_val, "final_time": 10}) res = model.run( return_columns=["Initial Values"], flatten_output=False) assert output.equals(res["Initial Values"].iloc[0]) - def 
test_set_subscripted_value_with_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_value_with_xarray(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -384,160 +388,149 @@ def test_set_subscripted_value_with_xarray(self): dims = ["One Dimensional Subscript", "Second Dimension Subscript"] output = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - model = pysd.read_vensim(test_model_subs) model.set_components({"initial_values": output, "final_time": 10}) res = model.run( return_columns=["Initial Values"], flatten_output=False) assert output.equals(res["Initial Values"].iloc[0]) - def test_set_parameter_data(self): - model = pysd.read_vensim(test_model_data) + @pytest.mark.parametrize("model_path", [test_model_data]) + @pytest.mark.filterwarnings("ignore") + def test_set_parameter_data(self, model): timeseries = list(range(31)) series = pd.Series( index=timeseries, data=(50+np.random.rand(len(timeseries)).cumsum()) ) - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - model.set_components({"data_backward": 20, "data_forward": 70}) - - out = model.run( - return_columns=["data_backward", "data_forward"], - flatten_output=False) - - for time in out.index: - assert (out["data_backward"][time] == 20).all() - assert (out["data_forward"][time] == 70).all() - - out = model.run( - return_columns=["data_backward", "data_forward"], - final_time=20, time_step=1, saveper=1, - params={"data_forward": 30, "data_backward": series}, - flatten_output=False) - - for time in out.index: - assert (out["data_forward"][time] == 30).all() - assert (out["data_backward"][time] == series[time]).all() - - def test_set_constant_parameter_lookup(self): - model = pysd.read_vensim(test_model_look) - - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - model.set_components({"lookup_1d": 20}) - for i in range(100): - assert model.components.lookup_1d(i) == 20 - - model.run(params={"lookup_1d": 70}, final_time=1) - for i in range(100): - assert model.components.lookup_1d(i) == 70 - - model.set_components({"lookup_2d": 20}) - for i in range(100): - assert model.components.lookup_2d(i).equals( - xr.DataArray(20, {"Rows": ["Row1", "Row2"]}, ["Rows"]) - ) + model.set_components({"data_backward": 20, "data_forward": 70}) + + out = model.run( + return_columns=["data_backward", "data_forward"], + flatten_output=False) + + for time in out.index: + assert (out["data_backward"][time] == 20).all() + assert (out["data_forward"][time] == 70).all() + + out = model.run( + return_columns=["data_backward", "data_forward"], + final_time=20, time_step=1, saveper=1, + params={"data_forward": 30, "data_backward": series}, + flatten_output=False) + + for time in out.index: + assert (out["data_forward"][time] == 30).all() + assert (out["data_backward"][time] == series[time]).all() + + @pytest.mark.parametrize("model_path", [test_model_look]) + @pytest.mark.filterwarnings("ignore") + def test_set_constant_parameter_lookup(self, model): + model.set_components({"lookup_1d": 20}) + for i in range(100): + assert model.components.lookup_1d(i) == 20 + + model.run(params={"lookup_1d": 70}, final_time=1) + for i in range(100): + assert model.components.lookup_1d(i) == 70 + + model.set_components({"lookup_2d": 20}) + for i in range(100): + assert model.components.lookup_2d(i).equals( + xr.DataArray(20, {"Rows": ["Row1", 
"Row2"]}, ["Rows"]) + ) - model.run(params={"lookup_2d": 70}, final_time=1) - for i in range(100): - assert model.components.lookup_2d(i).equals( - xr.DataArray(70, {"Rows": ["Row1", "Row2"]}, ["Rows"]) - ) + model.run(params={"lookup_2d": 70}, final_time=1) + for i in range(100): + assert model.components.lookup_2d(i).equals( + xr.DataArray(70, {"Rows": ["Row1", "Row2"]}, ["Rows"]) + ) - xr1 = xr.DataArray([-10, 50], {"Rows": ["Row1", "Row2"]}, ["Rows"]) - model.set_components({"lookup_2d": xr1}) - for i in range(100): - assert model.components.lookup_2d(i).equals(xr1) + xr1 = xr.DataArray([-10, 50], {"Rows": ["Row1", "Row2"]}, ["Rows"]) + model.set_components({"lookup_2d": xr1}) + for i in range(100): + assert model.components.lookup_2d(i).equals(xr1) - xr2 = xr.DataArray([-100, 500], {"Rows": ["Row1", "Row2"]}, - ["Rows"]) - model.run(params={"lookup_2d": xr2}, final_time=1) - for i in range(100): - assert model.components.lookup_2d(i).equals(xr2) + xr2 = xr.DataArray([-100, 500], {"Rows": ["Row1", "Row2"]}, ["Rows"]) + model.run(params={"lookup_2d": xr2}, final_time=1) + for i in range(100): + assert model.components.lookup_2d(i).equals(xr2) - def test_set_timeseries_parameter_lookup(self): - model = pysd.read_vensim(test_model_look) + @pytest.mark.parametrize("model_path", [test_model_look]) + @pytest.mark.filterwarnings("ignore") + def test_set_timeseries_parameter_lookup(self, model): timeseries = list(range(30)) - with catch_warnings(): - # avoid warnings related to extrapolation - simplefilter("ignore") - temp_timeseries = pd.Series( - index=timeseries, data=(50 + - np.random.rand(len(timeseries) - ).cumsum()) - ) + temp_timeseries = pd.Series( + index=timeseries, + data=(50+np.random.rand(len(timeseries)).cumsum()) + ) - res = model.run( - params={"lookup_1d": temp_timeseries}, - return_columns=["lookup_1d_time"], - return_timestamps=timeseries, - flatten_output=False - ) + res = model.run( + params={"lookup_1d": temp_timeseries}, + return_columns=["lookup_1d_time"], + return_timestamps=timeseries, + flatten_output=False + ) - assert (res["lookup_1d_time"] == temp_timeseries).all() + assert (res["lookup_1d_time"] == temp_timeseries).all() - res = model.run( - params={"lookup_2d": temp_timeseries}, - return_columns=["lookup_2d_time"], - return_timestamps=timeseries, - flatten_output=False - ) + res = model.run( + params={"lookup_2d": temp_timeseries}, + return_columns=["lookup_2d_time"], + return_timestamps=timeseries, + flatten_output=False + ) - assert all( - [ - a.equals(xr.DataArray(b, {"Rows": ["Row1", "Row2"]}, - ["Rows"])) - for a, b in zip(res["lookup_2d_time"].values, - temp_timeseries) - ] - ) + assert all( + [ + a.equals(xr.DataArray(b, {"Rows": ["Row1", "Row2"]}, ["Rows"])) + for a, b in zip(res["lookup_2d_time"].values, + temp_timeseries) + ] + ) - temp_timeseries2 = pd.Series( - index=timeseries, - data=[ - xr.DataArray([50 + x, 20 - y], {"Rows": ["Row1", "Row2"]}, - ["Rows"]) - for x, y in zip( - np.random.rand(len(timeseries)).cumsum(), - np.random.rand(len(timeseries)).cumsum(), - ) - ], - ) + temp_timeseries2 = pd.Series( + index=timeseries, + data=[ + xr.DataArray( + [50 + x, 20 - y], {"Rows": ["Row1", "Row2"]}, ["Rows"] + ) + for x, y in zip( + np.random.rand(len(timeseries)).cumsum(), + np.random.rand(len(timeseries)).cumsum(), + ) + ], + ) - res = model.run( - params={"lookup_2d": temp_timeseries2}, - return_columns=["lookup_2d_time"], - return_timestamps=timeseries, - flatten_output=False - ) + res = model.run( + params={"lookup_2d": temp_timeseries2}, + 
return_columns=["lookup_2d_time"], + return_timestamps=timeseries, + flatten_output=False + ) - assert all( - [ - a.equals(b) - for a, b in zip(res["lookup_2d_time"].values, - temp_timeseries2) - ] - ) + assert all( + [ + a.equals(b) + for a, b in zip(res["lookup_2d_time"].values, + temp_timeseries2) + ] + ) - def test_set_subscripted_value_with_numpy_error(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_value_with_numpy_error(self, model): input_ = np.array([[5, 3], [4, 8], [9, 3]]) - - model = pysd.read_vensim(test_model_subs) with pytest.raises(TypeError): model.set_components({"initial_values": input_, "final_time": 10}) - def test_set_subscripted_timeseries_parameter_with_constant(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_timeseries_parameter_with_constant(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], } dims = ["One Dimensional Subscript", "Second Dimension Subscript"] - model = pysd.read_vensim(test_model_subs) timeseries = list(range(10)) val_series = [50 + rd for rd in np.random.rand(len(timeseries) ).cumsum()] @@ -558,7 +551,9 @@ def test_set_subscripted_timeseries_parameter_with_constant(self): ] ) - def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_timeseries_parameter_with_partial_xarray(self, + model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -571,7 +566,6 @@ def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): ["Second Dimension Subscript"], ) - model = pysd.read_vensim(test_model_subs) timeseries = list(range(10)) val_series = [input_val + rd for rd in np.random.rand(len(timeseries) ).cumsum()] @@ -588,7 +582,8 @@ def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): ] ) - def test_set_subscripted_timeseries_parameter_with_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_subscripted_timeseries_parameter_with_xarray(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -597,7 +592,6 @@ def test_set_subscripted_timeseries_parameter_with_xarray(self): init_val = xr.DataArray([[5, 3], [4, 8], [9, 3]], coords, dims) - model = pysd.read_vensim(test_model_subs) timeseries = list(range(10)) temp_timeseries = pd.Series( index=timeseries, @@ -620,10 +614,9 @@ def test_set_subscripted_timeseries_parameter_with_xarray(self): ] ) - def test_docs(self): + @pytest.mark.parametrize("model_path", [test_model]) + def test_docs(self, model): """ Test that the model prints some documentation """ - - model = pysd.read_vensim(test_model) assert isinstance(str(model), str) # tests string conversion of doc = model.doc assert isinstance(doc, pd.DataFrame) @@ -747,8 +740,8 @@ def downstream(run_hist, res_hist): assert run_history == ["U", "D", "D", "D", "U"] assert result_history == ["up", "down", "up", "down", "up", "down"] - def test_initialize(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_initialize(self, model): initial_temp = model.components.teacup_temperature() model.run() final_temp = model.components.teacup_temperature() @@ -757,8 +750,8 @@ def 
test_initialize(self): assert initial_temp != final_temp assert initial_temp == reset_temp - def test_initialize_order(self): - model = pysd.load(more_tests.joinpath( + def test_initialize_order(self, _root): + model = pysd.load(_root / more_tests.joinpath( "initialization_order/test_initialization_order.py")) assert model.initialize_order == ["_integ_stock_a", "_integ_stock_b"] @@ -769,8 +762,8 @@ def test_initialize_order(self): assert model.components.stock_b() == 1 assert model.components.stock_a() == 1 - def test_set_initial_with_deps(self): - model = pysd.load(more_tests.joinpath("initialization_order/" + def test_set_initial_with_deps(self, _root): + model = pysd.load(_root / more_tests.joinpath("initialization_order/" "test_initialization_order.py")) original_a = model.components.stock_a() @@ -790,9 +783,8 @@ def test_set_initial_with_deps(self): assert model.components.stock_a() == 89 assert model.components.stock_b() == 73 - def test_set_initial_value(self): - model = pysd.read_vensim(test_model) - + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_initial_value(self, model): initial_temp = model.components.teacup_temperature() new_time = np.random.rand() @@ -817,7 +809,8 @@ def test_set_initial_value(self): with pytest.raises(NameError): model.set_initial_value(new_time, {'not_a_var': 500}) - def test_set_initial_value_subscripted_value_with_constant(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_initial_value_subscripted_value_with_constant(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -826,8 +819,6 @@ def test_set_initial_value_subscripted_value_with_constant(self): output_b = xr.DataArray([[0, 0], [0, 0], [0, 0]], coords, dims) new_time = np.random.rand() - - model = pysd.read_vensim(test_model_subs) initial_stock = model.components.stock_a() # Test that we can set with real names @@ -850,7 +841,9 @@ def test_set_initial_value_subscripted_value_with_constant(self): {'_integ_stock_a': xr.DataArray(302, {'D': ['A', 'B']}, ['D'])} ) - def test_set_initial_value_subscripted_value_with_partial_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_initial_value_subscripted_value_with_partial_xarray(self, + model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -879,7 +872,6 @@ def test_set_initial_value_subscripted_value_with_partial_xarray(self): new_time = np.random.rand() - model = pysd.read_vensim(test_model_subs) initial_stock = model.components.stock_a() # Test that we can set with real names @@ -895,7 +887,8 @@ def test_set_initial_value_subscripted_value_with_partial_xarray(self): model.set_initial_value(new_time + 2, {'_integ_stock_a': input_val3}) assert model.components.stock_a().equals(output3) - def test_set_initial_value_subscripted_value_with_xarray(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_initial_value_subscripted_value_with_xarray(self, model): coords = { "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"], "Second Dimension Subscript": ["Column 1", "Column 2"], @@ -907,7 +900,6 @@ def test_set_initial_value_subscripted_value_with_xarray(self): new_time = np.random.rand() - model = pysd.read_vensim(test_model_subs) initial_stock = model.components.stock_a() # Test that we can set with real names @@ -923,15 +915,14 @@ def 
test_set_initial_value_subscripted_value_with_xarray(self): model.set_initial_value(new_time + 2, {'_integ_stock_a': output3}) assert model.components.stock_a().equals(output3) - def test_set_initial_value_subscripted_value_with_numpy_error(self): + @pytest.mark.parametrize("model_path", [test_model_subs]) + def test_set_initial_value_subscripted_value_with_numpy_error(self, model): input1 = np.array([[5, 3], [4, 8], [9, 3]]) input2 = np.array([[53, 43], [84, 80], [29, 63]]) input3 = np.array([[54, 32], [40, 87], [93, 93]]) new_time = np.random.rand() - model = pysd.read_vensim(test_model_subs) - # Test that we can set with real names with pytest.raises(TypeError): model.set_initial_value(new_time, {'Stock A': input1}) @@ -944,16 +935,16 @@ def test_set_initial_value_subscripted_value_with_numpy_error(self): with pytest.raises(TypeError): model.set_initial_value(new_time + 2, {'_integ_stock_a': input3}) - def test_replace_element(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_replace_element(self, model): stocks1 = model.run() model.components.characteristic_time = lambda: 3 stocks2 = model.run() assert stocks1["Teacup Temperature"].loc[10]\ > stocks2["Teacup Temperature"].loc[10] - def test_set_initial_condition_origin_full(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_initial_condition_origin_full(self, model): initial_temp = model.components.teacup_temperature() initial_time = model.components.time() @@ -980,8 +971,8 @@ def test_set_initial_condition_origin_full(self): assert initial_temp == set_temp assert initial_time == set_time - def test_set_initial_condition_origin_short(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_initial_condition_origin_short(self, model): initial_temp = model.components.teacup_temperature() initial_time = model.components.time() @@ -1008,8 +999,8 @@ def test_set_initial_condition_origin_short(self): assert initial_temp == set_temp assert initial_time == set_time - def test_set_initial_condition_for_stock_component(self): - model = pysd.read_vensim(test_model) + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_initial_condition_for_stock_component(self, model): initial_temp = model.components.teacup_temperature() initial_time = model.components.time() @@ -1030,9 +1021,8 @@ def test_set_initial_condition_for_stock_component(self): assert set_time == 10 - def test_set_initial_condition_for_constant_component(self): - model = pysd.read_vensim(test_model) - + @pytest.mark.parametrize("model_path", [test_model]) + def test_set_initial_condition_for_constant_component(self, model): new_state = {"Room Temperature": 100} new_time = 10 @@ -1041,53 +1031,91 @@ def test_set_initial_condition_for_constant_component(self): with pytest.raises(ValueError, match=error_message): model.set_initial_condition((new_time, new_state)) - def test_get_args(self): - model = pysd.read_vensim(test_model) - model2 = pysd.read_vensim(test_model_look) - - assert model.get_args('Room Temperature') == [] - assert model.get_args('room_temperature') == [] - assert model.get_args('teacup_temperature') == [] - assert model.get_args('_integ_teacup_temperature') == [] - - assert model2.get_args('lookup 1d') == ['x', 'final_subs'] - assert model2.get_args('lookup_1d') == ['x', 'final_subs'] - assert model2.get_args('lookup 2d') == ['x', 'final_subs'] - assert 
model2.get_args('lookup_2d') == ['x', 'final_subs']
+    @pytest.mark.parametrize(
+        "model_path,args",
+        [
+            (
+                test_model,
+                {
+                    "Room Temperature": [],
+                    "room_temperature": [],
+                    "teacup_temperature": [],
+                    "_integ_teacup_temperature": []
+                }
+            ),
+            (
+                test_model_look,
+                {
+                    "lookup 1d": ["x", "final_subs"],
+                    "lookup_1d": ["x", "final_subs"],
+                    "lookup 2d": ["x", "final_subs"],
+                    "lookup_2d": ["x", "final_subs"],
+                }
+            )
+        ])
+    def test_get_args(self, model, args):
+        for var, arg in args.items():
+            assert model.get_args(var) == arg
 
         with pytest.raises(NameError):
-            model.get_args('not_a_var')
-
-    def test_get_coords(self):
-        coords = {
-            "One Dimensional Subscript": ["Entry 1", "Entry 2", "Entry 3"],
-            "Second Dimension Subscript": ["Column 1", "Column 2"],
-        }
-        dims = ["One Dimensional Subscript", "Second Dimension Subscript"]
-
-        coords_dims = (coords, dims)
-
-        model = pysd.read_vensim(test_model)
-        model2 = pysd.read_vensim(test_model_subs)
-
-        assert model.get_coords("Room Temperature") is None
-        assert model.get_coords("room_temperature") is None
-        assert model.get_coords("teacup_temperature") is None
-        assert model.get_coords("_integ_teacup_temperature") is None
-
-        assert model2.get_coords("Initial Values") == coords_dims
-        assert model2.get_coords("initial_values") == coords_dims
-        assert model2.get_coords("Stock A") == coords_dims
-        assert model2.get_coords("stock_a") == coords_dims
-        assert model2.get_coords("_integ_stock_a") == coords_dims
+            model.get_args("not_a_var")
+
+    @pytest.mark.parametrize(
+        "model_path,coords",
+        [
+            (
+                test_model,
+                {
+                    "Room Temperature": None,
+                    "room_temperature": None,
+                    "teacup_temperature": None,
+                    "_integ_teacup_temperature": None
+                }
+            ),
+            (
+                test_model_subs,
+                {
+                    "Initial Values": {
+                        "One Dimensional Subscript": ["Entry 1", "Entry 2",
+                                                      "Entry 3"],
+                        "Second Dimension Subscript": ["Column 1", "Column 2"]
+                    },
+                    "initial_values": {
+                        "One Dimensional Subscript": ["Entry 1", "Entry 2",
+                                                      "Entry 3"],
+                        "Second Dimension Subscript": ["Column 1", "Column 2"]
+                    },
+                    "Stock A": {
+                        "One Dimensional Subscript": ["Entry 1", "Entry 2",
+                                                      "Entry 3"],
+                        "Second Dimension Subscript": ["Column 1", "Column 2"]
+                    },
+                    "stock_a": {
+                        "One Dimensional Subscript": ["Entry 1", "Entry 2",
+                                                      "Entry 3"],
+                        "Second Dimension Subscript": ["Column 1", "Column 2"]
+                    },
+                    "_integ_stock_a": {
+                        "One Dimensional Subscript": ["Entry 1", "Entry 2",
+                                                      "Entry 3"],
+                        "Second Dimension Subscript": ["Column 1", "Column 2"]
+                    }
+                }
+            )
+        ])
+    def test_get_coords(self, model, coords):
+        for var, coord in coords.items():
+            if coord is not None:
+                coord = coord, list(coord)
+            assert model.get_coords(var) == coord
 
         with pytest.raises(NameError):
-            model.get_coords('not_a_var')
+            model.get_coords("not_a_var")
 
-    def test_getitem(self):
-        model = pysd.read_vensim(test_model)
-        model2 = pysd.read_vensim(test_model_look)
-        model3 = pysd.read_vensim(test_model_data)
+    def test_getitem(self, _root):
+        model = pysd.read_vensim(_root / test_model)
+        model2 = pysd.read_vensim(_root / test_model_look)
+        model3 = pysd.read_vensim(_root / test_model_data)
 
         coords = {'Dim': ['A', 'B'], 'Rows': ['Row1', 'Row2']}
         room_temp = 70
@@ -1117,10 +1145,10 @@ def test_getitem(self):
         model3.run()
         assert model3['data backward'].equals(data1)
 
-    def test_get_series_data(self):
-        model = pysd.read_vensim(test_model)
-        model2 = pysd.read_vensim(test_model_look)
-        model3 = pysd.read_vensim(test_model_data)
+    def test_get_series_data(self, _root):
+        model = pysd.read_vensim(_root / test_model)
+        model2 = 
pysd.read_vensim(_root / test_model_look) + model3 = pysd.read_vensim(_root / test_model_data) error_message = "Trying to get the values of a constant variable." with pytest.raises(ValueError, match=error_message): @@ -1178,17 +1206,33 @@ def test_get_series_data(self): data = model3.get_series_data('_ext_data_data_backward') assert data.equals(data_exp) - def test__integrate(self): - # Todo: think through a stronger test here... - model = pysd.read_vensim(test_model) - model.progress = False + @pytest.mark.parametrize("model_path", [test_model]) + def test__integrate(self, tmp_path, model): + from pysd.py_backend.model import ModelOutput + # TODO: think through a stronger test here... model.time.add_return_timestamps(list(range(0, 5, 2))) - res = model._integrate(capture_elements={'teacup_temperature'}) + capture_elements = {'teacup_temperature'} + + out = ModelOutput(model, capture_elements, None) + model._integrate(out) + res = out.handler.ds assert isinstance(res, pd.DataFrame) assert 'teacup_temperature' in res assert all(res.index.values == list(range(0, 5, 2))) - def test_default_returns_with_construction_functions(self): + model.reload() + model.time.add_return_timestamps(list(range(0, 5, 2))) + out = ModelOutput(model, + capture_elements, + tmp_path.joinpath("output.nc")) + model._integrate(out) + res = out.handler.ds + assert isinstance(res, nc.Dataset) + assert 'teacup_temperature' in res.variables + assert np.array_equal(res["time"][:].data, np.arange(0, 5, 2)) + res.close() + + def test_default_returns_with_construction_functions(self, _root): """ If the run function is called with no arguments, should still be able to get default return functions. @@ -1208,37 +1252,37 @@ def test_default_returns_with_construction_functions(self): "Output Delay3", } <= set(ret.columns.values) - def test_default_returns_with_lookups(self): + @pytest.mark.parametrize( + "model_path", + [Path("test-models/tests/lookups/test_lookups.mdl")]) + def test_default_returns_with_lookups(self, model): """ - Addresses https://github.com/JamesPHoughton/pysd/issues/114 + Addresses https://github.com/SDXorg/pysd/issues/114 The default settings should skip model elements with no particular return value """ - - model = pysd.read_vensim( - _root.joinpath("test-models/tests/lookups/test_lookups.mdl")) ret = model.run() assert {"accumulation", "rate", "lookup function call"}\ <= set(ret.columns.values) - def test_py_model_file(self): - """Addresses https://github.com/JamesPHoughton/pysd/issues/86""" - - model = pysd.read_vensim(test_model) - assert model.py_model_file == str(test_model.with_suffix(".py")) + @pytest.mark.parametrize("model_path", [test_model]) + def test_files(self, model, model_path, tmp_path): + """Addresses https://github.com/SDXorg/pysd/issues/86""" - def test_mdl_file(self): - """Relates to https://github.com/JamesPHoughton/pysd/issues/86""" + # Path from where the model is translated + path = tmp_path / model_path.parent.name / model_path.name - model = pysd.read_vensim(test_model) - assert model.mdl_file == str(test_model) + # Check py_model_file + assert model.py_model_file == str(path.with_suffix(".py")) + # Check mdl_file + assert model.mdl_file == str(path) class TestModelInteraction(): """ The tests in this class test pysd's interaction with itself and other modules. """ - def test_multiple_load(self): + def test_multiple_load(self, _root): """ Test that we can load and run multiple models at the same time, and that the models don't interact with each other. 
This can
@@ -1246,7 +1290,7 @@ def test_multiple_load(self):
         attributes
 
         This test responds to issue:
-        https://github.com/JamesPHoughton/pysd/issues/23
+        https://github.com/SDXorg/pysd/issues/23
 
         """
 
@@ -1261,12 +1305,12 @@ def test_multiple_load(self):
         assert "susceptible" not in dir(model_1.components)
         assert "teacup_temperature" in dir(model_1.components)
 
-    def test_no_crosstalk(self):
+    def test_no_crosstalk(self, _root):
         """
         Need to check that if we instantiate two copies of the same
         model, changes to one copy do not influence the other copy.
 
-        Checks for issue: https://github.com/JamesPHoughton/pysd/issues/108
+        Checks for issue: https://github.com/SDXorg/pysd/issues/108
         that time is not shared between the two models
 
         """
@@ -1284,14 +1328,14 @@ def test_no_crosstalk(self):
         model_1.run()
         assert model_1.time() != model_2.time()
 
-    def test_restart_cache(self):
+    @pytest.mark.parametrize("model_path", [test_model])
+    def test_restart_cache(self, model):
         """
         Test that when we cache a model variable at the 'run' time,
-        if the variable is changed and the model re-run, the cache updates
-        to the new variable, instead of maintaining the old one.
-        """
+        if the variable is changed and the model is re-run, the cache
+        updates to the new value instead of keeping the old one.
 
-        model = pysd.read_vensim(test_model)
+        """
         model.run()
         old = model.components.room_temperature()
         model.set_components({"Room Temperature": 345})
@@ -1315,7 +1359,7 @@ def test_not_able_to_update_stateful_object(self):
 
 
 class TestMultiRun():
-    def test_delay_reinitializes(self):
+    def test_delay_reinitializes(self, _root):
         model = pysd.read_vensim(_root.joinpath(
             "test-models/tests/delays/test_delays.mdl"))
         res1 = model.run()
@@ -1324,93 +1368,85 @@
 
 
 class TestDependencies():
-    def test_teacup_deps(self):
-        from pysd import read_vensim
-
-        model = read_vensim(test_model)
-
-        expected_dep = {
-            'characteristic_time': {},
-            'heat_loss_to_room': {
-                'teacup_temperature': 1,
-                'room_temperature': 1,
-                'characteristic_time': 1
-            },
-            'room_temperature': {},
-            'teacup_temperature': {'_integ_teacup_temperature': 1},
-            '_integ_teacup_temperature': {
-                'initial': {},
-                'step': {'heat_loss_to_room': 1}
-            },
-            'final_time': {},
-            'initial_time': {},
-            'saveper': {'time_step': 1},
-            'time_step': {}
-        }
-        assert model.dependencies == expected_dep
-
-    def test_multiple_deps(self):
-        from pysd import read_vensim
-
-        model = read_vensim(
-            more_tests.joinpath(
-                "subscript_individually_defined_stocks2/"
-                + "test_subscript_individually_defined_stocks2.mdl"))
-
-        expected_dep = {
-            "stock_a": {"_integ_stock_a": 1, "_integ_stock_a_1": 1},
-            "inflow_a": {"rate_a": 1},
-            "inflow_b": {"rate_a": 1},
-            "initial_values": {"initial_values_a": 1, "initial_values_b": 1},
-            "initial_values_a": {},
-            "initial_values_b": {},
-            "rate_a": {},
-            "final_time": {},
-            "initial_time": {},
-            "saveper": {"time_step": 1},
-            "time_step": {},
-            "_integ_stock_a": {
-                "initial": {"initial_values": 1},
-                "step": {"inflow_a": 1}
-            },
-            '_integ_stock_a_1': {
-                'initial': {'initial_values': 1},
-                'step': {'inflow_b': 1}
-            }
-        }
-        assert model.dependencies == expected_dep
-        more_tests.joinpath(
-            "subscript_individually_defined_stocks2/"
-            + "test_subscript_individually_defined_stocks2.py").unlink()
-
-    def test_constant_deps(self):
-        from pysd import read_vensim
-
-        model = read_vensim(test_model_constant_pipe)
-
-        expected_dep = {
-            "constant1": {},
-            "constant2": {"constant1": 1},
-            "constant3": {"constant1": 3, "constant2": 1},
-            
"final_time": {}, - "initial_time": {}, - "time_step": {}, - "saveper": {"time_step": 1} - } + @pytest.mark.parametrize( + "model_path,expected_dep", + [ + ( + test_model, + { + 'characteristic_time': {}, + 'heat_loss_to_room': { + 'teacup_temperature': 1, + 'room_temperature': 1, + 'characteristic_time': 1 + }, + 'room_temperature': {}, + 'teacup_temperature': {'_integ_teacup_temperature': 1}, + '_integ_teacup_temperature': { + 'initial': {}, + 'step': {'heat_loss_to_room': 1} + }, + 'final_time': {}, + 'initial_time': {}, + 'saveper': {'time_step': 1}, + 'time_step': {} + } + + ), + ( + more_tests.joinpath( + "subscript_individually_defined_stocks2/" + "test_subscript_individually_defined_stocks2.mdl"), + { + "stock_a": {"_integ_stock_a": 1, "_integ_stock_a_1": 1}, + "inflow_a": {"rate_a": 1}, + "inflow_b": {"rate_a": 1}, + "initial_values": { + "initial_values_a": 1, + "initial_values_b": 1 + }, + "initial_values_a": {}, + "initial_values_b": {}, + "rate_a": {}, + "final_time": {}, + "initial_time": {}, + "saveper": {"time_step": 1}, + "time_step": {}, + "_integ_stock_a": { + "initial": {"initial_values": 1}, + "step": {"inflow_a": 1} + }, + '_integ_stock_a_1': { + 'initial': {'initial_values': 1}, + 'step': {'inflow_b': 1} + } + } + ), + ( + test_model_constant_pipe, + { + "constant1": {}, + "constant2": {"constant1": 1}, + "constant3": {"constant1": 3, "constant2": 1}, + "final_time": {}, + "initial_time": {}, + "time_step": {}, + "saveper": {"time_step": 1} + } + ) + ], + ids=["teacup", "multiple", "constant"]) + def test_deps(self, model, expected_dep, model_path): assert model.dependencies == expected_dep - for key, value in model.cache_type.items(): - if key != "time": - assert value == "run" - - test_model_constant_pipe.with_suffix(".py").unlink() - - def test_change_constant_pipe(self): - from pysd import read_vensim - - model = read_vensim(test_model_constant_pipe) + if model_path == test_model_constant_pipe: + for key, value in model.cache_type.items(): + if key != "time": + assert value == "run" + @pytest.mark.parametrize("model_path", [test_model_constant_pipe]) + def test_change_constant_pipe(self, model): new_var = pd.Series( index=[0, 1, 2, 3, 4, 5], data=[1, 2, 3, 4, 5, 6]) @@ -1437,212 +1473,98 @@ def test_change_constant_pipe(self): assert\ (out2["constant3"] == (5*new_var.values-1)*new_var.values).all() - test_model_constant_pipe.with_suffix(".py").unlink() - class TestExportImport(): - def test_run_export_import_integ(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - model = read_vensim(test_model) - stocks = model.run(return_timestamps=[0, 10, 20, 30]) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks['FINAL TIME'] == 30).all().all() - - model.reload() - stocks1 = model.run(return_timestamps=[0, 10], final_time=12) - assert (stocks1['INITIAL TIME'] == 0).all().all() - assert (stocks1['FINAL TIME'] == 12).all().all() - model.export('teacup12.pic') - model.reload() - stocks2 = model.run(initial_condition='teacup12.pic', - return_timestamps=[20, 30]) - assert (stocks2['INITIAL TIME'] == 12).all().all() - assert (stocks2['FINAL TIME'] == 30).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks1.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks1.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('teacup12.pic').unlink() - - 
assert_frames_close(stocks1, stocks.loc[[0, 10]]) - assert_frames_close(stocks2, stocks.loc[[20, 30]]) - - def test_run_export_import_delay(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_delays = _root.joinpath( - 'test-models/tests/delays/test_delays.mdl') - model = read_vensim(test_delays) - stocks = model.run(return_timestamps=20) - model.reload() - model.run(return_timestamps=[], final_time=7) - model.export('delays7.pic') - stocks2 = model.run(initial_condition='delays7.pic', - return_timestamps=20) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('delays7.pic').unlink() - - assert_frames_close(stocks2, stocks) - - def test_run_export_import_delay_fixed(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_delayf = _root.joinpath( - 'test-models/tests/delay_fixed/test_delay_fixed.mdl') - model = read_vensim(test_delayf) - stocks = model.run(return_timestamps=20) - model.reload() - model.run(return_timestamps=7) - model.export('delayf7.pic') - stocks2 = model.run(initial_condition='delayf7.pic', - return_timestamps=20) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('delayf7.pic').unlink() - - assert_frames_close(stocks2, stocks) - - def test_run_export_import_forecast(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_trend = _root.joinpath( - 'test-models/tests/forecast/' - + 'test_forecast.mdl') - model = read_vensim(test_trend) - stocks = model.run(return_timestamps=50, flatten_output=True) - model.reload() - model.run(return_timestamps=20) - model.export('frcst20.pic') - stocks2 = model.run(initial_condition='frcst20.pic', - return_timestamps=50, - flatten_output=True) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 20).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('frcst20.pic').unlink() - - assert_frames_close(stocks2, stocks) - - def test_run_export_import_sample_if_true(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_sample_if_true = _root.joinpath( - 'test-models/tests/sample_if_true/test_sample_if_true.mdl') - model = read_vensim(test_sample_if_true) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.reload() - model.run(return_timestamps=7) - model.export('sample_if_true7.pic') - stocks2 = model.run(initial_condition='sample_if_true7.pic', - return_timestamps=20, - flatten_output=True) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('sample_if_true7.pic').unlink() - 
- assert_frames_close(stocks2, stocks) - - def test_run_export_import_smooth(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_smooth = _root.joinpath( - 'test-models/tests/subscripted_smooth/' - + 'test_subscripted_smooth.mdl') - model = read_vensim(test_smooth) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.reload() - model.run(return_timestamps=7) - model.export('smooth7.pic') - stocks2 = model.run(initial_condition='smooth7.pic', - return_timestamps=20, - flatten_output=True) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('smooth7.pic').unlink() - - assert_frames_close(stocks2, stocks) - - def test_run_export_import_trend(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_trend = _root.joinpath( - 'test-models/tests/subscripted_trend/' - + 'test_subscripted_trend.mdl') - model = read_vensim(test_trend) - stocks = model.run(return_timestamps=20, flatten_output=True) - model.reload() - model.run(return_timestamps=7) - model.export('trend7.pic') - stocks2 = model.run(initial_condition='trend7.pic', - return_timestamps=20, - flatten_output=True) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('trend7.pic').unlink() - - assert_frames_close(stocks2, stocks) - - def test_run_export_import_initial(self): - from pysd import read_vensim - - with catch_warnings(): - simplefilter("ignore") - test_initial = _root.joinpath( - 'test-models/tests/initial_function/test_initial.mdl') - model = read_vensim(test_initial) - stocks = model.run(return_timestamps=20) - model.reload() - model.run(return_timestamps=7) - model.export('initial7.pic') - stocks2 = model.run(initial_condition='initial7.pic', - return_timestamps=20) - assert (stocks['INITIAL TIME'] == 0).all().all() - assert (stocks2['INITIAL TIME'] == 7).all().all() - stocks.drop('INITIAL TIME', axis=1, inplace=True) - stocks2.drop('INITIAL TIME', axis=1, inplace=True) - stocks.drop('FINAL TIME', axis=1, inplace=True) - stocks2.drop('FINAL TIME', axis=1, inplace=True) - Path('initial7.pic').unlink() - - assert_frames_close(stocks2, stocks) + + @pytest.mark.parametrize( + "model_path,return_ts,final_t", + [ + ( + test_model, + ([0, 10, 20, 30], [0, 10], [20, 30]), + (None, 12, None) + ), + ( + Path('test-models/tests/delays/test_delays.mdl'), + ([10, 20], [], [10, 20]), + (None, 7, 34) + ), + ( + Path('test-models/tests/delay_fixed/test_delay_fixed.mdl'), + ([7, 20], [7], [20]), + (None, None, None) + ), + ( + Path('test-models/tests/forecast/test_forecast.mdl'), + ([20, 30, 50], [20, 30], [50]), + (55, 32, 52) + ), + ( + Path( + 'test-models/tests/sample_if_true/test_sample_if_true.mdl' + ), + ([8, 20], [8], [20]), + (None, 15, None) + ), + ( + Path('test-models/tests/subscripted_smooth/' + 'test_subscripted_smooth.mdl'), + ([8, 20], [8], [20]), + (None, 15, None) + ), + ( + Path('test-models/tests/subscripted_trend/' + 'test_subscripted_trend.mdl'), + ([8, 20], [8], [20]), + (None, 15, None) + + ), + ( + 
Path('test-models/tests/initial_function/test_initial.mdl'), + ([8, 20], [8], [20]), + (None, 15, None) + ) + ], + ids=["integ", "delays", "delay_fixed", "forecast", "sample_if_true", + "smooth", "trend", "initial"] + ) + @pytest.mark.filterwarnings("ignore") + def test_run_export_import(self, tmp_path, model, return_ts, final_t): + export_path = tmp_path / "export.pic" + + # Final times of each run + finals = [final_t[i] or return_ts[i][-1] for i in range(3)] + + # Whole run + stocks = model.run( + return_timestamps=return_ts[0], final_time=final_t[0] + ) + assert (stocks['INITIAL TIME'] == 0).all().all() + assert (stocks['FINAL TIME'] == finals[0]).all().all() + + # Export run + model.reload() + stocks1 = model.run( + return_timestamps=return_ts[1], final_time=final_t[1] + ) + assert (stocks1['INITIAL TIME'] == 0).all().all() + assert (stocks1['FINAL TIME'] == finals[1]).all().all() + model.export(export_path) + + # Import run + model.reload() + stocks2 = model.run( + initial_condition=export_path, + return_timestamps=return_ts[2], final_time=final_t[2] + ) + assert (stocks2['INITIAL TIME'] == finals[1]).all().all() + assert (stocks2['FINAL TIME'] == finals[2]).all().all() + + # Compare results + stocks.drop(columns=["INITIAL TIME", "FINAL TIME"], inplace=True) + stocks1.drop(columns=["INITIAL TIME", "FINAL TIME"], inplace=True) + stocks2.drop(columns=["INITIAL TIME", "FINAL TIME"], inplace=True) + if return_ts[1]: + assert_frames_close(stocks1, stocks.loc[return_ts[1]]) + if return_ts[2]: + assert_frames_close(stocks2, stocks.loc[return_ts[2]]) diff --git a/tests/pytest_pysd/pytest_utils.py b/tests/pytest_pysd/pytest_utils.py index 91d6dafb..a3210c07 100644 --- a/tests/pytest_pysd/pytest_utils.py +++ b/tests/pytest_pysd/pytest_utils.py @@ -99,197 +99,6 @@ def test_get_return_elements_not_found_error(self): ["inflow_a", "inflow_b", "inflow_c"], {'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'}) - def test_make_flat_df(self): - - df = pd.DataFrame(index=[1], columns=['elem1']) - df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2'])] - - expected = pd.DataFrame(index=[1], data={'Elem1[B,F]': 6.}) - - return_addresses = { - 'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})} - - actual = pysd.utils.make_flat_df(df, return_addresses) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) - - def test_make_flat_df_0dxarray(self): - - df = pd.DataFrame(index=[1], columns=['elem1']) - df.at[1] = [xr.DataArray(5)] - - expected = pd.DataFrame(index=[1], data={'Elem1': 5.}) - - return_addresses = {'Elem1': ('elem1', {})} - - actual = pysd.utils.make_flat_df(df, return_addresses, flatten=True) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) - - def test_make_flat_df_nosubs(self): - - df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) - df.at[1] = [25, 13] - - expected = pd.DataFrame(index=[1], columns=['Elem1', 'Elem2']) - expected.at[1] = [25, 13] - - return_addresses = {'Elem1': ('elem1', {}), - 'Elem2': ('elem2', {})} - - actual = pysd.utils.make_flat_df(df, return_addresses) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - assert all(actual['Elem1'] == expected['Elem1']) - assert all(actual['Elem2'] == expected['Elem2']) - - def 
test_make_flat_df_return_array(self): - """ There could be cases where we want to - return a whole section of an array - ie, by passing in only part of - the simulation dictionary. in this case, we can't force to float...""" - - df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) - df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2']), - xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2'])] - - expected = pd.DataFrame(index=[1], columns=['Elem1[A, Dim2]', 'Elem2']) - expected.at[1] = [xr.DataArray([[1, 2, 3]], - {'Dim1': ['A'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2']), - xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2'])] - - return_addresses = { - 'Elem1[A, Dim2]': ('elem1', {'Dim1': ['A'], - 'Dim2': ['D', 'E', 'F']}), - 'Elem2': ('elem2', {})} - - actual = pysd.utils.make_flat_df(df, return_addresses) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - # need to assert one by one as they are xarrays - assert actual.loc[1, 'Elem1[A, Dim2]'].equals( - expected.loc[1, 'Elem1[A, Dim2]']) - assert actual.loc[1, 'Elem2'].equals(expected.loc[1, 'Elem2']) - - def test_make_flat_df_flatten(self): - - df = pd.DataFrame(index=[1], columns=['elem1', 'elem2']) - df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2']), - xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2'])] - - expected = pd.DataFrame(index=[1], columns=[ - 'Elem1[A,D]', - 'Elem1[A,E]', - 'Elem1[A,F]', - 'Elem2[A,D]', - 'Elem2[A,E]', - 'Elem2[A,F]', - 'Elem2[B,D]', - 'Elem2[B,E]', - 'Elem2[B,F]', - 'Elem2[C,D]', - 'Elem2[C,E]', - 'Elem2[C,F]']) - - expected.at[1] = [1, 2, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - return_addresses = { - 'Elem1[A,Dim2]': ('elem1', {'Dim1': ['A'], - 'Dim2': ['D', 'E', 'F']}), - 'Elem2': ('elem2', {})} - - actual = pysd.utils.make_flat_df(df, return_addresses, flatten=True) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - # need to assert one by one as they are xarrays - for col in set(expected.columns): - assert actual.loc[:, col].values == expected.loc[:, col].values - - def test_make_flat_df_flatten_transposed(self): - - df = pd.DataFrame(index=[1], columns=['elem2']) - df.at[1] = [ - xr.DataArray( - [[1, 4, 7], [2, 5, 8], [3, 6, 9]], - {'Dim2': ['D', 'E', 'F'], 'Dim1': ['A', 'B', 'C']}, - ['Dim2', 'Dim1'] - ).transpose("Dim1", "Dim2") - ] - - expected = pd.DataFrame(index=[1], columns=[ - 'Elem2[A,D]', - 'Elem2[A,E]', - 'Elem2[A,F]', - 'Elem2[B,D]', - 'Elem2[B,E]', - 'Elem2[B,F]', - 'Elem2[C,D]', - 'Elem2[C,E]', - 'Elem2[C,F]']) - - expected.at[1] = [1, 2, 3, 4, 5, 6, 7, 8, 9] - - return_addresses = { - 'Elem2': ('elem2', {})} - - actual = pysd.utils.make_flat_df(df, return_addresses, flatten=True) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - # need to assert one by one as they are xarrays - for col in set(expected.columns): - assert actual.loc[:, col].values == expected.loc[:, col].values - - def test_make_flat_df_times(self): - - df = pd.DataFrame(index=[1, 2], columns=['elem1']) - df['elem1'] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 
9]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2']), - xr.DataArray([[2, 4, 6], [8, 10, 12], [14, 16, 19]], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - dims=['Dim1', 'Dim2'])] - - expected = pd.DataFrame([{'Elem1[B,F]': 6}, {'Elem1[B,F]': 12}]) - expected.index = [1, 2] - - return_addresses = {'Elem1[B,F]': ('elem1', {'Dim1': ['B'], - 'Dim2': ['F']})} - actual = pysd.utils.make_flat_df(df, return_addresses) - - # check all columns are in the DataFrame - assert set(actual.columns) == set(expected.columns) - assert set(actual.index) == set(expected.index) - assert all(actual['Elem1[B,F]'] == expected['Elem1[B,F]']) - def test_doctests(self): doctest.DocTestSuite(pysd.utils) diff --git a/tests/requirements.txt b/tests/requirements.txt index 18dc6a94..833d24f2 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -4,3 +4,5 @@ pytest-mock coverage coveralls psutil +netCDF4==1.5; platform_system == 'Windows' and python_version == "3.7" +netCDF4==1.6; platform_system != 'Windows' or python_version != "3.7" diff --git a/tests/test-models b/tests/test-models index 9a2f47c0..a19097ce 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit 9a2f47c042c11a09ebddb4ac142eb90467ed0f7d +Subproject commit a19097cee55a631652feeb5ecf74adcadea549e6