diff --git a/.aicoe-ci.yaml b/.aicoe-ci.yaml deleted file mode 100644 index a9be8bb9..00000000 --- a/.aicoe-ci.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Setup and configuring aicoe-ci with configuration file `.aicoe-ci.yaml` -# Example `.aicoe-ci.yaml` with a full list of config options is available here: https://github.com/AICoE/aicoe-ci/blob/master/docs/.aicoe-ci.yaml -check: - # Uncomment following line to build a public image of this repo - # - thoth-build -# Uncomment following lines to build a public image of this repo -# build: -# build-stratergy: Source -# build-source-script: "image:///opt/app-root/builder" -# base-image: quay.io/thoth-station/s2i-custom-notebook:latest -# registry: quay.io -# registry-org: aicoe -# registry-project: -# registry-secret: aicoe-pusher-secret diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7e6167ce..318f7ab9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @durandom @tumido +* @joemoorhouse diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..6c8c3182 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +--- +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + # prettier-ignore + - package-ecosystem: "pip" # See documentation for possible values + # prettier-ignore + directory: "/" # Location of package manifests + commit-message: + prefix: "[dependabot] Chore:" + open-pull-requests-limit: 1 + schedule: + interval: "weekly" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..7be77a5f --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,30 @@ +name: CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +env: + python-version: "3.8" + +jobs: + ci: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: "Setup Python" + uses: actions/setup-python@v4.7.0 + with: + python-version: ${{ env.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install tox + + - name: Run auto-tests + run: tox diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..be6d11b7 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,63 @@ +--- +name: "🐍📦 Production build and release" + +# GitHub/PyPI trusted publisher documentation: +# https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ + +# yamllint disable-line rule:truthy +on: + workflow_dispatch: + push: + # Only invoked on release tag pushes + tags: + - v*.*.* + +env: + python-version: "3.8" + +jobs: + publish: + name: "🐍📦 Build and publish" + runs-on: ubuntu-latest + environment: + name: pypi + permissions: + # IMPORTANT: mandatory for trusted publishing + id-token: write + steps: + - name: "Checkout repository" + uses: actions/checkout@v4 + + - name: "Setup PDM for build commands" + uses: pdm-project/setup-pdm@v3 + with: + version: 2.10.0 + + - name: "Setup Python" + uses: actions/setup-python@v4.7.0 + with: + python-version: ${{ env.python-version }} + + - name: "Build with PDM backend" + run: | + pdm build + + - name: "Sign packages with Sigstore" + uses: 
sigstore/gh-action-sigstore-python@v2.1.0 + with: + inputs: >- + ./dist/*.tar.gz + ./dist/*.whl + + - name: "Remove files unsupported by PyPI" + run: | + if [ -f dist/buildvars.txt ]; then + rm dist/buildvars.txt + fi + rm dist/*.sigstore || true + + - name: "Publish package to PyPI" + uses: pypa/gh-action-pypi-publish@release/v1 + with: + verbose: true + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml new file mode 100644 index 00000000..53ff532e --- /dev/null +++ b/.github/workflows/test-release.yml @@ -0,0 +1,69 @@ +--- +name: "🐍📦 Test build and publish to Test PyPI" + +# GitHub/PyPI trusted publisher documentation: +# https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ + +# yamllint disable-line rule:truthy +on: push + +env: + python-version: "3.8" + +jobs: + publish: + name: "🐍📦 Test build and publish" + runs-on: ubuntu-latest + environment: + name: testpypi + permissions: + # IMPORTANT: mandatory for trusted publishing + id-token: write + steps: + - name: "Checkout repository" + uses: actions/checkout@v4 + + - name: "Setup PDM for build commands" + uses: pdm-project/setup-pdm@v3 + with: + version: 2.10.0 + + - name: "Setup Python" + uses: actions/setup-python@v4.7.0 + with: + python-version: ${{ env.python-version }} + + - name: "Update version and output Run ID" + run: | + value=`scripts/version.sh` + scripts/dev-versioning.sh "$value-dev${{ github.run_id }}" + echo "$value-dev${{ github.run_id }}" + echo ${{ github.run_id }} + + - name: "Output Run Number" + run: echo ${{ github.run_number }} + + - name: "Build with PDM backend" + run: | + pdm build + + - name: "Sign packages with Sigstore" + uses: sigstore/gh-action-sigstore-python@v2.1.0 + with: + inputs: >- + ./dist/*.tar.gz + ./dist/*.whl + + - name: "Remove files unsupported by PyPI" + run: | + if [ -f dist/buildvars.txt ]; then + rm dist/buildvars.txt + fi + rm dist/*.sigstore || true + + - name: Publish distribution to Test PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + verbose: true + repository-url: https://test.pypi.org/legacy/ + password: ${{ secrets.TEST_PYPI_API_TOKEN }} diff --git a/.gitignore b/.gitignore index d7c9832f..39315b99 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Credentials / Secrets +credentials.env + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -57,7 +60,7 @@ docs/_build/ target/ # DotEnv configuration -.env +*.env # Database *.db @@ -87,3 +90,9 @@ target/ # Mypy cache .mypy_cache/ + +# Virtual environment +.venv/* +src/test/data/coords.json +credentials.env +.pdm-python diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1bfbf453..d1617ce3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,12 +1,7 @@ --- repos: - - repo: git://github.com/Lucas-C/pre-commit-hooks - rev: v1.1.10 - hooks: - - id: remove-tabs - - - repo: git://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 hooks: - id: trailing-whitespace - id: check-merge-conflict @@ -16,51 +11,32 @@ repos: - id: check-case-conflict - id: check-docstring-first - id: check-json + - id: check-toml + - id: check-yaml - id: check-symlinks - id: detect-private-key - id: check-ast - id: debug-statements - - repo: git://github.com/pycqa/pydocstyle.git - rev: 6.1.1 + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.5 hooks: - - id: 
pydocstyle - - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: check-toml - - id: check-yaml - - id: end-of-file-fixer - - id: trailing-whitespace - - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.902 - hooks: - - id: mypy - exclude: '^(docs|tasks|tests)|setup\.py' - args: [--ignore-missing-imports] + - id: remove-tabs - repo: https://github.com/psf/black - rev: 21.6b0 + rev: '24.2.0' hooks: - id: black - - - repo: https://github.com/tomcatling/black-nb - rev: '0.5.0' - hooks: - - id: black-nb - - # Enable this in repositories with python packages. - # - repo: https://github.com/mgedmin/check-manifest - # rev: '0.39' - # hooks: - # - id: check-manifest + - id: black-jupyter - repo: https://github.com/s-weigand/flake8-nb - rev: v0.3.0 + rev: v0.5.3 hooks: - id: flake8-nb - additional_dependencies: ['pep8-naming'] + additional_dependencies: + - pep8-naming # Ignore all format-related checks as Black takes care of those. - args: ['--ignore', 'E2,W5', '--select', 'E,W,F,N', '--max-line-length=120'] + args: + - --ignore=E2, W5, F401, E401, E704 + - --select=E, W, F, N + - --max-line-length=120 diff --git a/.prow.yaml b/.prow.yaml deleted file mode 100644 index 77dad25f..00000000 --- a/.prow.yaml +++ /dev/null @@ -1,20 +0,0 @@ -presubmits: - - name: pre-commit - decorate: true - skip_report: false - always_run: true - context: aicoe-ci/prow/pre-commit - spec: - containers: - - image: quay.io/thoth-station/thoth-precommit-py38:v0.12.8 - command: - - "pre-commit" - - "run" - - "--all-files" - resources: - requests: - memory: "500Mi" - cpu: "300m" - limits: - memory: "1Gi" - cpu: "500m" diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..e0f7cc1e --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,28 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.9" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: + - pdf + - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements-docs.txt diff --git a/.thoth.yaml b/.thoth.yaml deleted file mode 100644 index 2482d40f..00000000 --- a/.thoth.yaml +++ /dev/null @@ -1,26 +0,0 @@ -host: khemenu.thoth-station.ninja -tls_verify: false -requirements_format: pipenv - -runtime_environments: - - name: rhel:8 - operating_system: - name: rhel - version: "8" - python_version: "3.8" - recommendation_type: latest - -managers: - - name: pipfile-requirements - - name: update - configuration: - labels: [bot] - - name: info - - name: version - configuration: - maintainers: - - tumido - assignees: - - sesheta - labels: [bot] - changelog_file: true diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..f87fadd2 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,74 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +- n/a + +## [0.3.1] - 2022-05-04 + +- Revert RiverineInundation inventory IDs + +## [0.3.0] - 2022-05-02 + +- Add path, display_name, description, filename to hazard inventory +- Remove zarr_reader debug logging + +## [0.2.3] - 2022-04-25 + +- Add static json for vulnerability to package + +## [0.2.1] - 2022-04-24 + +- Add proxying to real estate model + +## [0.2.0] - 2022-04-20 + +- Add zarr_reader debug logging +- Allow requests to provide zarr store +- Support vulnerability curves + +## [0.1.3] - 2022-04-01 + +### Fixes +- Correct ValueError message raised by physrisk.requests.get + +## [0.1.2] - 2022-04-01 + +### Fixes +- Add missing `__init__.py` file in src/physrisk/data_objects + +## [0.1.1] - 2022-04-01 + +- Correct imports, dependencies, and pre-commit hooks + +## [0.1.0] - 2022-03-31 + +- Enhance inventory; minor refactor of hazard availability +- Implement hazard inventory and availability request +- Add literature review + +## [0.0.2] - 2022-03-18 + +- Model development and methodology updates + +## 0.0.1-b.1 - 2022-02-17 + +- Early development release + + +[Unreleased]: https://github.com/os-climate/physrisk/compare/v0.3.1...HEAD +[0.3.1]: https://github.com/os-climate/physrisk/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/os-climate/physrisk/compare/v0.2.3...v0.3.0 +[0.2.3]: https://github.com/os-climate/physrisk/compare/v0.2.1...v0.2.3 +[0.2.1]: https://github.com/os-climate/physrisk/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/os-climate/physrisk/compare/v0.1.3...v0.2.0 +[0.1.3]: https://github.com/os-climate/physrisk/compare/v0.1.2...v0.1.3 +[0.1.2]: https://github.com/os-climate/physrisk/compare/v0.1.1...v0.1.2 +[0.1.1]: https://github.com/os-climate/physrisk/compare/v0.1.0...v0.1.1 +[0.1.0]: https://github.com/os-climate/physrisk/compare/v0.0.2...v0.1.0 +[0.0.2]: https://github.com/os-climate/physrisk/compare/v0.0.1-b.1...v0.0.2 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..b5531e88 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,111 @@ +# Contributing to physrisk + +## Getting started +To get set up, clone and enter the repo. +``` +git clone git@github.com:os-climate/physrisk.git +cd physrisk +``` + +We recommend using [pdm](https://pdm-project.org/latest/) for a +consistent working environment. Install via, e.g.: +``` +pip install pdm +``` +To ease use of Jupyter notebooks (e.g. in VS Code), the following config can be set: +``` +pdm config venv.with_pip True +``` + +The command: +``` +pdm install +``` +will create a virtual environment (typically a .venv folder in the project folder) and install the dependencies. +We recommend that the IDE workspace uses this virtual environment when developing. + +When adding a package for use in new or improved functionality, +`pdm add <package>`. Or, when adding something helpful for +testing or development, `pdm add -dG <group> <package>`. + +## Development +Patches may be contributed via pull requests to +https://github.com/os-climate/physrisk. + +All changes must pass the automated test suite, along with various static +checks. + +[Black](https://black.readthedocs.io/) code style and +[isort](https://pycqa.github.io/isort/) import ordering are enforced +and enabling automatic formatting via [pre-commit](https://pre-commit.com/) +is recommended: +``` +pre-commit install +``` + +To ensure compliance with the static checks, developers may wish to run black and isort against modified files. + +E.g., +``` +# auto-sort imports +isort . +# auto-format code +black .
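+# optionally, run every configured pre-commit hook over all files
+pre-commit run --all-files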
+``` + +Code can then be tested using tox. +``` +# run static checks and unit tests +tox +# run only tests +tox -e py3 +# run only static checks +tox -e static +# run unit tests and produce an HTML code coverage report (/htmlcov) +tox -e cov +``` + +## IDE set-up +For those using VS Code, configure tests ('Python: Configure Tests') to use 'pytest' +to allow running of tests within the IDE. + +## Releasing +Actions are configured to release to PyPI on pushing a tag. In order to do this: +- Ensure version in pyproject.toml is updated (will require pull request like any other change) +- Create new annotated tag and push +``` +git tag -a v1.0.0 -m "v1.0.0" +git push --follow-tags +``` + +## Forking workflow +This is a useful clarification of the forking workflow: +https://gist.github.com/Chaser324/ce0505fbed06b947d962 + +## Project Organization +------------ + + ├── LICENSE + ├── pdm.lock <- pdm.lock stating a pinned down software stack as used by pdm. + ├── README.md <- The top-level README for developers using this project. + │ + ├── methodology <- Contains LaTeX methodology document. + │ └── literature <- Literature review. + │ + ├── docs <- A default Sphinx project; see sphinx-doc.org for details. + │ + ├── notebooks <- Jupyter notebooks. These comprise notebooks used for on-boarding + │ hazard data, on-boarding vulnerability models and tutorial. + │ + ├── setup.py <- makes project pip installable (pip install -e .) so src can be imported. + │ + ├── src <- Source code for use in this project. + │   └── physrisk <- physrisk source code. + │     + ├── tests <- physrisk tests; follows same folder structure as physrisk. + │ + ├── pyproject.toml <- central location of project settings. + │ + └── tox.ini <- tox file with settings for running tox; see tox.readthedocs.io. + +-------- diff --git a/LICENSE b/LICENSE index e69de29b..29f81d81 100644 --- a/LICENSE +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..da6be900 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include src/physrisk/data/static/*.json +include src/physrisk/data/static/example_portfolios/*.json +include src/physrisk/data/static/hazard/*.json +include src/physrisk/data/static/vulnerability/*.json diff --git a/Makefile b/Makefile deleted file mode 100644 index 7a8617a9..00000000 --- a/Makefile +++ /dev/null @@ -1,144 +0,0 @@ -.PHONY: clean data lint requirements sync_data_to_s3 sync_data_from_s3 - -################################################################################# -# GLOBALS # -################################################################################# - -PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) -BUCKET = [OPTIONAL] your-bucket-for-syncing-data (do not include 's3://') -PROFILE = default -PROJECT_NAME = project-template -PYTHON_INTERPRETER = python3 - -ifeq (,$(shell which conda)) -HAS_CONDA=False -else -HAS_CONDA=True -endif - -################################################################################# -# COMMANDS # -################################################################################# - -## Install Python Dependencies -requirements: test_environment - $(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel - $(PYTHON_INTERPRETER) -m pip install -r requirements.txt - -## Make Dataset -data: requirements - $(PYTHON_INTERPRETER) src/data/make_dataset.py data/raw data/processed - -## Delete all compiled Python files -clean: - find . -type f -name "*.py[co]" -delete - find . -type d -name "__pycache__" -delete - -## Lint using flake8 -lint: - flake8 src - -## Upload Data to S3 -sync_data_to_s3: -ifeq (default,$(PROFILE)) - aws s3 sync data/ s3://$(BUCKET)/data/ -else - aws s3 sync data/ s3://$(BUCKET)/data/ --profile $(PROFILE) -endif - -## Download Data from S3 -sync_data_from_s3: -ifeq (default,$(PROFILE)) - aws s3 sync s3://$(BUCKET)/data/ data/ -else - aws s3 sync s3://$(BUCKET)/data/ data/ --profile $(PROFILE) -endif - -## Set up python interpreter environment -create_environment: -ifeq (True,$(HAS_CONDA)) - @echo ">>> Detected conda, creating conda environment." -ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER))) - conda create --name $(PROJECT_NAME) python=3 -else - conda create --name $(PROJECT_NAME) python=2.7 -endif - @echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)" -else - $(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper - @echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\ - export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n" - @bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)" - @echo ">>> New virtualenv created. 
Activate with:\nworkon $(PROJECT_NAME)" -endif - -## Test python environment is setup correctly -test_environment: - $(PYTHON_INTERPRETER) test_environment.py - -################################################################################# -# PROJECT RULES # -################################################################################# - - - -################################################################################# -# Self Documenting Commands # -################################################################################# - -.DEFAULT_GOAL := help - -# Inspired by -# sed script explained: -# /^##/: -# * save line in hold space -# * purge line -# * Loop: -# * append newline + line to hold space -# * go to next line -# * if line starts with doc comment, strip comment character off and loop -# * remove target prerequisites -# * append hold space (+ newline) to line -# * replace newline plus comments by `---` -# * print line -# Separate expressions are necessary because labels cannot be delimited by -# semicolon; see -.PHONY: help -help: - @echo "$$(tput bold)Available rules:$$(tput sgr0)" - @echo - @sed -n -e "/^## / { \ - h; \ - s/.*//; \ - :doc" \ - -e "H; \ - n; \ - s/^## //; \ - t doc" \ - -e "s/:.*//; \ - G; \ - s/\\n## /---/; \ - s/\\n/ /g; \ - p; \ - }" ${MAKEFILE_LIST} \ - | LC_ALL='C' sort --ignore-case \ - | awk -F '---' \ - -v ncol=$$(tput cols) \ - -v indent=19 \ - -v col_on="$$(tput setaf 6)" \ - -v col_off="$$(tput sgr0)" \ - '{ \ - printf "%s%*s%s ", col_on, -indent, $$1, col_off; \ - n = split($$2, words, " "); \ - line_length = ncol - indent; \ - for (i = 1; i <= n; i++) { \ - line_length -= length(words[i]) + 1; \ - if (line_length <= 0) { \ - line_length = ncol - indent - length(words[i]) - 1; \ - printf "\n%*s ", -indent, " "; \ - } \ - printf "%s ", words[i]; \ - } \ - printf "\n"; \ - }' \ - | more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars') diff --git a/Pipfile b/Pipfile deleted file mode 100644 index 4dcea48e..00000000 --- a/Pipfile +++ /dev/null @@ -1,16 +0,0 @@ -[[source]] -name = "pypi" -url = "https://pypi.org/simple" -verify_ssl = true - -[dev-packages] -flake8 = "*" -coverage = "*" -Sphinx = "*" - -[packages] -src = {editable = true, path = "./"} -awscli = "*" - -[requires] -python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock deleted file mode 100644 index e2fa0d2c..00000000 --- a/Pipfile.lock +++ /dev/null @@ -1,421 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "91a403a4cf038cbb97937e0d56125a8e408804f4d685097bd1062f8666f5759f" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.8" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "awscli": { - "hashes": [ - "sha256:05be15e54d4d0a650814d9cb570b0870baf2011a4bfb2a74073237f7a707f470", - "sha256:53accac94383af4559853f1bcd6f71f1e96d42f9b4147d9e4208810a18cd2914" - ], - "index": "pypi", - "version": "==1.18.80" - }, - "botocore": { - "hashes": [ - "sha256:0ad4fb7b731e924490305584f7dcfd3e9fe955b307392b054a3d132a902a2528", - "sha256:13f4d92589ae9bd8c2c5ce8ea2f59d01a0ac4c14e9f99772ee4d1eb6cf9a3357" - ], - "version": "==1.17.3" - }, - "click": { - "hashes": [ - "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", - "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" - ], - "version": "==7.1.2" - }, - "colorama": { - "hashes": [ - "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff", - 
"sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1" - ], - "markers": "python_version != '3.4'", - "version": "==0.4.3" - }, - "docutils": { - "hashes": [ - "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0", - "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827", - "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99" - ], - "version": "==0.15.2" - }, - "jmespath": { - "hashes": [ - "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9", - "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f" - ], - "version": "==0.10.0" - }, - "pyasn1": { - "hashes": [ - "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" - ], - "version": "==0.4.8" - }, - "python-dateutil": { - "hashes": [ - "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", - "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" - ], - "version": "==2.8.1" - }, - "python-dotenv": { - "hashes": [ - "sha256:0c8d1b80d1a1e91717ea7d526178e3882732420b03f08afea0406db6402e220e", - "sha256:587825ed60b1711daea4832cf37524dfd404325b7db5e25ebe88c495c9f807a0" - ], - "version": "==0.15.0" - }, - "pyyaml": { - "hashes": [ - "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97", - "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76", - "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2", - "sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e", - "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648", - "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf", - "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f", - "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2", - "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee", - "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a", - "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d", - "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c", - "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a" - ], - "markers": "python_version != '3.4'", - "version": "==5.3.1" - }, - "rsa": { - "hashes": [ - "sha256:25df4e10c263fb88b5ace923dd84bf9aa7f5019687b5e55382ffcdb8bede9db5", - "sha256:43f682fea81c452c98d09fc316aae12de6d30c4b5c84226642cf8f8fd1c93abd" - ], - "version": "==3.4.2" - }, - "s3transfer": { - "hashes": [ - "sha256:5d48b1fd2232141a9d5fb279709117aaba506cacea7f86f11bc392f06bfa8fc2", - "sha256:c5dadf598762899d8cfaecf68eba649cd25b0ce93b6c954b156aaa3eed160547" - ], - "version": "==0.3.6" - }, - "six": { - "hashes": [ - "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", - "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" - ], - "version": "==1.15.0" - }, - "src": { - "editable": true, - "path": "./" - }, - "urllib3": { - "hashes": [ - "sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2", - "sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e" - ], - "markers": "python_version != '3.4'", - "version": "==1.25.11" - } - }, - "develop": { - "alabaster": { - "hashes": [ - "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359", - 
"sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02" - ], - "version": "==0.7.12" - }, - "babel": { - "hashes": [ - "sha256:9d35c22fcc79893c3ecc85ac4a56cde1ecf3f19c540bba0922308a6c06ca6fa5", - "sha256:da031ab54472314f210b0adcff1588ee5d1d1d0ba4dbd07b94dba82bde791e05" - ], - "version": "==2.9.0" - }, - "certifi": { - "hashes": [ - "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c", - "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830" - ], - "version": "==2020.12.5" - }, - "chardet": { - "hashes": [ - "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", - "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" - ], - "version": "==4.0.0" - }, - "coverage": { - "hashes": [ - "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a", - "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355", - "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65", - "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7", - "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9", - "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1", - "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0", - "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55", - "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c", - "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6", - "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef", - "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019", - "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e", - "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0", - "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf", - "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24", - "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2", - "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c", - "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4", - "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0", - "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd", - "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04", - "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e", - "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730", - "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2", - "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768", - "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796", - "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7", - "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a", - "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489", - "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052" - ], - "index": "pypi", - "version": "==5.1" - }, - "docutils": { - "hashes": [ - "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0", - "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827", - "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99" - ], - "version": "==0.15.2" - }, - "flake8": { - "hashes": [ - 
"sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c", - "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208" - ], - "index": "pypi", - "version": "==3.8.3" - }, - "idna": { - "hashes": [ - "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", - "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" - ], - "version": "==2.10" - }, - "imagesize": { - "hashes": [ - "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", - "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" - ], - "version": "==1.2.0" - }, - "jinja2": { - "hashes": [ - "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419", - "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6" - ], - "index": "pypi", - "version": "==2.11.3" - }, - "markupsafe": { - "hashes": [ - "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", - "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", - "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", - "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", - "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42", - "sha256:195d7d2c4fbb0ee8139a6cf67194f3973a6b3042d742ebe0a9ed36d8b6f0c07f", - "sha256:22c178a091fc6630d0d045bdb5992d2dfe14e3259760e713c490da5323866c39", - "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", - "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", - "sha256:2beec1e0de6924ea551859edb9e7679da6e4870d32cb766240ce17e0a0ba2014", - "sha256:3b8a6499709d29c2e2399569d96719a1b21dcd94410a586a18526b143ec8470f", - "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", - "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", - "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", - "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", - "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b", - "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", - "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15", - "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", - "sha256:6f1e273a344928347c1290119b493a1f0303c52f5a5eae5f16d74f48c15d4a85", - "sha256:6fffc775d90dcc9aed1b89219549b329a9250d918fd0b8fa8d93d154918422e1", - "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", - "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", - "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", - "sha256:7fed13866cf14bba33e7176717346713881f56d9d2bcebab207f7a036f41b850", - "sha256:84dee80c15f1b560d55bcfe6d47b27d070b4681c699c572af2e3c7cc90a3b8e0", - "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", - "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", - "sha256:98bae9582248d6cf62321dcb52aaf5d9adf0bad3b40582925ef7c7f0ed85fceb", - "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", - "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", - "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", - "sha256:a6a744282b7718a2a62d2ed9d993cad6f5f585605ad352c11de459f4108df0a1", - "sha256:acf08ac40292838b3cbbb06cfe9b2cb9ec78fce8baca31ddb87aaac2e2dc3bc2", - 
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", - "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", - "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", - "sha256:b1dba4527182c95a0db8b6060cc98ac49b9e2f5e64320e2b56e47cb2831978c7", - "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", - "sha256:b7d644ddb4dbd407d31ffb699f1d140bc35478da613b441c582aeb7c43838dd8", - "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", - "sha256:bf5aa3cbcfdf57fa2ee9cd1822c862ef23037f5c832ad09cfea57fa846dec193", - "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", - "sha256:caabedc8323f1e93231b52fc32bdcde6db817623d33e100708d9a68e1f53b26b", - "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", - "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2", - "sha256:d53bc011414228441014aa71dbec320c66468c1030aae3a6e29778a3382d96e5", - "sha256:d73a845f227b0bfe8a7455ee623525ee656a9e2e749e4742706d80a6065d5e2c", - "sha256:d9be0ba6c527163cbed5e0857c451fcd092ce83947944d6c14bc95441203f032", - "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", - "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be", - "sha256:feb7b34d6325451ef96bc0e36e1a6c0c1c64bc1fbec4b854f4529e51887b1621" - ], - "version": "==1.1.1" - }, - "mccabe": { - "hashes": [ - "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", - "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f" - ], - "version": "==0.6.1" - }, - "packaging": { - "hashes": [ - "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", - "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" - ], - "version": "==20.9" - }, - "pycodestyle": { - "hashes": [ - "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367", - "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e" - ], - "version": "==2.6.0" - }, - "pyflakes": { - "hashes": [ - "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92", - "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8" - ], - "version": "==2.2.0" - }, - "pygments": { - "hashes": [ - "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94", - "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8" - ], - "version": "==2.8.1" - }, - "pyparsing": { - "hashes": [ - "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", - "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" - ], - "version": "==2.4.7" - }, - "pytz": { - "hashes": [ - "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da", - "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798" - ], - "version": "==2021.1" - }, - "requests": { - "hashes": [ - "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", - "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" - ], - "version": "==2.25.1" - }, - "snowballstemmer": { - "hashes": [ - "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2", - "sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914" - ], - "version": "==2.1.0" - }, - "sphinx": { - "hashes": [ - "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258", - "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5" - ], - 
"index": "pypi", - "version": "==3.1.1" - }, - "sphinxcontrib-applehelp": { - "hashes": [ - "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", - "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" - ], - "version": "==1.0.2" - }, - "sphinxcontrib-devhelp": { - "hashes": [ - "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", - "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" - ], - "version": "==1.0.2" - }, - "sphinxcontrib-htmlhelp": { - "hashes": [ - "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", - "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" - ], - "version": "==1.0.3" - }, - "sphinxcontrib-jsmath": { - "hashes": [ - "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", - "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" - ], - "version": "==1.0.1" - }, - "sphinxcontrib-qthelp": { - "hashes": [ - "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", - "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" - ], - "version": "==1.0.3" - }, - "sphinxcontrib-serializinghtml": { - "hashes": [ - "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", - "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" - ], - "version": "==1.1.4" - }, - "urllib3": { - "hashes": [ - "sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2", - "sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e" - ], - "markers": "python_version != '3.4'", - "version": "==1.25.11" - } - } -} diff --git a/README.md b/README.md index 33f0e7c5..e021abe7 100644 --- a/README.md +++ b/README.md @@ -1,59 +1,49 @@ Physrisk ============================== +Physical climate risk calculation engine. -Physical risk calculation engine - -Project Organization ------------- - - ├── LICENSE - ├── Makefile <- Makefile with commands like `make data` or `make train` - ├── Pipfile <- Pipfile stating package configuration as used by Pipenv. - ├── Pipfile.lock <- Pipfile.lock stating a pinned down software stack with as used by Pipenv. - ├── README.md <- The top-level README for developers using this project. - ├── data - │   ├── external <- Data from third party sources. - │   ├── interim <- Intermediate data that has been transformed. - │   ├── processed <- The final, canonical data sets for modeling. - │   └── raw <- The original, immutable data dump. - │ - ├── docs <- A default Sphinx project; see sphinx-doc.org for details - │ - ├── models <- Trained and serialized models, model predictions, or model summaries - │ - ├── notebooks <- Jupyter notebooks. Naming convention is a number (for ordering), - │ the creator's initials, and a short `-` delimited description, e.g. - │ `1.0-jqp-initial-data-exploration`. - │ - ├── references <- Data dictionaries, manuals, and all other explanatory materials. - │ - ├── reports <- Generated analysis as HTML, PDF, LaTeX, etc. - │   └── figures <- Generated graphics and figures to be used in reporting - │ - ├── requirements.txt <- The requirements file stating direct dependencies if a library - │ is developed. - │ - ├── setup.py <- makes project pip installable (pip install -e .) so src can be imported - ├── src <- Source code for use in this project. 
- │   ├── __init__.py <- Makes src a Python module - │ │ - │   ├── data <- Scripts to download or generate data - │   │   └── make_dataset.py - │ │ - │   ├── features <- Scripts to turn raw data into features for modeling - │   │   └── build_features.py - │ │ - │   ├── models <- Scripts to train models and then use trained models to make - │ │ │ predictions - │   │   ├── predict_model.py - │   │   └── train_model.py - │ │ - │   └── visualization <- Scripts to create exploratory and results oriented visualizations - │   └── visualize.py - │ - ├── .thoth.yaml <- Thoth's configuration file - ├── .aicoe-ci.yaml <- AICoE CI configuration file (https://github.com/AICoE/aicoe-ci) - └── tox.ini <- tox file with settings for running tox; see tox.readthedocs.io - - --------- + + +## About physrisk + +An [OS-Climate](https://os-climate.org) project, physrisk is a library for assessing the physical effects of climate change and thereby the potential benefit of measures to improve resilience. + +An introduction and methodology are available [here](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf). + +Physrisk is primarily designed to run 'bottom-up' calculations that model the impact of climate hazards on large numbers of individual assets (including natural assets) and operations. These calculations can be used to assess financial risks or socio-economic impacts. To do this, physrisk collects: + +- hazard indicators and +- models of vulnerability of assets/operations to hazards. + +Hazard indicators are on-boarded from public resources or inferred from climate projections, e.g. from CMIP or CORDEX data sets. Indicators are created from code in the +[hazard repo](https://github.com/os-climate/hazard) to make calculations as transparent as possible. + +Physrisk is also designed to be hosted, e.g. to provide on-demand calculations. [physrisk-api](https://github.com/os-climate/physrisk-api) and [physrisk-ui](https://github.com/os-climate/physrisk-ui) provide an example API and user interface. A [development version of the UI](https://physrisk-ui-sandbox.apps.odh-cl1.apps.os-climate.org) is hosted by OS-Climate. + +## Using the library +The library can be run locally, although access to the hazard indicator data is needed. The library is installed via: +``` +pip install physrisk-lib +``` + +Hazard indicator data is freely available. Members of the project are able to access the OS-Climate S3 buckets. Credentials are available [here](https://console-openshift-console.apps.odh-cl1.apps.os-climate.org/k8s/ns/sandbox/secrets/physrisk-s3-keys). Information about the project is available via the [community-hub](https://github.com/os-climate/OS-Climate-Community-Hub). Non-members are able to download or copy hazard indicator data. + +Hazard indicator data can be downloaded or copied from the 'os-climate-public-data' bucket. A list of the keys to copy is available from +https://os-climate-public-data.s3.amazonaws.com/hazard/keys.txt + +An inventory of the hazard data is maintained [here](https://github.com/os-climate/hazard/blob/main/src/inventories/hazard/inventory.json) (this is used by the physrisk library itself). The [UI hazard viewer](https://physrisk-ui-sandbox.apps.odh-cl1.apps.os-climate.org) is a convenient way to browse data sets.
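+
+For a quick look at what is available, the keys file can be fetched and filtered with standard Python. A minimal sketch (the URL is the public keys list above; the 'riverine' filter is purely illustrative):
+```
+import urllib.request
+
+# download the list of hazard data keys from the public bucket
+url = "https://os-climate-public-data.s3.amazonaws.com/hazard/keys.txt"
+keys = urllib.request.urlopen(url).read().decode("utf-8").splitlines()
+
+# illustrative filter: keep only riverine inundation data sets
+riverine = [key for key in keys if "riverine" in key.lower()]
+print(len(keys), "keys in total;", len(riverine), "contain 'riverine'")
+```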
+ +Access to hazard event data requires setting environment variables that specify the S3 bucket, for example: + +``` +OSC_S3_BUCKET=physrisk-hazard-indicators +OSC_S3_ACCESS_KEY=********** +OSC_S3_SECRET_KEY=********** +``` + +For use in a Jupyter environment, it is recommended to put the environment variables in a credentials.env file and load them, for example: +``` +from dotenv import load_dotenv +load_dotenv(dotenv_path="credentials.env", override=True) +``` diff --git a/docs/Makefile b/docs/Makefile index a5de1101..d4bb2cbb 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,153 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . BUILDDIR = _build -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files."
- -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/project-template.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/project-template.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/project-template" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/project-template" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." +.PHONY: help Makefile -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/methodology/PhysicalRiskMethodology.pdf b/docs/_static/PhysicalRiskMethodology.pdf similarity index 62% rename from docs/methodology/PhysicalRiskMethodology.pdf rename to docs/_static/PhysicalRiskMethodology.pdf index af79127a..3951ef2f 100644 Binary files a/docs/methodology/PhysicalRiskMethodology.pdf and b/docs/_static/PhysicalRiskMethodology.pdf differ diff --git a/docs/conf.py b/docs/conf.py index 4b14c924..e19e907f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,255 +1,95 @@ -"""Documentation builder configuration.""" - -# -*- coding: utf-8 -*- -# -# project-template documentation build configuration file, created by -# sphinx-quickstart. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. +# Configuration file for the Sphinx documentation builder. # -# All configuration values have a default; values that are commented out -# serve to show the default. +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- +import os +import sys -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' +# sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath("..")) +sys.path.insert(0, os.path.abspath("../src/")) -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [] +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +project = "PhysicalRisk" +copyright = "2024, DCV" +author = "DCV" -# The suffix of source filenames. -source_suffix = ".rst" +# The full version, including alpha/beta/rc tags +# release = "1.1.0" -# The encoding of source files. -# source_encoding = 'utf-8-sig' +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration # The master toctree document. master_doc = "index" -# General information about the project. -project = u"project-template" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = "0.1" -# The full version, including alpha/beta/rc tags. -release = "0.1" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. 
-# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "default" +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx_toolbox.installation", + "sphinx_toolbox.latex", + "sphinx.ext.autosummary", + "sphinx.ext.graphviz", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.autosectionlabel", + "sphinx_design", + "sphinx.ext.intersphinx", + # 'myst_nb', + # "myst_parser", + "nbsphinx", + "sphinxcontrib.details.directive", + "sphinxcontrib.bibtex", + "sphinx.ext.mathjax", + "sphinx_simplepdf", +] -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} +bibtex_bibfiles = ["references.bib"] +bibtex_default_style = "alpha" +bibtex_encoding = "latin" -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None +# Not show module +python_module_index = False -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None +# Summary +autosummary_generate = True -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None +# Docstrings of private methods +autodoc_default_options = {"members": True, "undoc-members": True, "private-members": False} -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". +html_logo = "images/OS-Climate-Logo.png" +html_theme = "pydata_sphinx_theme" html_static_path = ["_static"] -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. 
-# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} +# Don't show the code +html_show_sourcelink = True -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = "project-templatedoc" - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', +html_theme_options = { + # 'logo_only': False, + # 'display_version': False, + # Table of contents options + "collapse_navigation": False, } -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ( - "index", - "project-template.tex", - u"project-template Documentation", - u"aicoe-aiops", - "manual", - ), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - "index", - "project-template", - u"project-template Documentation", - [u"aicoe-aiops"], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - "index", - "project-template", - u"project-template Documentation", - u"aicoe-aiops", - "project-template", - "template for the team to use", - "Miscellaneous", - ), -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = []
+html_sidebars = {
+    "**": [
+        "globaltoc.html",  # General table of contents
+        # 'localtoc.html',  # Local table of contents for each file
+        "searchbox.html",  # Search box
+    ]
+}

-# If false, no module index is generated.
-# texinfo_domain_indices = True
+# This setting ensures that each section in your documentation is automatically assigned
+# a unique label based on the document it belongs to.
+autosectionlabel_prefix_document = True

-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
+# Show members in the order they appear in the source code (autodoc_member_order option).
+autodoc_member_order = "bysource"
diff --git a/docs/conf.py.backup b/docs/conf.py.backup
new file mode 100644
index 00000000..2bcd5e4f
--- /dev/null
+++ b/docs/conf.py.backup
@@ -0,0 +1,95 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+import os
+import sys
+
+# sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath(".."))
+sys.path.insert(0, os.path.abspath("../src/"))
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+#project = "PhysicalRisk"
+#copyright = "2024, DCV"
+#author = "DCV"
+
+# The full version, including alpha/beta/rc tags
+#release = "1.1.0"
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+# The master toctree document.
+master_doc = "index"
+
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.napoleon",
+    "sphinx_toolbox.installation",
+    "sphinx_toolbox.latex",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.graphviz",
+    "sphinx.ext.inheritance_diagram",
+    "sphinx.ext.autosectionlabel",
+    "sphinx_design",
+    "sphinx.ext.intersphinx",
+    # 'myst_nb',
+    # "myst_parser",
+    "nbsphinx",
+    # "sphinxcontrib.details"
+    "sphinxcontrib.bibtex",
+    "sphinx.ext.mathjax",
+    "sphinx_simplepdf",
+]
+
+bibtex_bibfiles = ["references.bib"]
+bibtex_default_style = "alpha"
+bibtex_encoding = "latin"
+
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+
+# Not show module
+python_module_index = False
+
+# Summary
+autosummary_generate = True
+
+# Docstrings of private methods
+autodoc_default_options = {"members": True, "undoc-members": True, "private-members": False}
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_logo = "images/OS-Climate-Logo.png"
+html_theme = "pydata_sphinx_theme"
+html_static_path = ["_static"]
+
+# Don't show the code
+html_show_sourcelink = True
+
+html_theme_options = {
+    # 'logo_only': False,
+    # 'display_version': False,
+    # Table of contents options
+    "collapse_navigation": False,
+}
+
+html_sidebars = {
+    "**": [
+        "globaltoc.html",  # General table of contents
+        # 'localtoc.html',  # Local table of contents for each file
+        "searchbox.html",  # Search box
+    ]
+}
+
+# This setting ensures that each section in your documentation is automatically assigned
+# a unique label based on the document it belongs to.
+autosectionlabel_prefix_document = True
+
+# Show members in the order they appear in the source code (autodoc_member_order option).
+autodoc_member_order = "bysource"
diff --git a/docs/getting-started.rst b/docs/getting-started.rst
index b4f71c3a..ce773314 100644
--- a/docs/getting-started.rst
+++ b/docs/getting-started.rst
@@ -4,3 +4,9 @@ Getting started
 This is where you describe how to get set up on a clean install, including the
 commands necessary to get the raw data (using the `sync_data_from_s3` command,
 for example), and then how to make the cleaned, final data sets.
+
+
+.. toctree::
+   :maxdepth: 1
+
+   test
diff --git a/docs/handbook/onboarding.rst b/docs/handbook/onboarding.rst
new file mode 100644
index 00000000..1887bf72
--- /dev/null
+++ b/docs/handbook/onboarding.rst
@@ -0,0 +1,92 @@
+Onboarding a new model/data
+===========================
+
+Exceedance curves
+-----------------
+
+The starting point of the calculation is a curve of hazard event intensities, obtained at the location of one particular asset. The probability of occurrence is given either by the 'return period' or an 'exceedance probability'. The return period is the average time in years between occurrences of an event that exceeds a given intensity. The exceedance probability is the probability that in a year an event occurs that exceeds the given intensity. Exceedance probability is the reciprocal of the return period.
+
+.. image:: onboarding/return_periods.png
+   :width: 500
+
+.. image:: onboarding/exceedance_curve.png
+   :width: 500
+
+Probability bins can be obtained from the exceedance curve by subtracting one cumulative probability from another.
+
+.. image:: onboarding/histo_from_exceedance.png
+   :width: 500
+
+Note that in this case (the standard case) linear interpolation between the points of the exceedance curve is assumed, which corresponds to a flat probability density within each bin.
+
+In code, this can be done using the :code:`ExceedanceCurve`:
+
+.. code-block:: python
+
+    exceedance_curve = ExceedanceCurve(1.0 / return_periods, event_intensities)
+    intensity_bins, probs = exceedance_curve.get_probability_bins()
+
+Vulnerability/Event Model
+-------------------------
+
+In general, a Vulnerability/Event Model is responsible for obtaining, for a particular asset, instances of:
+
+#. :code:`HazardEventDistrib`, which provides the probabilities of hazard event intensities for the asset, and
+#. :code:`VulnerabilityDistrib`, which provides the conditional probabilities that, given a hazard event of a certain intensity, damage/disruption of a given level occurs.
+
+The damage or disruption is sometimes referred to as the 'impact'.
+
+The current implementation is non-parametric and based on discrete bins, although continuous versions of :code:`HazardEventDistrib`/:code:`VulnerabilityDistrib` could certainly be added, based on particular parametric distributions.
+
+In this non-parametric version, :code:`HazardEventDistrib` is a histogram of hazard event intensities: it defines a set of intensity bins and the annual probability of occurrence for each.
+
+:code:`VulnerabilityDistrib` is a matrix that provides the probability that, if an event occurs with intensity falling in a particular intensity bin, an impact in a particular impact bin occurs.
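+
+For intuition, the two distributions combine as a simple matrix product. A schematic sketch follows; the probabilities below are purely illustrative and do not come from any on-boarded model:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Annual probability of an event with intensity in each of three intensity bins.
+    event_probs = np.array([0.02, 0.01, 0.005])
+
+    # vulnerability[i, j]: probability that an event in intensity bin i
+    # produces an impact in impact bin j (each row sums to 1).
+    vulnerability = np.array(
+        [
+            [0.9, 0.1, 0.0],
+            [0.2, 0.6, 0.2],
+            [0.0, 0.3, 0.7],
+        ]
+    )
+
+    # Annual probability of an impact in each impact bin.
+    impact_probs = vulnerability.T @ event_probs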
+
+The Vulnerability/Event Model (henceforth 'model') is in general responsible for:
+
+* Defining its hazard event data requirements, by implementing method :code:`get_event_data_requests`.
+* Using the data to construct the instances of :code:`HazardEventDistrib` and :code:`VulnerabilityDistrib` that will be used in the impact calculation, by implementing method :code:`get_distributions`.
+
+:code:`HazardEventDistrib` and :code:`VulnerabilityDistrib` can be constructed in a single method to ensure their alignment, although constructing both together is probably only required in the most bespoke cases. :code:`get_event_data_requests` is a separate step for performance reasons: it is desirable that all models state their data requirements 'up-front' in order that requests can be batched for fast retrieval.
+
+The model applies to:
+
+* A type of hazard event (Inundation, Wildfire, Drought, etc.)
+* A type of asset (residential property, power generating asset, etc.)
+
+Decorators are used to 'tag' a model, so that an appropriate model can be selected for a given asset and hazard type; configuration allows certain types of :code:`Model` to be used in preference to other candidates.
+
+Specific types of model also exist for common modelling approaches. In particular, although in general it is desirable that a model has the flexibility to define both its hazard event distribution and its vulnerability distribution, in many cases the former will be sourced directly from a data set and it only remains to define the vulnerability distribution. The :code:`Model` class allows the general form of the model to be implemented. The :code:`VulnerabilityModel` class is for cases where only the vulnerability is to be specified.
+
+On-boarding a model based on a damage/disruption curve
+------------------------------------------------------
+
+A vulnerability distribution can be inferred directly from a damage/disruption curve:
+
+.. image:: onboarding/disruption_curve.png
+   :width: 500
+
+.. image:: onboarding/vulnerability_curve.png
+   :width: 500
+
+This approach is generally not recommended, as it implies that if an event occurs, damage/disruption occurs with certainty at the given level.
+
+Note that here the impact bins are inferred from the event intensity bins by interpolating the curve. The resulting impact bins have constant probability density, i.e. linear exceedance.
+
+On-boarding a model based on a damage/disruption curve with uncertainty
+-----------------------------------------------------------------------
+
+In some cases, even if a full vulnerability matrix is unavailable, an estimate of the spread around a mean damage/disruption might be provided:
+
+.. image:: onboarding/damage_with_uncertainty.png
+   :width: 500
+
+Source: Huizinga et al., 'Global flood depth-damage functions: methodology and the database with guidelines'.
+
+In such cases :code:`VulnerabilityModel` can be used to specify the closed-form conditional probability distribution to be used for a given intensity level. The cumulative distribution function is expected, and is provided via an :code:`ImpactCurve` by implementing method :code:`get_impact_curve`.
+
+Example models are provided to illustrate this. A model that uses beta distributions yields the following :code:`VulnerabilityDistrib` in the above case:
+
+.. image:: onboarding/vulnerability_with_uncertainty.png
+   :width: 500
+
+Note that a beta distribution is a commonly-used heuristic; its functional form is well-suited to vulnerability distributions, although there appears to be little justification for its use beyond providing a convenient means of applying uncertainty to a vulnerability distribution.
diff --git a/docs/handbook/onboarding/damage_with_uncertainty.pdf b/docs/handbook/onboarding/damage_with_uncertainty.pdf
new file mode 100644
index 00000000..5f9750e7
Binary files /dev/null and b/docs/handbook/onboarding/damage_with_uncertainty.pdf differ
diff --git a/docs/handbook/onboarding/damage_with_uncertainty.png b/docs/handbook/onboarding/damage_with_uncertainty.png
new file mode 100644
index 00000000..80ce00e7
Binary files /dev/null and b/docs/handbook/onboarding/damage_with_uncertainty.png differ
diff --git a/docs/handbook/onboarding/disruption_curve.pdf b/docs/handbook/onboarding/disruption_curve.pdf
new file mode 100644
index 00000000..f555baa8
Binary files /dev/null and b/docs/handbook/onboarding/disruption_curve.pdf differ
diff --git a/docs/handbook/onboarding/disruption_curve.png b/docs/handbook/onboarding/disruption_curve.png
new file mode 100644
index 00000000..46ef718e
Binary files /dev/null and b/docs/handbook/onboarding/disruption_curve.png differ
diff --git a/docs/handbook/onboarding/exceedance_curve.pdf b/docs/handbook/onboarding/exceedance_curve.pdf
new file mode 100644
index 00000000..cdc68b76
Binary files /dev/null and b/docs/handbook/onboarding/exceedance_curve.pdf differ
diff --git a/docs/handbook/onboarding/exceedance_curve.png b/docs/handbook/onboarding/exceedance_curve.png
new file mode 100644
index 00000000..72fbc494
Binary files /dev/null and b/docs/handbook/onboarding/exceedance_curve.png differ
diff --git a/docs/handbook/onboarding/histo_from_exceedance.pdf b/docs/handbook/onboarding/histo_from_exceedance.pdf
new file mode 100644
index 00000000..d969f6c2
Binary files /dev/null and b/docs/handbook/onboarding/histo_from_exceedance.pdf differ
diff --git a/docs/handbook/onboarding/histo_from_exceedance.png b/docs/handbook/onboarding/histo_from_exceedance.png
new file mode 100644
index 00000000..f8e3897a
Binary files /dev/null and b/docs/handbook/onboarding/histo_from_exceedance.png differ
diff --git a/docs/handbook/onboarding/return_periods.pdf b/docs/handbook/onboarding/return_periods.pdf
new file mode 100644
index 00000000..8f9f03cb
Binary files /dev/null and b/docs/handbook/onboarding/return_periods.pdf differ
diff --git a/docs/handbook/onboarding/return_periods.png b/docs/handbook/onboarding/return_periods.png
new file mode 100644
index 00000000..57a3f08f
Binary files /dev/null and b/docs/handbook/onboarding/return_periods.png differ
diff --git a/docs/handbook/onboarding/vulnerability_curve.pdf b/docs/handbook/onboarding/vulnerability_curve.pdf
new file mode 100644
index 00000000..947f7d45
Binary files /dev/null and b/docs/handbook/onboarding/vulnerability_curve.pdf differ
diff --git a/docs/handbook/onboarding/vulnerability_curve.png b/docs/handbook/onboarding/vulnerability_curve.png
new file mode 100644
index 00000000..485b0a44
Binary files /dev/null and b/docs/handbook/onboarding/vulnerability_curve.png differ
diff --git a/docs/handbook/onboarding/vulnerability_with_uncertainty.pdf b/docs/handbook/onboarding/vulnerability_with_uncertainty.pdf
new file mode 100644
index 00000000..21009513
Binary files /dev/null and b/docs/handbook/onboarding/vulnerability_with_uncertainty.pdf differ
diff --git a/docs/handbook/onboarding/vulnerability_with_uncertainty.png b/docs/handbook/onboarding/vulnerability_with_uncertainty.png
new file mode 100644
index 00000000..ce445ccb
Binary files /dev/null and b/docs/handbook/onboarding/vulnerability_with_uncertainty.png differ
diff --git a/docs/hazards.rst b/docs/hazards.rst
new file mode 100644
index 00000000..9c6e1d6d
--- /dev/null
+++ b/docs/hazards.rst
@@ -0,0 +1,38 @@
+Hazards
+====================
+
+The following hazards have been included in the recent stress test performed by the ECB.
+
+* `Coastal Flood `_
+
+* `Riverine Flood `_
+
+* `Wildfire `_
+
+* `Landslide `_
+
+* `Windstorm `_
+
+* `Water Stress `_
+
+
+Contents
+===================
+
+.. toctree::
+   :maxdepth: 1
+   :titlesonly:
+
+   hazards/riverine_floods
+   hazards/coastal_floods
+   hazards/wildfire
+   hazards/landslide
+   hazards/windstorm
+   hazards/water_stress
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/hazards/coastal_floods.rst b/docs/hazards/coastal_floods.rst
new file mode 100644
index 00000000..0ca28a72
--- /dev/null
+++ b/docs/hazards/coastal_floods.rst
@@ -0,0 +1,524 @@
+================================
+Coastal floods
+================================
+
+Introduction
+-----------------
+
+Currently, approximately one-third of the EU population resides within
+50 km of the coast. Over this century, extreme sea levels in Europe are
+projected to rise by one meter or possibly more. Presently, damages from
+coastal flooding in the EU and UK amount to 1.4 €billion annually,
+approximately 0.01% of the combined GDP of the EU and UK. Nearly half of
+these damages are distributed between the UK (0.4 €billion annually) and
+France (0.2 €billion annually). Each year, around 100,000 people in the
+EU and UK face exposure to coastal flooding. Considering the current
+level of coastal protection, damages from coastal flooding are expected
+to increase considerably due to global warming for all EU countries with
+a coastline. By 2100, annual damages are projected to reach 239 €billion
+(equivalent to 0.52% of the projected GDP for EU+UK in 2100) under a
+high emissions scenario and 111 €billion (0.24% GDP) under a moderate
+mitigation scenario, following the 2015 Ageing Report’s socioeconomic
+development. Among the countries, Germany, Denmark, France, Italy, the
+Netherlands, and the UK would face the highest absolute damages. For
+some nations, such as Cyprus, Greece, and Denmark, the damages represent
+a substantial portion of their future national GDP, amounting to 4.9%,
+3.2%, and 2.5%, respectively, by 2100 under high emissions. Given the
+information presented above, conducting an efficient climate risk
+assessment is of utmost importance to effectively mitigate the coastal
+flood risk `JRC
+report `__.
+
+Physical climate risk models typically consist of three main components:
+a hazard module, an exposure module, and a vulnerability module. This
+model structure is widely accepted in the literature on this topic, and
+coastal flood events are no exception. The hazard module includes
+information about the specific hazards that we want to consider in our
+model, along with their fundamental characteristics. In our case, it
+will contain specific information about flood events that we will
+discuss further on. Let us stress that in the hazard module, we do not
+refer to any group of assets yet. Instead, we focus on the hazard events
+themselves. The exposure component includes information about the
+assets, including their descriptions and specific locations. Finally,
+the vulnerability component serves as a connection between hazard,
+exposure, and loss, allowing for the estimation of the relative damage
+to an asset based on a specific hazard level. The core of the
+vulnerability model is usually given by the so-called damage functions,
+which translate the flood intensity into an estimated damage as a ratio
+of the total value of the asset.
+
+In the following, we will analyze each of the above components of the
+physical risk model in the context of coastal floods and demonstrate how
+to conduct a risk assessment for coastal flood events.
+
+Hazard module
+------------------
+
+What are coastal floods?
+==============================
+
+Coastal flooding occurs when previously dry, often low-lying land is
+submerged by seawater. This phenomenon is caused by rising sea levels,
+leading to the overflow of water onto the land. The most common
+mechanisms by which coastal flooding occurs are:
+
+- Direct flooding: This happens when the land is situated at a lower
+  elevation than the sea level, and natural barriers such as dunes have
+  not been formed by waves.
+
+- Water spilling over a barrier: During storms or high tides, when
+  water levels exceed the height of a barrier, the water spills over to
+  the other side, causing flooding. These barriers can be natural, like
+  dunes, or artificial, such as dams.
+
+- Water breaching a barrier: In this case, powerful waves, typically
+  during severe weather events, break through barriers, leading to
+  their destruction or partial collapse. These barriers can be either
+  natural or artificial.
+
+Coastal flooding may result from a wide range of factors, including:
+
+- Land elevation above sea level: The height of the land in relation to
+  the sea level plays a significant role in determining the
+  vulnerability to flooding. Areas with low elevation are more prone to
+  flooding, especially during high tides or storm events.
+
+- Erosion and subsidence: Erosion refers to the wearing away of
+  materials by natural forces like waves, wind, or water, which can
+  lead to the reduction of land area. Subsidence occurs when the ground
+  sinks or moves underground, which can also contribute to increased
+  flood risk.
+
+- Vegetation removal: The removal of vegetation can reduce the natural
+  processes of infiltration and interception, causing more water to run
+  off and increasing the risk of flooding, as the water reaches river
+  channels more rapidly.
+
+- Storm surges: Coastal flooding is often triggered by storm surges,
+  which are temporary increases in sea levels caused by events like
+  tsunamis or cyclones. A storm surge is measured by the water level
+  that exceeds the normal tidal level, excluding waves.
+
+In the following discussion, we will focus on the storm surge level as
+it is the primary factor contributing to coastal floods.
+
+Coastal flood datasets
+========================================
+
+There are two main approaches to the risk assessment of catastrophe
+events, particularly for floods. The first approach, sometimes called
+the probabilistic one, involves estimating the probability of flood
+events of different intensities and then translating this intensity into
+potential damage using the vulnerability component. 
The second approach, +known as the "event-based" or deterministic approach, simulates +thousands of potential flood events and estimates the flood peril based +on this collection of events. Creating this collection of events can be +achieved using climate models or stochastic analysis based on past +historical events. In our approach, we will rely on the probabilistic +approach, and the remainder of this document will focus entirely on this +methodology. + +The choice of the risk assessment method dictates the datasets we rely +on. In the event-based approach, the hazard dataset consists of a +collection of thousands of simulated events that can be used in flood +risk analysis. These datasets are often presented in the form of +event-loss tables. On the other hand, the probabilistic approach mainly +involves working with return period maps of flood events. A return +period map provides information about the likelihood of a flood event of +a given intensity occurring at various locations on a map. + +The area that is affected by a flood event is called a flood footprint. +The map displaying the flood footprint is provided with a specific +resolution, which is a crucial measure of the dataset’s accuracy. Return +period maps, used to indicate the intensity and frequency of flood +events, can be created based on historical climate parameters or in +scenario versions that consider various possibilities of climate change. + +In practice, we typically require a collection of return period maps for +different return periods to perform a comprehensive risk estimation. + +Coastal flood indicators - storm surges +======================================== + +Storm surges, also known as meteorological residuals or meteorological +tide, are among the primary components contributing to extreme water +levels along coastal zones, along with waves and tidal oscillations. +Storm surges result from the combined effects of wind-driven water +circulation toward or away from the coast and changes in water level +induced by atmospheric pressure, known as the inverse barometric effect. +The magnitude of a storm surge depends on various factors, including the +size, track, speed, and intensity of the storm system, local nearshore +bathymetry (water depth), and the configuration of the coastline. + +Despite the pressing need to prepare for expected changes in extreme +water levels in Europe, there remains limited, if any, information on +projections of Storm Surge Levels (SSL) under the Representative +Concentration Pathways (RCPs). In light of this context, the study +referenced in `JRC +report `__ +employed a hydrodynamic +model driven by wind and pressure fields from CMIP5 climate models to +produce projections of extreme storm surge levels (SSL) along the +European coastline. In the following, we will provide a brief analysis +of how such projections can be conducted based on the findings of this +paper. + +The process of creating a storm surge hazard dataset typically involves +the following steps: + +- Choose a hydrological model to simulate storm surge levels. + +- Validate the model by running a hindcast run of simulations and + comparing it to historical observations. + +- Conduct storm surge simulations for a selected future time interval. + +- Perform extreme value analysis to generate return period maps. + +Below we will discuss each of these steps in detail. 
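+
+As a sketch of the final step, exceedances over a high threshold can be fitted to a Generalized Pareto Distribution and converted into return levels, for example using :code:`scipy`. The data here are synthetic and purely illustrative (and real analyses would de-cluster the peaks first); the extreme value analysis itself is discussed in detail below:
+
+.. code-block:: python
+
+    import numpy as np
+    from scipy import stats
+
+    rng = np.random.default_rng(42)
+
+    # Illustrative 3-hourly storm surge levels (m) over a 30-year time slice.
+    n_years = 30
+    ssl = rng.gumbel(loc=0.3, scale=0.15, size=n_years * 365 * 8)
+
+    # Peaks-over-threshold: keep exceedances above a high threshold u.
+    u = np.quantile(ssl, 0.999)
+    exceedances = ssl[ssl > u] - u
+
+    # Fit a Generalized Pareto Distribution to the exceedances.
+    xi, _, sigma = stats.genpareto.fit(exceedances, floc=0.0)
+
+    # N-year return level: the SSL exceeded on average once every N years.
+    n_y = len(exceedances) / n_years  # exceedances per year
+    for n in [5, 10, 50, 100]:
+        level = u + stats.genpareto.ppf(1.0 - 1.0 / (n * n_y), xi, loc=0.0, scale=sigma)
+        print(f"{n}-year return SSL: {level:.2f} m")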
+
+Construction of return period maps for SSL
+================================================
+
+Hydrological model selection and setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To simulate storm surge levels, researchers commonly employ either
+open-source or commercial hydrological models. An example of such a
+model is the Delft3D-Flow module, which is part of the open-source
+Delft3D modeling framework. Setting up the model usually involves
+finding a balance between data quality, model stability, and
+computational efficiency. For instance, in a study referenced in the
+`JRC
+report `__,
+water level model outputs were
+generated at 3-hour intervals and sampled every 25 kilometers along the
+coastline.
+
+Model validation
+~~~~~~~~~~~~~~~~~
+
+The first step involves assessing the model’s performance, which means
+evaluating how effectively the model can simulate storm surges based on
+input climate conditions. To validate the model, it is common practice
+to conduct a hindcast run (i.e. a retrospective historical climate
+simulation) of storm surge levels during a specific past time period.
+The model’s simulated storm surge data is then compared to observed
+time-series data of storm surge levels from available climate databases.
+For example, in a study referenced in the `JRC
+report `__, a
+hindcast run was carried out spanning from 01/01/1979 to 01/06/2014.
+This run used
+
+- hindcast atmospheric pressure,
+
+- hindcast wind fields
+
+obtained from the `ERA-Interim
+database `__.
+Let us recall that ERA-Interim is a global atmospheric reanalysis from
+1979, continuously updated in real time (its successor is the ERA5
+reanalysis from the Copernicus Climate Change Service).
+
+The resulting storm surge values were subsequently validated by
+comparing them to water level time series data available from the `JRC
+Sea Level Database `__.
+The temporal resolution of the actual historical measurements is
+typically on the order of a few hours and the temporal extent of the
+validation dataset usually varies among stations.
+
+In the following step, tidal harmonic analysis is usually utilized to
+calculate the residual storm surge water levels, represented as
+:math:`\eta_s`. In this context, ’residual storm surge level’ refers to
+the disparity between the observed water level during a storm event and
+the water level expected solely from astronomical tide predictions.
+Afterwards, the historical storm surge observations are directly
+compared with the model output and assessed using the root mean square
+error (RMSE):
+
+.. math:: RMSE = \sqrt{ \frac{ \sum_{i=1}^{n} (\eta^i_{s,measured}-\eta^i_{s,model} )^2 }{n} }
+
+and the relative RMSE error (%RMSE)
+
+.. math:: \% RMSE = \frac{ RMSE }{ \max(\eta_{s,measured}) } \times 100,
+
+where :math:`n` is the number of measurements in the storm surge time
+series at a given location.
+
+Agreement in terms of the probability density function of the values can
+be assessed by means of the Kolmogorov–Smirnov test.
+
+SSL simulations for different climate scenarios
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After validating the model and confirming its ability to accurately
+reproduce storm surge levels, we can employ it to simulate future
+SSL. Note that in the historical scenario, it is sufficient to conduct
+statistical analysis on the hindcast run (say, the last 50 years), as,
+in the absence of climate change, future climate conditions are
+presumed to be the same as those in the past.
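+
+As an illustration, the validation metrics defined above can be computed directly; a minimal sketch with placeholder arrays standing in for the measured and modelled surge series:
+
+.. code-block:: python
+
+    import numpy as np
+    from scipy import stats
+
+    # Placeholder storm surge series (m) at one station; in practice these
+    # come from tide gauge records and the hydrodynamic model output.
+    eta_measured = np.array([0.12, 0.35, 0.28, 0.51, 0.44, 0.30])
+    eta_model = np.array([0.10, 0.32, 0.30, 0.47, 0.49, 0.27])
+
+    # Root mean square error and relative RMSE, as defined above.
+    rmse = np.sqrt(np.mean((eta_measured - eta_model) ** 2))
+    pct_rmse = 100.0 * rmse / eta_measured.max()
+
+    # Two-sample Kolmogorov-Smirnov test for agreement of the distributions.
+    ks_statistic, p_value = stats.ks_2samp(eta_measured, eta_model)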
+
+Another important aspect to note is that information about the specific
+climate projection (e.g. RCP) is typically included in the input data
+(in this case atmospheric pressure and wind fields) for the hydrological
+model, rather than in the hydrological model itself. The input to the
+model simulating Storm Surge Levels (SSL) usually comprises hindcast or
+future simulated data from various climate models.
+
+An example of SSL simulations using the Delft3D model is discussed in the
+`JRC
+report `__.
+In this work the period 1970-2000 was
+considered as the baseline period, while 2010-2040 and 2070-2100 were
+considered as the short- and long-term future scenarios under RCP4.5 and
+RCP8.5. The two time slices will be referred to as 2040 and
+2100 hereinafter for reasons of brevity (e.g. RCP8.5\ :math:`_{2040}`).
+
+The model was forced by the 6-h output of 8 climate models available in
+the CMIP5 database:
+
+- ACCESS1-0 (CSIRO-BOM Australia),
+
+- ACCESS1-3 (CSIRO-BOM Australia),
+
+- CSIRO-Mk3.6.0 (CSIRO-QCCCE, Australia),
+
+- EC-EARTH (EC-EARTH consortium),
+
+- GFDL-ESM2M (NOAA Geophysical Fluid Dynamics Laboratory USA),
+
+- HadGEM2-CC (Met Office Hadley Centre UK),
+
+- MPI-ESM-LR (Max-Planck-Institut für Meteorologie Germany),
+
+- MPI-ESM-MR (Max-Planck-Institut für Meteorologie Germany).
+
+For better analysis of the model performance and the storm surge
+scenarios, the European coastal zone was divided into 10 regions on the
+grounds of their geographical and physical setting: Black Sea, East,
+Central and West Mediterranean, South- and North-North Atlantic, Bay of
+Biscay, as well as the North, Baltic and Norwegian Seas.
+
+Extreme value statistical analysis
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the subsequent step, the tools of extreme value theory are employed
+to extract return periods from the simulations conducted earlier. Let’s
+analyze this process again using the datasets derived in the
+`JRC
+report `__
+as an example.
+
+In this work the peak-over-threshold (POT) approach was applied to
+identify extreme events for each 30-year time slice, according to a
+certain SSL threshold parameter :math:`u`. Let us recall that the POT
+approach is a statistical technique used in extreme value theory (EVT)
+to analyze and model the tail of a probability distribution,
+particularly focusing on extreme values that exceed a certain threshold.
+Given that the peaks need to be independent
+observations and not the result of the same extreme event, it is common
+to apply de-clustering of events using a minimum time difference between
+peaks varying from 34 h to 5 days.
+
+The selected exceedance events per year were pooled and modelled
+according to the Generalized Pareto Distribution (GPD):
+
+.. math::
+
+   F(y)=
+   \left\{\begin{split}
+   1- \Big( 1+\frac{\xi y}{\sigma} \Big)^{ -\frac{1}{\xi} }, \quad \xi\neq 0,\; y>0, \\
+   1-\exp\Big( \frac{-y}{\sigma} \Big), \qquad \xi= 0,\; y>0, \\
+   \end{split}\right.
+
+where :math:`y` is the time-series of SSL above the threshold :math:`u`,
+such that :math:`y = \eta_s-u`, :math:`\sigma` is the scale parameter and
+:math:`\xi` is the shape parameter. The scale parameter :math:`\sigma`
+of the GPD is related to the scale parameter :math:`\psi` and the
+location parameter :math:`\mu` of the generalized extreme value
+distribution according to:
+
+.. math:: \sigma=\psi+\xi(u-\mu).
+
+The parameters of the GPD are estimated using the maximum likelihood
+method and subsequently the :math:`N`-year return SSL is estimated as
+follows:
+
+.. math::
+
+   \eta_{s,N}=
+   \left\{\begin{split}
+   u+\frac{\sigma}{\xi} \left[ (Nn_y\zeta_u)^\xi-1 \right] , \quad \xi\neq 0, \\
+   u+ \sigma\log(Nn_y\zeta_u) , \qquad \xi= 0, \\
+   \end{split}\right.
+
+where :math:`n_y` is the number of exceedances per year in each
+time slice and :math:`\zeta_u` the probability that an empirical value
+exceeds the threshold :math:`u`. Extreme SSL values were calculated for
+different return periods; values for :math:`T_r = [5, 10, 50, 100]`
+years are discussed in :cite:`JRC-coastal`.
+
+Exposure module
+--------------------------
+
+Exposure refers to the collection of assets that are susceptible to
+potential hazards. The exposure model encompasses data regarding the
+assets, properties, and infrastructure, along with their vulnerability
+to potential risks. This information serves as a vital input for the
+catastrophe model. In practical applications, an exposure database
+typically includes the following information:
+
+- Type of assets (e.g., buildings, infrastructure, agriculture,
+  machines, etc.)
+
+- Location of assets (usually specified in terms of latitude and
+  longitude)
+
+- Value of the assets
+
+Figure `1 `_ provides a visualization of an
+illustrative portfolio consisting of power plants in Spain.
+
+.. _powerplants_exp:
+
+.. figure:: ../images/coastal_flood/exposure2.png
+   :align: center
+   :width: 100.0%
+
+   Figure 1: Power plants in Spain, with the energy production serving as a proxy
+   for the power plant’s value. The value in USD does not correspond to
+   the actual energy production value but is used to illustrate the
+   differences in energy production between the power plants in the
+   dataset.
+
+The complexity of the exposure component varies depending on the
+specific use case. When conducting risk assessment on a macroeconomic
+level, such as for a country or region, estimating exposure can be
+challenging due to the need for comprehensive information about
+economic properties and services. However, when assessing a portfolio of
+assets for a company or bank, the exposure part typically presents fewer
+difficulties. This is because companies generally possess
+detailed information about their assets, which serves as a primary input
+for our climate risk model.
+
+Let us stress that the coastal zone holds significant importance, with
+over 200 million European citizens residing in its proximity, spanning
+from the North-East Atlantic and the Baltic to the Mediterranean and
+Black Sea. Current trends suggest a continued migration towards coastal
+areas. These regions not only accommodate essential commercial
+activities but also support diverse ecosystems that offer vital habitats
+and food sources. However, coastal zones face heightened vulnerability
+to climate change due to rising sea levels and potential changes in
+storm frequency and intensity. Global mean sea level has risen by 13-20
+cm since pre-industrial times, with an acceleration observed since the
+1990s. This amplifies the risk of coastal floods, potentially exposing
+more areas to such hazards.
+
+Information regarding global exposure to coastal floods, categorized by
+countries, can be found, for instance, at:
+`https://www.marshmclennan.com `__
+
+Vulnerability module
+------------------------------
+
+While the hazard module focuses on the hazard events themselves, the
+vulnerability module’s purpose is to translate the intensity of a hazard
+into the damage incurred by exposed assets. 
This damage is typically +quantified using various metrics, such as the damage ratio, and is +heavily influenced by both hazard and exposure characteristics. As a +result, the vulnerability module relies on the outputs of the hazard and +exposure modules, with the hazard and exposure data serving as inputs +for vulnerability assessments. + +A key concept within the vulnerability module is the use of damage +functions, which quantify the impact of hazard intensity on specific +assets :cite:`Mitchel`. This framework can be applied +effectively to flood events. + +Assessing the potential damage caused by floods is a critical component +of flood risk management. To estimate direct flood damage, depth-damage +curves are commonly employed. These curves provide insights into the +expected flood damage for specific water depths, categorized by asset +types or land-use classes. Figure `2 `_ provides an illustration of a +damage function for residential, commercial, and industrial buildings +exposed to floods, sourced from the Joint Research Centre under the +European Commission. The dataset containing this damage function can be +downloaded from the Joint Research Centre repository +:cite:`Houz`. + +The plots in Figure `2 `_ demonstrate how flood +intensity, represented by flood depth, translates into potential damage +for different building types. As flood depth increases, so does the +expected damage, with all building types reaching 100% damage at +approximately 6 meters of flood depth. + +Though several countries have created flood damage models using +historical flood data and expert input, the lack of comprehensive +depth-damage curves across all regions poses a challenge. Additionally, +variations in methodologies employed by different countries for damage +modeling make direct comparisons difficult and limit the feasibility of +supra-national flood damage assessments :cite:`Houz`. + +.. _fig-damage1: + +.. figure:: ../images/coastal_flood/damage_funs.png + :align: center + :width: 80.0% + + Figure 2: The plots show the relationship between flood depth and the + corresponding damage factor, ranging from 0% to 100%, for three types + of assets: residential buildings, commercial buildings, and + industrial buildings. In all cases, the damage reaches 100% when the + flood depth approaches approximately 6 meters. + +Impact assessment +-------------------------- + +After collecting all the necessary components of hazard, exposure, and +vulnerability, we proceed with the most important part, which is risk +assessment. We usually follow these steps. First, we use the return +period maps to determine the flood intensity associated with each +location of the area of interest. Next, we map the flood intensities +data onto the exposure map to identify the specific flood hazard level +that each asset faces. Then, we estimate the potential damage to each +asset by applying the appropriate damage function, which quantifies the +relationship between flood intensity and asset vulnerability. By +utilizing these functions, we can calculate the expected level of damage +or loss for each asset based on the corresponding flood intensity. + +Once the asset damage estimates are obtained, we aggregate and analyze +the results to gain an overall assessment of the risk. This involves +summarizing the estimated damages for all exposed assets, calculating +the total expected losses, and identifying areas or assets that are at +higher risk. 
The final output of the risk assessment is usually provided
+in the form of risk metrics that provide information about the risk
+related to the portfolio of assets. Common metrics include
+:cite:`Mitchel`:
+
+- Annual Expected Loss (AEL).
+
+- | Standard deviation (SD) around the AEL.
+  | SD is a measure of the volatility of loss around the AEL.
+
+- | Occurrence Exceedance Probability (OEP).
+  | OEP is the probability that the maximum event loss in a year
+    exceeds a given level.
+
+- | Aggregate Exceedance Probability (AEP).
+  | AEP is the probability that the sum of event losses in a year
+    exceeds a given level.
+
+- | Value at risk (VaR).
+  | VaR is the loss value at a specific quantile of the relevant loss
+    distribution.
+
+Additionally, by considering factors such as asset valuation,
+replacement costs, business interruption losses, and indirect expenses,
+a more comprehensive estimation of the financial impact can be achieved.
+
+
+Bibliography
+---------------------------------
+
+.. bibliography:: ../references.bib
diff --git a/docs/hazards/landslide.rst b/docs/hazards/landslide.rst
new file mode 100644
index 00000000..d7575fd7
--- /dev/null
+++ b/docs/hazards/landslide.rst
@@ -0,0 +1,320 @@
+Introduction
+------------------
+
+A landslide is defined as the downward movement of rock, soil, or debris
+along a slope due to gravity. Landslides are typically categorized according
+to the type of material involved, such as rock, debris, earth, or mud,
+and the nature of the movement, including falls, topples, avalanches,
+slides, flows, and spreads. The broader term ’landslide’ encompasses
+various forms of mass movements like rock falls, mudslides, and debris
+flows. Additionally, mudflows and debris flows resulting from volcanic
+activity are specifically referred to as lahars
+:cite:`ESDAC_landslides`.
+
+Landslides are a recurring natural hazard in Europe, and each year, they
+unleash a cascade of devastating consequences, ranging from loss of life
+and injury to the destruction of homes, infrastructure, and valuable
+land. The expanding footprint of urban developments and transportation
+networks into hilly regions prone to landslides further compounds the
+potential impact of these geological events. Landslides, however, are
+not solely induced by prolonged or heavy rainfall; they can also be
+triggered by a multitude of natural phenomena, including earthquakes,
+floods, and volcanic eruptions, as well as by human activities,
+construction projects, or a combination of these factors.
+
+It is worth noting that landslides often incur much higher economic
+losses and casualties than commonly acknowledged. When landslides
+coincide with other major natural disasters, their effects may go
+unnoticed by the media, leading to reduced awareness among both
+authorities and the general public regarding the risks posed by
+landslides.
+
+In addition to their immediate effects, landslides can lead to
+technological disasters in industrial or developed areas, resulting in
+environmental pollution. Coastal zones and volcanic islands face the
+additional threat of landslides triggering other natural hazards, such
+as tsunamis. While landslides can directly cause significant damage
+through mass movements, Europe has well-documented instances of
+catastrophic flash floods caused by the sudden collapse of natural dams
+formed by large landslides in narrow river valleys or by the impact of
+landslides on artificial reservoirs. Additionally, the sliding and
+collapse of mine waste dams contribute to the array of landslide-related
+challenges faced by the continent.
+
+Landslide susceptibility
+---------------------------
+
+Landslide susceptibility refers to the likelihood or probability of an
+area being vulnerable to landslides or experiencing landslide events.
+This measurement is commonly employed in geology, geotechnical
+engineering, and hazard assessment to gauge the inherent risk of a
+specific location or region to landslides. Susceptibility, in this
+context, does not take into account the temporal likelihood of a
+landslide occurrence (i.e., when or how frequently landslides happen) or
+the potential magnitude of these events (i.e., their size or destructive
+potential).
+
+When evaluating landslide susceptibility on small scales (e.g., less
+than 1:200,000) across large areas (such as entire nations or
+continents), challenges arise due to generalization, limited spatial
+data resolution, and incomplete landslide inventory information.
+
+A Landslide Susceptibility Index (LSI) serves as a quantitative metric
+for evaluating the probability or risk of landslides occurring in a
+particular area. The following discussion will detail the process of
+constructing this index.
+
+Input to the LSI
+----------------------
+
+When constructing a landslide susceptibility index, it is essential to
+incorporate a variety of data types to accurately assess the potential
+risk and impact of landslides in a given area. Three crucial types of
+data typically required for this purpose are terrain gradient data,
+lithology data, and land cover data. Below is a concise description of
+each of them:
+
+- | **Terrain Gradient Data**
+  | Terrain gradient data refers to the steepness or incline of the
+    land surface. It’s a measure of how quickly elevation changes over
+    a certain distance. Essentially, it describes the slope of the
+    terrain. This data is usually expressed as a percentage or degree.
+    A :math:`p`\ % gradient means that for every 100 units (like meters
+    or feet) you move horizontally, the elevation changes by :math:`p`
+    units in the vertical direction. For example, a 100% gradient means
+    a 1-meter rise over a 1-meter horizontal distance, equivalent to a
+    45-degree angle. Terrain gradient is crucial in many fields, as it
+    affects soil erosion rates, water runoff, landslide susceptibility,
+    and construction projects. It is typically derived from topographic
+    maps or Digital Elevation Models (DEMs) using Geographic
+    Information Systems (GIS) software.
+
+- | **Lithology Data**
+  | Lithology data pertains to the physical characteristics of rocks or
+    sediments in a given area. It describes the material composition,
+    texture, and structure of the Earth’s crust. This data includes
+    information about rock types (e.g., sandstone, granite, limestone)
+    and their properties such as grain size, hardness, layering, and
+    mineral composition. Lithology data is fundamental in understanding
+    soil stability, groundwater flow, mineral resources, and potential
+    hazards like landslides or sinkholes. It is usually obtained
+    through field surveys, borehole data, geological maps, and
+    sometimes remote sensing techniques.
+
+- | **Land Cover Data**
+  | Land cover data describes the physical material at the surface of
+    the earth. This includes vegetation, water bodies, artificial
+    structures, and bare ground. The data categorizes areas based on
+    The data categorizes areas based on what covers the ground (e.g.,
+    forests, grasslands, urban areas, lakes, rivers). It is about the
+    ’cover’ rather than the use of the land. Land cover data is used in
+    environmental management, urban planning, agriculture, climate
+    science, and biodiversity conservation. It is primarily gathered
+    through satellite imagery and aerial photography, and analyzed
+    using GIS software.
+  | Each of these three parameters can be characterized by various
+    internal classes. Segmenting into classes plays a crucial role in
+    the later development of the LSI. Thus, we can identify the
+    following distinct classes within each of the above-mentioned
+    parameters:
+
+  - Slope classes: slope categories are typically defined according to
+    intervals of gradient values. For example, one might create
+    classes whose gradient values fall into the intervals 0-1, 1-3,
+    4-6, and so on.
+
+  - Lithology classes: for example, marl, volcanic rocks, detrital
+    formations, crystalline rocks, etc.
+
+  - Land cover classes: for example, cropland, open forest, bare
+    ground, meadow, artificial surfaces, etc.
+
+  The maps representing exemplary classes of slope, lithology, and land
+  cover (taken from :cite:`Elsus2`) are presented in
+  figures `1 `_, `2 `_, and `3 `_,
+  respectively.
+
+  .. _terrain:
+
+  .. figure:: ../images/landslide/terrain.PNG
+     :align: center
+     :width: 80.0%
+
+     Terrain gradient data classified into eight classes depending on
+     the gradient value. Picture sourced from
+     :cite:`Elsus2`.
+
+  .. _lithology:
+
+  .. figure:: ../images/landslide/lithology.PNG
+     :align: center
+     :width: 80.0%
+
+     The IHME 1500 lithology information grouped into 19 classes.
+     Picture sourced from :cite:`Elsus2`.
+
+  .. _landcover:
+
+  .. figure:: ../images/landslide/land_cover.PNG
+     :align: center
+     :width: 80.0%
+
+     The land cover information derived from the global GlobCover data
+     set reclassified into seven classes. Picture sourced from
+     :cite:`Elsus2`.
+
+
+.. _sec:weights:
+
+Weights assignment
+-----------------------
+
+When considering the three parameters mentioned above as inputs to the
+landslide susceptibility index (LSI), it becomes necessary to assign
+weights to these parameters based on their significance in LSI
+construction. Each weight tells us how strongly a given parameter
+contributes to the LSI. These weights may vary significantly depending
+on the type of region under consideration. Typically, three
+physiographical regions (i.e., specific geographical areas
+characterized by their distinct physical landscape and natural
+features) are considered:
+
+- coastal areas,
+
+- plains,
+
+- mountains,
+
+although subgroups within these regional classes can also be
+distinguished (for instance, in :cite:`Elsus`, the authors
+divide mountains and plains depending on the climate zone in which they
+are located). Each of these region types can be associated with
+different weights for the terrain gradient, lithology, and land cover
+factors.
+
+The process of assigning weights to specific parameters (such as slope,
+lithology, and land cover) and their respective classes typically relies
+on data from historical landslide occurrences. This data is collected in
+specialized databases, like the one mentioned in
+:cite:`Elsus` :cite:`Elsus2`. By analyzing the ground
+characteristics where these landslides have occurred, it is possible to
+construct landslide frequency ratios for each parameter and class. Based
+on these ratios, weights can then be assigned to the parameters and
+their classes.
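+
+To make the frequency-ratio step concrete, the following minimal sketch
+shows one way the per-class weights (the :math:`x_{ji}` used below)
+could be derived from a gridded landslide inventory. The file and
+column names are hypothetical, and the normalization is only one of
+several reasonable choices:
+
+.. code-block:: python
+
+   import pandas as pd
+
+   # Hypothetical inventory: one row per grid cell, with the cell's class
+   # for each parameter and a 0/1 flag marking recorded landslides.
+   cells = pd.read_csv("landslide_inventory.csv")
+
+   def class_weights(cells: pd.DataFrame, parameter: str) -> pd.Series:
+       """Frequency ratio per class: the share of landslide cells falling
+       in the class divided by the share of all cells in the class, then
+       normalized so the weights of one parameter sum to 1."""
+       landslide_share = (
+           cells.groupby(parameter)["landslide"].sum()
+           / cells["landslide"].sum()
+       )
+       area_share = cells.groupby(parameter).size() / len(cells)
+       ratio = landslide_share / area_share
+       return ratio / ratio.sum()
+
+   x_slope = class_weights(cells, "slope_class")
+   x_lithology = class_weights(cells, "lithology_class")
+   x_landcover = class_weights(cells, "landcover_class")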
+
+The information regarding the relative importance of parameters is
+typically summarized in a pairwise comparison matrix, as shown in Figure
+`4 `_. In the pairwise comparisons, a value
+between 9 (indicating that one parameter is extremely more important
+than another) and 1 (indicating that both parameters are equally
+important) can be assigned to each pair of parameters in the comparison
+matrix. This is done by rating rows against columns. For example, when
+comparing ’land cover’ to ’slope’ in the plains (assigned a value of 7),
+we conclude that ’land cover’ is significantly more important than
+’slope’ in this context. Conversely, when comparing ’land cover’ to
+’lithology’ in the mountains (assigned a value of 6), we infer that
+’land cover’ is also significantly more important than ’lithology’,
+albeit to a slightly lesser degree than in the previous case.
+
+.. _fig:comparison_matrix:
+
+.. figure:: ../images/landslide/landslide_matrix.PNG
+   :align: center
+   :width: 80.0%
+
+   Example of a comparison matrix from :cite:`Elsus`.
+   Parameter weight assignment is done for the three physiographical
+   regions: coasts, plains, mountains.
+
+
+Landslide susceptibility index and LSI maps
+-------------------------------------------
+
+Finally, with all this information, the LSI can be constructed using the
+following formula:
+
+.. math::
+   :name: LSI-equation
+
+   LSI = \sum^{3}_{j=1} w_j \times x_{ji}
+
+where:
+
+- :math:`w_j` represents the weight of parameter :math:`j` (slope,
+  lithology, land cover).
+
+- :math:`x_{ji}` represents the weight of parameter class :math:`i` in
+  criterion :math:`j` (e.g. :math:`j`\ ="land cover" and
+  :math:`i`\ ="cropland").
+
+Both :math:`w_j` and :math:`x_{ji}` are constructed as described in
+`Section 4 `_. It is important to note that the index above is not
+summed over :math:`i`: the class weights entering the sum depend on the
+classes found at a given pixel, so the LSI is computed pixel by pixel on
+the map.
+
+In the subsequent step, the obtained LSI values are categorized into
+multiple classes. This classification is somewhat subjective and
+influenced by various factors. For example, in
+:cite:`Elsus`, the authors introduced five LSI classes:
+’very low’, ’low’, ’moderate’, ’high’, and ’very high’. The
+classification rules, which depend on the LSI value, are presented in
+Figure `5 `_.
+
+.. _classification:
+
+.. figure:: ../images/landslide/landslide_classification.PNG
+   :align: center
+   :width: 90.0%
+
+   Susceptibility classification for zone-specific index maps derived in
+   :cite:`Elsus`.
+
+The authors made the decision to assign the highest susceptibility class
+exclusively to the physiographic regions of ’mountains’ and ’coasts’, as
+landslide intensity in these areas is expected to be significantly
+higher than in ’plain’ areas. In the highest susceptibility class for
+each model zone (’very high’ in ’mountains’ and ’coasts’, ’high’ in
+’plains’), approximately 50% of landslide-affected pixels (LSP) are
+located, while in the lowest susceptibility classes (’very low’ in
+’plains’, ’low’ and ’very low’ in ’mountains’ and ’coasts’,
+respectively), less than 10% of LSP occur (see Figure
+`5 `_).
+
+Let us stress that the classification of landslide susceptibility is a
+complex task, and there are currently no universally established rules
+regarding the number of classes, their characteristics, or
+specifications.
+
+To sum up, the method for creating an LSI map can be summarized as
+follows:
+
+- Select a pixel on the map.
+ +- The pixel is characterized by weights :math:`w_j` corresponding to + the inputs of slope, lithology, and land cover for the LSI, as well + as weights :math:`x_{ji}` that define the classes of these + parameters. + +- Calculate the LSI for this pixel using the formula + :eq:`LSI-equation`. + +- Classify the LSI value with respect to the chosen classification + method. + +- Repeat the procedure for all pixels on the map. + +An example of the LSI map derived in :cite:`Elsus` is +depicted in Figure `6 `_. + +.. _LSImap: + +.. figure:: ../images/landslide/LSImap.PNG + :align: center + :width: 60.0% + + Classified European landslide susceptibility map derived in + :cite:`Elsus`. + + + +Bibliography +--------------------------------- + +.. bibliography:: ../references.bib diff --git a/docs/hazards/riverine_floods.rst b/docs/hazards/riverine_floods.rst new file mode 100644 index 00000000..bf3e6f61 --- /dev/null +++ b/docs/hazards/riverine_floods.rst @@ -0,0 +1,388 @@ + +Introduction +------------------------- + +Globally, floods are the most damaging type of hazard, accounting for +44% of all disaster events from 2000 to 2019 and affecting 1.6 billion +people worldwide (UNDRR & CRED 2020). Recent studies indicate that river +flooding in the European Union and UK leads to annual damages worth €7.6 +billion and exposes approximately 160,000 people to inundation each +year. Furthermore, climate change is exacerbating the severity and +frequency of riverine flooding, with annual flooding more than doubling +in the past four decades. In a 3\ :math:`^\circ` C global warming +scenario, without climate change adaptation, flood damage in Europe +would reach €44 billion per year, posing a risk to nearly half a million +Europeans annually until the end of the century. Given these figures, +scientists emphasize the urgent need for Europe to implement adaptation +measures to mitigate the projected increase in flood risk. +:cite:`JRC_facing` :cite:`nature_summary` + +Physical climate risk models typically consist of three main components: +a hazard module, an exposure module, and a vulnerability module. This +model structure is widely accepted in the literature on this topic, and +flood events are no exception. + +The hazard module includes information about the specific hazards that +we want to consider in our model, along with their fundamental +characteristics. In our case, it will contain specific information about +flood events that we will discuss further on. Let us stress that in the +hazard module, we do not refer to any group of assets yet. Instead, we +focus on the hazard events themselves. The exposure component includes +information about the assets, including their descriptions and specific +locations. Finally, the vulnerability component serves as a connection +between hazard, exposure, and loss, allowing for the estimation of the +relative damage to an asset based on a specific hazard level. The core +of the vulnerability model is usually given by the so-called damage +functions, which translate the flood intensity into an estimated damage +as a ratio of the total value of the asset. + +In the following, we will analyze each of the above components of the +physical risk model in the context of river floods and demonstrate how +to conduct a risk assessment for flood events. + +Hazard module +---------------- + +The aim of the hazard module is to provide relevant information about +the flood events themselves. 
The main information we are interested in +is related to two issues: potential intensity and frequency of flood +events. Let us stress that "intensity" can be defined in many different +ways, which leads to the concept of the hazard indicator. By hazard +indicator, we mean a quantity that provides relevant information about +the hazard related to its intensity or frequency. The selection of a +hazard metric is a crucial aspect of the hazard model, and it typically +follows a widely accepted approach. However, it is important to note +that the chosen metric may not fully capture all the factors +contributing to damage. For instance, in the context of flood damage, +the primary metric is typically the flood depth, however, factors such +as the duration of inundation, flow velocity, and water pollution may +also have a significant impact :cite:`Mitchel`. + +The relevant information related to the selected flood indicator is +presented in the form of a suitable hazard dataset. Below, we will +briefly discuss the most common datasets utilized in flood risk +assessment, with a specific emphasis on return period maps. + +Flood datasets +===================== + +There are two main approaches to the risk assessment of catastrophe +events, particularly for floods. The first approach, sometimes called +the probabilistic one, involves estimating the probability of flood +events of different intensities and then translating this intensity into +potential damage using the vulnerability component. The second approach, +known as the "event-based" or deterministic approach, simulates +thousands of potential flood events and estimates the flood peril based +on this collection of events. Creating this collection of events can be +achieved using climate models or stochastic analysis based on past +historical events. In our approach, we will rely on the probabilistic +approach, and the remainder of this document will focus entirely on this +methodology. + +The choice of the risk assessment method dictates the datasets we rely +on. In the event-based approach, the hazard dataset consists of a +collection of thousands of simulated events that can be used in flood +risk analysis. These datasets are often presented in the form of +event-loss tables. On the other hand, the probabilistic approach mainly +involves working with return period maps of flood events. A return +period map provides information about the likelihood of a flood event of +a given intensity occurring at various locations on a map. An example of +the return period map for a 100-year return period is shown in Figure +`1 `_. + +.. _fig:hazard_int: + +.. figure:: ../images/river_flood/intensity2_riverflood.png + :align: center + :width: 80.0% + + Return period map for river floods in various locations in Spain. The + intensity is measured in terms of flood depth. + +The interpretation of the map is as follows: for each point +:math:`(x,y)` on the map, there is a unique value :math:`I` representing +the intensity in that location. The intensity value :math:`I` indicates +that the return period for intensity :math:`I` at point :math:`(x,y)` is +exactly 100 years. In simpler terms, if the intensity at a given point +is :math:`I`, it means that, statistically, at least one event of +intensity equal to or greater than :math:`I` will occur within a +100-year period. It’s important to note that the intensity indicator in +this case is flood depth. + +The area that is affected by a flood event is called a flood footprint. 
+The map displaying the flood footprint is provided with a specific +resolution, which is a crucial measure of the dataset’s accuracy. Return +period maps, used to indicate the intensity and frequency of flood +events, can be created based on historical climate parameters or in +scenario versions that consider various possibilities of climate change. + +In practice, we typically require a collection of return period maps for +different return periods to perform a comprehensive risk estimation. + +Exposure module +------------------- + +Exposure refers to the collection of assets that are susceptible to +potential hazards. The exposure model encompasses data regarding the +assets, properties, and infrastructure, along with their vulnerability +to potential risks. This information serves as a vital input for the +catastrophe model. In practical applications, an exposure database +typically includes the following information: + +- Type of assets (e.g., buildings, infrastructure, agriculture, + machines, etc.) + +- Location of assets (usually specified in terms of latitude and + longitude) + +- Value of the assets + +The complexity of the exposure component varies depending on the +specific use case. When conducting risk assessment on a macroeconomic +level, such as for a country or region, estimating exposure can be +challenging due to the need for a comprehensive information about +economic properties and services. However, when assessing a portfolio of +assets for a company or bank, the exposure part typically presents fewer +difficulties. It comes from the fact that companies generally possess +detailed information about their assets, which serves as a primary input +for our climate risk model. + +It should be stressed that flood events pose a significant risk to a +substantial number of people and assets worldwide. Globally, +approximately 2 billion individuals reside in freshwater flood +inundation zones, accounting for around 25% of the global population. +The level of exposure to river flooding varies across regions, with +Europe, South Asia, and Southeast Asia facing the highest levels of +risk. Just as an example, approximately 23% of the world’s croplands are +situated within inundation areas, including key agricultural nations +such as India (45%), China (31%), and the United States of America (23%) +:cite:`nature_summary`. Information about global exposure to +floods, divided by countries, can be found for instance in +`https://www.marshmclennan.com `__. + +Vulnerability module +------------------------- + +While in the hazard module we are interested in hazard events +themselves, the aim of the vulnerability module is to translate the +intensity of a hazard to the damage incurred by the assets exposed to +it. This damage is usually measured by various metrics, such as the +damage ratio. Since damage will strongly depend on the hazard and +exposure characteristics, it is naturally built on the foundation of the +hazard and exposure modules. More precisely, the output of the hazard +and exposure modules is usually used as the input for the vulnerability +module. The main concept in the vulnerability module is the so-called +damage functions that quantify the impact of a hazard intensity on a +given asset :cite:`Mitchel`. + +In particular the above framework may be applied to flood events. +Assessing the potential damage caused by flood events is an essential +component of effective flood risk management. To estimate direct flood +damage, depth-damage curves are commonly employed. 
These curves provide
+information on the expected flood damage at specific water depths,
+categorized by asset or land-use class. Figure `2 `_ illustrates damage
+functions for residential, commercial and industrial buildings exposed
+to floods, sourced from the Joint Research Centre under the European
+Commission. The dataset with these damage functions can be downloaded
+from the Joint Research Centre repository
+:cite:`Houz`. The plots illustrate how flood intensity
+(flood depth) is transformed into potential damage for different types
+of buildings. As the flood depth increases, the damage also rises,
+reaching 100% for all building types when the flood depth reaches
+approximately 6 meters.
+
+Though several countries have created flood damage models using
+historical flood data and expert input, the lack of comprehensive
+depth-damage curves across all regions poses a challenge. Additionally,
+variations in the methodologies employed by different countries for
+damage modeling make direct comparisons difficult and limit the
+feasibility of supra-national flood damage assessments :cite:`Houz`.
+
+.. _fig:damage1_riv:
+
+.. figure:: ../images/river_flood/damage_funs.png
+   :align: center
+   :width: 80.0%
+
+   The plots show the relationship between flood depth and the
+   corresponding damage factor, ranging from 0% to 100%, for three types
+   of assets: residential buildings, commercial buildings, and
+   industrial buildings. In all cases, the damage reaches 100% when the
+   flood depth approaches approximately 6 meters.
+
+Impact assessment
+-------------------------------
+
+After collecting all the necessary components of hazard, exposure, and
+vulnerability, we proceed with the most important part, which is risk
+assessment. We usually follow these steps: First, we use the return
+period maps to determine the flood intensity associated with each
+location of the area of interest. Next, we map the flood intensity
+data onto the exposure map to identify the specific flood hazard level
+that each asset faces. Then, we estimate the potential damage to each
+asset by applying the appropriate damage function, which quantifies the
+relationship between flood intensity and asset vulnerability. By
+utilizing these functions, we can calculate the expected level of damage
+or loss for each asset based on the corresponding flood intensity.
+
+Once the asset damage estimates are obtained, we aggregate and analyze
+the results to gain an overall assessment of the risk. This involves
+summarizing the estimated damages for all exposed assets, calculating
+the total expected losses, and identifying areas or assets that are at
+higher risk. The final output of the risk assessment is usually provided
+in the form of risk metrics that quantify the risk related to the
+portfolio of assets. Common metrics include :cite:`Mitchel` (a sketch of
+how they can be computed from an event-loss table follows the list):
+
+- Annual Expected Loss (AEL).
+
+- | Standard deviation (SD) around the AEL.
+  | SD is a measure of the volatility of loss around the AEL.
+
+- | Occurrence Exceedance Probability (OEP).
+  | OEP is the probability that the maximum event loss in a year
+    exceeds a given level.
+
+- | Aggregate Exceedance Probability (AEP).
+  | AEP is the probability that the sum of event losses in a year
+    exceeds a given level.
+
+- | Value at risk (VaR).
+  | VaR is the loss value at a specific quantile of the relevant loss
+    distribution.
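+
+As an illustration, the following minimal sketch computes these metrics
+from a simulated event-loss table. The loss distribution is a synthetic
+placeholder, not the output of any real model:
+
+.. code-block:: python
+
+   import numpy as np
+
+   rng = np.random.default_rng(seed=0)
+
+   # Hypothetical event-loss table: 10,000 simulated years, each holding
+   # the losses (in EUR) of the flood events occurring in that year.
+   years = [rng.lognormal(13.0, 1.0, rng.poisson(2)) for _ in range(10_000)]
+
+   annual_sum = np.array([y.sum() for y in years])
+   annual_max = np.array([y.max() if y.size else 0.0 for y in years])
+
+   ael = annual_sum.mean()                 # Annual Expected Loss
+   sd = annual_sum.std()                   # volatility around the AEL
+
+   level = 5_000_000.0
+   oep = (annual_max > level).mean()       # P(max event loss in a year > level)
+   aep = (annual_sum > level).mean()       # P(sum of event losses in a year > level)
+
+   var_99 = np.quantile(annual_sum, 0.99)  # 99% Value at Risk of annual loss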
+
+Additionally, by considering factors such as asset valuation,
+replacement costs, business interruption losses, and indirect expenses,
+a more comprehensive estimation of the financial impact can be achieved.
+
+Example - Flood risk assessment for power plants in Spain
+-------------------------------------------------------------------
+
+| In this section, we will briefly demonstrate how to perform a risk
+  assessment for flood events using the example of power plants in
+  Spain. The entire process will be executed using the open-source
+  platform CLIMADA, but one can also utilize other similar open-source
+  or commercial tools of this kind (see for instance the OS-Climate
+  platform). The documentation related to the CLIMADA platform can be
+  found here:
+| `https://climada-python.readthedocs `__.
+
+CLIMADA stands for CLIMate ADAptation and is a probabilistic natural
+catastrophe impact model developed and maintained mainly by the Weather
+and Climate Risks Group at ETH Zürich. It provides a software tool
+designed to assess and analyze climate-related risks and impacts for
+various hazards, such as floods, storms, heatwaves, and droughts, and
+their potential consequences on different sectors, including
+infrastructure, agriculture, and human populations. The CLIMADA platform
+integrates advanced climate models, geospatial data, and statistical
+methods to simulate and visualize the potential impacts of climate
+events.
+
+Hazard
+===================
+
+For our example we have used river flood hazard maps prepared by the
+Spanish Ministerio para la Transición Ecológica y el Reto Demográfico.
+The dataset consists of return period maps under a historical scenario
+for three different return periods: 10, 100, and 500 years. The maps
+cover the whole of Spain at a resolution of 1 m. The data can be
+downloaded from the following link:
+`https://www.miteco.gob.es `__
+
+Using the CLIMADA platform’s RiverFlood Python class, we can visualize
+the datasets as a map. Figure `3 `_ displays
+the return period map representing the intensity of the river flood for
+a 10-year return period from the dataset we used.
+
+.. _intensity_climada:
+
+.. figure:: ../images/river_flood/intensity2.png
+   :align: center
+   :width: 100.0%
+
+   The flood intensity in Spain represented in terms of a flood depth
+   for a 10-year return period.
+
+Exposure
+==============
+
+As an example of the asset portfolio in the exposure part, we utilized
+the dataset from the Global Power Plant Database, a global and
+open-source database of power plants. The dataset includes a set of
+power plants in Spain and is accessible at
+`https://datasets.wri.org `__.
+We used the electrical generating capacity in megawatts as a proxy for
+the power plant’s value. CLIMADA provides a tool to create a map
+representation of the exposure dataset, and the result can be seen in
+Figure `4 `_. The geographical longitude and latitude
+provide the location of the power plants. It is important to note that
+the value in USD does not correspond to the actual energy production
+value but is solely used to illustrate the differences in energy
+production between the power plants in the dataset.
+
+.. _powerplants_exp_riv:
+
+.. figure:: ../images/river_flood/exposure2.png
+   :align: center
+   :width: 100.0%
+
+   Power plants in Spain, with the energy production serving as a proxy
+   for the power plant’s value. The value in USD does not correspond to
+   the actual energy production value but is used to illustrate the
+   differences in energy production between the power plants in the
+   dataset.
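+
+Outside of CLIMADA, the same hazard-exposure overlay can be sketched in
+a few lines of generic Python. The file and column names below are
+hypothetical, and the snippet assumes the return period map is a GeoTIFF
+in geographic (longitude/latitude) coordinates:
+
+.. code-block:: python
+
+   import pandas as pd
+   import rasterio
+
+   # Hypothetical extract of the Global Power Plant Database for Spain,
+   # with columns: name, latitude, longitude, capacity_mw.
+   plants = pd.read_csv("power_plants_spain.csv")
+
+   # Sample the 100-year return period flood depth at each plant location.
+   with rasterio.open("flood_depth_rp100_spain.tif") as src:
+       coords = zip(plants["longitude"], plants["latitude"])
+       plants["flood_depth_m"] = [sample[0] for sample in src.sample(coords)]
+
+   # Capacity in MW serves as the value proxy, as in the text.
+   exposed = plants[plants["flood_depth_m"] > 0.0]
+   print(exposed[["name", "capacity_mw", "flood_depth_m"]])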
+
+Vulnerability
+================
+
+Next, we proceeded to the vulnerability module, aiming to convert the
+intensity of the river flood into the damage incurred on the power
+plants. The damage function utilized in this step was obtained from
+Huizinga et al. and can be downloaded from the following link:
+`https://publications.jrc.ec.europa.eu `__.
+
+This paper includes damage functions for six different types of assets.
+For the sake of simplicity, we have selected the damage function for
+residential buildings. The plot of this function is shown in Figure
+`5 `_.
+
+.. _fig:damage2:
+
+.. figure:: ../images/river_flood/damage_function_spain_riverflood.png
+   :align: center
+   :width: 80.0%
+
+   The plots demonstrate the conversion of flood depth into a damage
+   factor ranging from 0% to 100%. Here, MDD represents the mean damage
+   (impact) degree, PAA denotes the percentage of affected assets, and
+   MDR is the mean damage ratio calculated as MDR =
+   MDD\ :math:`\cdot`\ PAA.
+
+The plots demonstrate how flood intensity (flood depth) is translated
+into potential damage. The blue curve represents the mean damage ratio
+(MDR), which shows the fraction (0%-100%) of the total asset value lost
+due to the flood event. For example, from the graph, we can see that a
+flood intensity of 2 m results in approximately 50% damage to
+residential assets. The red line indicates the percentage of affected
+assets (PAA); it is an internal parameter of CLIMADA that is not
+relevant for us in this example.
+
+To calculate the damage to a set of assets, we multiply the value
+of each exposed asset in a grid cell by the mean damage ratio
+corresponding to the flood intensity in that grid cell. Figure
+`6 `_ shows the annual expected impact map.
+
+.. _impact3:
+
+.. figure:: ../images/river_flood/impact3.png
+   :align: center
+   :width: 100.0%
+
+   Map illustrating the annual expected impact on assets in different
+   locations.
+
+
+Bibliography
+---------------------------------
+
+.. bibliography:: ../references.bib
diff --git a/docs/hazards/water_stress.rst b/docs/hazards/water_stress.rst
new file mode 100644
index 00000000..c96b0f46
--- /dev/null
+++ b/docs/hazards/water_stress.rst
@@ -0,0 +1,332 @@
+Introduction
+---------------
+
+Water stress occurs when the demand for water in a region surpasses the
+available supply or when poor water quality limits its usability. It
+serves as a measure of the strain on water resources. In numerous
+regions across Europe, water stress has already become a prevailing
+concern. Droughts and water shortages are no longer uncommon or extreme
+occurrences on the continent. According to the EEA report
+:cite:`EEA`, approximately 20% of European territory and 30%
+of the European population experience water stress during an average
+year. This issue is exacerbated by climate change, which leads to more
+frequent, severe, and impactful droughts. Southern and south-western
+Europe face particularly concerning trends, with the possibility of
+river discharge decreasing by up to 40% during summer under a scenario
+of a 3°C temperature rise.
+
+This document outlines the methodology employed to generate global
+assessments of water stress, water demand, water supply, and seasonal
+variations for three 21-year periods centered around 2020, 2030, and
+2040, as detailed in :cite:`First`.
The dataset produced by +this publication contains significant information and finds application, +for example, in the calculation of ECB climate risk indicators. + +Blue water +----------------------- + +The concept of water stress is intrinsically linked to the concepts of +water demand and water supply, both of which we will explore further in +this paper. For now, it is important to note that our analysis primarily +revolves around the concept of "blue water", which refers to freshwater +present in rivers, lakes, reservoirs, and underground aquifers. This +visible water, integral to the landscape, plays a crucial role in the +Earth’s hydrological cycle. A significant aspect of blue water’s +dynamics is its relationship with runoff. Runoff is the process where +water, originating from precipitation, travels across the land’s surface +or through the soil, eventually contributing to the blue water in +rivers, lakes, and oceans. + +Let us stress that we will make a distinction between "blue water" and +"available blue water". "Blue water" refers to the water in rivers, +lakes, and aquifers – essentially, the freshwater that is visible and +stored in the ground or on the surface. On other hand, "Available blue +water" is the amount of this water that’s actually available for use. +It’s calculated as the total flow-accumulated runoff (water from +precipitation that flows over the land surface and accumulates in bodies +of water) minus the consumptive use of water upstream. Here by +consumptive we mean the portion of water use that is not returned to the +water source and is lost due to processes like evaporation, +transpiration, or incorporation into products. + +Water supply +------------------ + +The water supply information relies on runoff data, which is obtained +from global circulation models accessible i.e. through CMIP data. + +When addressing the issue of future water supplies one has to take into +account different scenarios of climate change. The typical approach +involves analyzing runoff information under different Representative +Concentration Pathways (RCPs), which is also usually accessible through +Global Circulation Models (GCMs). To enhance the accuracy of the +results, researchers often aggregate data from multiple GCMs and derive +a suitable mean from their findings. + +From the information provided, two estimates of water supply can be +calculated: + +- Total blue water :math:`(Bt)`: This represents the flow-accumulated + runoff. + +- Available blue water :math:`(B\alpha)`: This is calculated by + subtracting upstream consumptive use. + +For a more comprehensive analysis, factors related to water storage and +retention can also be considered. However, in this discussion, we will +focus exclusively on annual flow. + +Water withdrawal +---------------------- + +Water withdrawal is the total amount of water taken from a water source +(like a river, lake, or aquifer) for use. It includes water for various +purposes such as domestic use, agriculture, industry, and energy +production. Key Points: Not all withdrawn water is actually consumed. It +might be used and then returned to a source, like in the case of cooling +water in power plants. + +Models for water withdrawals and consumptive use typically take into +account projected country size, wealth, and various other +characteristics. 
The computations are typically carried out for each of +the three sectors as defined by the Food and Agriculture Organization of +the United Nations (FAO): + +- agriculture, + +- industry, + +- domestic. + +Below, we will discuss each of them in detail. + +Agriculture (Irrigation) +============================= + +The exact figures can vary by region and over time, but globally, +irrigation accounts for the largest portion of water withdrawals by far, +significantly more than the combined total of domestic and industrial +uses. We will denote by :math:`U_{ag}` and :math:`C_{ag}` the +agriculture withdrawal and consumptive use respectively. + +Irrigation consumptive use +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The agricultural water use from irrigated area can be estimated +(following FAO methodology) by means of irrigation consumptive use +(ICU). ICU is the annual depth of water needed to fulfill the deficit +between what crops could consume with ample water and what they would +consume under rainfed conditions. The formula for ICU is: + +.. math:: ICU=PET-AET + +where: + +- :math:`PET` is monthly potential evapotranspiration. Let us recall + the reader that evaporation is the process by which water changes + from a liquid to a gas or vapor. It occurs when water on the earth’s + surface, from sources like lakes, rivers, and wet soil, heats up and + turns into water vapor, which then rises into the atmosphere. + +- :math:`AET` is a monthly actual evapotranspiration (AET). It can be + directly derived from GLDAS-2 and the CMIP5 models by converting + latent heat flux into water volume using the latent heat of + vaporization. + +Irrigation water withdrawals +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Agricultural withdrawals can be calculated using FAO methodology for +irrigation water withdrawals (IWW) :cite:`FAO1`. The formula +for IWW (or agricultural water withdrawals :math:`U_{ag}`) is: + +.. math:: U_{ag}=IWW=\frac{IWR}{WRR} + +where: + +- :math:`IWR` is irrigation water requirement, which is a measure of + the water required for optimal crop growth (including consumptive and + nonconsumptive purposes). It can be computed by means of the formula + + .. math:: + + \begin{aligned} + IWR & =ICU \times A_{\text {irr }}+0.2 \times A_{\text {rice }} \\ + & =C_{a g}+0.2 \times A_{\text {rice }} + \end{aligned} + + where :math:`A_{irr}` and :math:`A_{rice}` are area actually + irrigated and area under paddy irrigation (rice), respectively. + +- :math:`WRR` is the water requirement ratio, also referred to as + irrigation efficiency. It is the amount of water required by crops to + meet their evapotranspiration needs, divided by the amount of water + actually withdrawn to meet those needs. + +Domestic and industrial withdrawals +----------------------------------- + +The model for the industrial and domestic water withdrawals in a target +year :math:`t` takes a form: + +.. 
math::
+
+   \begin{aligned}
+   U_{ind, i, t} = {} & \beta_0 + \beta_{YEAR} YEAR + \beta_1 X_{1, i, t} + \beta_2 X_{2, i, t} + \cdots \\
+   & + \beta_k X_{k, i, t} + b_{0, i} + b_{YEAR, i} YEAR + \epsilon_{i, t}
+   \end{aligned}
+
+where:
+
+- :math:`U_{ind, i, t}` is the industrial water withdrawals for country
+  :math:`i` in year :math:`t`
+
+- :math:`b_{0, i}` and :math:`b_{YEAR, i}` are country-specific terms
+  required to produce adequate fits to the data
+
+- :math:`\beta_0` is the intercept and :math:`\beta_k` the coefficient
+  of variable :math:`k` (the :math:`k`-th input to the withdrawals)
+
+- :math:`\epsilon_{i,t}` is the residual variation, distributed as
+  :math:`N(0, 1)`
+
+An analogous model is used for the domestic withdrawals :math:`U_{d}`.
+
+Downscaling procedure
+==========================
+
+Observe that the discussion regarding projected industrial and domestic
+withdrawals is conducted at the country level. To execute the
+downscaling procedure from countries to individual pixels within the
+country grid, one must associate the :math:`U_{ind}` and :math:`U_{d}`
+values with each pixel. This can be achieved using the following
+formulas:
+
+.. math:: U_{ind, p}=U_{ind, i} \frac{GDP_p}{\sum_{p \in i} GDP_p}
+
+.. math:: U_{d, p}=U_{d, i} \frac{POP_p}{\sum_{p \in i} POP_p}
+
+where:
+
+- :math:`GDP_p` is the GDP of pixel :math:`p` within the country.
+
+- :math:`\sum_{p \in i} GDP_p` is the total GDP of the country.
+
+- :math:`POP_p` is the population of pixel :math:`p` within the
+  country.
+
+- :math:`\sum_{p \in i} POP_p` is the total population of the country.
+
+As we can see, the disaggregation method in the industrial context
+operates on the assumption that water withdrawal correlates with the
+Gross Domestic Product (GDP) of the specified area. Conversely, in the
+domestic scenario, it presumes a similar correlation between water
+withdrawal and the population of the area.
+
+Consumptive use
+=======================
+
+Let us denote by :math:`C_{ind}` and :math:`C_{dom}` the industrial and
+domestic consumptive use, respectively. We calculate them from the
+formulas:
+
+.. math:: C_{ind} = U_{ind, t}\cdot\eta_{2025, ind}
+
+.. math:: C_{dom} = U_{d, t}\cdot\eta_{2025, d}
+
+where
+
+- :math:`U_{ind, t}` and :math:`U_{d, t}` are the industrial and
+  domestic withdrawals in year :math:`t`
+
+- :math:`\eta_{2025, ind}, \eta_{2025, d}` are the projected ratios
+  (consumptive use/withdrawals) of industrial and domestic consumptive
+  use in 2025, respectively
+
+Let us stress that total withdrawals and total consumptive use
+(:math:`U_t` and :math:`C_t`, respectively) are equal to the sum of
+withdrawals and consumptive use over the three sectors.
+
+.. _water-supply-1:
+
+Water supply indicator
+-------------------------
+
+As an indicator of water supply, one often takes the total blue water
+:math:`Bt` (renewable surface water).
+
+Water demand indicator
+--------------------------
+
+Water demand is typically quantified through the measurement of total
+water withdrawals, denoted as :math:`U_t`.
+
+Presently, researchers often focus on projecting changes in water
+withdrawals. For example, in :cite:`First`, the authors have
+defined the projected change in water withdrawals as the cumulative
+withdrawals for the target year divided by those for the baseline year,
+so that the baseline value equals 1. Because irrigation consumptive use
+varies depending on climate conditions, distinct estimates for
+:math:`U_{ag}` and :math:`C_{ag}` were produced for each year. The
+estimates for :math:`U_{ag}` and :math:`C_{ag}` for each ensemble
+member, scenario, and target year are calculated as the means over a
+21-year period centered around the target year.
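+
+The 21-year windowing just described can be sketched in a few lines.
+The series below is a synthetic placeholder, the function names are
+ours, and taking a centered mean around the baseline year as well is
+one reasonable reading of the text:
+
+.. code-block:: python
+
+   import numpy as np
+
+   def centered_mean(values, years, year, window=21):
+       """Mean of a yearly series over a window centered on a given year."""
+       values, years = np.asarray(values, dtype=float), np.asarray(years)
+       half = window // 2
+       mask = (years >= year - half) & (years <= year + half)
+       return values[mask].mean()
+
+   def projected_change(values, years, target, baseline=2010, window=21):
+       """Change in demand: centered mean around the target year divided
+       by the baseline value, so the baseline equals 1."""
+       return centered_mean(values, years, target, window) / centered_mean(
+           values, years, baseline, window
+       )
+
+   # Hypothetical annual total withdrawals U_t for one grid cell, 1950-2060.
+   years = np.arange(1950, 2061)
+   u_t = np.linspace(1.0, 2.2, years.size)  # placeholder upward trend
+   print(projected_change(u_t, years, target=2040))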
+
+The change in water demand from the baseline to the 2030-2050 period
+obtained by the authors of :cite:`First` can be seen in Figure
+`1 `_.
+
+.. _fig:waterwithdrawal:
+
+.. figure:: ../images/waterStress/waterwithdrawal.PNG
+   :align: center
+   :width: 80.0%
+
+   Projected Change in Water Demand from Baseline (2010) to Future
+   Period (2030–50) under Business-as-usual scenario RCP8.5/SSP2. Figure
+   taken from :cite:`First`.
+
+Water stress
+------------
+
+We define water stress (WS) as the ratio of human society’s water demand
+to the available water resources on an annual average basis:
+
+.. math:: WS_t= \frac{U_t}{B\alpha_{[t-10: t+10]}},
+
+where
+
+- :math:`WS_t` is the projected water stress in a target year :math:`t`.
+
+- :math:`U_t` is the demand for water by human society in a specific
+  year, denoted as :math:`t`.
+
+- :math:`B\alpha_{[t-10:t+10]}` is the available blue water, which is
+  defined as the flow-accumulated runoff minus the consumptive use of
+  water upstream, calculated across hydrological catchments. To
+  estimate the available blue water for a specific target year, denoted
+  as year :math:`t`, it is computed as the average over the 21-year span
+  centered on that year, using runoff from each year together with the
+  mean consumptive use for the target year.
+
+Projections indicate a significant rise in water stress throughout large
+areas of the Mediterranean, Central Asia, and the southwestern region of
+North America. The variation in water stress from the period 1950-2010
+to the projected period 2030-2050 is illustrated in Figure
+`2 `_.
+
+.. _fig:waterstress:
+
+.. figure:: ../images/waterStress/waterstress1.PNG
+   :align: center
+   :width: 80.0%
+
+   Projected Change in Water Stress from Baseline (1950–2010) to Future
+   Period (2030–50) under Scenario RCP8.5/SSP2. Figure taken from
+   :cite:`First`.
+
+
+
+Bibliography
+---------------------------------
+
+.. bibliography:: ../references.bib
diff --git a/docs/hazards/wildfire.rst b/docs/hazards/wildfire.rst
new file mode 100644
index 00000000..63ce680a
--- /dev/null
+++ b/docs/hazards/wildfire.rst
@@ -0,0 +1,412 @@
+Introduction
+----------------
+
+Wildfire risk is rapidly increasing globally, leading to dramatic
+impacts on ecosystems, biodiversity and society. Economic damages
+threaten individual households, insurance companies and governmental
+authorities alike. Over the past few years, (re-)insurance firms and
+government agencies announced record losses due to wildfire hazards
+(Swiss Re, 2019). While insured losses due to wildfire accounted for
+less than 2% of total insured losses during the period from 1985 to
+2015, this number rises to 12.4% for the period from 2016 to 2020
+:cite:`Climada_paper`.
+
+In Europe, wildfires pose a substantial concern, with the European Union
+alone experiencing extensive land burning in recent years: 4,000
+km\ :math:`^2` in 2019, 3,400 km\ :math:`^2` in 2020, and 5,000
+km\ :math:`^2` in 2021. The level of risk associated with wildfires
+varies greatly depending on the proximity to populated areas, the
+characteristics of ecosystems, and the exposure of valuable assets in
+vulnerable regions. Multiple factors contribute to the exacerbation or
+mitigation of the risk, making it challenging to predict their complex
+interactions. Moreover, the natural variability of climate,
+environmental conditions, and human factors further complicates the
+issue :cite:`DDHAD`.
+ +The wildfire risk assessment methodology is still in its early stages, +particularly when compared to other natural catastrophes like +earthquakes, tropical cyclones, or flooding. This disparity can be +attributed to the significant influence of the human factor on most +wildfires, which sets them apart from other hazards. As a result, +developing comprehensive and robust wildfire risk assessment methods +remains a challenging task. + +.. _hazard_section: + +Hazard module +----------------------- + +Wildfire risk indicators +============================== + +Wildfire is considered one of the most challenging hazards to model. In +the standard probabilistic approach for physical risk assessment, we +estimate the probability of different intensity hazard events and then +use the vulnerability component to translate this intensity into +potential damage. The probabilistic approach primarily revolves around +working with return period maps of flood events, which offer information +about the likelihood of an event of a specific intensity occurring at +various locations on a map. + +However, when it comes to wildfires, a major challenge arises due to the +significant human component in their origin. Available data reveals that +only 4% of recorded fires are attributed to causes unrelated to human +activities, with the majority caused by lightning. In contrast, a +substantial 96% of fires are associated with human activities, often +resulting from negligence or accidents. Furthermore, the intensity of +the fire is strongly influenced by the actions taken to fight it, which +can significantly reduce its intensity, duration, and potential damage. +This implies that the standard statistical methods of extreme value +theory typically used to create return period maps cannot be applied in +this context. + +Despite these challenges, it does not imply that wildfire risk +assessment cannot be conducted on any level. In fact, two common +intensity indicators are often employed for this purpose: the fire +weather index and the brightness of the ground. In the following +subsections, we will delve into both of these measures and the datasets +associated with them. + +Fire weather index (FWI) system +======================================= + +The fire weather index is a measure that assesses fire intensity by +taking into account both the rate of fire spread and the amount of fuel +being consumed. This index is calculated using the Canadian Forest +Service Fire Weather Index rating system (FWI). The Canadian Forest Fire +Weather Index (FWI) System consists of several elements that offer +insights into the relative potential for wildland fire, taking into +account fuel moisture and weather conditions. We provide the description +of the components after :cite:`CanFWI`. + +- | Fine Fuel Moisture Code (FFMC) + | It describes the mositure content of litter and other cured fine + fuels and it is an indicator of the ease of ignition and + flammability of fine fuel. + +- | Duff Moisture Code (DMC) + | It describes the average moisture content of loosely compacted + organic layers of moderate depth and it is an indicator of fuel + consumption in moderate duff layers and medium-size woody material. + +- | Drought Code (DC) + | It describes the average moisture content of deep, compact organic + layers and it is an indicator of seasonal drought effects on forest + fuels and the amount of smoldering in deep duff layers and large + logs. 
+ + From the above three components we can build the following two: + +- | Initial Spread Index (ISI) + | It quantifies the expected rate of fire spread and is based on wind + speed and FFMC. + +- | Buildup Index (BI) + | It quantifies the total amount of fuel available for combustion and + it is based on the DMC and the DC. + +Finally, the Fire Weather Index (FWI) is a comprehensive component that +combines the Initial Spread Index (ISI) and the Buildup Index (BUI) to +provide a unified numerical rating of the relative fire potential. It +takes into account the total fuel available for consumption and +dynamically integrates information from local meteorological variables +including temperature, wind speed, relative humidity, and precipitation. +A higher FWI value indicates more favorable meteorological conditions +for initiating a wildfire :cite:`DDHAD`. + +It is important to note that the Fire Weather Index (FWI) is not a +conventional measure of hazard intensity. It solely indicates the +favorable conditions for fire spread and sustainability, rather than +providing information about the actual intensity of a fire event that +occurred. Furthermore, the FWI does not consider the significant +influence of the human factor, which plays a critical role in fire +ignition. + +Wildfire brightness +======================= + +Satellite remote sensing plays a crucial role in the detection of +wildfires, with methods leveraging brightness and temperature +differences (in Kelvin [K]) in remote sensing images to determine fire +occurrences being a primary focus in forest fire monitoring research. + +Fire brightness refers to the radiant energy emitted by a fire, +typically measured using infrared radiation. Satellites equipped with +infrared sensors enable the detection and measurement of fire +brightness, facilitating the monitoring and assessment of fire activity +from space. Fire brightness offers valuable insights into the size, +intensity, and behavior of fires. By analyzing the infrared signals +captured by satellites, scientists and fire management agencies can +estimate a fire’s temperature and heat output, which serve as indicators +of its intensity. Brighter and hotter areas observed in satellite +imagery correspond to more active and intense burning. + +The detection of wildfires often employs the change detection method, +which identifies pixels in images that have undergone changes compared +to previous images. Various environmental factors can cause changes in +different aspects of pixels. By setting a threshold value, areas of +"change" and "no change" can be identified in the image. In this method, +fire detection is performed on pixels exhibiting significant changes in +brightness temperature over time, identified through a preliminary +classification based on change detection. Pixels classified as "no +change" are considered non-fire pixels, even if they exhibit high +temperatures, such as hot desert pixels. + +The dataset example containing information about fires in the Galicia +region of Spain in 2021, derived from satellite observations, is +illustrated in Figure `1 `_. The map displays the +maximum annual fire intensity observed at each point of the region in +2021. In areas where no fires occurred (non-fire or "no change" pixels), +the assigned brightness value is zero. + +.. _fig:hazard_int_fire: + +.. figure:: ../images/wildfire/wildfires_intensity2.png + :align: center + :width: 80.0% + + Maximum annual intensity of wildfires in the Galicia region of Spain + in 2021. 
The intensity is given in terms of ground brightness + measured in Kelvins. + +Data sources +================== + +The availability of wildfire data in both probabilistic and event-based +approaches remains a significant challenge, especially considering the +predominantly human factor-driven nature of most fire events. However, +there are some useful datasets that provide information about wildfire +intensity. These datasets include: + +- | The European Forest Fire Information System (EFFIS) offers the + latest and most comprehensive information on the ongoing fire + season in Europe and the Mediterranean area. This includes current + meteorological fire danger maps and forecasts up to 6 days, along + with daily updated maps of hot spots and fire perimeters. Access to + the complete dataset can be obtained via the following link: + | `https://effis.jrc.ec.europa.eu `__ + | The comprehensive methodology for this tool can be found in: + | `https://effis-gwis-cms.s3-eu-west-1.amazonaws.com/apps/fire.risk.viewer `__. + +- | Moderate Resolution Imaging Spectroradiometer (MODIS) provides a + satellite-based sensor system widely utilized for monitoring + thermal anomalies and fire activity on Earth. MODIS delivers + valuable data and imagery that aid in fire detection, tracking, and + analysis, including associated thermal anomalies. The MODIS sensor + is capable of detecting flaming and smoldering fires as small as + 1000 m\ :math:`^2`, and under optimal observation conditions, fires + as small as 50 m\ :math:`^2` can also be detected (Giglio et al., + 2020). The nominal size of a MODIS pixel is approximately 1 + kilometer (km) in both the horizontal and vertical dimensions. The + data can be downloaded via the following link: + | `https://firms.modaps.eosdis.nasa.gov `__. + +- | The World Bank provides low-resolution global return period maps + for the Fire Weather Index (FWI) for 2, 10, and 30-year return + periods. One can access the data using the following link: + | `https://www.geonode-gfdrrlab.org `__ + +Exposure module +================ + +Exposure refers to the collection of assets that are susceptible to +potential hazards. The exposure model encompasses data regarding the +assets, properties, and infrastructure, along with their vulnerability +to potential risks. This information serves as a vital input for the +catastrophe model. In practical applications, an exposure database +typically includes the following information: + +- Type of assets (e.g., buildings, infrastructure, agriculture, + machines, etc.) + +- Location of assets (usually specified in terms of latitude and + longitude) + +- Value of the assets + +The complexity of the exposure component varies depending on the +specific use case. When conducting risk assessment on a macroeconomic +level, such as for a country or region, estimating exposure can be +challenging due to the need for a comprehensive information about +economic properties and services. However, when assessing a portfolio of +assets for a company or bank, the exposure part typically presents fewer +difficulties. It comes from the fact that companies generally possess +detailed information about their assets, which serves as a primary input +for our climate risk model. + +.. _secvulner: + +Vulnerability module +--------------------------- + +The vulnerability module introduces the concept of damage functions, +which quantify the impact of hazard intensity on a specific asset +:cite:`Mitchel`. 
In this section, we will consider fire
+brightness temperature as a proxy for hazard intensity. Figure
+`2 `_ displays a damage function for assets
+exposed to fires, sourced from :cite:`Climada_paper`, which
+translates fire brightness into damage. For various natural hazards,
+impact functions often exhibit a sigmoid shape. In this example, the
+commonly used idealized function has been employed:
+
+.. math:: f(i) = \frac{i^3}{1+i^3}
+
+where :math:`i` represents the normalized intensity at a specific
+location, defined as:
+
+.. math:: i_{lat, lon} = \frac{ \max[I_{lat,lon} - I_{thresh}, 0] }{ I_{half} - I_{thresh}}.
+
+Here, :math:`I_{lat,lon}` denotes the intensity of a fire at a specific
+grid point. :math:`I_{thresh}` is the minimum intensity at which damage
+occurs (chosen as a constant 295 K, representing the minimum value of a
+FIRMS data point to be displayed as a fire). The only parameter that
+undergoes calibration is :math:`I_{half}`, which controls the steepness
+of the sigmoid function. As fire brightness increases, the damage also
+rises, reaching about 50% at 400 K
+:cite:`Climada_paper`.
+
+We emphasize that the damage function shown in Figure
+`2 `_ is the only one we have found in the
+literature so far that links fire brightness to asset damage. It is a
+universal function, meaning it is not specific to any particular group
+of assets.
+
+.. _wildfire_damage:
+
+.. figure:: ../images/wildfire/wildfire_damage.png
+   :align: center
+   :width: 80.0%
+
+   The plots illustrate the relationship between fire intensity and the
+   corresponding damage factor, ranging from 0% to 100%. The intensity
+   (fire brightness temperature) is measured in kelvins. The blue
+   horizontal line is an internal parameter of CLIMADA that is not
+   relevant for us in this example.
+
+Example: Wildfire damage for power plants in Spain
+----------------------------------------------------------
+
+Based on the discussion in section `2 `_, return
+period maps for fire intensity measured in terms of brightness might not
+provide relevant information about possible future events, making risk
+assessment in this approach challenging. Instead, we will demonstrate
+how wildfire data from past events can be utilized to estimate the
+historical damage caused by wildfires.
+
+The entire process will be executed using the open-source platform
+CLIMADA, but one can also utilize other similar open-source or
+commercial tools of this kind (see for instance the OS-Climate
+platform). The documentation related to the CLIMADA platform can be
+found here:
+`https://climada-python.readthedocs `__.
+
+CLIMADA, short for CLIMate ADAptation, is a probabilistic natural
+catastrophe impact model created and primarily managed by the Weather
+and Climate Risks Group at ETH Zürich. This model offers a sophisticated
+software tool tailored to evaluate and analyze climate-related risks and
+their consequences for a range of hazards, including floods, storms,
+heatwaves, and droughts. Its scope extends to various sectors,
+encompassing infrastructure, agriculture, and human populations. By
+seamlessly integrating cutting-edge climate models, geospatial data, and
+statistical techniques, the CLIMADA platform effectively simulates and
+visually portrays the potential impacts of climate events.
+
+Hazard
+============
+
+CLIMADA offers a Python class called "WildFire" designed specifically
+for handling wildfire events.
This class enables the modeling of +wildfire hazards using available historical data and generating +synthetic fires, which are then aggregated into event years for a +comprehensive probabilistic risk assessment. The historical data +utilized is sourced from the Fire Information for Resource Management +System (FIRMS). This system collects temperature data from various +satellite instruments, including: + +- Moderate Resolution Imaging Spectroradiometer (MODIS): Near real time + or standard quality data with 1 km resolution. Data available from + November 2000 to present. + +- Visible Infrared Imaging Radiometer Suite (VIIRS): Near real time + data with 0.375 km resolution. Data available from 20 January 2012 to + present. + +The data can be obtained at +https://firms.modaps.eosdis.nasa.gov/download/ and saved as .csv file. + +By utilizing the WildFire python class in the CLIMADA platform, we can +generate a map visualizing the wildfire intensity datasets. Figure +`3 `_ illustrates the wildfires intensity for the +year 2022, sourced from the FIRMS. The intensity is measured as the +brightness of the ground and is presented in Kelvin degrees. The map +shows the maximum fire brightness measured at each point in 2022. + +.. _intensity_climada_fire: + +.. figure:: ../images/wildfire/wildfire_intensity.png + :align: center + :width: 100.0% + + The wildfire intensity in Spain in 2022 represented in terms of a + maximal ground brightness measured at each point. + +Exposure +===================== + +As an example of the asset portfolio in the exposure part, we utilized +the dataset from the Global Power Plant Database, a global and +open-source database of power plants. The dataset includes a set of +power plants in Spain and is accessible at +`https://datasets.wri.org `__. +The value of each power plant was determined based on its maximum energy +production capacity, measured in MWh. For the sake of simplicity, we +assumed a market value of one US dollar per MWh. It’s important to note +that this value is purely illustrative and not representative of the +actual energy production value. Thus, the value assigned to each power +plant is the value of its maximum energy production capacity. CLIMADA +provides a tool to create a map representation of the exposure dataset, +and its effect can be seen in Figure `4 `_. The +geographical longitude and latitude provide the location of the power +plants. + +.. _powerplants_exp_fire: + +.. figure:: ../images/wildfire/exposure2.png + :align: center + :width: 100.0% + + Power plants in Spain, with the energy production serving as a proxy + for the power plant’s value. The value in USD does not correspond to + the actual energy production value but is used to illustrate the + differences in energy production between the power plants in the + dataset. + +Vulnerability +===================== + +Next, we proceeded to the vulnerability module, aiming to convert the +intensity of the fire into the damage incurred on the power plants. The +damage function utilized in this step is a built-in damage function of +CLIMADA and its precise construction is described in +:cite:`Climada_paper`. For a general discussion of this +damage function see `4 `_. + +Subsequently, we employ the damage function `2 `_ +to assess the impact of events from Figure `3 `_ on +our exposure. To calculate the damage value to a set of assets, we +multiply the value of each exposed asset in a grid cell by the Mean +Damage Ratio corresponding to the fire intensity in that grid cell. 
+
+Figure `5 `_ shows the impact of the hazard events from 2022 on the
+exposure.
+
+.. _impact2:
+
+.. figure:: ../images/wildfire/wildfire_impact.png
+   :align: center
+   :width: 100.0%
+
+   Map illustrating the impact of fire hazard events from 2022 on the
+   power plants in Spain.
+
+Bibliography
+---------------------------------
+
+.. bibliography:: ../references.bib
diff --git a/docs/hazards/windstorm.rst b/docs/hazards/windstorm.rst
new file mode 100644
index 00000000..b0388a16
--- /dev/null
+++ b/docs/hazards/windstorm.rst
@@ -0,0 +1,415 @@
+Introduction
+-------------------
+
+The impact of windstorms on the European economy is evident from
+historical records of wind events. In 2015, the estimated average
+annual loss due to windstorms for the EU and UK was approximately €5
+billion (in 2015 values), representing about 0.04% of the total GDP of
+these regions in that year. The damages affected various sectors,
+including roads, power plants, agriculture, forests, infrastructure,
+and private properties. Each year, around 16 million citizens in the EU
+and UK are exposed to windstorms of an intensity that occurs only once
+every 30 years in the present climate, and windstorms cause nearly 80
+deaths annually.
+
+Looking ahead, windstorm losses are projected to increase by 2050,
+reaching nearly €7 billion per year (in 2015 values) under both the
+1.5 °C and 2 °C global warming scenarios. By the end of this century,
+these losses are expected to surpass €11 billion per year, with
+slightly higher impacts for higher levels of warming
+:cite:`JRC-windstorms`.
+
+These facts underscore the importance of employing sophisticated and
+comprehensive risk assessment methods and tools to address these events
+effectively. In this document, we will present a methodology
+specifically tailored to windstorm events. Instead of creating new
+methods, we will leverage well-established tools from catastrophe
+modeling to achieve our objectives.
+
+Let us recall that physical climate risk models typically comprise
+three main components: a hazard module, an exposure module, and a
+vulnerability module. This model structure is widely acknowledged in
+the literature, and windstorm events are no exception.
+
+The hazard module includes details about the specific windstorm hazards
+we wish to include in our model, along with their essential
+characteristics. In this case, it will contain specific information
+about windstorm events, which we will discuss further. It’s essential
+to note that the hazard module does not refer to any group of assets at
+this stage; instead, it focuses solely on the hazard events themselves.
+The exposure component consists of information about the assets,
+including their descriptions and specific locations. Lastly, the
+vulnerability component acts as a link between the hazard, exposure,
+and potential loss, enabling the estimation of relative damage to an
+asset based on a specific windstorm hazard level. The essence of the
+vulnerability model lies in the employment of damage functions, which
+convert windstorm intensity into an estimated damage ratio relative to
+the total value of the asset.
+
+In the following sections, we will examine each of the above-mentioned
+components of the physical risk model in the context of windstorms,
+illustrating how to carry out a comprehensive risk assessment for
+windstorm events.
+
+Hazard module
+----------------
+
+What is a windstorm?
+=======================
+
+A windstorm is a severe weather event characterized by strong and
+damaging winds.
+Windstorms can occur in various forms and under different
+meteorological conditions, but they typically involve sustained high
+wind speeds that can cause significant impacts on communities,
+infrastructure, and the environment. Wind speeds during a windstorm
+typically exceed 55 km (34 miles) per hour. Wind damage can be
+attributed to gusts (short bursts of high-speed winds) or to longer
+periods of stronger sustained winds. Windstorms may last for just a few
+minutes when caused by downbursts from thunderstorms, or they may last
+for hours (and even several days) when they result from large-scale
+weather systems.
+
+Windstorm intensity indicators
+===================================
+
+Common metrics used to assess the intensity of windstorm hazards
+include:
+
+- Wind speed - also known as sustained wind, refers to the average
+  velocity of air movement over a specified duration at a particular
+  height above the surface (often at 10 meters). It represents the
+  overall strength or speed of the wind and is commonly expressed in
+  units like miles per hour (mph) or kilometers per hour (km/h). For
+  instance, if a weather report indicates a wind speed of 20 mph, it
+  signifies that the average speed of the wind over the specified
+  period, usually a few minutes, is 20 mph.
+
+- Wind gust - also referred to as a gust of wind or squall, represents
+  a transient and rapid surge in wind speed above the prevailing wind
+  speed at a specified height above the surface (usually 10 m). It is a
+  brief occurrence characterized by a sudden and intense burst of
+  stronger winds. Wind gusts typically endure for a few seconds to a
+  minute or two.
+
+- The Storm Severity Index (SSI) - can be defined in various ways
+  depending on the context. For a specific grid cell within the storm
+  footprint, the SSI is calculated using the formula:
+
+  .. math:: SSI = V^3\cdot A\cdot D
+
+  Here, :math:`V` represents the maximum surface wind speed, :math:`A`
+  represents the grid box area and :math:`D` represents the duration of
+  the storm (see
+  :cite:`oxf`, :cite:`SyntheticEventStorm`, :cite:`rmets_online`).
+
+Windstorm datasets
+=======================
+
+There are two main approaches to the risk assessment of catastrophe
+events in general and of windstorms in particular. The first, sometimes
+called the probabilistic approach, is based on estimating the
+probability of windstorm events of different intensities, with the aim
+of subsequently translating this intensity into potential damage via
+the damage functions of the vulnerability component. In the second,
+sometimes called the "event-based" or deterministic approach, one
+simulates thousands of potential windstorm events and subsequently
+estimates the windstorm peril based on this collection of events. The
+collection of events can be created using either climate models or
+stochastic analysis of past historical events. We will rely on the
+probabilistic approach, and the rest of this document is devoted
+entirely to this way of thinking.
+
+The choice of risk assessment method implies the choice of datasets we
+rely on. In the event-based approach, the hazard dataset consists of a
+collection of thousands of simulated events that can subsequently be
+used in wind risk analysis. These datasets are often presented in the
+form of event-loss tables. In the probabilistic approach, the main
+datasets we work with are the so-called return period maps of windstorm
+events. In a general sense, a return period map provides information
+about the likelihood of the occurrence of a windstorm event of a given
+intensity at every location on the map.
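+
+To make this interpretation concrete: assuming, for illustration, that
+exceedances in different years are independent, a :math:`T`-year return
+level is exceeded in any given year with probability :math:`1/T`, so
+the probability of at least one exceedance within :math:`n` years is
+
+.. math:: p_n = 1 - \left(1 - \frac{1}{T}\right)^n.
+
+For the 5-year return level, the annual exceedance probability is 20%,
+and the probability of at least one exceedance within any 5-year window
+is :math:`1 - 0.8^5 \approx 67\%`.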
+
+An example of a return period map, for a 5-year return period, is given
+in Figure `1 `_.
+
+.. _wind_intensity:
+
+.. figure:: ../images/windstorm/wind_intensity5.png
+   :align: center
+   :width: 80.0%
+
+   5-year return level of winter half-year (October – March) daily
+   maximum 10m wind speeds estimated for the present day climate
+   (1970-2000) by using a multi-model ensemble of 29 CORDEX regional
+   model simulations.
+
+The interpretation of the map is as follows: for each point
+:math:`(x,y)` on the map, if the intensity value is :math:`I`, then,
+statistically, every 5 years there will be at least one winter in which
+the daily maximum wind speed exceeds :math:`I`. The dataset was
+generated using a multi-model ensemble of 29 CORDEX regional model
+simulations.
+
+Some examples of data providers for various windstorm hazard datasets
+include:
+
+- https://data.4tu.nl/
+
+- http://www.europeanwindstorms.org/
+
+- `Windstorm Information
+  Service `__
+
+- `European Centre for Medium-Range Weather
+  Forecasts `__
+
+Exposure module
+---------------------------
+
+Exposure in the context of risk assessment encompasses the collection
+of assets that could potentially be affected by hazards. The exposure
+model entails data related to assets, properties, and infrastructure,
+along with their vulnerability to various risks. This information acts
+as a key input to the catastrophe model. In real-world applications, an
+exposure database typically includes details such as the type of the
+assets (e.g., buildings, infrastructure, agriculture, machinery, etc.),
+their location (usually specified in terms of latitude and longitude)
+and their value.
+
+The complexity of the exposure component varies depending on the
+specific use case. Conducting risk assessment on a macroeconomic level,
+such as for a country or region, can be challenging due to the need for
+comprehensive information about economic properties and services.
+However, when assessing a portfolio of assets for a company or bank,
+the exposure part usually presents fewer difficulties. This is because
+companies generally possess detailed information about their assets,
+which serves as a primary input for our climate risk model.
+
+Vulnerability module
+------------------------------
+
+To estimate direct windstorm damage, wind speed damage curves are
+commonly employed. These curves provide the expected windstorm damage
+at specific wind speeds, categorized by asset or land-use class.
+Figure `2 `_ illustrates two damage functions for residential buildings
+in Germany exposed to windstorms, sourced from
+`https://www.semanticscholar `__.
+
+The plots illustrate how wind intensity (wind gust speed) is
+transformed into potential damage for residential buildings. Each
+function originates from a distinct model used in its construction. The
+resulting damage varies slightly depending on the model, but both
+curves exhibit the same pattern: damage increases with higher wind
+speeds. An illustrative parameterization of such a curve is sketched
+below.
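+
+The following sketch shows one plausible functional form, a sigmoid of
+the same family as the wildfire curve used elsewhere in this
+documentation; the threshold and half-damage speeds are hypothetical
+placeholders, not calibrated values from either model in Figure `2 `_:
+
+.. code-block:: python
+
+   import numpy as np
+
+   def wind_damage_factor(v, v_thresh=25.0, v_half=60.0):
+       """Illustrative sigmoid damage curve: no damage below v_thresh
+       (m/s), a 50% damage factor at v_half (m/s)."""
+       vn = np.maximum(v - v_thresh, 0.0) / (v_half - v_thresh)
+       return vn**3 / (1.0 + vn**3)
+
+   for v in (20.0, 30.0, 40.0, 50.0):  # wind gust speeds in m/s
+       print(f"{v:.0f} m/s -> damage factor {wind_damage_factor(v):.3f}")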
+
+It is important to stress that the relationship between wind speed and
+damage is intricate and lacks extensive experimental evidence,
+particularly at higher intensities. Complicating matters further, the
+wide regional diversity of building structures worldwide hampers a
+globally standardized comparison of severe convective wind phenomena.
+Moreover, determining wind speeds in severe convective weather
+phenomena such as tornadoes or downbursts poses a significant challenge
+due to their localized and short-lived nature. As a result, these
+events are often not captured by standard meteorological station
+networks. Even if they were recorded, the devices used for measurement
+can be destroyed or provide inaccurate data, as the wind speeds
+frequently exceed their designed range. In some instances, remote
+sensing by mobile radar systems, like the Doppler on Wheels (DOW), has
+been successful in measuring wind profiles of tornadoes, but these
+systems face difficulties observing regions close to the ground, and
+their successful deployment is rare compared to the occurrence of
+tornadoes and downbursts. Consequently, estimates of wind speed are
+typically derived ex post from the damage caused by these events
+:cite:`FGD`.
+
+.. _damage_funs_wind2:
+
+.. figure:: ../images/windstorm/damage_funs_wind3.png
+   :align: center
+   :width: 80.0%
+
+   The plots illustrate how wind gust speed is translated into damage
+   for residential buildings. The curves were obtained from two
+   different models used in their construction.
+
+Impact assessment
+---------------------
+
+After collecting all the necessary components of hazard, exposure, and
+vulnerability, we proceed to the most important part, the risk
+assessment itself. We usually follow these steps: First, we use the
+return period maps to determine the wind intensity associated with each
+location in the area of interest. Next, we map the wind intensity data
+onto the exposure map to identify the specific wind hazard level that
+each asset faces. Then, we estimate the potential damage to each asset
+by applying the appropriate damage function, which quantifies the
+relationship between wind intensity and asset vulnerability. By
+utilizing these functions, we can calculate the expected level of
+damage or loss for each asset based on the corresponding wind
+intensity.
+
+Once the asset damage estimates are obtained, we aggregate and analyze
+the results to gain an overall assessment of the risk. This involves
+summarizing the estimated damages for all exposed assets, calculating
+the total expected losses, and identifying areas or assets that are at
+higher risk. The final output of the risk assessment is usually
+provided in the form of risk metrics that describe the risk related to
+the portfolio of assets. Common metrics include :cite:`Mitchel` (a
+sketch of how they can be computed from simulated losses is given
+below):
+
+- The average annual loss (AAL), also referred to as the annual
+  expected loss.
+
+- | Standard deviation (SD) around the AAL.
+  | SD is a measure of the volatility of loss around the AAL.
+
+- | Occurrence Exceedance Probability (OEP).
+  | OEP is the probability that the maximum event loss in a year
+    exceeds a given level.
+
+- | Aggregate Exceedance Probability (AEP).
+  | AEP is the probability that the sum of event losses in a year
+    exceeds a given level.
+
+- | Value at risk (VaR).
+  | VaR is the loss value at a specific quantile of the relevant loss
+    distribution.
+
+Additionally, by considering factors such as asset valuation,
+replacement costs, business interruption losses, and indirect expenses,
+a more comprehensive estimation of the financial impact can be
+achieved.
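+
+As a minimal sketch of these definitions, the snippet below computes
+the metrics from a purely synthetic event-loss table (the loss
+distribution, event frequency, and the 1M USD threshold are all
+hypothetical):
+
+.. code-block:: python
+
+   import numpy as np
+
+   rng = np.random.default_rng(42)
+
+   # Synthetic event-loss table: for each simulated year, the losses
+   # (in USD) of the windstorm events occurring in that year.
+   years = [rng.pareto(2.5, size=rng.poisson(3)) * 1e5
+            for _ in range(10_000)]
+
+   max_loss = np.array([y.max() if y.size else 0.0 for y in years])
+   agg_loss = np.array([y.sum() for y in years])
+   threshold = 1e6
+
+   aal = agg_loss.mean()                # average annual loss
+   sd = agg_loss.std()                  # volatility around the AAL
+   oep = (max_loss > threshold).mean()  # P(max event loss > threshold)
+   aep = (agg_loss > threshold).mean()  # P(sum of losses > threshold)
+   var_99 = np.quantile(agg_loss, 0.99)  # 99% value at risk
+
+   print(f"AAL={aal:,.0f} USD, SD={sd:,.0f} USD, "
+         f"OEP={oep:.4f}, AEP={aep:.4f}, VaR(99%)={var_99:,.0f} USD")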
+
+Example - Windstorm risk assessment for power plants in Spain
+-------------------------------------------------------------------
+
+In this section, we will provide a concise demonstration of how to
+conduct a risk assessment for wind events, using power plants in Spain
+as an example. The entire process will be carried out using the
+open-source platform CLIMADA, but there are alternative open-source or
+commercial tools available, such as the OS-Climate platform. Detailed
+documentation for the CLIMADA platform can be accessed here:
+`https://climada-python.readthedocs `__.
+
+CLIMADA, short for CLIMate ADAptation, is a probabilistic natural
+catastrophe impact model primarily developed and maintained by the
+Weather and Climate Risks Group at ETH Zürich. This software tool
+offers a comprehensive solution for assessing and analyzing
+climate-related risks and their potential consequences across various
+hazards, including floods, windstorms, heatwaves, and droughts. The
+platform’s versatility extends to evaluating impacts on different
+sectors, such as infrastructure, agriculture, and human populations. By
+incorporating advanced climate models, geospatial data, and statistical
+methods, CLIMADA enables the simulation and visualization of potential
+climate event impacts.
+
+Hazard
+=====================
+
+Since CLIMADA does not have a built-in Windstorm Python class, we
+developed one ourselves, defining parameters like wind intensity,
+fraction, and coordinates.
+
+For our example, we utilized windstorm hazard maps prepared by
+4TU.ResearchData, which is led by the 4TU.ResearchData Consortium
+comprising Delft University of Technology, Eindhoven University of
+Technology, the University of Twente, and Wageningen University &
+Research. The dataset contains present-day (1970-2000) return levels of
+daily maximum 10m wind speeds based on a multi-model ensemble of
+EURO-CORDEX simulations for the European region. The data is available
+for download from the 4TU Data Catalog for four different return
+periods (5, 10, 20, and 50 years) at a map resolution of 45 km, and can
+be accessed through the following link:
+`https://data.4tu.nl `__.
+
+.. _intensity_climada_wind:
+
+.. figure:: ../images/windstorm/wind_intensity5.png
+   :align: center
+   :width: 100.0%
+
+   The wind intensity in Spain, represented in terms of wind speed (in
+   meters per second) for a 5-year return period, taken from
+   4TU.ResearchData.
+
+The visualization of the dataset by CLIMADA for a 5-year return period
+is shown in Figure `3 `_.
+
+Exposure
+=============
+
+As an example of the asset portfolio in the exposure part, we utilized
+the dataset from the Global Power Plant Database, a global and
+open-source database of power plants. The dataset includes a set of
+power plants in Spain and is accessible at
+`https://datasets.wri.org `__.
+The value of each power plant was determined based on its maximum
+energy production capacity, measured in MWh. For the sake of
+simplicity, we assumed a market value of one US dollar per MWh. It’s
+important to note that this value is purely illustrative and not
+representative of the actual energy production value; the value
+assigned to each power plant is simply its maximum energy production
+capacity. CLIMADA provides a tool to create a map representation of the
+exposure dataset, and the result can be seen in Figure `4 `_. The
+geographical longitude and latitude provide the location of the power
+plants. A sketch of how such an exposure set can be assembled is given
+below.
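+
+The sketch below assembles a toy version of such an exposure table and,
+optionally, wraps it in CLIMADA's ``Exposures`` container. The plant
+rows are hypothetical, and since the ``Exposures`` constructor details
+vary between CLIMADA versions, the wrapping step is hedged accordingly:
+
+.. code-block:: python
+
+   import pandas as pd
+
+   # Hypothetical extract of the Global Power Plant Database: one row
+   # per plant, valued at one USD per MWh of maximum energy production.
+   plants = pd.DataFrame({
+       "name": ["Plant A", "Plant B"],
+       "latitude": [40.42, 41.39],
+       "longitude": [-3.70, 2.17],
+       "value": [1.2e6, 8.5e5],  # proxy value in USD
+   })
+
+   try:
+       # CLIMADA's Exposures wraps a (Geo)DataFrame with latitude,
+       # longitude and value columns; the exact API may differ by version.
+       from climada.entity import Exposures
+       exposure = Exposures(plants)
+       exposure.check()
+   except ImportError:
+       exposure = plants  # fall back to the bare DataFrame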
+
+.. _powerplants_exp_wind:
+
+.. figure:: ../images/windstorm/exposure2.png
+   :align: center
+   :width: 100.0%
+
+   Power plants in Spain, with the energy production capacity value
+   serving as a proxy for the power plant’s value. The value in USD
+   does not correspond to the actual energy production value but is
+   used to illustrate the differences in energy production between the
+   power plants in the dataset.
+
+Vulnerability
+===================
+
+Next, we proceeded to the vulnerability module, aiming to convert the
+intensity of the windstorm into the damage incurred on the power
+plants. The damage function utilized in this step is a default damage
+function from CLIMADA.
+
+.. _fig:damage2_wind:
+
+.. figure:: ../images/windstorm/wind_damage.png
+   :align: center
+   :width: 80.0%
+
+   The plots demonstrate the conversion of wind speed into a damage
+   factor ranging from 0% to 100%. Here, MDD represents the mean damage
+   (impact) degree, PAA denotes the percentage of affected assets, and
+   MDR is the mean damage ratio calculated as MDR =
+   MDD\ :math:`\cdot`\ PAA.
+
+The plots demonstrate how wind intensity (wind speed) is translated
+into potential damage. The blue curve represents the mean damage ratio
+(MDR), which shows the fraction (0%-100%) of the total asset value lost
+due to the windstorm event. The red line indicates the percentage of
+affected assets (PAA); it is an internal parameter of CLIMADA that is
+not relevant for us in this example.
+
+Figure `6 `_ shows the annual expected impact map. As shown, the damage
+for all assets is nearly zero, which can be attributed to two reasons.
+Firstly, the wind intensities depicted in Figure `3 `_ are relatively
+low in Spain, with values not exceeding 20 m/s in most locations.
+Secondly, the damage function exhibits relatively flat behavior for
+wind speeds up to 40 m/s.
+
+.. _impact2_wind:
+
+.. figure:: ../images/windstorm/wind_impact_4TU.png
+   :align: center
+   :width: 100.0%
+
+   The map represents the annual expected impact on power plants in
+   Spain due to windstorm events. The nearly zero impact is a result of
+   the region’s relatively low wind intensities, which correspond to
+   low damage factors associated with those intensities.
+
+
+Bibliography
+---------------------------------
+
+..
bibliography:: ../references.bib diff --git a/docs/images/Logo.png b/docs/images/Logo.png new file mode 100644 index 00000000..1352b108 Binary files /dev/null and b/docs/images/Logo.png differ diff --git a/docs/images/OS-Climate-Logo.png b/docs/images/OS-Climate-Logo.png new file mode 100644 index 00000000..36c88781 Binary files /dev/null and b/docs/images/OS-Climate-Logo.png differ diff --git a/docs/images/PRR-1.jpg b/docs/images/PRR-1.jpg new file mode 100644 index 00000000..2444b580 Binary files /dev/null and b/docs/images/PRR-1.jpg differ diff --git a/docs/images/PRR-2.jpg b/docs/images/PRR-2.jpg new file mode 100644 index 00000000..c116d322 Binary files /dev/null and b/docs/images/PRR-2.jpg differ diff --git a/docs/images/PRR-3.jpg b/docs/images/PRR-3.jpg new file mode 100644 index 00000000..02cd2876 Binary files /dev/null and b/docs/images/PRR-3.jpg differ diff --git a/docs/images/PRR-4.jpg b/docs/images/PRR-4.jpg new file mode 100644 index 00000000..1ef660cd Binary files /dev/null and b/docs/images/PRR-4.jpg differ diff --git a/docs/images/PRR-5.jpg b/docs/images/PRR-5.jpg new file mode 100644 index 00000000..135af7c0 Binary files /dev/null and b/docs/images/PRR-5.jpg differ diff --git a/docs/images/PRR-6.jpg b/docs/images/PRR-6.jpg new file mode 100644 index 00000000..f48b4d22 Binary files /dev/null and b/docs/images/PRR-6.jpg differ diff --git a/docs/images/ProjectsPhysicalRisk_Image3.jpg b/docs/images/ProjectsPhysicalRisk_Image3.jpg new file mode 100644 index 00000000..07bcd626 Binary files /dev/null and b/docs/images/ProjectsPhysicalRisk_Image3.jpg differ diff --git a/docs/images/coastal_flood/damage_funs.png b/docs/images/coastal_flood/damage_funs.png new file mode 100644 index 00000000..bd043c57 Binary files /dev/null and b/docs/images/coastal_flood/damage_funs.png differ diff --git a/docs/images/coastal_flood/exposure2.png b/docs/images/coastal_flood/exposure2.png new file mode 100644 index 00000000..1b175070 Binary files /dev/null and b/docs/images/coastal_flood/exposure2.png differ diff --git a/docs/images/landslide/LSImap.PNG b/docs/images/landslide/LSImap.PNG new file mode 100644 index 00000000..a9005f79 Binary files /dev/null and b/docs/images/landslide/LSImap.PNG differ diff --git a/docs/images/landslide/land_cover.PNG b/docs/images/landslide/land_cover.PNG new file mode 100644 index 00000000..58c719d4 Binary files /dev/null and b/docs/images/landslide/land_cover.PNG differ diff --git a/docs/images/landslide/landslide_classification.PNG b/docs/images/landslide/landslide_classification.PNG new file mode 100644 index 00000000..199dbfb1 Binary files /dev/null and b/docs/images/landslide/landslide_classification.PNG differ diff --git a/docs/images/landslide/landslide_matrix.PNG b/docs/images/landslide/landslide_matrix.PNG new file mode 100644 index 00000000..affa689a Binary files /dev/null and b/docs/images/landslide/landslide_matrix.PNG differ diff --git a/docs/images/landslide/lithology.PNG b/docs/images/landslide/lithology.PNG new file mode 100644 index 00000000..0f7a50fa Binary files /dev/null and b/docs/images/landslide/lithology.PNG differ diff --git a/docs/images/landslide/terrain.PNG b/docs/images/landslide/terrain.PNG new file mode 100644 index 00000000..ab1c268e Binary files /dev/null and b/docs/images/landslide/terrain.PNG differ diff --git a/docs/images/methodology/PRmodelling.png b/docs/images/methodology/PRmodelling.png new file mode 100644 index 00000000..84871d82 Binary files /dev/null and b/docs/images/methodology/PRmodelling.png differ diff --git 
a/docs/images/methodology/ex_curve.png b/docs/images/methodology/ex_curve.png new file mode 100644 index 00000000..ecdc69c0 Binary files /dev/null and b/docs/images/methodology/ex_curve.png differ diff --git a/docs/images/methodology/flood_impact_JRC.png b/docs/images/methodology/flood_impact_JRC.png new file mode 100644 index 00000000..debfda65 Binary files /dev/null and b/docs/images/methodology/flood_impact_JRC.png differ diff --git a/docs/images/methodology/histogram.png b/docs/images/methodology/histogram.png new file mode 100644 index 00000000..282d2e87 Binary files /dev/null and b/docs/images/methodology/histogram.png differ diff --git a/docs/images/methodology/intensity2.png b/docs/images/methodology/intensity2.png new file mode 100644 index 00000000..2a8b76a3 Binary files /dev/null and b/docs/images/methodology/intensity2.png differ diff --git a/docs/images/river_flood/damage_function_spain_riverflood.png b/docs/images/river_flood/damage_function_spain_riverflood.png new file mode 100644 index 00000000..d85cf605 Binary files /dev/null and b/docs/images/river_flood/damage_function_spain_riverflood.png differ diff --git a/docs/images/river_flood/damage_funs.png b/docs/images/river_flood/damage_funs.png new file mode 100644 index 00000000..bd043c57 Binary files /dev/null and b/docs/images/river_flood/damage_funs.png differ diff --git a/docs/images/river_flood/exposure2.png b/docs/images/river_flood/exposure2.png new file mode 100644 index 00000000..1b175070 Binary files /dev/null and b/docs/images/river_flood/exposure2.png differ diff --git a/docs/images/river_flood/impact3.png b/docs/images/river_flood/impact3.png new file mode 100644 index 00000000..4490381c Binary files /dev/null and b/docs/images/river_flood/impact3.png differ diff --git a/docs/images/river_flood/intensity2.png b/docs/images/river_flood/intensity2.png new file mode 100644 index 00000000..2a8b76a3 Binary files /dev/null and b/docs/images/river_flood/intensity2.png differ diff --git a/docs/images/river_flood/intensity2_riverflood.png b/docs/images/river_flood/intensity2_riverflood.png new file mode 100644 index 00000000..2a8b76a3 Binary files /dev/null and b/docs/images/river_flood/intensity2_riverflood.png differ diff --git a/docs/images/waterStress/waterstress1.PNG b/docs/images/waterStress/waterstress1.PNG new file mode 100644 index 00000000..5ac03598 Binary files /dev/null and b/docs/images/waterStress/waterstress1.PNG differ diff --git a/docs/images/waterStress/waterwithdrawal.PNG b/docs/images/waterStress/waterwithdrawal.PNG new file mode 100644 index 00000000..11bd40fd Binary files /dev/null and b/docs/images/waterStress/waterwithdrawal.PNG differ diff --git a/docs/images/wildfire/2. damage_function.png b/docs/images/wildfire/2. damage_function.png new file mode 100644 index 00000000..d85cf605 Binary files /dev/null and b/docs/images/wildfire/2. 
damage_function.png differ diff --git a/docs/images/wildfire/EAI_map.png b/docs/images/wildfire/EAI_map.png new file mode 100644 index 00000000..bf0c2af4 Binary files /dev/null and b/docs/images/wildfire/EAI_map.png differ diff --git a/docs/images/wildfire/damage_funs.png b/docs/images/wildfire/damage_funs.png new file mode 100644 index 00000000..bd043c57 Binary files /dev/null and b/docs/images/wildfire/damage_funs.png differ diff --git a/docs/images/wildfire/exposure2.png b/docs/images/wildfire/exposure2.png new file mode 100644 index 00000000..1b175070 Binary files /dev/null and b/docs/images/wildfire/exposure2.png differ diff --git a/docs/images/wildfire/impact3.png b/docs/images/wildfire/impact3.png new file mode 100644 index 00000000..4490381c Binary files /dev/null and b/docs/images/wildfire/impact3.png differ diff --git a/docs/images/wildfire/intensity2.png b/docs/images/wildfire/intensity2.png new file mode 100644 index 00000000..2a8b76a3 Binary files /dev/null and b/docs/images/wildfire/intensity2.png differ diff --git a/docs/images/wildfire/powerplants.png b/docs/images/wildfire/powerplants.png new file mode 100644 index 00000000..848dbacd Binary files /dev/null and b/docs/images/wildfire/powerplants.png differ diff --git a/docs/images/wildfire/wildfire_damage.png b/docs/images/wildfire/wildfire_damage.png new file mode 100644 index 00000000..24263ee6 Binary files /dev/null and b/docs/images/wildfire/wildfire_damage.png differ diff --git a/docs/images/wildfire/wildfire_impact.png b/docs/images/wildfire/wildfire_impact.png new file mode 100644 index 00000000..a366a30e Binary files /dev/null and b/docs/images/wildfire/wildfire_impact.png differ diff --git a/docs/images/wildfire/wildfire_intensity.png b/docs/images/wildfire/wildfire_intensity.png new file mode 100644 index 00000000..a38b637f Binary files /dev/null and b/docs/images/wildfire/wildfire_intensity.png differ diff --git a/docs/images/wildfire/wildfires_intensity2.png b/docs/images/wildfire/wildfires_intensity2.png new file mode 100644 index 00000000..c920dcf1 Binary files /dev/null and b/docs/images/wildfire/wildfires_intensity2.png differ diff --git a/docs/images/windstorm/damage_funs_wind3.png b/docs/images/windstorm/damage_funs_wind3.png new file mode 100644 index 00000000..d66fe069 Binary files /dev/null and b/docs/images/windstorm/damage_funs_wind3.png differ diff --git a/docs/images/windstorm/exposure2.png b/docs/images/windstorm/exposure2.png new file mode 100644 index 00000000..1b175070 Binary files /dev/null and b/docs/images/windstorm/exposure2.png differ diff --git a/docs/images/windstorm/wind_damage.png b/docs/images/windstorm/wind_damage.png new file mode 100644 index 00000000..7d4ef9bc Binary files /dev/null and b/docs/images/windstorm/wind_damage.png differ diff --git a/docs/images/windstorm/wind_impact_4TU.png b/docs/images/windstorm/wind_impact_4TU.png new file mode 100644 index 00000000..6a6b8274 Binary files /dev/null and b/docs/images/windstorm/wind_impact_4TU.png differ diff --git a/docs/images/windstorm/wind_intensity5.png b/docs/images/windstorm/wind_intensity5.png new file mode 100644 index 00000000..ef581de5 Binary files /dev/null and b/docs/images/windstorm/wind_intensity5.png differ diff --git a/docs/index.rst b/docs/index.rst index ee1dba10..f4e7c4d0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,20 +1,133 @@ -.. project-template documentation master file, created by - sphinx-quickstart. 
- You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. +Welcome to PhysicalRisk's documentation! +======================================== -project-template documentation! -============================================== +This website contains the documentation related to the Physical Risk engine of OS-Climate. -Contents: +Please, visit the `official webpage! `_ + +.. _cards-clickable: + +.. + list with all the possible icons for the grid + https://sphinx-design.readthedocs.io/en/latest/badges_buttons.html + +.. raw:: html + + + + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Getting started + :link: getting-started.html + :text-align: center + + :octicon:`location;5em;sd-text-info` + ^^^ + + .. grid-item-card:: Code documentation + :link: physrisk.html + :text-align: center + + :octicon:`code;5em;sd-text-info` + ^^^ + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Commands + :link: commands.html + :text-align: center + + :octicon:`terminal;5em;sd-text-info` + ^^^ + + .. grid-item-card:: Onboarding + :link: handbook/onboarding.html + :text-align: center + + :octicon:`upload;5em;sd-text-info` + ^^^ + See `Contributing `_ + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: methodology + :link: methodology.html + :text-align: center + + :octicon:`book;5em;sd-text-info` + ^^^ + + .. grid-item-card:: hazards + :link: hazards.html + :text-align: center + + :octicon:`flame;5em;sd-text-info` + ^^^ + + + +Physical Risk and Resilience +============================= + +.. image:: images/PRR-1.jpg + :width: 800 + +| + +.. image:: images/PRR-2.jpg + :width: 800 + +| + +.. image:: images/ProjectsPhysicalRisk_Image3.jpg + :width: 800 + +| + +.. image:: images/PRR-3.jpg + :width: 800 + +| + +.. image:: images/PRR-4.jpg + :width: 800 + +| + +.. image:: images/PRR-5.jpg + :width: 800 + +| + +.. image:: images/PRR-6.jpg + :width: 800 + + + +Contents +========== .. toctree:: :maxdepth: 2 getting-started + physrisk commands - - + handbook/onboarding + Methodology + hazards Indices and tables ================== diff --git a/docs/make.bat b/docs/make.bat index 2f466854..51d36527 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -1,190 +1,35 @@ @ECHO OFF +pushd %~dp0 + REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) +set SOURCEDIR=. set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. 
doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 ) -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\project-template.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\project-template.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. 
- echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) +if "%1" == "" goto help -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end +popd diff --git a/docs/methodology.rst b/docs/methodology.rst new file mode 100644 index 00000000..443385e2 --- /dev/null +++ b/docs/methodology.rst @@ -0,0 +1,14 @@ +================================= +Physical Climate Risk Methodology +================================= + +:download:`Download PDF <../methodology/PhysicalRiskMethodology.pdf>` + +.. + In order to display the pdf, it has to be included in the _static directory + There has to be a way of reading it automatically from the repo.. #TODO + +.. pdf-include:: _static/PhysicalRiskMethodology.pdf + :toolbar: 0 + :width: 100% + :height: 800px diff --git a/docs/methodology/Asset drilldown.mp4 b/docs/methodology/Asset drilldown.mp4 deleted file mode 100644 index 10a5b3c0..00000000 Binary files a/docs/methodology/Asset drilldown.mp4 and /dev/null differ diff --git a/docs/methodology/Asset drilldown_Trim.mp4 b/docs/methodology/Asset drilldown_Trim.mp4 deleted file mode 100644 index 99f388e3..00000000 Binary files a/docs/methodology/Asset drilldown_Trim.mp4 and /dev/null differ diff --git a/docs/methodology/PhysicalRiskMethodology.tex b/docs/methodology/PhysicalRiskMethodology.tex deleted file mode 100644 index bb918923..00000000 --- a/docs/methodology/PhysicalRiskMethodology.tex +++ /dev/null @@ -1,418 +0,0 @@ -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% -% Generic Methodology for Physical Climate Risk Modelling -% -% 2021 OS-Climate -% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - -\documentclass[a4paper,11pt]{extarticle} %12pt - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Required packages -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\usepackage[utf8]{inputenc} -\usepackage{amsmath} -\usepackage{amssymb} -\usepackage{bm} -\usepackage{fancyhdr} -\usepackage{float} -\usepackage{framed} -\usepackage{graphicx} -\usepackage[colorlinks,citecolor=blue,urlcolor=black,linkcolor=black,bookmarks=false,hypertexnames=true]{hyperref} -\usepackage{numprint} -%\usepackage{physics} % causing problems; using different notation for bra-ket -%\usepackage{sfmath}[cmbright] -%\usepackage[round]{natbib} -\usepackage{ragged2e} -\usepackage{scrextend} -\usepackage{sistyle} -\usepackage{subcaption} - -\usepackage{bookmark} - -\usepackage[normalem]{ulem} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% General settings -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%% -% Font -%%%%%%%%%%%%% - -%\renewcommand*{\familydefault}{\sfdefault} - -%%%%%%%%%%%%% -% Spacing -%%%%%%%%%%%%% - 
-\setlength{\parskip}{1em} -\setlength{\parindent}{0em} -\frenchspacing - -%%%%%%%%%%%%% -% Hyphenation -%%%%%%%%%%%%% - -\tolerance=1 -\emergencystretch=\maxdimen -\hyphenpenalty=10000 -\hbadness=10000 - -%%%%%%%%%%%%% -% Number formatting -%%%%%%%%%%%%% - -\npthousandsep{,} -\npthousandthpartsep{} -\npdecimalsign{.} - -%%%%%%%%%%%%% -% Footnotes -%%%%%%%%%%%%% - -\usepackage[hang, flushmargin]{footmisc} -\setlength{\footnotemargin}{4mm} - -%%%%%%%%%%%%% -% Running title -%%%%%%%%%%%%% - -\pagestyle{fancy} -\lhead{OS-Climate} -\rhead{Physical Climate Risk Methodology} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\title{Physical Climate Risk Methodology} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\author{Joe Moorhouse\thanks{\textit{E-mail}: Joe.Moorhouse@gmail.com.} - \and - Florian Gallo\thanks{ - \smallskip - \newline% \indent - The views expressed in this paper are those of the authors and do not necessarily reflect the views and policies of their respective employers.} - } - -\date{24 September 2021 [Draft]} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{document} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\maketitle{} - -%\begin{abstract} -%Add abstract here. -%\end{abstract} - - -\clearpage -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% TEMP ONLY during writing stage -\setcounter{tocdepth}{4} -\renewcommand{\contentsname}{Contents} -\tableofcontents - -%\listoftables - -%\listoffigures -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - -\clearpage -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Introduction} -\label{Sec:Introduction} - -The purpose of this paper is to present the methodology of a framework that is sufficiently generic to be used for a wide range of physical climate risk models. The motivation is to provide a specification for use in the OS-Climate (OS-C) \cite{OSC} physical climate risk calculation module. OS-C aims to provide an platform unconstrained by any one particular methodology choice. However, it is hoped that just as the open-source Oasis Loss Modelling Framework \cite{OasisLMF} (henceforth Oasis) was designed to accommodate a wide range of catastrophe models, within a well-defined framework and set of interfaces, an analogous physical risk modelling framework can be defined. This could expedite the implementation of a wide range of models and promote standardisation and resource sharing. - -The modelling framework can be split into three main parts: - -\begin{enumerate} - \item Hazard - \item Vulnerability - \item Financial -\end{enumerate} - -Hazard models are used to obtain probability distributions of future events, such as inundations or periods of drought. Vulnerability models are used to assess the impact of these events on the assets within a portfolio. Financial models convert these impacts into financial measures. These could be measures of the financial impact of climate change on a portfolio of assets, for example Average Annual Loss or loss Exceedance Probability. The impact on an asset could also be used in structural credit risk models. - -At time of writing, physical risk calculations may make use of `bulk-assessment' approaches where accurate asset vulnerability information is unavailable and approximations are therefore required. 
The modelling framework accommodates bulk-assessment-type models as well as approaches capable of modelling vulnerability more precisely\footnote{There is potentially great value in the results obtained from very simple models, as long as the model error can be quantified. The aim is to be able to accommodate both simple and complex models in combination.}. The framework is designed to control the model risk that this creates by incorporating a model of the uncertainty of the approximations into the calculation. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\section{Model description} - -\subsection{Overview} -A high-level view of the physical risk modelling framework is shown in Figure~\ref{Fig:top_level_view}. - -Hazard models are used to create hazard data sets, providing probability distributions of events such as inundations, periods of drought or periods of high wind. These data sets might, for example, specify the annual probability of occurrence of an event (e.g. high wind) of a certainty intensity (e.g. maximum wind speed) for some specified year in the future. - -Vulnerability models are used to construct, for a given set of assets, both: -\begin{itemize} - \item Asset event distributions: probability distributions of events that impact the assets at their locations, derived from hazard data sets - \item Vulnerability distributions: conditional probability distributions of the impacts on the assets of events of given intensity -\end{itemize} - -The asset impact model uses these quantities to derive distributions of impact for each asset. An impact might be, for example, damage to the asset, expressed as a fraction of the asst value. The financial risk model calculates financial measures from the impact distributions, for example Exceedance Probability. - -Within the OS-C modelling framework, models are interchangeable and allow forms of composition. That is, different choices of vulnerability model may be used for a particular asset and a vulnerability model may use different hazard data sets for its calculation. The intention is to allow a risk calculation to be built from an ecosystem of hazard and vulnerability models according to the requirements of the model owner. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{figure}[ht] - - \begin{framed} - % left, bottom, right, top - \includegraphics[clip, trim=0cm 7cm 0cm 1cm, width=1.00\textwidth]{plots/top_level_view.pdf} - - \end{framed} - - \footnotesize - - \renewcommand{\arraystretch}{1.01} - - \vspace{-3ex} - - \vspace{-0.5ex} - - \caption{\small Physical risk model components. } - \label{Fig:top_level_view} - -\end{figure} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\subsection{Asset impact model} -The {\it asset impact} model is used to determine how an asset is impacted by an event. The impact is a quantity from which financial loss can be inferred, but is not itself a monetary value. For example, a response might be the damage sustained to a building as a fraction of its value or the annual loss of energy output of a power station as a fraction of its annual output\footnote{A systemic change in annual output changes asset value, since this is partly determined by the expected future cash flows generated by the asset.}. In each case, a further model is required to translate the impact to a change in asset value. In principle an impact might lead to an increase or decrease in value. - -Catastrophe models sometimes define a quantity `damage', and talk about `damageability'. 
`Damage' and `impact' are analagous quantities here but `impact' is perhaps better-suited to situations where there is, say, a decrease in output efficiency of a plant as a result of a period of higher temperatures. - -Asset impact models as used in physical risk calculations may overlap with those of catastrophe models. OS-C aims to support a wide range of models, but it is desirable to identify approaches that generalize a large class of these. One such approach is adopted from Oasis \cite{OasisLMF}. The first assumption behind this is that a model should capture two important types of uncertainty, doing so by representing each by a probability distributions: -\begin{enumerate} - \item Uncertainty as to the frequency and intensity (or severity) of events that potentially lead to a change in asset value. This is sometimes called the {\it primary uncertainty} - \item Uncertainty as to the vulnerability of assets to events (i.e. response of assets to events of a given intensity), the {\it secondary uncertainty} -\end{enumerate} - -These quantities are defined more precisely in \ref{Sec:MathematicalDescriptionOfAssetImpactModel}. Impact can be modelled using a {\it mean impact curve} (or {\it mean damage curve} in catastrophe modelling nomenclature). This is a curve relating an event intensity to an impact (e.g. a wind event with a given maximum gust speed will cause a given fractional damage to a property). In general, however, there is uncertainty as to the impact on an asset to an event of a given intensity -- in the example, the wind may cause mild or severe damage. For this reason, the vulnerability is represented rather as a two dimensional curve. - -A second assumption is that the probabilities of such events may not be readily represented by distributions such as beta, gamma, beta-Bernoulli or truncated Gaussian and may be complex and multi-modal. Discrete probability distributions are therefore used in order to represent the range of possible distributions: a non-parametric approach. - -\subsubsection{Mathematical description of asset impact model} -\label{Sec:MathematicalDescriptionOfAssetImpactModel} - -There are $n$ intensity bins with index $i$ such that $i \in \{1, \dots, n \}$. We define $e^{(a)}_i$ to be the probability that a hazard event of type $a$ occurs with an intensity that falls in bin $i$. If $S^{(a)}$, a random variable, is the intensity of event $a$ then: - -\begin{equation} - \label{Eq:event} - e^{(a)}_i = P \left( s^{(a, \text{lower})}_i < S^{(a)} \le s^{(a, \text{upper})}_i \right) -\end{equation} - -That is, $s^{(a, \text{lower})}_i$ and $s^{(a, \text{upper})}_i$ define the range of bin $i$. - -We define $v^{(a, b)}_{ij}$ to be the conditional probability that \emph{given} the occurrence of an event of type $a$ with intensity $S^{(a)}$ there is an impact (typically a damage or disruption\footnote{$d$ for `damage/disruption' is used to denote impact as $i$ is reserved for indexing}), $D^{(b)}$ in the range $d^{(a,b,\text{lower})}_j < D \le d^{(a,b,\text{upper})}_j$. The impact is of type $b$. - - -\begin{equation} - \label{Eq:vulnerability} - v^{(a, b)}_{ij} = P \left( d^{(a,b,\text{lower})}_j < D^{(b)} \le d^{(a,b,\text{upper})}_j | s^{(a, \text{lower})}_i < S^{(a)} \le s^{(a, \text{upper})}_i \right) -\end{equation} - -The definition of an event type $a$ includes a time interval e.g. $a$ is the occurrence of an inundation in the locale of the asset {\it within a one year period}. $b$ is, for example, the fractional damage to the asset. 
- -We define $d^{(a,b)}_j$ to be the marginal probability of impact $D^{(b)}$ in the range $d^{(a,b, \text{lower})}_j < D^{(b)} \le d^{(a,b,\text{upper})}_j$ occurring as a result of an event of type $a$. - -\begin{equation} - \label{Eq:impact} - d^{(a,b)}_j = P \left( d^{(a,b,\text{lower})}_j < D^{(b)} \le d^{(a,b,\text{upper})}_j \right) -\end{equation} - -From the definition of conditional probability: - -\begin{equation} - \label{Eq:model} - d^{(a,b)}_j = \sum_{i} v^{(a,b)}_{ij} e^{(a)}_i -\end{equation} - -If only the mean impact curve is available, then it is possible to create the matrix such that $v_{ij} \in \{0, 1\}$. The matrix then provides a simple mapping from intensity to impact; if the number of intensity and response bins is equal then matrix $\mathbf{v}$ is simply the identity matrix. However, note that these simplifications exclude from the model any uncertainty in the parameters\footnote{A better approach would be to estimate the standard deviation of the distributions from which the mean impact curve was calculated and to incorporate this.}. - -Note that $d^{(a,b)}_j$ is identical to the {\it effective damage} distribution of Oasis and can be described as the `effective impact'. It is a marginal distribution and does not capture any correlation between events nor impacts. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{figure}[ht] - - \begin{framed} - - \includegraphics[width=\textwidth]{plots/fig_intensity.pdf} - - \end{framed} - - \footnotesize - - \renewcommand{\arraystretch}{1.01} - - \vspace{-3ex} - - {\justify - The exceedance curve of event intensity at the asset location is shown on the right. The event intensity in this example is inundation depth in metres. Exceedance is a cumulative probability. As an example, the probability of an inundation event occurring within a single year of intensity 0.91m or greater is 0.002. An exceedance probability is the reciprocal of the return period; it could equivalently be said that the 0.91m intensity event occurs with a return period of 500 years. - The exceedance curve can be converted to a histogram of probabilities. Here the $n$ bins have ranges $[s^{(a, \text{lower})}_i, s^{(a, \text{upper})}_i]$. For example, the first bin has range [0.28m, 0.38m]. The second bin has range [0.38m, 0.51m]; that is $s^{(a, \text{lower})}_2 = 0.38$m and $s^{(a, \text{upper})}_2 = 0.51$m. $e^{(a)}_2 = 0.06$. - \par} - - \vspace{-0.5ex} - - \caption{\small Event intensity exceedance curve (right) and corresponding histogram (left).} - \label{Fig:intensity} - -\end{figure} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\subsubsection{Importance of secondary uncertainty} -The importance of the vulnerability matrix as opposed to mean damage curve (or vector) is emphasized above; see also \cite{Taylor:2015} for a discussion of this point. This is true not only in cases where the underlying distribution of an impact, for example a fractional damage, can be inferred from empirical data; see for example Figure~\ref{Fig:vulnerability_matrix}). This is arguably \emph{more} important where data is limited in order that approximate data can be incorporated into the model in a way that the impact of the approximations can be well-understood. 
- -Vulnerability data may be provided by -\begin{itemize} - \item Modelling of asset vulnerability based on asset characteristics and/or historical data - \item 'Calibrated' vulnerabilities, for example based on realized insurance claims -\end{itemize} -Physical risk models may make use of so-called `bulk assessment' approaches for certain assets, where precise vulnerability information is not available and less precise estimates of the damage/disruption of the asset are used. The presence of such estimates in an overall model may, or may not, materially impact the accuracy of the results, but it is important that this impact can be assessed. By quantifying the uncertainty in the response estimates, a distribution of financial losses is ultimately obtained from which the model user can derive the impact of the approximation. - -\paragraph{Handling epistemic uncertainty} -In forms of bulk-assessment, a common case is that insufficient information exists with which to characterize an asset. This is an example of an epistemic, as opposed to aleatory, uncertainty. The epistemic uncertainty, and its impact, can be included in the model in a relatively straight-forward way. - -We extend Equation~{\ref{Eq:vulnerability}, by including a new discrete random variable, $A$, which is the type of the asset. -\begin{equation} - \label{Eq:vulnerability} - v^{(a, b)}_{ij} = P \left( d^{(a,b,\text{lower})}_j < D^{(b)} \le d^{(a,b,\text{upper})}_j | s^{(a, \text{lower})}_i < S^{(a)} \le s^{(a, \text{upper})}_i, A = a_1 \right) -\end{equation} - - -\subsubsection{Interpolation of probability distributions} -Cases arise where the event distributions and vulnerability distributions are not defined for a common set of intensity bins and interpolation is therefore required. The question then arises of how probability density is distributed within bins. The choice is model-specific and customizable, but here two common cases are described. - -\begin{itemize} - \item Probability density constant across bin: linear interpolation of cumulative probability function - \item Probability density changes linearly across bin: quadratic interpolation of cumulative probability function -\end{itemize} - -{\textcolor{red}{\emph{[Add equations and example plots here]}}} - -Hazard data sets might also contain instances of `point-probabilities', for example where there is a finite probability that the intensity of an event takes a single value. These represent Dirac delta functions in the probability distribution, steps in the cumulative probability function. There is the option of retaining these as delta functions (bins of zero width), but in some cases it may be necessary to make assumptions about how these the probability might be distributed across a bin. - -{\textcolor{red}{\emph{[Add equations and plot of step-CDF with interpolation; exemplify by `damage threshold']}}} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\begin{figure}[ht] - - \begin{framed} - - \includegraphics[width=\textwidth]{plots/vulnerability_lagace_2008.png} - - \end{framed} - - \footnotesize - - \renewcommand{\arraystretch}{1.01} - - \vspace{-3ex} - -% {\justify -% Taken from -% \par} - - \vspace{-0.5ex} - - \caption{\small Taken from Lagacé (2008) Catastrophe Modeling, Université Laval. Mean damage curve as an approximation to an underlying set of distributions, modelled using a vulnerability matrix. {\textcolor{red}{\emph{[To seek permission or replace e.g. 
-{\textcolor{red}{\emph{[Add example plots here]}}}
-
-Hazard data sets might also contain instances of `point-probabilities', for example where there is a finite probability that the intensity of an event takes a single value. These represent Dirac delta functions in the probability density, that is, steps in the cumulative probability function. There is the option of retaining these as delta functions (bins of zero width), but in some cases it may be necessary to make assumptions about how the probability might be distributed across a bin.
-
-{\textcolor{red}{\emph{[Add equations and plot of step-CDF with interpolation; exemplify by `damage threshold']}}}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\begin{figure}[ht]
-
-    \begin{framed}
-
-    \includegraphics[width=\textwidth]{plots/vulnerability_lagace_2008.png}
-
-    \end{framed}
-
-    \footnotesize
-
-    \renewcommand{\arraystretch}{1.01}
-
-    \vspace{-3ex}
-
-    \vspace{-0.5ex}
-
-    \caption{\small Taken from Lagacé (2008) Catastrophe Modeling, Université Laval. Mean damage curve as an approximation to an underlying set of distributions, modelled using a vulnerability matrix. {\textcolor{red}{\emph{[To seek permission or replace e.g. with synthetic plot]}}}}
-    \label{Fig:vulnerability_matrix}
-
-\end{figure}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-
-
-\subsection{Effective impact distribution}
-$d^{(a,b)}_j$ from Equation~\ref{Eq:model} is the probability distribution of impacts of type $b$ for an asset as a result of events of type $a$. In the catastrophe models of Oasis, impacts are sampled from this distribution \cite{OasisFinancialModule}, for example samples of fractional damage, which form the basis of a Monte Carlo calculation. This is done in order to apply insurance policy terms and conditions, which can be complex and non-linear.
-
-The Monte Carlo sampling is done by constructing a cumulative distribution function, $Y_D(d)$, of impact $D$ from the effective impact distribution ($Y_D(d) = P(D \le d)$). Random numbers $u_i$ are then sampled from a standard uniform distribution ($u_i \in [0, 1]$), from which impacts are calculated by:
-
-\begin{equation}
-    \label{Eq:sampling}
-    d_i = Y^{-1}_D(u_i)
-\end{equation}
-
-In this Monte Carlo approach, samples of fractional damage can be drawn from distributions so as to be correlated or uncorrelated. For example, if the impact distributions represent damage to buildings as a result of inundation, then it may be appropriate to model damage to two buildings in close proximity as being highly correlated\footnote{Catastrophe model practitioners might point out that the presence or absence of kerb stones and the availability of sand bags are highly significant, so any such assumption is prone to error.}. If the buildings are far apart (say in different countries) then the correlation is likely to be close to zero.
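The following is a minimal Python sketch of the inverse-CDF sampling of Equation~\ref{Eq:sampling}, here drawing correlated uniforms for two assets via a Gaussian copula. The bin edges, probabilities and correlation are invented assumptions for illustration; this is not a description of the physrisk or Oasis implementations.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(42)

# Effective impact distribution expressed as a cumulative probability
# function Y_D: impact (fractional damage) bin edges and the cumulative
# probabilities at those edges (invented values).
impact_edges = np.array([0.0, 0.1, 0.2, 0.4, 0.8, 1.0])
cum_probs = np.array([0.0, 0.55, 0.80, 0.92, 0.98, 1.0])

def sample_impacts(u):
    # Inverse-CDF sampling, d = Y_D^{-1}(u), interpolating linearly in-bin.
    return np.interp(u, cum_probs, impact_edges)

# Correlated standard uniforms for two nearby assets via a Gaussian copula.
rho = 0.9  # assumed high correlation; rho = 0 recovers the uncorrelated case
z = rng.multivariate_normal([0.0, 0.0], [[1.0, rho], [rho, 1.0]], size=10_000)
u = stats.norm.cdf(z)  # each margin is uniform on [0, 1]

damage_asset_1 = sample_impacts(u[:, 0])
damage_asset_2 = sample_impacts(u[:, 1])
```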
-\subsubsection{Full Monte Carlo calculation}
-A more sophisticated correlation model might try to capture correlation of events and of vulnerabilities. Such models would typically need first to sample from the distribution of event intensity and then from the vulnerability distribution. This is more computationally expensive than deriving an effective impact distribution, but such a `full Monte Carlo' approach is highly flexible and might prove relevant for some models.
-
-
-\subsection{Aggregation of impacts}
-For impacts of the same type, $b$, arising from different events, it is assumed that the impacts are additive, up to a ceiling value\footnote{This approximation is strictly valid only for sufficiently small impacts; consider the contrived example of a fractional damage of 0.8 occurring from both flood and high wind in the same year.}. If the annual impacts from events with indices 1 and 2 are represented by random variables $Y^{(1,b)}$ and $Y^{(2,b)}$, then $Y^{(\text{tot}, b)} = Y^{(1,b)} + Y^{(2,b)}$.
-
-If the random variables are uncorrelated, then the aggregated effective impact distribution is given by the convolution:
-
-\begin{equation}
-    \label{Eq:convolution}
-    y^{(\text{tot}, b)}(r) = \int^{\infty}_{-\infty} y^{(1, b)}(t) \, y^{(2, b)}(r - t) \, dt
-\end{equation}
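For discretely binned data, a sketch of the corresponding expression (assuming the two impact distributions are defined on a common, equally spaced grid of impact bins) is the discrete convolution

\begin{equation*}
    d^{(\text{tot}, b)}_k = \sum_{j} d^{(1, b)}_j \, d^{(2, b)}_{k - j},
\end{equation*}

which can be computed, for example, with \texttt{numpy}; the probabilities below are invented for illustration:

```python
import numpy as np

# Effective impact distributions for two independent event types, binned on
# a common, equally spaced impact grid (each sums to one; invented values).
d1 = np.array([0.95, 0.03, 0.015, 0.005])
d2 = np.array([0.90, 0.06, 0.03, 0.01])

# Discrete convolution: probability that the summed impact falls in bin k.
# Bins beyond any ceiling value (e.g. fractional damage of 1) can be folded
# back into the top bin to respect the ceiling discussed above.
d_tot = np.convolve(d1, d2)
assert np.isclose(d_tot.sum(), 1.0)
```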
-\subsection{Financial loss model}
-Several financial measures are of interest:
-
-\begin{enumerate}
-    \item Annual Exceedance Probability (AEP): the probability that in a given year the aggregated losses of a portfolio will exceed a certain value
-    \item Valuation Adjustment: an adjustment to the present value of an asset to reflect the expected loss
-\end{enumerate}
-
-The first of these typically requires less data in its calculation. The AEP is a cumulative probability distribution of losses, from which the average annual loss (AAL) can be inferred, as well as the range of losses within a given confidence interval; this interval is driven by the primary and secondary uncertainties discussed above.
-
-With additional modelling steps, credit risk measures can also be derived.
-
-\subsubsection{Structural models of credit risk}
-Changes in asset value can be used to model changes in the credit quality of market participants. Financial risk modules for physical risk may then use distributions of asset value changes to model changes of credit quality over time as a result of climate change, for example via estimates of default probability and loss given default.
-
-The intention of this section is not to specify any particular model, but rather to give a brief introduction. Of particular interest is the question of which inputs credit risk models require.
-
-For medium and large cap firms, a credit default event typically occurs when a firm is not able to meet its debt servicing obligations. Under an important class of credit risk models called `structural models', it is assumed that a default event occurs when a firm's assets are sufficiently low compared to its liabilities.
-
-A number of different structural models exist, making various assumptions about how a firm's assets change over time, how its capital is structured and the nature of its debt.
-
-The earliest structural model was described by Merton in 1974 \cite{Merton:1974}, based on an extremely simple debt structure. Black and Cox \cite{BlackCox:1976} introduced an important refinement to the Merton model in 1976. Practical implementations were subsequently created as a result of this foundational work, a notable one being the `KMV' model, named after Kealhofer, McQuown and Vasicek and now owned by Moody's Investors Service, Inc.
-
-Use of such credit models may provide a mechanism for incorporating physical risk into financial institutions' existing risk models \cite{KenyonEtAl:2021}.
-
-\subsection{Uncertainties in the calculation}
-
-\subsection{Model limitations}
-
-\begin{enumerate}
-    \item Spatial correlation of events: to what extent is this possible without a Monte Carlo calculation, and to what extent is it provided by, or can be inferred from, available data sets
-    \item Correlation of vulnerability
-    \item Data availability
-\end{enumerate}
-
-
-\subsubsection{Data availability}
-Issues related to data availability and relevance remain one of the main limitations of physical risk assessments. While past and future climate data are becoming increasingly available through open-source portals and tools (e.g. Copernicus, WRI Aqueduct), their availability and reliability vary widely according to the climate hazard of interest, the region and the modelling process. Even as the availability of climate data improves, open-source, asset-level information (required to estimate the exposure of an asset to a given climate hazard) is still seldom available. Such data include the location of assets, their link to owning companies and, more generally, any damage records that could be used to quantify the response of an asset (or of a type of asset) to a given climate event. New datasets have recently been released for some sectors, but their exhaustiveness remains to be verified. Moreover, many industrial sectors are not covered, limiting the application of physical risk methodologies to a diversified portfolio.
-
-Finally, building and applying the correlation between hazard and damage (or impact), as described in section 2.2, requires that historical events, historical damages and future climate events share a common distribution. In a changing climate, assets and activities will be impacted by more intense events that have not previously been experienced, either in a given region of the world or even across the whole globe, leading to a potentially large mismatch between the historical and future distributions of events. The interpolation of the damage curve, as described in section 2.2.3, might then carry very high uncertainties that need to be taken into account when interpreting the results.
-
-
-
-
-\clearpage
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\bibliography{Physical Risk Methodology Bibliography}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%\bibliographystyle{plain}
-\bibliographystyle{acm}
-%\bibliographystyle{agsm}
-
-
-\end{document} diff --git a/docs/methodology/PhysicalRiskMethodologyBibliography.bib b/docs/methodology/PhysicalRiskMethodologyBibliography.bib deleted file mode 100644 index dc3dddef..00000000 --- a/docs/methodology/PhysicalRiskMethodologyBibliography.bib +++ /dev/null @@ -1,78 +0,0 @@
-@article{BlackCox:1976,
-    title = {Valuing corporate securities: some effects of bond indenture provisions},
-    author = {Fischer Black and John C. Cox},
-    journal = {Journal of Finance},
-    year = {1976},
-    pages = {351-367},
-    number = 2,
-    volume = 31,
-}
-
-@article{KenyonEtAl:2021,
-    title = {Climate change valuation adjustment (CCVA) using parameterized climate change impacts},
-    author = {Kenyon, Chris and Berrahouia, Mourad},
-    journal = {Risk},
-    year = {2021}
-}
-
-@article{Merton:1974,
-    title = {On the pricing of corporate debt: the risk structure of interest rates},
-    author = {Robert C. 
Merton}, - journal = {Journal of Finance}, - year = {1974}, - pages = {449-470}, - number = 2, - volume = 29, -} - -@misc{OasisFinancialModule, - author = {Peter Taylor and Johanna Carter}, - title = {Oasis Financial Module}, - year = {2020} -} - -@misc{OasisLMF, - author = {Oasis}, - title = {Oasis loss modelling framework: open source catastrophe modelling platform}, - year = {2021}, - url = {https://oasislmf.org/} -} - -@misc{OSC, - author = {OS-C}, - title = {{OS-Climate} {(OS-C)} platform}, - year = {2021}, - url = {http://www.os-climate.org/} -} - -@inproceedings{Taylor:2015, - author = {Taylor, Peter}, - year={2015}, - title={Calculating financial loss from catastrophes}, - booktitle={SECED 2015 Conference: Earthquake risk and engineering towards a resilient world}, - publisher={Society for earthquake and civil engineering dynamics}, - url = {http://seced.org.uk/images/newsletters/TAYLOR.pdf} -} - -@article{AlacazarEtAl:2020, - title = {Classical versus quantum models in machine learning: insights from a finance application}, - author = {Alcazar, Javier and Leyton-Ortega, Vicente and Perdomo-Ortiz, Alejandro}, - doi = {10.1088/2632-2153/ab9009}, - url = {https://www.osti.gov/biblio/1648867}, - journal = {Machine Learning: Science and Technology}, - issn = {2632-2153}, - number = 3, - volume = 1, - place = {United States}, - year = {2020}, - month = {7} -} - -@unpublished{AlcazarEtAl:2021, - author = {Javier Alcazar and Andrea Cadarso and Amara Katabarwa and Marta Mauri and Borja Peropadre and Guoming Wang and and Yudong Cao}, - title = {Quantum Algorithm for Credit Valuation Adjustments}, - year = {Working paper, Zapata Computing, May 2021}, - organization = {Zapata Computing} -} - - diff --git a/docs/methodology/plots/top_level_view.pdf b/docs/methodology/plots/top_level_view.pdf deleted file mode 100644 index 21120515..00000000 Binary files a/docs/methodology/plots/top_level_view.pdf and /dev/null differ diff --git a/docs/methodology/plots/top_level_view.pptx b/docs/methodology/plots/top_level_view.pptx deleted file mode 100644 index 7b76b74e..00000000 Binary files a/docs/methodology/plots/top_level_view.pptx and /dev/null differ diff --git a/docs/physrisk.api.rst b/docs/physrisk.api.rst new file mode 100644 index 00000000..5f1c1607 --- /dev/null +++ b/docs/physrisk.api.rst @@ -0,0 +1,19 @@ +physrisk.api +==================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + physrisk.api.v1 + physrisk.api.v2 + +Module contents +--------------- + +.. automodule:: physrisk.api + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.api.v1.rst b/docs/physrisk.api.v1.rst new file mode 100644 index 00000000..a5a46577 --- /dev/null +++ b/docs/physrisk.api.v1.rst @@ -0,0 +1,61 @@ +physrisk.api.v1 +======================= + +Submodules +---------- + +physrisk.api.v1.common +----------------------------- + +.. automodule:: physrisk.api.v1.common + :members: + :undoc-members: + :show-inheritance: + +physrisk.api.v1.example\_portfolios +------------------------------------------ + +.. automodule:: physrisk.api.v1.example_portfolios + :members: + :undoc-members: + :show-inheritance: + +physrisk.api.v1.exposure\_req\_resp +------------------------------------------ + +.. automodule:: physrisk.api.v1.exposure_req_resp + :members: + :undoc-members: + :show-inheritance: + +physrisk.api.v1.hazard\_data +----------------------------------- + +.. 
automodule:: physrisk.api.v1.hazard_data + :members: + :undoc-members: + :show-inheritance: + +physrisk.api.v1.hazard\_image +------------------------------------ + +.. automodule:: physrisk.api.v1.hazard_image + :members: + :undoc-members: + :show-inheritance: + +physrisk.api.v1.impact\_req\_resp +---------------------------------------- + +.. automodule:: physrisk.api.v1.impact_req_resp + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.api.v1 + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.api.v2.rst b/docs/physrisk.api.v2.rst new file mode 100644 index 00000000..a0bf1b18 --- /dev/null +++ b/docs/physrisk.api.v2.rst @@ -0,0 +1,10 @@ +physrisk.api.v2 +======================= + +Module contents +--------------- + +.. automodule:: physrisk.api.v2 + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.data.rst b/docs/physrisk.data.rst new file mode 100644 index 00000000..6b5c5d0b --- /dev/null +++ b/docs/physrisk.data.rst @@ -0,0 +1,85 @@ +physrisk.data +===================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + physrisk.data.static + +Submodules +---------- + +physrisk.data.colormap\_provider +--------------------------------------- + +.. automodule:: physrisk.data.colormap_provider + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.geotiff\_reader +------------------------------------ + +.. automodule:: physrisk.data.geotiff_reader + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.hazard\_data\_provider +------------------------------------------- + +.. automodule:: physrisk.data.hazard_data_provider + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.image\_creator +----------------------------------- + +.. automodule:: physrisk.data.image_creator + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.inventory +------------------------------ + +.. automodule:: physrisk.data.inventory + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.inventory\_reader +-------------------------------------- + +.. automodule:: physrisk.data.inventory_reader + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.pregenerated\_hazard\_model +------------------------------------------------ + +.. automodule:: physrisk.data.pregenerated_hazard_model + :members: + :undoc-members: + :show-inheritance: + +physrisk.data.zarr\_reader +--------------------------------- + +.. automodule:: physrisk.data.zarr_reader + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.data + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.data.static.example_portfolios.rst b/docs/physrisk.data.static.example_portfolios.rst new file mode 100644 index 00000000..e2777154 --- /dev/null +++ b/docs/physrisk.data.static.example_portfolios.rst @@ -0,0 +1,10 @@ +physrisk.data.static.example\_portfolios +================================================ + +Module contents +--------------- + +.. automodule:: physrisk.data.static.example_portfolios + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.data.static.hazard.rst b/docs/physrisk.data.static.hazard.rst new file mode 100644 index 00000000..4b49393f --- /dev/null +++ b/docs/physrisk.data.static.hazard.rst @@ -0,0 +1,10 @@ +physrisk.data.static.hazard +=================================== + +Module contents +--------------- + +.. 
automodule:: physrisk.data.static.hazard + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.data.static.rst b/docs/physrisk.data.static.rst new file mode 100644 index 00000000..95057615 --- /dev/null +++ b/docs/physrisk.data.static.rst @@ -0,0 +1,31 @@ +physrisk.data.static +============================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + physrisk.data.static.example_portfolios + physrisk.data.static.hazard + physrisk.data.static.vulnerability + +Submodules +---------- + +physrisk.data.static.world +--------------------------------- + +.. automodule:: physrisk.data.static.world + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.data.static + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.data.static.vulnerability.rst b/docs/physrisk.data.static.vulnerability.rst new file mode 100644 index 00000000..8d1b339c --- /dev/null +++ b/docs/physrisk.data.static.vulnerability.rst @@ -0,0 +1,10 @@ +physrisk.data.static.vulnerability +========================================== + +Module contents +--------------- + +.. automodule:: physrisk.data.static.vulnerability + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.hazard_models.rst b/docs/physrisk.hazard_models.rst new file mode 100644 index 00000000..57ab104f --- /dev/null +++ b/docs/physrisk.hazard_models.rst @@ -0,0 +1,21 @@ +physrisk.hazard\_models +=============================== + +Submodules +---------- + +physrisk.hazard\_models.core\_hazards +-------------------------------------------- + +.. automodule:: physrisk.hazard_models.core_hazards + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.hazard_models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.kernel.rst b/docs/physrisk.kernel.rst new file mode 100644 index 00000000..1f8361fa --- /dev/null +++ b/docs/physrisk.kernel.rst @@ -0,0 +1,133 @@ +physrisk.kernel +======================= + +Submodules +---------- + +physrisk.kernel.assets +----------------------------- + +.. automodule:: physrisk.kernel.assets + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.calculation +---------------------------------- + +.. automodule:: physrisk.kernel.calculation + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.curve +---------------------------- + +.. automodule:: physrisk.kernel.curve + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.events +----------------------------- + +.. automodule:: physrisk.kernel.events + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.exposure +------------------------------- + +.. automodule:: physrisk.kernel.exposure + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.financial\_model +--------------------------------------- + +.. automodule:: physrisk.kernel.financial_model + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.hazard\_event\_distrib +--------------------------------------------- + +.. automodule:: physrisk.kernel.hazard_event_distrib + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.hazard\_model +------------------------------------ + +.. automodule:: physrisk.kernel.hazard_model + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.hazards +------------------------------ + +.. 
automodule:: physrisk.kernel.hazards + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.impact +----------------------------- + +.. automodule:: physrisk.kernel.impact + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.impact\_distrib +-------------------------------------- + +.. automodule:: physrisk.kernel.impact_distrib + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.risk +--------------------------- + +.. automodule:: physrisk.kernel.risk + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.vulnerability\_distrib +--------------------------------------------- + +.. automodule:: physrisk.kernel.vulnerability_distrib + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.vulnerability\_matrix\_provider +------------------------------------------------------ + +.. automodule:: physrisk.kernel.vulnerability_matrix_provider + :members: + :undoc-members: + :show-inheritance: + +physrisk.kernel.vulnerability\_model +------------------------------------------- + +.. automodule:: physrisk.kernel.vulnerability_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.kernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.risk_models.rst b/docs/physrisk.risk_models.rst new file mode 100644 index 00000000..6a61ce06 --- /dev/null +++ b/docs/physrisk.risk_models.rst @@ -0,0 +1,29 @@ +physrisk.risk\_models +============================= + +Submodules +---------- + +physrisk.risk\_models.loss\_model +---------------------------------------- + +.. automodule:: physrisk.risk_models.loss_model + :members: + :undoc-members: + :show-inheritance: + +physrisk.risk\_models.risk\_models +----------------------------------------- + +.. automodule:: physrisk.risk_models.risk_models + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.risk_models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.rst b/docs/physrisk.rst new file mode 100644 index 00000000..194a4567 --- /dev/null +++ b/docs/physrisk.rst @@ -0,0 +1,40 @@ +Code documentation +==================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + physrisk.api + physrisk.data + physrisk.hazard_models + physrisk.kernel + physrisk.risk_models + physrisk.utils + physrisk.vulnerability_models + +physrisk.container +------------------------- + +.. automodule:: physrisk.container + :members: + :undoc-members: + :show-inheritance: + +physrisk.requests +------------------------ + +.. automodule:: physrisk.requests + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.utils.rst b/docs/physrisk.utils.rst new file mode 100644 index 00000000..6f577d1b --- /dev/null +++ b/docs/physrisk.utils.rst @@ -0,0 +1,29 @@ +physrisk.utils +====================== + +Submodules +---------- + +physrisk.utils.helpers +----------------------------- + +.. automodule:: physrisk.utils.helpers + :members: + :undoc-members: + :show-inheritance: + +physrisk.utils.lazy +-------------------------- + +.. automodule:: physrisk.utils.lazy + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: physrisk.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/physrisk.vulnerability_models.rst b/docs/physrisk.vulnerability_models.rst new file mode 100644 index 00000000..f63b6db6 --- /dev/null +++ b/docs/physrisk.vulnerability_models.rst @@ -0,0 +1,53 @@ +physrisk.vulnerability\_models +====================================== + +Submodules +---------- + +physrisk.vulnerability\_models.chronic\_heat\_models +----------------------------------------------------------- + +.. automodule:: physrisk.vulnerability_models.chronic_heat_models + :members: + :undoc-members: + :show-inheritance: + +physrisk.vulnerability\_models.example\_models +----------------------------------------------------- + +.. automodule:: physrisk.vulnerability_models.example_models + :members: + :undoc-members: + :show-inheritance: + +physrisk.vulnerability\_models.labour\_models +---------------------------------------------------- + +.. automodule:: physrisk.vulnerability_models.labour_models + :members: + :undoc-members: + :show-inheritance: + +physrisk.vulnerability\_models.power\_generating\_asset\_models +---------------------------------------------------------------------- + +.. automodule:: physrisk.vulnerability_models.power_generating_asset_models + :members: + :undoc-members: + :show-inheritance: + +physrisk.vulnerability\_models.real\_estate\_models +---------------------------------------------------------- + +.. automodule:: physrisk.vulnerability_models.real_estate_models + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: physrisk.vulnerability_models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/references.bib b/docs/references.bib new file mode 100644 index 00000000..2b811bd2 --- /dev/null +++ b/docs/references.bib @@ -0,0 +1,398 @@ +@book{Mitchel, + author = "Foote, M. and Hillier, J. and Mitchell-Wallace, K. and Jones, M.", + title = "Natural catastrophe risk management and modelling: A practitioner's guide", + publisher = "John Wiley \& Sons", + year = "2017", + doi = "10.1002/9781118906057" +} + +@misc{OS, + author = "Oasis", + title = "Oasis loss modelling framework: open source catastrophe modelling platform", + year = "2021" +} + +@incollection{Parodi, + author = "Pietro Parodi", + title = "Pricing in General Insurance", + booktitle = "Pricing in General Insurance", + chapter = "24", +} + +@misc{Abe, + author = "Joe Moorhouse and Florian Gallo and Mariem Bouchaala and Davide Ferri", + title = "Physical Climate Risk Methodology", + howpublished = "OS-climate" +} + +@misc{ExcProb, + title = "\url{https://www.casact.org/sites/default/files/2021-03/02_humphreys.pdf}", +} + +@article{KR, + author = "Shree Khare and Keven Roy", + title = "Quantifying the Role of Occurrence Losses in Catastrophe Excess of Loss Reinsurance Pricing", +} + +@misc{DABS, + title = "A new dataset of river flood hazard maps for Europe and the Mediterranean Basin", +} + +@misc{ECB-stress-test, + author = "ECB", + title = "2022 climate risk stress test report", + howpublished = "\url{https://www.bde.es/f/webbde/GAP/Secciones/SalaPrensa/ComunicadosBCE/NotasInformativasBCE/22/ssm.pr220708_presentation1.en.pdf}", + year = "2022" +} + +@misc{ECB1, + author = "ECB", + title = "Climate-related risk and financial stability. 
Data supplement", + year = "2021", + month = "July" +} + +@misc{TCRSC, + author = "{ECB}", + title = "Towards climate-related statistical indicators", + year = "2023" +} + +@misc{TCRSI-TA, + author = "ECB", + title = "Towards climate-related statistical indicators. Technical Annex", +} + +@misc{NGFS_report, + author = "{Network for Greening the Financial System (NGFS)}", + title = "Progress report on bridging data gaps", + year = "2021", + howpublished = "\url{https://www.ngfs.net/sites/default/files/medias/documents/progress_report_on_bridging_data_gaps.pdf}" +} + +@article{Vousdoukas2016, + author = {Michalis Vousdoukas and Evangelos Voukouvalas and Alessandro Annunziato and Alessio Giardino and Luc Feyen}, + title = {Projections of extreme storm surge levels along Europe}, + journal = {Climate Dynamics}, + volume = {47}, + number = {9}, + pages = {3171--3190}, + year = {2016}, + doi = {10.1007/s00382-016-3019-5}, + url = {https://link.springer.com/article/10.1007/s00382-016-3019-5} +} + +@article{JRC-coastal, + author = {Michalis Vousdoukas and Evangelos Voukouvalas and Alessandro Annunziato and Alessio Giardino and Luc Feyen}, + title = {Projections of extreme storm surge levels along Europe}, + journal = {Climate Dynamics}, + volume = {47}, + number = {9}, + pages = {3171--3190}, + year = {2016}, + doi = {10.1007/s00382-016-3019-5}, + url = {https://link.springer.com/article/10.1007/s00382-016-3019-5} +} + + +@book{Foote2017, + author = {Matthew Foote and Jeremy Hillier and Kirsten Mitchell-Wallace and Matthew Jones}, + title = {Natural Catastrophe Risk Management and Modelling: A Practitioner's Guide}, + publisher = {John Wiley \& Sons}, + year = {2017}, + doi = {10.1002/9781118906057} +} + +@techreport{Huizinga2017, + author = {Jaap Huizinga and Hans de Moel and Wojciech Szewczyk}, + title = {Global flood depth-damage functions: Methodology and the database with guidelines}, + institution = {Publications Office of the European Union}, + year = {2017}, + type = {EUR 28552 EN}, + address = {Luxembourg}, + doi = {10.2760/16510}, + isbn = {978-92-79-67781-6}, + note = {JRC105688} +} + +@techreport{Houz, + author = {Jaap Huizinga and Hans de Moel and Wojciech Szewczyk}, + title = {Global flood depth-damage functions: Methodology and the database with guidelines}, + institution = {Publications Office of the European Union}, + year = {2017}, + type = {EUR 28552 EN}, + address = {Luxembourg}, + doi = {10.2760/16510}, + isbn = {978-92-79-67781-6}, + note = {JRC105688} +} + +@misc{JRC2020, + title = {Adapting to rising coastal flood risk in the EU under climate change}, + author = {{Joint Research Centre}}, + year = {2020}, + howpublished= {\url{https://joint-research-centre.ec.europa.eu/system/files/2020-05/pesetaiv_task_6_coastal_final_report.pdf}}, + note = {JRC technical report} +} + +@article{Papiras2020, + author = {Vassilios Papiras}, + title = {Pitfalls of data-driven peaks-over-threshold analysis: Perspectives from extreme ship motions}, + journal = {Probabilistic Engineering Mechanics}, + volume = {60}, + pages = {103053}, + year = {2020}, + doi = {10.1016/j.probengmech.2020.103053}, + url = {https://doi.org/10.1016/j.probengmech.2020.103053} +} + + +@article{floodmap, + author = {Nicola Lugeri and Zbigniew Kundzewicz and Elisabetta Genovese and Stefan Hochrainer-Stigler and Maciej Radziejewski}, + title = {River Flood Risk and Adaptation in Europe – Assessment of the Present Status}, + journal = {Mitigation and Adaptation Strategies for Global Change}, + volume = {15}, + pages = 
{621-639}, + year = {2010}, + doi = {10.1007/s11027-009-9211-8} +} + +@misc{JRC_facing, + author = {Joint Research Centre}, + title = {Facing increasing river flood risk in Europe: adaptation measures can save lives and billions of euro}, + year = {2023}, + note = {Accessed: 6 February 2023} +} + +@misc{nature_summary, + author = {Mark Smith and others}, + title = {Global assessment of current and future river flooding and the role of nature-based solutions for risk management}, + year = {2021}, + note = {Summary Report} +} + +@techreport{JRC-windstorms, + author = {Jonathan Spinoni and Giuseppe Formetta and Lorenzo Mentaschi and Giovanni Forzieri and Luc Feyen}, + title = {Global warming and windstorm impacts in the EU}, + institution = {Publications Office of the European Union}, + year = {2019}, + type = {EUR 29960 EN}, + address = {Luxembourg}, + isbn = {978-92-76-12955-4}, + doi = {10.2760/039014}, + note = {JRC118595} +} + +@misc{oxf, + author = {Oxford Reference}, + title = {Measure of the severity of a windstorm}, + howpublished = {\url{https://www.oxfordreference.com}} +} + +@misc{SyntheticEventStorm, + author = {Copernicus Climate Change Service}, + title = {Synthetic windstorm events for Europe from 1986 to 2011}, + year = {2022}, + publisher = {Copernicus Climate Change Service (C3S) Climate Data Store (CDS)}, + doi = {10.24381/cds.ce973f02} +} + +@article{rmets_online, + author = {M. A. Walz and T. Kruschke and H. W. Rust and U. Ulbrich and G. C. Leckebusch}, + title = {Quantifying the extremity of windstorms for regions featuring infrequent events}, + journal = {Atmos. Sci. Lett.}, + volume = {18}, + pages = {315-322}, + year = {2017}, + doi = {10.1002/asl.758} +} + + +@article{FGD, + author = {Bernold Feuerstein and Pieter Groenemeijer and Erik Dirksen and Michael Hubrig and Annette M. Holzer and Nikolai Dotzek}, + title = {Towards an improved wind speed scale and damage description adapted for Central Europe}, + journal = {Atmospheric Research}, + volume = {100}, + number = {4}, + pages = {547-564}, + year = {2011}, + issn = {0169-8095}, + doi = {10.1016/j.atmosres.2010.12.026} +} + +@article{Elsus, + author = {Günther, A. and Van Den Eeckhaut, M. and Malet, J.-P. and Reichenbach, P. and Hervás, J.}, + title = {Climate-physiographically differentiated Pan-European landslide susceptibility assessment using spatial multi-criteria evaluation and transnational landslide information}, + journal = {Geomorphology}, + volume = {224}, + pages = {69--85}, + year = {2014} +} + +@article{Elsus2, + author = {Wilde, Martina and Günther, Andreas and Reichenbach, Paola and Malet, Jean-Philippe and Hervás, Javier}, + title = {Pan-European landslide susceptibility mapping: ELSUS Version 2}, + journal = {Journal of Maps}, + volume = {14}, + number = {2}, + pages = {97--104}, + year = {2018}, + doi = {10.1080/17445647.2018.1432511} +} + +@misc{JRC-landslide, + author = {Hervas De Diego, F.}, + title = {Guidelines for Mapping Areas at Risk of Landslides in Europe}, + howpublished = {EUR 23093 EN. Luxembourg (Luxembourg): Office for Official Publications of the European Communities}, + year = {2007}, + note = {JRC42198} +} + +@misc{ESDAC_landslides, + howpublished = {\url{https://esdac.jrc.ec.europa.eu/themes/landslides}}, +} + + + + +@misc{CGJ, + author = {Camia, A. and Libertà, G. 
and San-Miguel-Ayanz, J.}, + title = {Modeling the impacts of climate change on forest fire danger in Europe: sectorial results of the PESETA II Project}, + year = {2017}, + publisher = {Publications Office of the European Union, Luxembourg}, + pages = {24}, + isbn = {978-92-79-66259-1}, + doi = {10.2760/768481} +} + +@article{EIMH, + author = {Chuvieco, E. and Aguado, I. and Yebra, M. and Nieto, H. and Salas, J. and Martín, M.P. and Vilar, L. and Martínez, J. and Martín, S. and Ibarra, P. and de la Riva, J. and Baeza, J. and Rodríguez, F. and Molina, J.R. and Herrera, M.A. and Zamora, R.}, + title = {Development of a framework for fire risk assessment using remote sensing and geographic information system technologies}, + journal = {Ecological Modelling}, + volume = {221}, + number = {1}, + pages = {46--58}, + year = {2010}, + doi = {10.1016/j.ecolmodel.2008.11.017} +} + +@misc{RGTT, + author = {de Rigo, D. and Libertà, G. and Houston Durrant, T. and Artés Vivancos, T. and San-Miguel-Ayanz, J.}, + title = {Forest fire danger extremes in Europe under climate change: variability and uncertainty}, + year = {2017}, + publisher = {Publications Office of the European Union, Luxembourg}, + pages = {71}, + isbn = {978-92-79-77046-3}, + doi = {10.2760/13180} +} + +@article{JTJS, + author = {Dijkstra, J. and Houston Durrant, T. and San-Miguel-Ayanz, J. and Veraverbeke, S.}, + title = {Anthropogenic and lightning fire incidence and burned area in Europe}, + journal = {Land}, + volume = {11}, + number = {5}, + pages = {651+}, + year = {2022}, + doi = {10.1007/s00267-012-9961-z} +} + +@article{GAM, + author = {Ganteaume, A. and Camia, A. and Jappiot, M. and San-Miguel-Ayanz, J. and Long-Fournel, M. and Lampin, C.}, + title = {A review of the main driving factors of forest fire ignition over Europe}, + journal = {Environmental Management}, + volume = {51}, + number = {3}, + pages = {651--662}, + year = {2013}, + doi = {10.1007/s00267-012-9961-z} +} + +@misc{DDHAD, + author = {Jacome Felix Oom, D. and De Rigo, D. and Pfeiffer, H. and Branco, A. and Ferrari, D. and Grecchi, R. and Artes Vivancos, T. and Durrant, T. and Boca, R. and Maianti, P. and Liberta`, G. and San-Miguel-Ayanz, J.}, + title = {Pan-European wildfire risk assessment}, + year = {2022}, + publisher = {Publications Office of the European Union, Luxembourg}, + isbn = {978-92-76-55137-9}, + doi = {10.2760/9429}, + note = {JRC130136} +} + +@techreport{SB, + author = {Scott, J.H. and Burgan, R.E.}, + title = {Standard fire behavior fuel models: a comprehensive set for use with Rothermel’s surface fire spread model}, + institution = {U.S. Department of Agriculture, Forest Service, Rocky Mountain Research Station, United States}, + year = {2005}, + type = {General Technical Report (GTR) RMRS-GTR-153}, + doi = {10.2737/RMRS-GTR-153} +} + +@misc{Swiss_Re, + title = {sigma 2/2019: Secondary natural catastrophe risks on the front line}, + author = {Swiss Re}, + year = {2019}, + howpublished = {Tech. Rep. 2/2019, Swiss Re, Zurich}, + note = {Available at: \url{https://www.swissre.com/institute/research/sigma-research/sigma-2019-02.html} (last access: 22 November 2021)} +} + +@misc{CanFWI, + title = {Fire Weather Index}, + author = {Canadian Wildland Fire Information System}, + howpublished = {\url{https://cwfis.cfs.nrcan.gc.ca/background/summary/fwi}} +} + +@article{Climada_paper, + author = {Lüthi, S. and Aznar-Siguan, G. and Fairless, C. and Bresch, D. 
N.}, + title = {Globally consistent assessment of economic impacts of wildfires in CLIMADA v2.2}, + journal = {Geosci. Model Dev.}, + volume = {14}, + pages = {7175--7187}, + year = {2021}, + doi = {10.5194/gmd-14-7175-2021} +} + +@misc{First, + author = {Luck, M. and Landis, M. and Gassert, F.}, + title = {Aqueduct Water Stress Projections: Decadal projections of water supply and demand using CMIP5 GCMs}, + year = {2015}, + publisher = {World Resources Institute}, + address = {Washington, DC} +} + +@misc{website, + title = {NREL Report}, + year = {2011}, + howpublished = {\url{https://www.nrel.gov/docs/fy11osti/50900.pdf}} +} + +@article{ESDAC, + author = {Joseph Alcamo et al.}, + title = {Global Estimates of Water Withdrawals and Availability under Current and Future ‘Business-as-Usual’ Conditions}, + journal = {Hydrological Sciences Journal}, + volume = {48}, + number = {3}, + pages = {339--348}, + year = {2003}, + doi = {10.1623/hysj.48.3.339.45278} +} + +@article{Vorosmaty, + author = {Charles J. Vorosmarty et al.}, + title = {Global Water Resources: Vulnerability from Climate Change and Population Growth}, + journal = {Science}, + volume = {289}, + number = {5477}, + pages = {284--288}, + year = {2000}, + url = {http://www.sciencemag.org/cgi/doi/10.1126/science.289.5477.284} +} + +@misc{EEA, + title = {Water resources across Europe — confronting water stress: an updated assessment}, + author = {The EEA report}, +} + +@misc{FAO1, + author = {Frenken and Gillet}, + title = {Irrigation Water Requirement and Water Withdrawal by Country}, +} diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt new file mode 100644 index 00000000..6891a24b --- /dev/null +++ b/docs/requirements-docs.txt @@ -0,0 +1,27 @@ +sphinx==5.3.0 +sphinxcontrib-details-directive +numpydoc==1.0.0 +sphinx-copybutton==0.5.0 +myst-nb==0.16.0 +pydata-sphinx-theme==0.13.3 +sphinxcontrib-bibtex +sphinx-toolbox +sphinx_toggleprompt==0.2.0 +sphinx_design==0.3 +sphinx_rtd_theme +sphinx-toolbox +sphinx-simplepdf +pandoc +nbsphinx +graphviz +affine==2.3.0 +numpy==1.22.0 +pydantic==1.9.0 +python-dotenv==0.19.2 +requests==2.27.1 +scipy==1.11.1 +s3fs==2022.1.0 +zarr==2.10.3 +pillow==10.2.0 +dependency-injector==4.41.0 +numba==0.56.4 diff --git a/docs/test.ipynb b/docs/test.ipynb new file mode 100644 index 00000000..e27bdae7 --- /dev/null +++ b/docs/test.ipynb @@ -0,0 +1,130 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "# you need to add matplotlib to the Pipfile\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example of jupyter notebook" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Some text here. You can put equations like:\n", + "$$N\\times e^{-r T},$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can do a lot of things here, like documenting code:\n", + "\n", + "* `maturity` $\\rightarrow T$,\n", + "* `calendar` $\\rightarrow$ $\\tau(t,T)$," + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "You can also create some notes with basic html\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Also run some python code" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Alonso is the best F1 driver ever!\n", + "33\n" + ] + } + ], + "source": [ + "print(\"Alonso is the best F1 driver ever!\")\n", + "\n", + "a = 11\n", + "b = 3\n", + "print(a * b)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Or even do some plots" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'matplotlib'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[4], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m# you need to add matplotlib to the Pipfile\u001b[39;00m\n\u001b[1;32m----> 2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mmatplotlib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpyplot\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mplt\u001b[39;00m\n\u001b[0;32m 4\u001b[0m plt\u001b[38;5;241m.\u001b[39mplot([\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m3\u001b[39m, \u001b[38;5;241m4\u001b[39m], [\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m4\u001b[39m, \u001b[38;5;241m9\u001b[39m, \u001b[38;5;241m16\u001b[39m])\n\u001b[0;32m 5\u001b[0m plt\u001b[38;5;241m.\u001b[39mylabel(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msome numbers\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'matplotlib'" + ] + } + ], + "source": [ + "plt.plot([1, 2, 3, 4], [1, 4, 9, 16])\n", + "plt.ylabel(\"some numbers\")\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "physrisk-1CxTBUVQ", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/manifests/.sops.yaml b/manifests/.sops.yaml deleted file mode 100644 index 3b05ce92..00000000 --- a/manifests/.sops.yaml +++ /dev/null @@ -1,3 +0,0 @@ -creation_rules: - - encrypted_regex: "^(data|stringData)$" - pgp: "EFDB9AFBD18936D9AB6B2EECBD2C73FF891FBC7E, A76372D361282028A99F9A47590B857E0288997C, 04DAFCD9470A962A2F272984E5EB0DA32F3372AC" # ", " diff --git a/manifests/README.md b/manifests/README.md deleted file mode 100644 index c0720354..00000000 --- a/manifests/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Automated Argo workflows - -If you'd like to automate your Jupyter notebooks using Argo, please use these kustomize manifests. If you follow the steps bellow, your application is fully set and ready to be deployed via Argo CD. - -For a detailed guide on how to adjust your notebooks etc, please consult [documentation](https://github.com/aicoe-aiops/data-science-workflows/blob/master/Automating%20via%20Argo.md) - -1. Replace all `` mentions with your project name, respective url or any fitting value -2. 
Define your automation run structure in the `templates` section of [`cron-workflow.yaml`](./cron-workflow.yml) -3. Set up `sops`: - - 1. Install `go` from your distribution repository - 2. Setup `GOPATH` - - ```bash - echo 'export GOPATH="$HOME/.go"' >> ~/.bashrc - echo 'export PATH="${GOPATH//://bin:}/bin:$PATH"' >> ~/.bashrc - source ~/.bashrc - ``` - - 3. Install `sops` from your distribution repository if possible or use [sops GitHub release binaries](https://github.com/mozilla/sops#stable-release) - - 4. Import AICoE-SRE's public key [EFDB9AFBD18936D9AB6B2EECBD2C73FF891FBC7E](https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xefdb9afbd18936d9ab6b2eecbd2c73ff891fbc7e): - - ```bash - gpg --keyserver keyserver.ubuntu.com --recv EFDB9AFBD18936D9AB6B2EECBD2C73FF891FBC7E - ``` - - 5. Import tcoufal's ([A76372D361282028A99F9A47590B857E0288997C](https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xa76372d361282028a99f9a47590b857e0288997c)) and mhild's [04DAFCD9470A962A2F272984E5EB0DA32F3372AC](https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x04dafcd9470a962a2f272984e5eb0da32f3372ac) keys (so they can help) - - ```bash - gpg --keyserver keyserver.ubuntu.com --recv A76372D361282028A99F9A47590B857E0288997C # tcoufal - gpg --keyserver keyserver.ubuntu.com --recv 04DAFCD9470A962A2F272984E5EB0DA32F3372AC # mhild - ``` - - 6. If you'd like to be able to build the manifest on your own as well, please list your GPG key in the [`.sops.yaml` file](.sops.yaml), `pgp` section (add to the comma separated list). With your key present there, you can later generate the full manifests using `kustomize` yourself (`ksops` has to be installed, please follow ksops [guide](https://github.com/viaduct-ai/kustomize-sops#0-verify-requirements). - -4. Create a secret and encrypt it with `sops`: - - ```bash - # If you're not already in the `manifest` folder, cd here - cd manifests - # Mind that `SECRET_NAME` must match the `SECRET_NAME` used in `cron-workflow.yaml` - oc create secret generic \ - --from-literal=path= \ - --from-literal=bucket= \ - --from-literal=access-key-id= \ - --from-literal=secret-access-key= \ - --dry-run -o yaml | - sops --input-type=yaml --output-type=yaml -e /dev/stdin > ceph-creds.yaml - ``` - -Note: You can use the S2I image, that was built by [s2i-custom-notebook](https://github.com/AICoE/s2i-custom-notebook) for this automation. This image is expected to be used by default, therefore the `workingDir` is adjusted to `/opt/app-root/backup`. Please change or remove this settings in case you plan on using different image. 
diff --git a/manifests/cronwf.yaml b/manifests/cronwf.yaml deleted file mode 100644 index efd17a34..00000000 --- a/manifests/cronwf.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: CronWorkflow -metadata: - generateName: - name: -spec: - schedule: "0 0 1 * *" - concurrencyPolicy: "Replace" - workflowSpec: - volumeClaimTemplates: - - metadata: - name: local-data-storage - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi - entrypoint: entrypoint - templates: - - name: entrypoint - steps: - - - name: execute-template - templateRef: - name: - template: entrypoint diff --git a/manifests/imagestream.yaml b/manifests/imagestream.yaml deleted file mode 100644 index da34b0dd..00000000 --- a/manifests/imagestream.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ImageStream -apiVersion: image.openshift.io/v1 -metadata: - name: -spec: - lookupPolicy: - local: true - tags: - - name: latest - from: - kind: DockerImage - name: diff --git a/manifests/ksops.yaml b/manifests/ksops.yaml deleted file mode 100644 index 4240f695..00000000 --- a/manifests/ksops.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: viaduct.ai/v1 -kind: ksops -metadata: - name: ksops-name-doesnt-matter -files: - - ceph-creds.yaml diff --git a/manifests/kustomization.yaml b/manifests/kustomization.yaml deleted file mode 100644 index 24a2016c..00000000 --- a/manifests/kustomization.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -commonLabels: - app.kubernetes.io/name: - app.kubernetes.io/component: - app.kubernetes.io/part-of: aiops-analytics - app.kubernetes.io/managed-by: aicoe-aiops-devops-argocd - -resources: - - cronwf.yaml - - imagestream.yaml - - postsync-wf.yaml - - wftmpl.yaml - -generators: - - ksops.yaml - -patchesJson6902: -- patch: &patch | - - op: remove - path: /metadata/name - target: - group: argoproj.io - version: v1alpha1 - kind: Workflow - name: ignored diff --git a/manifests/postsync-wf.yaml b/manifests/postsync-wf.yaml deleted file mode 100644 index 48e32e66..00000000 --- a/manifests/postsync-wf.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - name: ignored - generateName: - annotations: - argocd.argoproj.io/hook: PostSync -spec: - volumeClaimTemplates: - - metadata: - name: local-data-storage - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 10Gi - entrypoint: entrypoint - templates: - - name: entrypoint - steps: - - - name: execute-template - templateRef: - name: - template: entrypoint diff --git a/manifests/wftmpl.yaml b/manifests/wftmpl.yaml deleted file mode 100644 index 35aeb88d..00000000 --- a/manifests/wftmpl.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: -spec: - entrypoint: entrypoint - templates: - # - name: entrypoint - # steps: - # - - name: data-collection - # template: data-collection - # - - name: analysis - # template: analysis - # - # - name: data-collection - # dag: - # tasks: - # - name: collect-raw-data - # template: notebook-executor - # arguments: - # parameters: [{name: notebook, value: }] - # - ... - # - # - name: analysis - # steps: - # - - name: contributor-analysis - # template: notebook-executor - # arguments: - # parameters: [{name: notebook, value: }] - # - ... 
- - - name: notebook-executor - inputs: - parameters: - - name: notebook - outputs: - artifacts: - - name: rendered_notebook - path: "/mnt/data/notebooks/{{inputs.parameters.notebook}}" - archive: - none: {} - s3: - endpoint: s3.upshift.redhat.com:443 - bucket: - key: "production_data/rendered_notebooks/{{inputs.parameters.notebook}}" - accessKeySecret: - key: access-key-id - name: - secretKeySecret: - key: secret-access-key - name: - container: - image: :latest - command: [jupyter-nbconvert] - args: - - --config - - .jupyter/jupyter_nbconvert_config.py - - "notebooks/{{inputs.parameters.notebook}}" - # If using different image than built by https://github.com/AICoE/s2i-custom-notebook, please change or remote the workingDir settings - workingDir: /opt/app-root/backup - volumeMounts: - - name: local-data-storage - mountPath: /mnt/data - env: - - name: LOCAL_DATA_PATH - value: /mnt/data - - name: RUN_IN_AUTOMATION - value: "true" - - name: NOTEBOOK_NAME - value: "{{inputs.parameters.notebook}}" - - name: S3_ENDPOINT_URL - value: https://s3.upshift.redhat.com - - name: S3_PROJECT_KEY - valueFrom: - secretKeyRef: - key: path - name: - - name: S3_BUCKET - valueFrom: - secretKeyRef: - key: bucket - name: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: access-key-id - name: - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: secret-access-key - name: - resources: - requests: - cpu: 500m - memory: 1Gi - limits: - cpu: '2' - memory: 4Gi diff --git a/methodology/PhysicalRiskMethodology.bbl b/methodology/PhysicalRiskMethodology.bbl new file mode 100644 index 00000000..758908a8 --- /dev/null +++ b/methodology/PhysicalRiskMethodology.bbl @@ -0,0 +1,236 @@ +\begin{thebibliography}{10} + +\bibitem{BavandiEtAl:2022} +{\sc Bavandi, A., Berrais, D., Dolk, M., and Mahul, O.} +\newblock Physical climate risk assessment: Practical lessons for the + development of climate scenarios with extreme weather events from emerging + markets and developing economies. +\newblock Tech. rep., Network for Greening the Financial System, 2022. + +\bibitem{BertramEtAl:2020} +{\sc Bertram, C., Hilaire, J., Kriegler, E., Beck, T., Bresch, D., Clarke, L., + Cui, R., Edmonds, J., Min, J., Piontek, F., et~al.} +\newblock Ngfs climate scenarios database: Technical documentation. + +\bibitem{BlackCox:1976} +{\sc Black, F., and Cox, J.~C.} +\newblock Valuing corporate securities: some effects of bond indenture + provisions. +\newblock {\em Journal of Finance 31}, 2 (1976), 351--367. + +\bibitem{ChauvetEtAl:2024} +{\sc Chauvet, M., and Piger, J.~M.} +\newblock Smoothed u.s. recession probabilities. + +\bibitem{Christidis:2021} +{\sc Christidis, N.} +\newblock Using {CMIP6} multi-model ensembles for near real-time attribution of + extreme events. +\newblock {\em Hadley Centre Technical Note 107\/} (2019). + +\bibitem{Christidis:2013} +{\sc Christidis, N., Stott, P.~A., Scaife, A.~A., Arribas, A., Jones, G.~S., + Copsey, D., Knight, J.~R., and Tennant, W.~J.} +\newblock A new {HadGEM3-A}-based system for attribution of weather-and + climate-related extreme events. +\newblock {\em Journal of Climate 26}, 9 (2013), 2756--2783. + +\bibitem{DosioEtAl:2018} +{\sc Dosio, A., Mentaschi, L., Fischer, E.~M., and Wyser, K.} +\newblock Extreme heat waves under {1.5 C} and {2 C} global warming. +\newblock {\em Environmental Research Letters 13}, 5 (2018), 054006. 
+ +\bibitem{HuizingaEtAl:2017} +{\sc Huizinga, J., De~Moel, H., Szewczyk, W., et~al.} +\newblock Global flood depth-damage functions: Methodology and the database + with guidelines. +\newblock Tech. rep., Joint Research Centre (Seville site), 2017. + +\bibitem{KenyonEtAl:2021} +{\sc Kenyon, C., and Berrahouia, M.} +\newblock Climate change valuation adjustment (ccva) using parameterized + climate change impacts. +\newblock {\em Risk\/} (2021). + +\bibitem{KuzmaEtAl:2023} +{\sc Kuzma, S., Bierkens, M.~F., S.~Lakshman, S., Luo, T., Saccoccia, L., + Sutanudjaja, E.~H., and Van~Beek, R.} +\newblock Aqueduct 4.0: Updated decision-relevant global water risk indicators. +\newblock {\em World Resources Institute\/} (2023). + +\bibitem{LuoEtAl:2021} +{\sc Luo, T., Zhou, L., Falzon, J., Cheng, Y., Christianson, G., Wu, Y., and + Habchi, A.} +\newblock Assessing physical climate risks for the european bank for + reconstruction and development's power generation project investment + portfolio. + +\bibitem{LuoEtAl:2023} +{\sc Luo, T., Zhou, L., Falzon, J., Cheng, Y., Christianson, G., Wu, Y., and + Habchi, A.} +\newblock A framework to assess multi-hazard physical climate risk for power + generation projects from publicly-accessible sources. +\newblock {\em Communications Earth \& Environment 4}, 117 (2023). + +\bibitem{MaskreyEtAl:2011} +{\sc Maskrey, A., Peduzzi, P., Chatenoux, B., Herold, C., Dao, Q.-H., and + Giuliani, G.} +\newblock Revealing risk, redefining development, global assessment report on + disaster risk reduction. +\newblock {\em United Nations Strategy for Disaster Reduction\/} (2011), + 17--51. + +\bibitem{MazdiyasniEtAl:2019} +{\sc Mazdiyasni, O., Sadegh, M., Chiang, F., and AghaKouchak, A.} +\newblock Heat wave intensity duration frequency curve: A multivariate approach + for hazard and attribution analysis. +\newblock {\em Scientific reports 9}, 1 (2019), 1--8. + +\bibitem{MentaschiEtAl:2016} +{\sc Mentaschi, L., Vousdoukas, M., Voukouvalas, E., Sartini, L., Feyen, L., + Besio, G., and Alfieri, L.} +\newblock The transformed-stationary approach: a generic and simplified + methodology for non-stationary extreme value analysis. +\newblock {\em Hydrology and Earth System Sciences 20}, 9 (2016), 3527--3547. + +\bibitem{Merton:1974} +{\sc Merton, R.~C.} +\newblock On the pricing of corporate debt: the risk structure of interest + rates. +\newblock {\em Journal of Finance 29}, 2 (1974), 449--470. + +\bibitem{MitchellEtAl:2017} +{\sc Mitchell-Wallace, K., Jones, M., Hillier, J., and Foote, M.} +\newblock {\em Natural catastrophe risk management and modelling: A + practitioner's guide}. +\newblock John Wiley \& Sons, 2017. + +\bibitem{NeidellEtAl:2014} +{\sc Neidell, M., and Graff~Zivin, J.} +\newblock Temperature and the allocation of time: Implications for climate + change. +\newblock {\em Journal of Labor Economics 32}, 1 (2006), 1--26. + +\bibitem{NeidellEtAl:2021} +{\sc Neidell, M., Graff~Zivin, J., Sheahan, M., Willwerth, J., Fant, C., + Sarofim, M., and Martinich, J.} +\newblock Temperature and work: Time allocated to work under varying climate + and labor market conditions. +\newblock {\em PloS one 16}, 8 (2021), e0254224. + +\bibitem{Nelsen:2007} +{\sc Nelsen, R.~B.} +\newblock {\em An Introduction to Copulas}, 2nd~ed. +\newblock Princeton University Press, New York~(NY), 2007. + +\bibitem{OasisLMF} +{\sc Oasis}. +\newblock Oasis loss modelling framework: open source catastrophe modelling + platform, 2021. + +\bibitem{OSC} +{\sc OS-C}. 
+\newblock {OS-Climate} {(OS-C)} platform, 2021. + +\bibitem{PaprotnyEtAl:2016} +{\sc Paprotny, D., and Morales~Nápoles, O.~O.} +\newblock Pan-european data sets of river flood probability of occurrence under + present and future climate, 2016. + +\bibitem{PortnerEtAl:2022} +{\sc P{\"o}rtner, H.-O., Roberts, D.~C., Adams, H., Adler, C., Aldunce, P., + Ali, E., Begum, R.~A., Betts, R., Kerr, R.~B., Biesbroek, R., et~al.} +\newblock {\em Climate change 2022: Impacts, adaptation and vulnerability}. +\newblock IPCC Geneva, Switzerland:, 2022. + +\bibitem{RangerEtAl:2022} +{\sc Ranger, N.~A., Mahul, O., and Monasterolo, I.} +\newblock Assessing financial risks from physical climate shocks. + +\bibitem{RaschkeEtAl:2022} +{\sc Raschke, M.} +\newblock About the return period of a catastrophe. +\newblock {\em Natural Hazards and Earth System Sciences 22}, 1 (2022), + 245--263. + +\bibitem{ReisingerEtAl:2020} +{\sc Reisinger, A., Howden, M., Vera, C., et~al.} +\newblock {\em The Concept of Risk in the IPCC Sixth Assessment Report: A + Summary of Cross-Working Group Discussions}. +\newblock IPCC Geneva, Switzerland:, 2020. + +\bibitem{RichtersEtAl:2022} +{\sc Richters, O., et~al.} +\newblock Climate scenarios database: Technical documentation v3.1. +\newblock Tech. rep., Network for Greening the Financial System, 2022. + +\bibitem{ScawthornEtAl:2006} +{\sc Scawthorn, C., Blais, N., Seligson, H., Tate, E., Mifflin, E., Thomas, W., + Murphy, J., and Jones, C.} +\newblock {HAZUS-MH} flood loss estimation methodology. i: Overview and flood + hazard characterization. +\newblock {\em Natural Hazards Review 7}, 2 (2006), 60--71. + +\bibitem{StottEtAl:2016} +{\sc Stott, P.~A., Christidis, N., Otto, F.~E., Sun, Y., Vanderlinden, J.-P., + van Oldenborgh, G.~J., Vautard, R., von Storch, H., Walton, P., Yiou, P., + et~al.} +\newblock Attribution of extreme weather and climate-related events. +\newblock {\em Wiley Interdisciplinary Reviews: Climate Change 7}, 1 (2016), + 23--41. + +\bibitem{Taylor:2015} +{\sc Taylor, P.} +\newblock Calculating financial loss from catastrophes. +\newblock In {\em SECED 2015 Conference: Earthquake risk and engineering + towards a resilient world\/} (2015), Society for earthquake and civil + engineering dynamics. + +\bibitem{OasisFinancialModule} +{\sc Taylor, P., and Carter, J.} +\newblock Oasis financial module, 2020. + +\bibitem{ThrasherEtAl:2022} +{\sc Thrasher, B., Wang, W., Michaelis, A., Melton, F., Lee, T., and Nemani, + R.} +\newblock Nasa global daily downscaled projections, {CMIP6}. +\newblock {\em Scientific Data 9}, 1 (2022), 1--6. + +\bibitem{VicenteSerranoEtAl:2010} +{\sc Vicente-Serrano, S., Begueria, S., and Lopez-Moreno, J.~I.} +\newblock A multiscalar drought index sensitive to global warming: the + standardized precipitation evapotranspiration index. +\newblock {\em Journal of Climate 23}, 7 (2010), 1696--1718. + +\bibitem{WardEtAl:2011} +{\sc Ward, P.~J., De~Moel, H., and Aerts, J.} +\newblock How are flood risk estimates affected by the choice of + return-periods? +\newblock {\em Natural Hazards and Earth System Sciences 11}, 12 (2011), + 3181--3195. + +\bibitem{WardEtAl:2013} +{\sc Ward, P.~J., Jongman, B., Weiland, F.~S., Bouwman, A., van Beek, R., + Bierkens, M.~F., Ligtvoet, W., and Winsemius, H.~C.} +\newblock Assessing flood risk at the global scale: model setup, results, and + sensitivity. +\newblock {\em Environmental research letters 8}, 4 (2013), 044019. 
+
+\bibitem{WardEtAl:2020}
+{\sc Ward, P.~J., Winsemius, H.~C., Kuzma, S., Bierkens, M.~F., Bouwman, A.,
+  De~Moel, H., Loaiza, A.~D., Eilander, D., Englhardt, J., Erkens, G., et~al.}
+\newblock Aqueduct floods methodology.
+\newblock {\em World Resources Institute\/} (2020), 1--28.
+
+\bibitem{WoetzelEtAl:2020}
+{\sc Woetzel, J., Pinner, D., and Samandari, H.}
+\newblock Climate risk and response: Physical hazards and socioeconomic
+  impacts.
+
+\bibitem{ZhangAndShindell:2021}
+{\sc Zhang, Y., and Shindell, D.~T.}
+\newblock Costs from labor losses due to extreme heat in the {USA} attributable
+  to climate change.
+\newblock {\em Climatic Change 164}, 3 (2021), 1--18.
+
+\end{thebibliography}
diff --git a/methodology/PhysicalRiskMethodology.pdf b/methodology/PhysicalRiskMethodology.pdf
new file mode 100644
index 00000000..7e899265
Binary files /dev/null and b/methodology/PhysicalRiskMethodology.pdf differ
diff --git a/methodology/PhysicalRiskMethodology.tex b/methodology/PhysicalRiskMethodology.tex
new file mode 100644
index 00000000..89ed3598
--- /dev/null
+++ b/methodology/PhysicalRiskMethodology.tex
@@ -0,0 +1,1320 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% Generic Methodology for Physical Climate Risk Modelling
+%
+% 2021 OS-Climate
+%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+\documentclass[a4paper,11pt]{extarticle} %12pt
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Required packages
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\usepackage[utf8]{inputenc}
+\usepackage{amsmath}
+\usepackage{amssymb}
+\usepackage{bm}
+\usepackage[shortlabels]{enumitem}
+\usepackage{fancyhdr}
+\usepackage{float}
+\usepackage{framed}
+\usepackage{glossaries}
+\usepackage{graphicx}
+\usepackage{numprint}
+%\usepackage{physics} % causing problems; using different notation for bra-ket
+%\usepackage{sfmath}[cmbright]
+%\usepackage[round]{natbib}
+\usepackage{ragged2e}
+\usepackage{scrextend}
+\usepackage{sistyle}
+\usepackage{subcaption}
+
+%\usepackage{bookmark}
+% Define hyperlink colors
+\usepackage{xcolor}
+\usepackage[linkcolor=blue, colorlinks=true, citecolor=blue, urlcolor=blue]{hyperref}
+\usepackage[normalem]{ulem}
+\usepackage{mathtools}
+
+% Theorem environments
+\usepackage{amsthm}
+\newtheorem{theorem}{Theorem}[section]
+\newtheorem{lemma}[theorem]{Lemma}
+\newtheorem{corollary}[theorem]{Corollary}
+\newtheorem{proposition}[theorem]{Proposition}
+
+
+% Definition environments
+\theoremstyle{definition}
+\newtheorem{definition}[theorem]{Definition}
+\newtheorem{example}[theorem]{Example}
+\newtheorem{assumption}{Assumption}
+\newtheorem{error}{Error}
+
+%% Remark environment
+%% Define a 'remark' environment, numbered in sequence with theorem
+\newtheorem{remarkx}[theorem]{Remark}
+\newenvironment{remark}
+{\pushQED{\qed}\renewcommand{\qedsymbol}{$\diamond$}\begin{remarkx}}
+  {\popQED\end{remarkx}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% General settings
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%
+% Font
+%%%%%%%%%%%%%
+
+%\renewcommand*{\familydefault}{\sfdefault}
+
+%%%%%%%%%%%%%
+% Spacing
+%%%%%%%%%%%%%
+
+\setlength{\parskip}{1em}
+\setlength{\parindent}{0em}
+\frenchspacing
+
+%%%%%%%%%%%%%
+% Hyphenation
+%%%%%%%%%%%%%
+
+\tolerance=1
+\emergencystretch=\maxdimen
+\hyphenpenalty=10000
+\hbadness=10000
+
+%%%%%%%%%%%%%
+% Number formatting
+%%%%%%%%%%%%%
+
+\npthousandsep{,}
+\npthousandthpartsep{}
+\npdecimalsign{.}
+
+%%%%%%%%%%%%%
+% Footnotes
+%%%%%%%%%%%%%
+
+\usepackage[hang, flushmargin]{footmisc}
+\setlength{\footnotemargin}{4mm}
+
+%%%%%%%%%%%%%
+% Running title
+%%%%%%%%%%%%%
+
+\pagestyle{fancy}
+\lhead{OS-Climate}
+\rhead{Physical Climate Risk Methodology}
+
+\makeglossaries
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\title{Physical Climate Risk Methodology}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\author{Joe Moorhouse\thanks{\textit{E-mail}: joe.moorhouse@gmail.com}
+    \and
+    Mariem Bouchaala\thanks{\textit{E-mail}: mariem.bouchaala@essec.edu}
+    \and
+    Davide Ferri
+    \and
+    Florian Gallo
+    \and
+    Eglantine Giraud
+    \and
+    Michael Levin
+    \and
+    Álvaro Romaniega\thanks{\textit{E-mail}: aromaniega@arfimaconsulting.com. Romaniega's work has been developed under CPP2021-008644, financed by MCIN/AEI/10.13039/501100011033 and Europe's NextGenerationEU/PRTR.
+    \smallskip
+    \newline% \indent
+    The views expressed in this paper are those of the authors and do not necessarily reflect the views and policies of their respective employers.}
+    }
+
+\date{Feb 2024 [Draft]}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{document}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\maketitle{}
+
+%\begin{abstract}
+%Add abstract here.
+%\end{abstract}
+
+
+\clearpage
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% TEMP ONLY during writing stage
+\setcounter{tocdepth}{4}
+\renewcommand{\contentsname}{Contents}
+\tableofcontents
+
+%\listoftables
+
+%\listoffigures
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+\clearpage
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Introduction}
+\label{Sec:Introduction}
+
+\newglossaryentry{hazard}
+{
+    name=hazard,
+    description={Climate-related physical phenomenon that can impact natural and socioeconomic systems.}
+}
+\newglossaryentry{acute_hazard}
+{
+    name=acute hazard,
+    description={Hazard which is an event, for example a heat wave, inundation, hurricane or wildfire.}
+}
+\newglossaryentry{chronic_hazard}
+{
+    name=chronic hazard,
+    description={Hazard which is a long-term shift in a climate parameter such as average temperature, sea-level or a water stress index.}
+}
+\newglossaryentry{hazard_event}
+{
+    name=hazard event,
+    description={Definition of the occurrence of a hazard which can be assigned a probability. For example, a flood occurring in the year 2050 at a certain location with a depth greater than 50 cm.}
+}
+\newglossaryentry{hazard_parameter}
+{
+    name=hazard parameter,
+    description={Definition of the shifting parameter of a chronic hazard. This is a time-varying quantity such as the average temperature or the average number of days per year over a certain temperature threshold.}
+}
+\newglossaryentry{CMIP}
+{
+    name=CMIP,
+    description={The Coupled Model Intercomparison Project (CMIP) is a project of the World Climate Research Programme (WCRP) providing climate projections to understand past, present and future climate changes. CMIP and its associated data infrastructure have become essential to the Intergovernmental Panel on Climate Change (IPCC) and other international and national climate assessments.}
+}
+
+The changing climate introduces new risks.
These can be grouped into:
+\begin{enumerate}
+    \item Physical risks -- risks arising from the physical effects of climate change,
+    \item Transition risks -- risks arising from the transition to a low-carbon economy,
+    \item Liability risks -- considered a third category by some \cite{WoetzelEtAl:2020} -- the risks arising when those affected by anthropogenic climate change seek compensation.
+\end{enumerate}
+
+The methodology presented in this document concerns the assessment of physical risk. Physical risk comes from changes in climate \emph{\gls{hazard}s}. A hazard is the potential occurrence of a climate-related physical phenomenon that can impact human and ecological systems \cite{ReisingerEtAl:2020,WoetzelEtAl:2020,MitchellEtAl:2017}. More precisely, the impact may be loss of life, injury, or other
+health impact, as well as damage and loss to property, infrastructure,
+livelihoods, service provision, ecosystems, and environmental resources (see Annex II of \cite{PortnerEtAl:2022}). Hazards can be divided into \emph{\gls{acute_hazard}s} and \emph{\gls{chronic_hazard}s}. An acute hazard is the potential occurrence of an \emph{event}, for example a heat wave, inundation (flood) or hurricane. A chronic hazard is the potential occurrence of a trend in climate parameters such as an increase in average temperature, sea-level or water stress indices.
+
+The authors of \cite{RangerEtAl:2022} argue that sudden acute events are the most likely to generate \emph{`material shocks to the financial sector in the near-term'} and note that techniques to generate probabilistic scenarios to inform financial decision making are well-developed in the insurance industry. We take this as a guiding principle: a methodology for the measurement of physical climate risk should be adapted from the techniques developed by catastrophe modellers. The authors also note that \emph{`the financial risks from physical climate shocks cannot be approximated by considering only average annual costs of weather extremes, even on long timescales. Larger, rarer events can cause significant damage and disruption and have long-lived impacts.'} The handling of such rare events is a second guiding principle.
+
+A model designed to quantify physical risk must take into account: a) the likelihood of occurrence of the hazard, b) the damage or disruption caused and c) the consequences of this damage/disruption. Damage/disruption caused by a hazard is determined by the \emph{vulnerability} of the asset that is exposed\footnote{\emph{Exposure} of an asset to a hazard is defined after \cite{MaskreyEtAl:2011}; for most purposes this is determined by asset location.}. With a focus on financial risk, damage/disruption refers to damage of financial assets and disruption to business activities. More generally, damage can refer to natural assets and disruption to populations and ecosystems. Hereafter we use the word `asset' to describe both physical assets and business activities. As an example, the physical infrastructure of a power generating asset may be damaged by inundation and its electricity production may be disrupted, leading to a loss in revenue.
+
+We assign explicit names to these three components, a), b) and c), for the financial risk case:
+\begin{enumerate}[a)]
+\item Hazard
+\item Vulnerability
+\item Financial
+\end{enumerate}
+
+The authors of \cite{BavandiEtAl:2022} note that \emph{`historical damage data may be insufficient or even irrelevant for generating a risk assessment that reflects the current or future risk reality'} and further that \emph{`physical climate risk assessments now increasingly use data sets that can individually characterize hazard, exposure, and vulnerability'}. The methodology presented in this document is indeed intended to perform such assessments.
+
+A precise model of the physical risk from a single (localized) asset or business activity must consider a hazard's likelihood of occurrence \emph{at the asset's locale} (i.e. if the asset is exposed to the hazard) and the vulnerability to the hazard particular to that asset. This requires specific knowledge of the asset. For example, a power station that relies on air-cooling might be disrupted by a period of extremely high air temperature. In addition, an asset may be impacted through its reliance on other assets; a manufacturing facility may rely on continuity of electricity supply for example.
+
+Such precise models of physical risk can be complex and may rely on information that is not readily available. For these reasons, approximations are commonly used, although approximate models still typically include hazard, vulnerability and financial components \cite{BertramEtAl:2020,WoetzelEtAl:2020} -- even global-scale impact analyses intended to be used in macroeconomic models.
+
+The purpose of this paper is to present the methodology of a framework that is sufficiently generic to be used for a wide range of physical climate risk models, both precise and approximate as required. The ability to perform precise, fine-grained calculations is therefore an important requirement. This paper serves as a specification for use in the \emph{`physrisk'} OS-Climate (OS-C) \cite{OSC} physical climate risk calculation module.
+
+OS-C aims to provide a platform unconstrained by any one particular methodology choice, but takes inspiration from natural catastrophe modelling \cite{MitchellEtAl:2017} and in particular the \emph{Oasis Loss Modelling Framework} \cite{OasisLMF} (henceforth \emph{Oasis LMF}), which was designed to accommodate a wide range of catastrophe models and analyse physical risk in the context of the insurance market. Similarly to \emph{Oasis LMF}, we adopt a modular approach. This approach allows the user to easily change a particular modelling method, whilst maintaining the integration of the components.
+
+In the following, models of hazards, vulnerability and financial impact are discussed in more detail. In a later section these are presented more formally.
+
+\paragraph{Hazard.}
+As noted above, hazard models come in two varieties: models of acute hazards -- events -- and models of chronic hazards -- long-term shifts in climate parameters. In climate risk, events are \emph{climate-conditioned}: based not just on historical events but also on future projections under different assumptions.
+\begin{enumerate}[label=\Alph*.]
+\item{\emph{Acute hazard models.}}
+
+Acute hazard models may be \emph{event-based} or \emph{return-period-based}.
+\emph{Event-based hazard models} are models of individual events and are common in natural catastrophe modelling. Typically, for a large number of events, a model provides spatial distributions (i.e.
a map) of the probabilities of occurrence of different event intensities. These distributions are sometimes called `hazard footprints' \cite{OasisFinancialModule}. As an example, in the case of inundation, the hazard footprint would provide for different locations the probability of occurrence of different inundation depths -- associated with one particular inundation event.
+
+Event-based hazard models are important when the \emph{correlation} of hazards to which assets are exposed is material to the analysis being performed. For example, if one house on a street is exposed to an inundation it is likely that the house two doors down will also be exposed. This is captured by event-based hazard models: both houses might well appear in the same hazard footprint. The models are therefore important in reinsurance risk calculations, say, where the ability to calculate accurately `worst-case' losses is highly desirable.
+
+\emph{Return-period-based hazard models}, in contrast, provide at each location a curve of return periods and corresponding intensities of an acute hazard. For example, an inundation model may specify that at a certain latitude and longitude an inundation event with a depth of 50~cm will occur with a return period of 100 years, 70~cm with a return period of 200 years and so on\footnote{For inundation models, 10 return periods for each location may be specified.}. Such a return-period map provides no correlation information.
+
+Return-period-based approaches offer a significant computational advantage at the expense of the missing correlation information. Appropriate use-cases are:
+
+\begin{itemize}
+
+\item when correlation of hazards is not relevant to the calculation, for example when modelling a single asset, or for certain calculations, such as annual average ground-up loss\footnote{For the calculation of certain measures only the marginal probability of occurrence of a hazard at a given location is relevant. This is discussed in more detail later, but may be true for certain average measures; for measures of tail-risk, in contrast, joint probability is needed for the calculation.}, or
+\item when it is satisfactory to model correlation using a heuristic approach (e.g. when obtaining a rough estimate based on 0\% or 100\% correlation or some estimated average spatial correlation). This may be used in cases where it is desirable to separate low-risk from high-risk lending portfolios for example.
+
+\end{itemize}
+\item{\emph{Chronic hazards.}}
+As discussed above, acute hazard models provide probability distributions for future events. In contrast, chronic hazard models provide climate parameters only. The models are therefore non-probabilistic for a given scenario. For example, a model of average surface temperature would provide values for different latitudes and longitudes under a particular climate scenario (e.g. a particular shared socioeconomic and representative concentration pathway). \emph{Under this particular scenario}, the assumption is that the climate parameter is certain to take that value.
+
+\end{enumerate}
+
+Hazard likelihood of occurrence is generally scenario-based \cite{BertramEtAl:2020}. That is, hazards are modelled in a historical baseline and then again assuming a particular climate scenario.
+
+\paragraph{Vulnerability.}
+The vulnerability component measures the potential impact of a catastrophic event on an asset. One very important feature of the methodological approach is that, like acute hazard models, \emph{vulnerability models are probabilistic}.
This is because:
+\begin{itemize}
+\item{The damage or disruption to an asset caused by some precisely known event (e.g. a flood of known intensity) may not itself be precisely known or knowable. That is, there is an \emph{aleatory uncertainty} \cite{MitchellEtAl:2017}.}
+\item{In many cases the properties of the asset sufficient to ascertain its vulnerability are, although in principle knowable, unknown. That is, there is an \emph{epistemic uncertainty} \cite{MitchellEtAl:2017}.}
+\end{itemize}
+Both these sources of uncertainty may be material to the calculation in hand and neglecting them is a potential source of model risk\footnote{We define model risk to be the risk that the model outputs lead to poor management information or cause bad decisions to be made. Note that model risk is \emph{not} the risk that a model is wrong: all models are in some sense `wrong', although some may be useful.}. In many climate risk calculations, epistemic uncertainty is particularly important: the vulnerability of assets in some portfolios may be uncertain but this does not prevent, say, the estimation of a likely upper bound to damage or disruption. Such a bound may be wrong, however, if one does not allow for the possibility that the asset's nature is such that it may be significantly more vulnerable than some average level.
+
+At the time of writing, physical risk calculations may make use of `bulk-assessment' approaches where accurate asset vulnerability information is unavailable and approximations are therefore required. The modelling framework aims to accommodate bulk-assessment-type models as well as approaches capable of modelling vulnerability more precisely, the assumption being that there is potentially great value in the results obtained from very simple models, as long as the model uncertainty is properly quantified. The aim is to be able to accommodate both simple and complex models in combination.
+
+\paragraph{Financial.}
+Finally, the financial component is concerned with translating a probability distribution of asset damage/disruption to a loss of profitability for a company, or a loss of value for a lender, insurer, equity stakeholder etc. Clearly, the models used in the Financial module answer questions specific to a certain user: is the ultimate objective that of measuring the physical risk for the company or for one of the assets' insurers? Depending on the answer to that question, a different financial model might be needed.
+
+One important feature of financial models is the role played by insurance. For example, for a lender, what is the impact of climate change on the level of risk of a commercial real estate portfolio? An increase in the frequency and severity of acute hazards may affect the value of the loan collateral, but this can depend strongly on whether, for example, damage/disruption is covered by insurance and premiums remain low.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\paragraph{Design goal.}
+The design goal of the \emph{`physrisk'} library is to facilitate the analysis of physical risk from a variety of perspectives. There is no specific market (e.g. insurance market) in mind; the intent is to be sufficiently general to allow any stakeholder to analyse their physical risks. In addition, the framework should introduce as few constraints as possible: the clear hypothesis is that modules are dependent on one another only through their potential input-output relationships.
Other than that, the intent is to allow users flexible access to a wide choice of models within each module: the models are only required to have a predictable external behaviour, while the details of the internal workings can be defined with flexibility.
+
+A number of teams have developed models and tools that tackle one specific aspect of the problem. However, it is not currently easy to seamlessly integrate models which operate in different areas to produce an end-to-end physical risk analysis, or to experiment with different approaches.
+
+\section{Model description}
+
+\subsection{Overview}
+A high-level view of the physical risk modelling framework is shown in Figure~\ref{Fig:top_level_view}.
+
+Data sets of \emph{hazard indicators}\footnote{A hazard indicator is a measure used to quantify a hazard.} are obtained from a variety of sources. For modelling acute hazards, datasets typically comprise probability distributions of events such as inundations, periods of drought or periods of high wind. These data sets might, for example, specify the annual probability of occurrence of an event (e.g. high wind) of a certain intensity (e.g. maximum wind speed) for some specified year in the future. An important class of such data sets are the \emph{hazard maps}, described later in this section.
+
+\emph{Vulnerability models} are applied to different types of asset and different types of hazard. A vulnerability model will use information about an asset, typically its geolocation, type and relevant characteristics, and will source hazard indicator data using one or more \emph{hazard models}. The vulnerability model will construct from these inputs an impact distribution.
+
+In the case of acute hazards, in order to derive the impact distribution, vulnerability models will typically construct both:
+\begin{itemize}
+    \item Hazard event distributions: probability distributions of events that impact the assets \emph{at their locations}, derived from hazard data sets,
+    \item Vulnerability distributions: conditional probability distributions of the impacts on the assets of events of given intensity.
+\end{itemize}
+
+The vulnerability model uses these quantities to derive distributions of impact for each asset. An impact might be, for example, damage to the asset, expressed as a fraction of the asset value. The financial risk model calculates financial measures from impact distributions.
+
+Within the OS-C modelling framework, models are interchangeable and allow forms of composition. That is, different choices of vulnerability model may be used for a particular asset and a vulnerability model may use different hazard data sets for its calculation. The intention is to allow a risk calculation to be built from an ecosystem of hazard and vulnerability models according to the requirements of the model owner.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{figure}[ht]
+
+    \begin{framed}
+    % left, bottom, right, top
+    \includegraphics[clip, trim=0cm 0.5cm 0cm 1cm, width=1.00\textwidth]{plots/top_level_view.pdf}
+
+    \end{framed}
+
+    \footnotesize
+
+    \renewcommand{\arraystretch}{1.01}
+
+    \vspace{-3ex}
+
+    \vspace{-0.5ex}
+
+    \caption{\small Physical risk model components. }
+    \label{Fig:top_level_view}
+
+\end{figure}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Hazard model}
+As described above, one or more hazard models are used to provide the hazard indicators required by vulnerability models.
Vulnerability models request information from hazard models based on the location and spatial extent of the assets.
+
+Hazard indicator data sets are available from a number of sources, both publicly available and proprietary. A recent paper \cite{BavandiEtAl:2022} provides a review of sources of the \emph{`forward-looking climate-conditioned hazard data sets'}, as the authors describe the data, that are required by the methodology presented in this document. \cite{RichtersEtAl:2022} provides technical information on the data sets provided by the Network for Greening the Financial System (NGFS) scenarios and provides a useful introduction to scenarios and in particular Representative Concentration Pathways (RCPs) and Shared Socioeconomic Pathways (SSPs) as well as the Coupled Model Intercomparison Project (CMIP) of global climate models data sets. \gls{CMIP} data sets are very often key inputs into the derivation of forward-looking climate-conditioned hazard indicators.
+
+Subsequent sections include details of both vulnerability models and sources of hazard indicator data sets for each type of hazard.
+
+\subsection{Vulnerability model}
+\label{SubSec:VulnerabilityModel}
+The \emph{vulnerability model} determines how an asset is impacted by a hazard. The impact is a quantity from which financial loss can be inferred, but is not itself a monetary value. For example, an impact might be the damage sustained to a building as a fraction of its value or the annual loss of energy output of a power station as a fraction of its annual output\footnote{A systemic change in annual output changes asset value, since this is partly determined by the expected future cash flows generated by the asset.}. In each case, a further (financial) model is required to translate the impact to a change in asset value and thereby a change in financial measure. In principle an impact might lead to an increase or decrease in value.
+
+Catastrophe models sometimes define a quantity `damage', and talk about `damageability'. `Damage' and `impact' are analogous quantities here but `impact' is perhaps better suited to situations where there is, say, a decrease in output efficiency of a plant as a result of a period of higher temperatures.
+
+Vulnerability models as used in physical risk calculations may overlap with those of catastrophe models. OS-C aims to support a wide range of models, but it is desirable to identify approaches that generalize a large class of these. One such approach is adopted from Oasis LMF \cite{OasisLMF}. The first assumption behind this is that a model should capture two important types of uncertainty, doing so by representing each by a probability distribution:
+\begin{enumerate}
+    \item Uncertainty as to the frequency and intensity (or severity) of events that potentially lead to a change in asset value. This is sometimes called the {\it primary uncertainty}
+    \item Uncertainty as to the vulnerability of assets to events (i.e. response of assets to events of a given intensity), the {\it secondary uncertainty}
+\end{enumerate}
+
+These quantities are defined more precisely in \ref{Sec:MathematicalDescriptionOfAssetImpactModel}. Impact can be modelled using a {\it mean impact curve} (or {\it mean damage curve} in catastrophe modelling nomenclature). This is a curve relating an event intensity to an impact (e.g. a wind event with a given maximum gust speed will cause a given fractional damage to a property).
In general, however, there is uncertainty as to the impact on an asset of an event of a given intensity -- in the example, the wind may cause mild or severe damage. For this reason, the vulnerability is rather represented as a two-dimensional function.
+
+A second assumption is that the probabilities of such events may not be readily represented by distributions such as beta, gamma, beta-Bernoulli or truncated Gaussian and may be complex and multi-modal. Discrete probability distributions are therefore used in order to represent the range of possible distributions: a non-parametric approach. In the mathematical description below, the continuous form of the model is first given, followed by a discretisation intended for the capture of non-parametric distributions.
+
+\subsubsection{Mathematical description of vulnerability models}
+The output of the vulnerability model is an impact, $d$, for an asset and for a given year\footnote{$d$ for `damage/disruption' is used to denote impact as $i$ is reserved for indexing.}. Physical climate risk analyses are generally concerned with how impacts change as a result of changes to the climate. In contrast with catastrophe modelling where an impact is typically calculated according to a historical baseline, a climate risk calculation would typically compare the baseline to an impact calculated under a particular climate scenario and period in the future.
+
+The impact of hazards on a portfolio of $n$ assets is a (multivariate) probability distribution as a result of the primary and secondary uncertainties. The impact in a given year on a single asset is a random variable, $D$, with (marginal) probability density function $f_D(d)$. The probability of an impact exceeding $d$ (the \emph{exceedance probability}), $F'_D(d) = \mathbb{P}[D > d]$ is given by:
+
+    \begin{equation}
+    \label{Eq:ImpactExceed}
+    F'_D(d) = \int_d^{\infty} f_D(u) du
+    \end{equation}
+
+This is related to the cumulative probability $F_{D}(d) = \mathbb{P}[D \le d]$ by $F'_D(d) = 1 - F_D(d)$. Exceedance probabilities are a popular measure in catastrophe modelling and the notation $F$ for cumulative probability and $F'$ for exceedance probability is used hereafter to avoid confusion.
+
+The value of a hazard indicator, which quantifies the intensity of the phenomenon to which the hazard refers (we also use the term `hazard intensity'), is given by random variable $S$. This has marginal probability density $f_S(s)$. This is the probability density of an event occurring in a given year with intensity $s$. This distribution captures the primary uncertainty.
+
+We define a conditional probability density function $f_{D|S}(d, s)$ to be the probability density that, given the occurrence of a hazard indicator $s$, an impact occurs of size $d$. This distribution captures the secondary uncertainty.
+
+The impact distribution is then given by:
+
+    \begin{equation}
+    \label{Eq:ImpactEffective}
+    f_D(d) = \int_{-\infty}^{\infty} f_S(s) f_{D|S}(d, s) ds
+    \end{equation}
+
+Note that $f_D(d)$ is identical to the {\it effective damageability} distribution of Oasis LMF \cite{OasisFinancialModule} and can be described as the `effective impact'. It is a marginal distribution and as such does not capture any correlation between events or impacts. In the catastrophe models of Oasis LMF, impacts are sampled from this distribution \cite{OasisFinancialModule}, for example samples of fractional damage, which form the basis of a Monte Carlo calculation.
This is done in order to apply insurance policy terms and conditions, which can be complex and non-linear.
+
+In the case of $n$ assets, multivariate random variables are defined $\mathbf{D} = (D_1,\ldots,D_n)$ and $\mathbf{S} = (S_1,\ldots,S_n)$. We henceforth use the single-asset index $i$ to denote marginal distributions: $f_{S_i}(s_i)$.
+
+\subsubsection{Return-period-based approach}
+
+The \emph{return-period-based approach} -- sometimes called a hazard-map-based approach -- makes use of marginal distributions $f_{S_i}$ for each (asset) location $i$, and not the joint distribution, $f_S(\mathbf{s})$. By definition, the marginals are related to the joint distribution by:
+
+    \begin{equation}
+    \label{Eq:ImpactMarginal}
+    f_{S_i}(s_i) = \int_{-\infty}^{\infty} \int_{-\infty}^{\infty} \dots \int_{-\infty}^{\infty} f_S(s_1,s_2, \dots,s_n) \,ds_1 \dots ds_{i-1} ds_{i + 1} \dots ds_n
+\end{equation}
+
+$f_{S_i}(s_i)$ is usually inferred from a type of hazard indicator data set known as a \emph{hazard map}.
+
+\paragraph{Hazard maps.} Hazard maps are three-dimensional data sets from which intensities of hazard indicators can be looked up for different locations and different return periods, i.e., $h(x, y, \tau)$ provides the hazard indicator intensity at location $(x, y)$ for return period $\tau$. That is, $h$ is the hazard indicator intensity such that the average time between events with intensity higher than $h$ is $\tau$.
+
+\begin{remark}
+    More precisely, if we define $X^h_t$ as the number of events in a given period of length $t$ such that the intensity is greater than $h$, then the return period (in years) satisfies \cite{RaschkeEtAl:2022},
+    \begin{equation}\label{Eq:ReturnPeriodDef}
+        \mathbb{E}\left( X_t^{h(x,y, \tau)}\right)=\frac{t}{\tau}\,.
+    \end{equation}
+For instance, assuming that $X_t^h$ follows a Poisson distribution with parameter $\lambda^h_t$ (see below), that is, $X_t^h\sim\text{Poi}\left(\lambda^h_t\right)$, then,
+$$
+\mathbb{E}\left( X_t^{h(x,y, \tau)}\right)=\lambda^h_t\,,
+$$
+where $\lambda^h_t$ is the intensity of the Poisson process. Thus, $\lambda^h_t = t/\tau$. By definition, we set $X_1^h\coloneqq X^h$, that is, if there is no subindex we are assuming a one-year period.
+\end{remark}
+
+In order to use hazard maps to derive probabilities, we must specify the model of probability of occurrence of events with intensity higher than $h$ assumed by the data set. Occurrence may be modelled by a Poisson distribution as in Equation~\ref{Eq:Poisson}. This gives the probability of $k$ occurrences in time interval $t$ where $\tau$ is the return period.
+
+\begin{equation}
+\label{Eq:Poisson}
+\mathbb{P}[{X^h_t} = k] = \frac{(t / \tau)^k}{k!} e^{-\frac{t}{\tau}}
+\end{equation}
+
+
+\begin{remark}
+    It can be noted that, by Taylor's Theorem,
+\begin{equation}\label{Eq:PoissonApprox}
+    \mathbb{P}(X_t^h\ge1)=1-e^{-t/\tau}=t/\tau+o\left(t/\tau\right)\approx t/\tau\,,
+\end{equation}
+    where the last approximation is valid if $t/\tau$ is ``small''. $\mathbb{P}(X_t^h\ge1)$ is also known as the \emph{occurrence exceedance probability}: the probability that the intensity of the highest-intensity event in $t$ exceeds $h$. Using this notation, $F'_S$ is more precisely defined: $F'_S(h) = \mathbb{P}(X_1^h\ge1)$. That is, $F'_S$ is the occurrence exceedance probability.
+\end{remark}
+
+Alternatively, the number of occurrences can be modelled as a Binomial distribution as in Equation~\ref{Eq:Binomial}, which provides the probability that $k$ years have at least one occurrence, for a period of $n$ years, assuming that $\tau$ is specified in years. This is given by
+\begin{equation}
+    \label{Eq:Binomial}
+    \binom{n}{k} (1/\tau)^k (1-1/\tau)^{n - k}.
+\end{equation}
+
+According to Equation~\ref{Eq:Binomial}, \emph{the probability that in a single year there is at least one event with intensity of $h$ or higher is $1/\tau$}. Unless otherwise specified, this is the interpretation used for $\tau$. Note that for Equation~\ref{Eq:Poisson} this relationship only applies approximately, see \eqref{Eq:PoissonApprox}.
+
+
+$F_{S_i}$ can then be inferred from the hazard map for point-like assets. The curve $h(x_i, y_i, \tau)$ is looked up, providing $\tau$ and thereby annual (occurrence) exceedance probabilities for different intensities, $h$.
+
+\begin{remark}\label{rem:OEPvsFS}
+    More precisely, let us define the occurrence exceedance probability, $F'_{S,t}(\cdot)$, as
+    \begin{equation}\label{Eq:DefOEP}
+        F'_{S,t}(s)\coloneqq \mathbb{P}\left(\exists~S\text{ in the period }t\text{ s.t. }S>s\right)=\mathbb{P}\left(X_t^s\ge 1\right)\,.
+    \end{equation}
+    How does this relate to the cumulative probability of each individual event, $F^{(e)}_S$? As before, $F'_S\coloneqq F'_{S,1}$. Then, if we consider a collection of identically distributed variables for each event $\{S^k\}_{k\in \mathbb{N}}$:
+    \begin{align*}
+        1-F'_S(s)&=\sum_{k=0}^{\infty} \mathbb{P}\left(S^1\le s,\ldots, S^k\le s\right)\mathbb{P}\left(X^{H=0}=k\right)\\
+        &=\sum_{k=0}^{\infty} \mathbb{P}\left(S\le s,\ldots, S\le s\right)\mathbb{P}\left(X^{H=0}=k\right)\,.
+    \end{align*}
+Note that $X^{H=0}$ represents the number of events in the given period, no matter the intensity. Using Sklar's Theorem, there exist copulas $C_k$ such that
+    \begin{equation*}
+        1-F'_S(s)=\sum_{k=0}^{\infty} C_k\left(F^{(e)}_S(s),\ldots, F^{(e)}_S(s)\right)\mathbb{P}\left(X^{H=0}=k\right)=:G(F^{(e)}_S(s))\,.
+    \end{equation*}
+By the rectangle inequality for copulas, we know that $\tilde{C}_k(F^{(e)}_S(s))\coloneqq C_k\left(F^{(e)}_S(s),\ldots, F^{(e)}_S(s)\right)$ is non-decreasing. If, furthermore, it is strictly increasing, this is sufficient\footnote{Consider the case of $\mathbb{P}(X^{H=0}=2)=1$ and the countermonotonicity copula $C_2(u,v)=(u+v-1)_+$; then $G\mid_{[0,1]}$ is not invertible, so $F^{(e)}_S$ cannot always be recovered from $F'_S$.} to guarantee that $G$ is increasing and hence invertible. So we can recover $F^{(e)}_S=G^{-1}\circ\left(1-F'_S\right)$. For instance, if we assume independence and a Poisson process, using the Taylor series of the exponential function,
+\begin{equation}
+    1-F'_S(s) = \sum_{k=0}^{\infty}F^{(e)}_S(s)^k \frac{\lambda^k}{k!}e^{-\lambda}=e^{F^{(e)}_S(s)\lambda -\lambda}\,,
+\end{equation}
+where $\lambda\coloneqq \lambda^{H=0}$. Thus, the relation between the exceedance probability and the occurrence exceedance probability is the following:
+$$
+F^{(e)}_S(s)=\frac1\lambda\log\left(1-F'_S(s)\right)+1\,.
+$$
+Note that in this case, by \eqref{Eq:PoissonApprox},
+$$
+F'_S(s)\approx\frac1{\tau_s}\,.
+$$
+\end{remark}
+
+In the case of a point-like asset, the lookup is from spatial coordinates ($x_i$, $y_i$). A hazard map will have an associated coordinate reference system (CRS). For example, the CRS of whole-globe maps is often the WGS84 World Geodetic System (EPSG:4326). In this case ($x_i$, $y_i$) represent longitude and latitude under that CRS.
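+
+As a concrete illustration of Equation~\ref{Eq:Poisson} and \eqref{Eq:PoissonApprox}, the following Python sketch converts a set of return periods into annual occurrence exceedance probabilities under both the Poisson model and the $1/\tau$ (binomial) interpretation. This is an illustrative sketch only; the variable and function names are ours and not those of any particular implementation:
+
+\begin{verbatim}
+# Sketch: annual occurrence exceedance probabilities from return periods.
+# Compares the Poisson model P(X >= 1) = 1 - exp(-t / tau) with the
+# 1 / tau value that is exact under the binomial interpretation.
+import numpy as np
+
+return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0,
+                           100.0, 250.0, 500.0, 1000.0])
+
+def occurrence_exceedance_poisson(tau, t=1.0):
+    """P(at least one event in a period of t years), Poisson model."""
+    return 1.0 - np.exp(-t / tau)
+
+prob_poisson = occurrence_exceedance_poisson(return_periods)
+prob_binomial = 1.0 / return_periods
+
+for tau, pp, pb in zip(return_periods, prob_poisson, prob_binomial):
+    print(f"tau = {tau:6.0f} years: Poisson {pp:.5f}, 1/tau {pb:.5f}")
+\end{verbatim}
+
+For short return periods the two interpretations differ noticeably (0.39347 versus 0.5 at $\tau = 2$ years); for long return periods they agree closely, consistent with the values in Table~\ref{Table:HazardData}.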
+
+\paragraph{Effective impact.} Once $F_{S_i}$, and thereby $f_{S_i}$, is obtained, Equation~\ref{Eq:ImpactEffective} can be applied to obtain the impact distributions for each location $i$:
+
+    \begin{equation}
+    \label{Eq:ImpactMarginal2}
+    f_{D_i}(d_i) = \int_{-\infty}^{\infty} f_{S_i}(s_i) f_{D_i|S_i}(d_i, s_i) ds_i
+\end{equation}
+
+In order to aggregate impacts over a portfolio, the dependency structure must be provided; in general, such a dependency structure is specified by a copula \cite{Nelsen:2007}. This may be derived via a heuristic; for example, a `worst-case' dependency structure could be provided to obtain an upper bound, perhaps assuming 100\% correlation between impacts. In general, the approach in this section is not intended for cases where an accurate treatment of the dependence between the impacts is needed, but rather for:
+
+\begin{itemize}
+\item analyses of single assets,
+\item quick calculations intended to give an upper bound of an impact, perhaps to identify areas of particular risk, or
+\item certain calculations where inter-asset correlation is not required, e.g. expected total ground-up loss.
+\end{itemize}
+
+Sklar's theorem of copula theory states that for $n$ random variables~$(D_1, \dots, D_n)$ with joint cumulative distribution function~(CDF)~$F_D(d_1, \dots, d_n) = \mathbb{P}[D_1 \le d_1, \dots, D_n \le d_n]$, there exists a copula~\mbox{$C:[0,1]^n \rightarrow [0,1]$} such that
+\begin{equation}
+    \label{Eq:Copula}
+    F_D(d_1, \dots, d_n) = C \left( F_{D_1}(d_1), \dots, F_{D_n}(d_n) \right).
+\end{equation}
+As a reminder, $F_{D_i}(d_i) = \mathbb{P}[D_i \le d_i]$ is the marginal distribution of random variable~$D_i$, \mbox{$i=1, \dots, n$}.
+
+In the general case, Monte Carlo approaches can be used to sample from $F_D$. The approach is:
+
+\begin{enumerate}[]
+    \item Sample vector $\mathbf{u}$ from $C$;
+    \item Calculate samples for vector $\mathbf{d}$ using the relationship $d_i = F_{D_i}^{-1}(u_i)$.
+\end{enumerate}
+
+The samples can then be used to construct a total damage, for example, via $\sum_i d_i$.
+
+Under heuristic approaches, a Gaussian copula might be chosen:
+\begin{equation}
+    \label{Eq:CopulaGaussian}
+    C^{\text{Gaussian}}_K(\mathbf{u}) = \Phi_n(\Phi^{-1}(u_1), \dots, \Phi^{-1}(u_n);\mathbf{K}),
+\end{equation}
+where $\Phi(z)$ is the CDF of a standard normal variable, and $\Phi_n(\mathbf{z}; \mathbf{K})$ denotes a joint standard normal multivariate~CDF with mean zero and correlation matrix $\mathbf{K}$. Samples $\mathbf{u}$ can be obtained by the approach:
+
+\begin{enumerate}[]
+    \item Sample vector $\mathbf{z}$ of normal random numbers, correlated according to $\mathbf{K}$,
+    \item Calculate samples for vector $\mathbf{u}$ using the relationship $u_i = \Phi(z_i)$.
+\end{enumerate}
+
+As an example, if impact distributions represent damage to buildings as a result of inundation then one may attempt to model damage to two buildings in close proximity as being highly correlated. However, catastrophe model practitioners might point out that even such considerations as the presence or absence of kerb stones and availability of sand bags are highly significant, so any such assumption is prone to error. If the buildings are far apart (say in different continents) then the correlation is likely to be close to zero. The two extremes of 0\% and 100\% correlation of impacts are special cases of this general approach and it may be appropriate to run calculations with such cases to obtain an estimate of the impact of correlation -- rather than to attempt to model correlation more precisely.
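+
+To make the two-step copula sampling concrete, the sketch below draws correlated impact samples for a small portfolio using a Gaussian copula. The correlation matrix and the Beta marginals are purely illustrative assumptions; in practice the marginals $F_{D_i}$ come from the vulnerability model:
+
+\begin{verbatim}
+# Sketch: sampling correlated impacts via a Gaussian copula
+# (Equation Eq:CopulaGaussian). All inputs below are illustrative.
+import numpy as np
+from scipy import stats
+
+rng = np.random.default_rng(seed=42)
+n_assets, n_samples = 3, 100_000
+
+# Assumed copula correlation matrix K (heuristic choice).
+K = np.array([[1.0, 0.8, 0.2],
+              [0.8, 1.0, 0.2],
+              [0.2, 0.2, 1.0]])
+
+# Step 1: correlated standard normals z; uniforms u = Phi(z).
+chol = np.linalg.cholesky(K)
+z = rng.standard_normal((n_samples, n_assets)) @ chol.T
+u = stats.norm.cdf(z)
+
+# Step 2: impacts d_i = F_{D_i}^{-1}(u_i) via marginal inverse CDFs
+# (assumed Beta marginals standing in for vulnerability model output).
+marginals = [stats.beta(2, 8), stats.beta(2, 6), stats.beta(3, 9)]
+d = np.column_stack([m.ppf(u[:, i]) for i, m in enumerate(marginals)])
+
+total = d.sum(axis=1)  # e.g. total portfolio impact per sample
+print(total.mean(), np.quantile(total, 0.99))
+\end{verbatim}
+
+Setting $\mathbf{K}$ to the identity recovers the 0\% correlation extreme; the 100\% extreme corresponds to applying one common uniform sample to all marginals.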
+
+\subsubsection{Event-based approach}
+As mentioned in the previous section, where an accurate treatment of the dependence between impacts is needed, a heuristic is unlikely to be adequate. This can occur, for example, where it is required to model the occurrence of a 1-in-250-year `worst-case' event, the event being `worst-case' for a specific portfolio\footnote{In general, a `1 in 250 year' event for a portfolio is an ambiguous statement and requires the definition of a measure. For example the `1 in 250 year' \emph{ground-up loss} of a portfolio is unambiguous. For a single asset, `1 in 250 year' is unambiguous if impact is assumed to be a non-decreasing function of hazard intensity.}. In such cases, it is necessary to model the dependence of hazard intensity and vulnerability explicitly. In an event-based approach, this is achieved by calculating the impact of a large number of events. An event might, for example, be a flood or storm affecting a particular geographical region. Events can be:
+
+\begin{itemize}
+    \item historical events, or
+    \item synthetic, `plausible' events.
+\end{itemize}
+
+In the case of synthetic events, the event may itself be probabilistic; at a particular time, in a particular part of the region, the intensity of the hazard might be represented by a probability distribution. However, historical events are typically deterministic and synthetic event sets are often deterministic also; unless otherwise stated, this will be assumed to be the case.
+
+\paragraph{Relationship to hazard maps.}
+Events may be specified with respect to hazard maps; that is, the hazard data comprise both a hazard map and an event set. Typically, in such cases, the severity of an event is specified using a return period which can then be related back to the hazard map.
+
+More formally, for each event, with index $j$, a function $z_j$ is defined such that $\tau_{i, j} = z_j(x_i, y_i)$. Here $\tau_{i, j}$ is the severity of the event specified as a return period for asset index $i$ and event index $j$. $(x_i, y_i)$ are the coordinates of the asset in the relevant CRS.
+
+For the given event, $\tau_{i, j}$ is then used to look up a hazard intensity from the hazard map:
+
+    \begin{equation}
+    \label{Eq:Severity1}
+    s_{i, j} = h(x_i, y_i, \tau_{i, j})
+\end{equation}
+
+Note that here $s_{i, j}$ is deterministic \emph{for a given event} and the impact distribution for event $j$ is simply:
+
+    \begin{equation}
+    \label{Eq:Severity2}
+    f_{D_i, j}(d_i) = f_{D_i|S_i}(d_i, s_{i, j})
+\end{equation}
+
+For each event and each asset, we can then draw $m$ samples from $f_{D_i, j}(d_i)$. The samples across all assets for a single event are drawn jointly using a copula as per Equation~\ref{Eq:Copula} to capture the dependence structure of impacts. Such an approach can be important if levels of damage sustained under a given hazard intensity are correlated between assets, which might be the case if the assets are similar in construction. However, by default samples are assumed to be independent.
That is, the approach is:
+
+For each of $n$ events, and for each of $m$ samples:
+\begin{enumerate}[]
+    \item sample vector $\mathbf{u}$ of uncorrelated random numbers $\mathbf{U} \stackrel{iid}{\sim} U[0, 1]$;
+    \item calculate vector $\mathbf{d}$ using the relationship $d_i = F_{D_i, j}^{-1}(u_i)$.
+\end{enumerate}
+
+The resulting $n \times m$ sets of impacts for the portfolio of assets are the inputs to the financial model.
+
+\paragraph{Constant severity regions.}
+Under a constant severity region approximation, $\tau_{i, j}$ is constant across each asset $i$ for a particular event $j$.
+
+
+\subsubsection{Discrete form of acute vulnerability model}
+\label{Sec:MathematicalDescriptionOfAssetImpactModel}
+
+The continuous forms of expressions for the impact distribution are given by Equation~\ref{Eq:ImpactMarginal2} and Equation~\ref{Eq:Severity2}, the latter being the special case of a deterministic hazard -- generally encountered for one specific event. Here a discrete version of Equation~\ref{Eq:ImpactMarginal2} is defined.
+
+There are $n_h$ intensity bins with index $q$ such that $q \in \{1, \dots, n_h \}$. $\sigma^{(h)}_q$ is defined to be the probability that a hazard event of type $h$ occurs in a given year with an intensity that falls in bin $q$. With random variable $S^{(h)}$, now with superscript, being the intensity of hazard type $h$:
+
+\begin{equation}
+    \label{Eq:Discrete1}
+    \sigma^{(h)}_q = \mathbb{P} \left[ s^{(h, \text{lower})}_q < S^{(h)} \le s^{(h, \text{upper})}_q \right]
+\end{equation}
+
+That is, $s^{(h, \text{lower})}_q$ and $s^{(h, \text{upper})}_q$ define the range of bin $q$. For the avoidance of doubt, $\sigma^{(h)}_q$ is related to the continuous probability density $f_S(s)$ by:
+
+\begin{equation}
+    \label{Eq:Discrete2}
+    \sigma^{(h)}_q = \int_{s^{(h, \text{lower})}_q}^{s^{(h, \text{upper})}_q} f_S(s) ds
+\end{equation}
+
+We define $v^{(h, b)}_{pq}$ to be the conditional probability that, \emph{given} the occurrence of an event associated with a hazard of type $h$ and with intensity $s^{(h)} \in (s^{(h, \text{lower})}_q, s^{(h, \text{upper})}_q]$, there is an impact of type $b$, $d^{(b)} \in (d^{(b,\text{lower})}_p, d^{(b,\text{upper})}_p]$. $b$ may be, for example, damage incurred expressed as a fraction of the asset present value.
+
+\begin{equation}
+    \label{Eq:vulnerability}
+    v^{(h, b)}_{pq} = \mathbb{P} \left[ d^{(b,\text{lower})}_p < D^{(b)} \le d^{(b,\text{upper})}_p | s^{(h, \text{lower})}_q < S^{(h)} \le s^{(h, \text{upper})}_q \right]
+\end{equation}
+
+The definition of an event type $h$ includes a time interval, e.g. $h$ is the occurrence of an inundation in the locale of the asset {\it within a one year period}. $b$ is, for example, the fractional damage to the asset.
+
+$\delta^{(h,b)}_p$ is defined to be the marginal probability of an impact $D^{(h, b)}$ in the range $d^{(b, \text{lower})}_p < D^{(h, b)} \le d^{(b,\text{upper})}_p$ occurring as a result of an event of type $h$.
+
+\begin{equation}
+    \label{Eq:impact}
+    \delta^{(h, b)}_p = \mathbb{P} \left[ d^{(b,\text{lower})}_p < D^{(h, b)} \le d^{(b,\text{upper})}_p \right]
+\end{equation}
+
+From the definition of conditional probability:
+
+\begin{equation}
+    \label{Eq:model}
+    \delta^{(h, b)}_p = \sum_{q} v^{(h,b)}_{pq} \sigma^{(h)}_q
+\end{equation}
+
+If only the mean impact curve is available, then it is possible to create the matrix such that $v_{pq} \in \{0, 1\}$. The matrix then provides a simple mapping from intensity to impact; if the number of intensity and response bins is equal then matrix $\mathbf{v}$ is simply the identity matrix. However, note that these simplifications exclude from the model any uncertainty in the parameters\footnote{A better approach would be to estimate the standard deviation of the distributions from which the mean impact curve was calculated and to incorporate this.}.
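+
+The discrete model above reduces, in code, to a matrix-vector product. The following sketch computes the impact histogram of Equation~\ref{Eq:model}; the bin probabilities and vulnerability matrix are illustrative numbers only and would in practice come from hazard and vulnerability data:
+
+\begin{verbatim}
+# Sketch of Equation Eq:model: delta_p = sum_q v_pq sigma_q.
+# sigma[q]: probability of an event in intensity bin q in a given year.
+# v[p, q]:  conditional probability of impact bin p given intensity bin q
+#           (each column sums to 1). Values are illustrative only.
+import numpy as np
+
+sigma = np.array([0.02, 0.01, 0.006, 0.004])
+v = np.array([[0.8, 0.3, 0.1, 0.0],
+              [0.2, 0.5, 0.4, 0.2],
+              [0.0, 0.2, 0.5, 0.8]])
+
+delta = v @ sigma  # marginal impact bin probabilities
+
+# Since columns of v sum to 1, no event probability is lost:
+assert np.isclose(delta.sum(), sigma.sum())
+print(delta)
+\end{verbatim}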
+
+\paragraph{Multiple occurrence of events.} Note that $\sigma^{(h)}_q$ is the probability of occurrence of \emph{at least one event} with intensity in bin $q$ in a year and that the vulnerability, $v_{pq}$, gives the probability of impact given that at least one event has occurred. Some care must therefore be taken when using probabilities $v_{pq}$ calibrated from single events, as there is an implied approximation that either the probability of multiple events is small and/or that the impact is well-modelled as a single impact from the most intense event for a given year.
+
+
+\subsubsection{Importance of secondary uncertainty}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{figure}[ht]
+
+    \begin{framed}
+
+    \includegraphics[width=\textwidth]{plots/fig_intensity.pdf}
+
+    \end{framed}
+
+    \footnotesize
+
+    \renewcommand{\arraystretch}{1.01}
+
+    \vspace{-3ex}
+
+    {\justify
+    The exceedance curve of event intensity at the asset location is shown on the right. The event intensity in this example is inundation depth in metres. Exceedance is a cumulative probability. As an example, the probability of an inundation event occurring within a single year of intensity 0.91m or greater is 0.2\%. An exceedance probability is the reciprocal of the return period; it could equivalently be said that the 0.91m intensity event occurs with a return period of 500 years.
+    The exceedance curve can be converted to a histogram of probabilities. Here the $n_h$ bins have ranges $(s^{(h, \text{lower})}_q, s^{(h, \text{upper})}_q]$. For example, the first bin has range (0.28m, 0.38m]. The second bin has range (0.38m, 0.43m]; that is $s^{(h, \text{lower})}_2 = 0.38$m and $s^{(h, \text{upper})}_2 = 0.43$m. $\sigma^{(h)}_2 = 0.02$.
+    \par}
+
+    \vspace{-0.5ex}
+
+    \caption{\small Event intensity exceedance curve (right) and corresponding histogram (left).}
+    \label{Fig:intensity}
+
+\end{figure}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+The importance of the vulnerability matrix, as opposed to the mean damage curve (or vector), is emphasized above; see also \cite{Taylor:2015} for a discussion of this point. This is true not only in cases where the underlying distribution of an impact, for example a fractional damage, can be inferred from empirical data (see for example Figure~\ref{Fig:vulnerability_matrix}); it is arguably \emph{more} important where data is limited, so that approximate data can be incorporated into the model in a way that allows the impact of the approximations to be well understood.
+
+Vulnerability data may be provided by:
+\begin{itemize}
+    \item modelling of asset vulnerability based on asset characteristics and/or historical data, or
+    \item `calibrated' vulnerabilities, for example based on realized insurance claims.
+\end{itemize}
+Physical risk models may make use of so-called `bulk assessment' approaches for certain assets, where precise vulnerability information is not available and less precise estimates of the damage/disruption of the asset are used.
The presence of such estimates in an overall model may, or may not, materially impact the accuracy of the results, but it is important that this impact can be assessed. By quantifying the uncertainty in the response estimates, a distribution of financial losses is ultimately obtained from which the model user can derive the impact of the approximation.
+
+\paragraph{Including epistemic uncertainty in vulnerability.}
+In forms of bulk assessment, and indeed in other cases, a common occurrence is that insufficient information exists with which to characterize an asset. This is an example of an epistemic, as opposed to aleatory, uncertainty. The epistemic uncertainty, and its impact, can be included in the model in the following way.
+
+We extend Equation~\ref{Eq:vulnerability} by including a new discrete random variable, $A$, which is an integer index, $A \in \{0, \dots, n_a\}$, indicating the type of the asset. We define $v^{(h, b)}_{pqa}$ to be the probability that $D^{(h, b)} \in (d^{(b,\text{lower})}_p, d^{(b,\text{upper})}_p]$ given that $S^{(h)} \in (s^{(h, \text{lower})}_q, s^{(h, \text{upper})}_q]$ \emph{and} the asset in question is of type $a$:
+\begin{equation}
+    \label{Eq:epistemic1}
+    v^{(h, b)}_{pqa} = \mathbb{P} \left[ D^{(h, b)} \in (d^{(b,\text{lower})}_p, d^{(b,\text{upper})}_p] | S^{(h)} \in (s^{(h, \text{lower})}_q, s^{(h, \text{upper})}_q], A = a \right]
+\end{equation}
+
+The conditional probabilities can then be combined:
+\begin{equation}
+    \label{Eq:epistemic2}
+    v^{(h, b)}_{pq} = \sum_a v^{(h, b)}_{pqa} \mathbb{P}[A = a]
+\end{equation}
+
+$\mathbb{P}[A = a]$ is the \emph{prior} probability that the asset is of type $a$. This may be obtained from knowledge of the make-up of a portfolio.
+
+Note that through the application of Equation~\ref{Eq:model} the impact distribution now depends on the prior probabilities. To illustrate why it is reasonable that this should be the case, say we have two types of assets in our portfolio. Type A is vulnerable to a hazard with an intensity that has a relatively short return period of 50 years, whereas type B is invulnerable to all hazards but those with a vanishingly small probability of occurrence. To derive the probability that an asset is damaged by a certain amount in a given year using Equation~\ref{Eq:epistemic2}, we must allow for the possibility that the asset may be of type A and may therefore be damaged as a result of 50-year events.
+
+\paragraph{Epistemic uncertainty as source of error.} An alternative approach is to treat epistemic uncertainty as a source of error rather than, or in addition to, including it in the vulnerability as we have done here. This might be driven by the observation that, as information as to the identity of assets improves, the exceedance probability of a certain impact will change. This can be achieved by running an ensemble of calculations, changing the prior probabilities in each case.
+
+\subsubsection{Interpolation of probability distributions}
+Cases arise where the event distributions and vulnerability distributions are not defined for a common set of intensity bins and interpolation is therefore required. The question then arises of how probability density is distributed within bins. The choice is model-specific and customizable, but here two common cases are described.
+
+\begin{itemize}
+    \item Probability density constant across a bin: linear interpolation of the cumulative probability function
+    \item Probability density changing linearly across a bin: quadratic interpolation of the cumulative probability function
+\end{itemize}
+
+{\textcolor{red}{\emph{[Add equations and example plots here]}}}
+
+Hazard data sets might also contain instances of `point-probabilities', for example where there is a finite probability that the intensity of an event takes a single value. These represent Dirac delta functions in the probability distribution, steps in the cumulative probability function. There is the option of retaining these as delta functions (bins of zero width), but in some cases it may be necessary to make assumptions about how the probability might be distributed across a bin.
+
+{\textcolor{red}{\emph{[Add equations and plot of step-CDF with interpolation; exemplify by `damage threshold']}}}
+
+\subsubsection{Probability bins from hazard maps}
+
+From Equation~\ref{Eq:Discrete2}, the probability of an event occurring with hazard intensity in bin $q$ is expressed in terms of the probability density $f_S$ (dropping superscript $h$ for clarity):
+
+\begin{equation}
+    \label{Eq:Discrete2Again}
+    \sigma_q = \int_{s^{(\text{lower})}_q}^{s^{(\text{upper})}_q} f_S(u) du
+    = \int_{s^{(\text{lower})}_q}^{\infty} f_S(u) du - \int_{s^{(\text{upper})}_q}^{\infty} f_S(u) du
+\end{equation}
+
+The exceedance probability $F'_S$ is defined as:
+
+\begin{equation}
+    \label{Eq:DiscreteExceed}
+    F'_S(s) = \int_s^{\infty} f_S(u) du
+\end{equation}
+
+from which we can write:
+
+\begin{equation}
+    \label{Eq:DiscreteExceed2}
+    \sigma_q = F'_S({s^{(\text{lower})}_q}) - F'_S({s^{(\text{upper})}_q})
+\end{equation}
+
+Using Equation~\ref{Eq:DiscreteExceed2}, a set of probability bins for the hazard event can be inferred from an exceedance probability curve. An exceedance probability curve can readily be inferred from a return-period curve using the result that the annual exceedance probability is the reciprocal of the return period expressed in years.
+
+As an example, suppose that we have a hazard map for flood which contains return periods of 2, 5, 10, 25, 50, 100, 250, 500 and 1000 years. For a certain latitude/longitude the flood depths corresponding to the 9 return periods are, in metres: 0.06, 0.33, 0.51, 0.72, 0.86, 1.00, 1.15, 1.16 and 1.16. The data is shown together with the exceedance probability in Table~\ref{Table:HazardData}.
+
+\begin{table}[ht]
+    \caption{Example hazard event data. The exceedance probability calculated via $1 / \tau$ is provided as well as the value assuming a Poisson distribution. There is a significant difference for short return periods only.}
+    \centering
+    \begin{tabular}{c c c c}
+        \hline
+        Return period, $\tau$ (years) & Flood depth (m) & $F'_S(s) = 1/\tau$ & Poisson $F'_S(s)$ \\ [0.5ex]
+        \hline
+        2 & 0.06 & 0.5 & 0.39347 \\
+        5 & 0.33 & 0.2 & 0.18127 \\
+        10 & 0.51 & 0.1 & 0.09516 \\
+        25 & 0.72 & 0.04 & 0.03921 \\
+        50 & 0.86 & 0.02 & 0.01980 \\
+        100 & 1.00 & 0.01 & 0.00995 \\
+        250 & 1.15 & 0.004 & 0.00399 \\
+        500 & 1.16 & 0.002 & 0.00199 \\
+        1000 & 1.16 & 0.001 & 0.00099 \\
+        \hline
+    \end{tabular}
+    \label{Table:HazardData}
+\end{table}
+
+The flood depths become the bin edges of the probability distribution and the probabilities are calculated from Equation~\ref{Eq:DiscreteExceed2}.
+\begin{remark}
+    As before, $F'_S$ is the \emph{occurrence} exceedance probability.
+\begin{remark}
+    As before, $F'_S$ is the \emph{occurrence} exceedance probability. Similarly to Remark \ref{rem:OEPvsFS}, for $u>l$,
+    \begin{align*}
+        \mathbb{P}\left(\exists~S~/~S\in(l,u]\right)&=\mathbb{P}\left(\exists~S~/~S> l\right) - \mathbb{P}\left(\exists~S~/~S> u\right) \\
+        &=
+        \mathbb{P}\left(X^l\ge 1\right) - \mathbb{P}\left(X^u\ge 1\right) =
+        F'_S(l)-F'_S(u)\,.
+    \end{align*}
+\end{remark}
+For example, the probability of occurrence of a flood with depth in the range (0.86m, 1.00m] is $0.02 - 0.01 = 0.01$\footnote{Care is needed at either end of the curve. There is a 0.001 probability that flood depth exceeds 1.16m in this example; should this be included in the (point-like) 1.16m bin?}. Note that in defining a set of bins in this way, no assumption about the interpolation between the flood depths is required. However, if we assume the interpolation to be linear, this implies that the probability density is constant across each bin, since $f_S = \frac{dF_S(s)}{ds}$.
+
+
+\subsubsection{Vulnerability distributions and heuristics}
+For some assets, a vulnerability matrix may be available, corresponding to the `ideal' case of Figure~\ref{Fig:vulnerability_matrix}. That is, for each intensity value a probability distribution of the impact (damage/disruption) is given. In other cases, only the impact itself is available for a given hazard intensity -- a damage curve -- together with some measure of uncertainty. Here the probability distribution is unknown, but it is at least possible to fit some choice of distribution to the descriptive statistics (e.g. mean and standard deviation).
+
+Given that the distribution of fractional impact -- e.g. damage as a fraction of asset value or disruption as a fraction of output capacity -- is in the range (0, 1), heuristic choices of distributions include Beta and truncated Gaussian \cite{MitchellEtAl:2017}. It should be emphasized that neither distribution is likely to be correct, especially in lacking multi-modality and fat tails, but they provide a method by which uncertainty in the impact can be taken into account.
+
+\paragraph{Modelling impact using a Beta distribution.}
+The cumulative probability function of a Beta distribution is given by:
+
+\begin{equation}
+    \label{Eq:Beta1}
+    F_{\text{Beta}}(x) = \frac{B(x; a, b)}{B(1; a, b)} ,
+\end{equation}
+
+where $0 < x < 1$ and $B(x; a, b)$ is the incomplete Beta function:
+
+\begin{equation}
+    \label{Eq:IncompleteBeta}
+    B(x; a, b) = \int_0^{x} t^{a-1} (1 - t)^{b-1} dt .
+\end{equation}
+
+If the mean, $\mu$, and standard deviation, $\sigma$, of the impact distribution are known, then \cite{MitchellEtAl:2017}:
+
+\begin{equation}
+    \label{Eq:BetaA}
+    a = \frac{(1 - \mu)}{c^2} - \mu ,
+\end{equation}
+
+\begin{equation}
+    \label{Eq:BetaB}
+    b = \frac{a(1 - \mu)}{\mu}
+\end{equation}
+
+where
+
+\begin{equation}
+    \label{Eq:BetaC}
+    c = \frac{\sigma}{\mu}
+\end{equation}
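+As an illustration, the sketch below (our own, not the \emph{physrisk} API; SciPy is assumed available) fits a Beta distribution to a given mean and standard deviation using Equations~\ref{Eq:BetaA}--\ref{Eq:BetaC} and evaluates impact-bin probabilities from its cumulative distribution function, as used in Equation~\ref{Eq:BetaVuln} below:
+\begin{verbatim}
+import numpy as np
+from scipy.stats import beta as beta_dist
+
+def beta_params(mean, std):
+    """Moment-matched Beta parameters per Equations BetaA-BetaC."""
+    c = std / mean
+    a = (1.0 - mean) / c**2 - mean
+    b = a * (1.0 - mean) / mean
+    return a, b
+
+# Example: mean damage fraction 0.2 with standard deviation 0.1
+# gives a = 3, b = 12 (mean 0.2 and variance 0.01, as required).
+a, b = beta_params(0.2, 0.1)
+
+# Probability that the impact falls in each bin (d_lower, d_upper].
+d_edges = np.linspace(0.0, 1.0, 11)
+bin_probs = np.diff(beta_dist.cdf(d_edges, a, b))
+\end{verbatim}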
+In order to calculate the impact, the set of bins $p$ defining the impact probabilities $\delta_p$ of interest is first defined. The vulnerability matrix $v_{pq}$ of Equation~\ref{Eq:vulnerability} is then calculated. In order to apply Equation~\ref{Eq:Beta1}, $a$ and $b$ are calculated using the mean and standard deviation of the impact at the mid-point of each intensity bin. We then have:
+
+\begin{equation}
+    \label{Eq:BetaVuln}
+    v_{pq} = F_{\text{Beta}}(d_p^{(\text{upper})}; a_q, b_q) - F_{\text{Beta}}(d_p^{(\text{lower})}; a_q, b_q)
+\end{equation}
+
+$a_q$ and $b_q$ are the values of $a$ and $b$ calculated from the mean and standard deviation at the intensity bin centre $s_q = \frac{s_q^{(\text{lower})} + s_q^{(\text{upper})}}{2}$.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{figure}[ht]
+
+    \begin{framed}
+
+    \includegraphics[width=\textwidth]{plots/vulnerability_lagace_2008.png}
+
+    \end{framed}
+
+    \footnotesize
+
+    \renewcommand{\arraystretch}{1.01}
+
+    \vspace{-3ex}
+
+% {\justify
+% Taken from
+% \par}
+
+    \vspace{-0.5ex}
+
+    \caption{\small Taken from Lagacé (2008) Catastrophe Modeling, Université Laval. Mean damage curve as an approximation to an underlying set of distributions, modelled using a vulnerability matrix.}
+    \label{Fig:vulnerability_matrix}
+
+\end{figure}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+\subsection{Aggregation of impacts}
+For impacts of the same type, $b$, arising from different events, it is assumed that the impacts are additive, up to a ceiling value\footnote{This approximation is only strictly valid for sufficiently small impacts; consider the contrived example of 0.8 fractional damage that occurs from both flood and high wind in the same year.}. If the annual impacts from events with index 1 and 2 are represented by random variables, $Y^{(1,b)}$, $Y^{(2,b)}$, then $Y^{(\text{tot}, b)} = Y^{(1,b)} + Y^{(2,b)}$.
+
+If the random variables are independent, then the aggregated effective impact distribution is given by the convolution:
+
+\begin{equation}
+    \label{Eq:sampling}
+    y^{(\text{tot}, b)}(r) = \int^{\infty}_{-\infty} y^{(1, b)}(t) y^{(2, b)}(r - t) dt
+\end{equation}
+
+where $y^{(1, b)}$, $y^{(2, b)}$ and $y^{(\text{tot}, b)}$ denote the probability density functions of the corresponding random variables.
+
+{\textcolor{red}{\emph{[Add version with discrete binned data.]}}}
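+For binned data, the convolution can be performed discretely. The sketch below (illustrative only, assuming a common set of equally spaced impact bins and independent annual impacts) aggregates two binned impact distributions with \texttt{numpy}:
+\begin{verbatim}
+import numpy as np
+
+# Probabilities per impact bin for two independent annual
+# impact distributions on a shared, equally spaced binning.
+p1 = np.array([0.90, 0.06, 0.03, 0.01])
+p2 = np.array([0.95, 0.03, 0.015, 0.005])
+
+# Discrete analogue of the convolution: bin k of the aggregate
+# collects sum over i + j = k of p1[i] * p2[j].
+p_tot = np.convolve(p1, p2)
+
+# In practice the upper bins would be truncated at the ceiling value.
+assert abs(p_tot.sum() - 1.0) < 1e-12  # still a probability distribution
+\end{verbatim}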
+\subsection{Financial model}
+
+\subsubsection{Types of calculation.}
+Vulnerability models were described in \ref{SubSec:VulnerabilityModel} as a means of calculating the damage and disruption to a portfolio of assets under different hazard models. How the resulting probability distributions of impact are then used in a financial model depends on the intent of the analysis. We distinguish between \emph{stress test} and \emph{portfolio analysis} use cases.
+
+\emph{Stress tests} make use of climate-related shocks -- severe acute events -- in order to assess an impact. This might for example be the impact on a financial institution. Stress tests may make use of narrative scenarios or may take the approach of simulating a large number of extreme but plausible events in order to identify a worst case which is \emph{a priori} unknown. Indeed the two approaches may be combined, assessing the impact of a severe acute event coupled with other aggravating factors.
+
+In a \emph{portfolio analysis}, the aim is to assess which parts of a portfolio of assets are most subject to physical climate risk.
+
+\subsubsection{Climate risk measures and importance of change.}
+Climate risk models generally differ from catastrophe models in more than their use of climate-conditioned and projected hazard models. In particular, physical climate risk arises from \emph{changes} in climate.
+
+We take as an example a real estate asset that is collateral for a loan. The hypothetical asset is exposed to both hurricane and flood hazards today; however, it is covered by an insurance policy and its current valuation takes into account both the historical occurrence of hazards and the policy details. Risk managers concerned with climate risk are likely to be interested in potential changes in value of the asset over the full term of the loan:
+
+\begin{enumerate}
+    \item if acute events become more frequent and severe, assets may become more expensive to insure and less desirable {\textcolor{red}{\emph{[Citation]}}} \footnote{For example, to a resident of a house subject to regular flooding.}, which can decrease the asset value;
+    \item if an extremely severe and widespread event occurs, default may lead to a distressed sale and/or the full insurance payout may not be made.
+\end{enumerate}
+
+For a portfolio analysis, it is therefore likely to be the change in frequency of events and the change in probability of extremely severe events that are of particular interest; these are the risk drivers.
+
+\paragraph{Definition of measures.} For a portfolio, distributions of annual impacts, typically damage and disruption, can be aggregated and converted into distributions of change in portfolio value in a given reporting currency. Measures describing the loss of value are:
+
+\begin{enumerate}
+    \item Annual Exceedance Probability (AEP): the probability that in a given year the aggregated losses of a portfolio will exceed a certain value.
+    \item Average Annual Loss (AAL): the mean value of the aggregated losses of a portfolio in a given year.
+\end{enumerate}
+
+From the argument above, a change in AEP for moderate and severe loss cases can be a useful indicator of risk. The change is from the historical baseline to a climate-conditioned projection.
+
+\subsubsection{Structural models of credit risk}
+Changes in asset value can be used to model changes in the credit quality of market participants. Financial risk modules for physical risk may then use distributions of asset value changes in order to model changes of credit quality over time as a result of climate change, for example estimates of default probability and loss given default.
+
+The intention of this section is not to specify any particular model, but rather to give a brief introduction. Of particular interest is the question of which inputs credit risk models require.
+
+For medium and large cap firms, a credit default event typically occurs when a firm is not able to meet its debt servicing obligations. Under an important class of credit risk models called `structural models', it is assumed that a default event occurs for a firm when its assets are sufficiently low compared to its liabilities.
+
+A number of different structural models exist which make various assumptions about how a firm's assets change over time, how its capital is structured and the nature of its debt.
+
+The earliest structural model was described by Merton in 1974 \cite{Merton:1974} based on an extremely simple debt structure. Black and Cox \cite{BlackCox:1976} introduced an important refinement to the Merton model in 1976. Practical implementations were subsequently created as a result of this foundational work. A notable one of these is the `KMV' model, named after Kealhofer, McQuown and Vasicek, now owned by Moody's Investors Service, Inc.
+
+Use of such credit models may provide a mechanism for the incorporation of physical risk into financial institutions' existing risk models \cite{KenyonEtAl:2021}.
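+As a minimal illustration of the structural approach (a sketch of the original Merton model only -- not the KMV methodology, nor any \emph{physrisk} implementation), the snippet below computes a default probability from a firm's asset value, asset volatility and a debt threshold; a climate-adjusted asset value could be substituted to study the sensitivity of credit quality to physical risk. All parameter values are hypothetical.
+\begin{verbatim}
+from math import log, sqrt
+from statistics import NormalDist
+
+def merton_default_probability(assets, debt, mu, sigma, horizon):
+    """P(asset value < debt at horizon) for lognormal asset dynamics.
+
+    assets: current firm asset value; debt: default threshold;
+    mu: asset drift; sigma: asset volatility; horizon: years.
+    """
+    d = (log(assets / debt) + (mu - 0.5 * sigma**2) * horizon) \
+        / (sigma * sqrt(horizon))
+    return NormalDist().cdf(-d)
+
+# Example: a 5% climate-related fall in asset value increases the PD.
+pd_base = merton_default_probability(100.0, 70.0, 0.05, 0.25, 1.0)
+pd_stressed = merton_default_probability(95.0, 70.0, 0.05, 0.25, 1.0)
+\end{verbatim}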
+
+\subsection{Uncertainties in the calculation}
+
+\subsection{Model limitations}
+
+\begin{enumerate}
+    \item Spatial correlation of events: to what extent possible without MC calculation; to what extent is provided / can be inferred from data sets
+    \item Correlation of vulnerability
+    \item Data availability
+\end{enumerate}
+
+
+\subsubsection{Data availability}
+Issues related to data availability and relevance remain among the main limitations of physical risk assessments. While past and future climate data are becoming increasingly available through open-source portals and tools (e.g. Copernicus, WRI Aqueduct), their availability and reliability vary widely according to the climate hazard of interest, the region and the modelling process. And while the availability of climate data is improving, open-source, asset-level information (required to estimate the exposure of an asset to a given climate hazard) is still seldom available. Such data include the location of assets, their link with owning companies and, more generally, any damage records that could be used to quantify the response of an asset (or of a type of asset) to a given climate event. Datasets have recently been published for some sectors but their exhaustiveness remains to be verified. Moreover, many industrial sectors are not covered, thus limiting the application of physical risk methodologies to a diversified portfolio.
+Finally, building and applying the correlation between hazard and damage (or impact), as described in section 2.2, requires that historical events, historical damages and future climate events be described by comparable distributions. In a changing climate, assets and activities will be impacted by more intense events that will not have been experienced either in a given region of the world or even on the
+whole globe, leading to a potentially large mismatch between historical and future distributions of events. The interpolation of the damage curve, as described in section 2.2.3, might lead to very high uncertainties that need to be taken into account when interpreting the results.
+
+
+
+%\section{Hazard and vulnerability models}
+
+\section{Inundation}
+\subsection{Hazard models}
+Inundation is modelled as an acute risk using the approach of Section~\ref{Sec:MathematicalDescriptionOfAssetImpactModel}. Hazard event models compatible with this method provide inundation depths for different annual probabilities of occurrence -- or equivalently return periods. The need for sufficient granularity in the set of return periods is discussed in \cite{WardEtAl:2011}.
+
+Inundation comprises three types:
+\begin{itemize}
+\item Coastal
+\item Pluvial
+\item Riverine (also known as fluvial)
+\end{itemize}
+
+The World Resources Institute (WRI) Aqueduct flood model \cite{WardEtAl:2020} is a public-domain global flood model, comprising coastal and riverine inundation, which has relatively high return-period granularity. The data set is based on the modelling approach of \cite{WardEtAl:2013}. The WRI data set has a spatial resolution of approximately 1 km at the equator.
+
+Higher resolution public-domain sets are available, albeit the ones we are aware of -- and have on-boarded into OS-Climate -- cover limited geographical regions. TU Delft provide a pan-European set \cite{PaprotnyEtAl:2016} with a 100 m resolution, covering riverine and coastal hazards.
+
+Some commercial flood models provide global coastal, pluvial and riverine hazard maps and event sets with resolution sufficient to model street-level inundation
(approximately 5 m resolution or finer). OS-Climate provides connectors for certain model providers, to facilitate the integration of commercial models alongside public-domain ones.
+
+{\textcolor{red}{\emph{[Discuss and include refs for approaches based on flooded area?]}}}
+
+\subsection{Vulnerability models}
+
+\subsubsection{Real estate assets}
+Notable damage models for real estate assets include the FEMA FAST `HAZUS' model \cite{ScawthornEtAl:2006} and a European Commission Joint Research Centre (JRC) model \cite{HuizingaEtAl:2017}. The latter is implemented in the \emph{physrisk} library.
+
+\subsubsection{Thermal power generation projects}
+When flood water inundates a thermal power plant, operations are disrupted due to infrastructure damage and/or mechanical failure. The flood module described in \cite{LuoEtAl:2021,LuoEtAl:2023} is implemented in the \emph{physrisk} library. The inputs are:
+\begin{itemize}
+\item The highest flood water depth per return period due to either coastal or riverine inundation in a buffer zone with a radius of 0.01 arc degrees around the geolocation of the generation unit. This allows the derivation of:
+\begin{itemize}
+\item a discrete exceedance probability curve for flood water depth in a climate-conditioned projection;
+\item a designed flood protection level, defined as the flood water depth interpolated from a baseline for a return period equal to $10000$ years for nuclear plants and $250$ years otherwise. Setting the baseline to the historical scenario did not provide satisfactory results. Hence we opt instead for the year $2030$ associated with the selected climate-conditioned projection.
+\end{itemize}
+\item The disruption curve estimating the number of inoperable days in a year from flood water depth, provided that the depth exceeds the designed flood protection level. It depends on the power production technology, more precisely on the combination of generation technology (gas or steam) and cooling system (dry, recirculating or once-through). The maximum number of inoperable days interpolated for a given flood water depth across potential disruption curves is chosen when turbine and/or cooling system is not provided as an asset archetype.
+\end{itemize}
+The output is a discrete disruption probability curve due to the assessed flood risk, that is, either riverine or coastal inundation. It provides the probability in a future projection that the operations of the generation unit are disrupted for a given period expressed as a year fraction.
+
+\section{Drought}
+
+\subsection{Hazard models}
+Drought, a significant global natural hazard, leads to substantial economic, environmental, and human distress. Numerous indices have been developed to measure drought intensity. In this framework, the standardized precipitation evapotranspiration index (SPEI) is used to assess meteorological drought. SPEI is a multi-scalar index designed to take into account both precipitation and potential evapotranspiration, as detailed in \cite{VicenteSerranoEtAl:2010,BegueriaEtAl:2014}. Based on this index, drought is modelled as a chronic risk using the number of months per year that the SPEI, calculated on a specific timescale (3 or 12 months), is below a given threshold.
+The SPEI is computed using temperature and precipitation from General Circulation Models under various Representative Concentration Pathways (e.g., RCP 8.5).
Potential evapotranspiration (PET) is estimated from daily surface temperature according to \cite{TanguyEtAl:2018} using the xclim climate services library. For the scope of this methodology, the 12-month SPEI has been considered, following \cite{MarziEtAl:2021,AriasEtAl:2020,PoljansekEtAl:2022}. The SPEI at each grid point is calculated for the calibration period 1985--2015 and projected over the period 2015--2100, again using xclim.
+
+In the literature, an SPEI below $-1.5$ is often considered to indicate severe to extreme drought \cite{SmirnovEtAl:2016}. In this framework, the drought index is measured as the number of months per year in which the 12-month rolling average of the SPEI is below a set of thresholds ranging from $-3.6$ to $0$, following \cite{LuoEtAl:2021}. Finally, the drought index at each grid point is calculated for the periods centred on 2005 (1995--2015), 2030 (2020--2040), 2040 (2030--2050), 2050 (2040--2060) and 2080 (2070--2090), averaged over 20 years.
+
+%Drought is modelled as a chronic risk using the number of months per year that the Standardised Precipitation-Evotranspiration Index (SPEI) calculated on a specific timescale (3 or 12 months) is below a given threshold. The SPEI is designed to take into account both precipitation and potential evapotranspiration in determining drought as detailed in \cite{VicenteSerranoEtAl:2010}. This allows to capture the main impact of increased temperatures on water demand.
+
+\subsection{Vulnerability models}
+
+\subsubsection{Thermal power generation projects}
+Water levels in rivers and/or reservoirs are low during droughts. Wet-cooled plants might lose access to cooling water or lack sufficient flow rates for cooling. The drought module described in \cite{LuoEtAl:2021,LuoEtAl:2023} is implemented in the \emph{physrisk} library. It excludes temperature effects, which are taken into account in a separate module. The inputs are:
+\begin{itemize}
+\item The number of months per year that the 12-month rolling average of SPEI at the geolocation of the generation unit is below a discrete set of thresholds ranging from $-3.6$ to $0$. An alternative hazard indicator provides only the number of months per year that the 3-month rolling average of SPEI is below $-2$. When relying on this alternative hazard indicator, we estimate the number of months per year that the 3-month rolling average of SPEI is below a threshold $x$ lower than $-2$ as
+$$
+\left(\text{estimated \#months/year with 3-month SPEI $<-2$}\right)\left(\frac{\Phi_{\mathcal{N}\left(0,1\right)}\left(x\right)}{\Phi_{\mathcal{N}\left(0,1\right)}\left(-2\right)}\right)
+$$
+where $\Phi_{\mathcal{N}\left(0,1\right)}$ is the cumulative distribution function of a standard normal random variable $\mathcal{N}\left(0,1\right)$; a numerical sketch of this scaling is given at the end of this subsection.
+\item The SPEI-disruption curve estimating the fraction of inoperable capacity with respect to the 12-month rolling average of SPEI. It applies to steam turbines with once-through or recirculating cooling systems. The largest inoperable capacity ratio interpolated for a given SPEI threshold across potential disruption curves is chosen when turbine and/or cooling system is not provided as an asset archetype.
+\end{itemize}
+The output is a discrete disruption probability curve due to drought risks. It provides the probability in a future projection that the operations of the generation unit are disrupted for a given period expressed as a year fraction.
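+The SPEI threshold scaling above can be illustrated as follows (a sketch with hypothetical numbers; SciPy is assumed):
+\begin{verbatim}
+from scipy.stats import norm
+
+def months_below_threshold(months_below_minus2, x):
+    """Estimate #months/year with 3-month SPEI below x (x < -2) by
+    scaling the observed count by the ratio of standard normal CDFs."""
+    return months_below_minus2 * norm.cdf(x) / norm.cdf(-2.0)
+
+# Hypothetical: 1.2 months/year below -2 implies roughly
+# 0.07 months/year below -3.
+est = months_below_threshold(1.2, -3.0)
+\end{verbatim}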
+
+\section{Heat}
+
+Heat is classified as both a chronic and an acute hazard. For example, increased average temperature in a particular area can lower average productivity from labour or make the area less desirable as a place to live, lowering real estate prices. We classify these as risks from chronic hazards. Heat waves are examples of acute hazard events; a period of particularly high temperature might lead to the complete suspension of industrial activity.
+
+Multiple indices for quantifying heat hazards have been suggested, and multiple approaches for the modelling of acute events are present in the literature, e.g. \cite{MazdiyasniEtAl:2019}. Similarly, various methods for modelling the vulnerability to heat hazards have been suggested. Analyses of heat wave events are commonly based on Global and Regional Circulation Model (GCM and RCM) outputs \cite{DosioEtAl:2018}. In \cite{Christidis:2021} and \cite{Christidis:2013} the authors analyse ensembles of CMIP6 simulations with and without anthropogenic forcings in order to determine whether extreme heat events are attributable to (anthropogenic) climate change. Such attribution analysis is based in part on finding return periods of events (see also \cite{StottEtAl:2016}). This estimation of return periods for events is directly applicable to acute hazard models.
+
+In order to support a wide range of hazard and vulnerability models, \emph{physrisk} includes the derivation of heat statistics from CMIP6 data\footnote{This is somewhat in contrast to the use of the Aqueduct model of \cite{WardEtAl:2020} for modelling inundation, where the complete hazard model is used as-is within \emph{physrisk} -- albeit reformatted to handle efficiently the access patterns needed for physical risk calculations.}.
+
+\subsection{Hazard Models}
+
+\subsubsection{Chronic hazard models}
+As mentioned above, hazard models and vulnerability models are closely coupled. \cite{ZhangAndShindell:2021} describes the `GZN' (Graff-Zivin and Neidell) and `WBGT' (Wet-Bulb Globe Temperature) methods. The statistics required for these methods are derived using bias-corrected and down-scaled data sets. There are multiple sources of data suitable for the estimation of the required statistics, notably the NEX-GDDP-NASA set \cite{ThrasherEtAl:2022}.
+
+Many OS-Climate-generated indicators for chronic heat count the number of days (or weeks) per year over a 20-year period for which a daily (or weekly) average temperature is above a threshold specified in °C. The average temperatures are sourced from down-scaled \gls{CMIP} data sets. These chronic heat indicators are available in the \emph{physrisk} library at a predefined set of thresholds for near-surface temperature, WBGT index and water temperature. This allows a discrete probability distribution to be inferred under climate-conditioned projections as well as a historical baseline.
+
+\subsubsection{Acute hazard models}
+Acute hazard modelling approaches are based on calculating return periods of events in a way analogous to acute inundation models. The calculation of return periods from data sets presents a statistical challenge, dealt with for example by \cite{MentaschiEtAl:2016}.
+
+\subsection{Vulnerability models}
+
+\setcounter{secnumdepth}{4}
+\subsubsection{Thermal power generation projects}
+
+\paragraph{Air temperature\\}
+\textbf{\\}
+Air temperature primarily affects the generation efficiency of gas turbines and the cooling efficiency of air-cooling systems. The air temperature module described in \cite{LuoEtAl:2021,LuoEtAl:2023} is implemented in the \emph{physrisk} library.
The inputs are:
+\begin{itemize}
+\item The number of days per year that the daily average air temperature at the geolocation of the generation unit is above a discrete set of thresholds ranging from 25°C to 55°C. This allows the derivation of:
+\begin{itemize}
+\item a discrete exceedance probability curve for air temperature in a climate-conditioned projection;
+\item a design air temperature (DAT), defined as the 90\%-quantile interpolated from the probability curve in a baseline scenario. It corresponds to the air temperature at which the unit generates electricity with the designed maximum efficiency.
+\end{itemize}
+\item The disruption curve estimating the number of inoperable days in a year from the number of days per year that the air temperature reaches a threshold expressed as a number of degrees above DAT, provided that it does not exceed the shutdown air temperature (SAT). SAT corresponds to the threshold above which it no longer makes technical or economic sense to keep a thermal power plant running. It is set to 50°C. The disruption curve depends on whether the generation technology is gas or steam combined with a dry cooling system. The maximum number of inoperable days interpolated for a given `air temperature above DAT' threshold across potential disruption curves is chosen when turbine and/or cooling system is not provided as an asset archetype.
+\end{itemize}
+The output is a discrete disruption probability curve due to air temperature risks. It provides the probability in a future projection that the operations of the generation unit are disrupted for a given period expressed as a year fraction.
+
+\paragraph{Water temperature\\}
+\textbf{\\}
+Water temperature primarily affects a power plant’s cooling efficiency and/or its ability to meet local regulatory discharge temperature standards. It has no impact on gas turbines or air-cooled units. Additionally, the wet-bulb globe temperature (WBGT), determined by the combined conditions of air temperature and relative humidity, affects the cooling efficiency of recirculating cooling towers. The water temperature module described in \cite{LuoEtAl:2021,LuoEtAl:2023} is implemented in the \emph{physrisk} library. The inputs are:
+\begin{enumerate}
+\item The number of weeks per year that the weekly average water temperature at the geolocation of the generation unit is above a discrete set of thresholds ranging from 5°C to 40°C. This allows the derivation of:
+\begin{itemize}
+\item a discrete exceedance probability curve for water temperature in a climate-conditioned projection;
+\item a design water temperature (DWT), defined as the 90\%-quantile interpolated from the probability curve in a baseline scenario. It corresponds to the water temperature below which a steam turbine with a once-through cooling system does not experience generation losses.
+\end{itemize}
+\item The number of days per year that the daily average WBGT at the geolocation of the generation unit is above a discrete set of thresholds ranging from 5°C to 60°C. The 99\%-quantile interpolated from the probability curve in a baseline scenario corresponds to the WBGT below which a steam turbine with a recirculating cooling system does not experience generation losses. The joint distribution of water temperature and WBGT in a climate-conditioned projection is estimated using a Gaussian copula with a correlation $\rho$ set to $50$\% by default.
The discrete exceedance probability curve for water temperature (WT), given that WBGT exceeds the 99\%-quantile of the baseline scenario (denoted $\text{WBGT}^{99\%}_{\text{baseline}}$), is thus given by:
+$$
+\frac{\Phi_{\rho}\left(\Phi^{-1}_{\mathcal{N}\left(0,1\right)}\left(\mathbb{P}\left[\text{WT}>\cdot\right]\right),\,\Phi^{-1}_{\mathcal{N}\left(0,1\right)}\left(\mathbb{P}\left[\text{WBGT}>\text{WBGT}^{99\%}_{\text{baseline}}\right]\right)\right)}
+{\mathbb{P}\left[\text{WBGT}>\text{WBGT}^{99\%}_{\text{baseline}}\right]}
+$$
+where the exceedance probabilities are inferred from the numbers of weeks (respectively days) per year that the corresponding thresholds are exceeded, and $\Phi_{\rho}$ is the cumulative distribution function of a $\rho$-correlated standard normal random pair. Scaled by the probability in a climate-conditioned projection that WBGT exceeds the 99\%-quantile of the baseline scenario, it replaces the unconditional discrete exceedance probability curve calculated in the first step when the generation unit features a steam turbine with a recirculating cooling system. DWT is also capped at 24.7°C\footnote{The intake water temperature equivalent to a 35°C discharge water temperature given the linear approximation:
+$$
+T_{\text{discharge}}=1.0191\times T_{\text{intake}}+9.7951^{\circ}C
+$$
+}.
+\item The disruption curve estimating the number of inoperable days in a year from the number of days per year that the water temperature reaches a threshold expressed as a number of degrees above DWT. It depends on the cooling technology (once-through or recirculating) of the steam turbine. The maximum number of inoperable days interpolated for a given `water temperature above DWT' threshold across potential disruption curves is chosen when turbine and/or cooling system is not provided as an asset archetype.
+\item The regulatory discharge water limit estimating the number of inoperable days in a year from (intake) water temperature. It applies to steam turbines with a once-through cooling system, on top of the preceding disruption impact, using a max-of approach.
+\end{enumerate}
+The output is a discrete disruption probability curve due to water temperature risks. It provides the probability in a future projection that the operations of the generation unit are disrupted for a given period expressed as a year fraction. A sketch of the copula-based conditioning follows below.
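+The sketch below is our own illustration of this conditioning (assuming SciPy's \texttt{multivariate\_normal.cdf}; the exceedance probabilities are assumed to have been derived already from the weekly/daily counts):
+\begin{verbatim}
+import numpy as np
+from scipy.stats import norm, multivariate_normal
+
+def conditional_exceedance(p_wt, p_wbgt, rho=0.5):
+    """P(WT > wt | WBGT > threshold) under a Gaussian copula.
+
+    By the radial symmetry of the Gaussian copula, the joint exceedance
+    probability equals the bivariate normal CDF evaluated at the normal
+    quantiles of the two marginal exceedance probabilities.
+    """
+    biv = multivariate_normal(mean=[0.0, 0.0],
+                              cov=[[1.0, rho], [rho, 1.0]])
+    joint = biv.cdf(np.array([norm.ppf(p_wt), norm.ppf(p_wbgt)]))
+    return joint / p_wbgt
+
+# Hypothetical: P(WT > wt) = 0.10 unconditionally, P(WBGT > q99) = 0.01.
+# Positive correlation makes high WT more likely given high WBGT.
+p_cond = conditional_exceedance(0.10, 0.01)
+\end{verbatim}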
+
+\subsubsection{Heat vulnerability model}
+
+\label{SubSec:HeatVulnerabilityModel}
+
+\paragraph{Impact of temperature on labour productivity\\}
+\textbf{\\}
+The heat vulnerability model presented in this section is based on the approach introduced in \cite{NeidellEtAl:2021}. The paper uses survey data to estimate labour allocation decisions in the United States (US) based on temperature. It does not extend the analysis beyond the US. It replicates previous research (the original GZN method \cite{NeidellEtAl:2014}) whilst extending the period of data used and adding an assessment based on the economic cycle: the main innovation is that it accounts for the economic cycle by splitting out the 2008 financial crisis, first through segmented regressions and then by using an indicator variable. The non-recession periods are 2003--2007 and 2015--2018; the `Great Recession' period is 2008--2014. The methodology is only applied to climate-exposed sectors: agriculture, forestry, fishing and hunting, construction, mining, and transportation and utilities.
+
+The paper's main conclusions are:
+\begin{itemize}
+    \item A statistically significant impact of 2.6 minutes lost per degree of temperature above $90$°F during normal economic periods, and no relationship during a recession. This result is converted to the Celsius scale, which \emph{physrisk} takes as its reference: the 2.6 minutes is multiplied by a scaling factor of 1.8, giving \textbf{an impact of 4.7 minutes lost per degree of temperature above $\mathbf{32.2}$°C}.
+    \item When using an indicator variable and linear regression, the estimated impact was 5.6 minutes on the Fahrenheit scale (equivalently 10.08 minutes on the Celsius scale) during normal economic periods, but the parameter was not significant at the 90\% level.
+    \item No relationship between temperature and work allocation for temperatures below $32.2$°C.
+    \item The paper focuses on labour allocation decisions; it does not account for other impacts such as reduced productivity.
+\end{itemize}
+While the results are relevant and significant, it is important to highlight the following caveats:
+\begin{itemize}
+    \item There are questions about the reliability of the forecasts in the long run, as climate change will likely result in structural changes: over the long term, people might adopt adaptive solutions to adjust their work patterns in response to temperature rise.
+    \item The conclusion holds for the US (and perhaps also for the EU and other developed countries), but not for developing countries, which experience more periods of economic turmoil.
+\end{itemize}
+The paper attempts to estimate the economic cost (assuming the impact of 4.7 minutes lost per degree of temperature above $32.2$°C during normal economic periods); however, it focuses only on direct costs and does not account for feedback effects (reduced labour productivity will result in a decrease in products available, wages, demand, etc.). Figure \ref{fig:economiccost} provides the results, as extracted from the paper:
+\begin{figure}[h]
+    \centering
+    \includegraphics[scale = 0.6]{plots/economic cost.png}
+    \caption{Economic cost}
+    \label{fig:economiccost}
+\end{figure}
+Last but not least, it is worth noting that the results of this paper are interpreted as the impact of a \textbf{chronic increase in temperature}.
+%NEW
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+The GZN hazard model is based on projections of the climate variable `daily maximum temperature'. The projected data span 20 years. The daily cooling degree days are then computed as follows: if the daily maximum temperature is higher than $32.2$°C then the cooling degree days value is the number of degrees above that threshold, and 0 otherwise. Once the 20-year projected time series of daily cooling degree days is computed, the GZN hazard indicator is calculated as an annual average of the cooling degree days (over all days within the 20-year period). That indicator is subsequently used as an input to the impact function to compute the number of minutes of labour productivity lost. %Figure \ref{fig:GZL-Hazard} provides a summary of the methodology:
+
+$T^\text{max}$ is the daily maximum near-surface temperature. This is a point of attention, given that heating and cooling degree day indicators are often calculated using the daily \emph{average} near-surface temperature.
+\begin{equation}
+    \label{Eq:degree_days}
+    I^\text{dd} = \frac{365}{n_y} \sum_{i = 1}^{n_y} \max \left( T^\text{max}_i - T^\text{ref}, 0 \right)
+\end{equation}
+where $n_y$ is the number of days in the 20-year projection period and $T^\text{ref} = 32.2$°C.
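+A sketch of this indicator calculation (illustrative only; function and array names are ours):
+\begin{verbatim}
+import numpy as np
+
+def gzn_degree_days_indicator(t_max_daily, t_ref=32.2):
+    """Annual-average cooling degree days from a multi-year series
+    of daily maximum temperatures in degrees C (Eq. degree_days)."""
+    exceedance = np.maximum(t_max_daily - t_ref, 0.0)
+    return 365.0 * exceedance.sum() / len(t_max_daily)
+\end{verbatim}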
+
+\paragraph{Uncertainty around the heat vulnerability model\\}
+\textbf{\\Overview}
+
+The assessment is based on the research performed by Zhang and Shindell, which reviews the uncertainty in the heat risk literature \cite{ZhangAndShindell:2021}. This paper provides context around the uncertainty that exists in the results discussed in the previous section, which is mainly explained by the methodology used for the estimation of the impact of temperature on labour productivity.
+\begin{itemize}
+    \item \cite{ZhangAndShindell:2021} provides an analysis of the \textbf{differential forecasts} between the \textbf{GZN method} (named for Graff-Zivin and Neidell) documented in \cite{NeidellEtAl:2021} and the Wet-Bulb Globe Temperature methodology (\textbf{WBGT method}), which includes other climate factors in addition to temperature: humidity, wind speed and heat radiation -- Figure \ref{fig:WBGT}\footnote{Extracted from \cite{ZhangAndShindell:2021} -- p.4} details the WBGT approach.
+    \item Another major source of differentiation is that the GZN method focuses on changes in labour allocation decisions while the WBGT method focuses on the physiological impacts of rising temperatures.
+\end{itemize}
+\begin{figure}[H]
+    \centering
+    \includegraphics{plots/WBGT.png}
+    \caption{WBGT method}
+    \label{fig:WBGT}
+\end{figure}
+The parameters $\alpha_1$ and $\alpha_2$ are derived for three different work intensities: light (24.64, 22.72), medium (32.98, 17.81) and heavy (30.94, 16.64) work.
+\begin{itemize}
+    \item Light work includes all service sectors, such as trade, all retail sales, wholesale trade and commission trade, hotels and restaurants, repairs of motor vehicles and personal and household goods, retail sale of automotive fuel, post and telecom, financial services, insurance, recreational and service activities, public administration, dwellings real estate, and other businesses.
+    \item Medium work includes food, textile and wood, machinery and electronic equipment, and other industries and transport sectors.
+    \item Heavy work includes agriculture, forestry and fishery, extraction sectors, and construction sectors. These are the sectors considered in the GZN model methodology, which are the climate-exposed sectors. Hence, in this study, the heavy-work-intensity values of $\alpha_1$ and $\alpha_2$ are applied $(30.94, 16.64)$.
+\end{itemize}
+
+Note that there are differences in the functional forms applied in the original GZN method \cite{NeidellEtAl:2014} and the approach presented in \cite{NeidellEtAl:2021}, with the original GZN method using one linear regression with dummy variables for temperature buckets, while \cite{NeidellEtAl:2021} uses multiple linear regressions with one variable reflecting the breach of the maximum temperature around anchor points (below $70$°F, $70$--$90$°F, and $90$°F and above). The WBGT methodology uses a non-linear function to relate labour loss to the WBGT consolidated measure.
+
+Figure \ref{fig:CostsByRCP}\footnote{Extracted from \cite{ZhangAndShindell:2021} -- p.11} provides the forecasts of labour lost in millions of 2016 USD (constant value). Most notably, the original GZN method produces more optimistic (lower) forecasts of the cost of labour lost than the WBGT method.
Note that RCP8.5\footnote{The Intergovernmental Panel on Climate Change modelling is based on representative concentration pathways (RCPs), which represent different emissions projections under basic, plausible economic and social assumptions, while staying within physical constraints. RCPs are constructed by back-calculating the amount of emissions that would result in a given amount of radiative forcing (the difference between solar energy absorbed by the Earth and energy radiated back into space), which would then result in a given amount of warming.} refers to the scenario of high emissions and RCP4.5 refers to the scenario of moderate emissions.
+\begin{figure}[h]
+    \centering
+    \includegraphics{plots/CostsByRCP.png}
+    \caption{Costs by RCP under the GZN method versus the WBGT method}
+    \label{fig:CostsByRCP}
+\end{figure}
+
+\textbf{Detailed approach: vulnerability around the GZN model}
+
+There are two identified areas where uncertainty exists in the forecasts in \cite{NeidellEtAl:2021}: the economic cycle and the model parameter uncertainty.
+
+The \textbf{economic cycle} is one area where uncertainty exists in the forecasts.
+The paper shows that labour allocation decisions are sensitive to where in the economic cycle the US is; during a recession there does not appear to be a relationship between labour allocation and temperature. In order to measure the uncertainty explained by the economic cycle, one might model the occurrence of a recession as a Bernoulli random variable with probability $p$. Based on this, there are two possible approaches. The first is a Monte-Carlo-like approach in which one randomly samples 1 or 0, depending on whether or not a recession occurs, at each period on a time path. The second is to use the expected value and estimate the impact as:
+\begin{equation}
+    \label{Eq:economiccycle}
+    \text{forecast minutes lost} = p \times 0 + (1-p) \times \text{EV}\left(X\right)
+\end{equation}
+where $X$ is the variable referring to minutes lost during a normal economic cycle.
+The second approach is attractive in its simplicity, ensuring the model does not lose its focus.
+One concern is that the probability of recession, in reality, may be related to the realisation of climate-related risks. Hence, there might be an over- or under-estimation of the probability unless the impact of climate risk is also considered. Historical model-estimated recession probabilities can be sourced from \cite{ChauvetEtAl:2024}. However, we did not go further in the measurement of the uncertainty explained by the economic cycle because it would require us to focus on the modelling of another parameter ($p$, the probability that a recession occurs), which is not the purpose of this work.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% to add in the references: https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodologyBibliography.bib
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%@article{SmoothedU.S.RecessionProbabilities:2022,
+%	title={Smoothed U.S. Recession Probabilities},
+%	author={Jeremy Piger},
+%	publisher={University of Oregon}
+%	year={2022},
+%}
+Instead, we focus on the \textbf{model parameter uncertainty approach}. In \cite{ZhangAndShindell:2021}, there is a linear relationship between temperature and work minutes lost ($\beta$), and a constraint is applied to ensure that total time allocation sums to 24 hours, which ultimately yields a non-linear regression model.
Given that the main coefficient of interest is denoted $\beta$, an inference is applied assuming that $\beta$ follows a Student's $t$ distribution:
+\begin{equation}
+    \label{Eq:uncertaintyStudentT}
+    \delta \beta \sim T_{\left(\beta, \mathrm{SE}, N-K\right)}
+\end{equation}
+where $\text{SE}$ is the standard error of the coefficient and $N-K$ is the number of degrees of freedom ($N$ being the number of observations and $K$ the number of model parameters). Hence one can estimate the bounds of the confidence interval (CI) at a given confidence level CI\%:
+\begin{equation}
+    \label{Eq:CIStudent}
+    \beta_{\mathrm{CI}\%} = \beta \pm T(p) \times \text{SE}
+\end{equation}
+where $T(p)$ denotes the quantile of the Student's $t$ distribution at the probability $p$ corresponding to CI\%, the standard error is $\text{SE}=2.23$ minutes and $\beta=-4.68$.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%START HERE
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+Figure \ref{fig:GZL-Vulnerability} shows the uncertainty around the GZN vulnerability model, based on the assumptions above, at the $95\%$ confidence level\footnote{In order to compute the figure, $N$ is taken from the paper \cite{TemperatureAndWork:2021}: $N = 11{,}732$, and $K$ is assumed to be 0 as it is very small relative to $N$. One could do further research to check the exact value of $K$, but this would not materially impact the results. $K$ includes the number of control variables which provide information on demographic data -- age, gender, education, employment status, income, etc. -- and climate variables such as level of precipitation and snow.}.
+\begin{figure}[h]
+    \centering
+    \includegraphics[scale = 0.8]{plots/GZL-Vulnerability.png}
+    \caption{Estimate of daily minutes of labour loss}
+    \label{fig:GZL-Vulnerability}
+\end{figure}
+As the number of degrees of freedom increases, the $t$ distribution converges to the standard normal. Therefore, for simplicity, in the context of this work it is assumed that the daily labour productivity impact $\beta$ can be modelled as:
+\begin{equation}
+    \label{Eq:uncertainty1}
+    \beta \sim \mathcal{N}_{\left(m,\text{SE}^{2}\right)}
+\end{equation}
+where $m = -4.67$ minutes and $\text{SE}=2.23$ minutes. As an example, consider a $1.5$°C degree-day increase in temperature. We can then scale the normal distribution as shown below:
+%The buckets of the daily maximum temperature above $32.2$°C are defined as an incremental increase of $1.5^\circ %C$, starting from $0$°C to $18$°C: $\begin{pmatrix} 0 & 1.5 & 3 & ... & 18 \end{pmatrix}$. For a $1.5$°C daily temperature increase, the uncertainty around the daily labour productivity impact is given by the following %normal distribution:
+\begin{equation}
+    \label{Eq:uncertainty2}
+    1.5 \times \beta \sim \mathcal{N}_{\left(1.5 \times m,(1.5 \times \text{SE})^{2}\right)}
+\end{equation}
+This can be generalised for a degree-day increase of $x$ using the following distribution:
+\begin{equation}
+    \label{Eq:uncertainty3}
+    x \times \beta \sim \mathcal{N}_{\left(x \times m,\left(x\times\text{SE}\right)^{2}\right)}
+\end{equation}
+For example, there is a 1\% risk that the lost labour productivity exceeds $29.6$ minutes per day if the maximum daily temperature exceeds $32.2$°C by $3$°C. This number is computed as
+$$
+\mathcal{N}^{-1}_{\left(m',\left(\text{SE}'\right)^2\right)}\left(1\%\right)
+$$
+where $m'= 3 \times m = -14.01$ minutes and $\text{SE}'= 3 \times \text{SE} = 6.69$ minutes.
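+This quantile can be reproduced as follows (a quick numerical check, assuming SciPy):
+\begin{verbatim}
+from scipy.stats import norm
+
+m, se = -4.67, 2.23   # minutes lost per degree above 32.2 C
+x = 3.0               # degrees above the 32.2 C threshold
+
+# 1% quantile of N(x*m, (x*se)^2): approximately -29.6 minutes,
+# i.e. a 1% chance of losing more than 29.6 minutes per day.
+q01 = norm.ppf(0.01, loc=x * m, scale=x * se)
+\end{verbatim}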
The interval between the $1\%$ and $99\%$ quantiles of the lost labour productivity per day, if the maximum daily temperature exceeds $32.2$°C by $3$°C, is $\left[+1.6\text{ min}, -29.6\text{ min}\right]$.
+%Adding point regarding impact as a percentage of total labour
+The estimated loss of labour time is then transformed into a percentage estimate:
+\begin{equation}
+    \label{Eq:labour_time_loss}
+    \text{estimated loss of labour time (\%)}= \frac{\text{labour lost}}{\text{total minutes worked in a year}}
+\end{equation}
+For this, the figures reported by the Organisation for Economic Co-operation and Development (OECD) are used, specifically the 2021 estimate for the United States of America. This ensures alignment with the region in which the labour parameters were estimated. Buckets of labour lost are defined within the range of no impact to 100\%, with estimated marginal probabilities for each bucket returned.
+
+The main weakness of the Temperature and Work article (which is more likely to be close to the original GZN method than to the WBGT method) is that it does not take into account the higher level of optimism in the original GZN method noted in Figure \ref{fig:CostsByRCP}. Hence, the results might underestimate the actual impact. The next paragraph explores the vulnerability model around the WBGT method and its uncertainty.
+
+\textbf{Detailed approach: vulnerability around the WBGT model}
+
+%NEW
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+The WBGT hazard model is based on projections of the climate variables `daily temperature' and `daily humidity'. The projected data span 20 years. The computations given in Figure \ref{fig:WBGT} are used to compute the daily WBGT, the indicator of the WBGT hazard data, which is subsequently used as an input to the impact function to obtain daily Work Ability (WA) projections over the 20-year period. Finally, the impact deriving from the WBGT model is computed as the annual average of the daily projected WA.
+%\begin{figure}[h]
+%    \centering
+%    \includegraphics[scale = 0.8]{plots/WBGT-Hazard.PNG}
+%    \caption{WBGT and labour availability}
+%    \label{fig:WBGT-Hazard}
+%\end{figure}
+Note that another indicator deriving from the WBGT hazard model is the Work Loss (WL), computed as $\text{WL} = 1 - \text{WA}$. It is an alternative way to perform the assessment, leading to the same results.
+
+The aggregation of the outputs of the GZN vulnerability model (minutes of labour productivity lost) and the WBGT vulnerability model (WA) returns the effective number of working hours, which represents a way to measure the uncertainty around the GZN vulnerability model. %Figure \ref{fig:Aggregation} provides the aggregation process and results:
+%\begin{figure}[h]
+%    \centering
+%    \includegraphics[scale = 0.8]{plots/Aggregated Hazard WBGT GZN.PNG}
+%    \caption{Aggregation of GZN model and WBGT model}
+%    \label{fig:Aggregation}
+%\end{figure}
+If WL were used instead of WA, it would be multiplied by the hours worked derived from the GZN model to obtain the annual total labour loss due to heat.
+
+Given the modelling assumptions around the parameters used to compute WA in the WBGT model, it is important to measure the uncertainty around these parameters, $\alpha_1$ and $\alpha_2$, which depend on the work intensities.
+
+The source paper provides the parameters $\alpha_1$ and $\alpha_2$ for three different work intensities (low, medium and high), with industries mapped to each intensity.
These categories are broad and do not account for variance within an industry and between industries in the same category. To account for this uncertainty, the WBGT approach was adjusted to include uncertainty around the industry.
+
+Consider an asset which is marked as being in a high-risk sector. We assume that the work ability is uniformly distributed with mean equal to $\text{WA}_H$ and with a floor ($a$) and ceiling ($b$) equidistant from the mean. We assume that the floor $a$ is halfway between $\text{WA}_H$ and $\text{WA}_M$, so that $a$ and $b$ can be estimated from the formulae below:
+\begin{equation}
+    \label{Eq:WBGT_Floor}
+    a = \text{WA}_H - \frac{\text{WA}_H - \text{WA}_M}{2}\,
+\end{equation}
+\begin{equation}
+    \label{Eq:WBGT_Ceil}
+    b = \text{WA}_H + \frac{\text{WA}_H - \text{WA}_M}{2}\,
+\end{equation}
+The WBGT work ability can then be represented as:
+\begin{equation}
+    \label{Eq:WBGT_Uniform}
+    \text{WA} \sim \mathcal{U}(a ,b)\,
+\end{equation}
+With this we can estimate the variance of the WBGT work ability using the standard formula for the variance of a uniform distribution:
+\begin{equation}
+    \label{Eq:WBGT_Variance}
+    Var\left(\text{WBGT}\right) = \frac{(b-a)^2}{ 12}
+\end{equation}
+In order to obtain a final work ability, we multiply the output of the GZN model by the output of the WBGT model:
+\begin{equation}
+    \label{Eq:Final_Mean_WA}
+    \text{effective work} = \left(1- \text{estimated loss of labour time}\right) \times \text{WA}_H
+\end{equation}
+As we have uncertainty in both the GZN component and the WBGT component, we need to estimate a joint variance. We assume that the epistemic uncertainty in the GZN model is uncorrelated with the sector uncertainty of the work ability measure. To estimate the variance of the product of the GZN and WBGT components we use the standard formula for the variance of a product of independent random variables:
+\begin{equation}
+\begin{aligned}
+    \label{Eq:Var_Joint}
+    Var\left(\text{effective work}\right) = \left(1- \text{estimated loss of labour time}\right)^2 \times Var\left(\text{WBGT}\right) + \\
+    \text{WA}_H^2 \times Var\left(\text{GZN}\right) + Var\left(\text{WBGT}\right) \times Var\left(\text{GZN}\right)
+    \end{aligned}
+\end{equation}
+To get the final impact we assume that the product of the two variables is normally distributed. The final work ability distribution can then be represented as:
+\begin{equation}
+\begin{aligned}
+    \label{Eq:Final_Result_Heat}
+    \mathcal{N}_{\left(\text{effective work}, Var\left(\text{effective work}\right)\right)}
+    \end{aligned}
+\end{equation}
+\setcounter{secnumdepth}{3}
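+The combination of Equations~\ref{Eq:WBGT_Floor}--\ref{Eq:Final_Result_Heat} can be sketched as follows (our own illustration with hypothetical inputs):
+\begin{verbatim}
+def effective_work_distribution(loss_gzn, var_gzn, wa_h, wa_m):
+    """Mean and variance of effective work = (1 - GZN loss) x WA,
+    combining GZN and WBGT uncertainties as in Eq. Var_Joint."""
+    a = wa_h - (wa_h - wa_m) / 2.0   # uniform floor
+    b = wa_h + (wa_h - wa_m) / 2.0   # uniform ceiling
+    var_wbgt = (b - a) ** 2 / 12.0
+    mean = (1.0 - loss_gzn) * wa_h
+    var = ((1.0 - loss_gzn) ** 2 * var_wbgt
+           + wa_h ** 2 * var_gzn
+           + var_wbgt * var_gzn)
+    return mean, var
+
+# Hypothetical: 2% GZN labour-time loss with variance 1e-4;
+# heavy- and medium-intensity work abilities of 0.8 and 0.9.
+mean, var = effective_work_distribution(0.02, 1e-4, 0.8, 0.9)
+\end{verbatim}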
+
+\section{Water-related risks}
+\subsection{Hazard models}
+Water-related risks are modelled as chronic risks using indicators implemented by the World Resources Institute (WRI) Aqueduct 4.0 \cite{KuzmaEtAl:2023}:
+\begin{itemize}
+\item Water demand is the maximum potential water required to meet sectoral demands (domestic, industrial, irrigation and livestock).
+\item Water supply is the available blue water, that is, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed.
+\item Water stress is an indicator of competition for water resources, defined as the ratio of water demand by human society divided by available blue water.
+\item Water depletion is the ratio of total water consumption divided by available renewable water supplies. Total water consumption includes domestic, industrial, irrigation and livestock consumptive uses. Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability.
+\end{itemize}
+\subsection{Vulnerability models}
+\subsubsection{Thermal power generation projects}
+In high water-stress areas, decreased water availability, caused either by reduced runoff or increased demand/competition, can result in reductions in permitted water withdrawals and/or more stringent water efficiency regulations in the future. The water stress module described in \cite{LuoEtAl:2021,LuoEtAl:2023} is implemented in the \emph{physrisk} library. The inputs are:
+\begin{itemize}
+\item The indicator for water stress in a climate-conditioned projection at the geolocation of the generation unit. A catchment with Water Stress (WS) higher than 40\% is considered highly water stressed and prone to water-stress-induced efficiency reductions impacting wet-cooled plants. In this vulnerability model, a steam turbine with a once-through or recirculating cooling system experiences generation losses when Water Stress exceeds 40\%. From its mean provided as an input, Water Stress is assumed to follow a shifted uniform probability distribution
+$$
+\text{WS}\sim\left(\text{water stress indicator}\right)_{\text{climate-conditioned projection}}-0.5+\mathcal{U}\left(0,1\right)
+$$
+so that
+$$
+\mathbb{P}\left(WS>0.4\right)=\max\left(0,\min\left(0.1+\left(\text{water stress indicator}\right)_{\text{climate-conditioned projection}},1\right)\right)
+$$
+\item The indicator for water supply in both a climate-conditioned projection and a baseline scenario at the geolocation of the generation unit. This allows the calculation of the Supply Reduction Rate (SRR), corresponding to the relative change of the future blue water availability from baseline
+$$
+\text{SRR}=\displaystyle\frac{\left(\text{water supply indicator}\right)_{\text{climate-conditioned projection}}}{\left(\text{water supply indicator}\right)_{\text{baseline}}}-1
+$$
+\item The disruption curve estimating the number of inoperable days in a year from the supply reduction rate due to a limited cooling water budget. It depends on the cooling technology (once-through or recirculating) of the steam turbine. The maximum number of inoperable days interpolated for a given SRR across potential disruption curves is chosen when turbine and/or cooling system is not provided as an asset archetype.
+\end{itemize}
+The output is a discrete disruption probability curve due to water-related risks. It provides the probability in a future projection that the operations of the generation unit are disrupted for a given period expressed as a year fraction.
+
+\section{Disaggregation data sets}
+
+Where asset locations are unknown, the vulnerability of those assets to climate hazards may still be modelled by using aggregate exposure information. For example, the total value of a portfolio of assets may be known as well as the region containing those assets, even if the individual locations are unknown. The total asset value can be \emph{disaggregated} using a (two-dimensional) probability distribution of the asset locations.
+
+For high-resolution hazards such as flood and wildfire, two possible approaches are:
+\begin{itemize}
+\item Sample a number of point locations from the distribution of asset locations.
+\item Split the region into grid cells and integrate over the cells.
+\end{itemize}
+
+The latter is challenging in the case of high-resolution hazards, although techniques do exist to reduce the number of grid cells --
that is, to use a resolution lower than that of the original hazard indicator data set in cases where the full resolution would be impractical. For example, distributions of grid-cell fractions experiencing hazard intensities of different magnitudes can be derived.
+
+In this section, however, we focus on the former approach. Essentially, this is the sampling of a proxy portfolio which is used to disaggregate the total asset value; it has the advantage that proxy portfolios can be treated in the same way as known asset portfolios.
+
+
+\subsection{Sampling from two dimensional data sets}
+The probability density of the asset locations is given by $f_{X, Y}(x, y)$ where $x$ and $y$ are spatial co-ordinates. The fraction of the exposure measure (e.g. asset total value) within a particular region is given by:
+
+\begin{equation}
+    \label{Eq:Disagg1}
+    \iint\limits_{\mathrm{region}} f_{X, Y}(x, y) \, dx \, dy
+\end{equation}
+
+In order to sample $n$ locations from $f_{X, Y}$, the marginal distribution is calculated:
+
+\begin{equation}
+    \label{Eq:Disagg2}
+    f_X(x) = \int_{y} f_{X, Y}(x, y) \, dy
+\end{equation}
+
+and values $x_k$, $k \in \{1, ..., n\}$ are sampled from this. For each $k$, $y_k$ is then sampled from the conditional distribution, $f_{Y|X}(y, x_k)$.
+
+\begin{equation}
+    \label{Eq:Disagg3}
+    f_{Y|X}(y, x_k) = f_{X, Y}(x_k, y) / f_X(x_k)
+\end{equation}
+
+In both cases, sampling is done by inverting the cumulative distribution function. In the discrete case, say we have a two-dimensional exposure data set $\mathbf{E}$ with elements $E_{i, j}$, $i$ corresponding to the $x$ spatial dimension and $j$ the $y$ dimension. $\sum_{i,j} E_{i,j} = 1$ by construction. We can define a discrete form of the cumulative $\int_{-\infty}^{x} f_X(u) \, du$, $F_X(i)$ as:
+
+\begin{equation}
+    \label{Eq:Disagg4}
+    F_X(i) = \sum_{u = 1}^{i} \sum_{j=1}^{n_j} E_{u, j}
+\end{equation}
+
+The index $i_k$ is then sampled by drawing uniforms $p_k$ from $P \sim \mathcal{U}(0, 1)$ and finding $i_k$, the smallest value of $i$ such that $p_k \le F_X(i)$:
+
+\begin{equation}
+    \label{Eq:Disagg5}
+    i_k = \min \{ i : p_k \le F_X(i) \}
+\end{equation}
+
+The discrete conditional is then calculated:
+
+\begin{equation}
+    \label{Eq:Disagg6}
+    F_{Y|X}(j, i_k) = \sum_{u = 1}^{j} E_{i_k, u} / \sum_{u = 1}^{n_j} E_{i_k, u}
+\end{equation}
+
+and the index $j_k$ is then sampled by drawing another set of uniforms $q_k$:
+
+\begin{equation}
+    \label{Eq:Disagg7}
+    j_k = \min \{ j : q_k \le F_{Y|X}(j, i_k) \}
+\end{equation}
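+A sketch of this two-stage inverse-CDF sampling (our own illustration, using \texttt{numpy}; names are hypothetical):
+\begin{verbatim}
+import numpy as np
+
+def sample_locations(exposure, n, rng=None):
+    """Sample n (i, j) grid indices from a 2-D exposure matrix whose
+    entries sum to 1: inverse-CDF sampling of the marginal in i,
+    then of the conditional in j."""
+    rng = np.random.default_rng() if rng is None else rng
+    marginal_cdf = np.cumsum(exposure.sum(axis=1))     # F_X
+    i_idx = np.searchsorted(marginal_cdf, rng.random(n))
+    j_idx = np.empty(n, dtype=int)
+    for k, i in enumerate(i_idx):
+        cond_cdf = np.cumsum(exposure[i]) / exposure[i].sum()  # F_{Y|X}
+        j_idx[k] = np.searchsorted(cond_cdf, rng.random())
+    return i_idx, j_idx
+
+# Hypothetical 3x3 exposure distribution.
+E = np.array([[0.1, 0.0, 0.1],
+              [0.2, 0.3, 0.1],
+              [0.0, 0.1, 0.1]])
+i_idx, j_idx = sample_locations(E, n=5)
+\end{verbatim}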
{2.2}Hazard model}{11}{subsection.2.2}% +\contentsline {subsection}{\numberline {2.3}Vulnerability model}{12}{subsection.2.3}% +\contentsline {subsubsection}{\numberline {2.3.1}Mathematical description of vulnerability models}{13}{subsubsection.2.3.1}% +\contentsline {subsubsection}{\numberline {2.3.2}Return-period-based approach}{14}{subsubsection.2.3.2}% +\contentsline {paragraph}{Hazard maps.}{15}{section*.7}% +\contentsline {paragraph}{Effective impact.}{17}{section*.8}% +\contentsline {subsubsection}{\numberline {2.3.3}Event-based approach}{19}{subsubsection.2.3.3}% +\contentsline {paragraph}{Relationship to hazard maps.}{19}{section*.9}% +\contentsline {paragraph}{Constant severity regions}{20}{section*.10}% +\contentsline {subsubsection}{\numberline {2.3.4}Discrete form of acute vulnerability model}{20}{subsubsection.2.3.4}% +\contentsline {paragraph}{Multiple occurrence of events.}{22}{section*.11}% +\contentsline {subsubsection}{\numberline {2.3.5}Importance of secondary uncertainty}{22}{subsubsection.2.3.5}% +\contentsline {paragraph}{Including epistemic uncertainty in vulnerability.}{23}{section*.13}% +\contentsline {paragraph}{Epistemic uncertainty as source of error.}{24}{section*.14}% +\contentsline {subsubsection}{\numberline {2.3.6}Interpolation of probability distributions}{24}{subsubsection.2.3.6}% +\contentsline {subsubsection}{\numberline {2.3.7}Probability bins from hazard maps}{25}{subsubsection.2.3.7}% +\contentsline {subsubsection}{\numberline {2.3.8}Vulnerability distributions and heuristics}{26}{subsubsection.2.3.8}% +\contentsline {paragraph}{Modelling impact using a Beta distribution.}{27}{section*.16}% +\contentsline {subsection}{\numberline {2.4}Aggregation of impacts}{28}{subsection.2.4}% +\contentsline {subsection}{\numberline {2.5}Financial model}{29}{subsection.2.5}% +\contentsline {subsubsection}{\numberline {2.5.1}Types of calculation.}{29}{subsubsection.2.5.1}% +\contentsline {subsubsection}{\numberline {2.5.2}Climate risk measures and importance of change.}{29}{subsubsection.2.5.2}% +\contentsline {paragraph}{Definition of measures.}{30}{section*.18}% +\contentsline {subsubsection}{\numberline {2.5.3}Structural models of credit risk}{30}{subsubsection.2.5.3}% +\contentsline {subsection}{\numberline {2.6}Uncertainties in the calculation}{31}{subsection.2.6}% +\contentsline {subsection}{\numberline {2.7}Model limitations}{31}{subsection.2.7}% +\contentsline {subsubsection}{\numberline {2.7.1}Data availability}{31}{subsubsection.2.7.1}% +\contentsline {section}{\numberline {3}Inundation}{32}{section.3}% +\contentsline {subsection}{\numberline {3.1}Hazard models}{32}{subsection.3.1}% +\contentsline {subsection}{\numberline {3.2}Vulnerability models}{33}{subsection.3.2}% +\contentsline {subsubsection}{\numberline {3.2.1}Real estate assets}{33}{subsubsection.3.2.1}% +\contentsline {subsubsection}{\numberline {3.2.2}Thermal power generation projects}{33}{subsubsection.3.2.2}% +\contentsline {section}{\numberline {4}Drought}{34}{section.4}% +\contentsline {subsection}{\numberline {4.1}Hazard models}{34}{subsection.4.1}% +\contentsline {subsection}{\numberline {4.2}Vulnerability models}{34}{subsection.4.2}% +\contentsline {subsubsection}{\numberline {4.2.1}Thermal power generation projects}{34}{subsubsection.4.2.1}% +\contentsline {section}{\numberline {5}Heat}{35}{section.5}% +\contentsline {subsection}{\numberline {5.1}Hazard Models}{35}{subsection.5.1}% +\contentsline {subsubsection}{\numberline {5.1.1}Chronic hazard models}{35}{subsubsection.5.1.1}% 
+\contentsline {subsubsection}{\numberline {5.1.2}Acute hazard models}{36}{subsubsection.5.1.2}% +\contentsline {subsection}{\numberline {5.2}Vulnerability models}{36}{subsection.5.2}% +\contentsline {subsubsection}{\numberline {5.2.1}Thermal power generation projects}{36}{subsubsection.5.2.1}% +\contentsline {paragraph}{\numberline {5.2.1.1}Air temperature\\}{36}{paragraph.5.2.1.1}% +\contentsline {paragraph}{\numberline {5.2.1.2}Water temperature\\}{37}{paragraph.5.2.1.2}% +\contentsline {subsubsection}{\numberline {5.2.2}Heat vulnerability model}{39}{subsubsection.5.2.2}% +\contentsline {paragraph}{\numberline {5.2.2.1}Impact of temperature on labour productivity\\}{39}{paragraph.5.2.2.1}% +\contentsline {paragraph}{\numberline {5.2.2.2}Uncertainty around the vulnerability Heat model\\}{41}{paragraph.5.2.2.2}% +\contentsline {section}{\numberline {6}Water-related risks}{48}{section.6}% +\contentsline {subsection}{\numberline {6.1}Hazard models}{48}{subsection.6.1}% +\contentsline {subsection}{\numberline {6.2}Vulnerability models}{48}{subsection.6.2}% +\contentsline {subsubsection}{\numberline {6.2.1}Thermal power generation projects}{48}{subsubsection.6.2.1}% +\contentsline {section}{\numberline {7}Disaggregation data sets}{49}{section.7}% +\contentsline {subsection}{\numberline {7.1}Sampling from two dimensional data sets}{50}{subsection.7.1}% diff --git a/methodology/PhysicalRiskMethodologyBibliography.bib b/methodology/PhysicalRiskMethodologyBibliography.bib new file mode 100644 index 00000000..a5128d2a --- /dev/null +++ b/methodology/PhysicalRiskMethodologyBibliography.bib @@ -0,0 +1,495 @@ +@inbook{AdlerEtAl:2022, + author = {Adler, C. and Wester, P. and Bhatt, I. and Huggel, C. and Insarov, G.E. and Morecroft, M.D. and Muccione, V. and Prakash, A.}, + title = {Cross-Chapter Paper 5: Mountains}, + booktitle = {Climate Change 2022: Impacts, Adaptation and Vulnerability. Contribution of Working Group II to the Sixth Assessment Report of the Intergovernmental Panel on Climate Change}, + editor = {Pörtner, H. O. and Roberts, D. C. and Tignor, M. and Poloczanska, E. S. and Mintenbeck, K. and Alegría, A. and Craig, M. and Langsdorf, S. and Löschke, S. and Möller, V. and Okem, A. 
and Rama, B.}, + publisher = {Cambridge University Press}, + address = {Cambridge, UK and New York, USA}, + pages = {2273-2318}, + ISBN = {9781009325844}, + DOI = {10.1017/9781009325844.022.2273}, + year = {2022}, + type = {Book Section} +} + +@article{AriasEtAl:2020, + title={Impacts of climate change and deforestation on hydropower planning in the Brazilian Amazon}, + author={Arias, Mauricio E and Farinosi, Fabio and Lee, Eunjee and Livino, Angela and Briscoe, John and Moorcroft, Paul R}, + journal={Nature Sustainability}, + volume={3}, + number={6}, + pages={430--436}, + year={2020}, + publisher={Nature Publishing Group UK London} +} + +@techreport{BavandiEtAl:2022, + author = {Bavandi, Antoine and Berrais, Dorra and Dolk, Michaela and Mahul, Olivier}, + title = {Physical Climate Risk Assessment: Practical Lessons for the Development of Climate Scenarios with Extreme Weather Events from Emerging Markets and Developing Economies}, + institution = {Network for Greening the Financial System}, + year = {2022} +} + +@article{BegueriaEtAl:2014, + title={Standardized precipitation evapotranspiration index (SPEI) revisited: parameter fitting, evapotranspiration models, tools, datasets and drought monitoring}, + author={Beguer{\'\i}a, Santiago and Vicente-Serrano, Sergio M and Reig, Fergus and Latorre, Borja}, + journal={International journal of climatology}, + volume={34}, + number={10}, + pages={3001--3023}, + year={2014}, + publisher={Wiley Online Library} +} + +@article{BertramEtAl:2020, + title={NGFS Climate Scenarios Database: Technical Documentation}, + author={Bertram, Chris and Hilaire, J and Kriegler, E and Beck, T and Bresch, DN and Clarke, L and Cui, R and Edmonds, J and Min, J and Piontek, F and others}, + year={2020}, + publisher={Potsdam Institute for Climate Impact Research (PIK), International Institute~…} +} + +@article{BlackCox:1976, + title = {Valuing corporate securities: some effects of bond indenture provisions}, + author = {Black, Fischer and Cox, John C.}, + journal = {Journal of Finance}, + year = {1976}, + pages = {351-367}, + number = 2, + volume = 31, +} + +@article{ChauvetEtAl:2024, + title={Smoothed U.S. Recession Probabilities}, + author={Chauvet, Marcelle and Piger, Jeremy Max}, + year={2024}, + publisher={Federal Reserve Bank of St. 
Louis}
+}
+
+@article{Christidis:2013,
+  title={A new {HadGEM3-A}-based system for attribution of weather- and climate-related extreme events},
+  author={Christidis, Nikolaos and Stott, Peter A and Scaife, Adam A and Arribas, Alberto and Jones, Gareth S and Copsey, Dan and Knight, Jeff R and Tennant, Warren J},
+  journal={Journal of Climate},
+  volume={26},
+  number={9},
+  pages={2756--2783},
+  year={2013},
+  publisher={American Meteorological Society}
+}
+
+@article{Christidis:2021,
+  title = {Using {CMIP6} multi-model ensembles for near real-time attribution of extreme events},
+  author = {Christidis, N.},
+  journal = {Hadley Centre Technical Note},
+  year = {2019},
+  volume = {107},
+  url = {https://www.metoffice.gov.uk/research/library-and-archive/publications/science/climate-sciencetechnical-notes}
+}
+
+@inbook{Cooley:2013,
+  author = {Cooley, Daniel},
+  year = {2013},
+  month = {01},
+  pages = {97-114},
+  title = {Return Periods and Return Levels Under Climate Change},
+  isbn = {978-94-007-4478-3},
+  booktitle = {Extremes in a Changing Climate: Detection, Analysis, and Uncertainty},
+  doi = {10.1007/978-94-007-4479-0_4}
+}
+
+@techreport{CFRF:2022,
+  title={Scenario Analysis: Physical Risk},
+  institution={Climate Financial Risk Forum},
+  url={https://www.fca.org.uk/publication/corporate/cfrf-guide-2022-scenario-analysis-physical-risk-underwriting-guide.pdf.pdf},
+  year={2022}
+}
+
+@article{DosioEtAl:2018,
+  title={Extreme heat waves under {1.5 C} and {2 C} global warming},
+  author={Dosio, Alessandro and Mentaschi, Lorenzo and Fischer, Erich M and Wyser, Klaus},
+  journal={Environmental Research Letters},
+  volume={13},
+  number={5},
+  pages={054006},
+  year={2018},
+  publisher={IOP Publishing}
+}
+
+@article{DeesEtAl:2017,
+  title={Stress-test analytics for macroprudential purposes: Introducing STAMP€},
+  author={Dees, St{\'e}phane and Henry, J{\'e}r{\^o}me},
+  journal={Satellite Models},
+  volume={13},
+  year={2017}
+}
+
+@article{DunneEtAl:2013,
+  title={Reductions in labour capacity from heat stress under climate warming},
+  author={Dunne, John P and Stouffer, Ronald J and John, Jasmin G},
+  journal={Nature Climate Change},
+  volume={3},
+  number={6},
+  pages={563--566},
+  year={2013},
+  publisher={Nature Publishing Group}
+}
+
+@article{EberenzEtAl:2013,
+  title={Regional tropical cyclone impact functions for globally consistent risk assessments},
+  author={Eberenz, Samuel and L{\"u}thi, Samuel and Bresch, David N},
+  journal={Natural Hazards and Earth System Sciences},
+  volume={21},
+  number={1},
+  pages={393--415},
+  year={2021},
+  publisher={Copernicus GmbH}
+}
+
+@techreport{HuizingaEtAl:2017,
+  title={Global flood depth-damage functions: Methodology and the database with guidelines},
+  author={Huizinga, Jan and De Moel, Hans and Szewczyk, Wojciech and others},
+  year={2017},
+  institution={Joint Research Centre (Seville site)}
+}
+
+@article{KenyonEtAl:2021,
+  title = {Climate change valuation adjustment (CCVA) using parameterized climate change impacts},
+  author = {Kenyon, Chris and Berrahoui, Mourad},
+  journal = {Risk},
+  year = {2021}
+}
+
+@article{KuzmaEtAl:2023,
+  title={Aqueduct 4.0: Updated decision-relevant global water risk indicators},
+  author={Kuzma, Samantha and Bierkens, Marc F.P. and S. Lakshman, Shivani and Luo, Tianyi and Saccoccia, Liz and Sutanudjaja, Edwin H.
and Van Beek, Rens}, + journal={World Resources Institute}, + year={2023}, + DOI={10.46830/writn.23.00061} +} + +@article{LuoEtAl:2021, + title={Assessing Physical Climate Risks for the European Bank for Reconstruction and Development's Power Generation Project Investment Portfolio}, + author={Luo, Tianyi and Zhou, Lihuan and Falzon, James and Cheng, Yan and Christianson, Giulia and Wu, Yili and Habchi, Amir}, + year={2021} +} + +@article{LuoEtAl:2023, + title={A framework to assess multi-hazard physical climate risk for power generation projects from publicly-accessible sources}, + author={Luo, Tianyi and Zhou, Lihuan and Falzon, James and Cheng, Yan and Christianson, Giulia and Wu, Yili and Habchi, Amir}, + year={2023}, + journal={Communications Earth \& Environment}, + volume={4}, + number={117}, + DOI = {10.1038/s43247-023-00782-w}, + publisher={Nature Publishing Group} +} + +@article{MarziEtAl:2021, + title={Assessing future vulnerability and risk of humanitarian crises using climate change and population projections within the INFORM framework}, + author={Marzi, Sepehr and Mysiak, Jaroslav and Essenfelder, Arthur H and Pal, Jeremy S and Vernaccini, Luca and Mistry, Malcolm N and Alfieri, Lorenzo and Poljansek, Karmen and Marin-Ferrer, Montserrat and Vousdoukas, Michalis}, + journal={Global Environmental Change}, + volume={71}, + pages={102393}, + year={2021}, + publisher={Elsevier} +} + +@article{MaskreyEtAl:2011, + title={Revealing Risk, Redefining Development, Global Assessment Report on Disaster Risk Reduction}, + author={Maskrey, Andrew and Peduzzi, Pascal and Chatenoux, Bruno and Herold, Christian and Dao, Quoc-Hy and Giuliani, Gregory}, + journal={United Nations Strategy for Disaster Reduction}, + pages={17--51}, + year={2011}, + publisher={United Nations} +} + +@article{MazdiyasniEtAl:2019, + title={Heat wave intensity duration frequency curve: A multivariate approach for hazard and attribution analysis}, + author={Mazdiyasni, Omid and Sadegh, Mojtaba and Chiang, Felicia and AghaKouchak, Amir}, + journal={Scientific reports}, + volume={9}, + number={1}, + pages={1--8}, + year={2019}, + publisher={Nature Publishing Group} +} + +@article{MentaschiEtAl:2016, + title={The transformed-stationary approach: a generic and simplified methodology for non-stationary extreme value analysis}, + author={Mentaschi, Lorenzo and Vousdoukas, Michalis and Voukouvalas, Evangelos and Sartini, Ludovica and Feyen, Luc and Besio, Giovanni and Alfieri, Lorenzo}, + journal={Hydrology and Earth System Sciences}, + volume={20}, + number={9}, + pages={3527--3547}, + year={2016}, + publisher={Copernicus GmbH} +} + +@article{Merton:1974, + title = {On the pricing of corporate debt: the risk structure of interest rates}, + author = {Merton, Robert C.}, + journal = {Journal of Finance}, + year = {1974}, + pages = {449-470}, + number = 2, + volume = 29, +} + +@article{MerzEtAl:2004, + title={Estimation uncertainty of direct monetary flood damage to buildings}, + author={Merz, Bruno and Kreibich, Heidi and Thieken, A and Schmidtke, Reinhard}, + journal={Natural Hazards and Earth System Sciences}, + volume={4}, + number={1}, + pages={153--163}, + year={2004}, + publisher={Copernicus GmbH} +} +@book{MitchellEtAl:2017, + title={Natural catastrophe risk management and modelling: A practitioner's guide}, + author={Mitchell-Wallace, Kirsten and Jones, Matthew and Hillier, John and Foote, Matthew}, + year={2017}, + publisher={John Wiley \& Sons} +} + +@book{Nelsen:2007, + author = {Roger~B. 
Nelsen},
+  title = {An Introduction to Copulas},
+  year = {2007},
+  edition = {2nd},
+  publisher = {Springer},
+  address = {New York~(NY)}
+}
+
+@article{NeidellEtAl:2014,
+  title={Temperature and the Allocation of Time: Implications for Climate Change},
+  author={Neidell, Matthew and Graff Zivin, Joshua},
+  journal={Journal of Labor Economics},
+  volume={32},
+  number={1},
+  pages={1--26},
+  year={2014},
+  publisher={The University of Chicago Press on behalf of the Society of Labor Economists and the NORC at the University of Chicago}
+}
+
+@article{NeidellEtAl:2021,
+  title={Temperature and work: Time allocated to work under varying climate and labor market conditions},
+  author={Neidell, Matthew and Graff Zivin, Joshua and Sheahan, Megan and Willwerth, Jacqueline and Fant, Charles and Sarofim, Marcus and Martinich, Jeremy},
+  journal={PloS one},
+  volume={16},
+  number={8},
+  pages={e0254224},
+  year={2021},
+  publisher={Public Library of Science San Francisco, CA USA}
+}
+
+@misc{OasisFinancialModule,
+  author = {Taylor, Peter and Carter, Johanna},
+  title = {Oasis Financial Module},
+  year = {2020}
+}
+
+@misc{OasisLMF,
+  author = {Oasis},
+  title = {Oasis loss modelling framework: open source catastrophe modelling platform},
+  year = {2021},
+  url = {https://oasislmf.org/}
+}
+
+@misc{OSC,
+  author = {OS-C},
+  title = {{OS-Climate} {(OS-C)} platform},
+  year = {2021},
+  url = {http://www.os-climate.org/}
+}
+
+@book{PortnerEtAl:2022,
+  title={Climate change 2022: Impacts, adaptation and vulnerability},
+  author={P{\"o}rtner, Hans-O and Roberts, Debra C and Adams, Helen and Adler, Carolina and Aldunce, Paulina and Ali, Elham and Begum, Rawshan Ara and Betts, Richard and Kerr, Rachel Bezner and Biesbroek, Robbert and others},
+  year={2022},
+  publisher={IPCC Geneva, Switzerland:}
+}
+
+@misc{PaprotnyEtAl:2016,
+  doi = {10.4121/uuid:968098ce-afe1-4b21-a509-dedaf9bf4bd5},
+  url = {https://data.4tu.nl/articles/dataset/Pan-European_data_sets_of_river_flood_probability_of_occurrence_under_present_and_future_climate/12708122/1},
+  author = {Paprotny, Dominik and Morales Nápoles, O. (Oswaldo)},
+  keywords = {Climate, Climate change, Earth system modelling, Floods, Hydraulic engineering, Rivers},
+  title = {Pan-European data sets of river flood probability of occurrence under present and future climate},
+  publisher = {TU Delft},
+  year = {2016},
+  copyright = {4TU General Terms of Use},
+}
+
+@article{PoljansekEtAl:2022,
+  title={INFORM Climate Change Risk Index},
+  author={Poljansek, K. and Marzi, S. and Galimberti, L. and Dalla Valle, D. and Pal, J. and Essenfelder, A.H. and Mysiak, J.
and Corbane, C.}, + journal={Publications Office of the European Union}, + year={2022}, + publisher={Publications Office of the European Union} +} + +@article{RangerEtAl:2022, + title={Assessing Financial Risks from Physical Climate Shocks}, + author={Ranger, Nicola Ann and Mahul, Olivier and Monasterolo, Irene}, + year={2022}, + publisher={Washington, DC: World Bank} +} + +@article{RaschkeEtAl:2022, + title={About the return period of a catastrophe}, + author={Raschke, Mathias}, + journal={Natural Hazards and Earth System Sciences}, + volume={22}, + number={1}, + pages={245--263}, + year={2022}, + publisher={Copernicus GmbH} +} + +@book{ReisingerEtAl:2020, + title={The Concept of Risk in the IPCC Sixth Assessment Report: A Summary of Cross-Working Group Discussions}, + author={Reisinger, Andy and Howden, Mark and Vera, Carolina and others}, + year={2020}, + publisher={IPCC Geneva, Switzerland:} +} + +@techreport{RichtersEtAl:2022, + author = {Richters, Oliver and others}, + title = "Climate Scenarios Database: Technical Documentation v3.1", + institution = "Network for Greening the Financial System", + year = "2022" +} + +@article{ScawthornEtAl:2006, + title={{HAZUS-MH} flood loss estimation methodology. I: Overview and flood hazard characterization}, + author={Scawthorn, Charles and Blais, Neil and Seligson, Hope and Tate, Eric and Mifflin, Edward and Thomas, Will and Murphy, James and Jones, Christopher}, + journal={Natural Hazards Review}, + volume={7}, + number={2}, + pages={60--71}, + year={2006}, + publisher={American Society of Civil Engineers} +} + +@article{SmirnovEtAl:2016, + title={The relative importance of climate change and population growth for exposure to future extreme droughts}, + author={Smirnov, Oleg and Zhang, Minghua and Xiao, Tingyin and Orbell, John and Lobben, Amy and Gordon, Josef}, + journal={Climatic Change}, + volume={138}, + pages={41--53}, + year={2016}, + publisher={Springer} +} + +@article{StottEtAl:2016, + title={Attribution of extreme weather and climate-related events}, + author={Stott, Peter A and Christidis, Nikolaos and Otto, Friederike EL and Sun, Ying and Vanderlinden, Jean-Paul and van Oldenborgh, Geert Jan and Vautard, Robert and von Storch, Hans and Walton, Peter and Yiou, Pascal and others}, + journal={Wiley Interdisciplinary Reviews: Climate Change}, + volume={7}, + number={1}, + pages={23--41}, + year={2016}, + publisher={Wiley Online Library} +} + +@article{TanguyEtAl:2018, + title={Historical gridded reconstruction of potential evapotranspiration for the UK}, + author={Tanguy, Maliko and Prudhomme, Christel and Smith, Katie and Hannaford, Jamie}, + journal={Earth System Science Data}, + volume={10}, + number={2}, + pages={951--968}, + year={2018}, + publisher={Copernicus GmbH} +} + +@inproceedings{Taylor:2015, + author = {Taylor, Peter}, + year={2015}, + title={Calculating financial loss from catastrophes}, + booktitle={SECED 2015 Conference: Earthquake risk and engineering towards a resilient world}, + publisher={Society for earthquake and civil engineering dynamics}, + url = {http://seced.org.uk/images/newsletters/TAYLOR.pdf} +} + +@article{ThrasherEtAl:2022, + title={NASA Global Daily Downscaled Projections, {CMIP6}}, + author={Thrasher, Bridget and Wang, Weile and Michaelis, Andrew and Melton, Forrest and Lee, Tsengdar and Nemani, Ramakrishna}, + journal={Scientific Data}, + volume={9}, + number={1}, + pages={1--6}, + year={2022}, + publisher={Nature Publishing Group} +} + +@article{VicenteSerranoEtAl:2010, + title={A multiscalar 
drought index sensitive to Global Warming: the Standardized Precipitation Evapotranspiration Index}, + author={Vicente-Serrano, Sergio and Begueria, Santiago and Lopez-Moreno, Juan I.}, + journal={Journal of Climate}, + volume={23}, + number={7}, + pages={1696-1718}, + year={2010}, + publisher={American Meteorological Society}, + doi = {10.1175/2009JCLI2909.1} +} + +@article{WardEtAl:2011, + title={How are flood risk estimates affected by the choice of return-periods?}, + author={Ward, Philip J and De Moel, H and Aerts, JCJH}, + journal={Natural Hazards and Earth System Sciences}, + volume={11}, + number={12}, + pages={3181--3195}, + year={2011}, + publisher={Copernicus GmbH} +} + +@article{WardEtAl:2013, + title={Assessing flood risk at the global scale: model setup, results, and sensitivity}, + author={Ward, Philip J and Jongman, Brenden and Weiland, Frederiek Sperna and Bouwman, Arno and van Beek, Rens and Bierkens, Marc FP and Ligtvoet, Willem and Winsemius, Hessel C}, + journal={Environmental research letters}, + volume={8}, + number={4}, + pages={044019}, + year={2013}, + publisher={IOP Publishing} +} + +@article{WardEtAl:2020, + title={Aqueduct floods methodology}, + author={Ward, Philip J and Winsemius, Hessel C and Kuzma, Samantha and Bierkens, Marc FP and Bouwman, Arno and De Moel, Hans and Loaiza, Andr{\'e}s D{\'\i}az and Eilander, Dirk and Englhardt, Johanna and Erkens, Gilles and others}, + journal={World Resources Institute}, + pages={1--28}, + year={2020} +} + +@article{WoetzelEtAl:2020, + title={Climate risk and response: Physical hazards and socioeconomic impacts}, + author={Woetzel, Jonathan and Pinner, Dickon and Samandari, Hamid}, + year={2020}, + publisher={McKinsey \& Company} +} + +@article{XuEtAl:2021, + title={Bias-corrected {CMIP6} global dataset for dynamical downscaling of the historical and future climate (1979--2100)}, + author={Xu, Zhongfeng and Han, Ying and Tam, Chi-Yung and Yang, Zong-Liang and Fu, Congbin}, + journal={Scientific Data}, + volume={8}, + number={1}, + pages={1--11}, + year={2021}, + publisher={Nature Publishing Group} +} + +@article{ZhangAndShindell:2021, + title={Costs from labor losses due to extreme heat in the USA attributable to climate change}, + author={Zhang, Yuqiang and Shindell, Drew T}, + journal={Climatic change}, + volume={164}, + number={3}, + pages={1--18}, + year={2021}, + publisher={Springer} +} diff --git a/methodology/literature/Literature_review.xlsx b/methodology/literature/Literature_review.xlsx new file mode 100644 index 00000000..2da76dea Binary files /dev/null and b/methodology/literature/Literature_review.xlsx differ diff --git a/methodology/plots/CostsByRCP.png b/methodology/plots/CostsByRCP.png new file mode 100644 index 00000000..90c3c8ed Binary files /dev/null and b/methodology/plots/CostsByRCP.png differ diff --git a/methodology/plots/GZL-Vulnerability.png b/methodology/plots/GZL-Vulnerability.png new file mode 100644 index 00000000..4666fc55 Binary files /dev/null and b/methodology/plots/GZL-Vulnerability.png differ diff --git a/methodology/plots/WBGT.png b/methodology/plots/WBGT.png new file mode 100644 index 00000000..7c93d61c Binary files /dev/null and b/methodology/plots/WBGT.png differ diff --git a/methodology/plots/economic cost.png b/methodology/plots/economic cost.png new file mode 100644 index 00000000..9ccf0e5e Binary files /dev/null and b/methodology/plots/economic cost.png differ diff --git a/docs/methodology/plots/fig_intensity.pdf b/methodology/plots/fig_intensity.pdf similarity index 100% rename 
from docs/methodology/plots/fig_intensity.pdf
rename to methodology/plots/fig_intensity.pdf
diff --git a/methodology/plots/top_level_view.pdf b/methodology/plots/top_level_view.pdf
new file mode 100644
index 00000000..86f7bd61
Binary files /dev/null and b/methodology/plots/top_level_view.pdf differ
diff --git a/methodology/plots/top_level_view.pptx b/methodology/plots/top_level_view.pptx
new file mode 100644
index 00000000..39e81889
Binary files /dev/null and b/methodology/plots/top_level_view.pptx differ
diff --git a/docs/methodology/plots/vulnerability_lagace_2008.png b/methodology/plots/vulnerability_lagace_2008.png
similarity index 100%
rename from docs/methodology/plots/vulnerability_lagace_2008.png
rename to methodology/plots/vulnerability_lagace_2008.png
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 3a4597c4..00000000
--- a/mypy.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[mypy-click]
-ignore_missing_imports = true
diff --git a/notebooks/examples/1_hazard_indicators.ipynb b/notebooks/examples/1_hazard_indicators.ipynb
new file mode 100644
index 00000000..96ab22c1
--- /dev/null
+++ b/notebooks/examples/1_hazard_indicators.ipynb
@@ -0,0 +1,1393 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Hazard indicators\n",
+ "Physrisk is primarily designed to run 'bottom-up' calculations that model the impact of climate hazards on large numbers of individual assets (including natural assets) and operations. These calculations can be used to assess financial risks or socio-economic impacts. To do this, physrisk collects:\n",
+ "\n",
+ "- hazard indicators and\n",
+ "- models of vulnerability of assets/operations to hazards.\n",
+ "\n",
+ "Hazard indicators – that is, quantities that provide the information about hazards needed by the vulnerability models – are collected from a variety of sources. OS-Climate consolidates public domain hazard indicators and also provides the means to combine these with commercial data.\n",
+ "\n",
+ "We start with public domain indicators: which ones are available and how can these be obtained?"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Requesting hazard indicators via the physrisk API\n",
+ "\n",
+ "Using physrisk, hazard indicators can be obtained directly from source, as long as the user has the necessary API keys. Alternatively, the API – which is simply a hosted instance of physrisk – can be used, although the number of requests is restricted to 30,000 latitudes and longitudes.\n",
+ "\n",
+ "We give a walk-through of the hazard indicator API below, as well as examples of how to obtain the inventory of available hazard indicators."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "shellscript"
+ }
+ },
+ "outputs": [],
+ "source": [
+ "# UNCOMMENT TO RUN THIS NOTEBOOK\n",
+ "# pip install nbformat pandas plotly requests\n",
+ "\n",
+ "# UNCOMMENT TO RUN THE SECOND PART OF THIS NOTEBOOK:\n",
+ "# pip install physrisk-lib\n",
+ "\n",
+ "import plotly.graph_objs as go\n",
+ "import pprint as pp\n",
+ "import requests\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "from IPython.display import Markdown, display\n",
+ "from physrisk.container import Container\n",
+ "from plotly.subplots import make_subplots"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "base_url = \"https://physrisk-api2-sandbox.apps.odh-cl1.apps.os-climate.org/api/\"\n",
+ "\n",
+ "request = {\n",
+ "    \"items\": [\n",
+ "        {\n",
+ "            \"longitudes\": [69.4787, 68.71, 20.1047, 19.8936, 19.6359, 0.5407, 6.9366, 6.935, 13.7319, 13.7319],\n",
+ "            \"latitudes\": [34.556, 35.9416, 39.9116, 41.6796, 42.0137, 35.7835, 36.8789, 36.88, -12.4706, -12.4706],\n",
+ "            \"request_item_id\": \"my_flood_request\",\n",
+ "            \"hazard_type\": \"RiverineInundation\",\n",
+ "            \"indicator_id\": \"flood_depth\",\n",
+ "            \"scenario\": \"historical\",\n",
+ "            \"year\": 1980,\n",
+ "        },\n",
+ "        {\n",
+ "            \"longitudes\": [69.4787, 68.71, 20.1047, 19.8936, 19.6359, 0.5407, 6.9366, 6.935, 13.7319, 13.7319],\n",
+ "            \"latitudes\": [34.556, 35.9416, 39.9116, 41.6796, 42.0137, 35.7835, 36.8789, 36.88, -12.4706, -12.4706],\n",
+ "            \"request_item_id\": \"my_flood_request\",\n",
+ "            \"hazard_type\": \"RiverineInundation\",\n",
+ "            \"indicator_id\": \"flood_depth\",\n",
+ "            \"scenario\": \"rcp8p5\",\n",
+ "            \"indicator_model_gcm\": \"NorESM1-M\", # optional: can specify\n",
+ "            \"year\": 2050,\n",
+ "        },\n",
+ "        {\n",
+ "            \"longitudes\": [114.089],\n",
+ "            \"latitudes\": [22.4781],\n",
+ "            \"request_item_id\": \"my_wind_request_histo\",\n",
+ "            \"hazard_type\": \"Wind\",\n",
+ "            \"indicator_id\": \"max_speed\",\n",
+ "            \"scenario\": \"historical\",\n",
+ "            \"path\": \"wind/iris/v1/max_speed_{scenario}_{year}\",\n",
+ "            # if path is specified then that particular data array is used\n",
+ "            \"year\": 2010,\n",
+ "        },\n",
+ "        {\n",
+ "            \"longitudes\": [114.089],\n",
+ "            \"latitudes\": [22.4781],\n",
+ "            \"request_item_id\": \"my_wind_request_ssp585\",\n",
+ "            \"hazard_type\": \"Wind\",\n",
+ "            \"indicator_id\": \"max_speed\",\n",
+ "            \"scenario\": \"ssp585\",\n",
+ "            \"path\": \"wind/iris/v1/max_speed_{scenario}_{year}\",\n",
+ "            \"year\": 2050,\n",
+ "        },\n",
+ "        {\n",
+ "            \"longitudes\": [114.089],\n",
+ "            \"latitudes\": [22.4781],\n",
+ "            \"request_item_id\": \"my_fire_request\",\n",
+ "            \"hazard_type\": \"Fire\",\n",
+ "            \"indicator_id\": \"fire_probability\",\n",
+ "            \"scenario\": \"ssp585\",\n",
+ "            \"path\": \"fire/jupiter/v1/fire_probability_{scenario}_{year}\",\n",
+ "            \"year\": 2040,\n",
+ "        },\n",
+ "    ]\n",
+ "}\n",
+ "url = base_url + \"get_hazard_data\"\n",
+ "response = requests.post(url, json=request).json()\n",
+ "flood_results_baseline, flood_results_rcp585 = (\n",
+ "    response[\"items\"][0][\"intensity_curve_set\"],\n",
+ "    response[\"items\"][1][\"intensity_curve_set\"],\n",
+ ")\n",
+ "wind_results_baseline, wind_results_ssp585 = (\n",
+ "    response[\"items\"][2][\"intensity_curve_set\"],\n",
+ "    response[\"items\"][3][\"intensity_curve_set\"],\n",
+ ")\n",
+ "fire_results = response[\"items\"][4][\"intensity_curve_set\"]"
+ ]
+ },
+ {
+ "cell_type":
"code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "name": "baseline flood", + "type": "scatter", + "x": [ + 2, + 5, + 10, + 25, + 50, + 100, + 250, + 500, + 1000 + ], + "xaxis": "x", + "y": [ + 0, + 0.24315482378005981, + 0.40180158615112305, + 0.5969884395599365, + 0.7376832962036133, + 0.8727496862411499, + 1.0505859851837158, + 1.1848692893981934, + 1.3172094821929932 + ], + "yaxis": "y" + }, + { + "name": "flood RCP 8.5 2050", + "type": "scatter", + "x": [ + 2, + 5, + 10, + 25, + 50, + 100, + 250, + 500, + 1000 + ], + "xaxis": "x", + "y": [ + 0.03016260266304016, + 0.3154944181442261, + 0.5021084547042847, + 0.7294760942459106, + 0.8911455869674683, + 1.0516209602355957, + 1.2618706226348877, + 1.4183039665222168, + 1.571667194366455 + ], + "yaxis": "y" + }, + { + "name": "baseline wind", + "type": "scatter", + "x": [ + 10, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 1000 + ], + "xaxis": "x2", + "y": [ + 34.16875076293945, + 40.48125076293945, + 44.493751525878906, + 46.98125076293945, + 48.53125, + 49.849998474121094, + 51.20000076293945, + 52.243751525878906, + 52.79999923706055, + 53.51874923706055, + 57.400001525878906, + 59.875, + 60.95624923706055, + 61.79999923706055, + 62.431251525878906, + 62.8125, + 62.9375, + 63.58124923706055, + 64.4625015258789 + ], + "yaxis": "y2" + }, + { + "name": "wind SSP585 2050", + "type": "scatter", + "x": [ + 10, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 1000 + ], + "xaxis": "x2", + "y": [ + 37.224998474121094, + 44.76874923706055, + 48.63750076293945, + 51.66875076293945, + 53.5, + 55.23749923706055, + 56.20000076293945, + 57.23125076293945, + 58.368751525878906, + 59.17499923706055, + 63.11249923706055, + 65.3125, + 66.2125015258789, + 67.1624984741211, + 67.7125015258789, + 67.9749984741211, + 68.23750305175781, + 69.07499694824219, + 71.19999694824219 + ], + "yaxis": "y2" + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 
0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + 
"outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + 
"yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "xaxis": { + "anchor": "y", + "domain": [ + 0, + 0.45 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Return period (years)" + }, + "type": "log" + }, + "xaxis2": { + "anchor": "y2", + "domain": [ + 0.55, + 1 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Return period (years)" + }, + "type": "log" + }, + "yaxis": { + "anchor": "x", + "domain": [ + 0, + 1 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Flood depth (m)" + } + }, + "yaxis2": { + "anchor": "x2", + "domain": [ + 0, + 1 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Max (1 minute) wind speed (m/s)" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig1 = make_subplots(rows=1, cols=2)\n", + "fig1.add_trace(\n", + " go.Scatter(\n", + " x=flood_results_baseline[0][\"index_values\"], y=flood_results_baseline[0][\"intensities\"], name=\"baseline flood\"\n", + " ),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig1.add_trace(\n", + " go.Scatter(\n", + " x=flood_results_rcp585[0][\"index_values\"], y=flood_results_rcp585[0][\"intensities\"], name=\"flood RCP 8.5 2050\"\n", + " ),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig1.update_xaxes(title=\"Return period (years)\", title_font={\"size\": 14}, row=1, col=1, type=\"log\")\n", + "fig1.update_yaxes(title=\"Flood depth (m)\", title_font={\"size\": 14}, row=1, col=1)\n", + "fig1.add_trace(\n", + " go.Scatter(\n", + " x=wind_results_baseline[0][\"index_values\"], y=wind_results_baseline[0][\"intensities\"], name=\"baseline wind\"\n", + " ),\n", + " row=1,\n", + " col=2,\n", + ")\n", + "fig1.add_trace(\n", + " go.Scatter(\n", + " x=wind_results_ssp585[0][\"index_values\"], y=wind_results_ssp585[0][\"intensities\"], name=\"wind SSP585 2050\"\n", + " ),\n", + " row=1,\n", + " col=2,\n", + ")\n", + "fig1.update_xaxes(title=\"Return period (years)\", title_font={\"size\": 14}, row=1, col=2, type=\"log\")\n", + "fig1.update_yaxes(title=\"Max (1 minute) wind speed (m/s)\", title_font={\"size\": 14}, row=1, col=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Accessing the inventory of hazard indicators\n", + "\n", + "Hazard indicator data are 3 dimensional arrays, the dimensions being ('index', 'spatial y', 'spatial x'). In the case of a EPSG:4326 co-ordinate reference system (CRS), for example, this would be ('index', 'latitude', 'longitude'). 
The data can be in any CRS; generally the CRS of the original data set is retained. API requests specify location via latitude and longitude and physrisk performs the necessary conversion.\n",
+ "\n",
+ "The 'index' depends on the nature of the hazards. For acute hazards – potential occurrence of events such as flood or tropical cyclone – 'index' will be the return period of the event. For chronic hazards, its definition can vary. Chronic hazard indicators may be threshold-based, for example 'mean degree days per year above threshold' or 'mean days per year above temperature threshold'; in these cases 'index' gives the thresholds. Chronic hazards may comprise just a single value (e.g. annual probability of fire), in which case there is just a single element in 'index'.\n",
+ "\n",
+ "The inventory is a list of 'hazard resources' and each resource corresponds to a single array. This means that there are separate entries for the same hazard indicators inferred from different (e.g. CMIP6 or CORDEX) climate models.\n",
+ "\n",
+ "For the requests above, the 'path' of the array can be specified explicitly. This is the unique identifier for the hazard resource. But path can also be omitted, in which case physrisk applies a set of rules to select a particular array."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "url = base_url + \"get_hazard_data_availability\"\n",
+ "hazard_resources = requests.post(url, json=request).json()[\"models\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Hazards: {'CoastalInundation', 'Precipitation', 'CombinedInundation', 'RiverineInundation', 'Wind', 'Drought', 'ChronicHeat', 'Hail', 'WaterRisk', 'Fire'}\n",
+ "119 hazard resources in the inventory, of which 2 are Wind hazard resources.\n",
+ "The resource 'path' is a unique identifier. For the IRIS Wind resource, for example, this is: 'wind/iris/v1/max_speed_{scenario}_{year}'.\n",
+ "Where resources are multi-dimensional arrays (as opposed to an external API), this is also the path of the array.\n",
+ "The available climate scenarios and years for the IRIS resource are:\n",
+ "[{'id': 'historical', 'years': [2010]},\n",
+ " {'id': 'ssp119', 'years': [2050]},\n",
+ " {'id': 'ssp245', 'years': [2050]},\n",
+ " {'id': 'ssp585', 'years': [2050]}]\n",
+ "A description is provided in markdown:\n"
+ ]
+ },
+ {
+ "data": {
+ "text/markdown": [
+ "Assessing tropical cyclone risk on a global scale given the infrequency of landfalling tropical cyclones and the short period of reliable observations remains a challenge. Synthetic tropical cyclone datasets can help overcome these problems. Here we present a new global dataset created by IRIS, the ImpeRIal college Storm Model. IRIS is novel because, unlike other synthetic TC models, it only simulates the decay from the point of lifetime maximum intensity. This minimises the bias in the dataset. It takes input from 42 years of observed tropical cyclones and creates a 10,000 year synthetic dataset which is then validated against the observations. IRIS captures important statistical characteristics of the observed data. The return periods of the landfall maximum wind speed (1 minute sustained in m/s) are realistic globally.
Climate model projections are used to adjust the life-time maximum intensity.\n", + "https://www.imperial.ac.uk/grantham/research/climate-science/modelling-tropical-cyclones/\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The whole resource:\n", + "{'description': 'Assessing tropical cyclone risk on a global scale given the '\n", + " 'infrequency of landfalling tropical cyclones and the short '\n", + " 'period of reliable observations remains a challenge. '\n", + " 'Synthetic tropical cyclone datasets can help overcome these '\n", + " 'problems. Here we present a new global dataset created by '\n", + " 'IRIS, the ImpeRIal college Storm Model. IRIS is novel '\n", + " 'because, unlike other synthetic TC models, it only simulates '\n", + " 'the decay from the point of lifetime maximum intensity. This '\n", + " 'minimises the bias in the dataset. It takes input from 42 '\n", + " 'years of observed tropical cyclones and creates a 10,000 year '\n", + " 'synthetic dataset which is then validated against the '\n", + " 'observations. IRIS captures important statistical '\n", + " 'characteristics of the observed data. The return periods of '\n", + " 'the landfall maximum wind speed (1 minute sustained in m/s) '\n", + " 'are realistic globally. Climate model projections are used to '\n", + " 'adjust the life-time maximum intensity.\\n'\n", + " 'https://www.imperial.ac.uk/grantham/research/climate-science/modelling-tropical-cyclones/\\n',\n", + " 'display_groups': [],\n", + " 'display_name': 'Max wind speed (IRIS)',\n", + " 'group_id': 'iris_osc',\n", + " 'hazard_type': 'Wind',\n", + " 'indicator_id': 'max_speed',\n", + " 'indicator_model_gcm': 'combined',\n", + " 'indicator_model_id': None,\n", + " 'map': {'bounds': [[-180.0, 60.0],\n", + " [180.0, 60.0],\n", + " [180.0, -60.0],\n", + " [-180.0, -60.0]],\n", + " 'colormap': {'max_index': 255,\n", + " 'max_value': 120.0,\n", + " 'min_index': 1,\n", + " 'min_value': 0.0,\n", + " 'name': 'heating',\n", + " 'nodata_index': 0,\n", + " 'units': 'm/s'},\n", + " 'path': 'wind/iris/v1/max_speed_{scenario}_{year}_map',\n", + " 'source': 'map_array_pyramid'},\n", + " 'params': {},\n", + " 'path': 'wind/iris/v1/max_speed_{scenario}_{year}',\n", + " 'scenarios': [{'id': 'historical', 'years': [2010]},\n", + " {'id': 'ssp119', 'years': [2050]},\n", + " {'id': 'ssp245', 'years': [2050]},\n", + " {'id': 'ssp585', 'years': [2050]}],\n", + " 'units': 'm/s'}\n" + ] + } + ], + "source": [ + "def printmd(string):\n", + " display(Markdown(string))\n", + "\n", + "\n", + "hazard_types = set(r[\"hazard_type\"] for r in hazard_resources)\n", + "print(f\"Hazards: {hazard_types}\")\n", + "wind_resources = [r for r in hazard_resources if r[\"hazard_type\"] == \"Wind\"]\n", + "print(f\"{len(hazard_resources)} hazard resources in the inventory, of which {len(wind_resources)} are Wind resources.\")\n", + "iris_model_resource = next(r for r in wind_resources if \"iris\" in r[\"path\"])\n", + "# pp.pprint(iris_model_resource[0])\n", + "path = iris_model_resource[\"path\"]\n", + "print(f\"The resource 'path' is a unique identifier. 
For the IRIS Wind resource, for example, this is: '{path}'.\")\n", + "print(\n", + " \"Where resources are multi-dimensional arrays (as opposed to an external API), this is also the path of the array.\"\n", + ")\n", + "print(\"The available climate scenarios and years for the IRIS resource are:\")\n", + "pp.pprint(iris_model_resource[\"scenarios\"])\n", + "print(\"A description is provided in markdown:\")\n", + "printmd(iris_model_resource[\"description\"])\n", + "print(\"The whole resource:\")\n", + "pp.pprint(iris_model_resource)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Requesting hazard indicators through physrisk directly\n", + "\n", + "As mentioned above, the API is simply using a hosted version of physrisk and it is possible to run the same calculations using physrisk directly, as long as the necessary API keys are present in a credentials.env file.\n", + "\n", + "The requests above can be run in physrisk using a Requester object." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(\"../../credentials.env\")\n", + "# the container is a dependency injection container,\n", + "# which allows the calculation to be configured to a particular use-case\n", + "container = Container()\n", + "# the requester is used to run calculations using the API.\n", + "# At this point, we can of course debug into the code and modify as required.\n", + "requester = container.requester()\n", + "result = requester.get(request_id=\"get_hazard_data\", request_dict=request)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/examples/2_asset_level_impacts.ipynb b/notebooks/examples/2_asset_level_impacts.ipynb new file mode 100644 index 00000000..a0067be6 --- /dev/null +++ b/notebooks/examples/2_asset_level_impacts.ipynb @@ -0,0 +1,1238 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Asset-level impact calculations\n", + "Here 'asset-level' impacts means the impact of hazards on each asset in a portfolio, taken in isolation. 
This is, as opposed to portfolio-level impacts where the asset impacts are aggregated together – a topic for another notebook.\n", + "\n", + "### Obtaining impact distributions for a portfolio of assets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "# UNCOMMENT TO RUN THIS NOTEBOOK\n", + "# pip install nbformat pandas plotly requests\n", + "\n", + "import plotly.graph_objs as go\n", + "import pprint as pp\n", + "import requests\n", + "\n", + "from dotenv import load_dotenv\n", + "from physrisk.container import Container\n", + "from plotly.subplots import make_subplots\n", + "from typing import NamedTuple" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "base_url = \"https://physrisk-api2-sandbox.apps.odh-cl1.apps.os-climate.org/api/\"\n", + "\n", + "portfolio = {\n", + " \"items\": [\n", + " {\n", + " \"asset_class\": \"RealEstateAsset\",\n", + " \"type\": \"Building/Industrial\",\n", + " \"location\": \"Asia\",\n", + " \"latitude\": 24.0426,\n", + " \"longitude\": 91.0158,\n", + " },\n", + " {\n", + " \"asset_class\": \"RealEstateAsset\",\n", + " \"type\": \"Building/Industrial\",\n", + " \"location\": \"Asia\",\n", + " \"latitude\": 22.6588,\n", + " \"longitude\": 90.3373,\n", + " },\n", + " ]\n", + "}\n", + "request = {\n", + " \"assets\": portfolio,\n", + " \"include_asset_level\": True,\n", + " \"include_calc_details\": True,\n", + " \"include_measures\": True,\n", + " \"years\": [2050],\n", + " \"scenario\": \"ssp585\",\n", + "}\n", + "\n", + "url = base_url + \"get_asset_impact\"\n", + "response = requests.post(url, json=request).json()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "asset0_impacts = response[\"asset_impacts\"][1][\"impacts\"]\n", + "\n", + "\n", + "class Key(NamedTuple):\n", + " hazard_type: str\n", + " scenario_id: str\n", + " year: str\n", + "\n", + "\n", + "asset0_impact_dict = {}\n", + "for i in asset0_impacts:\n", + " key = i[\"key\"]\n", + " asset0_impact_dict[Key(key[\"hazard_type\"], key[\"scenario_id\"], key[\"year\"])] = i\n", + "\n", + "hazard_types = set(k.hazard_type for k in asset0_impact_dict.keys())\n", + "wind_impact_histo = asset0_impact_dict[Key(\"Wind\", \"historical\", \"None\")]\n", + "wind_impact_ssp585 = asset0_impact_dict[Key(\"Wind\", \"ssp585\", \"2050\")]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "name": "baseline wind", + "type": "scatter", + "x": [ + 0.04900000000000001, + 0.03233333333333334, + 0.024000000000000004, + 0.019000000000000003, + 0.01566666666666667, + 0.013285714285714286, + 0.011500000000000002, + 0.010111111111111112, + 0.009000000000000001, + 0.004, + 0.0023333333333333335, + 0.0015, + 0.001, + 0.0006666666666666668, + 0.00042857142857142855, + 0.00025, + 0.00011111111111111109, + 0 + ], + "xaxis": "x", + "y": [ + 0.0004863306300282079, + 0.0032202045740822743, + 0.010735442202008984, + 0.016996661463623993, + 0.02155590325614915, + 0.028485761901396834, + 0.041850456280047466, + 0.05007404596694858, + 0.059701685162724984, + 0.11332636752166708, + 0.16591577789151649, + 0.20183716992148243, + 0.23152412925689597, + 0.26220139356582195, + 0.279007730852783, + 0.29570799814905474, + 
0.307745842586294, + 0.3217969471882758 + ], + "yaxis": "y" + }, + { + "name": "wind SSP585", + "type": "scatter", + "x": [ + 0.099, + 0.04900000000000001, + 0.03233333333333334, + 0.024000000000000004, + 0.019000000000000003, + 0.01566666666666667, + 0.013285714285714286, + 0.011500000000000002, + 0.010111111111111112, + 0.009000000000000001, + 0.004, + 0.0023333333333333335, + 0.0015, + 0.001, + 0.0006666666666666668, + 0.00042857142857142855, + 0.00025, + 0.00011111111111111109, + 0 + ], + "xaxis": "x", + "y": [ + 7.682019883172996e-05, + 0.0005583812056975454, + 0.008825685603841983, + 0.017046757981196484, + 0.023436186553044146, + 0.0396124241720199, + 0.0560279993842828, + 0.07006830117541162, + 0.08194450719972869, + 0.09264894302678975, + 0.1933473153228637, + 0.25481157725696824, + 0.3041005921318781, + 0.3238739140242556, + 0.34008684145476864, + 0.3628907252701525, + 0.3693335717289997, + 0.3823038619319513, + 0.39510456950344713 + ], + "yaxis": "y" + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 
0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { 
+ "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "xaxis": { + "anchor": "y", + "autorange": 
"reversed", + "domain": [ + 0, + 0.45 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Annual exceedance probability" + }, + "type": "log" + }, + "xaxis2": { + "anchor": "y2", + "domain": [ + 0.55, + 1 + ] + }, + "yaxis": { + "anchor": "x", + "domain": [ + 0, + 1 + ], + "title": { + "font": { + "size": 14 + }, + "text": "Damage as fraction of insurable value" + } + }, + "yaxis2": { + "anchor": "x2", + "domain": [ + 0, + 1 + ] + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "exceedance_histo = wind_impact_histo[\"impact_exceedance\"]\n", + "exceedance_ssp585 = wind_impact_ssp585[\"impact_exceedance\"]\n", + "\n", + "fig1 = make_subplots(rows=1, cols=2)\n", + "fig1.add_trace(\n", + " go.Scatter(x=exceedance_histo[\"exceed_probabilities\"], y=exceedance_histo[\"values\"], name=\"baseline wind\"),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig1.add_trace(\n", + " go.Scatter(x=exceedance_ssp585[\"exceed_probabilities\"], y=exceedance_ssp585[\"values\"], name=\"wind SSP585\"),\n", + " row=1,\n", + " col=1,\n", + ")\n", + "fig1.update_xaxes(\n", + " title=\"Annual exceedance probability\", title_font={\"size\": 14}, row=1, col=1, type=\"log\", autorange=\"reversed\"\n", + ")\n", + "fig1.update_yaxes(title=\"Damage as fraction of insurable value\", title_font={\"size\": 14}, row=1, col=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Making impacts actionable: score-based risk measures\n", + "\n", + "The main outputs of impact calculations are probability distributions of impacts, such as damage, but how do analysts make use of this data? Often it is desirable to categorize assets within a portfolio, for example identifying which are vulnerable to the physical effects of climate change and which are not. It may therefore be desirable to assign scores to assets, the score being meaningful for a particular analysis or work-flow. Two important points:\n", + "\n", + "1) These are _risk_ scores, not exposure scores. Pure exposure scores take into account only the exposure of an asset to climate hazards, typically as a result of its location, but do not say anything about vulnerability. This makes exposure scores unsuitable for many use-cases. For example an asset that is exposed to a hazard that becomes more intense under certain climate change scenarios may still be low risk (under that scenario) if it is not vulnerable to the hazard in question.\n", + "\n", + "2) The definition of scores are in principle unique to a particular type of user/analyst. The scores that are most useful for the manager of a portfolio of assets may be different to those that are useful to a lender providing finance for a new project. 
\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The scores for the assets are: [1, 3].\n" + ] + } + ], + "source": [ + "asset_measures = response[\"risk_measures\"][\"measures_for_assets\"]\n", + "\n", + "\n", + "class Key(NamedTuple):\n", + " hazard_type: str\n", + " measure_id: str\n", + " scenario_id: str\n", + " year: str\n", + "\n", + "\n", + "asset_measures_dict = {}\n", + "for i in asset_measures:\n", + " key = i[\"key\"]\n", + " asset_measures_dict[Key(key[\"hazard_type\"], key[\"measure_id\"], key[\"scenario_id\"], key[\"year\"])] = i\n", + "\n", + "wind_impact_scores = asset_measures_dict[Key(\"Wind\", \"measure_set_0\", \"ssp585\", \"2050\")]\n", + "\n", + "scores = wind_impact_scores[\"scores\"]\n", + "print(f\"The scores for the assets are: {scores}.\")\n", + "print(f\"The scores for the assets are: {scores}.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'hazard_types': ['RiverineInundation', 'CoastalInundation', 'Wind'],\n", + " 'underlying_measures': [{'description': '1-in-100 year loss as fraction of '\n", + " 'asset insured value.',\n", + " 'label': '1-in-100 year annual loss.',\n", + " 'measure_id': 'measures_0'}],\n", + " 'values': [{'description': 'Projected 1-in-100 year annual loss is more than '\n", + " '10% and increases by more than 3% of asset value '\n", + " 'over historical baseline.',\n", + " 'label': 'The asset is very significantly impacted and the impact '\n", + " 'will increase as a result of climate change.',\n", + " 'value': 4},\n", + " {'description': 'Projected 1-in-100 year annual loss is more than '\n", + " '3% and increases by more than 3% of asset value '\n", + " 'over historical baseline.',\n", + " 'label': 'The asset is materially impacted and the impact will '\n", + " 'increase as a result of climate change.',\n", + " 'value': 3},\n", + " {'description': 'Projected 1-in-100 year annual loss is more than '\n", + " '3% but increases by less than 3% of asset value '\n", + " 'over historical baseline.',\n", + " 'label': 'The asset is materially impacted but the impact will '\n", + " 'not significantly increase as a result of climate '\n", + " 'change.',\n", + " 'value': 2},\n", + " {'description': 'Projected 1-in-100 year annual loss is less than '\n", + " '3% of asset value.',\n", + " 'label': 'No material impact.',\n", + " 'value': 1},\n", + " {'description': 'No data.', 'label': 'No data.', 'value': 0}]}\n" + ] + } + ], + "source": [ + "pp.pprint(response[\"risk_measures\"][\"score_based_measure_set_defn\"][\"score_definitions\"][\"measure_1\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Running impacts through physrisk directly\n", + "\n", + "Just as for obtaining hazard indicators, it is possible to run the same calculations using physrisk directly, as long as the necessary API keys are present in a credentials.env file.\n", + "\n", + "For developers who are focussed on using existing hazard models but building new vulnerability models or risk models, it is desirable to be able to develop these components locally while making use of hazard APIs. This is functionality which will be provided in a future version." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(\"../../credentials.env\")\n", + "# the container is a dependency injection container,\n", + "# which allows the calculation to be configured to a particular use-case\n", + "container = Container()\n", + "# the requester is used to run calculations using the API.\n", + "# At this point, we can of course debug into the code and modify as required.\n", + "requester = container.requester()\n", + "result = requester.get(request_id=\"get_asset_impact\", request_dict=request)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/EU JRC global flood depth-damage functions.json b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/EU JRC global flood depth-damage functions.json new file mode 100644 index 00000000..6618bed7 --- /dev/null +++ b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/EU JRC global flood depth-damage functions.json @@ -0,0 +1,711 @@ +{ + "items": [ + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.25, + 0.4, + 0.5, + 0.6, + 0.75, + 0.85, + 0.95, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.201804543, + 0.443269857, + 0.582754693, + 0.682521912, + 0.783957148, + 0.854348922, + 0.923670101, + 0.958522773, + 1.0 + ], + "impact_std": [ + 0.0, + 0.168357605, + 0.141121464, + 0.137452367, + 0.166725182, + 0.14072086, + 0.129131694, + 0.102073428, + 0.059134697, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.490885951, + 0.711294067, + 0.842026011, + 0.949369096, + 0.983636977, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.209427814, + 0.135409866, + 0.081630245, + 0.060853453, + 0.024070255, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.326556502, + 0.494050324, + 0.616572124, + 0.720711764, + 0.869528213, + 0.931487084, + 0.983604148, + 1.0 + ], + "impact_std": [ + 0.0, + 0.251622626, + 0.215442216, + 0.214468998, + 0.207322898, + 0.167536629, + 0.124508994, + 0.047803103, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Residential", + "event_type": 
"RiverineInundation", + "impact_mean": [ + 0.0, + 0.219925401, + 0.378226846, + 0.530589082, + 0.635636733, + 0.81693978, + 0.903434688, + 0.957152173, + 1.0 + ], + "impact_std": [ + 0.0, + 0.042003678, + 0.114296315, + 0.198396224, + 0.207821558, + 0.205246932, + 0.141856441, + 0.076208799, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.475418119, + 0.640393124, + 0.714614662, + 0.787726348, + 0.928779884, + 0.967381853, + 0.982795444, + 1.0 + ], + "impact_std": [ + 0.088039918, + 0.141050712, + 0.163528188, + 0.169484243, + 0.166855806, + 0.112877499, + 0.058153405, + 0.03589275, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Global" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.15, + 0.3, + 0.45, + 0.55, + 0.75, + 0.9, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.018404908, + 0.239263804, + 0.374233129, + 0.466257669, + 0.552147239, + 0.687116564, + 0.82208589, + 0.90797546, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.611477587, + 0.839531094, + 0.923588457, + 0.991972477, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.077023435, + 0.035924027, + 0.026876525, + 0.016055046, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.376789623, + 0.537681619, + 0.659336684, + 0.762845232, + 0.883348656, + 0.941854895, + 0.98075938, + 1.0 + ], + "impact_std": [ + 0.0, + 0.240462285, + 0.240596279, + 0.243605156, + 0.250253511, + 0.171703625, + 0.11240992, + 0.052781064, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.238953575, + 0.481199682, + 0.673795091, + 
0.864583333, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.142878204, + 0.204113206, + 0.190903594, + 0.178000078, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.323296918, + 0.506529105, + 0.63459558, + 0.744309656, + 0.864093044, + 0.932788157, + 0.977746968, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Global" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.15, + 0.27, + 0.4, + 0.52, + 0.7, + 0.85, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.025714286, + 0.322857143, + 0.511428571, + 0.637142857, + 0.74, + 0.86, + 0.937142857, + 0.98, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.6670194, + 0.888712522, + 0.946737213, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.174459885, + 0.098191042, + 0.046492655, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.283181524, + 0.481615653, + 0.629218894, + 0.717240588, + 0.85667503, + 0.908577004, + 0.955327463, + 1.0 + ], + "impact_std": [ + 0.0, + 0.243322302, + 0.295987071, + 0.300583358, + 0.268517907, + 0.234498136, + 0.159197865, + 0.079457988, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.062682043, + 0.247196046, + 0.403329984, + 0.494488633, + 0.684652389, + 0.918589786, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.297148022, + 0.479790559, + 0.60328579, + 0.694345844, + 0.820265484, + 0.922861929, + 0.987065493, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + 
"intensity_units": "m", + "location": "Global" + } + ] +} diff --git a/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/onboard.ipynb b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/onboard.ipynb new file mode 100644 index 00000000..4ec78a8f --- /dev/null +++ b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/onboard.ipynb @@ -0,0 +1,118 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## EU JRC global flood depth-damage functions\n", + "\n", + "### CITATION\n", + "Huizinga, J., De Moel, H. and Szewczyk, W., Global flood depth-damage functions: Methodology and the database with guidelines, EUR 28552 EN, Publications Office of the European Union, Luxembourg, 2017, ISBN 978-92-79-67781-6, doi:10.2760/16510, JRC105688.\n", + "\n", + "### LINK\n", + "https://publications.jrc.ec.europa.eu/repository/handle/JRC105688\n", + "\n", + "### NOTES\n", + "Data from copy_of_global_flood_depth-damage_functions__30102017.xlsx converted into CSV format in raw.csv. Ingested into vulnerability model as standard json.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "df = pd.read_csv(\"raw.csv\")\n", + "\n", + "# consistent with physrisk continent definition\n", + "location_mappings = {\n", + " \"Europe\": \"Europe\",\n", + " \"North America\": \"North America\",\n", + " \"Central & South America\": \"South America\",\n", + " \"Asia\": \"Asia\",\n", + " \"Africa\": \"Africa\",\n", + " \"Oceania\": \"Oceania\",\n", + " \"Global\": \"Global\",\n", + "}\n", + "type_mappings = {\n", + " \"Residential buildings\": \"Buildings/Residential\",\n", + " \"Commercial buildings\": \"Buildings/Commercial\",\n", + " \"Industrial buildings\": \"Buildings/Industrial\",\n", + "}\n", + "\n", + "data = {\"items\": []}\n", + "\n", + "curve_list = data[\"items\"]\n", + "for mapping in type_mappings:\n", + " type_df = df[df[\"Type\"] == mapping]\n", + " flood_depth = type_df[\"Flood depth [m]\"].to_numpy()\n", + " for location in location_mappings:\n", + " # whether zero depth is considered really zero or a flood event with smallest depth\n", + " zero_as_minimum = True if location == \"North America\" else False\n", + " # for North America, the 0 depth damage is for flooding of any depth. 
We treat that as a 1 cm inundation.\n", + " depth = np.concatenate([[0, 0.01], flood_depth[1:]]) if zero_as_minimum else flood_depth\n", + "\n", + " mean = type_df[location + \"_Mean\"].to_numpy()\n", + " std = type_df[location + \"_Std\"].to_numpy()\n", + " mean = np.concatenate([[0], mean]) if zero_as_minimum else mean\n", + " std = np.concatenate([[0], std]) if zero_as_minimum else std\n", + " if np.any(np.isnan(mean)):\n", + " mean = []\n", + " if np.any(np.isnan(std)):\n", + " std = []\n", + " curve_list.append(\n", + " {\n", + " \"asset_type\": type_mappings[mapping],\n", + " \"event_type\": \"RiverineInundation\",\n", + " \"location\": location_mappings[location],\n", + " \"impact_type\": \"Damage\",\n", + " \"intensity\": list(depth),\n", + " \"intensity_units\": \"m\",\n", + " \"impact_mean\": list(mean),\n", + " \"impact_std\": list(std),\n", + " }\n", + " )\n", + "\n", + "with open(\"EU JRC global flood depth-damage functions.json\", \"w\") as f:\n", + " vulnerability_json = json.dumps(data, sort_keys=True, indent=4)\n", + " f.write(vulnerability_json)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/raw.csv b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/raw.csv new file mode 100644 index 00000000..970939c6 --- /dev/null +++ b/notebooks/vulnerability_onboarding/EU JRC global flood depth-damage functions/raw.csv @@ -0,0 +1,55 @@ +Type,Flood depth [m],Europe_Mean,North America_Mean,Central & South America_Mean,Asia_Mean,Africa_Mean,Oceania_Mean,Global_Mean,Europe_Std,North America_Std,Central & South America_Std,Asia_Std,Africa_Std,Oceania_Std,Global_Std +Residential buildings,0,0,0.201804543,0,0,0,0,,,0.168357605,0,0,0,0.088039918, +Residential buildings,0.5,0.25,0.443269857,0.490885951,0.326556502,0.219925401,0.475418119,,,0.141121464,0.209427814,0.251622626,0.042003678,0.141050712, +Residential buildings,1,0.4,0.582754693,0.711294067,0.494050324,0.378226846,0.640393124,,,0.137452367,0.135409866,0.215442216,0.114296315,0.163528188, +Residential buildings,1.5,0.5,0.682521912,0.842026011,0.616572124,0.530589082,0.714614662,,,0.166725182,0.081630245,0.214468998,0.198396224,0.169484243, +Residential buildings,2,0.6,0.783957148,0.949369096,0.720711764,0.635636733,0.787726348,,,0.14072086,0.060853453,0.207322898,0.207821558,0.166855806, +Residential buildings,3,0.75,0.854348922,0.983636977,0.869528213,0.81693978,0.928779884,,,0.129131694,0.024070255,0.167536629,0.205246932,0.112877499, +Residential buildings,4,0.85,0.923670101,1,0.931487084,0.903434688,0.967381853,,,0.102073428,0,0.124508994,0.141856441,0.058153405, +Residential buildings,5,0.95,0.958522773,1,0.983604148,0.957152173,0.982795444,,,0.059134697,0,0.047803103,0.076208799,0.03589275, +Residential buildings,6,1,1,1,1,1,1,,,0,0,0,0,0, +Commercial buildings,0,0,0.018404908,0,0,,0,0,,,0,0,,0,
+Commercial buildings,0.5,0.15,0.239263804,0.611477587,0.376789623,,0.238953575,0.323296918,,,0.077023435,0.240462285,,0.142878204, +Commercial buildings,1,0.3,0.374233129,0.839531094,0.537681619,,0.481199682,0.506529105,,,0.035924027,0.240596279,,0.204113206, +Commercial buildings,1.5,0.45,0.466257669,0.923588457,0.659336684,,0.673795091,0.63459558,,,0.026876525,0.243605156,,0.190903594, +Commercial buildings,2,0.55,0.552147239,0.991972477,0.762845232,,0.864583333,0.744309656,,,0.016055046,0.250253511,,0.178000078, +Commercial buildings,3,0.75,0.687116564,1,0.883348656,,1,0.864093044,,,0,0.171703625,,0, +Commercial buildings,4,0.9,0.82208589,1,0.941854895,,1,0.932788157,,,0,0.11240992,,0, +Commercial buildings,5,1,0.90797546,1,0.98075938,,1,0.977746968,,,0,0.052781064,,0, +Commercial buildings,6,1,1,1,1,,1,1,,,0,0,,0, +Industrial buildings,0,0,0.025714286,0,0,0,,0,,,0,0,,, +Industrial buildings,0.5,0.15,0.322857143,0.6670194,0.283181524,0.062682043,,0.297148022,,,0.174459885,0.243322302,,, +Industrial buildings,1,0.27,0.511428571,0.888712522,0.481615653,0.247196046,,0.479790559,,,0.098191042,0.295987071,,, +Industrial buildings,1.5,0.4,0.637142857,0.946737213,0.629218894,0.403329984,,0.60328579,,,0.046492655,0.300583358,,, +Industrial buildings,2,0.52,0.74,1,0.717240588,0.494488633,,0.694345844,,,0,0.268517907,,, +Industrial buildings,3,0.7,0.86,1,0.85667503,0.684652389,,0.820265484,,,0,0.234498136,,, +Industrial buildings,4,0.85,0.937142857,1,0.908577004,0.918589786,,0.922861929,,,0,0.159197865,,, +Industrial buildings,5,1,0.98,1,0.955327463,1,,0.987065493,,,0,0.079457988,,, +Industrial buildings,6,1,1,1,1,1,,1,,,0,0,,, +Transport,0,0,,0,0,,,0,,,,0,,, +Transport,0.5,0.316666667,,0.087719298,0.35751634,,,0.253967435,,,,0.30215383,,, +Transport,1,0.541666667,,0.175438596,0.571895425,,,0.429666896,,,,0.281482409,,, +Transport,1.5,0.701666667,,0.596491228,0.733333333,,,0.677163743,,,,0.251661148,,, +Transport,2,0.831666667,,0.842105263,0.847222222,,,0.840331384,,,,0.168393829,,, +Transport,3,1,,1,1,,,1,,,,0,,, +Transport,4,1,,1,1,,,1,,,,0,,, +Transport,5,1,,1,1,,,1,,,,0,,, +Transport,6,1,,1,1,,,1,,,,0,,, +Infrastructure roads,0,0,,,0,,,0,,,,0,,, +Infrastructure roads,0.5,0.25,,,0.214436906,,,0.232218453,,,,0.069410857,,, +Infrastructure roads,1,0.42,,,0.37275441,,,0.396377205,,,,0.073943937,,, +Infrastructure roads,1.5,0.55,,,0.603934871,,,0.576967436,,,,0.205814323,,, +Infrastructure roads,2,0.65,,,0.709659091,,,0.679829545,,,,0.270555844,,, +Infrastructure roads,3,0.8,,,0.808409091,,,0.804204545,,,,0.221245379,,, +Infrastructure roads,4,0.9,,,0.887159091,,,0.893579545,,,,0.133514877,,, +Infrastructure roads,5,1,,,0.96875,,,0.984375,,,,0.0625,,, +Infrastructure roads,6,1,,,1,,,1,,,,0,,, +Agriculture,0,0,0.018575388,,0,0,,0,,,,0,0,, +Agriculture,0.5,0.3,0.267797668,,0.135,0.242873563,,0.236417808,,,,0.188944436,0.095973845,, +Agriculture,1,0.55,0.473677377,,0.37,0.47183908,,0.466379114,,,,0.351069794,0.17508562,, +Agriculture,1.5,0.65,0.550560785,,0.524,0.74137931,,0.616485024,,,,0.476319221,0.250445501,, +Agriculture,2,0.75,0.602161303,,0.558,0.916666667,,0.706706992,,,,0.455872789,0.144337567,, +Agriculture,3,0.85,0.760057091,,0.66,1,,0.817514273,,,,0.357770876,0,, +Agriculture,4,0.95,0.874094602,,0.834,1,,0.914523651,,,,0.163645959,0,, +Agriculture,5,1,0.954075572,,0.988,1,,0.985518893,,,,0.026832816,0,, +Agriculture,6,1,1,,1,1,,1,,,,0,0,, diff --git a/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/WRI thermal power plant physical climate 
vulnerability factors.json b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/WRI thermal power plant physical climate vulnerability factors.json new file mode 100644 index 00000000..1eed6eb5 --- /dev/null +++ b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/WRI thermal power plant physical climate vulnerability factors.json @@ -0,0 +1,399 @@ +{ + "items": [ + { + "asset_type": "Steam/OnceThrough", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Dry", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Gas", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Dry", + "event_type": "AirTemperature", + "impact_mean": [ + 0.0, + 0.02, + 0.04, + 0.08, + 0.11, + 0.15, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 6.0, + 12.0, + 18.0, + 24.0, + 30.0, + 198.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Gas", + "event_type": "AirTemperature", + "impact_mean": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.8, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 10.0, + 20.0, + 30.0, + 40.0, + 50.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "Drought", + "impact_mean": [ + 0.0, + 0.0, + 0.1, + 0.2, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + -2.0, + -2.5, + -3.0, + -3.6 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "Drought", + "impact_mean": [ + 0.0, + 0.0, + 0.1, + 0.2, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + -2.0, + -2.5, + -3.0, + -3.6 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "WaterTemperature", + "impact_mean": [ + 0.0, + 0.003, + 0.009, + 0.017, + 0.027, + 0.041, + 0.061, + 0.089, + 0.118, + 0.157, + 0.205, + 0.257, + 0.327, + 0.411, + 
0.508, + 0.629, + 0.775, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 7.0, + 8.0, + 9.0, + 10.0, + 11.0, + 12.0, + 13.0, + 14.0, + 15.0, + 16.0, + 17.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "WaterTemperature", + "impact_mean": [ + 0.0, + 0.003, + 0.009, + 0.017, + 0.027, + 0.041, + 0.061, + 0.089, + 0.118, + 0.157, + 0.205, + 0.257, + 0.327, + 0.411, + 0.508, + 0.629, + 0.775, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 7.0, + 8.0, + 9.0, + 10.0, + 11.0, + 12.0, + 13.0, + 14.0, + 15.0, + 16.0, + 17.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "WaterStress", + "impact_mean": [ + 0.0, + 0.02, + 0.1, + 0.2, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "WaterStress", + "impact_mean": [ + 0.0, + 0.02, + 0.1, + 0.2, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "RegulatoryDischargeWaterLimit", + "impact_mean": [ + 0.0, + 0.1, + 0.2, + 0.4, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 27.0, + 28.0, + 29.0, + 30.0, + 31.0, + 32.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + } + ] +} diff --git a/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/onboard.ipynb b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/onboard.ipynb new file mode 100644 index 00000000..d691bdcc --- /dev/null +++ b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/onboard.ipynb @@ -0,0 +1,95 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## World Resources Institute's thermal power plant physical climate vulnerability factors\n", + "\n", + "### CITATION\n", + "Tianyi Luo, Lihuan Zhou, James Falzon, Yan Cheng, Giulia Christianson, Yili Wu and Amir Habchi, Assessing Physical Climate Risks for the European Bank for Reconstruction and Development's Power Generation Project Investment Portfolio, World Resources Institute, Working paper, doi:10.46830/wriwp.21.00060.\n", + "\n", + "### LINK\n", + "https://www.wri.org/research/assessing-physical-climate-risks-european-bank-power-portfolio\n", + "\n", + "### NOTES\n", + "Data from Table B1 in Appendix B converted into CSV format in raw.csv. 
Ingested into vulnerability model as standard json.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json, pandas\n", + "\n", + "data = {\"items\": []}\n", + "\n", + "df = pandas.read_csv(\"raw.csv\")\n", + "df[\"Asset Type\"] = [\n", + " (x if pandas.isna(y) else (x + \"/\" + y)).replace(\" \", \"\")\n", + " for (x, y) in zip(df[\"Turbine Type\"], df[\"Cooling Technology\"])\n", + "]\n", + "df.drop(columns=[\"Turbine Type\", \"Cooling Technology\"], inplace=True)\n", + "\n", + "hazard_set = set(df[\"Hazard Type\"])\n", + "for hazard_type in hazard_set:\n", + " selection = df.loc[df[\"Hazard Type\"] == hazard_type]\n", + " asset_set = set(selection[\"Asset Type\"])\n", + " for asset_type in asset_set:\n", + " sub_selection = selection.loc[selection[\"Asset Type\"] == asset_type]\n", + " threshold_set = set(sub_selection[\"Threshold Type\"])\n", + " for threshold_type in threshold_set:\n", + " item = sub_selection.loc[sub_selection[\"Threshold Type\"] == threshold_type]\n", + " if (\n", + " len(set(item[\"Threshold Unit\"])) == 1\n", + " and len(set(item[\"Vulnerability Type\"])) == 1\n", + " and len(set(item[\"Vulnerability Unit\"])) == 1\n", + " ):\n", + " data[\"items\"].append(\n", + " {\n", + " \"asset_type\": asset_type.replace(\" \", \"\"),\n", + " \"event_type\": hazard_type.replace(\" \", \"\"),\n", + " \"location\": \"Global\",\n", + " \"intensity\": list(item[\"Threshold\"].values),\n", + " \"intensity_units\": item[\"Threshold Unit\"].values[0].replace(\" \", \"\"),\n", + " \"impact_type\": item[\"Vulnerability Type\"].values[0].replace(\" \", \"\"),\n", + " \"impact_units\": item[\"Vulnerability Unit\"].values[0].replace(\" \", \"\"),\n", + " \"impact_mean\": list(item[\"Vulnerability\"].values),\n", + " \"impact_std\": [],\n", + " }\n", + " )\n", + "\n", + "with open(\"WRI thermal power plant physical climate vulnerability factors.json\", \"w\") as f:\n", + " vulnerability_json = json.dumps(data, sort_keys=True, indent=4)\n", + " f.write(vulnerability_json)" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/raw.csv b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/raw.csv new file mode 100644 index 00000000..9f7a52b4 --- /dev/null +++ b/notebooks/vulnerability_onboarding/WRI thermal power plant physical climate vulnerability factors/raw.csv @@ -0,0 +1,114 @@ +Hazard Type,Turbine Type,Cooling Technology,Threshold,Threshold Type,Threshold Unit,Vulnerability,Vulnerability Type,Vulnerability Unit +Air Temperature,Gas,,0,Above P90,Degrees Celsius,0,Disruption,Days +Air Temperature,Gas,,10,Above P90,Degrees Celsius,0.1,Disruption,Days +Air Temperature,Gas,,20,Above P90,Degrees Celsius,0.25,Disruption,Days +Air Temperature,Gas,,30,Above P90,Degrees Celsius,0.5,Disruption,Days +Air Temperature,Gas,,40,Above P90,Degrees Celsius,0.8,Disruption,Days +Air 
Temperature,Gas,,50,Above P90,Degrees Celsius,1,Disruption,Days +Air Temperature,Steam,Dry,0,Above P90,Degrees Celsius,0,Disruption,Days +Air Temperature,Steam,Dry,6,Above P90,Degrees Celsius,0.02,Disruption,Days +Air Temperature,Steam,Dry,12,Above P90,Degrees Celsius,0.04,Disruption,Days +Air Temperature,Steam,Dry,18,Above P90,Degrees Celsius,0.08,Disruption,Days +Air Temperature,Steam,Dry,24,Above P90,Degrees Celsius,0.11,Disruption,Days +Air Temperature,Steam,Dry,30,Above P90,Degrees Celsius,0.15,Disruption,Days +Air Temperature,Steam,Dry,198,Above P90,Degrees Celsius,1,Disruption,Days +Drought,Steam,Once Through,0,SPEI,Unitless,0,Disruption,Days +Drought,Steam,Once Through,-2,SPEI,Unitless,0,Disruption,Days +Drought,Steam,Once Through,-2.5,SPEI,Unitless,0.1,Disruption,Days +Drought,Steam,Once Through,-3,SPEI,Unitless,0.2,Disruption,Days +Drought,Steam,Once Through,-3.6,SPEI,Unitless,1,Disruption,Days +Drought,Steam,Recirculating,0,SPEI,Unitless,0,Disruption,Days +Drought,Steam,Recirculating,-2,SPEI,Unitless,0,Disruption,Days +Drought,Steam,Recirculating,-2.5,SPEI,Unitless,0.1,Disruption,Days +Drought,Steam,Recirculating,-3,SPEI,Unitless,0.2,Disruption,Days +Drought,Steam,Recirculating,-3.6,SPEI,Unitless,1,Disruption,Days +Inundation,Gas,,0,Flood Depth,Metres,0,Disruption,Days +Inundation,Gas,,0.1,Flood Depth,Metres,1,Disruption,Days +Inundation,Gas,,0.2,Flood Depth,Metres,2,Disruption,Days +Inundation,Gas,,0.3,Flood Depth,Metres,7,Disruption,Days +Inundation,Gas,,0.4,Flood Depth,Metres,14,Disruption,Days +Inundation,Gas,,0.5,Flood Depth,Metres,30,Disruption,Days +Inundation,Gas,,0.6,Flood Depth,Metres,60,Disruption,Days +Inundation,Gas,,0.7,Flood Depth,Metres,180,Disruption,Days +Inundation,Gas,,1,Flood Depth,Metres,365,Disruption,Days +Inundation,Steam,Dry,0,Flood Depth,Metres,0,Disruption,Days +Inundation,Steam,Dry,0.1,Flood Depth,Metres,1,Disruption,Days +Inundation,Steam,Dry,0.2,Flood Depth,Metres,2,Disruption,Days +Inundation,Steam,Dry,0.3,Flood Depth,Metres,7,Disruption,Days +Inundation,Steam,Dry,0.4,Flood Depth,Metres,14,Disruption,Days +Inundation,Steam,Dry,0.5,Flood Depth,Metres,30,Disruption,Days +Inundation,Steam,Dry,0.6,Flood Depth,Metres,60,Disruption,Days +Inundation,Steam,Dry,0.7,Flood Depth,Metres,180,Disruption,Days +Inundation,Steam,Dry,1,Flood Depth,Metres,365,Disruption,Days +Inundation,Steam,Once Through,0,Flood Depth,Metres,0,Disruption,Days +Inundation,Steam,Once Through,0.1,Flood Depth,Metres,1,Disruption,Days +Inundation,Steam,Once Through,0.2,Flood Depth,Metres,2,Disruption,Days +Inundation,Steam,Once Through,0.3,Flood Depth,Metres,7,Disruption,Days +Inundation,Steam,Once Through,0.4,Flood Depth,Metres,14,Disruption,Days +Inundation,Steam,Once Through,0.5,Flood Depth,Metres,30,Disruption,Days +Inundation,Steam,Once Through,0.6,Flood Depth,Metres,60,Disruption,Days +Inundation,Steam,Once Through,0.7,Flood Depth,Metres,180,Disruption,Days +Inundation,Steam,Once Through,1,Flood Depth,Metres,365,Disruption,Days +Inundation,Steam,Recirculating,0,Flood Depth,Metres,0,Disruption,Days +Inundation,Steam,Recirculating,0.1,Flood Depth,Metres,1,Disruption,Days +Inundation,Steam,Recirculating,0.2,Flood Depth,Metres,2,Disruption,Days +Inundation,Steam,Recirculating,0.3,Flood Depth,Metres,7,Disruption,Days +Inundation,Steam,Recirculating,0.4,Flood Depth,Metres,14,Disruption,Days +Inundation,Steam,Recirculating,0.5,Flood Depth,Metres,30,Disruption,Days +Inundation,Steam,Recirculating,0.6,Flood Depth,Metres,60,Disruption,Days +Inundation,Steam,Recirculating,0.7,Flood 
Depth,Metres,180,Disruption,Days +Inundation,Steam,Recirculating,1,Flood Depth,Metres,365,Disruption,Days +Water Stress,Steam,Once Through,0,Water Supply Reduction Rate,Unitless,0,Disruption,Days +Water Stress,Steam,Once Through,0.1,Water Supply Reduction Rate,Unitless,0.02,Disruption,Days +Water Stress,Steam,Once Through,0.25,Water Supply Reduction Rate,Unitless,0.1,Disruption,Days +Water Stress,Steam,Once Through,0.5,Water Supply Reduction Rate,Unitless,0.2,Disruption,Days +Water Stress,Steam,Once Through,0.75,Water Supply Reduction Rate,Unitless,0.5,Disruption,Days +Water Stress,Steam,Once Through,1,Water Supply Reduction Rate,Unitless,1,Disruption,Days +Water Stress,Steam,Recirculating,0,Water Supply Reduction Rate,Unitless,0,Disruption,Days +Water Stress,Steam,Recirculating,0.1,Water Supply Reduction Rate,Unitless,0.02,Disruption,Days +Water Stress,Steam,Recirculating,0.25,Water Supply Reduction Rate,Unitless,0.1,Disruption,Days +Water Stress,Steam,Recirculating,0.5,Water Supply Reduction Rate,Unitless,0.2,Disruption,Days +Water Stress,Steam,Recirculating,0.75,Water Supply Reduction Rate,Unitless,0.5,Disruption,Days +Water Stress,Steam,Recirculating,1,Water Supply Reduction Rate,Unitless,1,Disruption,Days +Water Temperature,Steam,Once Through,0,Above P90,Degrees Celsius,0,Disruption,Days +Water Temperature,Steam,Once Through,1,Above P90,Degrees Celsius,0.003,Disruption,Days +Water Temperature,Steam,Once Through,2,Above P90,Degrees Celsius,0.009,Disruption,Days +Water Temperature,Steam,Once Through,3,Above P90,Degrees Celsius,0.017,Disruption,Days +Water Temperature,Steam,Once Through,4,Above P90,Degrees Celsius,0.027,Disruption,Days +Water Temperature,Steam,Once Through,5,Above P90,Degrees Celsius,0.041,Disruption,Days +Water Temperature,Steam,Once Through,6,Above P90,Degrees Celsius,0.061,Disruption,Days +Water Temperature,Steam,Once Through,7,Above P90,Degrees Celsius,0.089,Disruption,Days +Water Temperature,Steam,Once Through,8,Above P90,Degrees Celsius,0.118,Disruption,Days +Water Temperature,Steam,Once Through,9,Above P90,Degrees Celsius,0.157,Disruption,Days +Water Temperature,Steam,Once Through,10,Above P90,Degrees Celsius,0.205,Disruption,Days +Water Temperature,Steam,Once Through,11,Above P90,Degrees Celsius,0.257,Disruption,Days +Water Temperature,Steam,Once Through,12,Above P90,Degrees Celsius,0.327,Disruption,Days +Water Temperature,Steam,Once Through,13,Above P90,Degrees Celsius,0.411,Disruption,Days +Water Temperature,Steam,Once Through,14,Above P90,Degrees Celsius,0.508,Disruption,Days +Water Temperature,Steam,Once Through,15,Above P90,Degrees Celsius,0.629,Disruption,Days +Water Temperature,Steam,Once Through,16,Above P90,Degrees Celsius,0.775,Disruption,Days +Water Temperature,Steam,Once Through,17,Above P90,Degrees Celsius,1,Disruption,Days +Water Temperature,Steam,Recirculating,0,Above P90,Degrees Celsius,0,Disruption,Days +Water Temperature,Steam,Recirculating,1,Above P90,Degrees Celsius,0.003,Disruption,Days +Water Temperature,Steam,Recirculating,2,Above P90,Degrees Celsius,0.009,Disruption,Days +Water Temperature,Steam,Recirculating,3,Above P90,Degrees Celsius,0.017,Disruption,Days +Water Temperature,Steam,Recirculating,4,Above P90,Degrees Celsius,0.027,Disruption,Days +Water Temperature,Steam,Recirculating,5,Above P90,Degrees Celsius,0.041,Disruption,Days +Water Temperature,Steam,Recirculating,6,Above P90,Degrees Celsius,0.061,Disruption,Days +Water Temperature,Steam,Recirculating,7,Above P90,Degrees Celsius,0.089,Disruption,Days +Water 
Temperature,Steam,Recirculating,8,Above P90,Degrees Celsius,0.118,Disruption,Days +Water Temperature,Steam,Recirculating,9,Above P90,Degrees Celsius,0.157,Disruption,Days +Water Temperature,Steam,Recirculating,10,Above P90,Degrees Celsius,0.205,Disruption,Days +Water Temperature,Steam,Recirculating,11,Above P90,Degrees Celsius,0.257,Disruption,Days +Water Temperature,Steam,Recirculating,12,Above P90,Degrees Celsius,0.327,Disruption,Days +Water Temperature,Steam,Recirculating,13,Above P90,Degrees Celsius,0.411,Disruption,Days +Water Temperature,Steam,Recirculating,14,Above P90,Degrees Celsius,0.508,Disruption,Days +Water Temperature,Steam,Recirculating,15,Above P90,Degrees Celsius,0.629,Disruption,Days +Water Temperature,Steam,Recirculating,16,Above P90,Degrees Celsius,0.775,Disruption,Days +Water Temperature,Steam,Recirculating,17,Above P90,Degrees Celsius,1,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,27,Intake Water Temperature,Degrees Celsius,0,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,28,Intake Water Temperature,Degrees Celsius,0.1,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,29,Intake Water Temperature,Degrees Celsius,0.2,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,30,Intake Water Temperature,Degrees Celsius,0.4,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,31,Intake Water Temperature,Degrees Celsius,0.5,Disruption,Days +Regulatory Discharge Water Limit,Steam,Once Through,32,Intake Water Temperature,Degrees Celsius,1,Disruption,Days diff --git a/notebooks/vulnerability_onboarding/Wind/Table_A2_Impact_Function_Slope.csv b/notebooks/vulnerability_onboarding/Wind/Table_A2_Impact_Function_Slope.csv new file mode 100644 index 00000000..aab5404b --- /dev/null +++ b/notebooks/vulnerability_onboarding/Wind/Table_A2_Impact_Function_Slope.csv @@ -0,0 +1,12 @@ +region,nb_countries,nb_events,vhalf_a,vhalf_b,vhalf_c,rmsf_a,rmsf_b,rmsf_c,tdr_a,tdr_b,tdr_c +NA1,21,73,74.7,59.6,66.3,11.8,9.8,10.3,0.68,1.44,1 +NA2,2,43,74.7,86,89.2,9.5,8.7,8.7,2.11,1.16,1 +NI,6,31,74.7,58.7,70.8,7.8,6,7.2,0.85,2.03,1 +OC,11,48,74.7,49.7,64.1,22.5,14.7,17.7,0.6,2.31,1 +SI,2,19,74.7,46.8,52.4,20.1,8.6,9.1,0.2,1.8,1 +WP1,4,43,74.7,56.7,66.4,15.2,11.3,12.6,0.62,2.05,1 +WP2,1,83,74.7,84.7,188.4,38.2,36.7,104.9,25.89,16.44,1 +WP3,1,69,74.7,80.2,112.8,15.2,14.8,20.5,5.32,3.83,1 +WP4,5,64,74.7,135.6,190.5,73.8,35.9,43.8,35.56,3.35,1 +Combined,53,473,74.7,–,–,22.2,16.8,24.4,4.69,2.15,1 +Global calibration,53,473,74.7,73.4,110.1,22.2,22.2,33.1,4.69,4.84,1 diff --git a/notebooks/vulnerability_onboarding/Wind/Table_S2_V_half_individual_fitting_per_event.csv b/notebooks/vulnerability_onboarding/Wind/Table_S2_V_half_individual_fitting_per_event.csv new file mode 100644 index 00000000..10022dd6 --- /dev/null +++ b/notebooks/vulnerability_onboarding/Wind/Table_S2_V_half_individual_fitting_per_event.csv @@ -0,0 +1,474 @@ +EM_ID,ibtracsID,v_half_fitted [m/s],country,country_id,region,year,Reported_Damage [USD],reference_year,Normalized_Reported_Damage [USD],Simulated_Damage [USD],log_ratio,unique_ID +1980-0036,1980081S12170,39.2,FJI,242,OC,1980,2256000,2014,8410828.037,8395962.204,-0.001769027,1980-0036FJI +1980-0080,1980214N11330,106.8,USA,840,NA2,1980,860000000,2014,5264166323,5256769039,-0.001406203,1980-0080USA +1980-0099,1980296N05165,58.4,PHL,608,WP2,1980,102300000,2014,897149786.7,897162024.7,1.36E-05,1980-0099PHL 
+1980-0133,1980126N08150,70.1,PHL,608,WP2,1980,289000,2014,2534470.072,2539189.469,0.001860353,1980-0133PHL +1981-0083,1981256N10150,37.6,PHL,608,WP2,1981,6700000,2014,53489704.34,53126575,-0.006811919,1981-0083PHL +1981-0101,1981319N07163,86.5,PHL,608,WP2,1981,35000000,2014,279423828.6,279356846.9,-0.000239743,1981-0101PHL +1981-0110,1981351S12060,25.8,MDG,450,SI,1981,250000000,2014,742274546.2,139581097.2,-1.671073409,1981-0110MDG +1981-0111,1981353S09172,147.1,NCL,540,OC,1981,200000,2014,551603.3058,552274.4408,0.001215959,1981-0111NCL +1981-0112,1981355N07149,106.6,PHL,608,WP2,1981,26000000,2014,207571987,207434161.8,-0.000664208,1981-0112PHL +1981-0134,1981176N08150,37.1,PHL,608,WP2,1981,7609000,2014,60746740.34,60552182.26,-0.003207914,1981-0134PHL +1981-0138,1981317N14153,325.7,PHL,608,WP2,1981,167000,2014,1333250.839,4690972.593,1.258019737,1981-0138PHL +1982-0034,1982058S10185,78.8,TON,776,OC,1982,21200000,2014,151622251.2,151685209.3,0.000415144,1982-0034TON +1982-0090,1982202N11165,169.7,JPN,392,WP4,1982,137000000,2014,585717153.3,585156603.8,-0.000957489,1982-0090JPN +1982-0093,1982227N09140,136.2,PHL,608,WP2,1982,6500000,2014,49805902.91,49825948.18,0.000402387,1982-0093PHL +1982-0105,1982262N12270,82.7,MEX,484,NA1,1982,82400000,2014,586753507.8,587911847.3,0.001972204,1982-0105MEX +1982-0120,1982309N11064,53,IND,356,NI,1982,625420000,2014,6353835704,6333077626,-0.003272363,1982-0120IND +1982-0124,1982324N10193,25.8,USA,840,NA2,1982,230000000,2014,1204787041,139213192.7,-2.158051582,1982-0124USA +1982-0174,1982190N09148,53,PHL,608,WP2,1982,4500000,2014,34481009.7,34508564.46,0.000798809,1982-0174PHL +1982-0176,1982325N08176,325.7,PHL,608,WP2,1982,5000,2014,38312.233,109565.7082,1.0507552,1982-0176PHL +1982-0255,1982095S11138,25.8,AUS,36,OC,1982,6864000,2014,51906694.26,24595091.36,-0.746900881,1982-0255AUS +1982-0313,1982150N13087,40.7,IND,356,NI,1982,120000000,2014,1219117208,1213968600,-0.00423217,1982-0313IND +1983-0047,1983054S15179,59.9,FJI,242,OC,1983,50000000,2014,199598757.6,200079843.9,0.002407367,1983-0047FJI +1983-0057,1983082S11180,25.8,FJI,242,OC,1983,851000,2014,3397170.854,3043079.372,-0.110073029,1983-0057FJI +1983-0071,1983097S10224,80.2,PYF,258,OC,1983,21000000,2014,54194671.27,54184495.54,-0.00018778,1983-0071PYF +1983-0110,1983228N27270,113.5,USA,840,NA2,1983,3000000000,2014,14448395154,14460245256,0.000819831,1983-0110USA +1983-0121,1983239N10183,247.1,HKG,344,WP4,1983,12544000,2014,122247467.4,122276578.6,0.000238105,1983-0121HKG +1983-0129,1983274N18089,33.8,IND,356,NI,1983,510000000,2014,4764703292,4789699613,0.005232431,1983-0129IND +1983-0157,1983339S10065,34.9,MDG,450,SI,1983,25000000,2014,75988123.11,76350597.6,0.004758805,1983-0157MDG +1984-0034,1984094S10080,25.8,MDG,450,SI,1984,250000000,2014,907770830.7,395535661.3,-0.830751007,1984-0034MDG +1984-0105,1984302N00149,54.4,PHL,608,WP2,1984,96600000,2014,875268514.8,874308890,-0.001096979,1984-0105PHL +1984-0110,1984314N09088,153.5,IND,356,NI,1984,35000000,2014,336397308.3,336760286.3,0.001078434,1984-0110IND +1985-0044,1985070S17175,99.3,FJI,242,OC,1985,3000000,2014,11785953.11,11773200.93,-0.001082567,1985-0044FJI +1985-0063,1985143N16088,73.8,BGD,50,NI,1985,50000000,2014,388010978.9,387689559.2,-0.000828721,1985-0063BGD +1985-0104,1985240N20286,91.5,USA,840,NA2,1985,1100000000,2014,4434115752,4434804314,0.000155275,1985-0104USA +1985-0111,1985260N13336,165,USA,840,NA2,1985,900000000,2014,3627912888,3625496587,-0.000666253,1985-0111USA 
+1985-0118,1985268N03161,37.7,KOR,410,WP4,1985,14000000,2014,197048615.5,198029017.5,0.004963096,1985-0118KOR +1985-0126,1985299N25270,72.9,USA,840,NA2,1985,1500000000,2014,6046521480,6044937215,-0.000262047,1985-0126USA +1985-0190,1985162N05154,31.9,PHL,608,WP2,1985,20000000,2014,185189963.4,183141092.2,-0.011125276,1985-0190PHL +1986-0029,1986027S13145,45.1,AUS,36,OC,1986,70000000,2014,563514152.7,565444119.6,0.003419026,1986-0029AUS +1986-0042,1986067S11080,53.3,MDG,450,SI,1986,150000000,2014,491370641.1,493215306.8,0.003747094,1986-0042MDG +1986-0065,1986135S07160,37.5,SLB,90,OC,1986,20000000,2014,111253691.9,110450052.8,-0.007249698,1986-0065SLB +1986-0081,1986179N11175,48.1,CHN,156,WP3,1986,380000000,2014,13244203233,13188900322,-0.004184374,1986-0081CHN +1986-0115,1986252N06153,290.1,TWN,158,WP4,1986,80000000,2014,542490477.3,542431362.6,-0.000108975,1986-0115TWN +1986-0143,1986356S07183,25.8,FJI,242,OC,1986,20000000,2014,69498014.25,13482626.58,-1.639896243,1986-0143FJI +1986-0152,1986276N07177,34.1,PHL,608,WP2,1986,36000000,2014,343006780.4,344811815.2,0.00524859,1986-0152PHL +1986-0153,1986343N05176,272.6,PHL,608,WP2,1986,4000000,2014,38111864.49,38099421.55,-0.000326538,1986-0153PHL +1987-0057,1987035S12160,48.9,VUT,548,OC,1987,25000000,2014,155722786.8,155650596.3,-0.000463691,1987-0057VUT +1987-0130,1987188N10151,99.5,KOR,410,WP4,1987,325000000,2014,3138801397,3134047295,-0.001515772,1987-0130KOR +1987-0159,1987245N15133,40.7,CHN,156,WP3,1987,120000000,2014,4608092264,4627134752,0.004123887,1987-0159CHN +1987-0164,1987263N10309,59.4,BMU,60,NA1,1987,50000000,2014,214952184.2,214688521.6,-0.001227363,1987-0164BMU +1987-0212,1987320N03171,170,PHL,608,WP2,1987,56000000,2014,480080890.5,479688254.9,-0.000818188,1987-0212PHL +1987-0239,1987219N08155,105.5,PHL,608,WP2,1987,5600000,2014,48008089.05,47999808.52,-0.000172497,1987-0239PHL +1987-0240,1987343N05154,82,PHL,608,WP2,1987,8500000,2014,72869420.88,72908124.38,0.000530994,1987-0240PHL +1988-0312,1988193N09149,35.6,PHL,608,WP2,1988,11516000,2014,86504877.06,86385597.87,-0.001379824,1988-0312PHL +1988-0430,1988253N12306,168.4,MEX,484,NA1,1988,76000000,2014,550112928,550213379.9,0.000182586,1988-0430MEX +1988-0481,1988285N09318,36.5,NIC,558,NA1,1988,400000000,2014,1806289799,1817479172,0.006175564,1988-0481NIC +1988-0506,1988308N09140,42.9,PHL,608,WP2,1988,149060000,2014,1119695812,1120889297,0.001065333,1988-0506PHL +1988-0557,1988306N15130,31.9,PHL,608,WP2,1988,940000,2014,7061009.416,7131612.632,0.009949366,1988-0557PHL +1989-0115,1989254N13340,49.3,ATG,28,NA1,1989,80000000,2014,232560060.9,233463359.6,0.003876629,1989-0115ATG +1989-0115,1989254N13340,40.7,DMA,212,NA1,1989,20000000,2014,56557045.94,56990741.94,0.007639041,1989-0115DMA +1989-0115,1989254N13340,48.1,KNA,659,NA1,1989,46000000,2014,219073474.2,218799713.4,-0.001250411,1989-0115KNA +1989-0115,1989254N13340,25.8,MSR,500,NA1,1989,240000000,2014,240000000,54629110.58,-1.480072022,1989-0115MSR +1989-0115,1989254N13340,116.4,USA,840,NA2,1989,7000000000,2014,21678841358,21687159989,0.000383648,1989-0115USA +1989-0115,1989254N13340,159.9,VIR,850,NA1,1989,21800000,2014,58754072.56,58790887.87,0.000626404,1989-0115VIR +1989-0120,1989190N20160,81,PHL,608,WP2,1989,61000000,2014,407741184.1,407187405.7,-0.001359085,1989-0120PHL +1989-0125,1989279N07151,249.3,PHL,608,WP2,1989,59200000,2014,395709477,395579902.7,-0.000327502,1989-0125PHL +1989-0126,1989286N14137,87.6,PHL,608,WP2,1989,35400000,2014,236623572.4,236735425.6,0.000472593,1989-0126PHL 
+1989-0132,1989314N10152,298.7,PHL,608,WP2,1989,325000,2014,2172391.554,2172693.594,0.000139026,1989-0132PHL +1989-0265,1989201N11145,50.1,KOR,410,WP4,1989,176500000,2014,1022890325,1028251601,0.005227613,1989-0265KOR +1989-0398,1989089S11158,71.5,AUS,36,OC,1989,94300000,2014,461534104.7,461070798.7,-0.001004343,1989-0398AUS +1990-0019,1990124N09088,74.1,IND,356,NI,1990,580000000,2014,3684645392,3694898103,0.002778686,1990-0019IND +1990-0037,1990166N06141,41.6,CHN,156,WP3,1990,16000000,2014,464775602.6,466700655,0.004133342,1990-0037CHN +1990-0038,1990171N11148,106.6,CHN,156,WP3,1990,28000000,2014,813357304.5,812769447.7,-0.000723015,1990-0038CHN +1990-0121,1990327S07175,130.3,FJI,242,OC,1990,10000000,2014,33532784.13,33571889.31,0.001165498,1990-0121FJI +1990-0390,1990202N13130,57,CHN,156,WP3,1990,83000000,2014,2411023438,2403268533,-0.003221621,1990-0390CHN +1990-0393,1990216N13281,59.6,MEX,484,NA1,1990,90700000,2014,456380159.7,457720406.3,0.002932385,1990-0393MEX +1990-0402,1990235N10152,85,CHN,156,WP3,1990,688000000,2014,19985350911,20029007455,0.002182045,1990-0402CHN +1990-0405,1990245N16149,103.6,CHN,156,WP3,1990,154000000,2014,4473465175,4474420771,0.000213591,1990-0405CHN +1990-0407,1990251N06171,178.1,JPN,392,WP4,1990,4000000000,2014,6193036523,6196291658,0.000525474,1990-0407JPN +1990-0422,1990350S11165,43.2,AUS,36,OC,1990,155600000,2014,733330742.5,736672274.3,0.0045463,1990-0422AUS +1991-0120,1991113N10091,65.3,BGD,50,NI,1991,1780000000,2014,9940604678,9948189982,0.000762772,1991-0120BGD +1991-0218,1991228N26286,91,USA,840,NA2,1991,1500000000,2014,4256954560,4261266006,0.001012288,1991-0218USA +1991-0364,1991338S08181,42.6,WSM,882,OC,1991,278000000,2014,1782167001,1782019359,-8.28E-05,1991-0364WSM +1991-0382,1991289N06156,54.8,PHL,608,WP2,1991,90000000,2014,563936202.8,562823931.5,-0.001974283,1991-0382PHL +1991-0526,1991200N05157,66.3,JPN,392,WP4,1991,81500000,2014,110285260.8,110330966.9,0.00041435,1991-0526JPN +1991-0539,1991256N13171,131,JPN,392,WP4,1991,10000000000,2014,13531933846,13532696955,5.64E-05,1991-0539JPN +1991-0710,1991220N10133,34.1,THA,764,WP1,1991,8323000,2014,34512098.63,34986428.57,0.013650284,1991-0710THA +1992-0061,1992174N13126,172.1,VNM,704,WP1,1992,400000,2014,7548589.73,7552262.137,0.000486384,1992-0061VNM +1992-0066,1992230N11325,95.4,USA,840,NA2,1992,26500000000,2014,71005515347,71121951504,0.001638475,1992-0066USA +1992-0143,1992289N08135,46.2,VNM,704,WP1,1992,18000000,2014,339686537.9,340038182.3,0.001034667,1992-0143VNM +1992-0162,1992338S04173,260.2,FJI,242,OC,1992,1600000,2014,4683021.159,4685294.259,0.000485274,1992-0162FJI +1992-0271,1992249N12229,25.8,USA,840,NA2,1992,5000000000,2014,13397267047,139213200.5,-4.566799439,1992-0271USA +1992-0342,1992233N16254,44.8,MEX,484,NA1,1992,3000000,2014,10859450.34,10821712.85,-0.003481135,1992-0342MEX +1992-0357,1992237N14144,42.4,TWN,158,WP4,1992,35000000,2014,83218785.15,83608159.72,0.004668014,1992-0357TWN +1992-0463,1992194N07140,95.7,HKG,344,WP4,1992,219700000,2014,614100138.3,614469716.1,0.000601639,1992-0463HKG +1993-0016,1993131S04158,41.5,PNG,598,OC,1993,1500000,2014,6953406.58,6945308.68,-0.001165273,1993-0016PNG +1993-0024,1993083S12181,120.7,VUT,548,OC,1993,6000000,2014,25998065.23,26020847.91,0.000875938,1993-0024VUT +1993-0077,1993271N14134,53.6,PHL,608,WP2,1993,188000000,2014,984067975.5,980564341.4,-0.003566711,1993-0077PHL +1993-0079,1993246N16129,33,JPN,392,WP4,1993,1300000000,2014,1415656470,1419557298,0.002751701,1993-0079JPN 
+1993-0085,1993331N07108,88.3,IND,356,NI,1993,100000000,2014,730095393.3,729136239.4,-0.001314602,1993-0085IND +1993-0088,1993322N09137,44.8,PHL,608,WP2,1993,2000000,2014,10468808.25,10463170.82,-0.000538643,1993-0088PHL +1993-0088,1993322N09137,81,VNM,704,WP1,1993,15000000,2014,211901951.8,212217305.1,0.001487098,1993-0088VNM +1993-0199,1993224N07153,64.4,CHN,156,WP3,1993,433920000,2014,10227548827,10229454530,0.000186313,1993-0199CHN +1993-0228,1993353N05159,169.8,PHL,608,WP2,1993,17000000,2014,88984870.12,89019101.11,0.000384609,1993-0228PHL +1993-0464,1993253N06150,65.7,CHN,156,WP3,1993,263670000,2014,6214734972,6193264638,-0.003460728,1993-0464CHN +1993-0466,1993298N11154,36.4,CHN,156,WP3,1993,15890000,2014,374529293.1,378991865.8,0.011844724,1993-0466CHN +1993-0473,1993211N07161,240.6,JPN,392,WP4,1993,450000000,2014,490034931.9,490304476.1,0.0005499,1993-0473JPN +1994-0009,1994007S16056,47,MDG,450,SI,1994,10000000,2014,35852773.5,36011276.82,0.004411206,1994-0009MDG +1994-0044,1994117N07096,49.6,BGD,50,NI,1994,125000000,2014,639962654.8,641125319.8,0.001815122,1994-0044BGD +1994-0071,1994197N14115,39,PHL,608,WP2,1994,37600000,2014,166973054.6,167152538,0.001074347,1994-0071PHL +1994-0138,1994153N19113,161,CHN,156,WP3,1994,700000,2014,13002551.31,13003207.86,5.05E-05,1994-0138CHN +1994-0163,1994186N09139,133.9,CHN,156,WP3,1994,96300000,2014,1788779559,1788690280,-4.99E-05,1994-0163CHN +1994-0185,1994211N12152,244.7,TWN,158,WP4,1994,232000000,2014,479958040.5,479778510,-0.000374125,1994-0185TWN +1994-0198,1994224N20152,88.2,CHN,156,WP3,1994,1150000000,2014,21361334294,21319044816,-0.001981683,1994-0198CHN +1994-0198,1994224N20152,208.4,JPN,392,WP4,1994,6000000,2014,5930761.695,5934549.688,0.000638499,1994-0198JPN +1994-0352,1994302N11086,82.5,IND,356,NI,1994,19100000,2014,119004702,119187618.6,0.001535874,1994-0352IND +1994-0519,1994287N14156,259.2,PHL,608,WP2,1994,67400000,2014,299308082,299123832.2,-0.000615775,1994-0519PHL +1994-0594,1994345N06165,83.2,PHL,608,WP2,1994,26813000,2014,119070439.2,118906801,-0.001375242,1994-0594PHL +1995-0183,1995229N12144,70,KOR,410,WP4,1995,425000000,2014,1078553430,1077763418,-0.000732742,1995-0183KOR +1995-0188,1995236N10134,287.4,CHN,156,WP3,1995,87000000,2014,1241534250,1241256622,-0.000223641,1995-0188CHN +1995-0192,1995240N11337,42.7,ATG,28,NA1,1995,350000000,2014,773370483.3,771570241.2,-0.002330501,1995-0192ATG +1995-0192,1995240N11337,160.3,CAN,124,NA2,1995,100000000,2014,297876572.4,298136149.1,0.000871044,1995-0192CAN +1995-0192,1995240N11337,31.6,MSR,500,NA1,1995,20000000,2014,20000000,19952696.78,-0.002367962,1995-0192MSR +1995-0207,1995256N15253,36.5,MEX,484,NA1,1995,800000000,2014,2920653641,2926031587,0.001839657,1995-0207MEX +1995-0256,1995281N14278,28.9,MEX,484,NA1,1995,1500000000,2014,5476225576,5430128348,-0.00845333,1995-0256MEX +1995-0271,1995293N05177,325.7,PHL,608,WP2,1995,244000000,2014,936840738.5,1835824659,0.672735767,1995-0271PHL +1995-0274,1995294N05163,76.2,VNM,704,WP1,1995,21200000,2014,190369759.5,190451480.4,0.000429182,1995-0274VNM +1995-0275,1995310N09096,99.2,IND,356,NI,1995,46300000,2014,262049202.8,261827908.5,-0.000844833,1995-0275IND +1995-0393,1995193N06156,219.9,KOR,410,WP4,1995,140000000,2014,355288188.6,355117111.7,-0.000481632,1995-0393KOR +1995-0404,1995212N22287,66,BHS,44,NA1,1995,400000,2014,1278191.893,1280623.407,0.0019005,1995-0404BHS +1995-0404,1995212N22287,148.4,USA,840,NA2,1995,700000000,2014,1600355804,1601774396,0.000886031,1995-0404USA 
+1996-0052,1996001S08075,41.3,MOZ,508,SI,1996,14500000,2014,69792056.75,69531437.3,-0.003741218,1996-0052MOZ +1996-0110,1996202N17115,70.5,VNM,704,WP1,1996,362000000,2014,2733698258,2738388826,0.001714362,1996-0110VNM +1996-0168,1996203N12152,170.4,CHN,156,WP3,1996,72000000,2014,873787160.8,873155109.9,-0.000723608,1996-0168CHN +1996-0168,1996203N12152,262.2,TWN,158,WP4,1996,1100000000,2014,1993824461,1994825292,0.000501839,1996-0168TWN +1996-0201,1996248N15319,220.7,CAN,124,NA2,1996,100000,2014,286258.6963,286432.7091,0.000607702,1996-0201CAN +1996-0201,1996248N15319,60.8,PRI,630,NA1,1996,500000000,2014,1129729966,1126838603,-0.002562621,1996-0201PRI +1996-0227,1996260N19107,56.5,VNM,704,WP1,1996,11000000,2014,83068179.11,83458244.78,0.004684739,1996-0227VNM +1996-0256,1996306N15097,79.8,IND,356,NI,1996,1500300000,2014,7786525250,7792066042,0.000711334,1996-0256IND +1996-0282,1996356N08110,52.7,MYS,458,WP1,1996,52000000,2014,174301945.6,173673801.6,-0.003610279,1996-0282MYS +1996-0318,1996095S09133,46.7,AUS,36,OC,1996,46700000,2014,170904206.6,171420114,0.003014146,1996-0318AUS +1996-0325,1996187N10326,152.9,VGB,92,NA1,1996,2000000,2014,2000000,2002311.64,0.001155153,1996-0325VGB +1996-0371,1996201N07137,320.7,TWN,158,WP4,1996,200000000,2014,362513538.4,362428033.8,-0.000235894,1996-0371TWN +1997-0013,1997018S11059,33.9,MDG,450,SI,1997,50000000,2014,150510277.2,150241893.5,-0.00178475,1997-0013MDG +1997-0039,1997061S08171,119.8,FJI,242,OC,1997,27000000,2014,57809242.36,57890816.16,0.001410091,1997-0039FJI +1997-0168,1997210N15120,105.6,CHN,156,WP3,1997,579700000,2014,6319265945,6307948843,-0.001792494,1997-0168CHN +1997-0180,1997217N06168,30.6,JPN,392,WP4,1997,100000000,2014,109868789.5,111620909,0.015821558,1997-0180JPN +1997-0243,1997279N12263,66.7,MEX,484,NA1,1997,447800000,2014,1176350687,1176239750,-9.43E-05,1997-0243MEX +1997-0267,1997298N06140,77.2,KHM,116,WP1,1997,10000,2014,48505.97055,48568.14687,0.001281007,1997-0267KHM +1997-0267,1997298N06140,60.1,THA,764,WP1,1997,5000000,2014,13561680.42,13576851.17,0.001118023,1997-0267THA +1997-0267,1997298N06140,34.2,VNM,704,WP1,1997,470000000,2014,3260213213,3244977455,-0.004684193,1997-0267VNM +1997-0292,1997333N06194,135,GUM,316,OC,1997,200000000,2014,327208271.8,327242287,0.00010395,1997-0292GUM +1997-0358,1997261N13114,163.8,VNM,704,WP1,1997,5000000,2014,34683119.29,34661012.7,-0.000637591,1997-0358VNM +1998-0183,1998152N11075,66.1,IND,356,NI,1998,469000000,2014,2269722074,2272414630,0.00118559,1998-0183IND +1998-0297,1998259N17118,253.5,JPN,392,WP4,1998,3000000000,2014,3608482427,3607032520,-0.000401886,1998-0297JPN +1998-0311,1998281N11151,325.7,JPN,392,WP4,1998,335500000,2014,403548618.1,484163602.1,0.182125899,1998-0311JPN +1998-0374,1998315N09116,35.3,VNM,704,WP1,1998,93200000,2014,637799612.8,631449353.4,-0.010006409,1998-0374VNM +1998-0434,1998342N06141,66.3,VNM,704,WP1,1998,15000000,2014,102650152.3,102751784.9,0.000989597,1998-0434VNM +1999-0051,1999040S15147,64.6,AUS,36,OC,1999,300000000,2014,1131545100,1133239313,0.001496137,1999-0051AUS +1999-0177,1999135N12073,39.4,IND,356,NI,1999,20000000,2014,88885643.29,88908892.65,0.000261531,1999-0177IND +1999-0298,1999231N20266,127.1,USA,840,NA2,1999,70000000,2014,126960980,127028871.6,0.000534601,1999-0298USA +1999-0301,1999241N12133,46.1,CHN,156,WP3,1999,277900000,2014,2662759128,2670524860,0.002912178,1999-0301CHN +1999-0326,1999260N20130,160.7,JPN,392,WP4,1999,5000000000,2014,5316012420,5316956886,0.000177649,1999-0326JPN 
+1999-0327,1999251N15314,44.8,BHS,44,NA1,1999,450000000,2014,641705937.2,644161991,0.003820076,1999-0327BHS +1999-0327,1999251N15314,84.5,USA,840,NA2,1999,7000000000,2014,12696097995,12675805856,-0.001599576,1999-0327USA +1999-0392,1999275N16135,130.6,CHN,156,WP3,1999,241600000,2014,2314942804,2315198049,0.000110254,1999-0392CHN +1999-0401,1999288N15093,75.1,IND,356,NI,1999,470000000,2014,2088812617,2089705857,0.000427539,1999-0401IND +1999-0425,1999298N12099,82,IND,356,NI,1999,2500000000,2014,11110705411,11095873337,-0.001335827,1999-0425IND +1999-0435,1999286N16278,232.4,USA,840,NA2,1999,100000000,2014,181372828.5,181473486.7,0.000554826,1999-0435USA +1999-0619,1999236N22292,58.6,USA,840,NA2,1999,62500000,2014,113358017.8,113439034,0.000714438,1999-0619USA +1999-0625,1999230N12129,325.7,CHN,156,WP3,1999,18000000,2014,172470904.3,271662597.9,0.454332296,1999-0625CHN +2000-0097,2000056S17152,58.6,AUS,36,OC,2000,90000000,2014,317674987.2,317152965,-0.00164461,2000-0097AUS +2000-0107,2000032S11116,128.8,MDG,450,SI,2000,9000000,2014,24773011.21,24779586.39,0.000265382,2000-0107MDG +2000-0107,2000032S11116,95.7,MOZ,508,SI,2000,1000000,2014,3381086.779,3377433.308,-0.001081145,2000-0107MOZ +2000-0533,2000230N08139,325.7,CHN,156,WP3,2000,69443000,2014,600923967.1,602737067.3,0.003012645,2000-0533CHN +2000-0582,2000248N17117,45.6,VNM,704,WP1,2000,21000000,2014,125440545.4,125979702.1,0.004288895,2000-0582VNM +2000-0601,2000245N14157,319.4,KOR,410,WP4,2000,71000000,2014,178416664.1,178430679.8,7.86E-05,2000-0601KOR +2000-0706,2000299N08139,325.7,PHL,608,WP2,2000,17000000,2014,59708228.94,109753702.6,0.608768939,2000-0706PHL +2000-0706,2000299N08139,113.3,TWN,158,WP4,2000,150000000,2014,240119400,240329121.2,0.000873023,2000-0706TWN +2000-0715,2000305N06136,325.7,PHL,608,WP2,2000,31000000,2014,108879711.6,305722179.5,1.03243307,2000-0715PHL +2001-0293,2001170N11138,176.5,TWN,158,WP4,2001,5000000,2014,8829496.508,8836227.942,0.00076209,2001-0293TWN +2001-0319,2001181N08141,30.6,PHL,608,WP2,2001,68565000,2014,255861625.3,255364774,-0.001943763,2001-0319PHL +2001-0405,2001206N14134,76.5,CHN,156,WP3,2001,40000000,2014,313047801,312341037.7,-0.002260237,2001-0405CHN +2001-0405,2001206N14134,280.3,TWN,158,WP4,2001,240000000,2014,423815832.4,423746134.4,-0.000164467,2001-0405TWN +2001-0436,2001220N17118,33.6,VNM,704,WP1,2001,3200000,2014,18230113.7,18402804.93,0.009428269,2001-0436VNM +2001-0454,2001225N18146,36.4,JPN,392,WP4,2001,800000000,2014,901659328.8,894181173.4,-0.008328355,2001-0454JPN +2001-0522,2001248N23125,158.9,TWN,158,WP4,2001,800000000,2014,1412719441,1414013028,0.000915253,2001-0522TWN +2001-0553,2001278N12302,33.9,BLZ,84,NA1,2001,250000000,2014,485443358.1,487784713.7,0.004811534,2001-0553BLZ +2001-0624,2001309N10130,59,VNM,704,WP1,2001,55000000,2014,313330079.3,313747726,0.001332041,2001-0624VNM +2001-0671,2001303N13276,42.4,BHS,44,NA1,2001,300000000,2014,395198026.4,396338899.1,0.002882679,2001-0671BHS +2001-0711,2001363S10185,25.8,TON,776,OC,2001,51300000,2014,125645747.8,61874934.24,-0.708351264,2001-0711TON +2002-0004,2001364S19036,31.1,MDG,450,SI,2001,181000,2014,439343.7929,445737.8301,0.01444872,2002-0004MDG +2002-0669,2002295N11261,83.5,MEX,484,NA1,2002,200000000,2014,340513691.7,340009362.8,-0.001482181,2002-0669MEX +2002-0724,2002265N10315,94.9,USA,840,NA2,2002,2000000000,2014,3192297819,3187821874,-0.001403091,2002-0724USA +2003-0024,2003011S09182,25.8,FJI,242,OC,2003,30000000,2014,58076952.28,51280394.8,-0.124460383,2003-0024FJI 
+2003-0135,2003061S21148,149.3,NCL,540,OC,2003,40000000,2014,40000000,39967857.79,-0.000803878,2003-0135NCL +2003-0258,2003144N16119,40.7,PHL,608,WP2,2003,4000000,2014,13566469.09,13653992.95,0.006430762,2003-0258PHL +2003-0346,2003196N04150,132.7,CHN,156,WP3,2003,100000000,2014,631358675.5,631561257.7,0.000320816,2003-0346CHN +2003-0346,2003196N04150,116.9,PHL,608,WP2,2003,26468000,2014,89769325.95,89688671.83,-0.000898864,2003-0346PHL +2003-0443,2003240N20139,263.5,CHN,156,WP3,2003,241000000,2014,1521574408,1521111541,-0.000304249,2003-0443CHN +2003-0448,2003240N15329,109.6,BMU,60,NA1,2003,300000000,2014,399403562.6,399828430.3,0.00106319,2003-0448BMU +2003-0459,2003247N10153,100.4,JPN,392,WP4,2003,50000000,2014,54552255.91,54509967.04,-0.0007755,2003-0459JPN +2003-0459,2003247N10153,120.4,KOR,410,WP4,2003,4500000000,2014,9332563202,9339286292,0.000720131,2003-0459KOR +2003-0468,2003249N14329,92.8,USA,840,NA2,2003,3370000000,2014,5129874055,5119547369,-0.002015078,2003-0468USA +2003-0474,2003262N17254,57.8,MEX,484,NA1,2003,100000000,2014,180241122.2,179863368.4,-0.002098024,2003-0474MEX +2003-0487,2003268N28298,100.4,CAN,124,NA2,2003,110000000,2014,221788181.9,221623043,-0.000744857,2003-0487CAN +2003-0594,2003316N11141,53.1,CHN,156,WP3,2003,196938000,2014,1243385148,1243934556,0.000441767,2003-0594CHN +2003-0605,2003345N05092,72.9,IND,356,NI,2003,28000000,2014,93953651.91,93927917.74,-0.00027394,2003-0605IND +2003-0782,2003179N20271,94.1,USA,840,NA2,2003,50000000,2014,76110891.03,75976612.4,-0.001765808,2003-0782USA +2004-0103,2004061S12072,25.8,MDG,450,SI,2004,250000000,2014,611461783.3,349891343,-0.558229799,2004-0103MDG +2004-0153,2004098S15173,34.6,FJI,242,OC,2004,4000000,2014,6575111.983,6477235.422,-0.014997823,2004-0153FJI +2004-0218,2004134N07132,150.2,PHL,608,WP2,2004,1000000,2014,3114596.181,3112259.337,-0.00075057,2004-0218PHL +2004-0235,2004136N15090,44.3,MMR,104,NI,2004,688000,2014,4260951.721,4238654.264,-0.005246716,2004-0235MMR +2004-0309,2004174N14146,25.8,PHL,608,WP2,2004,19667000,2014,61254763.09,13802716.32,-1.490176204,2004-0309PHL +2004-0415,2004223N11301,104.1,CUB,192,NA1,2004,1000000000,2014,2111247808,2114143610,0.001370667,2004-0415CUB +2004-0415,2004223N11301,38.4,JAM,388,NA1,2004,300000000,2014,409384949.8,407910850.2,-0.003607265,2004-0415JAM +2004-0415,2004223N11301,112,USA,840,NA2,2004,16000000000,2014,22839070991,22861215281,0.00096911,2004-0415USA +2004-0428,2004227N15141,25.8,JPN,392,WP4,2004,500000000,2014,503661847.5,177221668.9,-1.04450379,2004-0428JPN +2004-0428,2004227N15141,134,KOR,410,WP4,2004,1000000,2014,1845168.832,1847692.412,0.001366735,2004-0428KOR +2004-0445,2004230N09172,153.1,GUM,316,OC,2004,1000000,2014,1435830.957,1437478.07,0.001146492,2004-0445GUM +2004-0448,2004241N32282,72.2,USA,840,NA2,2004,62500000,2014,89215121.06,89339480.31,0.001392955,2004-0448USA +2004-0455,2004238N11325,40.8,BHS,44,NA1,2004,1000000000,2014,1210044074,1218435858,0.006911168,2004-0455BHS +2004-0455,2004238N11325,82.9,USA,840,NA2,2004,11000000000,2014,15701861306,15696443197,-0.000345121,2004-0455USA +2004-0462,2004247N10332,75.4,TTO,780,NA1,2004,1000000,2014,2048171.488,2049992.563,0.000888727,2004-0462TTO +2004-0580,2004319N10134,142.4,PHL,608,WP2,2004,6000000,2014,18687577.08,18669893.72,-0.000946711,2004-0580PHL +2004-0580,2004319N10134,34.9,VNM,704,WP1,2004,23000000,2014,94274912.3,94402049.38,0.00134767,2004-0580VNM +2005-0351,2005186N12299,81,CUB,192,NA1,2005,1400000000,2014,2647941893,2645035134,-0.001098346,2005-0351CUB 
+2005-0351,2005186N12299,97.5,JAM,388,NA1,2005,30000000,2014,37089462.6,37061160.28,-0.000763374,2005-0351JAM +2005-0351,2005186N12299,113.8,USA,840,NA2,2005,2230000000,2014,2984138801,2988710313,0.001530765,2005-0351USA +2005-0381,2005192N22155,265.7,TWN,158,WP4,2005,100000000,2014,141174388.7,141183485.9,6.44E-05,2005-0381TWN +2005-0382,2005192N11318,72.1,MEX,484,NA1,2005,400000000,2014,599247676.6,600546073.2,0.002164367,2005-0382MEX +2005-0467,2005236N23285,53,USA,840,NA2,2005,1.25E+11,2014,1.67E+11,1.67E+11,0.001099496,2005-0467USA +2005-0492,2005237N14148,33.7,CHN,156,WP3,2005,1900000000,2014,8712512761,8710256578,-0.000258992,2005-0492CHN +2005-0492,2005237N14148,291.6,TWN,158,WP4,2005,38000000,2014,53646267.7,53654485.48,0.000153173,2005-0492TWN +2005-0497,2005241N15155,180.7,JPN,392,WP4,2005,1000000000,2014,1019977855,1019949598,-2.77E-05,2005-0497JPN +2005-0510,2005248N08142,100.4,CHN,156,WP3,2005,1750000000,2014,8024682806,8036888245,0.001519832,2005-0510CHN +2005-0536,2005257N15120,42.6,VNM,704,WP1,2005,20000000,2014,64617086.41,64668886.84,0.000801331,2005-0536VNM +2005-0540,2005262N13127,48.9,CHN,156,WP3,2005,1040000000,2014,4768954354,4766559108,-0.000502384,2005-0540CHN +2005-0540,2005262N13127,35.3,VNM,704,WP1,2005,219250000,2014,708364809.8,714572541.1,0.00872529,2005-0540VNM +2005-0547,2005261N21290,59.5,USA,840,NA2,2005,16000000000,2014,21410861355,21364784783,-0.002154338,2005-0547USA +2005-0565,2005268N19146,171.4,CHN,156,WP3,2005,150000000,2014,687829954.8,688297480.7,0.000679481,2005-0565CHN +2005-0565,2005268N19146,325.7,TWN,158,WP4,2005,100000000,2014,141174388.7,446897893.5,1.152334217,2005-0565TWN +2005-0585,2005289N18282,40.3,MEX,484,NA1,2005,5000000000,2014,7490595958,7496293488,0.000760335,2005-0585MEX +2005-0585,2005289N18282,121.1,USA,840,NA2,2005,14300000000,2014,19135957336,19107419455,-0.001492435,2005-0585USA +2005-0611,2005301N13117,69.6,VNM,704,WP1,2005,11000000,2014,35539397.53,35639054.33,0.002800198,2005-0611VNM +2006-0043,2006006S11129,58.3,AUS,36,OC,2006,2354000,2014,4625625.837,4645427.663,0.00427176,2006-0043AUS +2006-0139,2006074S13158,50.5,AUS,36,OC,2006,1180000000,2014,2318707938,2328083679,0.004035367,2006-0139AUS +2006-0251,2006128N09138,96.9,CHN,156,WP3,2006,475000000,2014,1809189080,1810321031,0.000625472,2006-0251CHN +2006-0251,2006128N09138,208.5,PHL,608,WP2,2006,3328000,2014,7749707.397,7751266.496,0.000201161,2006-0251PHL +2006-0388,2006198N08152,39.8,CHN,156,WP3,2006,367000000,2014,1397836616,1388499752,-0.006701918,2006-0388CHN +2006-0410,2006209N13130,39.1,CHN,156,WP3,2006,900000000,2014,3427937205,3442895603,0.00435418,2006-0410CHN +2006-0437,2006216N07151,84.3,CHN,156,WP3,2006,2510000000,2014,9560135983,9551856301,-0.000866438,2006-0437CHN +2006-0466,2006237N13298,117,USA,840,NA2,2006,32860000,2014,41553786.12,41579194.68,0.000611275,2006-0466USA +2006-0504,2006252N13139,152,JPN,392,WP4,2006,2500000000,2014,2676605774,2678884715,0.000851067,2006-0504JPN +2006-0505,2006257N16259,320.3,MEX,484,NA1,2006,2700000,2014,3638886.137,3637726.519,-0.000318725,2006-0505MEX +2006-0517,2006268N12129,325.7,PHL,608,WP2,2006,113000000,2014,263136098.5,971316188.2,1.305980664,2006-0517PHL +2006-0517,2006268N12129,70.6,VNM,704,WP1,2006,624000000,2014,1750622103,1751243470,0.000354878,2006-0517VNM +2006-0600,2006298N12143,214.7,PHL,608,WP2,2006,9077000,2014,21137047.49,21130719.11,-0.000299443,2006-0600PHL +2006-0648,2006329N06150,199.7,PHL,608,WP2,2006,66400000,2014,154621565.9,154647768.3,0.000169447,2006-0648PHL 
+2006-0648,2006329N06150,78,VNM,704,WP1,2006,456000000,2014,1279300767,1278681154,-0.000484455,2006-0648VNM +2007-0095,2007066S12066,25.8,MDG,450,SI,2007,240000000,2014,348858871.4,125216890.9,-1.0246201,2007-0095MDG +2007-0164,2007151N14072,33.9,OMN,512,NI,2007,3900000000,2014,7513292712,7505906625,-0.000983553,2007-0164OMN +2007-0262,2007188N04148,325.7,JPN,392,WP4,2007,60000000,2014,64453546.68,256696088.4,1.381948092,2007-0262JPN +2007-0360,2007225N12331,122.2,BLZ,84,NA1,2007,14847000,2014,19476548.08,19485983.55,0.000484336,2007-0360BLZ +2007-0360,2007225N12331,47,DMA,212,NA1,2007,20000000,2014,24849111.57,24808275.31,-0.001644721,2007-0360DMA +2007-0360,2007225N12331,141.6,JAM,388,NA1,2007,300000000,2014,323956910.3,324363348.3,0.001253819,2007-0360JAM +2007-0360,2007225N12331,63.3,LCA,662,NA1,2007,40000000,2014,47496877.29,47537233.54,0.0008493,2007-0360LCA +2007-0360,2007225N12331,121.2,MEX,484,NA1,2007,600000000,2014,749255406.1,748407649.8,-0.001132106,2007-0360MEX +2007-0380,2007223N19136,52.6,CHN,156,WP3,2007,890555000,2014,2627998249,2633866360,0.002230431,2007-0380CHN +2007-0439,2007244N12303,79.5,HND,340,NA1,2007,6579000,2014,10588404.38,10570882.76,-0.001656164,2007-0439HND +2007-0457,2007257N16134,100.4,CHN,156,WP3,2007,638000000,2014,1882716826,1880610808,-0.001119232,2007-0457CHN +2007-0463,2007272N17125,35.8,VNM,704,WP1,2007,191000000,2014,459411646.6,458341782.1,-0.002331487,2007-0463VNM +2007-0470,2007254N18140,186.8,KOR,410,WP4,2007,70000000,2014,87997870.47,88050777.92,0.000601055,2007-0470KOR +2007-0479,2007240N17153,268,JPN,392,WP4,2007,1000000000,2014,1074225778,1074609445,0.000357093,2007-0479JPN +2007-0552,2007274N18131,32.4,CHN,156,WP3,2007,1077788000,2014,3180516618,3220120624,0.012375178,2007-0552CHN +2007-0556,2007314N10093,78.6,BGD,50,NI,2007,2300000000,2014,4994688046,4984479477,-0.002045977,2007-0556BGD +2007-0560,2007306N18133,168.8,PHL,608,WP2,2007,2971000,2014,5660826.737,5662083.054,0.000221907,2007-0560PHL +2007-0576,2007324N10140,83,PHL,608,WP2,2007,5000000,2014,9526803.663,9537264.891,0.001097481,2007-0576PHL +2007-0578,2007323N09128,76.1,PHL,608,WP2,2007,1000000,2014,1905360.733,1907024.547,0.000872847,2007-0578PHL +2007-0591,2007058S12135,25.8,AUS,36,OC,2007,100000000,2014,171950641.6,46970609.91,-1.297685383,2007-0591AUS +2007-0655,2007337S12186,59.1,FJI,242,OC,2007,652000,2014,858485.7575,859280.4569,0.000925271,2007-0655FJI +2008-0051,2008026S12179,74.7,FJI,242,OC,2008,30000000,2014,38176381.62,38122406.29,-0.001414841,2008-0051FJI +2008-0070,2008037S10055,39,MDG,450,SI,2008,60000000,2014,68034718.12,67971419.61,-0.000930818,2008-0070MDG +2008-0111,2008062S10064,73.7,MOZ,508,SI,2008,20000000,2014,29510844.15,29531686.48,0.000706011,2008-0111MOZ +2008-0155,2008104N08128,104,CHN,156,WP3,2008,49000000,2014,111703612.9,111831024.7,0.001139974,2008-0155CHN +2008-0155,2008104N08128,30.4,PHL,608,WP2,2008,16000000,2014,26139377.16,26438538.32,0.01137985,2008-0155PHL +2008-0184,2008117N11090,43.7,MMR,104,NI,2008,4000000000,2014,8216064485,8247757415,0.003850014,2008-0184MMR +2008-0197,2008135N12116,91,PHL,608,WP2,2008,99174000,2014,162021661.9,162262628.3,0.001486143,2008-0197PHL +2008-0249,2008169N08135,86.3,CHN,156,WP3,2008,175000000,2014,398941474.7,399337415.4,0.000991986,2008-0249CHN +2008-0249,2008169N08135,186.9,PHL,608,WP2,2008,284694000,2014,465107740.1,465172164.6,0.000138506,2008-0249PHL +2008-0292,2008206N22133,97.7,CHN,156,WP3,2008,73000000,2014,166415586.6,166555055.4,0.000837724,2008-0292CHN 
+2008-0304,2008203N18276,82.1,MEX,484,NA1,2008,75000000,2014,88822764.83,88951468.92,0.00144795,2008-0304MEX +2008-0304,2008203N18276,91.8,USA,840,NA2,2008,1200000000,2014,1428540902,1428581212,2.82E-05,2008-0304USA +2008-0338,2008229N18293,62.4,USA,840,NA2,2008,180000000,2014,214281135.2,214393281.6,0.000523224,2008-0338USA +2008-0352,2008238N13293,45.3,CUB,192,NA1,2008,2072000000,2014,2748386795,2735471659,-0.004710245,2008-0352CUB +2008-0352,2008238N13293,95.2,JAM,388,NA1,2008,66198000,2014,67028878.91,67033295.53,6.59E-05,2008-0352JAM +2008-0352,2008238N13293,75.2,USA,840,NA2,2008,7000000000,2014,8333155259,8311297726,-0.002626406,2008-0352USA +2008-0369,2008229N13147,83.3,CHN,156,WP3,2008,58000000,2014,132220603.1,132190363,-0.000228736,2008-0369CHN +2008-0369,2008229N13147,325.7,HKG,344,WP4,2008,380000,2014,505083.5374,918937.0251,0.598493758,2008-0369HKG +2008-0369,2008229N13147,40.9,PHL,608,WP2,2008,33870000,2014,55333794.03,54962062.88,-0.006740644,2008-0369PHL +2008-0378,2008241N19303,211.8,USA,840,NA2,2008,160000000,2014,190472120.2,190624644.9,0.000800451,2008-0378USA +2008-0384,2008245N17323,81.6,CUB,192,NA1,2008,1500000000,2014,1989662255,1988880448,-0.000393012,2008-0384CUB +2008-0384,2008245N17323,33.5,TCA,796,NA1,2008,500000000,2014,500000000,500063361,0.000126714,2008-0384TCA +2008-0384,2008245N17323,80.6,USA,840,NA2,2008,30000000000,2014,35713522539,35674564454,-0.001091445,2008-0384USA +2008-0426,2008262N16142,144.7,CHN,156,WP3,2008,824000000,2014,1878444430,1876942978,-0.000799626,2008-0426CHN +2008-0426,2008262N16142,25.8,PHL,608,WP2,2008,7420000,2014,12122136.16,5780902.116,-0.74047347,2008-0426PHL +2008-0437,2008272N15113,58.2,VNM,704,WP1,2008,6500000,2014,12209487.86,12248048.24,0.003153254,2008-0437VNM +2008-0441,2008268N12140,325.7,TWN,158,WP4,2008,90000000,2014,114489207.2,297519658.6,0.954999744,2008-0441TWN +2008-0552,2008320N08122,111.2,VNM,704,WP1,2008,1000000,2014,1878382.747,1876049.31,-0.001243031,2008-0552VNM +2009-0137,2009093S12062,48.7,MDG,450,SI,2009,5000000,2014,6241556.912,6276216.074,0.005537606,2009-0137MDG +2009-0165,2009123N10111,147.5,PHL,608,WP2,2009,30342000,2014,51295833,51293642.52,-4.27E-05,2009-0165PHL +2009-0321,2009215N20133,53.2,TWN,158,WP4,2009,250000000,2014,338247183.2,338441840.9,0.000575324,2009-0321TWN +2009-0384,2009239N12270,61.1,MEX,484,NA1,2009,40000000,2014,58422121.35,58579297.05,0.002686733,2009-0384MEX +2009-0399,2009254N14130,82.1,CHN,156,WP3,2009,295001000,2014,605154271.7,606293567.7,0.001880884,2009-0399CHN +2009-0414,2009268N14128,32.8,PHL,608,WP2,2009,237489000,2014,401496146.7,395915626.6,-0.013996812,2009-0414PHL +2009-0414,2009268N14128,48.9,VNM,704,WP1,2009,785000000,2014,1378777736,1372999963,-0.004199308,2009-0414VNM +2009-0422,2009270N10148,41.3,CHN,156,WP3,2009,35000000,2014,71797721.06,71181089.02,-0.008625556,2009-0422CHN +2009-0422,2009270N10148,39.4,PHL,608,WP2,2009,585379000,2014,989634942.4,984393842.4,-0.005310067,2009-0422PHL +2009-0422,2009270N10148,151.9,VNM,704,WP1,2009,200000,2014,351280.9518,351352.4226,0.000203437,2009-0422VNM +2009-0423,2009272N07164,142.6,JPN,392,WP4,2009,1000000000,2014,927176205.2,926315897.9,-0.00092831,2009-0423JPN +2009-0478,2009299N12153,325.7,PHL,608,WP2,2009,15194000,2014,25686800.03,43613859.99,0.529397747,2009-0478PHL +2009-0478,2009299N12153,45.4,VNM,704,WP1,2009,280000000,2014,491793332.5,489595236.2,-0.004479571,2009-0478VNM +2009-0554,2009346S10172,76.9,FJI,242,OC,2009,13300000,2014,20772286.48,20735822.7,-0.001756948,2009-0554FJI 
+2009-0609,2009313N11072,44.4,IND,356,NI,2009,300000000,2014,455879199.2,456382862.4,0.001104207,2009-0609IND +2010-0106,2010069S12188,25.8,FJI,242,OC,2010,39427000,2014,56286310.68,13492148.22,-1.428343453,2010-0106FJI +2010-0210,2010151N14065,39.6,OMN,512,NI,2010,1000000000,2014,1382561646,1385900125,0.002411794,2010-0210OMN +2010-0260,2010176N16278,46.5,MEX,484,NA1,2010,2000000000,2014,2485464847,2478270330,-0.002898834,2010-0260MEX +2010-0308,2010191N12138,325.7,CHN,156,WP3,2010,500000,2014,859123.4391,2692108.347,1.142167325,2010-0308CHN +2010-0308,2010191N12138,325.7,PHL,608,WP2,2010,8675000,2014,12369162.52,31196159.09,0.9250885,2010-0308PHL +2010-0308,2010191N12138,165,VNM,704,WP1,2010,500000,2014,803078.7658,803262.4956,0.000228756,2010-0308VNM +2010-0432,2010233N17119,39,VNM,704,WP1,2010,44000000,2014,70670931.39,70678446.19,0.000106329,2010-0432VNM +2010-0468,2010236N12341,41.5,ATG,28,NA1,2010,12600000,2014,13945942.61,14028748.46,0.005920072,2010-0468ATG +2010-0484,2010256N17137,91.9,CHN,156,WP3,2010,298285000,2014,512527270.1,513249853.4,0.001408851,2010-0484CHN +2010-0484,2010256N17137,303,TWN,158,WP4,2010,63100000,2014,75033445.7,75060353.7,0.000358549,2010-0484TWN +2010-0494,2010257N16282,62.4,MEX,484,NA1,2010,3900000000,2014,4846656451,4857140116,0.002160736,2010-0494MEX +2010-0543,2010285N13145,54,CHN,156,WP3,2010,420000000,2014,721663688.8,724623069.9,0.004092391,2010-0543CHN +2010-0543,2010285N13145,91.8,PHL,608,WP2,2010,275745000,2014,393168267.4,392849298.5,-0.000811608,2010-0543PHL +2010-0543,2010285N13145,44.7,TWN,158,WP4,2010,10000000,2014,11891195.83,11938433.23,0.003964599,2010-0543TWN +2010-0554,2010293N17093,53.7,MMR,104,NI,2010,57000000,2014,75300204.74,75386609.11,0.001146807,2010-0554MMR +2010-0571,2010302N09306,122.1,LCA,662,NA1,2010,500000,2014,548667.5019,549287.125,0.001128686,2010-0571LCA +2010-0571,2010302N09306,45.1,VCT,670,NA1,2010,25000000,2014,26706058.15,26769270.95,0.002364187,2010-0571VCT +2011-0070,2011028S13180,34.4,AUS,36,OC,2011,2500000000,2014,2626722487,2625314349,-0.000536226,2011-0070AUS +2011-0091,2011020S13182,129.2,TON,776,OC,2011,3000000,2014,3148217.183,3150478.22,0.000717938,2011-0091TON +2011-0272,2011205N12130,55,PHL,608,WP2,2011,63258000,2014,80315874.36,80536440.16,0.002742465,2011-0272PHL +2011-0328,2011233N15301,53.5,BHS,44,NA1,2011,40000000,2014,43522583.4,43398979.16,-0.002844043,2011-0328BHS +2011-0328,2011233N15301,44.8,DOM,214,NA1,2011,30000000,2014,34283168.75,34047442.12,-0.006899618,2011-0328DOM +2011-0328,2011233N15301,46.2,PRI,630,NA1,2011,500000000,2014,510433804.3,513965889.6,0.00689594,2011-0328PRI +2011-0328,2011233N15301,157.5,USA,840,NA2,2011,7300000000,2014,8229569135,8230835742,0.000153897,2011-0328USA +2011-0341,2011233N12129,55,PHL,608,WP2,2011,34452000,2014,43742174.96,43822836.01,0.001842313,2011-0341PHL +2011-0378,2011270N18139,325.7,PHL,608,WP2,2011,2655000,2014,3370935.635,4948189.699,0.38383145,2011-0378PHL +2011-0379,2011266N13139,72.5,CHN,156,WP3,2011,219000000,2014,303152614.2,303944727.8,0.002609512,2011-0379CHN +2011-0379,2011266N13139,67.4,PHL,608,WP2,2011,344173000,2014,436981179.1,436500548,-0.001100495,2011-0379PHL +2011-0385,2011279N10257,85.2,MEX,484,NA1,2011,27700000,2014,30846033.58,30773744.47,-0.002346297,2011-0385MEX +2011-0456,2011245N27269,39.9,USA,840,NA2,2011,750000000,2014,845503678.2,851364151.3,0.006907428,2011-0456USA +2011-0519,2011346N03156,54.3,PHL,608,WP2,2011,38082000,2014,48351024.81,48256972.54,-0.001947091,2011-0519PHL 
+2011-0566,2011360N09088,93.6,IND,356,NI,2011,375625000,2014,420146061.5,420291427.8,0.00034593,2011-0566IND +2012-0043,2012039S14075,90.3,MDG,450,SI,2012,100000000,2014,107598319.7,107520211.8,-0.000726185,2012-0043MDG +2012-0259,2012201N15129,160.1,CHN,156,WP3,2012,329000000,2014,402859805.3,402882496,5.63E-05,2012-0259CHN +2012-0260,2012209N11131,306.2,TWN,158,WP4,2012,27000000,2014,28883557.6,28892236.54,0.000300435,2012-0260TWN +2012-0276,2012215N12313,60.3,MEX,484,NA1,2012,300000000,2014,328342750.7,328480450.5,0.00041929,2012-0276MEX +2012-0282,2012215N23146,55,CHN,156,WP3,2012,1500000000,2014,1836746833,1830219334,-0.003560167,2012-0282CHN +2012-0294,2012225N16133,136.9,CHN,156,WP3,2012,262000000,2014,320818446.7,320777868.8,-0.00012649,2012-0294CHN +2012-0294,2012225N16133,33.5,PHL,608,WP2,2012,3000000,2014,3413756.742,3418483.017,0.001383522,2012-0294PHL +2012-0294,2012225N16133,61.3,VNM,704,WP1,2012,6800000,2014,8125989.117,8149995.198,0.00294988,2012-0294VNM +2012-0313,2012234N16315,75.7,USA,840,NA2,2012,2000000000,2014,2163578370,2161055349,-0.001166814,2012-0313USA +2012-0401,2012166N09269,46.1,MEX,484,NA1,2012,555000000,2014,607434088.9,610325397.7,0.00474858,2012-0401MEX +2012-0406,2012296N06135,51.5,CHN,156,WP3,2012,197000000,2014,241226084,241139506.5,-0.00035897,2012-0406CHN +2012-0406,2012296N06135,112.3,PHL,608,WP2,2012,1339000,2014,1523673.426,1521994.525,-0.001102485,2012-0406PHL +2012-0406,2012296N06135,100.4,VNM,704,WP1,2012,336000000,2014,401519462.3,400817204.3,-0.001750533,2012-0406VNM +2012-0410,2012296N14283,162.2,JAM,388,NA1,2012,16542000,2014,15483975.86,15480905.55,-0.000198309,2012-0410JAM +2012-0410,2012296N14283,94.3,USA,840,NA2,2012,50000000000,2014,54089459244,53991359585,-0.001815302,2012-0410USA +2012-0414,2012232N13141,255.1,JPN,392,WP4,2012,86000000,2014,67245080.24,67227907.25,-0.000255412,2012-0414JPN +2012-0414,2012232N13141,31.2,KOR,410,WP4,2012,450000000,2014,519378870.9,517916090.4,-0.002820377,2012-0414KOR +2012-0498,2012346S14180,244,FJI,242,OC,2012,8400000,2014,9481515.123,9479088.612,-0.000255953,2012-0498FJI +2012-0498,2012346S14180,83,WSM,882,OC,2012,133000000,2014,133663025.6,133838355.8,0.001310873,2012-0498WSM +2012-0500,2012331N03157,85.6,PHL,608,WP2,2012,898352000,2014,1022251732,1020502716,-0.00171241,2012-0500PHL +2012-0588,2012254N09135,325.7,JPN,392,WP4,2012,31000000,2014,24239505.67,42923630.54,0.571438735,2012-0588JPN +2012-0588,2012254N09135,198.7,KOR,410,WP4,2012,349000000,2014,402807168.8,403100611.2,0.000728228,2012-0588KOR +2013-0032,2013046S20042,44,MDG,450,SI,2013,25000000,2014,25169373.46,25243534.94,0.002942164,2013-0032MDG +2013-0249,2013187N20156,67.9,CHN,156,WP3,2013,460000000,2014,501902623.4,502723049.4,0.001633297,2013-0249CHN +2013-0258,2013178N09133,95.3,CHN,156,WP3,2013,177000000,2014,193123400.7,192852737.9,-0.001402485,2013-0258CHN +2013-0258,2013178N09133,263.2,PHL,608,WP2,2013,1000000,2014,1046897.37,1046966.916,6.64E-05,2013-0258PHL +2013-0272,2013220N12137,73.3,CHN,156,WP3,2013,2120000000,2014,2313116438,2310410970,-0.001170305,2013-0272CHN +2013-0272,2013220N12137,129.6,PHL,608,WP2,2013,32431000,2014,33951928.59,33991276.65,0.001158264,2013-0272PHL +2013-0306,2013228N23124,108.7,CHN,156,WP3,2013,376000000,2014,410250840,409855850.5,-0.000963264,2013-0306CHN +2013-0306,2013228N23124,36.1,TWN,158,WP4,2013,12000000,2014,12443691.25,12287826.89,-0.012604679,2013-0306TWN +2013-0341,2013210N13123,93.8,CHN,156,WP3,2013,20000000,2014,21821853.19,21795648.16,-0.001201584,2013-0341CHN 
+2013-0341,2013210N13123,70.9,VNM,704,WP1,2013,1000000,2014,1087504.092,1085162.161,-0.002155814,2013-0341VNM +2013-0401,2013281N12098,89,IND,356,NI,2013,633471000,2014,695703513,696115605.6,0.000592164,2013-0401IND +2013-0419,2013269N15118,43,VNM,704,WP1,2013,663230000,2014,721265338.8,723859353.8,0.003590026,2013-0419VNM +2013-0429,2013272N10135,41.9,CHN,156,WP3,2013,6700000000,2014,7310320819,7266229953,-0.006049579,2013-0429CHN +2013-0430,2013282N14132,148.9,PHL,608,WP2,2013,96723000,2014,101259054.3,101188259.5,-0.00069939,2013-0430PHL +2013-0430,2013282N14132,117.1,VNM,704,WP1,2013,76000000,2014,82650310.98,82572342.38,-0.0009438,2013-0430VNM +2013-0433,2013306N07162,50.9,PHL,608,WP2,2013,10000000000,2014,10468973696,10506773245,0.003604123,2013-0433PHL +2013-0433,2013306N07162,45.5,VNM,704,WP1,2013,734000000,2014,798228003.4,798138198.4,-0.000112512,2013-0433VNM +2013-0437,2013301N13142,141.4,PHL,608,WP2,2013,4729000,2014,4950777.661,4956830.361,0.001221829,2013-0437PHL +2014-0001,2014004S17183,39.4,TON,776,OC,2014,31000000,2014,31000000,30964281.91,-0.001152861,2014-0001TON +2014-0096,2014068S16169,31.3,VUT,548,OC,2014,2000000,2014,2000000,2008990.461,0.004485157,2014-0096VUT +2014-0227,2014190N08154,130.9,CHN,156,WP3,2014,4232973000,2014,4232973000,4228685985,-0.00101328,2014-0227CHN +2014-0227,2014190N08154,248.5,PHL,608,WP2,2014,820576000,2014,820576000,820579917.5,4.77E-06,2014-0227PHL +2014-0227,2014190N08154,40.8,VNM,704,WP1,2014,6200000,2014,6200000,6228610.472,0.004603978,2014-0227VNM +2014-0236,2014184N08147,35.4,JPN,392,WP4,2014,156000000,2014,156000000,156761680.5,0.004870686,2014-0236JPN +2014-0240,2014197N10137,49.9,CHN,156,WP3,2014,500000000,2014,500000000,498637013.1,-0.002729696,2014-0240CHN +2014-0240,2014197N10137,325.7,TWN,158,WP4,2014,20000000,2014,20000000,84602726.23,1.442234217,2014-0240TWN +2014-0310,2014212N11242,37,USA,840,NA2,2014,66000000,2014,66000000,66401401.65,0.006063423,2014-0310USA +2014-0316,2014209N12152,300.1,JPN,392,WP4,2014,200000000,2014,200000000,200068150.7,0.000340696,2014-0316JPN +2014-0333,2014253N13260,66.1,MEX,484,NA1,2014,2500000000,2014,2500000000,2501601560,0.000640419,2014-0333MEX +2014-0390,2014254N10142,79.2,CHN,156,WP3,2014,2900000000,2014,2900000000,2892991825,-0.002419537,2014-0390CHN +2014-0390,2014254N10142,99.7,PHL,608,WP2,2014,19183000,2014,19183000,19172694.48,-0.000537366,2014-0390PHL +2014-0390,2014254N10142,101.3,VNM,704,WP1,2014,4500000,2014,4500000,4496723.336,-0.000728413,2014-0390VNM +2014-0392,2014279N11096,67.7,IND,356,NI,2014,7000000000,2014,7000000000,6989678402,-0.001475602,2014-0392IND +2014-0396,2014275N06166,255.2,JPN,392,WP4,2014,100000000,2014,100000000,100017513.4,0.000175118,2014-0396JPN +2014-0479,2014334N02156,85.4,PHL,608,WP2,2014,113878000,2014,113878000,113992423.7,0.001004287,2014-0479PHL +2014-0497,2014362N07130,57,PHL,608,WP2,2014,17688000,2014,17688000,17619650.11,-0.003871681,2014-0497PHL +2015-0017,2015012N09146,107.6,PHL,608,WP2,2015,1000000,2014,972027.6618,973519.9527,0.001534058,2015-0017PHL +2015-0053,2015045S12145,25.8,AUS,36,OC,2015,78000000,2014,84702479.43,2995180.152,-3.342140494,2015-0053AUS +2015-0079,2015047S15152,102.2,AUS,36,OC,2015,546000000,2014,592917356,592586612.4,-0.00055798,2015-0079AUS +2015-0093,2015066S08170,47.5,VUT,548,OC,2015,449400000,2014,496316510.4,497403875.7,0.002188474,2015-0093VUT +2015-0105,2015085N06162,45.7,FSM,583,OC,2015,11000000,2014,11098409.57,11055456.77,-0.003877684,2015-0105FSM 
+2015-0176,2015122N07144,228.3,JPN,392,WP4,2015,23200000,2014,25604132.8,25609493.6,0.000209351,2015-0176JPN +2015-0176,2015122N07144,193.8,PHL,608,WP2,2015,348000,2014,338265.6263,338117.5537,-0.000437837,2015-0176PHL +2015-0244,2015180N09160,37.4,CHN,156,WP3,2015,940000000,2014,890531131.4,891327043.9,0.000893351,2015-0244CHN +2015-0278,2015183N13130,169.9,CHN,156,WP3,2015,213000000,2014,201790564.9,201653318.2,-0.000680376,2015-0278CHN +2015-0278,2015183N13130,50.2,PHL,608,WP2,2015,2218000,2014,2155957.354,2153497.386,-0.001141661,2015-0278PHL +2015-0339,2015211N13162,61.1,CHN,156,WP3,2015,1282690000,2014,1215186571,1216891387,0.001401942,2015-0339CHN +2015-0458,2015263N14148,120.2,CHN,156,WP3,2015,661000000,2014,626213912.6,626317002.2,0.00016461,2015-0458CHN +2015-0462,2015285N14151,107,PHL,608,WP2,2015,210985000,2014,205083256.2,204978171.4,-0.000512532,2015-0462PHL +2015-0470,2015293N13266,100,MEX,484,NA1,2015,823000000,2014,924242990.8,923900657.4,-0.000370462,2015-0470MEX +2015-0473,2015242N12343,73.3,CPV,132,NA1,2015,1100000,2014,1281242.483,1282841.967,0.001247607,2015-0473CPV +2015-0479,2015270N27291,25.8,BHS,44,NA1,2015,90000000,2014,83627197.41,1129342.51,-4.304733181,2015-0479BHS +2015-0484,2015301N11065,67.3,YEM,887,NI,2015,200000000,2014,189625777.2,189687123.7,0.000323461,2015-0484YEM +2015-0490,2015273N12130,126.8,CHN,156,WP3,2015,4200000000,2014,3978968885,3980510860,0.000387456,2015-0490CHN +2015-0490,2015273N12130,46.9,PHL,608,WP2,2015,1300000,2014,1263635.96,1268652.402,0.003961988,2015-0490PHL +2015-0543,2015344N07145,189.3,PHL,608,WP2,2015,135217000,2014,131434664.3,131332251,-0.000779499,2015-0543PHL +2016-0041,2016041S14170,65.2,FJI,242,OC,2016,600000000,2014,575865848.2,575863677.2,-3.77E-06,2016-0041FJI +2016-0175,2016138N10081,44.8,BGD,50,NI,2016,600000000,2014,468492129.6,469418303.5,0.001974974,2016-0175BGD +2016-0268,2016207N17116,118.2,VNM,704,WP1,2016,191000000,2014,173254832,172984694.3,-0.00156041,2016-0268VNM +2016-0319,2016248N15255,102.2,MEX,484,NA1,2016,50000000,2014,60984824.31,60888078.84,-0.001587646,2016-0319MEX +2016-0322,2016242N24279,84.6,USA,840,NA2,2016,600000000,2014,561979044.5,561035932.5,-0.001679608,2016-0322USA +2016-0342,2016253N13144,145.2,CHN,156,WP3,2016,2300000000,2014,2154362604,2152625313,-0.000806731,2016-0342CHN +2016-0342,2016253N13144,25.8,PHL,608,WP2,2016,4913000,2014,4585811.217,269488.5479,-2.834196402,2016-0342PHL +2016-0350,2016266N11144,73.3,CHN,156,WP3,2016,830000000,2014,777443896.3,776944854,-0.000642107,2016-0350CHN +2016-0350,2016266N11144,314.9,TWN,158,WP4,2016,110000000,2014,109825691.6,109814310.5,-0.000103634,2016-0350TWN +2016-0361,2016269N15165,109,KOR,410,WP4,2016,126000000,2014,125690947.1,125667384.4,-0.000187483,2016-0361KOR +2016-0485,2016341N08092,116.9,IND,356,NI,2016,1000000000,2014,890280689.2,888918839.2,-0.001530857,2016-0485IND +2016-0503,2016355N07146,192.8,PHL,608,WP2,2016,103661000,2014,96757536.45,96759817.38,2.36E-05,2016-0503PHL +2017-0051,2017043S19040,42.6,MOZ,508,SI,2017,17000000,2014,22801692.01,22709916.84,-0.00403305,2017-0051MOZ +2017-0075,2017061S11061,63.7,MDG,450,SI,2017,20000000,2014,18562954.38,18597400.42,0.001853914,2017-0075MDG +2017-0105,2017081S13152,31.1,AUS,36,OC,2017,2700000000,2014,2988753820,3008926312,0.00672679,2017-0105AUS +2017-0281,2017195N16114,53.8,CHN,156,WP3,2017,3600000,2014,3083629.94,3082466.557,-0.000377348,2017-0281CHN +2017-0281,2017195N16114,30.3,VNM,704,WP1,2017,71000000,2014,59078283.51,58458649.26,-0.01054375,2017-0281VNM 
+2017-0334,2017219N16279,146.1,MEX,484,NA1,2017,2000000,2014,2284434.574,2285643.247,0.000528951,2017-0334MEX +2017-0352,2017232N19130,147.2,CHN,156,WP3,2017,3500000000,2014,2997973553,2996468738,-0.00050207,2017-0352CHN +2017-0352,2017232N19130,151.2,HKG,344,WP4,2017,755500000,2014,644890817.2,644839735.2,-7.92E-05,2017-0352HKG +2017-0352,2017232N19130,131.8,MAC,446,WP4,2017,1420000000,2014,1560609286,1560092036,-0.000331496,2017-0352MAC +2017-0362,2017228N14314,28.7,USA,840,NA2,2017,95000000000,2014,85426343701,85468133336,0.000489069,2017-0362USA +2017-0381,2017242N16333,60.9,ATG,28,NA1,2017,250000000,2014,211176356.8,211075260.6,-0.000478843,2017-0381ATG +2017-0381,2017242N16333,25.8,BHS,44,NA1,2017,2000000,2014,1801876.321,611837.893,-1.080116435,2017-0381BHS +2017-0381,2017242N16333,33.7,CUB,192,NA1,2017,13200000000,2014,10992753818,10904237007,-0.008084882,2017-0381CUB +2017-0381,2017242N16333,105.5,KNA,659,NA1,2017,20000000,2014,18484889.46,18486650.71,9.53E-05,2017-0381KNA +2017-0381,2017242N16333,48.7,TCA,796,NA1,2017,500000000,2014,500000000,500378956.6,0.000757626,2017-0381TCA +2017-0381,2017242N16333,84.4,USA,840,NA2,2017,57000000000,2014,51255806221,51206754325,-0.00095746,2017-0381USA +2017-0381,2017242N16333,25.8,VGB,92,NA1,2017,3000000000,2014,3000000000,623619994.9,-1.570826367,2017-0381VGB +2017-0383,2017260N12310,25.8,DMA,212,NA1,2017,1456000000,2014,1534596212,895118589.3,-0.53906636,2017-0383DMA +2017-0383,2017260N12310,45.1,DOM,214,NA1,2017,63000000,2014,54813712.03,54934660.22,0.002204101,2017-0383DOM +2017-0383,2017260N12310,59.3,PRI,630,NA1,2017,68000000000,2014,67009049565,67027181962,0.00027056,2017-0383PRI +2017-0406,2017253N14130,48.6,VNM,704,WP1,2017,484000000,2014,402730834.1,401345556.4,-0.00344564,2017-0406VNM +2017-0410,2017277N11279,90.2,USA,840,NA2,2017,250000000,2014,224806167.6,224976913.1,0.000759235,2017-0410USA +2017-0422,2017304N11127,58.9,VNM,704,WP1,2017,1000000000,2014,832088500.2,835046981.4,0.003549183,2017-0422VNM +2017-0432,2017288N09138,270,JPN,392,WP4,2017,1000000000,2014,995484463.5,995126307.3,-0.000359846,2017-0432JPN +2017-0468,2017252N14147,325.7,JPN,392,WP4,2017,500000000,2014,497742231.7,622698955.4,0.223980848,2017-0468JPN +2017-0485,2017236N15129,285.9,CHN,156,WP3,2017,56000000,2014,47967576.85,47983654.85,0.000335128,2017-0485CHN +2017-0508,2017333N06082,44.1,LKA,144,NI,2017,346000000,2014,314311010.2,316529153.6,0.007032376,2017-0508LKA diff --git a/notebooks/vulnerability_onboarding/Wind/onboard.ipynb b/notebooks/vulnerability_onboarding/Wind/onboard.ipynb new file mode 100644 index 00000000..6a6a9bfb --- /dev/null +++ b/notebooks/vulnerability_onboarding/Wind/onboard.ipynb @@ -0,0 +1,150 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Regional tropical cyclone impact functions\n", + "\n", + "### CITATION\n", + "Eberenz S, Lüthi S, Bresch DN. Regional tropical cyclone impact functions for globally consistent risk assessments. Natural Hazards and Earth System Sciences. 2021 Jan 29;21(1):393-415.\n", + "\n", + "### LINK\n", + "https://nhess.copernicus.org/articles/21/393/2021/\n", + "\n", + "### NOTES\n", + "The data is used to provide a 'generic asset' vulnerability distribution. The fitted results are copied into Table_A2_Impact_Function_Slope.csv. \n", + "(a) the global default impact function (uncalibrated), (b) calibrated by optimizing RMSF, and (c) calibrated by optimizing TDR. The regions NA1 to WP4 are defined in Table A1. 
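(For orientation, the functional form from the cited paper: impact is sigmoidal following Emanuel (2011), f(V) = v_n^3 / (1 + v_n^3), where v_n = max(V - V_thresh, 0) / (V_half - V_thresh) and V_thresh = 25.7 m/s; V_half is therefore the wind speed at which half of the maximum damage fraction is reached.)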
The row “combined” summarizes results for all regions combined based on the regionalized calibration; the row “global calibration” is based on one unified global calibration over all 473 matched TC events. RMSF: root-mean-squared fraction; TDR: total damage ratio.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['NA1' 'NA2' 'NI' 'OC' 'SI' 'WP1' 'WP2' 'WP3' 'WP4' 'Combined'\n",
+      " 'Global calibration']\n",
+      "['59.6' '86' '58.7' '49.7' '46.8' '56.7' '84.7' '80.2' '135.6' '–' '73.4']\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# vhalf_b holds the RMSF-calibrated v_half per region (see NOTES above)\n",
+    "df = pd.read_csv(\"Table_A2_Impact_Function_Slope.csv\")\n",
+    "region = df[\"region\"].to_numpy()\n",
+    "v_half_by_region = df[\"vhalf_b\"].to_numpy()\n",
+    "print(region)\n",
+    "print(v_half_by_region)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[ 50.55 188.1 ]\n"
+     ]
+    },
+    {
+     "data": {
+      "image/png": "<base64-encoded PNG elided: histogram of per-event fitted v_half values for region WP2>",
ySmjLAJakpA1ySmjLAJakpA1ySmjLAJakpA1ySmjLAJampsxddQAc79nx1Icc9fOu7FnJcST14Bi5JTRngktSUAS5JTU0M8CR3JDmW5OCKtouS3JfkyeH5ws0tU5K02jRn4J8F3rGqbQ9wf1VdAdw/rEuSttDEAK+qB4Efr2q+Dtg3LO8Drp9vWZKkSTZ6GeG2qjo6LP8Q2DZuxyS7gd0Al1122QYPd2by8kVpvhb1noLNeV/N/CVmVRVQ62zfW1XLVbW8tLQ06+EkSYONBvgzSbYDDM/H5leSJGkaGw3we4Bdw/Iu4O75lCNJmtY0lxF+Hvg34LeSPJ3kJuBW4G1JngR+b1iXJG2hiV9iVtWNYza9dc61SJJOgndiSlJTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTBrgkNWWAS1JTZy+6AGmlHXu+upDjHr71XQs5rjQLz8AlqSkDXJKaMsAlqamZ5sCTHAaeA14EXqiq5XkUJUmabB5fYv5uVf1oDn9HknQSnEKRpKZmPQMv4BtJCvjbqtq7eocku4HdAJdddtmMh9NWWNSlfIu0yD57CaM2atYz8DdW1dXAO4H3J3nT6h2qam9VLVfV8tLS0oyHkySdMFOAV9WR4fkY8BXgmnkUJUmabMMBnuQVSc4/sQy8HTg4r8IkSeubZQ58G/CVJCf+zj9U1dfnUpUkaaINB3hVfR/4nTnWIkk6CV5GKElN+WuE0hnKSyf78wxckpoywCWpKQNckpoywCWpKQNckpoywCWpKQNckpoywCWpKQNckpoywCWpKQNckpoywCWpKQNckpoywCWpKX9OVlqwRf6sq3rzDFySmjLAJakpA1ySmjLAJakpA1ySmjLAJakpLyOUtOW8dHI+PAOXpKYMcElqaqYAT/KOJE8keSrJnnkVJUmabMMBnuQs4FPAO4ErgRuTXDmvwiRJ65vlDPwa4Kmq+n5V/Ry4E7huPmVJkiaZJcAvAf57xfrTQ5skaQts+mWESXYDu4fV55M8sdnHnMHFwI8WXcQcnC79APtyqjpd+rJl/cjHZnr5b67VOEuAHwEuXbH+qqHt/6mqvcDeGY6zZZLsr6rlRdcxq9OlH2BfTlWnS1+692OWKZT/AK5IcnmSXwduAO6ZT1mSpEk2fAZeVS8kuRn4Z+As4I6qemxulUmS1jXTHHhV3QvcO6daTgUtpnqmcLr0A+zLqep06UvrfqSqFl2DJGkDvJVekpo6YwM8yeEkjyY5kGT/0HZRkvuSPDk8X7joOteS5I4kx5IcXNG2Zu0Z+Zvh5w4eSXL14ip/qTF9uSXJkWFsDiS5dsW2Pxv68kSS319M1S+V5NIkDyR5PMljST4wtLcbl3X60nFcXpbkW0m+O/TlL4b2y5M8NNT8heFCDJKcO6w/NWzfsdAOTFJVZ+QDOAxcvKrtr4A9w/Ie4GOLrnNM7W8CrgYOTqoduBb4GhDgdcBDi65/ir7cAvzpGvteCXwXOBe4HPgecNai+zDUth24elg+H/jPod5247JOXzqOS4DzhuVzgIeGf+8vAjcM7Z8G/mhY/mPg08PyDcAXFt2H9R5n7Bn4GNcB+4blfcD1iytlvKp6EPjxquZxtV8H/F2N/DtwQZLtW1LoFMb0ZZzrgDur6mdV9V/AU4x+0mHhqupoVX17WH4OOMTozuR247JOX8Y5lcelqur5YfWc4VHAW4C7hvbV43JivO4C3pokW1PtyTuTA7yAbyR5eLhbFGBbVR0dln8IbFtMaRsyrvauP3lw8zC1cMeKqawWfRk+dl/F6Gyv9bis6gs0HJckZyU5ABwD7mP0CeHZqnph2GVlvb/sy7D9J8Art7Tgk3AmB/gbq+pqRr+m+P4kb1q5sUafoVpeotO59sFtwGuAncBR4OMLreYkJDkP+BLwwar66cpt3cZljb60HJeqerGqdjK6W/wa4LWLrWh+ztgAr6ojw/Mx4CuMBvaZEx9jh+dji6vwpI2rfaqfPDiVVNUzw5vuF8Bn+NXH8VO6L0nOYRR4n6uqLw/NLcdlrb50HZcTqupZ4AHg9YymrE7cB7Oy3l/2Zdj+G8D/bG2l0zsjAzzJK5Kcf2IZeDtwkNFPAewadtsF3L2YCjdkXO33AH84XPXwOuAnKz7Sn5JWzQW/m9HYwKgvNwxXClwOXAF8a6vrW8swT3o7cKiqPrFiU7txGdeXpuOylOSCYfnlwNsYzek/ALxn2G31uJwYr/cA/zp8cjo1Lfpb1EU8gFcz+tb8u8BjwEeG9lcC9wNPAv8CXLToWsfU/3lGH2H/l9H83U3jamf0LfynGM37PQosL7r+Kfry90OtjzB6Q21fsf9Hhr48Abxz0fWvqOuNjKZHHgEODI9rO47LOn3pOC6/DXxnqPkg8OdD+6sZ/SfzFPCPwLlD+8uG9aeG7a9edB/We3gnpiQ1dUZOoUjS6cAAl6SmDHBJasoAl6SmDHBJasoAl6SmDHBJasoAl6Sm/g/OHplXgng2+wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "df = pd.read_csv(\"Table_S2_V_half_individual_fitting_per_event.csv\")\n", + "v_half = df[df.region == \"WP2\"][\"v_half_fitted [m/s]\"]\n", + "\n", + "plt.hist(v_half)\n", + "print(np.quantile(v_half, [0.25, 0.75]))" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD7CAYAAAB68m/qAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAoTklEQVR4nO3deXyU9bn+8c+dyUp2SNiyGCAIsi8RUMFaqxa0BT1uoD2tv2pptahtrVZPrXWpPWoXtZb2HNvac9oKQXED97UteiyyhH0Ni5AIJEASCFln5v79MUMMMcgEZvLMcr9fTefZkrnMTC6++U7meURVMcYYE/ninA5gjDEmOKzQjTEmSlihG2NMlLBCN8aYKGGFbowxUcIK3RhjokRAhS4iU0Vks4iUi8idxznmKhHZICLrRWRecGMaY4w5ETnR36GLiAvYAlwIVADLgFmquqHdMYOBZ4DzVbVGRHqralXoYhtjjOkoPoBjJgDlqrodQERKgRnAhnbHfAuYq6o1AIGUeU5OjhYVFXU5sDHGxLIVK1bsV9XczvYFUuh5wO526xXAxA7HnA4gIh8ALuBeVX39875oUVERy5cvD+DujTHGHCUiHx9vXyCFHoh4YDBwHpAP/FNERqpqbYcgs4HZAIWFhUG6a2OMMRDYi6KVQEG79Xz/tvYqgEWq2qqqO/DNuQ/u+IVU9UlVLVHVktzcTn9jMMYYc5ICKfRlwGARGSAiicBMYFGHY17ENzpHRHLwTcFsD15MY4wxJ3LCQldVNzAHeAPYCDyjqutF5H4Rme4/7A3ggIhsAN4DblfVA6EKbYwx5rNO+GeLoVJSUqL2oqgxxnSNiKxQ1ZLO9tk7RY0xJkpYoRtjTJSwQjfGmG7i9SoPvrKBXQcaQvL1rdCNMaabPLlkO39YsoP/27Y/JF/fCt0YY7pB2a4afvnGZi4Z2Y+rzyw48SecBCt0Y4wJsbrGVm6eX0bfzGR+/m8jEZGQ3E+w3vpvjDGmE6rKf7ywlj11TTz7nbPITEkI2X3ZCN0YY0JowbLdvLJmDz+8aAjjCrNDel9W6MYYEyJb9x3m3sXrmTI4h2+fOzDk92eFbowxIdDU6mHOvDLSkuL51VWjiYsLzbx5ezaHbowxIfDAyxvYvO8wf/nmBHqnJ3fLfdoI3Rhjguy1tXt4eukuvv2FgZx7evedKtwK3RhjgqiipoEfPbeG0QVZ/PCiId1631boxhgTJK0eL7fML0MVnpg5lgRX91aszaEbY0yQPPb2FlbuquWJWWMp7NWj2+/fRujGGBMEH5Tv53d/38bMMwv46uj+jmSwQjfGmFO0v76Z7y1YxaDcNH761eGO5bApF2OMOQVer/LDZ1dT19jKX6+fQEqiy7EsNkI3xphT8Kf3d/D3zdX85CvDGNo3w9EsVujGGHOSVu+u5ZE3NjF1eF++NrHQ6ThW6MYYczION/lOids7PZmHLx8VslPidoXNoRtjTBepKj9+YR2VtY0smD2JzB6hOyVuV9gI3RhjuujZFRUsWv0J379gMCVFPZ2O08YK3RhjuqC8qp6fvrSeswb24sbzip2OcwwrdGOMCZDvlLgrSUl08djMMbi64ZS4XRFQoYvIVBHZLCLlInJnJ/uvE5FqEVnl/7gh+FGNMcZZ//nqRjbtPcyvrhxNn4zuOSVuV5zwRVERcQFzgQuBCmCZiCxS1Q0dDl2gqnNCkNEYYxz3xvq9/O+HH3PD5AF8cWhvp+N0KpAR+gSgXFW3q2oLUArMCG0sY4wJH5/UNnLHwjWMzMvkjqlDnY5zXIEUeh6wu916hX9bR5eLyBoRWSgiBUFJZ4wxDnN7vNxaWobb4+WJWWNJjA/flx6DlWwxUKSqo4C3gP/t7CARmS0iy0VkeXV1dZDu2hhjQuc375azbGcND142kqKcVKfjfK5ACr0SaD/izvdva6OqB1S12b/6R2B8Z19IVZ9U1RJVLcnN7b7LMhljzMn4cNsBnnh3K1eMz+fSsZ1NTISXQAp9GTBYRAaISCIwE1jU/gAR6ddudTqwMXgRjTGm+x080sL3FpQxICeV+6Y7d0rcrjjhX7moqltE5gBvAC7gKVVdLyL3A8tVdRFwi4hMB9zAQeC6EGY2xpiQUlVuf3Y1NUdaeeq6M0lNioyzpASUUlVfBV7tsO2edst3AXcFN5oxxjjjzx/s5J1NVdz71WEM75/pdJyAhe/LtcYY44B1lXX852sbueCMPnzj7CKn43SJFboxxvjVN7u5eX4ZOWlJ/OKK8DglbldExsSQMcZ0g3teXMfHB44w/1uTyE5NdDpOl9kI3RhjgOdWVPB8WSW3fGkwEwf2cjrOSbFCN8bEvO3V9fzkpXVMHNCTm88f7HSck2aFboyJac1uDzfPLyMxPi4sT4nbFTaHboyJaQ+9ton1nxzij18voV9mitNxTomN0I0xMevtDfv48wc7ue7sIi4Y1sfpOKfMCt0YE5P21jVx+8LVDO+fwV0Xh+8pcbvCCt0YE3M8XuXW0jKa3b5T4ibFu5yOFBQ2h26MiTm/fbecpTsO8qsrRzMwN83pOEFjI3RjTEz5aMdBHn9nC5eNzePy8flOxwkqK3RjTMyoOdLCraVlFPbswQOXjnA6TtDZlIsxJiaoKnc8t4b99c08f+M5pEXIKXG7wkboxpiY8JcPP+atDfu4c9oZjMyPnFPidoUVujEm6q3/pI4HX9nI+UN7881zipyOEzJW6MaYqNbQ4jslbnZqQkSeErcrom8SyRhj2vnpS+vZsf8IT98wkV5pSU7HCSkboRtjotZLqyp5dkUFN3+xmLMH5TgdJ+Ss0I0xUWnn/iP8+IV1nFmUzS1fitxT4naFFboxJuq0uL3cUlqGK054bOZY4l2xUXU2h26MiTr/9Y9trKmo47++No68rMg+JW5XxMY/W8aYmLF572GeeHcr00f3Z+qIfk7H6VZW6MaYqOH2eLlj4Woyk
hO4d/pwp+N0O5tyMcZEjac+2MHqijqemDWWnqmJTsfpdjZCN8ZEhR37j/CrN7dw0bA+fGVUbE21HBVQoYvIVBHZLCLlInLn5xx3uYioiJQEL6Ixxnw+r1f50cI1JMXH8bNLR0T1u0E/zwkLXURcwFxgGjAMmCUiwzo5Lh24FVga7JDGGPN5nl76MR/tPMhPvjKM3hnJTsdxTCAj9AlAuapuV9UWoBSY0clxDwAPA01BzGeMMZ+roqaBh17bxLmn53JFlF2woqsCKfQ8YHe79Qr/tjYiMg4oUNVXgpjNGGM+l6py1/NrAfj5ZbE71XLUKb8oKiJxwK+B2wI4draILBeR5dXV1ad618aYGPfsigqWbN3PndOGkp/dw+k4jguk0CuBgnbr+f5tR6UDI4C/i8hOYBKwqLMXRlX1SVUtUdWS3Nzck09tjIl5+w418bOXNzBhQE+unXia03HCQiCFvgwYLCIDRCQRmAksOrpTVetUNUdVi1S1CPgXMF1Vl4cksTEm5qkqP35hHc1uLw9fPoq4uNieajnqhIWuqm5gDvAGsBF4RlXXi8j9IjI91AGNMaajxWv28PbGffzwoiEMyEl1Ok7YCOidoqr6KvBqh233HOfY8049ljHGdO5AfTP3LlrP6IIsvjl5gNNxwoq9U9QYE1HuXbyBw02t/OKKUbhsquUYVujGmIjx5vq9LF79CTefP5jT+6Q7HSfsWKEbYyJCXWMrd7+4jjP6ZXDjeYOcjhOWrNCNMRHhwVc2cOBIC7+4YhQJMXIFoq6y74oxJuz9c0s1zyyv4NvnDmREXqbTccKWFboxJqzVN7u56/m1DMpNjZmLPZ8su8CFMSasPfL6Jj6pa2Thd84iOcHldJywZiN0Y0zY+mjHQf7y4cdcd3YR40/r6XScsGeFbowJS02tHn703BoKeqZw+5eHOB0nItiUizEmLD361hZ27D/CvBsm0iPRqioQNkI3xoSd1btr+cOS7cyaUMjZxTlOx4kYVujGmLDS7PZw+8LV9E5P5q6LhzodJ6LY7zHGmLAy971tbNlXz1PXlZCRnOB0nIhiI3RjTNjYuOcQv3uvnMvG5nH+0D5Ox4k4VujGmLDg9ni5Y+EasnokcM9XhjkdJyLZlIsxJiz8YckO1lbW8btrx5Gdmuh0nIhkI3RjjOO2Vdfz6NtbmDaiLxeP7Od0nIhlhW6McZTHq9yxcA0pCS7umzHc6TgRzQrdGOOov3y4kxUf1/DTrw6jd3qy03EimhW6McYxuw828MjrmzlvSC6Xjc1zOk7Es0I3xjhCVbnz+TW44oSfXzYSEbs+6KmyQjfGOGLBst18UH6Auy4eSv+sFKfjRAUrdGNMt9tT18iDr2zkrIG9mHVmodNxooYVujGmW6kqd7+wjlavl4cuH0lcnE21BIsVujGmW7206hPe2VTF7V8eymm9Up2OE1UCKnQRmSoim0WkXETu7GT/d0RkrYisEpH3RcTet2uM+Yzqw83cu3g94wqzuO7sIqfjRJ0TFrqIuIC5wDRgGDCrk8Kep6ojVXUM8Ajw62AHNcZEvnsXraeh2cMjV4zCZVMtQRfICH0CUK6q21W1BSgFZrQ/QFUPtVtNBTR4EY0x0eD1dXt4Ze0ebr1gMMW9052OE5UCOTlXHrC73XoFMLHjQSLyXeAHQCJwflDSGWOiQm1DC3e/uJ7h/TOYfe5Ap+NEraC9KKqqc1V1EPAj4O7OjhGR2SKyXESWV1dXB+uujTFh7oGXN1Lb0MIjV4wiwWV/ixEqgXxnK4GCduv5/m3HUwpc2tkOVX1SVUtUtSQ3NzfgkMaYyPXe5iqeW1nBjecNYnj/TKfjRLVACn0ZMFhEBohIIjATWNT+ABEZ3G71EmBr8CIaYyLV4aZWfvz8Wgb3TmPO+cVOx4l6J5xDV1W3iMwB3gBcwFOqul5E7geWq+oiYI6IXAC0AjXAN0IZ2hgTGR56bRN7DzXx3I1nkxTvcjpO1AvoikWq+irwaodt97RbvjXIuYwxEe7DbQd4eukubpg8gLGF2U7HiQn26oQxJugaWzzc+fwaTuvVg9suGuJ0nJhh1xQ1xgTdr97czMcHGiidPYmURJtq6S42QjfGBNXKXTX86YMdfG1SIZMG9nI6TkyxQjfGBE2z28MdC9fQLyOZH00d6nScmGNTLsaYoHninXLKq+r5n/93JunJCU7HiTk2QjfGBMW6yjp+/49tXD4un/OG9HY6TkyyQjfGnLJWj5c7Fq6hZ2oiP/nKGU7HiVk25WKMOWVP/nM7G/Yc4r++Np6sHolOx4lZNkI3xpySrfsO8/jbW7lkVD+mjujrdJyYZoVujDlpHq9yx3NrSE1ycd/04U7HiXk25WKMOWm/eWcrZbtqeXzmGHLSkpyOE/NshG6MOSlLtlbzm3e3csX4fGaMyXM6jsEK3RhzEvYdauJ7pasY3DuNB2aMcDqO8bNCN8Z0idvj5eZ5ZTS2evjdtePsXC1hxObQjTFd8qu3tvDRzoM8dvUYu9hzmLERujEmYO9tquL3f9/GrAmFXDrW5s3DjRW6MSYglbWNfP+ZVQzrl8FPvzrM6TimE1boxpgTanF7mTNvJW6P8rtrx5GcYPPm4cjm0I0xJ/TI65so21XL3GvGUZST6nQccxw2QjfGfK7X1+3lj+/v4Lqzi7hkVD+n45jPYYVujDmuXQcauH3hakbnZ3LXxXbBinBnhW6M6VSz28N3561EgN9eM46keJs3D3c2h26M6dSDr2xkbWUdT/77eAp69nA6jgmAjdCNMZ/x8ppP+MuHH/OtKQO4aLidEjdSWKEbY46xvbqeO59by7jCLO6wCz1HlIAKXUSmishmESkXkTs72f8DEdkgImtE5B0ROS34UY0xodbU6uGmp1eS4BJ+e804Elw25oskJ3y0RMQFzAWmAcOAWSLS8W1iZUCJqo4CFgKPBDuoMSb07lu8nk17D/Prq8fQPyvF6TimiwL553cCUK6q21W1BSgFZrQ/QFXfU9UG/+q/gPzgxjTGhNoLZRXM/2g3N503iC8O6e10HHMSAin0PGB3u/UK/7bjuR547VRCGWO619Z9h/mP59cxYUBPfnDh6U7HMScpqH+2KCJfA0qALxxn/2xgNkBhYWEw79oYc5IaWtzc9PRKUpNcPDFrLPE2bx6xAnnkKoGCduv5/m3HEJELgB8D01W1ubMvpKpPqmqJqpbk5uaeTF5jTBCpKne/uI7y6noenzmWPhnJTkcypyCQQl8GDBaRASKSCMwEFrU/QETGAv+Nr8yrgh/TGBMKzy6v4PmVldz6pcGcU5zjdBxzik5Y6KrqBuYAbwAbgWdUdb2I3C8i0/2H/QJIA54VkVUisug4X84YEyY27jnET15ax+TiHG4+f7DTcUwQBDSHrqqvAq922HZPu+ULgpzLGBNC9c1uvvv0SjJTEnhs5hhcceJ0JBMEdi4XY2KMqnLX82vZeeAI8781iZy0JKcjmSCxl7ONiTF/W7qLxas/4baLhjBxYC+n45ggskI3Joasq6zjgcUbOG9ILjd+YZDTcUyQWaEbEyPqGlu5
6emV5KQl8uhVY4izefOoY3PoxsQAVeWOhav5pLaRBd8+i+zURKcjmRCwEboxMeDPH+zkjfX7uHPaUMaflu10HBMiVujGRLmyXTX8/NWNXDisD9dPHuB0HBNCVujGRLHahhbmzCujb2Yyv7xiNCI2bx7NbA7dmCjl9Sq3PbOa6sPNLLzxLDJ7JDgdyYSYjdCNiVJPLtnOO5uq+PElZzAqP8vpOKYbWKEbE4WW7TzIL97YzCUj+/H1s+yKkLHCCt2YKHOgvpk581ZSkJ3CQ5ePtHnzGGKFbkwU8XqV7y1YRU1DK3OvHUd6ss2bxxIrdGOiyNz3ylmydT/3TR/O8P6ZTscx3cwK3Zgo8X/b9vPo21u4bGweM88sOPEnmKhjhW5MFKg63MQt81cxICeVn106wubNY5T9HboxEc7jVW6ZX0Z9cyvzvjWR1CT7sY5V9sgbE+Eee3sL/9p+kF9eOZrT+6Q7Hcc4yKZcjIlg/9hSzW/fK+eqknyuGJ/vdBzjMCt0YyLUnrpGvr9gFUP6pHPf9BFOxzFhwArdmAjU6vFyy/wymls9zL12HCmJLqcjmTBgc+jGRKBfvrmZZTtreHzmGAblpjkdx4QJG6EbE2He2biP//7Hdr42qZAZY/KcjmPCiBW6MRGkoqaBHzyzmuH9M7j7kmFOxzFhxgrdmAjR4vby3XlleL3K764dR3KCzZubY9kcujER4qHXNrF6dy2/v3Ycp/VKdTqOCUMBjdBFZKqIbBaRchG5s5P954rIShFxi8gVwY9pTOxye7w89NomnvpgB//vnCKmjezndCRzElq9rby5802++cY3WbpnaUju44QjdBFxAXOBC4EKYJmILFLVDe0O2wVcB/wwFCGNiVX7DjVx8/wyPtpxkFkTCrlr2hlORzJdVNVQxXNbnmPhloVUNVbRP7U/9a31IbmvQKZcJgDlqrodQERKgRlAW6Gr6k7/Pm8IMhoTk/6vfD+3lJZxpNnDo1eP5rKx9k7QSKGqLN+3nNJNpby7613c6uacvHO4Z8g9TM6bjCsuNK9/BFLoecDudusVwMSQpDHG4PUqc98r59G3tzAwN4353xrHYDtHS0Sob6ln8fbFLNi0gG1128hIzODaM67l6iFXU5AR+lMad+uLoiIyG5gNUFhY2J13bUxEOHikhe8vWMU/tlRz6Zj+PHjZSDt7YgTYWrOVBZsXsHjbYhrcDQzrNYz7z76faQOmkRyf3G05AnmmVALt/2nJ92/rMlV9EngSoKSkRE/maxgTrVZ8XMOceSs5UN/Cg5eN4JoJhXZe8zDW6m3lnV3vULqplBX7VpAYl8jUAVOZOWQmI3NHOpIpkEJfBgwWkQH4inwmcE1IUxkTQ1SVP72/g4de20T/rBSev+lsRuTZ5ePC1b4j+1i4dSELtyxkf+N+8tLy+MH4H3Bp8aVkJ2c7mu2Eha6qbhGZA7wBuICnVHW9iNwPLFfVRSJyJvACkA18VUTuU9XhIU1uTBQ41NTKHc+u4fX1e7loWB9+ceVoMlPsws7hRlVZtncZpZt9L3J61cvkvMnMHDqTc/qfE7IXObsqoMk5VX0VeLXDtnvaLS/DNxVjjAnQuso6vjtvJZU1jdx9yRlcP3mATbGEmfqWehZtW8SCzQvYXredzKRMvj7s61w55EoK0sPvuq32aosx3UxVmf/Rbu5dvJ6ePRIpnT2JkqKeTscy7Wyp2cKCTQtYvH0xje5GRuaM5Gfn/IwvF325W1/k7CordGO6UUOLm7tfWMfzZZVMGZzDY1ePoVdaktOxDNDqaeXtXW9TuqmUlVUrSXIlMW3ANGYOmcnwnMiYQbZCN6ablFcd5qanV7K1qp7vX3A6c84vxhVnUyxO23tkL89ueZbntjzHgaYD5Kflc9v427i0+FKykrOcjtclVujGdIOXVlVy1/NrSUlw8ddvTmTy4BynI8U0VWXp3qWUbirl77v/jle9nJt/LjOHzuTs/mcTJ5F5IlordGNCqKnVwwMvb+Dppbs4syibJ2aNo29m+M7BRrvDLYdZtG0RpZtK2XloJ1lJWXxj+De4ashV5KVF/sVCrNCNCZFdBxq4ad4K1lUe4ttfGMjtFw0h3hWZI79It/ngZko3l/LK9ldodDcyKmcUP5/8cy4quogkV/S8hmGFbkwIvLl+L7c9uxoB/vD1Ei4c1sfpSDGn1dPKWx+/RenmUsqqykh2JXPxwIu5ashVDO8VGS9ydpUVujFB1Orx8sjrm/jDkh2Mys9k7jXjKOjZw+lYMaOuuY4P93zIkoolLKlYQk1zDYXphdxecjszimeQmRTd78C1QjcmSPbWNTFn3kqWf1zDv086jbu/cgZJ8eHxDsJopapsqdnCkkpfga+uXo1HPWQkZnBO3jnMGDSDs/qfFbEvcnaVFboxQbBkazW3lq6iqdXDb2aNZfro/k5Hilr1LfX8a8+/WFK5hPcr3qeqsQqAM3qewfUjr2dK3hRG5owMm7fjdycrdGNOgcer/Oadrfzm3a2c3jududeOo7h3mtOxooqqsq12m28UXrmEsn1luNVNekI6Z/U/i8l5k5mcN5ncHrlOR3WcFboxJ2l/fTPfK13F++X7uXxcPj+7dAQpibE3KgyFhtYGlu5Z6huFV77PniN7ADg9+3S+PvzrTMmbwujeo0mIsxOZtWeFbsxJWLbzIHPmraS2oZWHLx/JVSUFdmKtU6Cq7Dy0kyUVvgJfvm85rd5WesT3YFK/ScweNZvJeZPpm9rX6ahhzQrdmC5QVf6wZDsPv76ZguwU/nzTBIb1z3A6VkRqdDeybO8y3q98nyUVS6iorwBgYOZArhl6DVPypzCu9zgSXDYKD5QVujEBqmto5bZnV/P2xn1cPLIvD18+ivRkK5uu2H1od9tc+LK9y2j2NJPsSmZiv4lcN/w6JudPjop3bDrFCt2YAKytqOOmeSvYU9vET786jOvOLrIplgC0eFpYvm9521TKzkM7ATgt4zSuPP1KpuRNYXzf8VH1bk0nWaEb8zlUlb8t3cUDizeQk5bIM985i3GFzl5mLNx9Uv9J2zTK0r1LaXQ3khiXyJn9zmTm0JlMyZtCYUaMXCTe64X6vXBwu/9jh+92/HUw6ItBvzsrdGOO40izm/94YS0vrfqE84bk8uhVY8hOTXQ6Vthp9bRSVlXW9hcp5bXlAOSl5TFj0Aym5E/hzL5nkhKf4nDSEPG44VBFh9L2F3fNDnA3fXpsXDxkF0HjwZBEsUI3phNb9h3mxr+tYMf+I9z+5SHc+IVBxMX4uctVlaqGKspryymvLWdb7ba25UZ3I/Fx8ZT0KeHS4kuZkj+FARlRdEk9dzPU7vrsSPvgdqj9GLzuT4+NT4bsAdBzIBR/yXfb07+ekQ+u0NWuFbox7Xi8yotlldz94jpSk+L52w0TOXtQbJ27XFU50HTgmMLeVruN8ppyDrcebjuuZ3JPBmcN5t8G/xsT+k5gYr+JpCakOpj8FLUcgZqdnZT2Dt8IXL2fHpuY7ivpviNh2IxPC7vnQEjrC3HOnGrACt3ELFVlT10Tq3fXsqqillW7all
bWUdDi4eJA3ryxKyx9M6I7nOX1zbVthX21tqtbKvdxrbabdQ017Qdk5mUyaDMQVw88GIGZQ2iOKuYQVmD6JkcgddBbaz1TYN0nBo5uN03191eSk9fQRdOOrawew6EHr0gDH/7sEI3MeNQUytrK+pYtbuWVbtrWb27lqrDzQAkuuI4o38GV5UUMO60bC4e0Teqzl1+uOVw24i7fXnvb9zfdkxqQirFWcWcX3h+W2kXZxWTk5ITGVMnXg80HID6fVBf5fs4ZsS9/bNz12l9/VMjF0DPok8LO3sApGQ58B9xaqzQTVRqcXvZvPcwq3bXsGp3HasratlWXY+qb//AnFQmF+cwuiCLMQVZDO2XHhVnRmxobWB73XbfVElNOeV1vtt9DfvajkmJT2Fg5kDO6X8OxVnFFGcXU5xVTJ8efcKvuL0eaDjoK+kjVVBf3clyta+8G/YfOy0CgEBmgW+EPWz6saPs7CJIjOApok5YoZuIp6rsOthwzMh73SeHaHH7frhz0hIZU5DFjNH9GVOYxai8LDJ7RPYbgprcTeyo2/GZFycr6yvbjkmMS2Rg1kDO7Htm22i7OKuY/mn9nT2drNfrGykfHUkfqe5kudpX2keqOylpwJUEaX0gLddX2HnjIa23b1tq7qfLmfkQHzt/426FbiLOwSMtvnnvowVeUUttQysAKQkuRuZl8o2zTmsbfedlpYTfyPM4VJVGdyO1zbXUNNdQ11RHTXMNtc21HGg80Db63n14N15/0cXHxVOUUcTInJFcVnxZ23RJQXpB951C1uuFxpoTjKT90yBH9oN6Pvs1XEm+Ik7N9RVx3lhI7f1pcbdfTsoIyzlspwVU6CIyFXgccAF/VNWHOuxPAv4CjAcOAFer6s7gRjWxqKnVw/pP6li1u65t9L3rYAMAcQKn90nny8P6tpX36X3SwmbuW1VpcDdQ21xLbVNtW0kfXW77aKptK+3aplpavC2dfj2XuChIL+D07NOZNmBa24i7MKPw5M866GmF5sPQUu+7ba6HlqO3nWxrf2zb/sOfU9KJ/iLuDRl50G+Mv5T9xd1+OTnTSvoUnbDQRcQFzAUuBCqAZSKySFU3tDvseqBGVYtFZCbwMHB1KAKb6OX1Ktuq648ZeW/acxi31zfx3T8zmdEFWVwzsZAxBVmMyMskLal7fslUVY60HjmmiGuaaj5TynXNdceMrFu9rZ1+PUHISsoiMymT7ORs+qf1Z3iv4WQlZ5GVlEV2Unbbvqwk37aMxAxcEud7o8rRIm2sh9qPjlO0AZSzpzmwb4ArCZLSIDHNNzpOSoMeOb556KR0XyGn9vaNntP6fLqcnGUl3Y0C+WmYAJSr6nYAESkFZgDtC30GcK9/eSHwWxER1aMvQZlYoaqoglcVxX+rdLrtSLObtZWfjrzXVNRR3+x7g0Z6kouRBel8c0oeI/LSGNY/jaxUF26v2/9xmL0NNbiPuNu2tXpb25Y96ul0u9vbgtvjxu1t9X8c3d+Kx39Mi6eZuuY6/4i6jrqWQ9S0HMLd2QgUiEPIik8h05VMdlwyBa4kRiZkk5XYmyxJIEtcZOMiiziyFLK8kO4Fl7cVmlugoQk8h8CzHTwt4G31jZw9Lf4P/7K7xVfMx8nxGQmpvrJtK+J0yCrwL3co57Zt6Z/9nMQ0iLd3yEaCQAo9D9jdbr0CmHi8Y1TVLSJ1QC9gP0H26wU38fahfwb7y8akz/vX9nj7urr9c+9fwAN4BDwuSC+CFMAt4BZhLbC2CqgCyk7iDgIUp0q8QjxKvCoJClleD5leL6d5vGR5vWR5PGR7vGR6vWR7PP5bL1leD+le5XMneeLifVMPcQngSvAtt90eXfavJ/bosL3D53Us2rby9d8e3Z6YCjF4CbZY160viorIbGA2QGHhyZ2cJyMll7510fWnRt1CBXz/O0ZnvwyL//+k073tP0faju3sa7Vf//S3bjlmf0q8ixSXi0RxES9CvMQRT5zv1r+eQBzx4vLvE1xHl8VFgrTb1/YR326/y7c/zn8rLlwSR0LbejxxcXEgcf7/aP9y+5I9poAT/AXbyfbObuMSHHvXoIk9gRR6JVDQbj3fv62zYypEJB7IxPfi6DFU9UngSYCSkpKTmo65Yfp93MB9J/OpxhgT1QIZOiwDBovIABFJBGYCizocswj4hn/5CuBdmz83xpjudcIRun9OfA7wBr4/W3xKVdeLyP3AclVdBPwJ+KuIlAMH8ZW+McaYbhTQHLqqvgq82mHbPe2Wm4ArgxvNGGNMV9irNcYYEyWs0I0xJkpYoRtjTJSwQjfGmChhhW6MMVFCnPpzcRGpBj4+yU/PIQSnFQgCy9U1lqvrwjWb5eqaU8l1mqrmdrbDsUI/FSKyXFVLnM7RkeXqGsvVdeGazXJ1Tahy2ZSLMcZECSt0Y4yJEpFa6E86HeA4LFfXWK6uC9dslqtrQpIrIufQjTHGfFakjtCNMcZ0EHGFLiJTRWSziJSLyJ0O5nhKRKpEZF27bT1F5C0R2eq/zXYgV4GIvCciG0RkvYjcGg7ZRCRZRD4SkdX+XPf5tw8QkaX+x3OB/xTN3U5EXCJSJiIvh0suEdkpImtFZJWILPdvC4fnWJaILBSRTSKyUUTOcjqXiAzxf5+OfhwSke85ncuf7fv+5/w6EZnv/1kIyfMrogq93QWrpwHDgFkiMsyhOP8DTO2w7U7gHVUdDLzjX+9ubuA2VR0GTAK+6/8eOZ2tGThfVUcDY4CpIjIJ3wXFH1XVYqAG3wXHnXArsLHderjk+qKqjmn3J25OP44AjwOvq+pQYDS+75ujuVR1s//7NAYYDzQALzidS0TygFuAElUdge8U5DMJ1fPLd1HfyPgAzgLeaLd+F3CXg3mKgHXt1jcD/fzL/YDNYfA9ewm4MJyyAT2AlfiuTbsfiO/s8e3GPPn4ftjPB17Gd3W8cMi1E8jpsM3RxxHf1ch24H/9LVxydchyEfBBOOTi0+st98R3uvKXgS+H6vkVUSN0Or9gdZ5DWTrTR1X3+Jf3An2cDCMiRcBYYClhkM0/rbEK32Wf3wK2AbWq6vYf4tTj+RhwB+D1r/cKk1wKvCkiK/zX4wXnH8cBQDXwZ/8U1R9FJDUMcrU3E5jvX3Y0l6pWAr8EdgF7gDpgBSF6fkVaoUcM9f3T69ifEIlIGvAc8D1VPdR+n1PZVNWjvl+J84EJwNDuztCRiHwFqFLVFU5n6cRkVR2Hb4rxuyJybvudDj2O8cA44PeqOhY4QodpDCef+/656OnAsx33OZHLP2c/A98/hP2BVD47VRs0kVbogVyw2kn7RKQfgP+2yokQIpKAr8yfVtXnwykbgKrWAu/h+1Uzy39hcXDm8TwHmC4iO4FSfNMuj4dBrqOjO1S1Ct988AScfxwrgApVXepfX4iv4J3OddQ0YKWq7vOvO53rAmCHqlaraivwPL7nXEieX5FW6IFcsNpJ7S+W/Q1889fdSkQE3zVeN6rqr8Mlm4jkikiWfzkF37z+RnzFfoVTuVT1LlXNV9UifM+nd1X1WqdziUiqiKQfXcY3L7wOhx9HVd
0L7BaRIf5NXwI2OJ2rnVl8Ot0CzufaBUwSkR7+n82j36/QPL+ceuHiFF5kuBjYgm/+9ccO5piPb06sFd+o5Xp8c6/vAFuBt4GeDuSajO/XyjXAKv/HxU5nA0YBZf5c64B7/NsHAh8B5fh+TU5y8DE9D3g5HHL573+1/2P90ee604+jP8MYYLn/sXwRyA6TXKnAASCz3bZwyHUfsMn/vP8rkBSq55e9U9QYY6JEpE25GGOMOQ4rdGOMiRJW6MYYEyWs0I0xJkpYoRtjTJSwQjfGmChhhW6MMVHCCt0YY6LE/wdcS4sTF/Ke5wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "def vul(v, v_half):\n", + " v_thresh = 25.7\n", + " # v_half = 135.6 # wp4 74.7 135.6 190.5\n", + " vn = np.where(v > v_thresh, v - v_thresh, 0) / (v_half - v_thresh)\n", + " f = vn**3 / (1 + vn**3)\n", + " return f\n", + "\n", + "\n", + "v = np.arange(0, 90, 10)\n", + "fig, ax = plt.subplots()\n", + "# for item in v_half.to_numpy():\n", + "# ax.plot(v, vul(v, item))\n", + "ax.plot(v, vul(v, 74.7))\n", + "ax.plot(v, vul(v, 190.5))\n", + "ax.plot(v, vul(v, 135.6))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "physrisk-og59E7IU", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pdm.lock b/pdm.lock new file mode 100644 index 00000000..20f26632 --- /dev/null +++ b/pdm.lock @@ -0,0 +1,3053 @@ +# This file is @generated by PDM. +# It is not intended for manual editing. + +[metadata] +groups = ["default", "docs", "lint", "test"] +strategy = ["cross_platform", "inherit_metadata"] +lock_version = "4.4.1" +content_hash = "sha256:9c0bdba5962c53be1370cfe2e86513ec1f27c833e5b9bdf278fdc161ca075f40" + +[[package]] +name = "accessible-pygments" +version = "0.0.4" +summary = "A collection of accessible pygments styles" +groups = ["docs"] +dependencies = [ + "pygments>=1.5", +] +files = [ + {file = "accessible-pygments-0.0.4.tar.gz", hash = "sha256:e7b57a9b15958e9601c7e9eb07a440c813283545a20973f2574a5f453d0e953e"}, + {file = "accessible_pygments-0.0.4-py2.py3-none-any.whl", hash = "sha256:416c6d8c1ea1c5ad8701903a20fcedf953c6e720d64f33dc47bfb2d3f2fa4e8d"}, +] + +[[package]] +name = "affine" +version = "2.4.0" +requires_python = ">=3.7" +summary = "Matrices describing affine transformation of the plane" +groups = ["default"] +files = [ + {file = "affine-2.4.0-py3-none-any.whl", hash = "sha256:8a3df80e2b2378aef598a83c1392efd47967afec4242021a0b06b4c7cbc61a92"}, + {file = "affine-2.4.0.tar.gz", hash = "sha256:a24d818d6a836c131976d22f8c27b8d3ca32d0af64c1d8d29deb7bafa4da1eea"}, +] + +[[package]] +name = "aiobotocore" +version = "2.12.0" +requires_python = ">=3.8" +summary = "Async client for aws services using botocore and aiohttp" +groups = ["default"] +dependencies = [ + "aiohttp<4.0.0,>=3.7.4.post0", + "aioitertools<1.0.0,>=0.5.1", + "botocore<1.34.52,>=1.34.41", + "wrapt<2.0.0,>=1.10.10", +] +files = [ + {file = "aiobotocore-2.12.0-py3-none-any.whl", hash = "sha256:e121503dca049cf361dea19730e335aff2e7508f7f8b24db2e6a43a6fb70299e"}, + {file = "aiobotocore-2.12.0.tar.gz", hash = "sha256:bc841cef234afcb781f2d600680c9e49d01b9a2e8169790a32579bb0df51777f"}, +] + +[[package]] +name = "aiohttp" +version = "3.9.3" +requires_python = ">=3.8" +summary = "Async http client/server framework (asyncio)" +groups = ["default"] +dependencies = [ + "aiosignal>=1.1.2", + "async-timeout<5.0,>=4.0; python_version < \"3.11\"", + "attrs>=17.3.0", + "frozenlist>=1.1.1", + "multidict<7.0,>=4.5", + "yarl<2.0,>=1.0", +] +files = [ + {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54"}, + {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc"}, + {file = "aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52"}, + {file = "aiohttp-3.9.3-cp310-cp310-win32.whl", hash = "sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b"}, + {file = "aiohttp-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194"}, + {file 
= "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2"}, + {file = "aiohttp-3.9.3-cp38-cp38-win32.whl", hash = "sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63"}, + {file = "aiohttp-3.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d"}, + {file = "aiohttp-3.9.3-cp39-cp39-win32.whl", hash = "sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051"}, + {file = "aiohttp-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc"}, + {file = "aiohttp-3.9.3.tar.gz", hash = "sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7"}, +] + +[[package]] +name = "aioitertools" +version = "0.11.0" +requires_python = ">=3.6" +summary = "itertools and builtins for AsyncIO and mixed iterables" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.0; python_version < \"3.10\"", +] +files = [ + {file = "aioitertools-0.11.0-py3-none-any.whl", 
hash = "sha256:04b95e3dab25b449def24d7df809411c10e62aab0cbe31a50ca4e68748c43394"}, + {file = "aioitertools-0.11.0.tar.gz", hash = "sha256:42c68b8dd3a69c2bf7f2233bf7df4bb58b557bca5252ac02ed5187bbc67d6831"}, +] + +[[package]] +name = "aiosignal" +version = "1.3.1" +requires_python = ">=3.7" +summary = "aiosignal: a list of registered asynchronous callbacks" +groups = ["default"] +dependencies = [ + "frozenlist>=1.1.0", +] +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[[package]] +name = "alabaster" +version = "0.7.13" +requires_python = ">=3.6" +summary = "A configurable sidebar-enabled Sphinx theme" +groups = ["docs"] +files = [ + {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, + {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, +] + +[[package]] +name = "annotated-types" +version = "0.6.0" +requires_python = ">=3.8" +summary = "Reusable constraint types to use with typing.Annotated" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.0.0; python_version < \"3.9\"", +] +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[[package]] +name = "apeye" +version = "1.4.1" +requires_python = ">=3.6.1" +summary = "Handy tools for working with URLs and APIs." +groups = ["docs"] +dependencies = [ + "apeye-core>=1.0.0b2", + "domdf-python-tools>=2.6.0", + "platformdirs>=2.3.0", + "requests>=2.24.0", +] +files = [ + {file = "apeye-1.4.1-py3-none-any.whl", hash = "sha256:44e58a9104ec189bf42e76b3a7fe91e2b2879d96d48e9a77e5e32ff699c9204e"}, + {file = "apeye-1.4.1.tar.gz", hash = "sha256:14ea542fad689e3bfdbda2189a354a4908e90aee4bf84c15ab75d68453d76a36"}, +] + +[[package]] +name = "apeye-core" +version = "1.1.5" +requires_python = ">=3.6.1" +summary = "Core (offline) functionality for the apeye library." +groups = ["docs"] +dependencies = [ + "domdf-python-tools>=2.6.0", + "idna>=2.5", +] +files = [ + {file = "apeye_core-1.1.5-py3-none-any.whl", hash = "sha256:dc27a93f8c9e246b3b238c5ea51edf6115ab2618ef029b9f2d9a190ec8228fbf"}, + {file = "apeye_core-1.1.5.tar.gz", hash = "sha256:5de72ed3d00cc9b20fea55e54b7ab8f5ef8500eb33a5368bc162a5585e238a55"}, +] + +[[package]] +name = "asciitree" +version = "0.3.3" +summary = "Draws ASCII trees." 
+groups = ["default"] +files = [ + {file = "asciitree-0.3.3.tar.gz", hash = "sha256:4aa4b9b649f85e3fcb343363d97564aa1fb62e249677f2e18a96765145cc0f6e"}, +] + +[[package]] +name = "async-timeout" +version = "4.0.3" +requires_python = ">=3.7" +summary = "Timeout context manager for asyncio programs" +groups = ["default"] +marker = "python_version < \"3.11\"" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "23.2.0" +requires_python = ">=3.7" +summary = "Classes Without Boilerplate" +groups = ["default", "docs"] +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[[package]] +name = "autodocsumm" +version = "0.2.12" +requires_python = ">=3.7" +summary = "Extended sphinx autodoc including automatic autosummaries" +groups = ["docs"] +dependencies = [ + "Sphinx<8.0,>=2.2", +] +files = [ + {file = "autodocsumm-0.2.12-py3-none-any.whl", hash = "sha256:b842b53c686c07a4f174721ca4e729b027367703dbf42e2508863a3c6d6c049c"}, + {file = "autodocsumm-0.2.12.tar.gz", hash = "sha256:848fe8c38df433c6635489499b969cb47cc389ed3d7b6e75c8ccbc94d4b3bf9e"}, +] + +[[package]] +name = "babel" +version = "2.14.0" +requires_python = ">=3.7" +summary = "Internationalization utilities" +groups = ["docs"] +dependencies = [ + "pytz>=2015.7; python_version < \"3.9\"", +] +files = [ + {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, + {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +requires_python = ">=3.6.0" +summary = "Screen-scraping library" +groups = ["docs"] +dependencies = [ + "soupsieve>1.2", +] +files = [ + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, +] + +[[package]] +name = "black" +version = "24.2.0" +requires_python = ">=3.8" +summary = "The uncompromising code formatter." 
+groups = ["lint"] +dependencies = [ + "click>=8.0.0", + "mypy-extensions>=0.4.3", + "packaging>=22.0", + "pathspec>=0.9.0", + "platformdirs>=2", + "tomli>=1.1.0; python_version < \"3.11\"", + "typing-extensions>=4.0.1; python_version < \"3.11\"", +] +files = [ + {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"}, + {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"}, + {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"}, + {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"}, + {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"}, + {file = "black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"}, + {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"}, + {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"}, + {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"}, + {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"}, + {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"}, + {file = "black-24.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"}, + {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"}, + {file = "black-24.2.0.tar.gz", hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"}, +] + +[[package]] +name = "bleach" +version = "6.1.0" +requires_python = ">=3.8" +summary = "An easy safelist-based HTML-sanitizing tool." +groups = ["docs"] +dependencies = [ + "six>=1.9.0", + "webencodings", +] +files = [ + {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, + {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, +] + +[[package]] +name = "blinker" +version = "1.7.0" +requires_python = ">=3.8" +summary = "Fast, simple object-to-object and broadcast signaling" +groups = ["test"] +files = [ + {file = "blinker-1.7.0-py3-none-any.whl", hash = "sha256:c3f865d4d54db7abc53758a01601cf343fe55b84c1de4e3fa910e420b438d5b9"}, + {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"}, +] + +[[package]] +name = "botocore" +version = "1.34.51" +requires_python = ">= 3.8" +summary = "Low-level, data-driven core of boto 3." 
+groups = ["default"] +dependencies = [ + "jmespath<2.0.0,>=0.7.1", + "python-dateutil<3.0.0,>=2.1", + "urllib3<1.27,>=1.25.4; python_version < \"3.10\"", + "urllib3<2.1,>=1.25.4; python_version >= \"3.10\"", +] +files = [ + {file = "botocore-1.34.51-py3-none-any.whl", hash = "sha256:01d5156247f991b3466a8404e3d7460a9ecbd9b214f9992d6ba797d9ddc6f120"}, + {file = "botocore-1.34.51.tar.gz", hash = "sha256:5086217442e67dd9de36ec7e87a0c663f76b7790d5fb6a12de565af95e87e319"}, +] + +[[package]] +name = "cachecontrol" +version = "0.14.0" +requires_python = ">=3.7" +summary = "httplib2 caching for requests" +groups = ["docs", "test"] +dependencies = [ + "msgpack<2.0.0,>=0.5.2", + "requests>=2.16.0", +] +files = [ + {file = "cachecontrol-0.14.0-py3-none-any.whl", hash = "sha256:f5bf3f0620c38db2e5122c0726bdebb0d16869de966ea6a2befe92470b740ea0"}, + {file = "cachecontrol-0.14.0.tar.gz", hash = "sha256:7db1195b41c81f8274a7bbd97c956f44e8348265a1bc7641c37dfebc39f0c938"}, +] + +[[package]] +name = "cachecontrol" +version = "0.14.0" +extras = ["filecache"] +requires_python = ">=3.7" +summary = "httplib2 caching for requests" +groups = ["docs", "test"] +dependencies = [ + "cachecontrol==0.14.0", + "filelock>=3.8.0", +] +files = [ + {file = "cachecontrol-0.14.0-py3-none-any.whl", hash = "sha256:f5bf3f0620c38db2e5122c0726bdebb0d16869de966ea6a2befe92470b740ea0"}, + {file = "cachecontrol-0.14.0.tar.gz", hash = "sha256:7db1195b41c81f8274a7bbd97c956f44e8348265a1bc7641c37dfebc39f0c938"}, +] + +[[package]] +name = "cachetools" +version = "5.3.3" +requires_python = ">=3.7" +summary = "Extensible memoizing collections and decorators" +groups = ["lint"] +files = [ + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, +] + +[[package]] +name = "certifi" +version = "2024.2.2" +requires_python = ">=3.6" +summary = "Python package for providing Mozilla's CA Bundle." +groups = ["default", "docs", "test"] +files = [ + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +requires_python = ">=3.8" +summary = "Foreign Function Interface for Python calling C code." 
+groups = ["docs"] +marker = "implementation_name == \"pypy\"" +dependencies = [ + "pycparser", +] +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +requires_python = ">=3.8" +summary = "Validate configuration and produce human readable error messages." +groups = ["lint"] +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "chardet" +version = "5.2.0" +requires_python = ">=3.7" +summary = "Universal encoding detector for Python 3" +groups = ["lint"] +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +requires_python = ">=3.7.0" +summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+groups = ["default", "docs", "test"] +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +requires_python = ">=3.7" +summary = "Composable command line interface toolkit" +groups = ["lint"] +dependencies = [ + "colorama; platform_system == \"Windows\"", +] +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Cross-platform colored terminal text." +groups = ["docs", "lint", "test"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.4.3" +requires_python = ">=3.8" +summary = "Code coverage measurement for Python" +groups = ["test"] +files = [ + {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, + {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, + {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, + {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, + {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, + {file = 
"coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, + {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, + {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, + {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, + {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, + {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, + {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, +] + +[[package]] +name = "coverage" +version = "7.4.3" +extras = ["toml"] +requires_python = ">=3.8" +summary = "Code coverage measurement for Python" +groups = ["test"] +dependencies = [ + "coverage==7.4.3", + "tomli; python_full_version <= \"3.11.0a6\"", +] +files = [ + {file = 
"coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, + {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, + {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, + {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, + {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, + {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, + {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, + {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, + {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, + {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, + {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, + {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, +] + +[[package]] +name = "cssutils" +version = "2.9.0" +requires_python = ">=3.8" +summary = "A CSS Cascading Style Sheets library for Python" +groups = ["docs"] +files = [ + {file = "cssutils-2.9.0-py3-none-any.whl", hash = "sha256:f8b013169e281c0c6083207366c5005f5dd4549055f7aba840384fb06a78745c"}, + {file = "cssutils-2.9.0.tar.gz", hash = "sha256:89477b3d17d790e97b9fb4def708767061055795aae6f7c82ae32e967c9be4cd"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +summary = "XML bomb protection for Python stdlib modules" +groups = ["docs"] +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "dep-logic" +version = "0.2.0" +requires_python = ">=3.8" +summary = "Python dependency specifications supporting logical operations" +groups = ["test"] +dependencies = [ + "packaging>=22", +] +files = [ + {file = "dep_logic-0.2.0-py3-none-any.whl", hash = "sha256:af439c2acaf7522dd321a71f30e0c4fd59b7553eb99967af07d53320902cde39"}, + {file = "dep_logic-0.2.0.tar.gz", hash = "sha256:cff502b515aff2d413d19d6afc70174fc67da19e821be4a9b68460ccee2514c9"}, +] + +[[package]] +name = "dependency-injector" +version = "4.41.0" +summary = "Dependency injection framework for Python" +groups = ["default"] +dependencies = [ + "six<=1.16.0,>=1.7.0", +] +files = [ + {file = "dependency-injector-4.41.0.tar.gz", hash = "sha256:939dfc657104bc3e66b67afd3fb2ebb0850c9a1e73d0d26066f2bbdd8735ff9c"}, + {file = "dependency_injector-4.41.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2381a251b04244125148298212550750e6e1403e9b2850cc62e0e829d050ad3"}, + {file = "dependency_injector-4.41.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75280dfa23f7c88e1bf56c3920d58a43516816de6f6ab2a6650bb8a0f27d5c2c"}, + {file = 
"dependency_injector-4.41.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63bfba21f8bff654a80e9b9d06dd6c43a442990b73bf89cd471314c11c541ec2"}, + {file = "dependency_injector-4.41.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3535d06416251715b45f8412482b58ec1c6196a4a3baa207f947f0b03a7c4b44"}, + {file = "dependency_injector-4.41.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d09c08c944a25dabfb454238c1a889acd85102b93ae497de523bf9ab7947b28a"}, + {file = "dependency_injector-4.41.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:586a0821720b15932addbefb00f7370fbcd5831d6ebbd6494d774b44ff96d23a"}, + {file = "dependency_injector-4.41.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7fa4970f12a3fc95d8796938b11c41276ad1ff4c447b0e589212eab3fc527a90"}, + {file = "dependency_injector-4.41.0-cp310-cp310-win32.whl", hash = "sha256:d557e40673de984f78dab13ebd68d27fbb2f16d7c4e3b663ea2fa2f9fae6765b"}, + {file = "dependency_injector-4.41.0-cp310-cp310-win_amd64.whl", hash = "sha256:3744c327d18408e74781bd6d8b7738745ee80ef89f2c8daecf9ebd098cb84972"}, + {file = "dependency_injector-4.41.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8686fa330c83251c75c8238697686f7a0e0f6d40658538089165dc72df9bcff"}, + {file = "dependency_injector-4.41.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d670a844268dcd758195e58e9a5b39fc74bb8648aba99a13135a4a10ec9cfac"}, + {file = "dependency_injector-4.41.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3b9d41e0eff4c8e16fea1e33de66ff0030fe51137ca530f3c52ce110447914"}, + {file = "dependency_injector-4.41.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33a724e0a737baadb4378f5dc1b079867cc3a88552fcca719b3dba84716828b2"}, + {file = "dependency_injector-4.41.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3588bd887b051d16b8bcabaae1127eb14059a0719a8fe34c8a75ba59321b352c"}, + {file = "dependency_injector-4.41.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:409441122f40e1b4b8582845fdd76deb9dc5c9d6eb74a057b85736ef9e9c671f"}, + {file = "dependency_injector-4.41.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7dcba8665cafec825b7095d5dd80afb5cf14404450eca3fe8b66e1edbf4dbc10"}, + {file = "dependency_injector-4.41.0-cp38-cp38-win32.whl", hash = "sha256:8b51efeaebacaf79ef68edfc65e9687699ccffb3538c4a3ab30d0d77e2db7189"}, + {file = "dependency_injector-4.41.0-cp38-cp38-win_amd64.whl", hash = "sha256:1662e2ef60ac6e681b9e11b5d8b7c17a0f733688916cf695f9540f8f50a61b1e"}, + {file = "dependency_injector-4.41.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51217cb384b468d7cc355544cec20774859f00812f9a1a71ed7fa701c957b2a7"}, + {file = "dependency_injector-4.41.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3890a12423ae3a9eade035093beba487f8d092ee6c6cb8706f4e7080a56e819"}, + {file = "dependency_injector-4.41.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99ed73b1521bf249e2823a08a730c9f9413a58f4b4290da022e0ad4fb333ba3d"}, + {file = "dependency_injector-4.41.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:300838e9d4f3fbf539892a5a4072851728e23b37a1f467afcf393edd994d88f0"}, + {file = "dependency_injector-4.41.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56d37b9d2f50a18f059d9abdbea7669a7518bd42b81603c21a27910a2b3f1657"}, + {file = 
"dependency_injector-4.41.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4a44ca3ce5867513a70b31855b218be3d251f5068ce1c480cc3a4ad24ffd3280"}, + {file = "dependency_injector-4.41.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:67b369592c57549ccdcad0d5fef1ddb9d39af7fed8083d76e789ab0111fc6389"}, + {file = "dependency_injector-4.41.0-cp39-cp39-win32.whl", hash = "sha256:740a8e8106a04d3f44b52b25b80570fdac96a8a3934423de7c9202c5623e7936"}, + {file = "dependency_injector-4.41.0-cp39-cp39-win_amd64.whl", hash = "sha256:22b11dbf696e184f0b3d5ac4e5418aeac3c379ba4ea758c04a83869b7e5d1cbf"}, + {file = "dependency_injector-4.41.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b365a8548e9a49049fa6acb24d3cd939f619eeb8e300ca3e156e44402dcc07ec"}, + {file = "dependency_injector-4.41.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5168dc59808317dc4cdd235aa5d7d556d33e5600156acaf224cead236b48a3e8"}, + {file = "dependency_injector-4.41.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3229d83e99e255451605d5276604386e06ad948e3d60f31ddd796781c77f76f"}, + {file = "dependency_injector-4.41.0-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1baee908f21190bdc46a65ce4c417a5175e9397ca62354928694fce218f84487"}, + {file = "dependency_injector-4.41.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b37f36ecb0c1227f697e1d4a029644e3eda8dd0f0716aa63ad04d96dbb15bbbb"}, + {file = "dependency_injector-4.41.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b0c9c966ff66c77364a2d43d08de9968aff7e3903938fe912ba49796b2133344"}, + {file = "dependency_injector-4.41.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12e91ac0333e7e589421943ff6c6bf9cf0d9ac9703301cec37ccff3723406332"}, + {file = "dependency_injector-4.41.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2440b32474d4e747209528ca3ae48f42563b2fbe3d74dbfe949c11dfbfef7c4"}, + {file = "dependency_injector-4.41.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54032d62610cf2f4421c9d92cef52957215aaa0bca403cda580c58eb3f726eda"}, + {file = "dependency_injector-4.41.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:76b94c8310929e54136f3cb3de3adc86d1a657b3984299f40bf1cd2ba0bae548"}, + {file = "dependency_injector-4.41.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6ee9810841c6e0599356cb884d16453bfca6ab739d0e4f0248724ed8f9ee0d79"}, + {file = "dependency_injector-4.41.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b98945edae88e777091bf0848f869fb94bd76dfa4066d7c870a5caa933391d0"}, + {file = "dependency_injector-4.41.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a2dee5d4abdd21f1a30a51d46645c095be9dcc404c7c6e9f81d0a01415a49e64"}, + {file = "dependency_injector-4.41.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d03f5fa0fa98a18bd0dfce846db80e2798607f0b861f1f99c97f441f7669d7a2"}, + {file = "dependency_injector-4.41.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f2842e15bae664a9f69932e922b02afa055c91efec959cb1896f6c499bf68180"}, +] + +[[package]] +name = "dict2css" +version = "0.3.0.post1" +requires_python = ">=3.6" +summary = "A 
μ-library for constructing cascading style sheets from Python dictionaries." +groups = ["docs"] +dependencies = [ + "cssutils>=2.2.0", + "domdf-python-tools>=2.2.0", +] +files = [ + {file = "dict2css-0.3.0.post1-py3-none-any.whl", hash = "sha256:f006a6b774c3e31869015122ae82c491fd25e7de4a75607a62aa3e798f837e0d"}, + {file = "dict2css-0.3.0.post1.tar.gz", hash = "sha256:89c544c21c4ca7472c3fffb9d37d3d926f606329afdb751dc1de67a411b70719"}, +] + +[[package]] +name = "distlib" +version = "0.3.8" +summary = "Distribution utilities" +groups = ["lint", "test"] +files = [ + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, +] + +[[package]] +name = "docutils" +version = "0.20.1" +requires_python = ">=3.7" +summary = "Docutils -- Python Documentation Utilities" +groups = ["docs"] +files = [ + {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, + {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, +] + +[[package]] +name = "domdf-python-tools" +version = "3.8.0.post2" +requires_python = ">=3.6" +summary = "Helpful functions for Python 🐍 🛠️" +groups = ["docs"] +dependencies = [ + "importlib-metadata>=3.6.0; python_version < \"3.9\"", + "natsort>=7.0.1", + "typing-extensions>=3.7.4.1", +] +files = [ + {file = "domdf_python_tools-3.8.0.post2-py3-none-any.whl", hash = "sha256:ad2c763c8d00850a7fa92ad95e9891a1918281ea25322c4dbb1734fd32f905dd"}, + {file = "domdf_python_tools-3.8.0.post2.tar.gz", hash = "sha256:a1fd255ea29f767b08de462d2da39d360262304389227d980bc307ee8aa3366a"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +requires_python = ">=3.7" +summary = "Backport of PEP 654 (exception groups)" +groups = ["test"] +marker = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[[package]] +name = "fasteners" +version = "0.19" +requires_python = ">=3.6" +summary = "A python package that provides useful locks" +groups = ["default"] +files = [ + {file = "fasteners-0.19-py3-none-any.whl", hash = "sha256:758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237"}, + {file = "fasteners-0.19.tar.gz", hash = "sha256:b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c"}, +] + +[[package]] +name = "fastjsonschema" +version = "2.19.1" +summary = "Fastest Python implementation of JSON schema" +groups = ["docs"] +files = [ + {file = "fastjsonschema-2.19.1-py3-none-any.whl", hash = "sha256:3672b47bc94178c9f23dbb654bf47440155d4db9df5f7bc47643315f9c405cd0"}, + {file = "fastjsonschema-2.19.1.tar.gz", hash = "sha256:e3126a94bdc4623d3de4485f8d468a12f02a67921315ddc87836d6e456dc789d"}, +] + +[[package]] +name = "filelock" +version = "3.13.1" +requires_python = ">=3.8" +summary = "A platform independent file lock." 
+groups = ["docs", "lint", "test"] +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[[package]] +name = "findpython" +version = "0.4.1" +requires_python = ">=3.7" +summary = "A utility to find python versions on your system" +groups = ["test"] +dependencies = [ + "packaging>=20", +] +files = [ + {file = "findpython-0.4.1-py3-none-any.whl", hash = "sha256:ca3a5272704b0b8a2f5e8d03d816701ec99f13eafee9bb2a316cbf099c937ede"}, + {file = "findpython-0.4.1.tar.gz", hash = "sha256:d7d014558681b3761d57a5b2342a713a8bf302f6c1fc9d99f81b9d8bd1681b04"}, +] + +[[package]] +name = "flake8" +version = "5.0.4" +requires_python = ">=3.6.1" +summary = "the modular source code checker: pep8 pyflakes and co" +groups = ["lint"] +dependencies = [ + "mccabe<0.8.0,>=0.7.0", + "pycodestyle<2.10.0,>=2.9.0", + "pyflakes<2.6.0,>=2.5.0", +] +files = [ + {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"}, + {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"}, +] + +[[package]] +name = "frozenlist" +version = "1.4.1" +requires_python = ">=3.8" +summary = "A list-like structure which implements collections.abc.MutableSequence" +groups = ["default"] +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = 
"frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "fsspec" +version = "2024.2.0" +requires_python = ">=3.8" +summary = "File-system specification" +groups = ["default"] +files = [ + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, +] + +[[package]] +name = "html5lib" +version = "1.1" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +summary = "HTML parser based on the WHATWG HTML specification" +groups = ["docs"] +dependencies = [ + "six>=1.9", + "webencodings", +] +files = [ + {file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"}, + {file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"}, +] + +[[package]] +name = "identify" +version = "2.5.35" +requires_python = ">=3.8" +summary = "File identification library for Python" +groups = ["lint"] +files = [ + {file = "identify-2.5.35-py2.py3-none-any.whl", hash = "sha256:c4de0081837b211594f8e877a6b4fad7ca32bbfc1a9307fdd61c28bfe923f13e"}, + {file = "identify-2.5.35.tar.gz", hash = "sha256:10a7ca245cfcd756a554a7288159f72ff105ad233c7c4b9c6f0f4d108f5f6791"}, +] + +[[package]] +name = "idna" +version = "3.6" +requires_python = ">=3.5" +summary = "Internationalized Domain Names in Applications (IDNA)" +groups = ["default", "docs", "test"] +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = 
"sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "Getting image size from png/jpeg/jpeg2000/gif file" +groups = ["docs"] +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] + +[[package]] +name = "importlib-metadata" +version = "7.0.1" +requires_python = ">=3.8" +summary = "Read metadata from Python packages" +groups = ["default", "docs", "test"] +marker = "python_version < \"3.10\"" +dependencies = [ + "zipp>=0.5", +] +files = [ + {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, + {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, +] + +[[package]] +name = "importlib-resources" +version = "6.1.2" +requires_python = ">=3.8" +summary = "Read resources from Python packages" +groups = ["docs", "test"] +marker = "python_version < \"3.9\"" +dependencies = [ + "zipp>=3.1.0; python_version < \"3.10\"", +] +files = [ + {file = "importlib_resources-6.1.2-py3-none-any.whl", hash = "sha256:9a0a862501dc38b68adebc82970140c9e4209fc99601782925178f8386339938"}, + {file = "importlib_resources-6.1.2.tar.gz", hash = "sha256:308abf8474e2dba5f867d279237cd4076482c3de7104a40b41426370e891549b"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +requires_python = ">=3.7" +summary = "brain-dead simple config-ini parsing" +groups = ["test"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "installer" +version = "0.7.0" +requires_python = ">=3.7" +summary = "A library for installing Python wheels." +groups = ["test"] +files = [ + {file = "installer-0.7.0-py3-none-any.whl", hash = "sha256:05d1933f0a5ba7d8d6296bb6d5018e7c94fa473ceb10cf198a92ccea19c27b53"}, + {file = "installer-0.7.0.tar.gz", hash = "sha256:a26d3e3116289bb08216e0d0f7d925fcef0b0194eedfa0c944bcaaa106c4b631"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +requires_python = ">=3.8.0" +summary = "A Python utility / library to sort Python imports." +groups = ["lint"] +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[[package]] +name = "jinja2" +version = "3.1.3" +requires_python = ">=3.7" +summary = "A very fast and expressive template engine." 
+groups = ["docs"] +dependencies = [ + "MarkupSafe>=2.0", +] +files = [ + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +requires_python = ">=3.7" +summary = "JSON Matching Expressions" +groups = ["default"] +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsonschema" +version = "4.21.1" +requires_python = ">=3.8" +summary = "An implementation of JSON Schema validation for Python" +groups = ["docs"] +dependencies = [ + "attrs>=22.2.0", + "importlib-resources>=1.4.0; python_version < \"3.9\"", + "jsonschema-specifications>=2023.03.6", + "pkgutil-resolve-name>=1.3.10; python_version < \"3.9\"", + "referencing>=0.28.4", + "rpds-py>=0.7.1", +] +files = [ + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, +] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +requires_python = ">=3.8" +summary = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +groups = ["docs"] +dependencies = [ + "importlib-resources>=1.4.0; python_version < \"3.9\"", + "referencing>=0.31.0", +] +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[[package]] +name = "jupyter-client" +version = "8.6.0" +requires_python = ">=3.8" +summary = "Jupyter protocol implementation and client libraries" +groups = ["docs"] +dependencies = [ + "importlib-metadata>=4.8.3; python_version < \"3.10\"", + "jupyter-core!=5.0.*,>=4.12", + "python-dateutil>=2.8.2", + "pyzmq>=23.0", + "tornado>=6.2", + "traitlets>=5.3", +] +files = [ + {file = "jupyter_client-8.6.0-py3-none-any.whl", hash = "sha256:909c474dbe62582ae62b758bca86d6518c85234bdee2d908c778db6d72f39d99"}, + {file = "jupyter_client-8.6.0.tar.gz", hash = "sha256:0642244bb83b4764ae60d07e010e15f0e2d275ec4e918a8f7b80fbbef3ca60c7"}, +] + +[[package]] +name = "jupyter-core" +version = "5.7.1" +requires_python = ">=3.8" +summary = "Jupyter core package. A base package on which Jupyter projects rely." 
+groups = ["docs"] +dependencies = [ + "platformdirs>=2.5", + "pywin32>=300; sys_platform == \"win32\" and platform_python_implementation != \"PyPy\"", + "traitlets>=5.3", +] +files = [ + {file = "jupyter_core-5.7.1-py3-none-any.whl", hash = "sha256:c65c82126453a723a2804aa52409930434598fd9d35091d63dfb919d2b765bb7"}, + {file = "jupyter_core-5.7.1.tar.gz", hash = "sha256:de61a9d7fc71240f688b2fb5ab659fbb56979458dc66a71decd098e03c79e218"}, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +requires_python = ">=3.8" +summary = "Pygments theme using JupyterLab CSS variables" +groups = ["docs"] +files = [ + {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, + {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, +] + +[[package]] +name = "llvmlite" +version = "0.41.1" +requires_python = ">=3.8" +summary = "lightweight wrapper around basic LLVM functionality" +groups = ["default"] +files = [ + {file = "llvmlite-0.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1e1029d47ee66d3a0c4d6088641882f75b93db82bd0e6178f7bd744ebce42b9"}, + {file = "llvmlite-0.41.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:150d0bc275a8ac664a705135e639178883293cf08c1a38de3bbaa2f693a0a867"}, + {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eee5cf17ec2b4198b509272cf300ee6577229d237c98cc6e63861b08463ddc6"}, + {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd0338da625346538f1173a17cabf21d1e315cf387ca21b294ff209d176e244"}, + {file = "llvmlite-0.41.1-cp310-cp310-win32.whl", hash = "sha256:fa1469901a2e100c17eb8fe2678e34bd4255a3576d1a543421356e9c14d6e2ae"}, + {file = "llvmlite-0.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b76acee82ea0e9304be6be9d4b3840208d050ea0dcad75b1635fa06e949a0ae"}, + {file = "llvmlite-0.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5940bc901fb0325970415dbede82c0b7f3e35c2d5fd1d5e0047134c2c46b3281"}, + {file = "llvmlite-0.41.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b0a9a47c28f67a269bb62f6256e63cef28d3c5f13cbae4fab587c3ad506778b"}, + {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8afdfa6da33f0b4226af8e64cfc2b28986e005528fbf944d0a24a72acfc9432"}, + {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8454c1133ef701e8c050a59edd85d238ee18bb9a0eb95faf2fca8b909ee3c89a"}, + {file = "llvmlite-0.41.1-cp38-cp38-win32.whl", hash = "sha256:2d92c51e6e9394d503033ffe3292f5bef1566ab73029ec853861f60ad5c925d0"}, + {file = "llvmlite-0.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:df75594e5a4702b032684d5481db3af990b69c249ccb1d32687b8501f0689432"}, + {file = "llvmlite-0.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04725975e5b2af416d685ea0769f4ecc33f97be541e301054c9f741003085802"}, + {file = "llvmlite-0.41.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bf14aa0eb22b58c231243dccf7e7f42f7beec48970f2549b3a6acc737d1a4ba4"}, + {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c32356f669e036eb01016e883b22add883c60739bc1ebee3a1cc0249a50828"}, + {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24091a6b31242bcdd56ae2dbea40007f462260bc9bdf947953acc39dffd54f8f"}, + {file = 
"llvmlite-0.41.1-cp39-cp39-win32.whl", hash = "sha256:880cb57ca49e862e1cd077104375b9d1dfdc0622596dfa22105f470d7bacb309"}, + {file = "llvmlite-0.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:92f093986ab92e71c9ffe334c002f96defc7986efda18397d0f08534f3ebdc4d"}, + {file = "llvmlite-0.41.1.tar.gz", hash = "sha256:f19f767a018e6ec89608e1f6b13348fa2fcde657151137cb64e56d48598a92db"}, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +requires_python = ">=3.8" +summary = "Python port of markdown-it. Markdown parsing, done right!" +groups = ["test"] +dependencies = [ + "mdurl~=0.1", +] +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[[package]] +name = "markupsafe" +version = "2.1.5" +requires_python = ">=3.7" +summary = "Safely add untrusted strings to HTML/XML markup." +groups = ["docs"] +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +requires_python = ">=3.6" +summary = "McCabe checker, plugin for flake8" +groups = ["lint"] +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +requires_python = ">=3.7" +summary = "Markdown URL utilities" +groups = ["test"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "mistune" +version = "3.0.2" +requires_python = ">=3.7" +summary = "A sane and fast Markdown parser with useful plugins and renderers" +groups = ["docs"] +files = [ + {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, + {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, +] + +[[package]] 
+name = "msgpack" +version = "1.0.7" +requires_python = ">=3.8" +summary = "MessagePack serializer" +groups = ["docs", "test"] +files = [ + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"}, + {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"}, + {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"}, + {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"}, + {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", 
hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"}, + {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"}, + {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"}, + {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, +] + +[[package]] +name = "multidict" +version = "6.0.5" +requires_python = ">=3.7" +summary = "multidict implementation" +groups = ["default"] +files = [ + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, + {file = 
"multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, + {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, + {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, + {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, + {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, + {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, + {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, + {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, + {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, +] + +[[package]] +name = "mypy" +version = "1.8.0" +requires_python = ">=3.8" +summary = "Optional static typing for Python" +groups = ["lint"] +dependencies = [ + "mypy-extensions>=1.0.0", + "tomli>=1.1.0; python_version < \"3.11\"", + "typing-extensions>=4.1.0", +] +files = [ + {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, + {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, + {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, + {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, + {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, + {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, + {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, + {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, + {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, + {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, + {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, + {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, + {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, + {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, + {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, + {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, + {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +requires_python = ">=3.5" +summary = "Type system extensions for programs checked with the mypy type checker." +groups = ["lint"] +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "natsort" +version = "8.4.0" +requires_python = ">=3.7" +summary = "Simple yet flexible natural sorting in Python." +groups = ["docs"] +files = [ + {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, + {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, +] + +[[package]] +name = "nbclient" +version = "0.9.0" +requires_python = ">=3.8.0" +summary = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." +groups = ["docs"] +dependencies = [ + "jupyter-client>=6.1.12", + "jupyter-core!=5.0.*,>=4.12", + "nbformat>=5.1", + "traitlets>=5.4", +] +files = [ + {file = "nbclient-0.9.0-py3-none-any.whl", hash = "sha256:a3a1ddfb34d4a9d17fc744d655962714a866639acd30130e9be84191cd97cd15"}, + {file = "nbclient-0.9.0.tar.gz", hash = "sha256:4b28c207877cf33ef3a9838cdc7a54c5ceff981194a82eac59d558f05487295e"}, +] + +[[package]] +name = "nbconvert" +version = "7.16.1" +requires_python = ">=3.8" +summary = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
+groups = ["docs"] +dependencies = [ + "beautifulsoup4", + "bleach!=5.0.0", + "defusedxml", + "importlib-metadata>=3.6; python_version < \"3.10\"", + "jinja2>=3.0", + "jupyter-core>=4.7", + "jupyterlab-pygments", + "markupsafe>=2.0", + "mistune<4,>=2.0.3", + "nbclient>=0.5.0", + "nbformat>=5.7", + "packaging", + "pandocfilters>=1.4.1", + "pygments>=2.4.1", + "tinycss2", + "traitlets>=5.1", +] +files = [ + {file = "nbconvert-7.16.1-py3-none-any.whl", hash = "sha256:3188727dffadfdc9c6a1c7250729063d7bc78b355ad7aa023138afa030d1cd07"}, + {file = "nbconvert-7.16.1.tar.gz", hash = "sha256:e79e6a074f49ba3ed29428ed86487bf51509d9aab613bd8522ac08f6d28fd7fd"}, +] + +[[package]] +name = "nbformat" +version = "5.9.2" +requires_python = ">=3.8" +summary = "The Jupyter Notebook format" +groups = ["docs"] +dependencies = [ + "fastjsonschema", + "jsonschema>=2.6", + "jupyter-core", + "traitlets>=5.1", +] +files = [ + {file = "nbformat-5.9.2-py3-none-any.whl", hash = "sha256:1c5172d786a41b82bcfd0c23f9e6b6f072e8fb49c39250219e4acfff1efe89e9"}, + {file = "nbformat-5.9.2.tar.gz", hash = "sha256:5f98b5ba1997dff175e77e0c17d5c10a96eaed2cbd1de3533d1fc35d5e111192"}, +] + +[[package]] +name = "nbsphinx" +version = "0.9.3" +requires_python = ">=3.6" +summary = "Jupyter Notebook Tools for Sphinx" +groups = ["docs"] +dependencies = [ + "docutils", + "jinja2", + "nbconvert!=5.4", + "nbformat", + "sphinx>=1.8", + "traitlets>=5", +] +files = [ + {file = "nbsphinx-0.9.3-py3-none-any.whl", hash = "sha256:6e805e9627f4a358bd5720d5cbf8bf48853989c79af557afd91a5f22e163029f"}, + {file = "nbsphinx-0.9.3.tar.gz", hash = "sha256:ec339c8691b688f8676104a367a4b8cf3ea01fd089dc28d24dec22d563b11562"}, +] + +[[package]] +name = "nodeenv" +version = "1.8.0" +requires_python = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +summary = "Node.js virtual environment builder" +groups = ["lint"] +dependencies = [ + "setuptools", +] +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[[package]] +name = "numba" +version = "0.58.1" +requires_python = ">=3.8" +summary = "compiling Python code using LLVM" +groups = ["default"] +dependencies = [ + "importlib-metadata; python_version < \"3.9\"", + "llvmlite<0.42,>=0.41.0dev0", + "numpy<1.27,>=1.22", +] +files = [ + {file = "numba-0.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07f2fa7e7144aa6f275f27260e73ce0d808d3c62b30cff8906ad1dec12d87bbe"}, + {file = "numba-0.58.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7bf1ddd4f7b9c2306de0384bf3854cac3edd7b4d8dffae2ec1b925e4c436233f"}, + {file = "numba-0.58.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bc2d904d0319d7a5857bd65062340bed627f5bfe9ae4a495aef342f072880d50"}, + {file = "numba-0.58.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e79b6cc0d2bf064a955934a2e02bf676bc7995ab2db929dbbc62e4c16551be6"}, + {file = "numba-0.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:81fe5b51532478149b5081311b0fd4206959174e660c372b94ed5364cfb37c82"}, + {file = "numba-0.58.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea5bfcf7d641d351c6a80e8e1826eb4a145d619870016eeaf20bbd71ef5caa22"}, + {file = "numba-0.58.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e63d6aacaae1ba4ef3695f1c2122b30fa3d8ba039c8f517784668075856d79e2"}, + {file = 
"numba-0.58.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6fe7a9d8e3bd996fbe5eac0683227ccef26cba98dae6e5cee2c1894d4b9f16c1"}, + {file = "numba-0.58.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:898af055b03f09d33a587e9425500e5be84fc90cd2f80b3fb71c6a4a17a7e354"}, + {file = "numba-0.58.1-cp38-cp38-win_amd64.whl", hash = "sha256:d3e2fe81fe9a59fcd99cc572002101119059d64d31eb6324995ee8b0f144a306"}, + {file = "numba-0.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c765aef472a9406a97ea9782116335ad4f9ef5c9f93fc05fd44aab0db486954"}, + {file = "numba-0.58.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9356e943617f5e35a74bf56ff6e7cc83e6b1865d5e13cee535d79bf2cae954"}, + {file = "numba-0.58.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:240e7a1ae80eb6b14061dc91263b99dc8d6af9ea45d310751b780888097c1aaa"}, + {file = "numba-0.58.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:45698b995914003f890ad839cfc909eeb9c74921849c712a05405d1a79c50f68"}, + {file = "numba-0.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd3dda77955be03ff366eebbfdb39919ce7c2620d86c906203bed92124989032"}, + {file = "numba-0.58.1.tar.gz", hash = "sha256:487ded0633efccd9ca3a46364b40006dbdaca0f95e99b8b83e778d1195ebcbaa"}, +] + +[[package]] +name = "numcodecs" +version = "0.12.1" +requires_python = ">=3.8" +summary = "A Python package providing buffer compression and transformation codecs for use in data storage and communication applications." +groups = ["default"] +dependencies = [ + "numpy>=1.7", +] +files = [ + {file = "numcodecs-0.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d37f628fe92b3699e65831d5733feca74d2e33b50ef29118ffd41c13c677210e"}, + {file = "numcodecs-0.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:941b7446b68cf79f089bcfe92edaa3b154533dcbcd82474f994b28f2eedb1c60"}, + {file = "numcodecs-0.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e79bf9d1d37199ac00a60ff3adb64757523291d19d03116832e600cac391c51"}, + {file = "numcodecs-0.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:82d7107f80f9307235cb7e74719292d101c7ea1e393fe628817f0d635b7384f5"}, + {file = "numcodecs-0.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:caf1a1e6678aab9c1e29d2109b299f7a467bd4d4c34235b1f0e082167846b88f"}, + {file = "numcodecs-0.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c17687b1fd1fef68af616bc83f896035d24e40e04e91e7e6dae56379eb59fe33"}, + {file = "numcodecs-0.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29dfb195f835a55c4d490fb097aac8c1bcb96c54cf1b037d9218492c95e9d8c5"}, + {file = "numcodecs-0.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:2f1ba2f4af3fd3ba65b1bcffb717fe65efe101a50a91c368f79f3101dbb1e243"}, + {file = "numcodecs-0.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2fbb12a6a1abe95926f25c65e283762d63a9bf9e43c0de2c6a1a798347dfcb40"}, + {file = "numcodecs-0.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f2207871868b2464dc11c513965fd99b958a9d7cde2629be7b2dc84fdaab013b"}, + {file = "numcodecs-0.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abff3554a6892a89aacf7b642a044e4535499edf07aeae2f2e6e8fc08c9ba07f"}, + {file = "numcodecs-0.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:ef964d4860d3e6b38df0633caf3e51dc850a6293fd8e93240473642681d95136"}, + {file = "numcodecs-0.12.1.tar.gz", hash = "sha256:05d91a433733e7eef268d7e80ec226a0232da244289614a8f3826901aec1098e"}, +] + 
+[[package]] +name = "numpy" +version = "1.24.4" +requires_python = ">=3.8" +summary = "Fundamental package for array computing in Python" +groups = ["default"] +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "packaging" +version = "23.2" +requires_python = ">=3.7" +summary = "Core utilities for Python packages" +groups = ["docs", 
"lint", "test"] +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "Utilities for writing pandoc filters in python" +groups = ["docs"] +files = [ + {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, + {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +requires_python = ">=3.8" +summary = "Utility library for gitignore style pattern matching of file paths." +groups = ["lint"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pdm" +version = "2.12.4" +requires_python = ">=3.8" +summary = "A modern Python package and dependency manager supporting the latest PEP standards" +groups = ["test"] +dependencies = [ + "blinker", + "cachecontrol[filecache]>=0.13.0", + "certifi", + "dep-logic<1.0,>=0.2.0", + "findpython<1.0.0a0,>=0.4.0", + "importlib-metadata>=3.6; python_version < \"3.10\"", + "importlib-resources>=5; python_version < \"3.9\"", + "installer<0.8,>=0.7", + "packaging!=22.0,>=20.9", + "platformdirs", + "pyproject-hooks", + "python-dotenv>=0.15", + "requests-toolbelt", + "resolvelib>=1.0.1", + "rich>=12.3.0", + "shellingham>=1.3.2", + "tomli>=1.1.0; python_version < \"3.11\"", + "tomlkit<1,>=0.11.1", + "truststore; python_version >= \"3.10\"", + "unearth>=0.12.1", + "virtualenv>=20", +] +files = [ + {file = "pdm-2.12.4-py3-none-any.whl", hash = "sha256:56949eeacbe6100f193f2ede6e380edc956d309f68384593c5a4feefdf98ce8b"}, + {file = "pdm-2.12.4.tar.gz", hash = "sha256:d04877362f95cf9ffc1d2c38b851f693706e4928840e48986ae576dad5741496"}, +] + +[[package]] +name = "pdm" +version = "2.12.4" +extras = ["pytest"] +requires_python = ">=3.8" +summary = "A modern Python package and dependency manager supporting the latest PEP standards" +groups = ["test"] +dependencies = [ + "pdm==2.12.4", + "pytest", + "pytest-mock", +] +files = [ + {file = "pdm-2.12.4-py3-none-any.whl", hash = "sha256:56949eeacbe6100f193f2ede6e380edc956d309f68384593c5a4feefdf98ce8b"}, + {file = "pdm-2.12.4.tar.gz", hash = "sha256:d04877362f95cf9ffc1d2c38b851f693706e4928840e48986ae576dad5741496"}, +] + +[[package]] +name = "pillow" +version = "10.2.0" +requires_python = ">=3.8" +summary = "Python Imaging Library (Fork)" +groups = ["default"] +files = [ + {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"}, + {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"}, + {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"}, + {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"}, + {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"}, + {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"}, + {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"}, + {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"}, + {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"}, + {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"}, + {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"}, +] + +[[package]] +name = "pint" +version = "0.21.1" +requires_python = ">=3.8" +summary = "Physical quantities module" +groups = ["default"] +files = [ + {file = "Pint-0.21.1-py3-none-any.whl", hash = "sha256:230ebccc312693117ee925c6492b3631c772ae9f7851a4e86080a15e7be692d8"}, + {file = "Pint-0.21.1.tar.gz", hash = "sha256:5d5b6b518d0c5a7ab03a776175db500f1ed1523ee75fb7fafe38af8149431c8d"}, +] + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +requires_python = ">=3.6" +summary = "Resolve a name to an object." 
+groups = ["docs"] +marker = "python_version < \"3.9\"" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.0" +requires_python = ">=3.8" +summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +groups = ["docs", "lint", "test"] +files = [ + {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, + {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, +] + +[[package]] +name = "pluggy" +version = "1.4.0" +requires_python = ">=3.8" +summary = "plugin and hook calling mechanisms for python" +groups = ["lint", "test"] +files = [ + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, +] + +[[package]] +name = "pre-commit" +version = "3.5.0" +requires_python = ">=3.8" +summary = "A framework for managing and maintaining multi-language pre-commit hooks." +groups = ["lint"] +dependencies = [ + "cfgv>=2.0.0", + "identify>=1.0.0", + "nodeenv>=0.11.1", + "pyyaml>=5.1", + "virtualenv>=20.10.0", +] +files = [ + {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, + {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, +] + +[[package]] +name = "pycodestyle" +version = "2.9.1" +requires_python = ">=3.6" +summary = "Python style guide checker" +groups = ["lint"] +files = [ + {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, + {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "C parser in Python" +groups = ["docs"] +marker = "implementation_name == \"pypy\"" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "2.6.3" +requires_python = ">=3.8" +summary = "Data validation using Python type hints" +groups = ["default"] +dependencies = [ + "annotated-types>=0.4.0", + "pydantic-core==2.16.3", + "typing-extensions>=4.6.1", +] +files = [ + {file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"}, + {file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"}, +] + +[[package]] +name = "pydantic-core" +version = "2.16.3" +requires_python = ">=3.8" +summary = "" +groups = ["default"] +dependencies = [ + "typing-extensions!=4.7.0,>=4.6.0", +] +files = [ + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = 
"pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = 
"pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, +] + +[[package]] +name = "pydata-sphinx-theme" +version = "0.14.4" +requires_python = ">=3.8" +summary = "Bootstrap-based Sphinx theme from the PyData community" +groups = ["docs"] +dependencies = [ + "Babel", + "accessible-pygments", + "beautifulsoup4", + "docutils!=0.17.0", + "packaging", + "pygments>=2.7", + "sphinx>=5.0", + "typing-extensions", +] +files = [ + {file = "pydata_sphinx_theme-0.14.4-py3-none-any.whl", hash = "sha256:ac15201f4c2e2e7042b0cad8b30251433c1f92be762ddcefdb4ae68811d918d9"}, + {file = "pydata_sphinx_theme-0.14.4.tar.gz", hash = "sha256:f5d7a2cb7a98e35b9b49d3b02cec373ad28958c2ed5c9b1ffe6aff6c56e9de5b"}, +] + +[[package]] +name = "pyflakes" +version = "2.5.0" +requires_python = ">=3.6" +summary = "passive checker of Python programs" +groups = ["lint"] +files = [ + {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"}, + {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, +] + +[[package]] +name = "pygments" +version = "2.17.2" +requires_python = ">=3.7" +summary = "Pygments is a syntax highlighting package written in Python." 
+groups = ["docs", "test"] +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[[package]] +name = "pyproj" +version = "3.5.0" +requires_python = ">=3.8" +summary = "Python interface to PROJ (cartographic projections and coordinate transformations library)" +groups = ["default"] +dependencies = [ + "certifi", +] +files = [ + {file = "pyproj-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6475ce653880938468a1a1b7321267243909e34b972ba9e53d5982c41d555918"}, + {file = "pyproj-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61e4ad57d89b03a7b173793b31bca8ee110112cde1937ef0f42a70b9120c827d"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdd2021bb6f7f346bfe1d2a358aa109da017d22c4704af2d994e7c7ee0a7a53"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5674923351e76222e2c10c58b5e1ac119d7a46b270d822c463035971b06f724b"}, + {file = "pyproj-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd5e2b6aa255023c4acd0b977590f1f7cc801ba21b4d806fcf6dfac3474ebb83"}, + {file = "pyproj-3.5.0-cp310-cp310-win32.whl", hash = "sha256:6f316a66031a14e9c5a88c91f8b77aa97f5454895674541ed6ab630b682be35d"}, + {file = "pyproj-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:f7c2f4d9681e810cf40239caaca00079930a6d9ee6591139b88d592d36051d82"}, + {file = "pyproj-3.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2b708fd43453b985642b737d4a6e7f1d6a0ab1677ffa4e14cc258537b49224b0"}, + {file = "pyproj-3.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b60d93a200639e8367c6542a964fd0aa2dbd152f256c1831dc18cd5aa470fb8a"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38862fe07316ae12b79d82d298e390973a4f00b684f3c2d037238e20e00610ba"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b65f2a38cd9e16883dbb0f8ae82bdf8f6b79b1b02975c78483ab8428dbbf2f"}, + {file = "pyproj-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b752b7d9c4b08181c7e8c0d9c7f277cbefff42227f34d3310696a87c863d9dd3"}, + {file = "pyproj-3.5.0-cp38-cp38-win32.whl", hash = "sha256:b937215bfbaf404ec8f03ca741fc3f9f2c4c2c5590a02ccddddd820ae3c71331"}, + {file = "pyproj-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:97ed199033c2c770e7eea2ef80ff5e6413426ec2d7ec985b869792f04ab95d05"}, + {file = "pyproj-3.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:052c49fce8b5d55943a35c36ccecb87350c68b48ba95bc02a789770c374ef819"}, + {file = "pyproj-3.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1507138ea28bf2134d31797675380791cc1a7156a3aeda484e65a78a4aba9b62"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02742ef3d846401861a878a61ef7ad911ea7539d6cc4619ddb52dbdf7b45aee"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:385b0341861d3ebc8cad98337a738821dcb548d465576527399f4955ca24b6ed"}, + {file = "pyproj-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fe6bb1b68a35d07378d38be77b5b2f8dd2bea5910c957bfcc7bee55988d3910"}, + {file = "pyproj-3.5.0-cp39-cp39-win32.whl", hash = "sha256:5c4b85ac10d733c42d73a2e6261c8d6745bf52433a31848dd1b6561c9a382da3"}, + {file = 
"pyproj-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:1798ff7d65d9057ebb2d017ffe8403268b8452f24d0428b2140018c25c7fa1bc"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d711517a8487ef3245b08dc82f781a906df9abb3b6cb0ce0486f0eeb823ca570"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:788a5dadb532644a64efe0f5f01bf508c821eb7e984f13a677d56002f1e8a67a"}, + {file = "pyproj-3.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73f7960a97225812f9b1d7aeda5fb83812f38de9441e3476fcc8abb3e2b2f4de"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fde5ece4d2436b5a57c8f5f97b49b5de06a856d03959f836c957d3e609f2de7e"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e08db25b61cf024648d55973cc3d1c3f1d0818fabf594d5f5a8e2318103d2aa0"}, + {file = "pyproj-3.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a87b419a2a352413fbf759ecb66da9da50bd19861c8f26db6a25439125b27b9"}, + {file = "pyproj-3.5.0.tar.gz", hash = "sha256:9859d1591c1863414d875ae0759e72c2cffc01ab989dc64137fbac572cc81bf6"}, +] + +[[package]] +name = "pyproject-api" +version = "1.6.1" +requires_python = ">=3.8" +summary = "API to interact with the python pyproject.toml based projects" +groups = ["lint"] +dependencies = [ + "packaging>=23.1", + "tomli>=2.0.1; python_version < \"3.11\"", +] +files = [ + {file = "pyproject_api-1.6.1-py3-none-any.whl", hash = "sha256:4c0116d60476b0786c88692cf4e325a9814965e2469c5998b830bba16b183675"}, + {file = "pyproject_api-1.6.1.tar.gz", hash = "sha256:1817dc018adc0d1ff9ca1ed8c60e1623d5aaca40814b953af14a9cf9a5cae538"}, +] + +[[package]] +name = "pyproject-flake8" +version = "5.0.4.post1" +summary = "pyproject-flake8 (`pflake8`), a monkey patching wrapper to connect flake8 with pyproject.toml configuration " +groups = ["lint"] +dependencies = [ + "flake8==5.0.4", + "tomli; python_version < \"3.11\"", +] +files = [ + {file = "pyproject-flake8-5.0.4.post1.tar.gz", hash = "sha256:c2dfdf1064f47efbb2e4faf1a32b0b6a6ea67dc4d1debb98d862b0cdee377941"}, + {file = "pyproject_flake8-5.0.4.post1-py2.py3-none-any.whl", hash = "sha256:457e52dde1b7a1f84b5230c70d61afa58ced64a44b81a609f19e972319fa68ed"}, +] + +[[package]] +name = "pyproject-hooks" +version = "1.0.0" +requires_python = ">=3.7" +summary = "Wrappers to call pyproject.toml-based build backend hooks." 
+groups = ["test"] +dependencies = [ + "tomli>=1.1.0; python_version < \"3.11\"", +] +files = [ + {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, + {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"}, +] + +[[package]] +name = "pytest" +version = "8.0.2" +requires_python = ">=3.8" +summary = "pytest: simple powerful testing with Python" +groups = ["test"] +dependencies = [ + "colorama; sys_platform == \"win32\"", + "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", + "iniconfig", + "packaging", + "pluggy<2.0,>=1.3.0", + "tomli>=1.0.0; python_version < \"3.11\"", +] +files = [ + {file = "pytest-8.0.2-py3-none-any.whl", hash = "sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096"}, + {file = "pytest-8.0.2.tar.gz", hash = "sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd"}, +] + +[[package]] +name = "pytest-cov" +version = "4.1.0" +requires_python = ">=3.7" +summary = "Pytest plugin for measuring coverage." +groups = ["test"] +dependencies = [ + "coverage[toml]>=5.2.1", + "pytest>=4.6", +] +files = [ + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, +] + +[[package]] +name = "pytest-mock" +version = "3.12.0" +requires_python = ">=3.8" +summary = "Thin-wrapper around the mock package for easier use with pytest" +groups = ["test"] +dependencies = [ + "pytest>=5.0", +] +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, +] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +summary = "Extensions to the standard Python datetime module" +groups = ["default", "docs"] +dependencies = [ + "six>=1.5", +] +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +requires_python = ">=3.8" +summary = "Read key-value pairs from a .env file and set them as environment variables" +groups = ["default", "test"] +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[[package]] +name = "pytz" +version = "2024.1" +summary = "World timezone definitions, modern and historical" +groups = ["docs"] +marker = "python_version < \"3.9\"" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pywin32" +version = "306" +summary = "Python for Window Extensions" +groups = ["docs"] +marker = "sys_platform == \"win32\" and platform_python_implementation 
!= \"PyPy\"" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +requires_python = ">=3.6" +summary = "YAML parser and emitter for Python" +groups = ["lint"] +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = 
"PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "pyzmq" +version = "25.1.2" +requires_python = ">=3.6" +summary = "Python bindings for 0MQ" +groups = ["docs"] +dependencies = [ + "cffi; implementation_name == \"pypy\"", +] +files = [ + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e624c789359f1a16f83f35e2c705d07663ff2b4d4479bad35621178d8f0f6ea4"}, + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49151b0efece79f6a79d41a461d78535356136ee70084a1c22532fc6383f4ad0"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9a5f194cf730f2b24d6af1f833c14c10f41023da46a7f736f48b6d35061e76e"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:faf79a302f834d9e8304fafdc11d0d042266667ac45209afa57e5efc998e3872"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f51a7b4ead28d3fca8dda53216314a553b0f7a91ee8fc46a72b402a78c3e43d"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0ddd6d71d4ef17ba5a87becf7ddf01b371eaba553c603477679ae817a8d84d75"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:246747b88917e4867e2367b005fc8eefbb4a54b7db363d6c92f89d69abfff4b6"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:00c48ae2fd81e2a50c3485de1b9d5c7c57cd85dc8ec55683eac16846e57ac979"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a68d491fc20762b630e5db2191dd07ff89834086740f70e978bb2ef2668be08"}, + {file = "pyzmq-25.1.2-cp310-cp310-win32.whl", hash = "sha256:09dfe949e83087da88c4a76767df04b22304a682d6154de2c572625c62ad6886"}, + {file = "pyzmq-25.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:fa99973d2ed20417744fca0073390ad65ce225b546febb0580358e36aa90dba6"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bef02cfcbded83473bdd86dd8d3729cd82b2e569b75844fb4ea08fee3c26ae41"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a4b5a4b1192d74853cc71a5e9fd022594573926c2a3a4802020360aa719d8"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8c5f80e578427d4695adac6fdf4370c14a2feafdc8cb35549c219b90652536ae"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5dde6751e857910c1339890f3524de74007958557593b9e7e8c5f01cd919f8a7"}, + {file = 
"pyzmq-25.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea1608dd169da230a0ad602d5b1ebd39807ac96cae1845c3ceed39af08a5c6df"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0f513130c4c361201da9bc69df25a086487250e16b5571ead521b31ff6b02220"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:019744b99da30330798bb37df33549d59d380c78e516e3bab9c9b84f87a9592f"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e2713ef44be5d52dd8b8e2023d706bf66cb22072e97fc71b168e01d25192755"}, + {file = "pyzmq-25.1.2-cp38-cp38-win32.whl", hash = "sha256:07cd61a20a535524906595e09344505a9bd46f1da7a07e504b315d41cd42eb07"}, + {file = "pyzmq-25.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb7e49a17fb8c77d3119d41a4523e432eb0c6932187c37deb6fbb00cc3028088"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:94504ff66f278ab4b7e03e4cba7e7e400cb73bfa9d3d71f58d8972a8dc67e7a6"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd0d50bbf9dca1d0bdea219ae6b40f713a3fb477c06ca3714f208fd69e16fd8"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:004ff469d21e86f0ef0369717351073e0e577428e514c47c8480770d5e24a565"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c0b5ca88a8928147b7b1e2dfa09f3b6c256bc1135a1338536cbc9ea13d3b7add"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c9a79f1d2495b167119d02be7448bfba57fad2a4207c4f68abc0bab4b92925b"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:518efd91c3d8ac9f9b4f7dd0e2b7b8bf1a4fe82a308009016b07eaa48681af82"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1ec23bd7b3a893ae676d0e54ad47d18064e6c5ae1fadc2f195143fb27373f7f6"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db36c27baed588a5a8346b971477b718fdc66cf5b80cbfbd914b4d6d355e44e2"}, + {file = "pyzmq-25.1.2-cp39-cp39-win32.whl", hash = "sha256:39b1067f13aba39d794a24761e385e2eddc26295826530a8c7b6c6c341584289"}, + {file = "pyzmq-25.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:8e9f3fabc445d0ce320ea2c59a75fe3ea591fdbdeebec5db6de530dd4b09412e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8c1d566344aee826b74e472e16edae0a02e2a044f14f7c24e123002dcff1c05"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759cfd391a0996345ba94b6a5110fca9c557ad4166d86a6e81ea526c376a01e8"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c61e346ac34b74028ede1c6b4bcecf649d69b707b3ff9dc0fab453821b04d1e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb8fc1f8d69b411b8ec0b5f1ffbcaf14c1db95b6bccea21d83610987435f1a4"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3c00c9b7d1ca8165c610437ca0c92e7b5607b2f9076f4eb4b095c85d6e680a1d"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:df0c7a16ebb94452d2909b9a7b3337940e9a87a824c4fc1c7c36bb4404cb0cde"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:45999e7f7ed5c390f2e87ece7f6c56bf979fb213550229e711e45ecc7d42ccb8"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", 
hash = "sha256:ac170e9e048b40c605358667aca3d94e98f604a18c44bdb4c102e67070f3ac9b"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b604734bec94f05f81b360a272fc824334267426ae9905ff32dc2be433ab96"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a793ac733e3d895d96f865f1806f160696422554e46d30105807fdc9841b9f7d"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0806175f2ae5ad4b835ecd87f5f85583316b69f17e97786f7443baaf54b9bb98"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ef12e259e7bc317c7597d4f6ef59b97b913e162d83b421dd0db3d6410f17a244"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea253b368eb41116011add00f8d5726762320b1bda892f744c91997b65754d73"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9b1f2ad6498445a941d9a4fee096d387fee436e45cc660e72e768d3d8ee611"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8b14c75979ce932c53b79976a395cb2a8cd3aaf14aef75e8c2cb55a330b9b49d"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:889370d5174a741a62566c003ee8ddba4b04c3f09a97b8000092b7ca83ec9c49"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18fff090441a40ffda8a7f4f18f03dc56ae73f148f1832e109f9bffa85df15"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a6b36f95c98839ad98f8c553d8507644c880cf1e0a57fe5e3a3f3969040882"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4345c9a27f4310afbb9c01750e9461ff33d6fb74cd2456b107525bbeebcb5be3"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3516e0b6224cf6e43e341d56da15fd33bdc37fa0c06af4f029f7d7dfceceabbc"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:146b9b1f29ead41255387fb07be56dc29639262c0f7344f570eecdcd8d683314"}, + {file = "pyzmq-25.1.2.tar.gz", hash = "sha256:93f1aa311e8bb912e34f004cf186407a4e90eec4f0ecc0efd26056bf7eda0226"}, +] + +[[package]] +name = "referencing" +version = "0.33.0" +requires_python = ">=3.8" +summary = "JSON Referencing + Python" +groups = ["docs"] +dependencies = [ + "attrs>=22.2.0", + "rpds-py>=0.7.0", +] +files = [ + {file = "referencing-0.33.0-py3-none-any.whl", hash = "sha256:39240f2ecc770258f28b642dd47fd74bc8b02484de54e1882b74b35ebd779bd5"}, + {file = "referencing-0.33.0.tar.gz", hash = "sha256:c775fedf74bc0f9189c2a3be1c12fd03e8c23f4d371dce795df44e06c5b412f7"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +requires_python = ">=3.7" +summary = "Python HTTP for Humans." 
+groups = ["default", "docs", "test"] +dependencies = [ + "certifi>=2017.4.17", + "charset-normalizer<4,>=2", + "idna<4,>=2.5", + "urllib3<3,>=1.21.1", +] +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "A utility belt for advanced users of python-requests" +groups = ["test"] +dependencies = [ + "requests<3.0.0,>=2.0.1", +] +files = [ + {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, + {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, +] + +[[package]] +name = "resolvelib" +version = "1.0.1" +summary = "Resolve abstract dependencies into concrete ones" +groups = ["test"] +files = [ + {file = "resolvelib-1.0.1-py2.py3-none-any.whl", hash = "sha256:d2da45d1a8dfee81bdd591647783e340ef3bcb104b54c383f70d422ef5cc7dbf"}, + {file = "resolvelib-1.0.1.tar.gz", hash = "sha256:04ce76cbd63fded2078ce224785da6ecd42b9564b1390793f64ddecbe997b309"}, +] + +[[package]] +name = "rich" +version = "13.7.1" +requires_python = ">=3.7.0" +summary = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +groups = ["test"] +dependencies = [ + "markdown-it-py>=2.2.0", + "pygments<3.0.0,>=2.13.0", + "typing-extensions<5.0,>=4.0.0; python_version < \"3.9\"", +] +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[[package]] +name = "rpds-py" +version = "0.18.0" +requires_python = ">=3.8" +summary = "Python bindings to Rust's persistent data structures (rpds)" +groups = ["docs"] +files = [ + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, +] + +[[package]] +name = "ruamel-yaml" +version = "0.18.6" +requires_python = ">=3.7" +summary = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +groups = ["docs"] +dependencies = [ + "ruamel-yaml-clib>=0.2.7; platform_python_implementation == \"CPython\" and python_version < \"3.13\"", +] +files = [ + {file = 
"ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, +] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.8" +requires_python = ">=3.6" +summary = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +groups = ["docs"] +marker = "platform_python_implementation == \"CPython\" and python_version < \"3.13\"" +files = [ + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = 
"ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, + {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, +] + +[[package]] +name = "s3fs" +version = "2024.2.0" +requires_python = ">= 3.8" +summary = "Convenient Filesystem interface over S3" +groups = ["default"] +dependencies = [ + "aiobotocore<3.0.0,>=2.5.4", + "aiohttp!=4.0.0a0,!=4.0.0a1", + "fsspec==2024.2.0", +] +files = [ + {file = "s3fs-2024.2.0-py3-none-any.whl", hash = "sha256:c140de37175c157cb662aa6ad7423365df732ac5f10ef5bf7b76078c6333a942"}, + {file = "s3fs-2024.2.0.tar.gz", hash = "sha256:f8064f522ad088b56b043047c825734847c0269df19f2613c956d4c20de15b62"}, +] + +[[package]] +name = "scipy" +version = "1.10.1" +requires_python = "<3.12,>=3.8" +summary = "Fundamental algorithms for scientific computing in Python" +groups = ["default"] +dependencies = [ + "numpy<1.27.0,>=1.19.5", +] +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +] + +[[package]] +name = "setuptools" +version = "69.1.1" +requires_python = ">=3.8" +summary = "Easily download, build, install, upgrade, and uninstall Python packages" +groups = ["lint"] +files = [ + {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"}, + {file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"}, +] + +[[package]] +name = "shapely" +version = "2.0.3" +requires_python = ">=3.7" +summary = "Manipulation and analysis of geometric objects" +groups = ["default"] +dependencies = [ + "numpy<2,>=1.14", +] +files = [ + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, + {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, + {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, + {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, + {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, + {file = 
"shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, + {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, + {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, + {file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +requires_python = ">=3.7" +summary = "Tool to Detect Surrounding Shell" +groups = ["test"] +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "six" +version = "1.16.0" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +summary = "Python 2 and 3 compatibility utilities" +groups = ["default", "docs"] +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +summary = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +groups = ["docs"] +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "soupsieve" +version = "2.5" +requires_python = ">=3.8" +summary = "A modern CSS selector implementation for Beautiful Soup." 
+groups = ["docs"] +files = [ + {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, + {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, +] + +[[package]] +name = "sphinx" +version = "7.1.2" +requires_python = ">=3.8" +summary = "Python documentation generator" +groups = ["docs"] +dependencies = [ + "Jinja2>=3.0", + "Pygments>=2.13", + "alabaster<0.8,>=0.7", + "babel>=2.9", + "colorama>=0.4.5; sys_platform == \"win32\"", + "docutils<0.21,>=0.18.1", + "imagesize>=1.3", + "importlib-metadata>=4.8; python_version < \"3.10\"", + "packaging>=21.0", + "requests>=2.25.0", + "snowballstemmer>=2.0", + "sphinxcontrib-applehelp", + "sphinxcontrib-devhelp", + "sphinxcontrib-htmlhelp>=2.0.0", + "sphinxcontrib-jsmath", + "sphinxcontrib-qthelp", + "sphinxcontrib-serializinghtml>=1.1.5", +] +files = [ + {file = "sphinx-7.1.2-py3-none-any.whl", hash = "sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe"}, + {file = "sphinx-7.1.2.tar.gz", hash = "sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f"}, +] + +[[package]] +name = "sphinx-autodoc-typehints" +version = "2.0.0" +requires_python = ">=3.8" +summary = "Type hints (PEP 484) support for the Sphinx autodoc extension" +groups = ["docs"] +dependencies = [ + "sphinx>=7.1.2", +] +files = [ + {file = "sphinx_autodoc_typehints-2.0.0-py3-none-any.whl", hash = "sha256:12c0e161f6fe191c2cdfd8fa3caea271f5387d9fbc67ebcd6f4f1f24ce880993"}, + {file = "sphinx_autodoc_typehints-2.0.0.tar.gz", hash = "sha256:7f2cdac2e70fd9787926b6e9e541cd4ded1e838d2b46fda2a1bb0a75ec5b7f3a"}, +] + +[[package]] +name = "sphinx-design" +version = "0.5.0" +requires_python = ">=3.8" +summary = "A sphinx extension for designing beautiful, view size responsive web components." +groups = ["docs"] +dependencies = [ + "sphinx<8,>=5", +] +files = [ + {file = "sphinx_design-0.5.0-py3-none-any.whl", hash = "sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e"}, + {file = "sphinx_design-0.5.0.tar.gz", hash = "sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00"}, +] + +[[package]] +name = "sphinx-jinja2-compat" +version = "0.2.0.post1" +requires_python = ">=3.6" +summary = "Patches Jinja2 v3 to restore compatibility with earlier Sphinx versions." 
+groups = ["docs"] +dependencies = [ + "jinja2>=2.10", + "markupsafe>=1", +] +files = [ + {file = "sphinx_jinja2_compat-0.2.0.post1-py3-none-any.whl", hash = "sha256:f9d329174bdde8db19dc12c62528367196eb2f6b46c91754eca604acd0c0f6ad"}, + {file = "sphinx_jinja2_compat-0.2.0.post1.tar.gz", hash = "sha256:974289a12a9f402108dead621e9c15f7004e945d5cfcaea8d6419e94d3fa95a3"}, +] + +[[package]] +name = "sphinx-prompt" +version = "1.7.0" +requires_python = ">=3.8,<3.11" +summary = "Sphinx directive to add unselectable prompt" +groups = ["docs"] +dependencies = [ + "Sphinx<8.0.0,>=7.0.0", + "docutils", + "pygments", +] +files = [ + {file = "sphinx_prompt-1.7.0-py3-none-any.whl", hash = "sha256:7ee415d07f90f7ce1577a2c4c7f2560694af008926a69b4c940f20737621b089"}, + {file = "sphinx_prompt-1.7.0.tar.gz", hash = "sha256:f95c0b44d73621fc0b493f84b0c2866eb8741140ef0260c20a0f7578457f44ad"}, +] + +[[package]] +name = "sphinx-tabs" +version = "3.4.5" +requires_python = "~=3.7" +summary = "Tabbed views for Sphinx" +groups = ["docs"] +dependencies = [ + "docutils", + "pygments", + "sphinx", +] +files = [ + {file = "sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"}, + {file = "sphinx_tabs-3.4.5-py3-none-any.whl", hash = "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09"}, +] + +[[package]] +name = "sphinx-toolbox" +version = "3.5.0" +requires_python = ">=3.7" +summary = "Box of handy tools for Sphinx 🧰 📔" +groups = ["docs"] +dependencies = [ + "apeye>=0.4.0", + "autodocsumm>=0.2.0", + "beautifulsoup4>=4.9.1", + "cachecontrol[filecache]>=0.13.0", + "dict2css>=0.2.3", + "docutils>=0.16", + "domdf-python-tools>=2.9.0", + "filelock>=3.8.0", + "html5lib>=1.1", + "ruamel-yaml>=0.16.12", + "sphinx-autodoc-typehints>=1.11.1", + "sphinx-jinja2-compat>=0.1.0", + "sphinx-prompt>=1.1.0", + "sphinx-tabs<3.5.0,>=1.2.1", + "sphinx>=3.2.0", + "tabulate>=0.8.7", + "typing-extensions!=3.10.0.1,>=3.7.4.3", +] +files = [ + {file = "sphinx_toolbox-3.5.0-py3-none-any.whl", hash = "sha256:20dfd3566717db6f2da7a400a54dc4b946f064fb31250fa44802d54cfb9b8a03"}, + {file = "sphinx_toolbox-3.5.0.tar.gz", hash = "sha256:e5b5a7153f1997572d71a06aaf6cec225483492ec2c60097a84f15aad6df18b7"}, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "1.0.4" +requires_python = ">=3.8" +summary = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, + {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "1.0.2" +requires_python = ">=3.5" +summary = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
+groups = ["docs"] +files = [ + {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, + {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.0.1" +requires_python = ">=3.8" +summary = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, + {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +requires_python = ">=3.5" +summary = "A sphinx extension which renders display math in HTML via JavaScript" +groups = ["docs"] +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "1.0.3" +requires_python = ">=3.5" +summary = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." +groups = ["docs"] +files = [ + {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, + {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "1.1.5" +requires_python = ">=3.5" +summary = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
+groups = ["docs"] +files = [ + {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, + {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, +] + +[[package]] +name = "tabulate" +version = "0.9.0" +requires_python = ">=3.7" +summary = "Pretty-print tabular data" +groups = ["docs"] +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[[package]] +name = "tinycss2" +version = "1.2.1" +requires_python = ">=3.7" +summary = "A tiny CSS parser" +groups = ["docs"] +dependencies = [ + "webencodings>=0.4", +] +files = [ + {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, + {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +requires_python = ">=3.7" +summary = "A lil' TOML parser" +groups = ["lint", "test"] +marker = "python_version < \"3.11\"" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.12.4" +requires_python = ">=3.7" +summary = "Style preserving TOML library" +groups = ["test"] +files = [ + {file = "tomlkit-0.12.4-py3-none-any.whl", hash = "sha256:5cd82d48a3dd89dee1f9d64420aa20ae65cfbd00668d6f094d7578a78efbb77b"}, + {file = "tomlkit-0.12.4.tar.gz", hash = "sha256:7ca1cfc12232806517a8515047ba66a19369e71edf2439d0f5824f91032b6cc3"}, +] + +[[package]] +name = "tornado" +version = "6.4" +requires_python = ">= 3.8" +summary = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+groups = ["docs"] +files = [ + {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, + {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, + {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, + {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, + {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, +] + +[[package]] +name = "tox" +version = "4.13.0" +requires_python = ">=3.8" +summary = "tox is a generic virtualenv management and test command line tool" +groups = ["lint"] +dependencies = [ + "cachetools>=5.3.2", + "chardet>=5.2", + "colorama>=0.4.6", + "filelock>=3.13.1", + "packaging>=23.2", + "platformdirs>=4.1", + "pluggy>=1.3", + "pyproject-api>=1.6.1", + "tomli>=2.0.1; python_version < \"3.11\"", + "virtualenv>=20.25", +] +files = [ + {file = "tox-4.13.0-py3-none-any.whl", hash = "sha256:1143c7e2489c68026a55d3d4ae84c02c449f073b28e62f80e3e440a3b72a4afa"}, + {file = "tox-4.13.0.tar.gz", hash = "sha256:dd789a554c16c4b532924ba393c92fc8991323c4b3d466712bfecc8c9b9f24f7"}, +] + +[[package]] +name = "traitlets" +version = "5.14.1" +requires_python = ">=3.8" +summary = "Traitlets Python configuration system" +groups = ["docs"] +files = [ + {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, + {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, +] + +[[package]] +name = "truststore" +version = "0.8.0" +requires_python = ">= 3.10" +summary = "Verify certificates using native system trust stores" +groups = ["test"] +marker = "python_version >= \"3.10\"" +files = [ + {file = "truststore-0.8.0-py3-none-any.whl", hash = "sha256:e37a5642ae9fc48caa8f120b6283d77225d600d224965a672c9e8ef49ce4bb4c"}, + {file = "truststore-0.8.0.tar.gz", hash = "sha256:dc70da89634944a579bfeec70a7a4523c53ffdb3cf52d1bb4a431fda278ddb96"}, +] + +[[package]] +name = "typing-extensions" +version = "4.10.0" +requires_python = ">=3.8" +summary = "Backported and Experimental Type Hints for Python 3.8+" +groups = ["default", "docs", "lint", "test"] +files = [ + {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = 
"sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, + {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, +] + +[[package]] +name = "unearth" +version = "0.14.0" +requires_python = ">=3.8" +summary = "A utility to fetch and download python packages" +groups = ["test"] +dependencies = [ + "packaging>=20", + "requests>=2.25", +] +files = [ + {file = "unearth-0.14.0-py3-none-any.whl", hash = "sha256:a2b937ca22198043f5360192bce38708f11ddc5d4cdea973ee38583219b97d5d"}, + {file = "unearth-0.14.0.tar.gz", hash = "sha256:f3cddfb94ac0f865fbcf964231556ef7183010379c00b01205517a50c78a186d"}, +] + +[[package]] +name = "urllib3" +version = "1.26.18" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +summary = "HTTP library with thread-safe connection pooling, file post, and more." +groups = ["default", "docs", "test"] +files = [ + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, +] + +[[package]] +name = "virtualenv" +version = "20.25.1" +requires_python = ">=3.7" +summary = "Virtual Python Environment builder" +groups = ["lint", "test"] +dependencies = [ + "distlib<1,>=0.3.7", + "filelock<4,>=3.12.2", + "platformdirs<5,>=3.9.1", +] +files = [ + {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, + {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +summary = "Character encoding aliases for legacy web content" +groups = ["docs"] +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "wrapt" +version = "1.16.0" +requires_python = ">=3.6" +summary = "Module for decorators, wrappers and monkey patching." 
+groups = ["default"] +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "yarl" +version = "1.9.4" +requires_python = ">=3.7" +summary = "Yet another URL library" +groups = ["default"] +dependencies = [ + "idna>=2.0", + "multidict>=4.0", +] +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = 
"yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[[package]] +name = "zarr" +version = "2.16.1" +requires_python = ">=3.8" +summary = "An implementation of chunked, compressed, N-dimensional arrays for Python" +groups = ["default"] +dependencies = [ + "asciitree", + "fasteners", + "numcodecs>=0.10.0", + "numpy!=1.21.0,>=1.20", +] +files = [ + {file = "zarr-2.16.1-py3-none-any.whl", hash = "sha256:de4882433ccb5b42cc1ec9872b95e64ca3a13581424666b28ed265ad76c7056f"}, + {file = "zarr-2.16.1.tar.gz", hash = "sha256:4276cf4b4a653431042cd53ff2282bc4d292a6842411e88529964504fb073286"}, +] + +[[package]] +name = "zipp" +version = "3.17.0" +requires_python = ">=3.8" +summary = "Backport of pathlib-compatible object wrapper for zip files" +groups = ["default", "docs", "test"] +marker = "python_version < \"3.10\"" +files = [ + {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, + {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, +] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..9ebcc15b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,151 @@ +[project] +name = "physrisk-lib" +# Could test changing the below to be sourced "dynamically" +# dynamic = ['version'] +version = "0.32.0" +description = "OS-Climate Physical Risk Library" +authors = [ + {name = "Joe Moorhouse",email = "5102656+joemoorhouse@users.noreply.github.com"}, +] +requires-python = ">=3.8,<3.11" +readme = "README.md" +license = {file = "LICENSE"} +keywords = ["Physical", "Climate", "Risk", "Finance"] + +classifiers = [ + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: Unix", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming 
Language :: Python :: 3.11", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.8", + "Topic :: Office/Business :: Financial", + "Topic :: Scientific/Engineering", + "Topic :: Software Development", +] + +dependencies = [ + "affine>=2.4.0", + "dependency-injector>=4.41.0", + "numba>=0.56.4", + "numpy>=1.22.0", + "pint", + "pillow>=10.2.0", + "pydantic>=2.4.2", + "pyproj>=3.5.0", + "python-dotenv>=0.19.2", + "requests>=2.27.1", + "scipy>=1.7.3", + "shapely>=2.0.1", + "s3fs>=2022.1.0", + "zarr>=2.10.3" +] + +[project.urls] +Homepage = "https://github.com/os-climate/physrisk" +Repository = "https://github.com/os-climate/physrisk" +Downloads = "https://github.com/os-climate/physrisk/releases" +"Bug Tracker" = "https://github.com/os-climate/physrisk/issues" +Documentation = "https://github.com/os-climate/physrisk/tree/main/docs" +"Source Code" = "https://github.com/os-climate/physrisk" + +[metadata] +license-files = ["LICENSE"] + +[build-system] +requires = [ + "setuptools>=42", + "wheel", + "pdm" +] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = ["src"] +include = ["physrisk*"] + +[tool.pdm.dev-dependencies] +test = [ + "pdm[pytest]", + "pytest", + "pytest-cov", + "sphinx-pyproject" +] +lint = [ + "isort", + "black", + "pyproject-flake8", + "flake8", + "mypy", + "pre-commit", + "tox" +] +"black[jupyter]" = [] +pandas = [] +dev = [ + "pandas>=2.0.3", + "geopandas>=0.13.2" +] + +[tool.pdm.scripts] +pre_release = "scripts/dev-versioning.sh" +release = "scripts/release-versioning.sh" +test = "pytest" +tox = "tox" +docs = { shell = "cd docs && mkdocs serve", help = "Start the dev server for doc preview" } +lint = "pre-commit run --all-files" +complete = { call = "tasks.complete:main", help = "Create autocomplete files for bash and fish" } + +[tool.pytest.ini_options] +testpaths = "tests" +addopts = "-v" +#addopts = "--cov --cov-report html --cov-report term-missing --cov-fail-under 95" + +[tool.mypy] +warn_unreachable = true +ignore_missing_imports = true + +[tool.isort] +profile = "black" +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 120 + +[tool.coverage.run] +source = ["src"] +omit = ["tests/*"] +relative_files = true + +[tool.yapf] +blank_line_before_nested_class_or_def = true +column_limit = 88 + +[tool.black] +line-length = 120 +exclude = ''' +/( + \.git + | \.tox + | venv + | \.venv + | \.env + | build + | dist +)/ +''' + +[tool.flake8] +max-line-length = "120" +extend-ignore = [ + "E501", +] diff --git a/scripts/dev-versioning.sh b/scripts/dev-versioning.sh new file mode 100755 index 00000000..d7522687 --- /dev/null +++ b/scripts/dev-versioning.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +#set -x + +FILEPATH="pyproject.toml" + +if [ $# -ne 1 ] && [ $# -ne 0 ]; then + echo "Usage: $0 [version-string]" + echo "Substitutes the version string in pyproject.toml"; exit 1 +elif [ $# -eq 1 ]; then + VERSION=$1 + echo "Received version string: $VERSION" +else + datetime=$(date +'%Y%m%d%H%M') + pyver=$(python --version | awk '{print $2}') + VERSION="${pyver}.${datetime}" + echo "Defined version string: $VERSION" +fi + +echo "Performing string substitution on: $FILEPATH" +sed -i "s/.*version =.*/version = \"$VERSION\"/" "$FILEPATH" +echo "Versioning set to:" +grep version "$FILEPATH" +echo "Script completed!"; exit 0 diff --git a/scripts/version.sh b/scripts/version.sh new file mode 100755
index 00000000..05181f51 --- /dev/null +++ b/scripts/version.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +#set -x + +FILEPATH="pyproject.toml" + +grep "version.*=" "$FILEPATH" | tr -s ' ' | tr -d '"' | tr -d "'" | cut -d' ' -f3 diff --git a/setup.py b/setup.py deleted file mode 100644 index 4ca48a80..00000000 --- a/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Package manifest for this template repo.""" - -from setuptools import find_packages, setup - -__version__ = "0.1.0" - -setup( - name="src", - packages=find_packages(), - version=__version__, - description="template for the team to use", - author="aicoe-aiops", - license="", - install_requires=["click", "python-dotenv>=0.5.1"], -) diff --git a/src/data/__init__.py b/src/data/__init__.py deleted file mode 100644 index db5d274d..00000000 --- a/src/data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Data collection module.""" diff --git a/src/data/make_dataset.py b/src/data/make_dataset.py deleted file mode 100644 index 60ed23be..00000000 --- a/src/data/make_dataset.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Data collection code.""" -import click -import logging -from pathlib import Path -from dotenv import find_dotenv, load_dotenv - - -@click.command() -@click.argument("input_filepath", type=click.Path(exists=True)) -@click.argument("output_filepath", type=click.Path()) -def main(input_filepath, output_filepath): - """Run data processing scripts to turn raw data to cleaned data. - - Transforms raw data from (../raw) into cleaned data ready to be analyzed - (saved in ../processed). - """ - logger = logging.getLogger(__name__) - logger.info("making final data set from raw data") - - -if __name__ == "__main__": - log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" - logging.basicConfig(level=logging.INFO, format=log_fmt) - - # not used in this stub but often useful for finding various files - project_dir = Path(__file__).resolve().parents[2] - - # find .env automagically by walking up directories until it's found, then - # load up the .env entries as environment variables - load_dotenv(find_dotenv()) - - main() diff --git a/src/features/__init__.py b/src/features/__init__.py deleted file mode 100644 index 0482c9bf..00000000 --- a/src/features/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Feature extraction module.""" diff --git a/src/features/build_features.py b/src/features/build_features.py deleted file mode 100644 index 9adb1629..00000000 --- a/src/features/build_features.py +++ /dev/null @@ -1 +0,0 @@ -"""Feature extraction code.""" diff --git a/src/models/__init__.py b/src/models/__init__.py deleted file mode 100644 index cbd01c12..00000000 --- a/src/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Module for models.""" diff --git a/src/models/predict_model.py b/src/models/predict_model.py deleted file mode 100644 index d42b0c8d..00000000 --- a/src/models/predict_model.py +++ /dev/null @@ -1 +0,0 @@ -"""Here goes the prediction code.""" diff --git a/src/models/train_model.py b/src/models/train_model.py deleted file mode 100644 index 75dae17e..00000000 --- a/src/models/train_model.py +++ /dev/null @@ -1 +0,0 @@ -"""Here goes the training code.""" diff --git a/src/physrisk/__init__.py b/src/physrisk/__init__.py index 757bb4cf..e69de29b 100644 --- a/src/physrisk/__init__.py +++ b/src/physrisk/__init__.py @@ -1,4 +0,0 @@ -from physrisk.kernel import Asset, AssetEventDistrib, ExceedanceCurve, VulnerabilityDistrib -#from physrisk.kernel import get_impact_distrib -#from physrisk.kernel import Drought, Inundation -#from physrisk.kernel import 
calculate_impacts \ No newline at end of file diff --git a/src/__init__.py b/src/physrisk/api/__init__.py similarity index 100% rename from src/__init__.py rename to src/physrisk/api/__init__.py diff --git a/src/physrisk/data/hazard/__init__.py b/src/physrisk/api/v1/__init__.py similarity index 100% rename from src/physrisk/data/hazard/__init__.py rename to src/physrisk/api/v1/__init__.py diff --git a/src/physrisk/api/v1/common.py b/src/physrisk/api/v1/common.py new file mode 100644 index 00000000..60d9b67c --- /dev/null +++ b/src/physrisk/api/v1/common.py @@ -0,0 +1,161 @@ +from typing import Dict, List, Optional + +import numpy as np +from pydantic import BaseModel, Field + + +class TypedArray(np.ndarray): + @classmethod + def __get_validators__(cls): + yield cls.validate_type + + @classmethod + def validate_type(cls, val): + return np.array(val, dtype=cls.inner_type) # type: ignore + + +class ArrayMeta(type): + def __getitem__(cls, t): + return type("Array", (TypedArray,), {"inner_type": t}) + + +class Array(np.ndarray, metaclass=ArrayMeta): + pass + + +class Asset(BaseModel): + """Defines an asset. An asset is identified first by its asset_class and then by its type within the class. + An asset's value may be impacted through damage or through disruption + disruption being reduction of an asset's ability to generate cashflows + (or equivalent value, e.g. by reducing expenses or increasing sales). + """ + + asset_class: str = Field( + description="name of asset class; corresponds to physrisk class names, e.g. PowerGeneratingAsset" + ) + latitude: float = Field(description="Latitude in degrees") + longitude: float = Field(description="Longitude in degrees") + type: Optional[str] = Field(None, description="Type of the asset //") + location: Optional[str] = Field( + None, description="Location (e.g. Africa, Asia, Europe, Global, Oceania, North America, South America)" + ) + capacity: Optional[float] = Field(None, description="Power generation capacity") + attributes: Optional[Dict[str, str]] = Field( + None, description="Bespoke attributes (e.g. number of storeys, structure type, occupancy type)" + ) + + +class Assets(BaseModel): + """Defines a collection of assets.""" + + items: List[Asset] + + +class BaseHazardRequest(BaseModel): + group_ids: List[str] = Field( + ["public"], + description="""List of data groups which can be used to service the request, + e.g. 'osc': available to OS-Climate members (e.g. pending license decision), + 'public'.""", + ) + + +class Country(BaseModel): + """Country information.""" + + country: str + continent: str + country_iso_a3: str + + +class Countries(BaseModel): + """List of Country.""" + + items: List[Country] + + +class IntensityCurve(BaseModel): + """Hazard indicator intensity curve. Acute hazards are parameterized by event intensities and + return periods in years. Chronic hazards are parameterized by a set of index values. + Index values are defined per indicator.""" + + intensities: List[float] = Field([], description="Hazard indicator intensities.") + return_periods: Optional[List[float]] = Field( + [], description="[Deprecated] Return period in years in the case of an acute hazard." + ) + index_values: Optional[List[float]] = Field( + [], + description="Set of index values. \ + This is return period in years in the case of an acute hazard or \ + a set of indicator value thresholds in the case of a multi-threshold chronic hazard.", + ) + index_name: str = Field( + "", + description="Name of the index. 
In the case of an acute hazard this is 'return period'; \ + for a multi-threshold chronic hazard this is 'threshold'.", + ) + + +class ExceedanceCurve(BaseModel): + """General exceedance curve (e.g. hazard, impact).""" + + values: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + exceed_probabilities: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + + class Config: + arbitrary_types_allowed = True + + +class Distribution(BaseModel): + """General probability distribution (e.g. hazard, impact).""" + + bin_edges: np.ndarray = Field(default_factory=lambda: np.zeros(11), description="") + probabilities: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + + class Config: + arbitrary_types_allowed = True + + +class HazardEventDistrib(BaseModel): + """Intensity curve of an acute hazard.""" + + intensity_bin_edges: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + probabilities: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + + class Config: + arbitrary_types_allowed = True + + +class VulnerabilityCurve(BaseModel): + """Defines a damage or disruption curve.""" + + asset_type: str = Field(...) + location: str = Field(...) + event_type: str = Field(description="hazard event type, e.g. RiverineInundation") + impact_type: str = Field(description="'Damage' or 'Disruption'") + # intensity: Array = Field(...) + # intensity: np.ndarray = np.zeros(1) #Field(default_factory=lambda: np.zeros(1)) + intensity: List[float] = Field(...) + intensity_units: str = Field(description="units of the intensity") + impact_mean: List[float] = Field(description="mean impact (damage or disruption)") + impact_std: List[float] = Field(description="standard deviation of impact (damage or disruption)") + + class Config: + arbitrary_types_allowed = True + + +class VulnerabilityCurves(BaseModel): + """List of VulnerabilityCurve.""" + + items: List[VulnerabilityCurve] + + +class VulnerabilityDistrib(BaseModel): + """Defines a vulnerability matrix.""" + + intensity_bin_edges: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + impact_bin_edges: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + prob_matrix: np.ndarray = Field(default_factory=lambda: np.zeros(10), description="") + + class Config: + arbitrary_types_allowed = True
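For orientation, a minimal sketch of how the request models in common.py above might be populated. This is not part of the diff; the coordinates, capacity and type are invented, and serialization assumes pydantic v2 (which the project pins as pydantic>=2.4.2):

```python
# Illustrative only: populating the Asset/Assets models above for a request.
# The coordinates, capacity and type are invented example values.
from physrisk.api.v1.common import Asset, Assets

portfolio = Assets(
    items=[
        Asset(
            asset_class="PowerGeneratingAsset",
            latitude=34.556,
            longitude=69.4787,
            type="Gas",
            location="Asia",
            capacity=1800.0,
        )
    ]
)
payload = portfolio.model_dump_json()  # JSON body for a request (pydantic v2)
```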
diff --git a/src/physrisk/api/v1/example_portfolios.py b/src/physrisk/api/v1/example_portfolios.py new file mode 100644 index 00000000..e1740fff --- /dev/null +++ b/src/physrisk/api/v1/example_portfolios.py @@ -0,0 +1,13 @@ +from pydantic import BaseModel + +from physrisk.api.v1.common import Assets + + +class ExamplePortfoliosRequest(BaseModel): + """Example portfolios request.""" + + +class ExamplePortfoliosResponse(BaseModel): + """Example portfolios response.""" + + assets: Assets diff --git a/src/physrisk/api/v1/exposure_req_resp.py b/src/physrisk/api/v1/exposure_req_resp.py new file mode 100644 index 00000000..41594272 --- /dev/null +++ b/src/physrisk/api/v1/exposure_req_resp.py @@ -0,0 +1,48 @@ +from typing import Dict, List, Optional + +from pydantic import BaseModel, Field + +from physrisk.api.v1.common import Assets +from physrisk.api.v1.impact_req_resp import CalcSettings + + +class AssetExposureRequest(BaseModel): + """Asset exposure calculation request.""" + + assets: Assets + calc_settings: CalcSettings = Field( + default_factory=CalcSettings, description="Interpolation method." # type:ignore + ) + scenario: str = Field("rcp8p5", description="Name of scenario ('rcp8p5')") + year: int = Field( + 2050, + description="Projection year (2030, 2050, 2080). Any year before 2030, e.g. 1980, is treated as historical.", + ) + provider_max_requests: Dict[str, int] = Field( + {}, + description="The maximum permitted number of \ + requests to external providers. This setting is intended in particular for paid-for data. The key \ + is the provider ID and the value is the maximum permitted requests.", + ) + + +class Exposure(BaseModel): + category: str + value: Optional[float] + + +class AssetExposure(BaseModel): + """Exposure at asset level. Each asset can have exposures for multiple hazard types.""" + + asset_id: Optional[str] = Field( + None, + description="""Asset identifier; will appear if provided in the request + otherwise order of assets in response is identical to order of assets in request.""", + ) + exposures: Dict[str, Exposure] = Field({}, description="Category (value) for each hazard type (key).") + + +class AssetExposureResponse(BaseModel): + """Response to exposure request.""" + + items: List[AssetExposure] diff --git a/src/physrisk/api/v1/hazard_data.py b/src/physrisk/api/v1/hazard_data.py new file mode 100644 index 00000000..92b54367 --- /dev/null +++ b/src/physrisk/api/v1/hazard_data.py @@ -0,0 +1,208 @@ +from enum import Flag, auto +from typing import Dict, Iterable, List, Optional, Tuple + +from pydantic import BaseModel, Field + +from physrisk.api.v1.common import BaseHazardRequest, IntensityCurve + + +class Colormap(BaseModel): + """Provides details of colormap.""" + + min_index: Optional[int] = Field( + 1, description="Index of colormap minimum. Constant min for a group of maps can facilitate comparison." + ) + min_value: float = Field( + description="Value of colormap minimum. Constant min for a group of maps can facilitate comparison." + ) + max_index: Optional[int] = Field( + 255, description="Index of colormap maximum. Constant max for a group of maps can facilitate comparison." + ) + max_value: float = Field( + description="Value of colormap maximum. Constant max for a group of maps can facilitate comparison." + ) + name: str = Field(description="Name of colormap, e.g. 'flare', 'heating'.") + nodata_index: Optional[int] = Field(0, description="Index used for no data.") + units: str = Field(description="Units, e.g. 'degree days', 'metres'.") + + +class MapInfo(BaseModel): + """Provides information about map layer.""" + + colormap: Optional[Colormap] = Field(description="Details of colormap.") + path: str = Field( + description="Name of array reprojected to Web Mercator for on-the-fly display or to hash to obtain tile ID. If not supplied, convention is to add '_map' to path." # noqa + ) + bounds: List[Tuple[float, float]] = Field( + [(-180.0, 85.0), (180.0, 85.0), (180.0, -85.0), (-180.0, -85.0)], + description="Bounds (top/left, top/right, bottom/right, bottom/left) as degrees. Note applied to map reprojected into Web Mercator CRS.", # noqa + ) + # note that the bounds should be consistent with the array attributes + source: Optional[str] = Field( + description="""Source of map image. These are + 'map_array': single Mercator projection array at path above + 'map_array_pyramid': pyramid of Mercator projection arrays + 'mapbox'.
+ """ + ) + + +class Period(BaseModel): + """Provides information about a period, which currently corresponds to a year, belonging to a scenario.""" + + year: int + map_id: str = Field(description="If present, identifier to be used for looking up map tiles from server.") + + +class Scenario(BaseModel): + """Scenario ID and the list of available years for that scenario e.g. RCP8.5 = 'rcp8.5'""" + + id: str + years: List[int] + # periods: Optional[List[Period]] + + +def expanded(item: str, key: str, param: str): + return item and item.replace("{" + key + "}", param) + + +class HazardResource(BaseModel): + """Provides information about a set of hazard indicators, including available scenarios and years.""" + + hazard_type: str = Field(description="Type of hazard.") + group_id: Optional[str] = Field("public", description="Identifier of the resource group (used for authentication).") + path: str = Field(description="Full path to the indicator array.") + indicator_id: str = Field( + description="Identifier of the hazard indicator (i.e. the modelled quantity), e.g. 'flood_depth'." + ) + indicator_model_id: Optional[str] = Field( + None, + description="Identifier specifying the type of model used in the derivation of the indicator " + "(e.g. whether flood model includes impact of sea-level rise).", + ) + indicator_model_gcm: str = Field( + description="Identifier of general circulation model(s) used in the derivation of the indicator." + ) + params: Dict[str, List[str]] = Field({}, description="Parameters used to expand wild-carded fields.") + display_name: str = Field(description="Text used to display indicator.") + display_groups: List[str] = Field([], description="Text used to group the (expanded) indicators for display.") + description: str = Field( + description="Brief description in mark down of the indicator and model that generated the indicator." + ) + map: Optional[MapInfo] = Field(None, description="Optional information used for display of the indicator in a map.") + scenarios: List[Scenario] = Field(description="Climate change scenarios for which the indicator is available.") + units: str = Field(description="Units of the hazard indicator.") + + def expand(self): + keys = list(self.params.keys()) + return expand_resource(self, keys, self.params) + + def key(self): + """Unique key for the resource. array_path should be unique, although indicator_id is typically not. + Vulnerability models request a hazard indicator by indicator_id from the Hazard Model. The Hazard Model + selects based on its own logic (e.g. 
selects a particular General Circulation Model).""" + return self.path + + +def expand(item: str, key: str, param: str): + return item and item.replace("{" + key + "}", param) + + +def expand_resource( + resource: HazardResource, keys: List[str], params: Dict[str, List[str]] +) -> Iterable[HazardResource]: + if len(keys) == 0: + yield resource.model_copy(deep=True, update={"params": {}}) + else: + keys = keys.copy() + key = keys.pop() + for item in expand_resource(resource, keys, params): + for param in params[key]: + yield item.model_copy( + deep=True, + update={ + "indicator_id": expand(item.indicator_id, key, param), + "indicator_model_gcm": expand(item.indicator_model_gcm, key, param), + "display_name": expand(item.display_name, key, param), + "path": expand(item.path, key, param), + "map": ( + None + if item.map is None + else ( + item.map.model_copy( + deep=True, + update={ + "path": expand(item.map.path if item.map.path is not None else "", key, param) + }, + ) + ) + ), + }, + )
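To make the wild-card expansion above concrete, a sketch with invented field values, showing a resource parameterised by a `{gcm}` placeholder:

```python
# Illustrative sketch of wild-card expansion; all field values are invented.
# Each "{gcm}" placeholder is replaced with the values in params["gcm"].
from physrisk.api.v1.hazard_data import HazardResource, Scenario

resource = HazardResource(
    hazard_type="RiverineInundation",
    path="inundation/wri/v2/inunriver_{gcm}",
    indicator_id="flood_depth",
    indicator_model_gcm="{gcm}",
    params={"gcm": ["NorESM1-M", "GFDL-ESM2M"]},
    display_name="Flood depth ({gcm})",
    description="Invented resource, for illustration only.",
    scenarios=[Scenario(id="rcp8p5", years=[2030, 2050, 2080])],
    units="metres",
)
expanded = list(resource.expand())
# two resources, with paths ending "inunriver_NorESM1-M" and "inunriver_GFDL-ESM2M"
```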
class InventorySource(Flag): + """Source of inventory. Where multiple are selected, order is as shown: + results from HAZARD_TEST override those of HAZARD and EMBEDDED (to facilitate testing). + """ + + EMBEDDED = auto() # inventory embedded in physrisk + HAZARD = auto() # inventory stored in the S3 hazard location + HAZARD_TEST = auto() # inventory stored in the S3 hazard_test location + + +class HazardAvailabilityRequest(BaseModel): + types: Optional[List[str]] = [] # e.g. ["RiverineInundation"] + sources: Optional[List[str]] = Field( + None, description="Sources of inventory, can be 'embedded', 'hazard' or 'hazard_test'." + ) + + +class HazardAvailabilityResponse(BaseModel): + models: List[HazardResource] + colormaps: dict + + +class HazardDescriptionRequest(BaseModel): + paths: List[str] = Field(description="List of paths to markdown objects.") + + +class HazardDescriptionResponse(BaseModel): + descriptions: Dict[str, str] = Field(description="For each path (key), the description markdown (value).") + + +class HazardDataRequestItem(BaseModel): + longitudes: List[float] + latitudes: List[float] + request_item_id: str + hazard_type: Optional[str] = None # e.g. RiverineInundation + event_type: Optional[str] = None # e.g. RiverineInundation; deprecated: use hazard_type + indicator_id: str + indicator_model_gcm: Optional[str] = "" + path: Optional[str] = None + scenario: str # e.g. rcp8p5 + year: int + + +class HazardDataRequest(BaseHazardRequest): + interpolation: str = "floor" + provider_max_requests: Dict[str, int] = Field( + {}, + description="The maximum permitted number of \ + requests to external providers. This setting is intended in particular for paid-for data. The key \ + is the provider ID and the value is the maximum permitted requests.", + ) + items: List[HazardDataRequestItem] + + +class HazardDataResponseItem(BaseModel): + intensity_curve_set: List[IntensityCurve] + request_item_id: str + event_type: Optional[str] + model: str + scenario: str + year: int + + +class HazardDataResponse(BaseModel): + items: List[HazardDataResponseItem]
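A hedged sketch of a request built from the models above; the coordinates, indicator and scenario values are invented:

```python
# Illustrative only: request hazard indicator values for two invented locations.
from physrisk.api.v1.hazard_data import HazardDataRequest, HazardDataRequestItem

request = HazardDataRequest(
    interpolation="floor",
    items=[
        HazardDataRequestItem(
            longitudes=[69.4787, 68.935],
            latitudes=[34.556, 35.83],
            request_item_id="flood_request_1",
            hazard_type="RiverineInundation",
            indicator_id="flood_depth",
            scenario="rcp8p5",
            year=2050,
        )
    ],
)
```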
diff --git a/src/physrisk/api/v1/hazard_image.py b/src/physrisk/api/v1/hazard_image.py new file mode 100644 index 00000000..217b3d51 --- /dev/null +++ b/src/physrisk/api/v1/hazard_image.py @@ -0,0 +1,31 @@ +from typing import NamedTuple, Optional + +from pydantic import BaseModel, Field + +from physrisk.api.v1.common import BaseHazardRequest + +# class Tile(BaseHazardRequest): +# x: int +# y: int +# z: int + + +class Tile(NamedTuple): + x: int + y: int + z: int + + +class HazardImageRequest(BaseHazardRequest): + resource: str = Field(description="Full path to the array; formed by '{path}/{id}'.") + scenario_id: str + year: int + colormap: Optional[str] = Field("heating") + format: Optional[str] = Field("PNG") + min_value: Optional[float] + max_value: Optional[float] + tile: Optional[Tile] + + +class HazardImageResponse(BaseModel): + image: bytes diff --git a/src/physrisk/api/v1/impact_req_resp.py b/src/physrisk/api/v1/impact_req_resp.py new file mode 100644 index 00000000..951f1c4a --- /dev/null +++ b/src/physrisk/api/v1/impact_req_resp.py @@ -0,0 +1,225 @@ +from enum import Enum +from typing import Dict, List, NamedTuple, Optional, Sequence + +from pydantic import BaseModel, Field, computed_field + +from physrisk.api.v1.common import Assets, Distribution, ExceedanceCurve, VulnerabilityDistrib +from physrisk.api.v1.hazard_data import Scenario + + +class CalcSettings(BaseModel): + hazard_interp: str = Field("floor", description="Method used for interpolation of hazards: 'floor' or 'bilinear'.") + + +class AssetImpactRequest(BaseModel): + """Impact calculation request.""" + + assets: Assets + calc_settings: CalcSettings = Field( + default_factory=CalcSettings, description="Interpolation method." # type:ignore + ) + include_asset_level: bool = Field(True, description="If true, include asset-level impacts.") + include_measures: bool = Field(False, description="If true, include calculation of risk measures.") + include_calc_details: bool = Field(True, description="If true, include impact calculation details.") + provider_max_requests: Dict[str, int] = Field( + {}, + description="The maximum permitted number of requests \ + to external providers. This setting is intended in particular for paid-for data. The key is the provider \ + ID and the value is the maximum permitted requests.", + ) + scenarios: Optional[Sequence[str]] = Field([], description="Name of scenarios ('rcp8p5')") + years: Optional[Sequence[int]] = Field( + [], + description="""Projection year (2030, 2050, 2080). Any year before 2030, + e.g. 1980, is treated as historical.""", + ) + # to be deprecated + scenario: str = Field("rcp8p5", description="Name of scenario ('rcp8p5')") + year: int = Field( + 2050, + description="""Projection year (e.g. 2030, 2050, 2080). Any year before 2030, + e.g. 1980, is treated as historical.""", + ) + + +class Category(int, Enum): + NODATA = 0 + LOW = 1 + MEDIUM = 2 + HIGH = 3 + REDFLAG = 4 + + +class RiskMeasureDefinition(BaseModel): + measure_id: str = Field(None, description="Identifier for the risk measure.") + label: str = Field( " str: + return self.key.hazard_type + + @computed_field # deprecated: use key instead + def year(self) -> str: + return self.key.year + + impact_type: str = Field( + "damage", + description="""'damage' or 'disruption'. Whether the impact is fractional damage to the asset + ('damage') or disruption to an operation, expressed as + fractional decrease to an equivalent cash amount.""", + ) + impact_distribution: Optional[Distribution] + impact_exceedance: Optional[ExceedanceCurve] + impact_mean: float + impact_std_deviation: float + calc_details: Optional[AcuteHazardCalculationDetails] = Field( + None, + description="""Details of impact calculation for acute hazard calculations.""", + ) + + class Config: + arbitrary_types_allowed = True + + +class AssetLevelImpact(BaseModel): + """Impact at asset level. Each asset can have impacts for multiple hazard types.""" + + asset_id: Optional[str] = Field( + None, + description="""Asset identifier; will appear if provided in the request + otherwise order of assets in response is identical to order of assets in request.""", + ) + impacts: List[AssetSingleImpact] = Field([], description="Impacts for each hazard type.") + + +class AssetImpactResponse(BaseModel): + """Response to impact request.""" + + asset_impacts: Optional[List[AssetLevelImpact]] = None + risk_measures: Optional[RiskMeasures] = None
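A hedged sketch of an impact request built from these models; the asset values are invented, and how the request is serviced (for example via the Requester wired up in physrisk/container.py later in this diff) is outside the sketch:

```python
# Illustrative only: an impact request with risk measures enabled.
# Asset values are invented; servicing the request is outside this sketch.
from physrisk.api.v1.common import Asset, Assets
from physrisk.api.v1.impact_req_resp import AssetImpactRequest

request = AssetImpactRequest(
    assets=Assets(
        items=[Asset(asset_class="PowerGeneratingAsset", latitude=34.556, longitude=69.4787)]
    ),
    include_measures=True,
    scenario="rcp8p5",
    year=2050,
)
# a returned AssetImpactResponse then carries asset_impacts and risk_measures;
# the RiskMeasuresHelper defined just below can unpack the latter
```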
+ """ + self.measures = {self._key(m.key): m for m in risk_measures.measures_for_assets} + self.measure_definition = risk_measures.score_based_measure_set_defn + self.measure_set_id = self.measure_definition.measure_set_id + + def _key(self, key: RiskMeasureKey): + return self.Key( + hazard_type=key.hazard_type, scenario_id=key.scenario_id, year=key.year, measure_id=key.measure_id + ) + + def get_measure(self, hazard_type: str, scenario: str, year: int): + measure_key = self.Key( + hazard_type=hazard_type, scenario_id=scenario, year=str(year), measure_id=self.measure_set_id + ) + measure = self.measures[measure_key] + asset_scores, asset_measures = ( + measure.scores, + [measure.measures_0, measure.measures_1], + ) # scores for each asset + # measure IDs for each asset (for the hazard type in question) + measure_ids = self.measure_definition.asset_measure_ids_for_hazard[hazard_type] + # measure definitions for each asset + measure_definitions = [ + self.measure_definition.score_definitions[mid] if mid != "na" else None for mid in measure_ids + ] + return asset_scores, asset_measures, measure_definitions + + def get_score_details(self, score: int, definition: ScoreBasedRiskMeasureDefinition): + rs_value = next(v for v in definition.values if v.value == score) + return rs_value.label, rs_value.description + + class Key(NamedTuple): # hashable key for looking up measures + hazard_type: str + scenario_id: str + year: str + measure_id: str diff --git a/src/test/__init__.py b/src/physrisk/api/v2/__init__.py similarity index 100% rename from src/test/__init__.py rename to src/physrisk/api/v2/__init__.py diff --git a/src/physrisk/container.py b/src/physrisk/container.py new file mode 100644 index 00000000..e5945950 --- /dev/null +++ b/src/physrisk/container.py @@ -0,0 +1,71 @@ +from typing import Dict, MutableMapping, Optional + +from dependency_injector import containers, providers + +from physrisk.data.hazard_data_provider import SourcePath +from physrisk.data.inventory import EmbeddedInventory +from physrisk.data.inventory_reader import InventoryReader +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.kernel import calculation as calc +from physrisk.kernel.hazard_model import HazardModelFactory +from physrisk.kernel.vulnerability_model import ( + DictBasedVulnerabilityModels, + VulnerabilityModels, + VulnerabilityModelsFactory, +) +from physrisk.requests import Requester, _create_inventory, create_source_paths + + +class ZarrHazardModelFactory(HazardModelFactory): + def __init__( + self, + source_paths: Dict[type, SourcePath], + store: Optional[MutableMapping] = None, + reader: Optional[ZarrReader] = None, + ): + self.source_paths = source_paths + self.store = store + self.reader = reader + + def hazard_model(self, interpolation: str = "floor", provider_max_requests: Dict[str, int] = {}): + # this is done to allow interpolation to be set dynamically, e.g. different requests can have different + # parameters. 
+ return ZarrHazardModel( + source_paths=self.source_paths, store=self.store, reader=self.reader, interpolation=interpolation + ) + + +class DictBasedVulnerabilityModelsFactory(VulnerabilityModelsFactory): + def vulnerability_models(self) -> VulnerabilityModels: + return DictBasedVulnerabilityModels(calc.get_default_vulnerability_models()) + + +class Container(containers.DeclarativeContainer): + config = providers.Configuration(default={"zarr_sources": ["embedded", "hazard"]}) + + colormaps = providers.Singleton(lambda: EmbeddedInventory().colormaps()) + + inventory_reader = providers.Singleton(InventoryReader) + + inventory = providers.Singleton(_create_inventory, reader=inventory_reader, sources=config.zarr_sources) + + source_paths = providers.Factory(create_source_paths, inventory=inventory) + + zarr_store = providers.Singleton(ZarrReader.create_s3_zarr_store) + + zarr_reader = providers.Singleton(ZarrReader, store=zarr_store) + + hazard_model_factory = providers.Factory(ZarrHazardModelFactory, reader=zarr_reader, source_paths=source_paths) + + vulnerability_models_factory = providers.Factory(DictBasedVulnerabilityModelsFactory) + + requester = providers.Singleton( + Requester, + hazard_model_factory=hazard_model_factory, + vulnerability_models_factory=vulnerability_models_factory, + inventory=inventory, + inventory_reader=inventory_reader, + reader=zarr_reader, + colormaps=colormaps, + ) diff --git a/src/physrisk/data/__init__.py b/src/physrisk/data/__init__.py index 3b2d4ab9..e69de29b 100644 --- a/src/physrisk/data/__init__.py +++ b/src/physrisk/data/__init__.py @@ -1 +0,0 @@ -from .data_requests import EventDataRequest, ReturnPeriodEvDataResp, process_requests \ No newline at end of file diff --git a/src/physrisk/data/colormap_provider.py b/src/physrisk/data/colormap_provider.py new file mode 100644 index 00000000..8208bcd6 --- /dev/null +++ b/src/physrisk/data/colormap_provider.py @@ -0,0 +1,788 @@ +# based on Seaborn 'flare' with index 0 and index 1 transparent +map_flare = { + "0": [255, 255, 255, 0], + "1": [255, 255, 255, 0], + "2": [236, 173, 127, 200], + "3": [236, 172, 126, 200], + "4": [236, 171, 126, 200], + "5": [236, 170, 125, 200], + "6": [236, 169, 124, 200], + "7": [236, 168, 124, 200], + "8": [236, 167, 123, 200], + "9": [236, 166, 123, 200], + "10": [236, 166, 122, 200], + "11": [236, 165, 121, 200], + "12": [235, 164, 121, 200], + "13": [235, 163, 120, 200], + "14": [235, 162, 120, 200], + "15": [235, 161, 119, 200], + "16": [235, 160, 118, 200], + "17": [235, 159, 118, 200], + "18": [235, 158, 117, 200], + "19": [235, 157, 117, 200], + "20": [235, 156, 116, 200], + "21": [235, 155, 116, 200], + "22": [235, 154, 115, 200], + "23": [234, 153, 114, 200], + "24": [234, 152, 114, 200], + "25": [234, 151, 113, 200], + "26": [234, 150, 113, 200], + "27": [234, 149, 112, 200], + "28": [234, 148, 111, 200], + "29": [234, 147, 111, 200], + "30": [234, 146, 110, 200], + "31": [234, 145, 110, 200], + "32": [233, 144, 109, 200], + "33": [233, 143, 109, 200], + "34": [233, 142, 108, 200], + "35": [233, 141, 107, 200], + "36": [233, 140, 107, 200], + "37": [233, 139, 106, 200], + "38": [233, 138, 106, 200], + "39": [233, 137, 105, 200], + "40": [232, 136, 105, 200], + "41": [232, 135, 104, 200], + "42": [232, 134, 103, 200], + "43": [232, 133, 103, 200], + "44": [232, 132, 102, 200], + "45": [232, 131, 102, 200], + "46": [232, 130, 101, 200], + "47": [232, 129, 101, 200], + "48": [231, 128, 100, 200], + "49": [231, 127, 100, 200], + "50": [231, 126, 99, 200], + "51": [231, 125, 
99, 200], + "52": [231, 124, 98, 200], + "53": [231, 123, 98, 200], + "54": [230, 122, 98, 200], + "55": [230, 121, 97, 200], + "56": [230, 120, 97, 200], + "57": [230, 119, 96, 200], + "58": [230, 118, 96, 200], + "59": [229, 118, 96, 200], + "60": [229, 117, 95, 200], + "61": [229, 116, 95, 200], + "62": [229, 115, 95, 200], + "63": [229, 114, 94, 200], + "64": [228, 113, 94, 200], + "65": [228, 112, 94, 200], + "66": [228, 111, 93, 200], + "67": [228, 110, 93, 200], + "68": [227, 109, 93, 200], + "69": [227, 108, 93, 200], + "70": [227, 107, 92, 200], + "71": [227, 106, 92, 200], + "72": [226, 105, 92, 200], + "73": [226, 104, 92, 200], + "74": [226, 103, 92, 200], + "75": [225, 102, 92, 200], + "76": [225, 101, 91, 200], + "77": [225, 100, 91, 200], + "78": [224, 99, 91, 200], + "79": [224, 98, 91, 200], + "80": [224, 97, 91, 200], + "81": [223, 96, 91, 200], + "82": [223, 95, 91, 200], + "83": [223, 94, 91, 200], + "84": [222, 93, 91, 200], + "85": [222, 92, 91, 200], + "86": [221, 91, 91, 200], + "87": [221, 90, 91, 200], + "88": [221, 89, 91, 200], + "89": [220, 89, 92, 200], + "90": [220, 88, 92, 200], + "91": [219, 87, 92, 200], + "92": [219, 86, 92, 200], + "93": [218, 85, 92, 200], + "94": [218, 84, 92, 200], + "95": [217, 83, 93, 200], + "96": [217, 83, 93, 200], + "97": [216, 82, 93, 200], + "98": [215, 81, 93, 200], + "99": [215, 80, 94, 200], + "100": [214, 79, 94, 200], + "101": [214, 79, 94, 200], + "102": [213, 78, 94, 200], + "103": [212, 77, 95, 200], + "104": [212, 77, 95, 200], + "105": [211, 76, 95, 200], + "106": [211, 75, 96, 200], + "107": [210, 75, 96, 200], + "108": [209, 74, 96, 200], + "109": [208, 73, 97, 200], + "110": [208, 73, 97, 200], + "111": [207, 72, 97, 200], + "112": [206, 71, 98, 200], + "113": [206, 71, 98, 200], + "114": [205, 70, 98, 200], + "115": [204, 70, 99, 200], + "116": [203, 69, 99, 200], + "117": [202, 69, 99, 200], + "118": [202, 68, 100, 200], + "119": [201, 68, 100, 200], + "120": [200, 67, 101, 200], + "121": [199, 67, 101, 200], + "122": [198, 66, 101, 200], + "123": [197, 66, 102, 200], + "124": [197, 66, 102, 200], + "125": [196, 65, 102, 200], + "126": [195, 65, 103, 200], + "127": [194, 65, 103, 200], + "128": [193, 64, 103, 200], + "129": [192, 64, 104, 200], + "130": [191, 63, 104, 200], + "131": [190, 63, 104, 200], + "132": [189, 63, 104, 200], + "133": [188, 63, 105, 200], + "134": [187, 62, 105, 200], + "135": [186, 62, 105, 200], + "136": [186, 62, 105, 200], + "137": [185, 61, 106, 200], + "138": [184, 61, 106, 200], + "139": [183, 61, 106, 200], + "140": [182, 61, 106, 200], + "141": [181, 60, 107, 200], + "142": [180, 60, 107, 200], + "143": [179, 60, 107, 200], + "144": [178, 60, 107, 200], + "145": [177, 59, 108, 200], + "146": [176, 59, 108, 200], + "147": [175, 59, 108, 200], + "148": [174, 59, 108, 200], + "149": [173, 58, 108, 200], + "150": [172, 58, 109, 200], + "151": [171, 58, 109, 200], + "152": [170, 58, 109, 200], + "153": [169, 57, 109, 200], + "154": [168, 57, 109, 200], + "155": [168, 57, 110, 200], + "156": [167, 57, 110, 200], + "157": [166, 56, 110, 200], + "158": [165, 56, 110, 200], + "159": [164, 56, 110, 200], + "160": [163, 56, 110, 200], + "161": [162, 55, 110, 200], + "162": [161, 55, 111, 200], + "163": [160, 55, 111, 200], + "164": [159, 55, 111, 200], + "165": [158, 54, 111, 200], + "166": [157, 54, 111, 200], + "167": [156, 54, 111, 200], + "168": [155, 54, 111, 200], + "169": [154, 53, 111, 200], + "170": [154, 53, 111, 200], + "171": [153, 53, 112, 200], + "172": [152, 53, 112, 200], + 
"173": [151, 52, 112, 200], + "174": [150, 52, 112, 200], + "175": [149, 52, 112, 200], + "176": [148, 52, 112, 200], + "177": [147, 51, 112, 200], + "178": [146, 51, 112, 200], + "179": [145, 51, 112, 200], + "180": [144, 51, 112, 200], + "181": [143, 50, 112, 200], + "182": [142, 50, 112, 200], + "183": [141, 50, 112, 200], + "184": [141, 50, 112, 200], + "185": [140, 49, 112, 200], + "186": [139, 49, 112, 200], + "187": [138, 49, 112, 200], + "188": [137, 49, 112, 200], + "189": [136, 48, 112, 200], + "190": [135, 48, 112, 200], + "191": [134, 48, 112, 200], + "192": [133, 48, 112, 200], + "193": [132, 48, 112, 200], + "194": [131, 47, 112, 200], + "195": [130, 47, 112, 200], + "196": [129, 47, 112, 200], + "197": [128, 47, 112, 200], + "198": [127, 47, 112, 200], + "199": [126, 46, 112, 200], + "200": [126, 46, 112, 200], + "201": [125, 46, 111, 200], + "202": [124, 46, 111, 200], + "203": [123, 46, 111, 200], + "204": [122, 45, 111, 200], + "205": [121, 45, 111, 200], + "206": [120, 45, 111, 200], + "207": [119, 45, 111, 200], + "208": [118, 45, 110, 200], + "209": [117, 44, 110, 200], + "210": [116, 44, 110, 200], + "211": [115, 44, 110, 200], + "212": [114, 44, 110, 200], + "213": [113, 44, 109, 200], + "214": [112, 44, 109, 200], + "215": [111, 43, 109, 200], + "216": [110, 43, 109, 200], + "217": [109, 43, 109, 200], + "218": [108, 43, 108, 200], + "219": [108, 43, 108, 200], + "220": [107, 43, 108, 200], + "221": [106, 42, 108, 200], + "222": [105, 42, 107, 200], + "223": [104, 42, 107, 200], + "224": [103, 42, 107, 200], + "225": [102, 42, 107, 200], + "226": [101, 41, 106, 200], + "227": [100, 41, 106, 200], + "228": [99, 41, 106, 200], + "229": [98, 41, 105, 200], + "230": [97, 41, 105, 200], + "231": [96, 40, 105, 200], + "232": [95, 40, 104, 200], + "233": [94, 40, 104, 200], + "234": [93, 40, 104, 200], + "235": [93, 40, 104, 200], + "236": [92, 39, 103, 200], + "237": [91, 39, 103, 200], + "238": [90, 39, 103, 200], + "239": [89, 39, 102, 200], + "240": [88, 38, 102, 200], + "241": [87, 38, 102, 200], + "242": [86, 38, 101, 200], + "243": [85, 38, 101, 200], + "244": [84, 38, 101, 200], + "245": [83, 37, 100, 200], + "246": [82, 37, 100, 200], + "247": [82, 37, 100, 200], + "248": [81, 36, 100, 200], + "249": [80, 36, 99, 200], + "250": [79, 36, 99, 200], + "251": [78, 36, 99, 200], + "252": [77, 35, 98, 200], + "253": [76, 35, 98, 200], + "254": [75, 35, 98, 200], + "255": [74, 34, 98, 200], +} + +# based on warming part of Seaborn 'coolwarm' with index 0 and index 1 transparent +map_heating = { + "0": [255, 255, 255, 0], + "1": [255, 255, 255, 0], + "2": [222, 219, 218, 200], + "3": [222, 219, 218, 200], + "4": [223, 219, 217, 200], + "5": [223, 219, 217, 200], + "6": [224, 218, 215, 200], + "7": [224, 218, 215, 200], + "8": [225, 218, 214, 200], + "9": [225, 218, 214, 200], + "10": [226, 217, 212, 200], + "11": [226, 217, 212, 200], + "12": [227, 217, 211, 200], + "13": [227, 217, 211, 200], + "14": [228, 216, 209, 200], + "15": [228, 216, 209, 200], + "16": [229, 216, 208, 200], + "17": [229, 216, 208, 200], + "18": [230, 215, 207, 200], + "19": [230, 215, 207, 200], + "20": [231, 214, 205, 200], + "21": [231, 214, 205, 200], + "22": [231, 214, 204, 200], + "23": [231, 214, 204, 200], + "24": [232, 213, 202, 200], + "25": [232, 213, 202, 200], + "26": [233, 212, 201, 200], + "27": [233, 212, 201, 200], + "28": [234, 211, 199, 200], + "29": [234, 211, 199, 200], + "30": [235, 211, 198, 200], + "31": [235, 211, 198, 200], + "32": [236, 210, 196, 200], + "33": [236, 210, 
196, 200], + "34": [236, 209, 195, 200], + "35": [236, 209, 195, 200], + "36": [237, 208, 193, 200], + "37": [237, 208, 193, 200], + "38": [237, 207, 192, 200], + "39": [237, 207, 192, 200], + "40": [238, 207, 190, 200], + "41": [238, 207, 190, 200], + "42": [239, 206, 188, 200], + "43": [239, 206, 188, 200], + "44": [239, 205, 187, 200], + "45": [239, 205, 187, 200], + "46": [240, 204, 185, 200], + "47": [240, 204, 185, 200], + "48": [241, 203, 184, 200], + "49": [241, 203, 184, 200], + "50": [241, 202, 182, 200], + "51": [241, 202, 182, 200], + "52": [242, 201, 181, 200], + "53": [242, 201, 181, 200], + "54": [242, 200, 179, 200], + "55": [242, 200, 179, 200], + "56": [242, 199, 178, 200], + "57": [242, 199, 178, 200], + "58": [243, 198, 176, 200], + "59": [243, 198, 176, 200], + "60": [243, 197, 175, 200], + "61": [243, 197, 175, 200], + "62": [244, 196, 173, 200], + "63": [244, 196, 173, 200], + "64": [244, 195, 171, 200], + "65": [244, 195, 171, 200], + "66": [244, 194, 170, 200], + "67": [244, 194, 170, 200], + "68": [245, 193, 168, 200], + "69": [245, 193, 168, 200], + "70": [245, 192, 167, 200], + "71": [245, 192, 167, 200], + "72": [245, 191, 165, 200], + "73": [245, 191, 165, 200], + "74": [246, 189, 164, 200], + "75": [246, 189, 164, 200], + "76": [246, 188, 162, 200], + "77": [246, 188, 162, 200], + "78": [246, 187, 160, 200], + "79": [246, 187, 160, 200], + "80": [246, 186, 159, 200], + "81": [246, 186, 159, 200], + "82": [246, 185, 157, 200], + "83": [246, 185, 157, 200], + "84": [246, 183, 156, 200], + "85": [246, 183, 156, 200], + "86": [246, 182, 154, 200], + "87": [246, 182, 154, 200], + "88": [247, 181, 152, 200], + "89": [247, 181, 152, 200], + "90": [247, 179, 151, 200], + "91": [247, 179, 151, 200], + "92": [247, 178, 149, 200], + "93": [247, 178, 149, 200], + "94": [247, 177, 148, 200], + "95": [247, 177, 148, 200], + "96": [247, 176, 146, 200], + "97": [247, 176, 146, 200], + "98": [247, 174, 145, 200], + "99": [247, 174, 145, 200], + "100": [247, 173, 143, 200], + "101": [247, 173, 143, 200], + "102": [246, 171, 141, 200], + "103": [246, 171, 141, 200], + "104": [246, 170, 140, 200], + "105": [246, 170, 140, 200], + "106": [246, 169, 138, 200], + "107": [246, 169, 138, 200], + "108": [246, 167, 137, 200], + "109": [246, 167, 137, 200], + "110": [246, 166, 135, 200], + "111": [246, 166, 135, 200], + "112": [246, 164, 134, 200], + "113": [246, 164, 134, 200], + "114": [246, 163, 132, 200], + "115": [246, 163, 132, 200], + "116": [245, 161, 130, 200], + "117": [245, 161, 130, 200], + "118": [245, 160, 129, 200], + "119": [245, 160, 129, 200], + "120": [245, 158, 127, 200], + "121": [245, 158, 127, 200], + "122": [244, 157, 126, 200], + "123": [244, 157, 126, 200], + "124": [244, 155, 124, 200], + "125": [244, 155, 124, 200], + "126": [244, 154, 123, 200], + "127": [244, 154, 123, 200], + "128": [243, 152, 121, 200], + "129": [243, 152, 121, 200], + "130": [243, 150, 120, 200], + "131": [243, 150, 120, 200], + "132": [243, 149, 118, 200], + "133": [243, 149, 118, 200], + "134": [242, 147, 117, 200], + "135": [242, 147, 117, 200], + "136": [242, 145, 115, 200], + "137": [242, 145, 115, 200], + "138": [241, 144, 114, 200], + "139": [241, 144, 114, 200], + "140": [241, 142, 112, 200], + "141": [241, 142, 112, 200], + "142": [240, 141, 111, 200], + "143": [240, 141, 111, 200], + "144": [240, 139, 109, 200], + "145": [240, 139, 109, 200], + "146": [239, 137, 108, 200], + "147": [239, 137, 108, 200], + "148": [238, 135, 106, 200], + "149": [238, 135, 106, 200], + "150": 
[238, 134, 105, 200], + "151": [238, 134, 105, 200], + "152": [237, 132, 103, 200], + "153": [237, 132, 103, 200], + "154": [236, 130, 102, 200], + "155": [236, 130, 102, 200], + "156": [236, 128, 100, 200], + "157": [236, 128, 100, 200], + "158": [235, 127, 99, 200], + "159": [235, 127, 99, 200], + "160": [234, 125, 97, 200], + "161": [234, 125, 97, 200], + "162": [234, 123, 96, 200], + "163": [234, 123, 96, 200], + "164": [233, 121, 94, 200], + "165": [233, 121, 94, 200], + "166": [232, 119, 93, 200], + "167": [232, 119, 93, 200], + "168": [231, 117, 92, 200], + "169": [231, 117, 92, 200], + "170": [230, 116, 90, 200], + "171": [230, 116, 90, 200], + "172": [230, 114, 89, 200], + "173": [230, 114, 89, 200], + "174": [229, 112, 87, 200], + "175": [229, 112, 87, 200], + "176": [228, 110, 86, 200], + "177": [228, 110, 86, 200], + "178": [227, 108, 84, 200], + "179": [227, 108, 84, 200], + "180": [226, 106, 83, 200], + "181": [226, 106, 83, 200], + "182": [225, 104, 82, 200], + "183": [225, 104, 82, 200], + "184": [224, 102, 80, 200], + "185": [224, 102, 80, 200], + "186": [223, 100, 79, 200], + "187": [223, 100, 79, 200], + "188": [222, 98, 78, 200], + "189": [222, 98, 78, 200], + "190": [221, 96, 76, 200], + "191": [221, 96, 76, 200], + "192": [220, 94, 75, 200], + "193": [220, 94, 75, 200], + "194": [219, 92, 74, 200], + "195": [219, 92, 74, 200], + "196": [218, 90, 72, 200], + "197": [218, 90, 72, 200], + "198": [217, 88, 71, 200], + "199": [217, 88, 71, 200], + "200": [216, 86, 70, 200], + "201": [216, 86, 70, 200], + "202": [215, 84, 68, 200], + "203": [215, 84, 68, 200], + "204": [214, 82, 67, 200], + "205": [214, 82, 67, 200], + "206": [212, 79, 66, 200], + "207": [212, 79, 66, 200], + "208": [211, 77, 64, 200], + "209": [211, 77, 64, 200], + "210": [210, 75, 63, 200], + "211": [210, 75, 63, 200], + "212": [209, 73, 62, 200], + "213": [209, 73, 62, 200], + "214": [207, 70, 61, 200], + "215": [207, 70, 61, 200], + "216": [206, 68, 60, 200], + "217": [206, 68, 60, 200], + "218": [205, 66, 58, 200], + "219": [205, 66, 58, 200], + "220": [204, 63, 57, 200], + "221": [204, 63, 57, 200], + "222": [202, 61, 56, 200], + "223": [202, 61, 56, 200], + "224": [201, 59, 55, 200], + "225": [201, 59, 55, 200], + "226": [200, 56, 53, 200], + "227": [200, 56, 53, 200], + "228": [198, 53, 52, 200], + "229": [198, 53, 52, 200], + "230": [197, 50, 51, 200], + "231": [197, 50, 51, 200], + "232": [196, 48, 50, 200], + "233": [196, 48, 50, 200], + "234": [194, 45, 49, 200], + "235": [194, 45, 49, 200], + "236": [193, 42, 48, 200], + "237": [193, 42, 48, 200], + "238": [191, 40, 46, 200], + "239": [191, 40, 46, 200], + "240": [190, 35, 45, 200], + "241": [190, 35, 45, 200], + "242": [188, 31, 44, 200], + "243": [188, 31, 44, 200], + "244": [187, 26, 43, 200], + "245": [187, 26, 43, 200], + "246": [185, 22, 42, 200], + "247": [185, 22, 42, 200], + "248": [184, 17, 41, 200], + "249": [184, 17, 41, 200], + "250": [182, 13, 40, 200], + "251": [182, 13, 40, 200], + "252": [181, 8, 39, 200], + "253": [181, 8, 39, 200], + "254": [179, 3, 38, 200], + "255": [179, 3, 38, 200], +} + +# based on warming part of Seaborn 'coolwarm' with index 0 only transparent +map_heating_2 = { + "0": [255, 255, 255, 0], + "1": [221, 220, 219, 200], + "2": [222, 219, 218, 200], + "3": [222, 219, 218, 200], + "4": [223, 219, 217, 200], + "5": [223, 219, 217, 200], + "6": [224, 218, 215, 200], + "7": [224, 218, 215, 200], + "8": [225, 218, 214, 200], + "9": [225, 218, 214, 200], + "10": [226, 217, 212, 200], + "11": [226, 217, 212, 
200], + "12": [227, 217, 211, 200], + "13": [227, 217, 211, 200], + "14": [228, 216, 209, 200], + "15": [228, 216, 209, 200], + "16": [229, 216, 208, 200], + "17": [229, 216, 208, 200], + "18": [230, 215, 207, 200], + "19": [230, 215, 207, 200], + "20": [231, 214, 205, 200], + "21": [231, 214, 205, 200], + "22": [231, 214, 204, 200], + "23": [231, 214, 204, 200], + "24": [232, 213, 202, 200], + "25": [232, 213, 202, 200], + "26": [233, 212, 201, 200], + "27": [233, 212, 201, 200], + "28": [234, 211, 199, 200], + "29": [234, 211, 199, 200], + "30": [235, 211, 198, 200], + "31": [235, 211, 198, 200], + "32": [236, 210, 196, 200], + "33": [236, 210, 196, 200], + "34": [236, 209, 195, 200], + "35": [236, 209, 195, 200], + "36": [237, 208, 193, 200], + "37": [237, 208, 193, 200], + "38": [237, 207, 192, 200], + "39": [237, 207, 192, 200], + "40": [238, 207, 190, 200], + "41": [238, 207, 190, 200], + "42": [239, 206, 188, 200], + "43": [239, 206, 188, 200], + "44": [239, 205, 187, 200], + "45": [239, 205, 187, 200], + "46": [240, 204, 185, 200], + "47": [240, 204, 185, 200], + "48": [241, 203, 184, 200], + "49": [241, 203, 184, 200], + "50": [241, 202, 182, 200], + "51": [241, 202, 182, 200], + "52": [242, 201, 181, 200], + "53": [242, 201, 181, 200], + "54": [242, 200, 179, 200], + "55": [242, 200, 179, 200], + "56": [242, 199, 178, 200], + "57": [242, 199, 178, 200], + "58": [243, 198, 176, 200], + "59": [243, 198, 176, 200], + "60": [243, 197, 175, 200], + "61": [243, 197, 175, 200], + "62": [244, 196, 173, 200], + "63": [244, 196, 173, 200], + "64": [244, 195, 171, 200], + "65": [244, 195, 171, 200], + "66": [244, 194, 170, 200], + "67": [244, 194, 170, 200], + "68": [245, 193, 168, 200], + "69": [245, 193, 168, 200], + "70": [245, 192, 167, 200], + "71": [245, 192, 167, 200], + "72": [245, 191, 165, 200], + "73": [245, 191, 165, 200], + "74": [246, 189, 164, 200], + "75": [246, 189, 164, 200], + "76": [246, 188, 162, 200], + "77": [246, 188, 162, 200], + "78": [246, 187, 160, 200], + "79": [246, 187, 160, 200], + "80": [246, 186, 159, 200], + "81": [246, 186, 159, 200], + "82": [246, 185, 157, 200], + "83": [246, 185, 157, 200], + "84": [246, 183, 156, 200], + "85": [246, 183, 156, 200], + "86": [246, 182, 154, 200], + "87": [246, 182, 154, 200], + "88": [247, 181, 152, 200], + "89": [247, 181, 152, 200], + "90": [247, 179, 151, 200], + "91": [247, 179, 151, 200], + "92": [247, 178, 149, 200], + "93": [247, 178, 149, 200], + "94": [247, 177, 148, 200], + "95": [247, 177, 148, 200], + "96": [247, 176, 146, 200], + "97": [247, 176, 146, 200], + "98": [247, 174, 145, 200], + "99": [247, 174, 145, 200], + "100": [247, 173, 143, 200], + "101": [247, 173, 143, 200], + "102": [246, 171, 141, 200], + "103": [246, 171, 141, 200], + "104": [246, 170, 140, 200], + "105": [246, 170, 140, 200], + "106": [246, 169, 138, 200], + "107": [246, 169, 138, 200], + "108": [246, 167, 137, 200], + "109": [246, 167, 137, 200], + "110": [246, 166, 135, 200], + "111": [246, 166, 135, 200], + "112": [246, 164, 134, 200], + "113": [246, 164, 134, 200], + "114": [246, 163, 132, 200], + "115": [246, 163, 132, 200], + "116": [245, 161, 130, 200], + "117": [245, 161, 130, 200], + "118": [245, 160, 129, 200], + "119": [245, 160, 129, 200], + "120": [245, 158, 127, 200], + "121": [245, 158, 127, 200], + "122": [244, 157, 126, 200], + "123": [244, 157, 126, 200], + "124": [244, 155, 124, 200], + "125": [244, 155, 124, 200], + "126": [244, 154, 123, 200], + "127": [244, 154, 123, 200], + "128": [243, 152, 121, 200], + "129": 
[243, 152, 121, 200], + "130": [243, 150, 120, 200], + "131": [243, 150, 120, 200], + "132": [243, 149, 118, 200], + "133": [243, 149, 118, 200], + "134": [242, 147, 117, 200], + "135": [242, 147, 117, 200], + "136": [242, 145, 115, 200], + "137": [242, 145, 115, 200], + "138": [241, 144, 114, 200], + "139": [241, 144, 114, 200], + "140": [241, 142, 112, 200], + "141": [241, 142, 112, 200], + "142": [240, 141, 111, 200], + "143": [240, 141, 111, 200], + "144": [240, 139, 109, 200], + "145": [240, 139, 109, 200], + "146": [239, 137, 108, 200], + "147": [239, 137, 108, 200], + "148": [238, 135, 106, 200], + "149": [238, 135, 106, 200], + "150": [238, 134, 105, 200], + "151": [238, 134, 105, 200], + "152": [237, 132, 103, 200], + "153": [237, 132, 103, 200], + "154": [236, 130, 102, 200], + "155": [236, 130, 102, 200], + "156": [236, 128, 100, 200], + "157": [236, 128, 100, 200], + "158": [235, 127, 99, 200], + "159": [235, 127, 99, 200], + "160": [234, 125, 97, 200], + "161": [234, 125, 97, 200], + "162": [234, 123, 96, 200], + "163": [234, 123, 96, 200], + "164": [233, 121, 94, 200], + "165": [233, 121, 94, 200], + "166": [232, 119, 93, 200], + "167": [232, 119, 93, 200], + "168": [231, 117, 92, 200], + "169": [231, 117, 92, 200], + "170": [230, 116, 90, 200], + "171": [230, 116, 90, 200], + "172": [230, 114, 89, 200], + "173": [230, 114, 89, 200], + "174": [229, 112, 87, 200], + "175": [229, 112, 87, 200], + "176": [228, 110, 86, 200], + "177": [228, 110, 86, 200], + "178": [227, 108, 84, 200], + "179": [227, 108, 84, 200], + "180": [226, 106, 83, 200], + "181": [226, 106, 83, 200], + "182": [225, 104, 82, 200], + "183": [225, 104, 82, 200], + "184": [224, 102, 80, 200], + "185": [224, 102, 80, 200], + "186": [223, 100, 79, 200], + "187": [223, 100, 79, 200], + "188": [222, 98, 78, 200], + "189": [222, 98, 78, 200], + "190": [221, 96, 76, 200], + "191": [221, 96, 76, 200], + "192": [220, 94, 75, 200], + "193": [220, 94, 75, 200], + "194": [219, 92, 74, 200], + "195": [219, 92, 74, 200], + "196": [218, 90, 72, 200], + "197": [218, 90, 72, 200], + "198": [217, 88, 71, 200], + "199": [217, 88, 71, 200], + "200": [216, 86, 70, 200], + "201": [216, 86, 70, 200], + "202": [215, 84, 68, 200], + "203": [215, 84, 68, 200], + "204": [214, 82, 67, 200], + "205": [214, 82, 67, 200], + "206": [212, 79, 66, 200], + "207": [212, 79, 66, 200], + "208": [211, 77, 64, 200], + "209": [211, 77, 64, 200], + "210": [210, 75, 63, 200], + "211": [210, 75, 63, 200], + "212": [209, 73, 62, 200], + "213": [209, 73, 62, 200], + "214": [207, 70, 61, 200], + "215": [207, 70, 61, 200], + "216": [206, 68, 60, 200], + "217": [206, 68, 60, 200], + "218": [205, 66, 58, 200], + "219": [205, 66, 58, 200], + "220": [204, 63, 57, 200], + "221": [204, 63, 57, 200], + "222": [202, 61, 56, 200], + "223": [202, 61, 56, 200], + "224": [201, 59, 55, 200], + "225": [201, 59, 55, 200], + "226": [200, 56, 53, 200], + "227": [200, 56, 53, 200], + "228": [198, 53, 52, 200], + "229": [198, 53, 52, 200], + "230": [197, 50, 51, 200], + "231": [197, 50, 51, 200], + "232": [196, 48, 50, 200], + "233": [196, 48, 50, 200], + "234": [194, 45, 49, 200], + "235": [194, 45, 49, 200], + "236": [193, 42, 48, 200], + "237": [193, 42, 48, 200], + "238": [191, 40, 46, 200], + "239": [191, 40, 46, 200], + "240": [190, 35, 45, 200], + "241": [190, 35, 45, 200], + "242": [188, 31, 44, 200], + "243": [188, 31, 44, 200], + "244": [187, 26, 43, 200], + "245": [187, 26, 43, 200], + "246": [185, 22, 42, 200], + "247": [185, 22, 42, 200], + "248": [184, 17, 41, 
200], + "249": [184, 17, 41, 200], + "250": [182, 13, 40, 200], + "251": [182, 13, 40, 200], + "252": [181, 8, 39, 200], + "253": [181, 8, 39, 200], + "254": [179, 3, 38, 200], + "255": [179, 3, 38, 200], +} + +# for test cases +map_test = dict((str(i), [i, 0, 0, 0]) for i in range(256)) + +colormaps = {"flare": map_flare, "heating": map_heating, "heating_2": map_heating_2, "test": map_test} + + +def colormap(id: str): + return colormaps[id]
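A small usage sketch of the colormap provider above (the look-up index is arbitrary):

```python
# Illustrative use of the colormap provider above: look up the RGBA colour
# for a byte index (keys are the strings "0" to "255").
from physrisk.data.colormap_provider import colormap

flare = colormap("flare")
r, g, b, a = flare["128"]  # indices 0 and 1 are transparent in this map
```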
diff --git a/src/physrisk/data/data_requests.py b/src/physrisk/data/data_requests.py deleted file mode 100644 index a5fba5de..00000000 --- a/src/physrisk/data/data_requests.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import List -from collections import defaultdict - -class EventDataRequest: - def __init__(self, event_type: type, longitude: float, latitude: float, **kwargs): - self.event_type = event_type - self.longitude = longitude - self.latitude = latitude - self.key = '_'.join(str(k) + ":" + str(v) for k, v in sorted(kwargs.items())) - self.props = dict(**kwargs) - #self.__dict__.update((k, v) for k, v in kwargs.items()) - #allowed_keys = {'a', 'b', 'c'} - #self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys) - -class ReturnPeriodEvDataResp: - def __init__(self, return_periods, intensities): - self.return_periods = return_periods - self.intensities = intensities - -def process_requests(requests : List[EventDataRequest], event_provider): - prop_groups = defaultdict(list) - for request in requests: - prop_groups[request.key].append(request) - - responses = {} - for key in prop_groups.keys(): - request_group = prop_groups[key] - props = request_group[0].props - event_type = request_group[0].event_type - longitudes = [req.longitude for req in request_group] - latitudes = [req.latitude for req in request_group] - ret_period, intensities = event_provider[event_type](longitudes, latitudes, **props) - - for i, req in enumerate(request_group): - responses[req] = ReturnPeriodEvDataResp(ret_period, intensities[i, :]) - - return responses diff --git a/src/physrisk/data/raster_reader.py b/src/physrisk/data/geotiff_reader.py similarity index 53% rename from src/physrisk/data/raster_reader.py rename to src/physrisk/data/geotiff_reader.py index 3cb822b1..51ad2913 100644 --- a/src/physrisk/data/raster_reader.py +++ b/src/physrisk/data/geotiff_reader.py @@ -1,34 +1,38 @@ from itertools import chain + import numpy as np import zarr -import rasterio, rasterio.sample -from rasterio.io import MemoryFile -from rasterio.windows import from_bounds -from tifffile import imread -from affine import Affine -from typing import List, Tuple -from tifffile.tifffile import TiffFile +import zarr.core + +from ..utils.lazy import lazy_import + +rasterio = lazy_import("rasterio") +tifffile = lazy_import("tifffile") +affine = lazy_import("affine") + def zarr_read(path, longitudes, latitudes): """A version that uses Zarr rather than GDAL. Typically faster than GDAL / rasterio.""" - with TiffFile(path) as tif: - scale: Tuple[float, float, float] = tif.geotiff_metadata["ModelPixelScale"] - tie_point: List[float] = tif.geotiff_metadata["ModelTiepoint"] + with tifffile.TiffFile(path) as tif: + scale = tif.geotiff_metadata["ModelPixelScale"] + tie_point = tif.geotiff_metadata["ModelTiepoint"] store = tif.series[0].aszarr() - zarray = zarr.open(store, mode='r') - shape: List[int] = tif.series[0].shape - i, j, k, x, y, z = tie_point[0:6] - sx, sy, sz = scale - trans = Affine(sx, 0.0, x - i * sx, 0.0, -sy, y + j * sy) - inv_trans = ~trans - mat = np.array(inv_trans).reshape(3,3) - coords = np.vstack((longitudes, latitudes, np.ones(len(longitudes)))) - frac_image_coords = mat @ coords - image_coords = np.floor(frac_image_coords).astype(int) - data = zarray.get_coordinate_selection((image_coords[1,:], image_coords[0,:])) - return data - -def dataset_read_bounded(dataset, longitudes, latitudes, window_half_width = 0.01): + zarray = zarr.open(store, mode="r") + # shape: List[int] = tif.series[0].shape + i, j, k, x, y, z = tie_point[0:6] + sx, sy, sz = scale + trans = affine.Affine(sx, 0.0, x - i * sx, 0.0, -sy, y + j * sy) + inv_trans = ~trans + mat = np.array(inv_trans).reshape(3, 3) + coords = np.vstack((longitudes, latitudes, np.ones(len(longitudes)))) + frac_image_coords = mat @ coords + image_coords = np.floor(frac_image_coords).astype(int) + assert isinstance(zarray, zarr.core.Array) + data = zarray.get_coordinate_selection((image_coords[1, :], image_coords[0, :])) # type: ignore + return data + + +def dataset_read_bounded(dataset, longitudes, latitudes, window_half_width=0.01): hw = window_half_width offsets = [[0, 0], [-hw, -hw], [-hw, hw], [hw, hw], [hw, -hw]] points = [] @@ -38,31 +42,34 @@ def dataset_read_bounded(dataset, longitudes, latitudes, window_half_width = 0.0 samples = np.array(list(rasterio.sample.sample_gen(dataset, points))) samples.resize([len(offsets), len(longitudes)]) max_samples = np.max(samples, 0) - + return max_samples -def dataset_read_points(dataset, longitudes, latitudes, window_half_width = 0.01): + +def dataset_read_points(dataset, longitudes, latitudes, window_half_width=0.01): points = [[lon, lat] for (lon, lat) in zip(longitudes, latitudes)] samples = np.array(list(rasterio.sample.sample_gen(dataset, points))) return samples -def dataset_read_windows(dataset, longitudes, latitudes, window_half_width = 0.01): + +def dataset_read_windows(dataset, longitudes, latitudes, window_half_width=0.01): # seem to need to do one window at a time: potentially slow hw = window_half_width samples = [] - for (lon, lat) in zip(longitudes, latitudes): - win = from_bounds(lon - hw, lat - hw, lon + hw, lat + hw, dataset.transform) # left, bottom, right, top - max_intensity = np.max(dataset.read(1, window = win)) + for lon, lat in zip(longitudes, latitudes): + win = rasterio.windows.from_bounds( + lon - hw, lat - hw, lon + hw, lat + hw, dataset.transform + ) # left, bottom, right, top + max_intensity = np.max(dataset.read(1, window=win)) samples.append(max_intensity[0]) return samples -def file_read_bounded(path, longitudes, latitudes, window_half_width = 0.01): - #with MemoryFile() as memfile: - # with memfile.open(driver = 'GTiff', count) +def file_read_bounded(path, longitudes, latitudes, window_half_width=0.01): with rasterio.open(path) as dataset: return dataset_read_bounded(dataset, longitudes, latitudes, window_half_width) + def file_read_points(path, longitudes, latitudes): with rasterio.open(path) as dataset: return dataset_read_points(dataset, longitudes, latitudes)
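A hedged usage sketch of zarr_read from the renamed geotiff_reader module above; the GeoTIFF file name is invented:

```python
# Illustrative only: sample two points from a GeoTIFF with the Zarr-based
# reader above. The file name is invented for the sketch.
import numpy as np

from physrisk.data.geotiff_reader import zarr_read

depths = zarr_read(
    "inunriver_rcp8p5_MIROC-ESM-CHEM_2050_rp00100.tif",
    np.array([69.4787, 68.935]),
    np.array([34.556, 35.83]),
)
```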
return dataset_read_points(dataset, longitudes, latitudes) diff --git a/src/physrisk/data/hazard/event_provider_wri.py b/src/physrisk/data/hazard/event_provider_wri.py deleted file mode 100644 index 3420778d..00000000 --- a/src/physrisk/data/hazard/event_provider_wri.py +++ /dev/null @@ -1,114 +0,0 @@ -import logging, os.path, rasterio, rasterio.sample, requests -import numpy as np -import physrisk.data.raster_reader as rr - -# requires raterio and gdal; for binaries to install on Windows, at time of writing https://pypi.org/project/rasterio/ directs us to https://www.lfd.uci.edu/~gohlke/pythonlibs/#rasterio - -class EventProviderWri(): - """Class to obtain World Resources Institute (WRI) hazard data from various sources.""" - - __return_periods = [5, 10, 25, 50, 100, 250, 500, 1000] - __wri_public_url = "http://wri-projects.s3.amazonaws.com/AqueductFloodTool/download/v2/" - __riverine_circ_models = { "000000000WATCH" : "Baseline condition", - "00000NorESM1-M GCM" : "Bjerknes Centre for Climate Research, Norwegian Meteorological Institute", - "0000GFDL_ESM2M GCM" : "Geophysical Fluid Dynamics Laboratory (NOAA)", - "20000HadGEM2-ES" : "Met Office Hadley Centre", - "00IPSL-CM5A-LR GCM" : "Institut Pierre Simon Laplace", - "MIROC-ESM-CHEM GCM" : "Atmosphere and Ocean Research Institute (The University of Tokyo), National Institute for Environmental Studies, and Japan Agency for Marine-Earth Science and Technology" } - - - def __init__(self, src_key, event_type = "inundation", **kwargs): - # different sources can be specified - - if event_type != "inundation": - raise NotImplementedError("Sourcing of {0} data not supported".format(event_type)) - - # for 'file', data must exist in folder specified - if src_key == 'file': - if 'folder' in kwargs: - self.__get_events = lambda lons, lats, return_periods, scenario, sea_level, type, subsidence, model, year, cf = kwargs['folder'] : self.__get_inundation_file_based( - cf, lats, lons, return_periods, scenario, sea_level, type, subsidence, model, year) - else: - # enforced: otherwise very slow and hits WRI servers frequently - raise KeyError("folder must be supplied") - - # for 'web', data will be downloaded from WRI public site as per http://wri-projects.s3.amazonaws.com/AqueductFloodTool/download/v2/index.html and cached locally - if src_key == 'web': - if 'cache_folder' in kwargs: - # cache GeoTiffs in folder specified (important for local development) - download = self.__download_inundation - self.__get_events = lambda lons, lats, return_periods, scenario, sea_level, type, subsidence, model, year, cf = kwargs['cache_folder'], d = download : self.__get_inundation_file_based( - cf, lats, lons, return_periods, scenario, sea_level, type, subsidence, model, year, download_flood_data = d) - else: - # enforced: otherwise very slow and hits WRI servers frequently - raise KeyError("cache_folder must be supplied") - - # for 'service': do we add ability to invoke a service to return event data? - else: - raise NotImplementedError("Source Key : {0} not handled.".format(src_key)) - - def get_inundation_depth(self, lons, lats, return_periods = None, scenario = "rcp8p5", sea_level = 0, type = "coast", subsidence = True, model = None, year = 2080): - """Return inundation depths for available return periods. 
- - Args: - lats (List[float]): latitudes in degrees - lons (List[float]): longitudes in degrees - return_periods (List[int]): list of returns periods in years - scenario (str): climate scenario ("historical", "rcp4p5" or "rcp8p5") - sea_level (int): sea level rise as a percentile (0, 5 or 50) - subsidence (bool): include subsidence ("nosub" or "wtsub") - type (str): "coast" or "river" - year (int): 2030, 2050 or 2080 - """ - ret_period = np.array(EventProviderWri.__return_periods if return_periods is None else return_periods) - return ret_period, self.__get_events(lats, lons, ret_period, scenario, sea_level, type, subsidence, model, year) - - #region inundation - - def __get_inundation_file_based(self, folder, lons, lats, return_periods, scenario, sea_level, type, subsidence, model, year, download_flood_data = None): - """Get inundation data by reading GeoTiff files.""" - - intensities = [] - for period in [5, 10, 25, 50, 100, 250, 500, 1000]: - if type == "coast": - filename_stub = self.get_inundation_file_name_stub_coast(period, scenario, sea_level, type, subsidence, year) - elif type == "river": - filename_stub = self.get_inundation_file_name_stub_river(period, scenario, type, model, year) - else: - raise NotImplementedError("uknown type " + type) - filename = filename_stub + ".tif" - path = os.path.join(folder, filename) - - if not os.path.isfile(path): - if download_flood_data is None: - raise KeyError("file {0} not found".format(path)) - else: - with open(path, 'wb') as stream: - download_flood_data(stream, filename) - - intensities.append(rr.file_read_bounded(path, lons, lats)) - - return np.stack(intensities, -1) - - def __download_inundation(self, stream, filename): - url = EventProviderWri.__wri_public_url + filename - logging.info("Downloading file " + filename) - # small enough to download and write, but could chunk and stream - r = requests.get(url, allow_redirects = True) - stream.write(r.content) - logging.info("Downloaded") - - def get_inundation_file_name_stub_coast(self, return_period, scenario, sea_level, type, with_subsidence, year): - if type not in ["coast", "river"]: - raise ValueError("invalid type") - - return "inun{0}_{1}_{2}_{3}_rp{4:04d}_{5}".format(type, scenario, - "wtsub" if with_subsidence else "nosub", year, return_period, "0" if sea_level == 0 else "0_perc_{:02d}".format(sea_level)) - - def get_inundation_file_name_stub_river(self, return_period, scenario, type, model, year): - if type not in ["coast", "river"]: - raise ValueError("invalid type") - - return "inun{0}_{1}_{2}_{3}_rp{4:05d}".format(type, scenario, model, year, return_period) - - #endregion \ No newline at end of file diff --git a/src/physrisk/data/hazard_data_provider.py b/src/physrisk/data/hazard_data_provider.py new file mode 100644 index 00000000..aa6cdf39 --- /dev/null +++ b/src/physrisk/data/hazard_data_provider.py @@ -0,0 +1,161 @@ +from abc import ABC +from dataclasses import dataclass +from typing import List, MutableMapping, Optional + +from shapely import Point +from typing_extensions import Protocol + +from .zarr_reader import ZarrReader + + +@dataclass +class HazardDataHint: + """Requestors of hazard data may provide a hint which may be taken into account by the Hazard Model. 
A hazard resource path can be specified which uniquely defines the hazard resource; otherwise the resource
+    is inferred from the indicator_id."""
+
+    path: Optional[str] = None
+    # consider adding: indicator_model_gcm: str
+
+    def group_key(self):
+        return self.path
+
+
+class SourcePath(Protocol):
+    """Provides path to hazard event data source. Each source should have its own implementation.
+
+    Args:
+        indicator_id: hazard indicator identifier.
+        scenario: identifier of scenario, e.g. rcp8p5 (RCP 8.5).
+        year: projection year, e.g. 2080.
+        hint: optional hint identifying the specific hazard resource.
+    """
+
+    def __call__(
+        self, *, indicator_id: str, scenario: str, year: int, hint: Optional[HazardDataHint] = None
+    ) -> str: ...
+
+
+class HazardDataProvider(ABC):
+    def __init__(
+        self,
+        get_source_path: SourcePath,
+        *,
+        store: Optional[MutableMapping] = None,
+        zarr_reader: Optional[ZarrReader] = None,
+        interpolation: Optional[str] = "floor",
+    ):
+        """Create a HazardDataProvider.
+
+        Args:
+            get_source_path: provides the path to the hazard event data source depending on indicator/scenario/year.
+        """
+        self._get_source_path = get_source_path
+        self._reader = zarr_reader if zarr_reader is not None else ZarrReader(store=store)
+        if interpolation not in ["floor", "linear", "max", "min"]:
+            raise ValueError("interpolation must be 'floor', 'linear', 'max' or 'min'")
+        self._interpolation = interpolation
+
+
+class AcuteHazardDataProvider(HazardDataProvider):
+    """Provides hazard event intensities for a single Hazard (type of hazard event)."""
+
+    def __init__(
+        self,
+        get_source_path: SourcePath,
+        *,
+        store: Optional[MutableMapping] = None,
+        zarr_reader: Optional[ZarrReader] = None,
+        interpolation: Optional[str] = "floor",
+    ):
+        super().__init__(get_source_path, store=store, zarr_reader=zarr_reader, interpolation=interpolation)
+
+    def get_intensity_curves(
+        self,
+        longitudes: List[float],
+        latitudes: List[float],
+        *,
+        indicator_id: str,
+        scenario: str,
+        year: int,
+        hint: Optional[HazardDataHint] = None,
+        buffer: Optional[int] = None,
+    ):
+        """Get intensity curve for each latitude and longitude coordinate pair.
+
+        Args:
+            longitudes: list of longitudes.
+            latitudes: list of latitudes.
+            indicator_id: hazard indicator identifier.
+            scenario: identifier of scenario, e.g. rcp8p5 (RCP 8.5).
+            year: projection year, e.g. 2080.
+            hint: optional hint identifying the specific hazard resource.
+            buffer: radius in metres (within [0, 1000]) of a buffer around each point; if supplied, the
+                maximum intensity within the buffer is returned.
+
+        Returns:
+            curves: numpy array of intensity (no. coordinate pairs, no. return periods).
+            return_periods: return periods in years.
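+
+        Example:
+            A sketch only (the provider construction and coordinates are assumed for illustration):
+
+                curves, return_periods = provider.get_intensity_curves(
+                    [69.48], [34.56], indicator_id="flood_depth", scenario="rcp8p5", year=2080
+                )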
+ """ + + path = self._get_source_path(indicator_id=indicator_id, scenario=scenario, year=year, hint=hint) + if buffer is None: + curves, return_periods = self._reader.get_curves( + path, longitudes, latitudes, self._interpolation + ) # type: ignore + else: + if buffer < 0 or 1000 < buffer: + raise Exception("The buffer must be an integer between 0 and 1000 metres.") + curves, return_periods = self._reader.get_max_curves( + path, + [ + ( + Point(longitude, latitude) + if buffer == 0 + else Point(longitude, latitude).buffer( + ZarrReader._get_equivalent_buffer_in_arc_degrees(latitude, buffer) + ) + ) + for longitude, latitude in zip(longitudes, latitudes) + ], + self._interpolation, + ) # type: ignore + return curves, return_periods + + +class ChronicHazardDataProvider(HazardDataProvider): + """Provides hazard parameters for a single type of chronic hazard.""" + + def __init__( + self, + get_source_path: SourcePath, + *, + store: Optional[MutableMapping] = None, + zarr_reader: Optional[ZarrReader] = None, + interpolation: Optional[str] = "floor", + ): + super().__init__(get_source_path, store=store, zarr_reader=zarr_reader, interpolation=interpolation) + + def get_parameters( + self, + longitudes: List[float], + latitudes: List[float], + *, + indicator_id: str, + scenario: str, + year: int, + hint: Optional[HazardDataHint] = None, + ): + """Get hazard parameters for each latitude and longitude coordinate pair. + + Args: + longitudes: list of longitudes. + latitudes: list of latitudes. + model: model identifier. + scenario: identifier of scenario, e.g. rcp8p5 (RCP 8.5). + year: projection year, e.g. 2080. + + Returns: + parameters: numpy array of parameters + """ + + path = self._get_source_path(indicator_id=indicator_id, scenario=scenario, year=year, hint=hint) + parameters, defns = self._reader.get_curves(path, longitudes, latitudes, self._interpolation) + return parameters, defns diff --git a/src/physrisk/data/image_creator.py b/src/physrisk/data/image_creator.py new file mode 100644 index 00000000..8cbba013 --- /dev/null +++ b/src/physrisk/data/image_creator.py @@ -0,0 +1,210 @@ +import io +import logging +from functools import lru_cache +from pathlib import PurePosixPath +from typing import Callable, List, NamedTuple, Optional + +import numpy as np +import PIL.Image as Image +import zarr.storage + +from physrisk.data import colormap_provider +from physrisk.data.zarr_reader import ZarrReader + +logger = logging.getLogger(__name__) + + +class Tile(NamedTuple): + x: int + y: int + z: int + + +class ImageCreator: + """Convert small arrays into images for map display. + Intended for arrays <~1500x1500 (otherwise, recommended to use Mapbox tiles - or similar). + """ + + def __init__(self, reader: Optional[ZarrReader] = None): + self.reader = ZarrReader() if reader is None else reader + + def convert( + self, + path: str, + format="PNG", + colormap: str = "heating", + tile: Optional[Tile] = None, + min_value: Optional[float] = None, + max_value: Optional[float] = None, + ) -> bytes: + """Create image for path specified as array of bytes. + + Args: + resource (str): Full path to array. + format (str, optional): Image format. Defaults to "PNG". + colormap (str, optional): Colormap name. Defaults to "heating". + min_value (Optional[float], optional): Min value. Defaults to None. + max_value (Optional[float], optional): Max value. Defaults to None. + + Returns: + bytes: Image data. 
+ """ + try: + image = self._to_image(path, colormap, tile=tile, min_value=min_value, max_value=max_value) + except Exception as e: + logger.exception(e) + image = Image.fromarray(np.array([[0]]), mode="RGBA") + image_bytes = io.BytesIO() + image.save(image_bytes, format=format) + return image_bytes.getvalue() + + def to_file( + self, + filename: str, + path: str, + format="PNG", + colormap: str = "heating", + min_value: Optional[float] = None, + max_value: Optional[float] = None, + ): + """Create image for path specified and save as file. + + Args: + filename (str): Filename. + path (str): Path to array. + format (str, optional): Image format. Defaults to "PNG". + colormap (str, optional): Colormap name. Defaults to "heating". + min_value (Optional[float], optional): Min value. Defaults to None. + max_value (Optional[float], optional): Max value. Defaults to None. + """ + image = self._to_image(path, colormap, min_value=min_value, max_value=max_value) + image.save(filename, format=format) + + def _to_image( + self, + path, + colormap: str = "heating", + tile: Optional[Tile] = None, + index: Optional[int] = None, + min_value: Optional[float] = None, + max_value: Optional[float] = None, + ) -> Image.Image: + """Get image for path specified as array of bytes.""" + tile_path = path if tile is None else str(PurePosixPath(path, f"{tile.z + 1}")) + data = get_data(self.reader, tile_path) + tile_size = 512 + # data = self.reader.all_data(tile_path) + if len(data.shape) == 3: + index = len(self.reader.get_index_values(data)) - 1 if index is None else index + if tile is None: + # return whole array + data = data[index, :, :] # .squeeze(axis=0) + else: + # (from zarr 2.16.0 we can also use block indexing) + data = data[ + index, tile_size * tile.y : tile_size * (tile.y + 1), tile_size * tile.x : tile_size * (tile.x + 1) + ] + + if any(dim > 4000 for dim in data.shape): + raise Exception("dimension too large (over 1500).") + map_defn = colormap_provider.colormap(colormap) + + def get_colors(index: int): + return map_defn[str(index)] + + rgba = self._to_rgba(data, get_colors, min_value=min_value, max_value=max_value) + image = Image.fromarray(rgba, mode="RGBA") + return image + + def _to_rgba( # noqa: C901 + self, + data: np.ndarray, + get_colors: Callable[[int], List[int]], + min_value: Optional[float] = None, + max_value: Optional[float] = None, + nodata_lower: Optional[float] = None, + nodata_upper: Optional[float] = None, + nodata_bin_transparent: bool = False, + min_bin_transparent: bool = False, + ) -> np.ndarray: + """Convert the data to an RGBA image using values provided by get_colors. + We are particular about min and max values, ensuring that these get their own indices + from the colormap. Thee rules are: + 0: value is nodata + 1: value <= min_value + 2: min_value < value < (max_value - min_value) / 253 + 254: (max_value - min_value) / 253 <= value < max_value + 255 is >= max_value + + Args: + data (np.ndarray): Two dimensional array. + get_colors (Callable[[int], Tuple[int, int, int]]): When passed an integer index in range 0:256, returns RGB components as integers in range 0:256. + min_value (Optional[float]): Minimum value. Defaults to None. + max_value (Optional[float]): Maximum value. Defaults to None. + nodata_lower (Optional[float], optional): If supplied, values smaller than or equal to nodata_lower threshold are considered nodata. Defaults to None. + nodata_upper (Optional[float], optional): If supplied, values larger than or equal to nodata_upper threshold are considered nodata. 
+            nodata_bin_transparent (bool, optional): If True, make the nodata bin transparent. Defaults to False.
+            min_bin_transparent (bool, optional): If True, make the min bin transparent. Defaults to False.
+
+        Returns:
+            np.ndarray: RGBA array.
+        """  # noqa
+
+        red = np.zeros(256, dtype=np.uint32)
+        green = np.zeros(256, dtype=np.uint32)
+        blue = np.zeros(256, dtype=np.uint32)
+        a = np.zeros(256, dtype=np.uint32)
+        for i in range(256):
+            (red[i], green[i], blue[i], a[i]) = get_colors(i)
+        if nodata_bin_transparent:
+            a[0] = 0
+        if min_bin_transparent:
+            a[1] = 0
+        mask_nodata = np.isnan(data)
+        if nodata_lower:
+            mask_nodata = data <= nodata_lower
+        if nodata_upper:
+            mask_nodata = (mask_nodata | (data >= nodata_upper)) if mask_nodata is not None else (data >= nodata_upper)
+
+        if min_value is None:
+            min_value = np.nanmin(data)
+        if max_value is None:
+            max_value = np.nanmax(data)
+
+        mask_ge_max = data >= max_value
+        mask_le_min = data <= min_value
+
+        np.add(data, -min_value, out=data)
+        np.multiply(data, 253.0 / (max_value - min_value), out=data)
+        np.add(data, 2.0, out=data)  # np.clip seems a bit slow so we do not use
+
+        result = data.astype(np.uint8, casting="unsafe", copy=False)
+        del data
+
+        if mask_nodata is not None:
+            result[mask_nodata] = 0
+            del mask_nodata
+
+        result[mask_ge_max] = 255
+        result[mask_le_min] = 1
+        del mask_ge_max, mask_le_min
+
+        final = red[result] + (green[result] << 8) + (blue[result] << 16) + (a[result] << 24)
+        return final
+
+    @staticmethod
+    def test_store(path: str):
+        store = zarr.storage.MemoryStore(root="hazard.zarr")
+        root = zarr.open(store=store, mode="w")
+        x, y = np.meshgrid((np.arange(1000) - 500.0) / 500.0, (np.arange(1000) - 500.0) / 500.0)
+        im = np.exp(-(x**2 + y**2))
+        z = root.create_dataset(  # type: ignore
+            path, shape=(1, im.shape[0], im.shape[1]), chunks=(1, im.shape[0], im.shape[1]), dtype="f4"
+        )
+        z[0, :, :] = im
+        return store
+
+
+@lru_cache(maxsize=32)
+def get_data(reader, path):
+    return reader.all_data(path)
diff --git a/src/physrisk/data/inventory.py b/src/physrisk/data/inventory.py
new file mode 100644
index 00000000..f4ab6246
--- /dev/null
+++ b/src/physrisk/data/inventory.py
@@ -0,0 +1,108 @@
+# flake8: noqa: E501
+import hashlib
+import importlib.resources
+import json
+import logging
+from collections import defaultdict
+from typing import DefaultDict, Dict, Iterable, List, Tuple
+
+from pydantic import TypeAdapter
+
+import physrisk.data.colormap_provider as colormap_provider
+import physrisk.data.static.hazard
+from physrisk.data.inventory_reader import HazardModels
+
+from ..api.v1.hazard_data import HazardResource, Period
+
+# from physrisk.kernel.hazards import ChronicHeat
+
+
+class Inventory:
+    def __init__(self, hazard_resources: Iterable[HazardResource]):
+        """Store the hazard resources, with look-up via either:
+        - key: the combination of path and model identifier, which is unique; or
+        - hazard type and model identifier (requires a choice of provider/version).
+
+        Args:
+            hazard_resources (Iterable[HazardResource]): list of resources
+        """
+        self.resources: Dict[str, HazardResource] = {}
+        self.resources_by_type_id: DefaultDict[Tuple[str, str], List[HazardResource]] = defaultdict(list)
+        for resource in hazard_resources:
+            self.resources[resource.key()] = resource
+            self.resources_by_type_id[(resource.hazard_type, resource.indicator_id)].append(resource)
+
+    def json_ordered(self):
+        sorted_resources = sorted(self.resources_by_type_id.items())
+        resource_list = []
+        for _, resources in sorted_resources:
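+            # append each group in key order, giving a list ordered by
+            # (hazard type, indicator identifier)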
+            resource_list.extend(resources)
+        models = HazardModels(resources=resource_list)
+        return json.dumps(models.model_dump(), indent=4)
+
+
+class EmbeddedInventory(Inventory):
+    """Load up the inventory embedded in the file src/physrisk/data/static/hazard/inventory.json.
+    This file is automatically generated by the hazard repo. In general, the inventory of
+    hazard resources used by physrisk is a combination of embedded and non-embedded inventories.
+    """
+
+    def __init__(self):
+        with importlib.resources.open_text(physrisk.data.static.hazard, "inventory.json") as f:
+            models = TypeAdapter(HazardModels).validate_python(json.load(f)).resources
+            expanded_models = expand(models)
+            super().__init__(expanded_models)
+
+    def colormaps(self):
+        """Color maps. Key can be identical to a model identifier or more descriptive (if shared by many models)."""
+        return colormap_provider.colormaps
+
+
+def alphanumeric(text):
+    """Return alphanumeric hash from supplied string."""
+    hash_int = int.from_bytes(hashlib.sha1(text.encode("utf-8")).digest(), "big")
+    return base36encode(hash_int)
+
+
+def base36encode(number, alphabet="0123456789abcdefghijklmnopqrstuvwxyz"):
+    """Converts an integer to a base36 string."""
+    if not isinstance(number, int):
+        raise TypeError("number must be an integer")
+
+    base36 = ""
+
+    if number < 0:
+        raise ValueError("number must be positive")
+
+    if 0 <= number < len(alphabet):
+        return alphabet[number]
+
+    while number != 0:
+        number, i = divmod(number, len(alphabet))
+        base36 = alphabet[i] + base36
+
+    return base36
+
+
+def expand(resources: List[HazardResource]) -> List[HazardResource]:
+    expanded_models = [e for model in resources for e in model.expand()]
+    # we populate map_id hashes programmatically
+    for model in expanded_models:
+        if model.map and model.map.source == "mapbox" and model.map.path:
+            for scenario in model.scenarios:
+                test_periods = scenario.periods
+                scenario.periods = []
+                for year in scenario.years:
+                    name_format = model.map.path
+                    path = name_format.format(scenario=scenario.id, year=year, return_period=1000)
+                    id = alphanumeric(path)[0:6]
+                    scenario.periods.append(Period(year=year, map_id=id))
+                # if a period was specified explicitly, we check that the hash is the same: a built-in check
+                if test_periods is not None:
+                    for period, test_period in zip(scenario.periods, test_periods):
+                        if period.map_id != test_period.map_id:
+                            raise Exception(
+                                f"validation error: hash {period.map_id} different to specified hash {test_period.map_id}"  # noqa: E501
+                            )
+    return expanded_models
diff --git a/src/physrisk/data/inventory_reader.py b/src/physrisk/data/inventory_reader.py
new file mode 100644
index 00000000..4cf0a6dc
--- /dev/null
+++ b/src/physrisk/data/inventory_reader.py
@@ -0,0 +1,83 @@
+import json
+from pathlib import PurePosixPath
+from typing import Callable, Dict, Iterable, List, Optional
+
+import s3fs
+from fsspec import AbstractFileSystem
+from pydantic import BaseModel, TypeAdapter
+
+from physrisk.api.v1.hazard_data import HazardResource
+
+from .zarr_reader import get_env
+
+
+class HazardModels(BaseModel):
+    resources: List[HazardResource]
+
+
+class InventoryReader:
+    # environment variable names:
+    __access_key = "OSC_S3_ACCESS_KEY"
+    __secret_key = "OSC_S3_SECRET_KEY"
+    __S3_bucket = "OSC_S3_BUCKET"  # e.g. redhat-osc-physical-landing-647521352890
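+
+    # Usage sketch (assumed: S3 credentials and bucket supplied via the environment
+    # variables above; "hazard" is one of the two supported inventory paths):
+    #   reader = InventoryReader()
+    #   resources = reader.read("hazard")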
+
+    def __init__(
+        self,
+        *,
+        get_env: Callable[[str, Optional[str]], str] = get_env,
+        fs: Optional[AbstractFileSystem] = None,
+        base_path: Optional[str] = None,
+    ):
+        """Class to read and update inventory stored in S3 or supplied AbstractFileSystem.
+
+        Args:
+            get_env (Callable[[str, Optional[str]], str], optional): Get environment variable. Defaults to get_env.
+            fs (Optional[AbstractFileSystem], optional): AbstractFileSystem. Defaults to None in which case S3FileSystem will be created. # noqa: E501
+            base_path (Optional[str], optional): Base path (e.g. bucket name). Defaults to None in which case the OSC_S3_BUCKET environment variable is used. # noqa: E501
+        """
+        if fs is None:
+            access_key = get_env(self.__access_key, None)
+            secret_key = get_env(self.__secret_key, None)
+            fs = s3fs.S3FileSystem(anon=False, key=access_key, secret=secret_key)
+
+        bucket = get_env(self.__S3_bucket, "redhat-osc-physical-landing-647521352890")
+        self._base_path = bucket if base_path is None else base_path
+        self._fs = fs
+
+    def read(self, path: str) -> List[HazardResource]:
+        """Read inventory at path provided and return HazardModels."""
+        if not self._fs.exists(self._full_path(path)):
+            return []
+        json_str = self.read_json(path)
+        models = TypeAdapter(HazardModels).validate_python(json.loads(json_str)).resources
+        return models
+
+    def read_description_markdown(self, paths: List[str]) -> Dict[str, str]:
+        """Read description markdown at path provided."""
+        md: Dict[str, str] = {}
+        for path in paths:
+            try:
+                with self._fs.open(self._full_path(path), "r") as f:
+                    md[path] = f.read()
+            except Exception:
+                continue
+        return md
+
+    def read_json(self, path: str) -> str:
+        """Read inventory at path provided and return json."""
+        with self._fs.open(self._full_path(path), "r") as f:
+            json_str = f.read()
+        return json_str
+
+    def append(self, path: str, hazard_models: Iterable[HazardResource]):
+        combined = dict((i.key(), i) for i in self.read(path))
+        for model in hazard_models:
+            combined[model.key()] = model
+        models = HazardModels(resources=list(combined.values()))
+        json_str = json.dumps(models.model_dump())
+        with self._fs.open(self._full_path(path), "w") as f:
+            f.write(json_str)
+
+    def _full_path(self, path: str):
+        if path not in ["hazard", "hazard_test"]:
+            raise ValueError("unsupported path.")
+        return str(PurePosixPath(self._base_path, path, "inventory.json"))
diff --git a/src/physrisk/data/pregenerated_hazard_model.py b/src/physrisk/data/pregenerated_hazard_model.py
new file mode 100644
index 00000000..a47ec0a8
--- /dev/null
+++ b/src/physrisk/data/pregenerated_hazard_model.py
@@ -0,0 +1,142 @@
+import concurrent.futures
+from collections import defaultdict
+from typing import Dict, List, Mapping, MutableMapping, Optional, cast
+
+import numpy as np
+
+from physrisk.data.zarr_reader import ZarrReader
+from physrisk.kernel.hazards import Hazard, HazardKind
+
+from ..kernel.hazard_model import (
+    HazardDataFailedResponse,
+    HazardDataRequest,
+    HazardDataResponse,
+    HazardEventDataResponse,
+    HazardModel,
+    HazardParameterDataResponse,
+)
+from .hazard_data_provider import AcuteHazardDataProvider, ChronicHazardDataProvider, HazardDataProvider, SourcePath
+
+
+class PregeneratedHazardModel(HazardModel):
+    """Hazard event model that processes requests using HazardDataProviders."""
+
+    def __init__(
+        self,
+        hazard_data_providers: Dict[type, HazardDataProvider],
+    ):
+        self.acute_hazard_data_providers = dict(
+            (k, cast(AcuteHazardDataProvider, v))
+            for (k, v) in hazard_data_providers.items()
+            if Hazard.kind(k) == HazardKind.acute
+        )
+        self.chronic_hazard_data_providers = dict(
+            (k, 
cast(ChronicHazardDataProvider, v)) + for (k, v) in hazard_data_providers.items() + if Hazard.kind(k) == HazardKind.chronic + ) + + def get_hazard_events( # noqa: C901 + self, requests: List[HazardDataRequest] + ) -> Mapping[HazardDataRequest, HazardDataResponse]: + # A note on concurrency. + # The requests will be batched up with batches accessing the same data set + # (e.g. same Zarr array in case of Zarr data). + # Within each batch if there are multiple lats/lons, the necessary chunks are + # accessed asynchronously (thanks to async chunk stores in case of Zarr). + # Across batches we could + # 1) make async and use event loop executor for CPU-bound parts + # e.g. asyncio.get_event_loop().run_in_executor + # 2) use thread pool + # for now we do 2; 1 might be preferred if the number of threads needed to download + # data in parallel becomes large (probably not, given use of async in Zarr). + + return self._get_hazard_events(requests) + + def _get_hazard_events( # noqa: C901 + self, requests: List[HazardDataRequest] + ) -> Mapping[HazardDataRequest, HazardDataResponse]: + batches = defaultdict(list) + for request in requests: + batches[request.group_key()].append(request) + + responses: MutableMapping[HazardDataRequest, HazardDataResponse] = {} + # can change max_workers=1 for debugging + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor: + futures = [ + executor.submit(self._get_hazard_events_batch, batches[key], responses) for key in batches.keys() + ] + concurrent.futures.wait(futures) + return responses + + def _get_hazard_events_batch( + self, batch: List[HazardDataRequest], responses: MutableMapping[HazardDataRequest, HazardDataResponse] + ): + try: + hazard_type, indicator_id, scenario, year, hint, buffer = ( + batch[0].hazard_type, + batch[0].indicator_id, + batch[0].scenario, + batch[0].year, + batch[0].hint, + batch[0].buffer, + ) + longitudes = [req.longitude for req in batch] + latitudes = [req.latitude for req in batch] + if hazard_type.kind == HazardKind.acute: # type: ignore + intensities, return_periods = self.acute_hazard_data_providers[hazard_type].get_intensity_curves( + longitudes, + latitudes, + indicator_id=indicator_id, + scenario=scenario, + year=year, + hint=hint, + buffer=buffer, + ) + + for i, req in enumerate(batch): + valid = ~np.isnan(intensities[i, :]) + valid_periods, valid_intensities = return_periods[valid], intensities[i, :][valid] + if len(valid_periods) == 0: + valid_periods, valid_intensities = np.array([100]), np.array([0]) + responses[req] = HazardEventDataResponse(valid_periods, valid_intensities) + elif hazard_type.kind == HazardKind.chronic: # type: ignore + parameters, defns = self.chronic_hazard_data_providers[hazard_type].get_parameters( + longitudes, latitudes, indicator_id=indicator_id, scenario=scenario, year=year, hint=hint + ) + + for i, req in enumerate(batch): + valid = ~np.isnan(parameters[i, :]) + responses[req] = HazardParameterDataResponse(parameters[i, :][valid], defns[valid]) + except Exception as err: + # e.g. 
the requested data is unavailable + for i, req in enumerate(batch): + responses[req] = HazardDataFailedResponse(err) + return + + +class ZarrHazardModel(PregeneratedHazardModel): + def __init__( + self, + *, + source_paths: Dict[type, SourcePath], + reader: Optional[ZarrReader] = None, + store=None, + interpolation="floor", + ): + # share ZarrReaders across HazardDataProviders + zarr_reader = ZarrReader(store=store) if reader is None else reader + + super().__init__( + dict( + ( + t, + ( + AcuteHazardDataProvider(sp, zarr_reader=zarr_reader, interpolation=interpolation) + if Hazard.kind(t) == HazardKind.acute + else ChronicHazardDataProvider(sp, zarr_reader=zarr_reader, interpolation=interpolation) + ), + ) + for t, sp in source_paths.items() + ) + ) diff --git a/src/test/data/__init__.py b/src/physrisk/data/static/__init__.py similarity index 100% rename from src/test/data/__init__.py rename to src/physrisk/data/static/__init__.py diff --git a/src/test/data/hazard/__init__.py b/src/physrisk/data/static/example_portfolios/__init__.py similarity index 100% rename from src/test/data/hazard/__init__.py rename to src/physrisk/data/static/example_portfolios/__init__.py diff --git a/src/physrisk/data/static/example_portfolios/industrial_activity_small.json b/src/physrisk/data/static/example_portfolios/industrial_activity_small.json new file mode 100644 index 00000000..5a937be9 --- /dev/null +++ b/src/physrisk/data/static/example_portfolios/industrial_activity_small.json @@ -0,0 +1,67 @@ +{ + "items": [ + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "Asia", + "latitude": 32.322, + "longitude": 65.119 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "South America", + "latitude": -39.1009, + "longitude": -68.5982 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "South America", + "latitude": -35.055, + "longitude": -64.2406 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "South America", + "latitude": -38.7833, + "longitude": -61.8984 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "Oceania", + "latitude": -32.0739, + "longitude": 115.8914 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "Oceania", + "latitude": -20.485, + "longitude": 147.75 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "nan", + "latitude": -38.3916, + "longitude": 144.8553 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "Oceania", + "latitude": -33.85, + "longitude": 150.9495 + }, + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "Oceania", + "latitude": -34.8348, + "longitude": 138.5572 + } + ] +} diff --git a/src/physrisk/data/static/example_portfolios/mixed_small.json b/src/physrisk/data/static/example_portfolios/mixed_small.json new file mode 100644 index 00000000..35fb6049 --- /dev/null +++ b/src/physrisk/data/static/example_portfolios/mixed_small.json @@ -0,0 +1,25 @@ +{ + "items": [ + { + "asset_class": "IndustrialActivity", + "type": "Construction", + "location": "South America", + "latitude": -39.1009, + "longitude": -68.5982 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Europe", + "latitude": 39.9116, + "longitude": 20.1047 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 
23.6839, + "longitude": 90.5314 + } + ] +} diff --git a/src/physrisk/data/static/example_portfolios/power_generating_small.json b/src/physrisk/data/static/example_portfolios/power_generating_small.json new file mode 100644 index 00000000..2de7cf3c --- /dev/null +++ b/src/physrisk/data/static/example_portfolios/power_generating_small.json @@ -0,0 +1,74 @@ +{ + "items": [ + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Asia", + "latitude": 34.556, + "longitude": 69.4787 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Asia", + "latitude": 35.9416, + "longitude": 68.71 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Europe", + "latitude": 39.9116, + "longitude": 20.1047 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Gas", + "location": "Africa", + "latitude": 36.8789, + "longitude": 6.9366 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Gas", + "location": "Africa", + "latitude": 36.88, + "longitude": 6.935 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Oil", + "location": "Africa", + "latitude": -12.4706, + "longitude": 13.7319 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Africa", + "latitude": -12.4706, + "longitude": 13.7319 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "Africa", + "latitude": -9.7523, + "longitude": 14.4809 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Oil", + "location": "South America", + "latitude": -39.2145, + "longitude": -70.9157 + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Hydro", + "location": "South America", + "latitude": -31.5192, + "longitude": -68.9814 + } + ] +} diff --git a/src/physrisk/data/static/example_portfolios/real_estate_small.json b/src/physrisk/data/static/example_portfolios/real_estate_small.json new file mode 100644 index 00000000..03a90bbf --- /dev/null +++ b/src/physrisk/data/static/example_portfolios/real_estate_small.json @@ -0,0 +1,74 @@ +{ + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 24.0426, + "longitude": 91.0158 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 22.6588, + "longitude": 90.3373 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.6473, + "longitude": 90.3473 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.9186, + "longitude": 90.6926 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.6839, + "longitude": 90.5314 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 22.2972, + "longitude": 91.8062 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.6783, + "longitude": 90.4295 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.5699, + "longitude": 90.4804 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 22.8646, + "longitude": 89.5357 + }, + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.9904, + "longitude": 90.3429 + } + 
] +} diff --git a/src/test/kernel/__init__.py b/src/physrisk/data/static/hazard/__init__.py similarity index 100% rename from src/test/kernel/__init__.py rename to src/physrisk/data/static/hazard/__init__.py diff --git a/src/physrisk/data/static/hazard/inventory.json b/src/physrisk/data/static/hazard/inventory.json new file mode 100644 index 00000000..04007dc7 --- /dev/null +++ b/src/physrisk/data/static/hazard/inventory.json @@ -0,0 +1,2820 @@ +{ + "resources": [ + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_000000000WATCH_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "historical", + "params": {}, + "display_name": "Flood depth/baseline (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods baseline riverine model using historical data.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_000000000WATCH_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1980 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_00000NorESM1-M_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "NorESM1-M", + "params": {}, + "display_name": "Flood depth/NorESM1-M (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods riverine model using GCM model from\nBjerknes Centre for Climate Research, Norwegian Meteorological Institute.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_00000NorESM1-M_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_0000GFDL-ESM2M_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "GFDL-ESM2M", + "params": {}, + "display_name": "Flood depth/GFDL-ESM2M (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods riverine model using GCM model from\nGeophysical Fluid Dynamics Laboratory (NOAA).\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_0000GFDL-ESM2M_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_0000HadGEM2-ES_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "HadGEM2-ES", + "params": {}, + "display_name": "Flood depth/HadGEM2-ES (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods riverine model using GCM model:\nMet Office Hadley Centre.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_0000HadGEM2-ES_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_00IPSL-CM5A-LR_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "IPSL-CM5A-LR", + "params": {}, + "display_name": "Flood depth/IPSL-CM5A-LR (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods riverine model using GCM model from\nInstitut Pierre Simon Laplace\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_00IPSL-CM5A-LR_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "public", + "path": "inundation/wri/v2/inunriver_{scenario}_MIROC-ESM-CHEM_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": null, + "indicator_model_gcm": "MIROC-ESM-CHEM", + "params": {}, + "display_name": "Flood depth/MIROC-ESM-CHEM (WRI)", + "display_groups": [], + "description": "World Resource Institute Aqueduct Floods riverine model using\n GCM model from Atmosphere and Ocean Research Institute\n (The University of Tokyo), National Institute for Environmental Studies, and Japan Agency\n for Marine-Earth Science and Technology.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inunriver_{scenario}_MIROC-ESM-CHEM_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_historical_nosub_hist_0", + "indicator_id": "flood_depth", + "indicator_model_id": "nosub", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/baseline, no subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods baseline coastal model using historical data. Model excludes subsidence.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_historical_nosub_hist_0_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1980 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0", + "indicator_id": "flood_depth", + "indicator_model_id": "nosub/95", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/95%, no subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods coastal model, excluding subsidence; 95th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0_perc_05", + "indicator_id": "flood_depth/nosub/5", + "indicator_model_id": "nosub/5", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/5%, no subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods coastal model, excluding subsidence; 5th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0_perc_05_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0_perc_50", + "indicator_id": "flood_depth", + "indicator_model_id": "nosub/50", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/50%, no subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resource Institute Aqueduct Floods model, excluding subsidence; 50th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n",
+      "map": {
+        "colormap": {
+          "min_index": 1,
+          "min_value": 0.0,
+          "max_index": 255,
+          "max_value": 2.0,
+          "name": "flare",
+          "nodata_index": 0,
+          "units": "m"
+        },
+        "path": "inundation/wri/v2/inuncoast_{scenario}_nosub_{year}_0_perc_50_map",
+        "bounds": [
+          [
+            -180.0,
+            85.0
+          ],
+          [
+            180.0,
+            85.0
+          ],
+          [
+            180.0,
+            -85.0
+          ],
+          [
+            -180.0,
+            -85.0
+          ]
+        ],
+        "index_values": [
+          8
+        ],
+        "source": "map_array_pyramid"
+      },
+      "scenarios": [
+        {
+          "id": "rcp4p5",
+          "years": [
+            2030,
+            2050,
+            2080
+          ]
+        },
+        {
+          "id": "rcp8p5",
+          "years": [
+            2030,
+            2050,
+            2080
+          ]
+        }
+      ],
+      "units": "metres"
+    },
+    {
+      "hazard_type": "CoastalInundation",
+      "group_id": "public",
+      "path": "inundation/wri/v2/inuncoast_historical_wtsub_hist_0",
+      "indicator_id": "flood_depth",
+      "indicator_model_id": "wtsub",
+      "indicator_model_gcm": "unknown",
+      "params": {},
+      "display_name": "Flood depth/baseline, with subsidence (WRI)",
+      "display_groups": [],
+      "description": "\nWorld Resources Institute Aqueduct Floods model, including subsidence; baseline (based on historical data).\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as reoccurrence periods). If a flood event has depth $d_i$ with return period of $r_i$ this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_historical_wtsub_hist_0_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1980 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0", + "indicator_id": "flood_depth", + "indicator_model_id": "wtsub/95", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/95%, with subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods model, including subsidence; 95th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as recurrence periods). If a flood event has depth $d_i$ with return period of $r_i$, this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0_perc_05", + "indicator_id": "flood_depth", + "indicator_model_id": "wtsub/5", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/5%, with subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods model, including subsidence; 5th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as recurrence periods). If a flood event has depth $d_i$ with return period of $r_i$, this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0_perc_05_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "CoastalInundation", + "group_id": "public", + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0_perc_50", + "indicator_id": "flood_depth", + "indicator_model_id": "wtsub/50", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flood depth/50%, with subsidence (WRI)", + "display_groups": [], + "description": "\nWorld Resources Institute Aqueduct Floods model, including subsidence; 50th percentile sea level rise.\n\n \nThe World Resources Institute (WRI) [Aqueduct Floods model](https://www.wri.org/aqueduct) is an acute riverine and coastal flood hazard model with a spatial resolution of 30 \u00d7 30 arc seconds (approx. 1 km at the equator). Flood intensity is provided as a _return period_ map: each point comprises a curve of inundation depths for 9 different return periods (also known as recurrence periods). If a flood event has depth $d_i$ with return period of $r_i$, this implies that the probability of a flood event with depth greater than $d_i$ occurring in any one year is $1 / r_i$; this is the _exceedance probability_. 
Aqueduct Floods is based on Global Flood Risk with IMAGE Scenarios (GLOFRIS); see [here](https://www.wri.org/aqueduct/publications) for more details.\n\nFor more details and relevant citations see the\n[OS-Climate Physical Climate Risk Methodology document](https://github.com/os-climate/physrisk/blob/main/methodology/PhysicalRiskMethodology.pdf).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "flare", + "nodata_index": 0, + "units": "m" + }, + "path": "inundation/wri/v2/inuncoast_{scenario}_wtsub_{year}_0_perc_50_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 8 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "rcp4p5", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "metres" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_{gcm}_{scenario}_{year}", + "indicator_id": "mean_degree_days/above/32c", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MIROC6", + "MPI-ESM1-2-LR", + "NorESM2-MM" + ] + }, + "display_name": "Mean degree days above 32\u00b0C/{gcm}", + "display_groups": [ + "Mean degree days" + ], + "description": "Degree days indicators are calculated by integrating over time the absolute difference between the temperature\nof the medium and a reference temperature. The exact method of calculation may vary;\nhere the daily maximum near-surface temperature 'tasmax' is used to calculate an annual indicator:\n\n$$\nI^\text{dd} = \frac{365}{n_y} \sum_{i = 1}^{n_y} | T^\text{max}_i - T^\text{ref} | \nonumber\n$$\n\n$I^\text{dd}$ is the indicator, $T^\text{max}$ is the daily maximum near-surface temperature, $n_y$ is the number of days in the year, $i$ is the day index\nand $T^\text{ref}$ is the reference temperature of 32\u00b0C. 
The OS-Climate-generated indicators are inferred\nfrom downscaled CMIP6 data, averaged over 6 models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe downscaled data is sourced from the [NASA Earth Exchange Global Daily Downscaled Projections](https://www.nccs.nasa.gov/services/data-collections/land-based-products/nex-gddp-cmip6).\nThe indicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050)\nand 2050 (2041-2060).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 4000.0, + "name": "heating", + "nodata_index": 0, + "units": "degree days" + }, + "path": "mean_degree_days_v2_above_32c_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050 + ] + } + ], + "units": "degree days" + }, + { + "hazard_type": "Fire", + "group_id": "jupiter_osc", + "path": "fire/jupiter/v1/fire_probability_{scenario}_{year}", + "indicator_id": "fire_probability", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Fire probability (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis fire model computes the maximum monthly probability per annum of a wildfire within 100 km of\na given location based on several parameters from multiple bias corrected and downscaled Global Climate Models (GCMs).\nFor example, if the probability of occurrence of a wildfire is 5% in July, 20% in August, 10% in September\nand 0% for other months, the hazard indicator value is 20%.\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 0.7, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "fire_probability_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "" + }, + { + "hazard_type": "Drought", + "group_id": "jupiter_osc", + "path": "drought/jupiter/v1/months_spei3m_below_-2_{scenario}_{year}", + "indicator_id": "months/spei3m/below/-2", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Drought (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. 
For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis drought model is based on the Standardized Precipitation-Evapotranspiration Index (SPEI).\nThe SPEI is an extension of the Standardized Precipitation Index which also considers Potential Evapotranspiration (PET)\nin determining drought events.\nThe SPEI is calculated from a log-logistic probability distribution function of climatic water balance\n(precipitation minus evapotranspiration) over a given time scale.\nThe SPEI itself is a standardized variable with a mean value of 0 and standard deviation of 1.\nThis drought model computes the number of months per annum where the 3-month rolling average\nof SPEI is below -2 based on the mean values of several parameters from\nmultiple bias-corrected and downscaled Global Climate Models (GCMs).\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 12.0, + "name": "heating", + "nodata_index": 0, + "units": "months/year" + }, + "path": "months_spei3m_below_-2_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "months/year" + }, + { + "hazard_type": "Precipitation", + "group_id": "jupiter_osc", + "path": "precipitation/jupiter/v1/max_daily_water_equivalent_{scenario}_{year}", + "indicator_id": "max/daily/water_equivalent", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Precipitation (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. 
For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis model computes the maximum daily water equivalent precipitation (in mm) measured at the 100 year\nreturn period based on the mean of the precipitation distribution from multiple bias corrected and\ndownscaled Global Climate Models (GCMs).\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 1000.0, + "name": "heating", + "nodata_index": 0, + "units": "mm" + }, + "path": "max_daily_water_equivalent_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "mm" + }, + { + "hazard_type": "Hail", + "group_id": "jupiter_osc", + "path": "hail/jupiter/v1/days_above_5cm_{scenario}_{year}", + "indicator_id": "days/above/5cm", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Large hail days per year (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis hail model computes the number of days per annum where hail exceeding 5 cm diameter is possible\nbased on the mean distribution of several parameters\nacross multiple bias-corrected and downscaled Global Climate Models (GCMs).\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 10.0, + "name": "heating", + "nodata_index": 0, + "units": "days/year" + }, + "path": "days_above_5cm_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "days/year" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "jupiter_osc", + "path": "chronic_heat/jupiter/v1/days_above_35c_{scenario}_{year}", + "indicator_id": "days/above/35c", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Days per year above 35\u00b0C (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. 
For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis heat model computes the number of days exceeding 35\u00b0C per annum based on the mean of distribution fits\nto the bias-corrected and downscaled high temperature distribution\nacross multiple Global Climate Models (GCMs).\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 365.0, + "name": "heating", + "nodata_index": 0, + "units": "days/year" + }, + "path": "days_above_35c_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "days/year" + }, + { + "hazard_type": "Wind", + "group_id": "jupiter_osc", + "path": "wind/jupiter/v1/max_1min_{scenario}_{year}", + "indicator_id": "max_speed", + "indicator_model_id": "1min", + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Max 1 minute sustained wind speed (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nThis wind speed model computes the maximum 1-minute sustained wind speed (in km/hr) experienced over a\n100 year return period based on mean wind speed distributions\nfrom multiple Global Climate Models (GCMs).\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 120.0, + "name": "heating", + "nodata_index": 0, + "units": "km/hour" + }, + "path": "max_1min_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "km/hour" + }, + { + "hazard_type": "CombinedInundation", + "group_id": "jupiter_osc", + "path": "combined_flood/jupiter/v1/fraction_{scenario}_{year}", + "indicator_id": "flooded_fraction", + "indicator_model_id": null, + "indicator_model_gcm": "unknown", + "params": {}, + "display_name": "Flooded fraction (Jupiter)", + "display_groups": [], + "description": "\nThese data should not be used in any manner relating to emergency management or planning, public safety,\nphysical safety or property endangerment. 
For higher-resolution data based on up-to-date methods,\nsubject to greater validation, and suitable for bottom-up risk analysis please contact\n[Jupiter Intelligence](https://www.jupiterintel.com).\n\nFlooded fraction provides the spatial fraction of land flooded in a defined grid.\nThis model uses a 30-km grid: the flooded fraction is derived from higher-resolution flood hazards and computed directly as the fraction of\ncells within each 30-km cell that have non-zero flooding at the 200-year return period.\nOpen oceans are excluded.\n ", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 1.0, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "fraction_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "ssp126", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + }, + { + "id": "ssp585", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2075, + 2100 + ] + } + ], + "units": "none" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/mean_work_loss_{intensity}_{gcm}_{scenario}_{year}", + "indicator_id": "mean_work_loss/{intensity}", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "intensity": [ + "low", + "medium", + "high" + ], + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MPI-ESM1-2-LR", + "MIROC6", + "NorESM2-MM" + ] + }, + "display_name": "Mean work loss, {intensity} intensity/{gcm}", + "display_groups": [ + "Mean work loss" + ], + "description": "The mean work loss indicator is calculated from the 'Wet Bulb Globe Temperature' (WBGT) indicator:\n\n$$\nI^\text{WBGT}_i = 0.567 \times T^\text{avg}_i + 0.393 \times p^\text{vapour}_i + 3.94\n$$\n\n$I^\text{WBGT}_i$ is the WBGT indicator, $T^\text{avg}_i$ is the average daily near-surface temperature (in degrees Celsius) on day index $i$, and $p^\text{vapour}$\nis the water vapour partial pressure (in kPa). $p^\text{vapour}$ is calculated from relative humidity $h_r$ via:\n\n$$\np^\text{vapour}_i = \frac{h_r}{100} \times 6.105 \times \exp \left( \frac{17.27 \times T^\text{avg}_i}{237.7 + T^\text{avg}_i} \right)\n$$\n\nThe work ability indicator, $I^{\text{WA}}$, is then calculated via:\n\n$$\nI^{\text{WA}}_i = 0.1 + 0.9 / \left( 1 + (I^\text{WBGT}_i / \alpha_1)^{\alpha_2} \right)\n$$\n\nwhere $\alpha_1$ and $\alpha_2$ are parameters that depend on the work intensity (low, medium or high).\n\nAn annual average work loss indicator, $I^{\text{WL}}$, is calculated via:\n\n$$\nI^{\text{WL}} = 1 - \frac{1}{n_y} \sum_{i = 1}^{n_y} I^{\text{WA}}_i,\n$$\n\n$n_y$ being the number of days in the year. 
The OS-Climate-generated indicators are inferred from CMIP6 data, averaged over 6 models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe indicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050) and 2050 (2041-2060).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 0.8, + "name": "heating", + "nodata_index": 0, + "units": "fractional loss" + }, + "path": "mean_work_loss_{intensity}_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050 + ] + } + ], + "units": "fractional loss" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/days_tas_above_{temp_c}c_{gcm}_{scenario}_{year}", + "indicator_id": "days_tas/above/{temp_c}c", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "temp_c": [ + "25", + "30", + "35", + "40", + "45", + "50", + "55" + ], + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MPI-ESM1-2-LR", + "MIROC6", + "NorESM2-MM" + ] + }, + "display_name": "Days with average temperature above {temp_c}\u00b0C/{gcm}", + "display_groups": [ + "Days with average temperature above" + ], + "description": "Days per year for which the average near-surface temperature 'tas' is above a threshold specified in \u00b0C.\n\n$$\nI = \\frac{365}{n_y} \\sum_{i = 1}^{n_y} \\boldsymbol{\\mathbb{1}}_{\\; \\, T^{avg}_i > T^\\text{ref}} \\nonumber\n$$\n\n$I$ is the indicator, $T^\\text{avg}_i$ is the daily average near-surface temperature for day index $i$ in \u00b0C, $n_y$ is the number of days in the year\nand $T^\\text{ref}$ is the reference temperature.\nThe OS-Climate-generated indicators are inferred from downscaled CMIP6 data. 
This is done for 6 Global Circulation Models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe downscaled data is sourced from the [NASA Earth Exchange Global Daily Downscaled Projections](https://www.nccs.nasa.gov/services/data-collections/land-based-products/nex-gddp-cmip6).\nIndicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050)\nand 2050 (2041-2060).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 100.0, + "name": "heating", + "nodata_index": 0, + "units": "days/year" + }, + "path": "days_tas_above_{temp_c}c_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": null, + "source": "map_array" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050 + ] + } + ], + "units": "days/year" + }, + { + "hazard_type": "Wind", + "group_id": "iris_osc", + "path": "wind/iris/v1/max_speed_{scenario}_{year}", + "indicator_id": "max_speed", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Max wind speed (IRIS)", + "display_groups": [], + "description": "Assessing tropical cyclone risk on a global scale given the infrequency of landfalling tropical cyclones and the short period of reliable observations remains a challenge. Synthetic tropical cyclone datasets can help overcome these problems. Here we present a new global dataset created by IRIS, the ImpeRIal college Storm Model. IRIS is novel because, unlike other synthetic TC models, it only simulates the decay from the point of lifetime maximum intensity. This minimises the bias in the dataset. It takes input from 42 years of observed tropical cyclones and creates a 10,000 year synthetic dataset which is then validated against the observations. IRIS captures important statistical characteristics of the observed data. The return periods of the landfall maximum wind speed (1 minute sustained in m/s) are realistic globally. 
Climate model projections are used to adjust the lifetime maximum intensity.\nFor more details see\n[Imperial College London: modelling tropical cyclones](https://www.imperial.ac.uk/grantham/research/climate-science/modelling-tropical-cyclones/).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 120.0, + "name": "heating", + "nodata_index": 0, + "units": "m/s" + }, + "path": "wind/iris/v1/max_speed_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 60.0 + ], + [ + 180.0, + 60.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2010 + ] + }, + { + "id": "ssp119", + "years": [ + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2050 + ] + } + ], + "units": "m/s" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/mean_degree_days_above_index_{gcm}_{scenario}_{year}", + "indicator_id": "mean_degree_days/above/index", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MPI-ESM1-2-LR", + "MIROC6", + "NorESM2-MM" + ] + }, + "display_name": "Mean degree days above index value/{gcm}", + "display_groups": [ + "Mean degree days" + ], + "description": "Degree days indicators are calculated by integrating over time the absolute difference between the temperature\nof the medium and a reference temperature. The exact method of calculation may vary;\nhere the daily maximum near-surface temperature 'tasmax' is used to calculate an annual indicator:\n\n$$\nI^\text{dd} = \frac{365}{n_y} \sum_{i = 1}^{n_y} | T^\text{max}_i - T^\text{ref} | \nonumber\n$$\n\n$I^\text{dd}$ is the indicator, $T^\text{max}$ is the daily maximum near-surface temperature, $n_y$ is the number of days in the year, $i$ is the day index\nand $T^\text{ref}$ is the reference temperature (the index value). 
The OS-Climate-generated indicators are inferred\nfrom downscaled CMIP6 data, averaged over 6 models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe downscaled data is sourced from the [NASA Earth Exchange Global Daily Downscaled Projections](https://www.nccs.nasa.gov/services/data-collections/land-based-products/nex-gddp-cmip6).\nThe indicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050)\nand 2050 (2041-2060).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 4000.0, + "name": "heating", + "nodata_index": 0, + "units": "degree days" + }, + "path": "mean_degree_days_above_index_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": [ + 16, + 20, + 24 + ], + "source": "map_array" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050 + ] + } + ], + "units": "degree days" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/mean_degree_days_below_index_{gcm}_{scenario}_{year}", + "indicator_id": "mean_degree_days/below/index", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MPI-ESM1-2-LR", + "MIROC6", + "NorESM2-MM" + ] + }, + "display_name": "Mean degree days below index value/{gcm}", + "display_groups": [ + "Mean degree days" + ], + "description": "Degree days indicators are calculated by integrating over time the absolute difference between the temperature\nof the medium and a reference temperature. The exact method of calculation may vary;\nhere the daily maximum near-surface temperature 'tasmax' is used to calculate an annual indicator:\n\n$$\nI^\text{dd} = \frac{365}{n_y} \sum_{i = 1}^{n_y} | T^\text{max}_i - T^\text{ref} | \nonumber\n$$\n\n$I^\text{dd}$ is the indicator, $T^\text{max}$ is the daily maximum near-surface temperature, $n_y$ is the number of days in the year, $i$ is the day index\nand $T^\text{ref}$ is the reference temperature (the index value). 
The OS-Climate-generated indicators are inferred\nfrom downscaled CMIP6 data, averaged over 6 models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe downscaled data is sourced from the [NASA Earth Exchange Global Daily Downscaled Projections](https://www.nccs.nasa.gov/services/data-collections/land-based-products/nex-gddp-cmip6).\nThe indicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050)\nand 2050 (2041-2060).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 4000.0, + "name": "heating", + "nodata_index": 0, + "units": "degree days" + }, + "path": "mean_degree_days_below_index_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": [ + 16, + 20, + 24 + ], + "source": "map_array" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050 + ] + } + ], + "units": "degree days" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/nluu/v2/weeks_water_temp_above_{gcm}_{scenario}_{year}", + "indicator_id": "weeks_water_temp_above", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "GFDL", + "HadGEM", + "IPSL", + "MIROC", + "NorESM" + ] + }, + "display_name": "Weeks with average water temperature above threshold in \u00b0C/{gcm}", + "display_groups": [ + "Weeks with average water temperature above threshold in \u00b0C" + ], + "description": "Weeks per year for which the average water temperature is above a threshold specified in \u00b0C:\n\n$$\nI = \\frac{52}{n_y} \\sum_{i = 1}^{n_y} \\boldsymbol{\\mathbb{1}}_{\\; \\, T^{avg}_i > T^\\text{ref}} \\nonumber\n$$\n\n$I$ is the indicator, $T^\\text{avg}_i$ is the weekly average water temperature for week index $i$ in \u00b0C, $n_y$ is the number of weeks in the sample\nand $T^\\text{ref}$ is the reference temperature.\n\nThe OS-Climate-generated indicators are inferred from downscaled CMIP5 data. 
This is done for 5 Global Circulation Models: GFDL-ESM2M, HadGEM2-ES, IPSL-CM5A-LR, MIROC-ESM-CHEM and NorESM1-M.\nThe downscaled data is sourced from the [Futurestreams dataset](https://geo.public.data.uu.nl/vault-futurestreams/research-futurestreams%5B1633685642%5D/original/waterTemp/) on the data publication platform of Utrecht University.\nIndicators are generated for periods: 'historical' (averaged over 1976-2005), 2020 (2006-2030), 2030 (2021-2040), 2040 (2031-2050), 2050 (2041-2060), 2060 (2051-2070), 2070 (2061-2080), 2080 (2071-2090) and 2090 (2081-2100).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 100.0, + "name": "heating", + "nodata_index": 0, + "units": "weeks/year" + }, + "path": "maps/chronic_heat/nluu/v2/weeks_water_temp_above_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 5, + 7.5, + 10, + 12.5, + 15, + 17.5, + 20, + 22.5, + 25, + 27.5, + 30, + 32.5, + 35, + 37.5, + 40 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1991 + ] + }, + { + "id": "rcp2p6", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "rcp4p5", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "rcp6p0", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2020, + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + } + ], + "units": "weeks/year" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/nluu/v2/weeks_water_temp_above_E2O_{scenario}_{year}", + "indicator_id": "weeks_water_temp_above", + "indicator_model_id": null, + "indicator_model_gcm": "E2O", + "params": {}, + "display_name": "Weeks with average water temperature above threshold in \u00b0C/E2O", + "display_groups": [ + "Weeks with average water temperature above threshold in \u00b0C" + ], + "description": "Weeks per year for which the average water temperature is above a threshold specified in \u00b0C:\n\n$$\nI = \frac{52}{n_y} \sum_{i = 1}^{n_y} \boldsymbol{\mathbb{1}}_{\; \, T^{avg}_i > T^\text{ref}} \nonumber\n$$\n\n$I$ is the indicator, $T^\text{avg}_i$ is the weekly average water temperature for week index $i$ in \u00b0C, $n_y$ is the number of weeks in the sample\nand $T^\text{ref}$ is the reference temperature.\n\nThe OS-Climate-generated indicators are inferred from downscaled CMIP5 data. 
This is done for 5 Global Circulation Models: GFDL-ESM2M, HadGEM2-ES, IPSL-CM5A-LR, MIROC-ESM-CHEM and NorESM1-M.\nThe downscaled data is sourced from the [Futurestreams dataset](https://geo.public.data.uu.nl/vault-futurestreams/research-futurestreams%5B1633685642%5D/original/waterTemp/) on the data publication platform of Utrecht University.\nIndicators are generated for periods: 'historical' (averaged over 1979-2005), 2020 (2006-2030), 2030 (2021-2040), 2040 (2031-2050), 2050 (2041-2060), 2060 (2051-2070), 2070 (2061-2080), 2080 (2071-2090) and 2090 (2081-2100).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 100.0, + "name": "heating", + "nodata_index": 0, + "units": "weeks/year" + }, + "path": "maps/chronic_heat/nluu/v2/weeks_water_temp_above_E2O_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 5, + 7.5, + 10, + 12.5, + 15, + 17.5, + 20, + 22.5, + 25, + 27.5, + 30, + 32.5, + 35, + 37.5, + 40 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1991 + ] + } + ], + "units": "weeks/year" + }, + { + "hazard_type": "ChronicHeat", + "group_id": "", + "path": "chronic_heat/osc/v2/days_wbgt_above_{gcm}_{scenario}_{year}", + "indicator_id": "days_wbgt_above", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "ACCESS-CM2", + "CMCC-ESM2", + "CNRM-CM6-1", + "MPI-ESM1-2-LR", + "MIROC6", + "NorESM2-MM" + ] + }, + "display_name": "Days with wet-bulb globe temperature above threshold in \u00b0C/{gcm}", + "display_groups": [ + "Days with wet-bulb globe temperature above threshold in \u00b0C" + ], + "description": "Days per year for which the 'Wet Bulb Globe Temperature' indicator is above a threshold specified in \u00b0C:\n\n$$\nI = \frac{365}{n_y} \sum_{i = 1}^{n_y} \boldsymbol{\mathbb{1}}_{\; \, T^\text{WBGT}_i > T^\text{ref}} \nonumber\n$$\n\n$I$ is the indicator, $n_y$ is the number of days in the sample and $T^\text{ref}$ is the reference temperature.\n\nThe 'Wet-Bulb Globe Temperature' (WBGT) indicator is calculated from both the average daily near-surface temperature in \u00b0C denoted $T^\text{avg}$ and the water vapour partial pressure in kPa denoted $p^\text{vapour}$:\n\n$$\nT^\text{WBGT}_i = 0.567 \times T^\text{avg}_i + 0.393 \times p^\text{vapour}_i + 3.94\n$$\n\nThe water vapour partial pressure $p^\text{vapour}$ is calculated from relative humidity $h^\text{relative}$:\n\n$$\np^\text{vapour}_i = \frac{h^\text{relative}_i}{100} \times 6.105 \times \exp \left( \frac{17.27 \times T^\text{avg}_i}{237.7 + T^\text{avg}_i} \right)\n$$\n\nThe OS-Climate-generated indicators are inferred from downscaled CMIP6 data, averaged over 6 Global Circulation Models: ACCESS-CM2, CMCC-ESM2, CNRM-CM6-1, MPI-ESM1-2-LR, MIROC6 and NorESM2-MM.\nThe downscaled data is sourced from the [NASA Earth Exchange Global Daily Downscaled Projections](https://www.nccs.nasa.gov/services/data-collections/land-based-products/nex-gddp-cmip6).\nIndicators are generated for periods: 'historical' (averaged over 1995-2014), 2030 (2021-2040), 2040 (2031-2050), 2050 (2041-2060), 2060 (2051-2070), 2070 (2061-2080), 2080 (2071-2090) and 2090 (2081-2100).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 100.0, + "name": "heating", + "nodata_index": 0, + "units": "days/year" + }, + 
"path": "maps/chronic_heat/osc/v2/days_wbgt_above_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": [ + 5, + 10, + 15, + 20, + 25, + 30, + 35, + 40, + 45, + 50, + 55, + 60 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 2005 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "ssp245", + "years": [ + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2040, + 2050, + 2060, + 2070, + 2080, + 2090 + ] + } + ], + "units": "days/year" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_demand_{scenario}_{year}", + "indicator_id": "water_demand", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water demand in centimeters/year (Aqueduct 4.0)", + "display_groups": [ + "Water demand in centimeters/year (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. 
Volumes (cubic meters) can vary significantly with the size of each sub-basin, which can be misleading: volumes may primarily reflect a larger geographical area rather than a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux \u00d7 Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 100.0, + "name": "heating", + "nodata_index": 0, + "units": "centimeters/year" + }, + "path": "maps/water_risk/wri/v2/water_demand_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "centimeters/year" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_supply_{scenario}_{year}", + "indicator_id": "water_supply", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water supply in centimeters/year (Aqueduct 4.0)", + "display_groups": [ + "Water supply in centimeters/year (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. 
Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. Volumes (cubic meters) can vary significantly with the size of each sub-basin, which can be misleading: volumes may primarily reflect a larger geographical area rather than a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux \u00d7 Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2000.0, + "name": "heating", + "nodata_index": 0, + "units": "centimeters/year" + }, + "path": "maps/water_risk/wri/v2/water_supply_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "centimeters/year" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_stress_{scenario}_{year}", + "indicator_id": "water_stress", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water stress (Aqueduct 4.0)", + "display_groups": [ + "Water stress (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. 
Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. Volumes (cubic meters) can vary significantly with the size of each sub-basin, which can be misleading: volumes may primarily reflect a larger geographical area rather than a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux \u00d7 Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. 
Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "maps/water_risk/wri/v2/water_stress_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_depletion_{scenario}_{year}", + "indicator_id": "water_depletion", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water depletion (Aqueduct 4.0)", + "display_groups": [ + "Water depletion (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. 
Volumes (cubic meters) can vary significantly with the size of each sub-basin, which can be misleading: volumes may primarily reflect a larger geographical area rather than a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux \u00d7 Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 2.0, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "maps/water_risk/wri/v2/water_depletion_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_stress_category_{scenario}_{year}", + "indicator_id": "water_stress_category", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water stress category (Aqueduct 4.0)", + "display_groups": [ + "Water stress category (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. 
Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. Volumes (cubic meters) can vary significantly based on the size of each sub-basin, potentially misleading as they might primarily reflect a larger geographical area rather than indicating a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux x Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": -5.0, + "max_index": 255, + "max_value": 5.0, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "maps/water_risk/wri/v2/water_stress_category_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "" + }, + { + "hazard_type": "WaterRisk", + "group_id": "", + "path": "water_risk/wri/v2/water_depletion_category_{scenario}_{year}", + "indicator_id": "water_depletion_category", + "indicator_model_id": null, + "indicator_model_gcm": "combined", + "params": {}, + "display_name": "Water depletion category (Aqueduct 4.0)", + "display_groups": [ + "Water depletion category (Aqueduct 4.0)" + ], + "description": "The World Resources Institute (WRI) [Aqueduct 4.0](https://www.wri.org/data/aqueduct-global-maps-40-data) is the latest iteration of [WRI\u2019s water risk framework](https://www.wri.org/data/aqueduct-water-risk-atlas) designed to translate complex\nhydrological data into intuitive indicators of water-related risk:\n\n* **Water demand**: gross demand is the maximum potential water required to meet sectoral demands. Sectoral water demand includes: domestic, industrial, irrigation, and livestock. 
Demand is displayed as a flux (centimeters/year).\n\n* **Water supply**: available blue water, the total amount of renewable freshwater available to a sub-basin with upstream consumption removed, includes surface flow, interflow, and groundwater recharge. Available blue water is displayed as a flux (centimeters/year).\n\n* **Water stress**: an indicator of competition for water resources defined informally as the ratio of demand for water by human society divided by available water. It can be classified into six categories: -1: Arid and low water use, 0: Low (<10%), 1: Low-medium (10-20%), 2: Medium-high (20-40%), 3: High (40-80%), 4: Extremely high (>80%).\n\n* **Water depletion**: the ratio of total water consumption to available renewable water supplies. Total water consumption includes domestic, industrial, irrigation, and livestock consumptive uses. Available renewable water supplies include the impact of upstream consumptive water users and large dams on downstream water availability. Higher values indicate larger impact on the local water supply and decreased water availability for downstream users. Water depletion is similar to water stress; however, instead of looking at total water demand, water depletion is calculated using consumptive withdrawal only. It can be classified into six categories: -1: Arid and low water use, 0 : Low (<5%), 1: Low-medium (5-25%), 2 : Medium-high (25-50%), 3: High (50-75%), 4 : Extremely high (>75%).\n\n[Aqueduct 4.0 FAQ](https://github.com/wri/Aqueduct40/blob/master/data_FAQ.md) explains why the water supply and demand values are measured as fluxes instead of volumes. Volumes (cubic meters) can vary significantly based on the size of each sub-basin, potentially misleading as they might primarily reflect a larger geographical area rather than indicating a higher rate of water flow. On the other hand, fluxes (centimeters/year), which measure the rate of water flow, offer a more direct and equitable means of comparing water availability between different sub-basins. Volume = Flux x Area.\n\nThe spatial resolution is 5 \u00d7 5 arc minutes which equates roughly to 10 kilometer (km) \u00d7 10 km pixels.\nThe future projections were created using CMIP6 climate forcings based on three future scenarios: optimistic (ssp126), business-as-usual (ssp370), and pessimistic (ssp585) available at [HYPFLOWSCI6](https://public.yoda.uu.nl/geo/UU01/YM7A5H.html). WRI's original data are presented at the HydroBASINS Level 6 scale. 
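A minimal sketch of how the category thresholds quoted in these descriptions could be applied to a raw ratio (the helper function, its name and its boundary handling are assumptions; only the thresholds themselves come from the text):

def aqueduct_category(ratio, thresholds, arid_and_low_use=False):
    """Map a ratio to the -1 to 4 categories described above.
    Water stress uses thresholds [0.1, 0.2, 0.4, 0.8];
    water depletion uses [0.05, 0.25, 0.5, 0.75]."""
    if arid_and_low_use:
        return -1  # arid and low water use
    # count how many thresholds the ratio meets or exceeds
    return sum(ratio >= t for t in thresholds)

aqueduct_category(0.5, [0.1, 0.2, 0.4, 0.8])  # 3, i.e. High (40-80%) water stress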
Indicators are available for periods: 'historical' (averaged over 1979-2019), 2030 (2015-2045), 2050 (2035-2065) and 2080 (2065-2095).\n", + "map": { + "colormap": { + "min_index": 1, + "min_value": -5.0, + "max_index": 255, + "max_value": 5.0, + "name": "heating", + "nodata_index": 0, + "units": "" + }, + "path": "maps/water_risk/wri/v2/water_depletion_category_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -85.0 + ], + [ + -180.0, + -85.0 + ] + ], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1999 + ] + }, + { + "id": "ssp126", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp370", + "years": [ + 2030, + 2050, + 2080 + ] + }, + { + "id": "ssp585", + "years": [ + 2030, + 2050, + 2080 + ] + } + ], + "units": "" + }, + { + "hazard_type": "Drought", + "group_id": "", + "path": "drought/osc/v1/months_spei12m_below_index_{gcm}_{scenario}_{year}", + "indicator_id": "months/spei12m/below/index", + "indicator_model_id": null, + "indicator_model_gcm": "{gcm}", + "params": { + "gcm": [ + "MIROC6" + ] + }, + "display_name": "Drought SPEI index", + "display_groups": [ + "Drought SPEI index" + ], + "description": "", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 12.0, + "name": "heating", + "nodata_index": 0, + "units": "months/year" + }, + "path": "drought/osc/v1/months_spei12m_below_index_{gcm}_{scenario}_{year}_map", + "bounds": [ + [ + -180.0, + 85.0 + ], + [ + 180.0, + 85.0 + ], + [ + 180.0, + -60.0 + ], + [ + -180.0, + -60.0 + ] + ], + "index_values": [ + 0, + -1, + -1.5, + -2, + -2.5, + -3, + -3.6 + ], + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "ssp585", + "years": [ + 2005, + 2030, + 2040, + 2050, + 2080 + ] + } + ], + "units": "months/year" + }, + { + "hazard_type": "RiverineInundation", + "group_id": "", + "path": "inundation/river_tudelft/v2/flood_depth_{scenario}_{year}", + "indicator_id": "flood_depth", + "indicator_model_id": "tudelft", + "indicator_model_gcm": "CLMcom-CCLM4-8-17-EC-EARTH", + "params": {}, + "display_name": "Flood depth (TUDelft)", + "display_groups": [], + "description": "Flood depth for riverine floods occurring in Europe in the present and future climate, including protection levels\nfrom the FLOPROS database.\nBased on the CLMcom-CCLM4-8-17-EC-EARTH regional climate simulation (EURO-CORDEX), sets are available for RCP 4.5 and RCP 8.5\nfor projected (central) years 2035 and 2085. The set has a spatial resolution of 100m and comprises return periods of\n10, 30, 100, 300 and 1000 years.\n\nThe data set is [here](https://data.4tu.nl/datasets/df7b63b0-1114-4515-a562-117ca165dc5b), part of the \n[RAIN data set](https://data.4tu.nl/collections/1e84bf47-5838-40cb-b381-64d3497b3b36)\n(Risk Analysis of Infrastructure Networks in Response to Extreme Weather). 
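Since the set is organised by return period, it may help to recall that a return period T in years corresponds to an annual exceedance probability of 1/T; a small sketch using the return periods listed above:

return_periods = [10, 30, 100, 300, 1000]  # years
annual_exceedance_probability = [1.0 / t for t in return_periods]
# approximately [0.1, 0.0333, 0.01, 0.0033, 0.001]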
The RAIN report is\n[here](http://rain-project.eu/wp-content/uploads/2016/09/D2.5_REPORT_final.pdf).\n\nTo derive this indicator, 'River_flood_extent_X_Y_Z_with_protection' and 'River_flood_depth_X_Y_Z_R' data sets are combined to\nyield protected flood depths.", + "map": { + "colormap": { + "min_index": 1, + "min_value": 0.0, + "max_index": 255, + "max_value": 5.0, + "name": "flare", + "nodata_index": 0, + "units": "metres" + }, + "path": "maps/inundation/river_tudelft/v2/flood_depth_{scenario}_{year}_map", + "bounds": [], + "index_values": null, + "source": "map_array_pyramid" + }, + "scenarios": [ + { + "id": "historical", + "years": [ + 1971 + ] + }, + { + "id": "rcp4p5", + "years": [ + 2035, + 2085 + ] + }, + { + "id": "rcp8p5", + "years": [ + 2035, + 2085 + ] + } + ], + "units": "metres" + } + ] +} diff --git a/src/physrisk/data/static/vulnerability/EU JRC global flood depth-damage functions.json b/src/physrisk/data/static/vulnerability/EU JRC global flood depth-damage functions.json new file mode 100644 index 00000000..6618bed7 --- /dev/null +++ b/src/physrisk/data/static/vulnerability/EU JRC global flood depth-damage functions.json @@ -0,0 +1,711 @@ +{ + "items": [ + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.25, + 0.4, + 0.5, + 0.6, + 0.75, + 0.85, + 0.95, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.201804543, + 0.443269857, + 0.582754693, + 0.682521912, + 0.783957148, + 0.854348922, + 0.923670101, + 0.958522773, + 1.0 + ], + "impact_std": [ + 0.0, + 0.168357605, + 0.141121464, + 0.137452367, + 0.166725182, + 0.14072086, + 0.129131694, + 0.102073428, + 0.059134697, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.490885951, + 0.711294067, + 0.842026011, + 0.949369096, + 0.983636977, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.209427814, + 0.135409866, + 0.081630245, + 0.060853453, + 0.024070255, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.326556502, + 0.494050324, + 0.616572124, + 0.720711764, + 0.869528213, + 0.931487084, + 0.983604148, + 1.0 + ], + "impact_std": [ + 0.0, + 0.251622626, + 0.215442216, + 0.214468998, + 0.207322898, + 0.167536629, + 0.124508994, + 0.047803103, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.219925401, + 0.378226846, + 0.530589082, + 0.635636733, + 0.81693978, + 0.903434688, + 0.957152173, + 1.0 + ], + "impact_std": [ + 0.0, + 0.042003678, + 0.114296315, + 0.198396224, + 0.207821558, + 0.205246932, + 0.141856441, + 0.076208799, + 0.0 + ], + "impact_type": "Damage", + 
"intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.475418119, + 0.640393124, + 0.714614662, + 0.787726348, + 0.928779884, + 0.967381853, + 0.982795444, + 1.0 + ], + "impact_std": [ + 0.088039918, + 0.141050712, + 0.163528188, + 0.169484243, + 0.166855806, + 0.112877499, + 0.058153405, + 0.03589275, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + "asset_type": "Buildings/Residential", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Global" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.15, + 0.3, + 0.45, + 0.55, + 0.75, + 0.9, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.018404908, + 0.239263804, + 0.374233129, + 0.466257669, + 0.552147239, + 0.687116564, + 0.82208589, + 0.90797546, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.611477587, + 0.839531094, + 0.923588457, + 0.991972477, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.077023435, + 0.035924027, + 0.026876525, + 0.016055046, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.376789623, + 0.537681619, + 0.659336684, + 0.762845232, + 0.883348656, + 0.941854895, + 0.98075938, + 1.0 + ], + "impact_std": [ + 0.0, + 0.240462285, + 0.240596279, + 0.243605156, + 0.250253511, + 0.171703625, + 0.11240992, + 0.052781064, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.238953575, + 0.481199682, + 0.673795091, + 0.864583333, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.142878204, + 0.204113206, + 0.190903594, + 0.178000078, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + 
"asset_type": "Buildings/Commercial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.323296918, + 0.506529105, + 0.63459558, + 0.744309656, + 0.864093044, + 0.932788157, + 0.977746968, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Global" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.15, + 0.27, + 0.4, + 0.52, + 0.7, + 0.85, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Europe" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.025714286, + 0.322857143, + 0.511428571, + 0.637142857, + 0.74, + 0.86, + 0.937142857, + 0.98, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.01, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "North America" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.6670194, + 0.888712522, + 0.946737213, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0 + ], + "impact_std": [ + 0.0, + 0.174459885, + 0.098191042, + 0.046492655, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "South America" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.283181524, + 0.481615653, + 0.629218894, + 0.717240588, + 0.85667503, + 0.908577004, + 0.955327463, + 1.0 + ], + "impact_std": [ + 0.0, + 0.243322302, + 0.295987071, + 0.300583358, + 0.268517907, + 0.234498136, + 0.159197865, + 0.079457988, + 0.0 + ], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Asia" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.062682043, + 0.247196046, + 0.403329984, + 0.494488633, + 0.684652389, + 0.918589786, + 1.0, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Africa" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Oceania" + }, + { + "asset_type": "Buildings/Industrial", + "event_type": "RiverineInundation", + "impact_mean": [ + 0.0, + 0.297148022, + 0.479790559, + 0.60328579, + 0.694345844, + 0.820265484, + 0.922861929, + 0.987065493, + 1.0 + ], + "impact_std": [], + "impact_type": "Damage", + "intensity": [ + 0.0, + 0.5, + 1.0, + 1.5, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0 + ], + "intensity_units": "m", + "location": "Global" + } + ] +} diff --git a/src/physrisk/data/static/vulnerability/WRI thermal power plant physical climate vulnerability factors.json b/src/physrisk/data/static/vulnerability/WRI thermal power plant physical climate vulnerability factors.json new file mode 100644 index 00000000..1eed6eb5 
--- /dev/null +++ b/src/physrisk/data/static/vulnerability/WRI thermal power plant physical climate vulnerability factors.json @@ -0,0 +1,399 @@ +{ + "items": [ + { + "asset_type": "Steam/OnceThrough", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Dry", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Gas", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "Inundation", + "impact_mean": [ + 0.0, + 1.0, + 2.0, + 7.0, + 14.0, + 30.0, + 60.0, + 180.0, + 365.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 1.0 + ], + "intensity_units": "Metres", + "location": "Global" + }, + { + "asset_type": "Steam/Dry", + "event_type": "AirTemperature", + "impact_mean": [ + 0.0, + 0.02, + 0.04, + 0.08, + 0.11, + 0.15, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 6.0, + 12.0, + 18.0, + 24.0, + 30.0, + 198.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Gas", + "event_type": "AirTemperature", + "impact_mean": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.8, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 10.0, + 20.0, + 30.0, + 40.0, + 50.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "Drought", + "impact_mean": [ + 0.0, + 0.0, + 0.1, + 0.2, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + -2.0, + -2.5, + -3.0, + -3.6 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "Drought", + "impact_mean": [ + 0.0, + 0.0, + 0.1, + 0.2, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + -2.0, + -2.5, + -3.0, + -3.6 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "WaterTemperature", + "impact_mean": [ + 0.0, + 0.003, + 0.009, + 0.017, + 0.027, + 0.041, + 0.061, + 0.089, + 0.118, + 0.157, + 0.205, + 0.257, + 0.327, + 0.411, + 0.508, + 0.629, + 0.775, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 7.0, + 8.0, + 9.0, + 10.0, + 11.0, + 12.0, + 13.0, + 14.0, + 15.0, + 16.0, + 17.0 + ], + "intensity_units": 
"DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "WaterTemperature", + "impact_mean": [ + 0.0, + 0.003, + 0.009, + 0.017, + 0.027, + 0.041, + 0.061, + 0.089, + 0.118, + 0.157, + 0.205, + 0.257, + 0.327, + 0.411, + 0.508, + 0.629, + 0.775, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 7.0, + 8.0, + 9.0, + 10.0, + 11.0, + 12.0, + 13.0, + 14.0, + 15.0, + 16.0, + 17.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "WaterStress", + "impact_mean": [ + 0.0, + 0.02, + 0.1, + 0.2, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/Recirculating", + "event_type": "WaterStress", + "impact_mean": [ + 0.0, + 0.02, + 0.1, + 0.2, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 0.0, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0 + ], + "intensity_units": "Unitless", + "location": "Global" + }, + { + "asset_type": "Steam/OnceThrough", + "event_type": "RegulatoryDischargeWaterLimit", + "impact_mean": [ + 0.0, + 0.1, + 0.2, + 0.4, + 0.5, + 1.0 + ], + "impact_std": [], + "impact_type": "Disruption", + "impact_units": "Days", + "intensity": [ + 27.0, + 28.0, + 29.0, + 30.0, + 31.0, + 32.0 + ], + "intensity_units": "DegreesCelsius", + "location": "Global" + } + ] +} diff --git a/src/test/models/__init__.py b/src/physrisk/data/static/vulnerability/__init__.py similarity index 100% rename from src/test/models/__init__.py rename to src/physrisk/data/static/vulnerability/__init__.py diff --git a/src/physrisk/data/static/world.json b/src/physrisk/data/static/world.json new file mode 100644 index 00000000..3e923901 --- /dev/null +++ b/src/physrisk/data/static/world.json @@ -0,0 +1,889 @@ +{ + "items": [ + { + "continent": "Oceania", + "country": "Fiji", + "country_iso_a3": "FJI" + }, + { + "continent": "Africa", + "country": "Tanzania", + "country_iso_a3": "TZA" + }, + { + "continent": "Africa", + "country": "W. Sahara", + "country_iso_a3": "ESH" + }, + { + "continent": "North America", + "country": "Canada", + "country_iso_a3": "CAN" + }, + { + "continent": "North America", + "country": "United States of America", + "country_iso_a3": "USA" + }, + { + "continent": "Asia", + "country": "Kazakhstan", + "country_iso_a3": "KAZ" + }, + { + "continent": "Asia", + "country": "Uzbekistan", + "country_iso_a3": "UZB" + }, + { + "continent": "Oceania", + "country": "Papua New Guinea", + "country_iso_a3": "PNG" + }, + { + "continent": "Asia", + "country": "Indonesia", + "country_iso_a3": "IDN" + }, + { + "continent": "South America", + "country": "Argentina", + "country_iso_a3": "ARG" + }, + { + "continent": "South America", + "country": "Chile", + "country_iso_a3": "CHL" + }, + { + "continent": "Africa", + "country": "Dem. Rep. 
Congo", + "country_iso_a3": "COD" + }, + { + "continent": "Africa", + "country": "Somalia", + "country_iso_a3": "SOM" + }, + { + "continent": "Africa", + "country": "Kenya", + "country_iso_a3": "KEN" + }, + { + "continent": "Africa", + "country": "Sudan", + "country_iso_a3": "SDN" + }, + { + "continent": "Africa", + "country": "Chad", + "country_iso_a3": "TCD" + }, + { + "continent": "North America", + "country": "Haiti", + "country_iso_a3": "HTI" + }, + { + "continent": "North America", + "country": "Dominican Rep.", + "country_iso_a3": "DOM" + }, + { + "continent": "Europe", + "country": "Russia", + "country_iso_a3": "RUS" + }, + { + "continent": "North America", + "country": "Bahamas", + "country_iso_a3": "BHS" + }, + { + "continent": "South America", + "country": "Falkland Is.", + "country_iso_a3": "FLK" + }, + { + "continent": "Europe", + "country": "Norway", + "country_iso_a3": "-99" + }, + { + "continent": "North America", + "country": "Greenland", + "country_iso_a3": "GRL" + }, + { + "continent": "Seven seas (open ocean)", + "country": "Fr. S. Antarctic Lands", + "country_iso_a3": "ATF" + }, + { + "continent": "Asia", + "country": "Timor-Leste", + "country_iso_a3": "TLS" + }, + { + "continent": "Africa", + "country": "South Africa", + "country_iso_a3": "ZAF" + }, + { + "continent": "Africa", + "country": "Lesotho", + "country_iso_a3": "LSO" + }, + { + "continent": "North America", + "country": "Mexico", + "country_iso_a3": "MEX" + }, + { + "continent": "South America", + "country": "Uruguay", + "country_iso_a3": "URY" + }, + { + "continent": "South America", + "country": "Brazil", + "country_iso_a3": "BRA" + }, + { + "continent": "South America", + "country": "Bolivia", + "country_iso_a3": "BOL" + }, + { + "continent": "South America", + "country": "Peru", + "country_iso_a3": "PER" + }, + { + "continent": "South America", + "country": "Colombia", + "country_iso_a3": "COL" + }, + { + "continent": "North America", + "country": "Panama", + "country_iso_a3": "PAN" + }, + { + "continent": "North America", + "country": "Costa Rica", + "country_iso_a3": "CRI" + }, + { + "continent": "North America", + "country": "Nicaragua", + "country_iso_a3": "NIC" + }, + { + "continent": "North America", + "country": "Honduras", + "country_iso_a3": "HND" + }, + { + "continent": "North America", + "country": "El Salvador", + "country_iso_a3": "SLV" + }, + { + "continent": "North America", + "country": "Guatemala", + "country_iso_a3": "GTM" + }, + { + "continent": "North America", + "country": "Belize", + "country_iso_a3": "BLZ" + }, + { + "continent": "South America", + "country": "Venezuela", + "country_iso_a3": "VEN" + }, + { + "continent": "South America", + "country": "Guyana", + "country_iso_a3": "GUY" + }, + { + "continent": "South America", + "country": "Suriname", + "country_iso_a3": "SUR" + }, + { + "continent": "Europe", + "country": "France", + "country_iso_a3": "-99" + }, + { + "continent": "South America", + "country": "Ecuador", + "country_iso_a3": "ECU" + }, + { + "continent": "North America", + "country": "Puerto Rico", + "country_iso_a3": "PRI" + }, + { + "continent": "North America", + "country": "Jamaica", + "country_iso_a3": "JAM" + }, + { + "continent": "North America", + "country": "Cuba", + "country_iso_a3": "CUB" + }, + { + "continent": "Africa", + "country": "Zimbabwe", + "country_iso_a3": "ZWE" + }, + { + "continent": "Africa", + "country": "Botswana", + "country_iso_a3": "BWA" + }, + { + "continent": "Africa", + "country": "Namibia", + "country_iso_a3": "NAM" + }, + { + 
"continent": "Africa", + "country": "Senegal", + "country_iso_a3": "SEN" + }, + { + "continent": "Africa", + "country": "Mali", + "country_iso_a3": "MLI" + }, + { + "continent": "Africa", + "country": "Mauritania", + "country_iso_a3": "MRT" + }, + { + "continent": "Africa", + "country": "Benin", + "country_iso_a3": "BEN" + }, + { + "continent": "Africa", + "country": "Niger", + "country_iso_a3": "NER" + }, + { + "continent": "Africa", + "country": "Nigeria", + "country_iso_a3": "NGA" + }, + { + "continent": "Africa", + "country": "Cameroon", + "country_iso_a3": "CMR" + }, + { + "continent": "Africa", + "country": "Togo", + "country_iso_a3": "TGO" + }, + { + "continent": "Africa", + "country": "Ghana", + "country_iso_a3": "GHA" + }, + { + "continent": "Africa", + "country": "C\u00f4te d'Ivoire", + "country_iso_a3": "CIV" + }, + { + "continent": "Africa", + "country": "Guinea", + "country_iso_a3": "GIN" + }, + { + "continent": "Africa", + "country": "Guinea-Bissau", + "country_iso_a3": "GNB" + }, + { + "continent": "Africa", + "country": "Liberia", + "country_iso_a3": "LBR" + }, + { + "continent": "Africa", + "country": "Sierra Leone", + "country_iso_a3": "SLE" + }, + { + "continent": "Africa", + "country": "Burkina Faso", + "country_iso_a3": "BFA" + }, + { + "continent": "Africa", + "country": "Central African Rep.", + "country_iso_a3": "CAF" + }, + { + "continent": "Africa", + "country": "Congo", + "country_iso_a3": "COG" + }, + { + "continent": "Africa", + "country": "Gabon", + "country_iso_a3": "GAB" + }, + { + "continent": "Africa", + "country": "Eq. Guinea", + "country_iso_a3": "GNQ" + }, + { + "continent": "Africa", + "country": "Zambia", + "country_iso_a3": "ZMB" + }, + { + "continent": "Africa", + "country": "Malawi", + "country_iso_a3": "MWI" + }, + { + "continent": "Africa", + "country": "Mozambique", + "country_iso_a3": "MOZ" + }, + { + "continent": "Africa", + "country": "eSwatini", + "country_iso_a3": "SWZ" + }, + { + "continent": "Africa", + "country": "Angola", + "country_iso_a3": "AGO" + }, + { + "continent": "Africa", + "country": "Burundi", + "country_iso_a3": "BDI" + }, + { + "continent": "Asia", + "country": "Israel", + "country_iso_a3": "ISR" + }, + { + "continent": "Asia", + "country": "Lebanon", + "country_iso_a3": "LBN" + }, + { + "continent": "Africa", + "country": "Madagascar", + "country_iso_a3": "MDG" + }, + { + "continent": "Asia", + "country": "Palestine", + "country_iso_a3": "PSE" + }, + { + "continent": "Africa", + "country": "Gambia", + "country_iso_a3": "GMB" + }, + { + "continent": "Africa", + "country": "Tunisia", + "country_iso_a3": "TUN" + }, + { + "continent": "Africa", + "country": "Algeria", + "country_iso_a3": "DZA" + }, + { + "continent": "Asia", + "country": "Jordan", + "country_iso_a3": "JOR" + }, + { + "continent": "Asia", + "country": "United Arab Emirates", + "country_iso_a3": "ARE" + }, + { + "continent": "Asia", + "country": "Qatar", + "country_iso_a3": "QAT" + }, + { + "continent": "Asia", + "country": "Kuwait", + "country_iso_a3": "KWT" + }, + { + "continent": "Asia", + "country": "Iraq", + "country_iso_a3": "IRQ" + }, + { + "continent": "Asia", + "country": "Oman", + "country_iso_a3": "OMN" + }, + { + "continent": "Oceania", + "country": "Vanuatu", + "country_iso_a3": "VUT" + }, + { + "continent": "Asia", + "country": "Cambodia", + "country_iso_a3": "KHM" + }, + { + "continent": "Asia", + "country": "Thailand", + "country_iso_a3": "THA" + }, + { + "continent": "Asia", + "country": "Laos", + "country_iso_a3": "LAO" + }, + { + "continent": 
"Asia", + "country": "Myanmar", + "country_iso_a3": "MMR" + }, + { + "continent": "Asia", + "country": "Vietnam", + "country_iso_a3": "VNM" + }, + { + "continent": "Asia", + "country": "North Korea", + "country_iso_a3": "PRK" + }, + { + "continent": "Asia", + "country": "South Korea", + "country_iso_a3": "KOR" + }, + { + "continent": "Asia", + "country": "Mongolia", + "country_iso_a3": "MNG" + }, + { + "continent": "Asia", + "country": "India", + "country_iso_a3": "IND" + }, + { + "continent": "Asia", + "country": "Bangladesh", + "country_iso_a3": "BGD" + }, + { + "continent": "Asia", + "country": "Bhutan", + "country_iso_a3": "BTN" + }, + { + "continent": "Asia", + "country": "Nepal", + "country_iso_a3": "NPL" + }, + { + "continent": "Asia", + "country": "Pakistan", + "country_iso_a3": "PAK" + }, + { + "continent": "Asia", + "country": "Afghanistan", + "country_iso_a3": "AFG" + }, + { + "continent": "Asia", + "country": "Tajikistan", + "country_iso_a3": "TJK" + }, + { + "continent": "Asia", + "country": "Kyrgyzstan", + "country_iso_a3": "KGZ" + }, + { + "continent": "Asia", + "country": "Turkmenistan", + "country_iso_a3": "TKM" + }, + { + "continent": "Asia", + "country": "Iran", + "country_iso_a3": "IRN" + }, + { + "continent": "Asia", + "country": "Syria", + "country_iso_a3": "SYR" + }, + { + "continent": "Asia", + "country": "Armenia", + "country_iso_a3": "ARM" + }, + { + "continent": "Europe", + "country": "Sweden", + "country_iso_a3": "SWE" + }, + { + "continent": "Europe", + "country": "Belarus", + "country_iso_a3": "BLR" + }, + { + "continent": "Europe", + "country": "Ukraine", + "country_iso_a3": "UKR" + }, + { + "continent": "Europe", + "country": "Poland", + "country_iso_a3": "POL" + }, + { + "continent": "Europe", + "country": "Austria", + "country_iso_a3": "AUT" + }, + { + "continent": "Europe", + "country": "Hungary", + "country_iso_a3": "HUN" + }, + { + "continent": "Europe", + "country": "Moldova", + "country_iso_a3": "MDA" + }, + { + "continent": "Europe", + "country": "Romania", + "country_iso_a3": "ROU" + }, + { + "continent": "Europe", + "country": "Lithuania", + "country_iso_a3": "LTU" + }, + { + "continent": "Europe", + "country": "Latvia", + "country_iso_a3": "LVA" + }, + { + "continent": "Europe", + "country": "Estonia", + "country_iso_a3": "EST" + }, + { + "continent": "Europe", + "country": "Germany", + "country_iso_a3": "DEU" + }, + { + "continent": "Europe", + "country": "Bulgaria", + "country_iso_a3": "BGR" + }, + { + "continent": "Europe", + "country": "Greece", + "country_iso_a3": "GRC" + }, + { + "continent": "Asia", + "country": "Turkey", + "country_iso_a3": "TUR" + }, + { + "continent": "Europe", + "country": "Albania", + "country_iso_a3": "ALB" + }, + { + "continent": "Europe", + "country": "Croatia", + "country_iso_a3": "HRV" + }, + { + "continent": "Europe", + "country": "Switzerland", + "country_iso_a3": "CHE" + }, + { + "continent": "Europe", + "country": "Luxembourg", + "country_iso_a3": "LUX" + }, + { + "continent": "Europe", + "country": "Belgium", + "country_iso_a3": "BEL" + }, + { + "continent": "Europe", + "country": "Netherlands", + "country_iso_a3": "NLD" + }, + { + "continent": "Europe", + "country": "Portugal", + "country_iso_a3": "PRT" + }, + { + "continent": "Europe", + "country": "Spain", + "country_iso_a3": "ESP" + }, + { + "continent": "Europe", + "country": "Ireland", + "country_iso_a3": "IRL" + }, + { + "continent": "Oceania", + "country": "New Caledonia", + "country_iso_a3": "NCL" + }, + { + "continent": "Oceania", + "country": 
"Solomon Is.", + "country_iso_a3": "SLB" + }, + { + "continent": "Oceania", + "country": "New Zealand", + "country_iso_a3": "NZL" + }, + { + "continent": "Oceania", + "country": "Australia", + "country_iso_a3": "AUS" + }, + { + "continent": "Asia", + "country": "Sri Lanka", + "country_iso_a3": "LKA" + }, + { + "continent": "Asia", + "country": "China", + "country_iso_a3": "CHN" + }, + { + "continent": "Asia", + "country": "Taiwan", + "country_iso_a3": "TWN" + }, + { + "continent": "Europe", + "country": "Italy", + "country_iso_a3": "ITA" + }, + { + "continent": "Europe", + "country": "Denmark", + "country_iso_a3": "DNK" + }, + { + "continent": "Europe", + "country": "United Kingdom", + "country_iso_a3": "GBR" + }, + { + "continent": "Europe", + "country": "Iceland", + "country_iso_a3": "ISL" + }, + { + "continent": "Asia", + "country": "Azerbaijan", + "country_iso_a3": "AZE" + }, + { + "continent": "Asia", + "country": "Georgia", + "country_iso_a3": "GEO" + }, + { + "continent": "Asia", + "country": "Philippines", + "country_iso_a3": "PHL" + }, + { + "continent": "Asia", + "country": "Malaysia", + "country_iso_a3": "MYS" + }, + { + "continent": "Asia", + "country": "Brunei", + "country_iso_a3": "BRN" + }, + { + "continent": "Europe", + "country": "Slovenia", + "country_iso_a3": "SVN" + }, + { + "continent": "Europe", + "country": "Finland", + "country_iso_a3": "FIN" + }, + { + "continent": "Europe", + "country": "Slovakia", + "country_iso_a3": "SVK" + }, + { + "continent": "Europe", + "country": "Czechia", + "country_iso_a3": "CZE" + }, + { + "continent": "Africa", + "country": "Eritrea", + "country_iso_a3": "ERI" + }, + { + "continent": "Asia", + "country": "Japan", + "country_iso_a3": "JPN" + }, + { + "continent": "South America", + "country": "Paraguay", + "country_iso_a3": "PRY" + }, + { + "continent": "Asia", + "country": "Yemen", + "country_iso_a3": "YEM" + }, + { + "continent": "Asia", + "country": "Saudi Arabia", + "country_iso_a3": "SAU" + }, + { + "continent": "Antarctica", + "country": "Antarctica", + "country_iso_a3": "ATA" + }, + { + "continent": "Asia", + "country": "N. Cyprus", + "country_iso_a3": "-99" + }, + { + "continent": "Asia", + "country": "Cyprus", + "country_iso_a3": "CYP" + }, + { + "continent": "Africa", + "country": "Morocco", + "country_iso_a3": "MAR" + }, + { + "continent": "Africa", + "country": "Egypt", + "country_iso_a3": "EGY" + }, + { + "continent": "Africa", + "country": "Libya", + "country_iso_a3": "LBY" + }, + { + "continent": "Africa", + "country": "Ethiopia", + "country_iso_a3": "ETH" + }, + { + "continent": "Africa", + "country": "Djibouti", + "country_iso_a3": "DJI" + }, + { + "continent": "Africa", + "country": "Somaliland", + "country_iso_a3": "-99" + }, + { + "continent": "Africa", + "country": "Uganda", + "country_iso_a3": "UGA" + }, + { + "continent": "Africa", + "country": "Rwanda", + "country_iso_a3": "RWA" + }, + { + "continent": "Europe", + "country": "Bosnia and Herz.", + "country_iso_a3": "BIH" + }, + { + "continent": "Europe", + "country": "Macedonia", + "country_iso_a3": "MKD" + }, + { + "continent": "Europe", + "country": "Serbia", + "country_iso_a3": "SRB" + }, + { + "continent": "Europe", + "country": "Montenegro", + "country_iso_a3": "MNE" + }, + { + "continent": "Europe", + "country": "Kosovo", + "country_iso_a3": "-99" + }, + { + "continent": "North America", + "country": "Trinidad and Tobago", + "country_iso_a3": "TTO" + }, + { + "continent": "Africa", + "country": "S. 
Sudan", + "country_iso_a3": "SSD" + } + ] +} diff --git a/src/physrisk/data/static/world.py b/src/physrisk/data/static/world.py new file mode 100644 index 00000000..84acec03 --- /dev/null +++ b/src/physrisk/data/static/world.py @@ -0,0 +1,52 @@ +import importlib.resources +import json +from typing import Dict, List, Union + +import numpy as np + +import physrisk.data.static +from physrisk.api.v1.common import Countries, Country + + +def get_countries_from_resource(): + with importlib.resources.open_text(physrisk.data.static, "world.json") as f: + countries = Countries(**json.load(f)) + return dict((c.country, c) for c in countries.items) + + +def get_countries_json(): + """Get countries and continents, populating json.""" + + import geopandas as gpd + + world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) # type: ignore + countries = world[["continent", "name", "iso_a3"]] + + countries = [ + Country(continent=continent, country=country, country_iso_a3=code) + for (continent, country, code) in zip(world["continent"], world["name"], world["iso_a3"]) + ] + + return json.dumps(Countries(items=countries).dict(), sort_keys=True, indent=4) + + +def get_countries_and_continents(longitudes: Union[List[float], np.ndarray], latitudes: Union[List[float], np.ndarray]): + """Only for use when on-boarding; look up country and continent (e.g. for use in vulnerability models) by + latitude and longitude.""" + + # Geopandas draws in a number of libraries, including GDAL, so we probably(?) + # want to confine its use to pre-processing / on-boarding of data + # In particular, country/continent look-up is probably something to do pre-onboarding + + import geopandas as gpd + + # consider using map here https://gadm.org/download_world.html + world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) # type: ignore + gdf = gpd.GeoDataFrame(crs=world.crs, geometry=gpd.points_from_xy(longitudes, latitudes)) + result = gpd.sjoin(gdf, world, how="left") + + return list(result["name"]), list(result["continent"]) + + +class World: + countries: Dict[str, Country] = get_countries_from_resource() diff --git a/src/physrisk/data/zarr_reader.py b/src/physrisk/data/zarr_reader.py new file mode 100644 index 00000000..c23c8c85 --- /dev/null +++ b/src/physrisk/data/zarr_reader.py @@ -0,0 +1,379 @@ +import os +from pathlib import PurePosixPath +from typing import Callable, MutableMapping, Optional + +import numpy as np +import s3fs +import zarr +from affine import Affine +from pyproj import Transformer +from shapely import MultiPoint, Point, affinity + + +def get_env(key: str, default: Optional[str] = None) -> str: + value = os.environ.get(key) + if value is None: + if default is not None: + return default + raise ValueError(f"environment variable {key} not present") + else: + return value + + +class ZarrReader: + """Reads hazard event data from Zarr files, including OSC-format-specific attributes.""" + + # environment variable names: + __access_key = "OSC_S3_ACCESS_KEY" + __secret_key = "OSC_S3_SECRET_KEY" + __S3_bucket = "OSC_S3_BUCKET" # e.g. physrisk-hazard-indicators + __zarr_path = "OSC_S3_HAZARD_PATH" # hazard/hazard.zarr + + def __init__( + self, + store: Optional[MutableMapping] = None, + path_provider: Optional[Callable[..., str]] = None, + get_env: Callable[[str, Optional[str]], str] = get_env, + ): + """Create a ZarrReader. + + Args: + store: if not supplied, create S3Map store. + path_provider: function that provides path to the data set based on a set ID. 
+ get_env: allows override obtaining of environment variables. + """ + if store is None: + # if no store is provided, attempt to connect to an S3 bucket + if get_env is None: + raise TypeError("if no store specified, get_env is required to provide credentials") + + store = ZarrReader.create_s3_zarr_store(get_env) + + self._root = zarr.open(store, mode="r") + self._path_provider = path_provider + pass + + def all_data(self, set_id: str): + path = self._path_provider(set_id) if self._path_provider is not None else set_id + z = self._root[path] # e.g. inundation/wri/v2/ + return z + + @classmethod + def create_s3_zarr_store(cls, get_env: Callable[[str, Optional[str]], str] = get_env): + access_key = get_env(cls.__access_key, None) + secret_key = get_env(cls.__secret_key, None) + s3_bucket = get_env(cls.__S3_bucket, "physrisk-hazard-indicators") + zarr_path = get_env(cls.__zarr_path, "hazard/hazard.zarr") + + s3 = s3fs.S3FileSystem(anon=False, key=access_key, secret=secret_key) + + store = s3fs.S3Map( + root=str(PurePosixPath(s3_bucket, zarr_path)), + s3=s3, + check=False, + ) + return store + + def get_curves(self, set_id, longitudes, latitudes, interpolation="floor"): + """Get intensity curve for each latitude and longitude coordinate pair. + + Args: + set_id: string or tuple representing data set, converted into path by path_provider. + longitudes: list of longitudes. + latitudes: list of latitudes. + interpolation: interpolation method, "floor", "linear", "max" or "min". + + Returns: + curves: numpy array of intensity (no. coordinate pairs, no. return periods). + return_periods: return periods in years. + """ + # assume that calls to this are large, not chatty + if len(longitudes) != len(latitudes): + raise ValueError("length of longitudes and latitudes not equal") + path = self._path_provider(set_id) if self._path_provider is not None else set_id + z = self._root[path] # e.g. inundation/wri/v2/ + + # OSC-specific attributes contain transform and return periods + t = z.attrs["transform_mat3x3"] # type: ignore + transform = Affine(t[0], t[1], t[2], t[3], t[4], t[5]) + crs = z.attrs.get("crs", "epsg:4326") + + # in the case of acute risks, index_values will contain the return periods + index_values = self.get_index_values(z) + image_coords = self._get_coordinates( + longitudes, latitudes, crs, transform, pixel_is_area=interpolation != "floor" + ) + + if interpolation == "floor": + image_coords = np.floor(image_coords).astype(int) + image_coords[0, :] %= z.shape[2] + iz = np.tile(np.arange(z.shape[0]), image_coords.shape[1]) # type: ignore + iy = np.repeat(image_coords[1, :], len(index_values)) + ix = np.repeat(image_coords[0, :], len(index_values)) + + data = z.get_coordinate_selection((iz, iy, ix)) # type: ignore + return data.reshape([len(longitudes), len(index_values)]), np.array(index_values) + + elif interpolation in ["linear", "max", "min"]: + res = ZarrReader._linear_interp_frac_coordinates(z, image_coords, index_values, interpolation=interpolation) + return res, np.array(index_values) + + else: + raise ValueError("interpolation must have value 'floor', 'linear', 'max' or 'min'") + + def get_index_values(self, z: zarr.Array): + index_values = z.attrs.get("index_values", [0]) + if index_values is None: + index_values = [0] + return index_values + + def get_max_curves(self, set_id, shapes, interpolation="floor"): + """Get maximal intensity curve for a given geometry. + + Args: + set_id: string or tuple representing data set, converted into path by path_provider.
+ shapes: list of shapely.Polygon. + interpolation: interpolation method, "floor", "linear", "max" or "min". + + Returns: + curves_max: numpy array of maximum intensity on the grid for a given geometry + (no. coordinate pairs, no. return periods). + return_periods: return periods in years. + """ + path = self._path_provider(set_id) if self._path_provider is not None else set_id + z = self._root[path] # e.g. inundation/wri/v2/ + + # in the case of acute risks, index_values will contain the return periods + index_values = self.get_index_values(z) + + t = z.attrs["transform_mat3x3"] # type: ignore + transform = Affine(t[0], t[1], t[2], t[3], t[4], t[5]) + + matrix = np.array(~transform).reshape(3, 3).transpose()[:, :-1].reshape(6) + transformed_shapes = [affinity.affine_transform(shape, matrix) for shape in shapes] + + pixel_offset = 0.5 if interpolation != "floor" else 0.0 + multipoints = [ + MultiPoint( + [ + (x - pixel_offset, y - pixel_offset) + for x in range(int(np.floor(shape.bounds[0])), int(np.ceil(shape.bounds[2])) + 1) + for y in range(int(np.floor(shape.bounds[1])), int(np.ceil(shape.bounds[3])) + 1) + ] + ).intersection(shape) + for shape in transformed_shapes + ] + multipoints = [ + ( + Point(0.5 * (shape.bounds[0] + shape.bounds[2]), 0.5 * (shape.bounds[1] + shape.bounds[3])) + if multipoint.is_empty + else multipoint + ) + for shape, multipoint in zip(transformed_shapes, multipoints) + ] + multipoints = [MultiPoint([(point.x, point.y)]) if isinstance(point, Point) else point for point in multipoints] + + if interpolation == "floor": + image_coords = np.floor( + np.array([[point.x, point.y] for multipoint in multipoints for point in multipoint.geoms]).transpose() + ).astype(int) + image_coords[0, :] %= z.shape[2] + + iz = np.tile(np.arange(z.shape[0]), image_coords.shape[1]) # type: ignore + iy = np.repeat(image_coords[1, :], len(index_values)) + ix = np.repeat(image_coords[0, :], len(index_values)) + + curves = z.get_coordinate_selection((iz, iy, ix)) + curves = curves.reshape(image_coords.shape[1], len(index_values)) + + elif interpolation in ["linear", "max", "min"]: + multipoints = [ + multipoint.union( + transformed_shape + if isinstance(transformed_shape, Point) + else MultiPoint(transformed_shape.exterior.coords) + ) + for transformed_shape, multipoint in zip(transformed_shapes, multipoints) + ] + image_coords = np.array( + [[point.x, point.y] for multipoint in multipoints for point in multipoint.geoms] + ).transpose() + + curves = ZarrReader._linear_interp_frac_coordinates( + z, image_coords, index_values, interpolation=interpolation + ) + + else: + raise ValueError("interpolation must have value 'floor', 'linear', 'max' or 'min'") + + numbers_of_points_per_shape = [len(multipoint.geoms) for multipoint in multipoints] + numbers_of_points_per_shape_cumulated = np.cumsum(numbers_of_points_per_shape) + curves_max = np.array( + [ + np.nanmax(curves[index - number_of_points_per_shape : index, :], axis=0) + for number_of_points_per_shape, index in zip( + numbers_of_points_per_shape, numbers_of_points_per_shape_cumulated + ) + ] + ) + + return curves_max, np.array(index_values) + + def get_max_curves_on_grid(self, set_id, longitudes, latitudes, interpolation="floor", delta_km=1.0, n_grid=5): + """Get maximal intensity curve for a grid around a given latitude and longitude coordinate pair.
+ It is almost equivalent to: + self.get_max_curves + ( + set_id, + [ + Polygon( + ( + (x - 0.5 * delta_deg, y - 0.5 * delta_deg), + (x - 0.5 * delta_deg, y + 0.5 * delta_deg), + (x + 0.5 * delta_deg, y + 0.5 * delta_deg), + (x + 0.5 * delta_deg, y - 0.5 * delta_deg) + ) + ) + for x, y in zip(longitudes, latitudes) + ] + interpolation + ) + Args: + set_id: string or tuple representing data set, converted into path by path_provider. + longitudes: list of longitudes. + latitudes: list of latitudes. + interpolation: interpolation method, "floor", "linear", "max" or "min". + delta_km: linear distance in kilometres of the side of the square grid surrounding a given position. + n_grid: number of grid points along the latitude and longitude dimensions used for + calculating the maximal value. + + Returns: + curves_max: numpy array of maximum intensity on the grid for a given coordinate pair + (no. coordinate pairs, no. return periods). + return_periods: return periods in years. + """ + kilometres_per_degree = 110.574 + delta_deg = delta_km / kilometres_per_degree + + n_data = len(latitudes) + + grid = np.linspace(-0.5, 0.5, n_grid) + lats_grid_baseline = np.broadcast_to( + np.array(latitudes).reshape(n_data, 1, 1), (len(latitudes), n_grid, n_grid) + ) + lons_grid_baseline = np.broadcast_to( + np.array(longitudes).reshape(n_data, 1, 1), (len(longitudes), n_grid, n_grid) + ) + lats_grid_offsets = delta_deg * grid.reshape((1, n_grid, 1)) + lons_grid_offsets = ( + delta_deg + * grid.reshape((1, 1, n_grid)) + / (np.cos((np.pi / 180) * np.array(latitudes)).reshape(n_data, 1, 1)) + ) + lats_grid = lats_grid_baseline + lats_grid_offsets + lons_grid = lons_grid_baseline + lons_grid_offsets + curves, return_periods = self.get_curves( + set_id, lons_grid.reshape(-1), lats_grid.reshape(-1), interpolation=interpolation + ) + curves_max = np.nanmax(curves.reshape((n_data, n_grid * n_grid, len(return_periods))), axis=1) + return curves_max, return_periods + + @staticmethod + def _linear_interp_frac_coordinates(z, image_coords, return_periods, interpolation="linear"): + """Return linear interpolated data from fractional row and column coordinates.""" + icx = np.floor(image_coords[0, :]).astype(int)[..., None] + # note periodic boundary condition + ix = np.concatenate( + [icx % z.shape[2], icx % z.shape[2], (icx + 1) % z.shape[2], (icx + 1) % z.shape[2]], axis=1 + )[..., None].repeat( + len(return_periods), axis=2 + ) # points, 4, return_periods + + icy = np.floor(image_coords[1, :]).astype(int)[..., None] + iy = np.concatenate([icy, icy + 1, icy, icy + 1], axis=1)[..., None].repeat(len(return_periods), axis=2) + + iz = ( + np.arange(len(return_periods), dtype=int)[None, ...] + .repeat(4, axis=0)[None, ...] + .repeat(image_coords.shape[1], axis=0) + ) + + data = z.get_coordinate_selection((iz, iy, ix)) # type: ignore # index, row, column + + # nodata in the zarr files are considered to be + # 1) float("nan") (which Zarr supports) or 2) nan_value of -9999.0 + # 2 is legacy behaviour: for Zarr better to use float("nan") + nan_input_value = -9999.0 + # retain ability to output arbitrary NaN value, although might be no longer needed as + # physrisk deals separately, e.g. 
with removing NaNs before passing back via JSON + nan_output_value = float("nan") + + if interpolation == "linear": + xf = image_coords[0, :][..., None] - icx # type: ignore + yf = image_coords[1, :][..., None] - icy # type: ignore + w0 = (1 - yf) * (1 - xf) + w1 = yf * (1 - xf) + w2 = (1 - yf) * xf + w3 = yf * xf + w = np.transpose(np.array([w0, w1, w2, w3]), (1, 0, 2)) + mask = 1 - np.isnan(np.where(data == nan_input_value, np.nan, data)) + w_good = w * mask + w_good_sum = np.transpose( + np.sum(w_good, axis=1).reshape(tuple([1]) + np.sum(w_good, axis=1).shape), axes=(1, 0, 2) + ) + w_used = np.divide(w_good, np.where(w_good_sum == 0.0, np.nan, w_good_sum)) + return np.nan_to_num(np.sum(w_used * data, axis=1), nan=nan_output_value) + + elif interpolation == "max": + data = np.where(data == nan_input_value, -np.inf, data) + return np.nan_to_num( + np.maximum.reduce([data[:, 0, :], data[:, 1, :], data[:, 2, :], data[:, 3, :]]), + nan=nan_output_value, + neginf=nan_output_value, + ) + + elif interpolation == "min": + data = np.where(data == nan_input_value, np.inf, data) + return np.nan_to_num( + np.minimum.reduce([data[:, 0, :], data[:, 1, :], data[:, 2, :], data[:, 3, :]]), + nan=nan_output_value, + posinf=nan_output_value, + ) + + else: + raise ValueError("interpolation must have value 'linear', 'max' or 'min'") + + @staticmethod + def _get_coordinates(longitudes, latitudes, crs: str, transform: Affine, pixel_is_area: bool): + if crs.lower() != "epsg:4326": + transproj = Transformer.from_crs("epsg:4326", crs, always_xy=True) + x, y = transproj.transform(longitudes, latitudes) + else: + x, y = longitudes, latitudes + coords = np.vstack((x, y, np.ones(len(longitudes)))) # type: ignore + inv_trans = ~transform + mat = np.array(inv_trans).reshape(3, 3) + frac_image_coords = mat @ coords + if pixel_is_area: + frac_image_coords[:2, :] -= 0.5 + return frac_image_coords + + @staticmethod + def _get_equivalent_buffer_in_arc_degrees(latitude, buffer_in_metres): + """ + area = radius * radius * cos(p) * dp * dq = buffer_in_metres * buffer_in_metres + """ + semi_major_axis = 6378137 + semi_minor_axis = 6356752.314245 + degrees_to_radians = np.pi / 180.0 + latitude_in_radians = latitude * degrees_to_radians + cosinus = np.abs(np.cos(latitude_in_radians)) + sinus = np.abs(np.sin(latitude_in_radians)) + buffer_in_arc_degrees = ( + buffer_in_metres + * np.sqrt((cosinus / semi_major_axis) ** 2 + (sinus / semi_minor_axis) ** 2) + / degrees_to_radians + ) + if 0.0 < cosinus: + buffer_in_arc_degrees /= np.sqrt(cosinus) + return buffer_in_arc_degrees diff --git a/src/visualization/dash/__init__.py b/src/physrisk/hazard_models/__init__.py similarity index 100% rename from src/visualization/dash/__init__.py rename to src/physrisk/hazard_models/__init__.py diff --git a/src/physrisk/hazard_models/core_hazards.py b/src/physrisk/hazard_models/core_hazards.py new file mode 100644 index 00000000..f576aef2 --- /dev/null +++ b/src/physrisk/hazard_models/core_hazards.py @@ -0,0 +1,185 @@ +from typing import Dict, Iterable, NamedTuple, Optional, Protocol + +from physrisk.api.v1.hazard_data import HazardResource +from physrisk.data.hazard_data_provider import HazardDataHint, SourcePath +from physrisk.data.inventory import EmbeddedInventory, Inventory +from physrisk.kernel import hazards +from physrisk.kernel.hazards import ChronicHeat, CoastalInundation, RiverineInundation, Wind + + +class ResourceSubset: + def __init__(self, resources: Iterable[HazardResource]): + self.resources = list(resources) + + def any(self): +
return any(self.resources) + + def first(self): + return next(r for r in self.resources) + + def match(self, hint: HazardDataHint): + return next(r for r in self.resources if r.path == hint.path) + + def prefer_group_id(self, group_id: str): + with_condition = self.with_group_id(group_id) + return with_condition if with_condition.any() else self + + def with_group_id(self, group_id: str): + return ResourceSubset(r for r in self.resources if r.group_id == group_id) + + def with_model_gcm(self, gcm: str): + return ResourceSubset(r for r in self.resources if r.indicator_model_gcm == gcm) + + def with_model_id(self, model_id: str): + return ResourceSubset(r for r in self.resources if r.indicator_model_id == model_id) + + +class ResourceSelector(Protocol): + """For a particular hazard type and indicator_id (specifying the type of indicator), + defines the rule for selecting a resource from + all matches. The selection rule depends on scenario and year.""" + + def __call__( + self, *, candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None + ) -> HazardResource: ... + + +class ResourceSelectorKey(NamedTuple): + hazard_type: type + indicator_id: str + + +class InventorySourcePaths: + """Class used to generate SourcePaths by selecting the appropriate HazardResource from the + Inventory of HazardResources. + """ + + def __init__(self, inventory: Inventory): + self._inventory = inventory + self._selectors: Dict[ResourceSelectorKey, ResourceSelector] = {} + + def source_paths(self) -> Dict[type, SourcePath]: + all_hazard_types = list(set(htype for ((htype, _), _) in self._inventory.resources_by_type_id.items())) + source_paths: Dict[type, SourcePath] = {} + for hazard_type in all_hazard_types: + source_paths[hazards.hazard_class(hazard_type)] = self._get_resource_source_path( + hazard_type, + ) + return source_paths + + def add_selector(self, hazard_type: type, indicator_id: str, selector: ResourceSelector): + self._selectors[ResourceSelectorKey(hazard_type, indicator_id)] = selector + + def _get_resource_source_path(self, hazard_type: str): + def _get_source_path(*, indicator_id: str, scenario: str, year: int, hint: Optional[HazardDataHint] = None): + # all matching resources in the inventory + selector = self._selectors.get( + ResourceSelectorKey(hazard_type=hazards.hazard_class(hazard_type), indicator_id=indicator_id), + self._no_selector, + ) + resources = self._inventory.resources_by_type_id[(hazard_type, indicator_id)] + if len(resources) == 0: + raise RuntimeError( + f"unable to find any resources for hazard {hazard_type} " f"and indicator ID {indicator_id}" + ) + candidates = ResourceSubset(resources) + try: + if hint is not None: + resource = candidates.match(hint) + else: + resource = selector(candidates=candidates, scenario=scenario, year=year, hint=hint) + except Exception: + raise RuntimeError( + f"unable to select unique resource for hazard {hazard_type} " f"and indicator ID {indicator_id}" + ) + proxy_scenario = ( + cmip6_scenario_to_rcp(scenario) + if resource.scenarios[0].id.startswith("rcp") or resource.scenarios[-1].id.startswith("rcp") + else scenario + ) + if scenario == "historical": + scenarios = next(iter(s for s in resource.scenarios if s.id == "historical"), None) + if scenarios is None: + scenarios = next(s for s in sorted(resource.scenarios, key=lambda s: next(y for y in s.years))) + proxy_scenario = scenarios.id + year = next(s for s in scenarios.years) + return resource.path.format(id=indicator_id, scenario=proxy_scenario, year=year) + 
+ return _get_source_path + + @staticmethod + def _no_selector(candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None): + return candidates.first() + + +class CoreInventorySourcePaths(InventorySourcePaths): + def __init__(self, inventory: Inventory): + super().__init__(inventory) + for indicator_id in ["mean_work_loss/low", "mean_work_loss/medium", "mean_work_loss/high"]: + self.add_selector(ChronicHeat, indicator_id, self._select_chronic_heat) + self.add_selector(ChronicHeat, "mean/degree/days/above/32c", self._select_chronic_heat) + self.add_selector(RiverineInundation, "flood_depth", self._select_riverine_inundation) + self.add_selector(CoastalInundation, "flood_depth", self._select_coastal_inundation) + self.add_selector(Wind, "max_speed", self._select_wind) + + def resources_with(self, *, hazard_type: type, indicator_id: str): + return ResourceSubset(self._inventory.resources_by_type_id[(hazard_type.__name__, indicator_id)]) + + @staticmethod + def _select_chronic_heat( + candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None + ): + return candidates.with_model_gcm("ACCESS-CM2").first() + + @staticmethod + def _select_coastal_inundation( + candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None + ): + return ( + candidates.with_model_id("nosub").first() + if scenario == "historical" + else candidates.with_model_id("wtsub/95").first() + ) + + @staticmethod + def _select_riverine_inundation( + candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None + ): + return ( + candidates.with_model_gcm("historical").first() + if scenario == "historical" + else candidates.with_model_gcm("MIROC-ESM-CHEM").first() + ) + + @staticmethod + def _select_wind(candidates: ResourceSubset, scenario: str, year: int, hint: Optional[HazardDataHint] = None): + return candidates.prefer_group_id("iris_osc").first() + + +def cmip6_scenario_to_rcp(scenario: str): + """Convention is that CMIP6 scenarios are expressed by identifiers: + SSP1-2.6: 'ssp126' + SSP2-4.5: 'ssp245' + SSP5-8.5: 'ssp585' etc. + Here we translate to form + RCP-4.5: 'rcp4p5' + RCP-8.5: 'rcp8p5' etc. 
+ """ + if scenario == "ssp126": + return "rcp2p6" + elif scenario == "ssp245": + return "rcp4p5" + elif scenario == "ssp585": + return "rcp8p5" + else: + if scenario not in ["rcp2p6", "rcp4p5", "rcp6p0", "rcp8p5", "historical"]: + raise ValueError(f"unexpected scenario {scenario}") + return scenario + + +def get_default_source_path_provider(inventory: Inventory = EmbeddedInventory()): + return CoreInventorySourcePaths(inventory) + + +def get_default_source_paths(inventory: Inventory = EmbeddedInventory()): + return CoreInventorySourcePaths(inventory).source_paths() diff --git a/src/physrisk/kernel/__init__.py b/src/physrisk/kernel/__init__.py index 3259d6b5..41f35bb3 100644 --- a/src/physrisk/kernel/__init__.py +++ b/src/physrisk/kernel/__init__.py @@ -1,8 +1,6 @@ -from .asset_event_distrib import AssetEventDistrib -from .exceedance_curve import ExceedanceCurve -from .vulnerability_distrib import VulnerabilityDistrib -from .model import Model -from .events import Drought, Inundation from .assets import Asset, PowerGeneratingAsset -from .asset_impact import get_impact_distrib -from .calculation import calculate_impacts \ No newline at end of file +from .curve import ExceedanceCurve +from .hazard_event_distrib import HazardEventDistrib +from .hazards import Drought, Hazard, RiverineInundation +from .vulnerability_distrib import VulnerabilityDistrib +from .vulnerability_model import VulnerabilityModelAcuteBase diff --git a/src/physrisk/kernel/asset_event_distrib.py b/src/physrisk/kernel/asset_event_distrib.py deleted file mode 100644 index 4eab4921..00000000 --- a/src/physrisk/kernel/asset_event_distrib.py +++ /dev/null @@ -1,45 +0,0 @@ -import physrisk.kernel.curve as curve -import numpy as np -from typing import Optional, List - -from physrisk.kernel.exceedance_curve import ExceedanceCurve - -class AssetEventDistrib: - """Event intensity distributions specific to an asset.""" - - __slots__ = ["__event_type", "__intensity_bins", "__prob", "__exceedance"] - - def __init__(self, - event_type: type, - intensity_bins: List[float], - prob: List[float], - exceedance : Optional[ExceedanceCurve] = None): - """Create a new asset event distribution. 
- Args: - event_type: type of event - intensity_bins: non-decreasing intensity bin bounds - prob: probabilities with size [len(intensity_bins) - 1] - exceedence: exceedence curve, for reference - """ - self.__event_type = event_type - self.__intensity_bins = np.array(intensity_bins) - self.__prob = np.array(prob) - self.__exceedance = exceedance - - def intensity_bins_explicit(self): - return zip(self.__intensity_bins[0:-1], self.__intensity_bins[1:]) - - #def to_exceedance_curve(self): - # return curve.to_exceedance_curve(self.__intensity_bins, self.__prob) - - @property - def intensity_bins(self) -> np.ndarray: - return self.__intensity_bins - - @property - def prob(self) -> np.ndarray: - return self.__prob - - @property - def exceedance(self): - return self.__exceedance \ No newline at end of file diff --git a/src/physrisk/kernel/asset_impact.py b/src/physrisk/kernel/asset_impact.py deleted file mode 100644 index 67267a69..00000000 --- a/src/physrisk/kernel/asset_impact.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np -#import numpy.typing as npt -from abc import ABC, abstractmethod -from typing import Tuple, Union, List, Optional, Any -from .asset_event_distrib import AssetEventDistrib -from .impact_distrib import ImpactDistrib -from .vulnerability_distrib import VulnerabilityDistrib - -def get_impact_distrib(event_dist : AssetEventDistrib, vulnerability_dist : VulnerabilityDistrib) -> ImpactDistrib: - impact_prob = vulnerability_dist.prob_matrix.T @ event_dist.prob - return ImpactDistrib(vulnerability_dist.event_type, vulnerability_dist.impact_bins, impact_prob) - -class AssetImpact: - """Calculates the impacts associated with a portfolio of assets.""" - - def __init__(self, assets, vulnerabilities): - pass - -class AssetEventProvider(ABC): - - @abstractmethod - def get_asset_events(assets, eventTypes): - """Source event distributions in the locale of each asset for events of certain types """ - -class ModelsBuilder(ABC): - """Provides VulnerabilityModels and EventProviders for a type of aset.""" - - @abstractmethod - def get_vulnerability_model(assetType): - pass - - @abstractmethod - def get_event_data_provider(assetType): - """Return a list of backends matching the specified filtering. - Args: - assetType (AssetType): type of asset. - Returns: - dict[EventType, AssetEvents]: a list of Backends that match the filtering - criteria. 
- """ - pass - \ No newline at end of file diff --git a/src/physrisk/kernel/assets.py b/src/physrisk/kernel/assets.py index 476a2e48..48e4ff98 100644 --- a/src/physrisk/kernel/assets.py +++ b/src/physrisk/kernel/assets.py @@ -1,7 +1,140 @@ +from dataclasses import dataclass +from enum import Enum +from typing import Optional + + +# 'primary_fuel' entries in Global Power Plant Database v1.3.0 (World Resources Institute) +# https://wri-dataportal-prod.s3.amazonaws.com/manual/global_power_plant_database_v_1_3 +class FuelKind(Enum): + Biomass = 1 + Coal = 2 + Cogeneration = 3 + Gas = 4 + Geothermal = 5 + Hydro = 6 + Nuclear = 7 + Oil = 8 + Other = 9 + Petcoke = 10 + Solar = 11 + Storage = 12 + Waste = 13 + WaveAndTidal = 14 + Wind = 15 + + +class CoolingKind(Enum): + # Air Temperature, Inundation + Dry = 1 + + # Drought, Inundation, Water Temperature, Water Stress + OnceThrough = 2 + + # Drought, Inundation, Water Temperature, Water Stress (TO CLARIFY), Wet-Bulb Temperature + Recirculating = 3 + + +class TurbineKind(Enum): + Gas = 1 + Steam = 2 + + class Asset: def __init__(self, latitude: float, longitude: float, **kwargs): self.latitude = latitude self.longitude = longitude self.__dict__.update(kwargs) -class PowerGeneratingAsset(Asset) : pass \ No newline at end of file + +# WindFarm as separate +@dataclass +class WindTurbine(Asset): + capacity: Optional[float] = None + hub_height: Optional[float] = None + cut_in_speed: Optional[float] = None + cut_out_speed: Optional[float] = None + fixed_base: Optional[bool] = True + rotor_diameter: Optional[float] = None + + +class PowerGeneratingAsset(Asset): + def __init__( + self, + latitude: float, + longitude: float, + *, + type: Optional[str] = None, + location: Optional[str] = None, + capacity: Optional[float] = None, + ): + super().__init__(latitude, longitude) + + self.type: Optional[str] = type + self.location: Optional[str] = location + self.capacity: Optional[float] = capacity + + if type is not None: + self.primary_fuel: Optional[FuelKind] = None + archetypes = type.split("/") + if 0 < len(archetypes): + self.primary_fuel = FuelKind[archetypes[0]] + + +class ThermalPowerGeneratingAsset(PowerGeneratingAsset): + def __init__( + self, + latitude: float, + longitude: float, + *, + type: Optional[str] = None, + location: Optional[str] = None, + capacity: Optional[float] = None, + ): + super().__init__(latitude, longitude, type=type, location=location, capacity=capacity) + + self.turbine: Optional[TurbineKind] = None + self.cooling: Optional[CoolingKind] = None + + if type is not None: + archetypes = type.split("/") + if 1 < len(archetypes): + self.turbine = TurbineKind[archetypes[1]] + if 2 < len(archetypes): + assert self.turbine == TurbineKind.Steam + self.cooling = CoolingKind[archetypes[2]] + + # Designed to be protected against 250-year inundation events in the + # baseline except for nuclear power plants which are designed to be + # protected against 10,000-year inundation events in the baseline: + def get_inundation_protection_return_period(self): + if self.primary_fuel is not None: + if self.primary_fuel == FuelKind.Nuclear: + return 10000.0 + return 250.0 + + +class RealEstateAsset(Asset): + def __init__(self, latitude: float, longitude: float, *, location: str, type: str): + super().__init__(latitude, longitude) + self.location = location + self.type = type + + +class ManufacturingAsset(Asset): + def __init__( + self, latitude: float, longitude: float, *, location: Optional[str] = None, type: Optional[str] = None + ): + 
super().__init__(latitude, longitude) + self.location = location + self.type = type + + +class IndustrialActivity(Asset): + def __init__(self, latitude: float, longitude: float, *, location: Optional[str] = None, type: str): + super().__init__(latitude, longitude) + self.location = location + self.type = type + + +class TestAsset(Asset): + pass diff --git a/src/physrisk/kernel/calculation.py b/src/physrisk/kernel/calculation.py index 9380e327..40de8f7f 100644 --- a/src/physrisk/kernel/calculation.py +++ b/src/physrisk/kernel/calculation.py @@ -1,62 +1,59 @@ - -from collections import defaultdict -from physrisk.kernel import Asset, AssetEventDistrib, ExceedanceCurve, VulnerabilityDistrib -from physrisk.kernel import Drought, Inundation -from physrisk.kernel import get_impact_distrib -import physrisk.data.data_requests as dr -from physrisk.data import ReturnPeriodEvDataResp -from physrisk.models import InundationModel -from physrisk.data.hazard.event_provider_wri import EventProviderWri -from physrisk.kernel.assets import PowerGeneratingAsset -import logging - -def __get_default_hazard_data_sources(cache_folder = None): - """Get default hazard data sources for each hazard type.""" - return { Inundation : EventProviderWri('web', cache_folder = cache_folder).get_inundation_depth } - -def __get_default_models(): +from typing import Dict, Sequence + +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.risk import RiskMeasureCalculator +from physrisk.risk_models.risk_models import RealEstateToyRiskMeasures +from physrisk.vulnerability_models import power_generating_asset_models as pgam +from physrisk.vulnerability_models.chronic_heat_models import ChronicHeatGZNModel +from physrisk.vulnerability_models.real_estate_models import ( + CoolingModel, + GenericTropicalCycloneModel, + RealEstateCoastalInundationModel, + RealEstateRiverineInundationModel, +) +from physrisk.vulnerability_models.thermal_power_generation_models import ( + ThermalPowerGenerationAirTemperatureModel, + ThermalPowerGenerationCoastalInundationModel, + ThermalPowerGenerationDroughtModel, + ThermalPowerGenerationRiverineInundationModel, + ThermalPowerGenerationWaterStressModel, + ThermalPowerGenerationWaterTemperatureModel, +) + +from .assets import IndustrialActivity, PowerGeneratingAsset, RealEstateAsset, TestAsset, ThermalPowerGeneratingAsset +from .hazard_model import HazardModel +from .vulnerability_model import VulnerabilityModelBase + + +def get_default_hazard_model() -> HazardModel: + # Model that gets hazard event data from Zarr storage + return ZarrHazardModel(source_paths=get_default_source_paths()) + + +def get_default_vulnerability_models() -> Dict[type, Sequence[VulnerabilityModelBase]]: """Get default exposure/vulnerability models for different asset types.""" - return { PowerGeneratingAsset : [ InundationModel ] } - -def calculate_impacts(assets, cache_folder = None, model_properties = None): - - # the types of model that apply to asset of a particular type - model_mapping = __get_default_models() - - # the different sources of hazard data - hazard_data_source = __get_default_hazard_data_sources(cache_folder = cache_folder) - - model_assets = defaultdict(list) - for asset in assets: - asset_type = type(asset) - mappings = model_mapping[asset_type] - for m in mappings: - model_assets[m].append(asset) - - detailed_results = {} - for model_type, assets in model_assets.items(): - logging.info("Applying model 
{0} to {1} assets of type {2}".format(model_type.__name__, len(assets), type(assets[0]).__name__)) - - model = model_type() if model_properties is None else model_type(**model_properties) - - event_requests_by_asset = [model.get_event_data_requests(asset) for asset in assets] - - event_requests = [req for event_request_by_asset in event_requests_by_asset for req in event_request_by_asset] - - responses = dr.process_requests(event_requests, hazard_data_source) - - for asset, requests in zip(assets, event_requests_by_asset): - hazard_data = [responses[req] for req in requests] - vul, event = model.get_distributions(asset, hazard_data) - impact = get_impact_distrib(event, vul) - detailed_results[asset] = DetailedResultItem(vul, event, impact, hazard_data) - - return detailed_results - -class DetailedResultItem: - def __init__(self, vulnerability, event, impact, hazard_data): - self.hazard_data = hazard_data - self.vulnerability = vulnerability - self.event = event - self.impact = impact - self.mean_impact = impact.mean_impact() + return { + PowerGeneratingAsset: [pgam.InundationModel()], + RealEstateAsset: [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + GenericTropicalCycloneModel(), + CoolingModel(), + ], + IndustrialActivity: [ChronicHeatGZNModel()], + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationWaterStressModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + TestAsset: [pgam.TemperatureModel()], + } + + +def get_default_risk_measure_calculators() -> Dict[type, RiskMeasureCalculator]: + """For asset-level risk measure, define the measure calculators to use.""" + return {RealEstateAsset: RealEstateToyRiskMeasures()} diff --git a/src/physrisk/kernel/curve.py b/src/physrisk/kernel/curve.py index 2f172a4b..08e3d03e 100644 --- a/src/physrisk/kernel/curve.py +++ b/src/physrisk/kernel/curve.py @@ -1,9 +1,11 @@ +from typing import List, Union + import numpy as np -from physrisk.kernel import exceedance_curve -from physrisk.kernel.exceedance_curve import ExceedanceCurve - + + def add_x_value_to_curve(x, curve_x, curve_y): - """Add an x value to a curve, interpolated from the existing curve. curve_x and curve_y are the curve x and y values. + """Add an x value to a curve, interpolated from the existing curve. + curve_x and curve_y are the curve x and y values. curve_x is sorted non-decreasing. This function may be used to align curves and bins. 
""" # note some care needed as multiple identical curve_x are permitted: cannot simply use np.interp @@ -12,27 +14,28 @@ def add_x_value_to_curve(x, curve_x, curve_y): i = np.searchsorted(curve_x, x) if i == len(curve_y): - curve_y = np.insert(curve_y, i, curve_y[i - 1]) # flat extrapolation - curve_x = np.insert(curve_x, i, x) + curve_y = np.insert(curve_y, i, curve_y[i - 1]) # flat extrapolation + curve_x = np.insert(curve_x, i, x) elif x == curve_x[i]: # point already exists; nothing to do return curve_x, curve_y - elif (i == 0): - curve_y = np.insert(curve_y, 0, curve_y[0]) # flat extrapolation - curve_x = np.insert(curve_x, 0, x) + elif i == 0: + curve_y = np.insert(curve_y, 0, curve_y[0]) # flat extrapolation + curve_x = np.insert(curve_x, 0, x) else: pl, pu = curve_y[i - 1], curve_y[i] il, iu = curve_x[i - 1], curve_x[i] # linear interpolation; quadratic interpolation (linear in probability density) may also be of interest - prob = pl + (x - il) * (pu - pl) / (iu - il) - curve_y = np.insert(curve_y, i, prob) + prob = pl + (x - il) * (pu - pl) / (iu - il) + curve_y = np.insert(curve_y, i, prob) curve_x = np.insert(curve_x, i, x) - + return curve_x, curve_y + def to_exceedance_curve(bin_edges, probs): """An exceedance curve gives the probability that the random variable is greater than the value, - a type of cumulative probability. + a type of cumulative probability. """ nz = np.asarray(probs > 0).nonzero() fnz = nz[0][0] if len(nz[0]) > 0 else 0 @@ -41,7 +44,8 @@ def to_exceedance_curve(bin_edges, probs): cum_prob = np.insert(np.cumsum(nz_probs[::-1]), 0, 0.0)[::-1] return ExceedanceCurve(cum_prob, nz_values) -def process_bin_edges_and_probs(bin_edges, probs, range_fraction = 0.05): + +def process_bin_edges_and_probs(bin_edges, probs, range_fraction=0.05): r = bin_edges[-1] - bin_edges[0] r = bin_edges[0] if r == 0 else r new_edges = [] @@ -69,38 +73,101 @@ def process_bin_edges_and_probs(bin_edges, probs, range_fraction = 0.05): i = j return new_edges, new_probs -def process_bin_edges_for_graph(bin_edges, range_fraction = 0.05): - """Process infinitessimal (zero width) bins for graph display. - We make width 5% of range or 1/4 the width to the next bin edge, whichever is smaller - """ - r = bin_edges[-1] - bin_edges[0] - r = bin_edges[0] if r == 0 else r - new_edges = np.copy(bin_edges) - # say we have edges - # 0, 1, 2, 3, 3, 3, 5, 7 - # we want to convert to - # 0, 1, 2, 3, 3 + d, 3 + 2d, 5, 7 - # 2d = (7 - 0) * 0.01 - i = 0 - while i < len(bin_edges): - j = __next_non_equal_index(bin_edges, i) - if j == i + 1: - i = i + 1 - continue - if j >= len(bin_edges): - delta = r * range_fraction / (j - i - 1) - else: - delta = min(r * range_fraction, 0.25 * (bin_edges[j] - bin_edges[i])) / (j - i - 1) - offset = delta - for k in range(i + 1, j): - new_edges[k] = new_edges[k] + offset - offset += delta - i = j - return new_edges - + +def process_bin_edges_for_graph(bin_edges, range_fraction=0.05): + """Process infinitessimal (zero width) bins for graph display. 
+ We make width 5% of range or 1/4 the width to the next bin edge, whichever is smaller + """ + r = bin_edges[-1] - bin_edges[0] + r = bin_edges[0] if r == 0 else r + new_edges = np.copy(bin_edges) + # say we have edges + # 0, 1, 2, 3, 3, 3, 5, 7 + # we want to convert to + # 0, 1, 2, 3, 3 + d, 3 + 2d, 5, 7 + # 2d = (7 - 0) * 0.01 + i = 0 + while i < len(bin_edges): + j = __next_non_equal_index(bin_edges, i) + if j == i + 1: + i = i + 1 + continue + if j >= len(bin_edges): + delta = r * range_fraction / (j - i - 1) + else: + delta = min(r * range_fraction, 0.25 * (bin_edges[j] - bin_edges[i])) / (j - i - 1) + offset = delta + for k in range(i + 1, j): + new_edges[k] = new_edges[k] + offset + offset += delta + i = j + return new_edges + + def __next_non_equal_index(ndarray, i): j = i + 1 c = ndarray[i] while j < len(ndarray) and ndarray[j] == c: j = j + 1 - return j \ No newline at end of file + return j + + +class ExceedanceCurve: + """A point on an exceedance curve comprises an value, v, and a probability, p. + p is the probability that the random variable >= v, e.g. an event occurs with event value (e.g. intensity) >= v. + """ + + __slots__ = ["probs", "values"] + + def __init__(self, probs: Union[List[float], np.ndarray], values: Union[List[float], np.ndarray]): + """Create a new asset event distribution. + Args: + probs: exceedance probabilities (must be sorted and decreasing). + values: values (must be sorted and non-decreasing). + """ + + # probabilities must be sorted and decreasing + # values must be sorted and non-decreasing (intens[i + 1] >= intens[i]) + if len(probs) != len(values): + raise ValueError("same number of probabilities and values expected") + if not np.all(np.diff(probs) <= 0): + raise ValueError("probs must be sorted and decreasing") + if not np.all(np.diff(values) >= 0): + raise ValueError("values must be sorted and non-decreasing") + + self.probs = np.array(probs) + self.values = np.array(values) + + def add_value_point(self, value): + """Add a point to the curve with specified value and exceedance + probability determined from existing curve by linear interpolation. + """ + values, probs = add_x_value_to_curve(value, self.values, self.probs) + return ExceedanceCurve(probs, values) + + def get_value(self, prob): + return np.interp(prob, self.probs[::-1], self.values[::-1]) + + def get_probability_bins(self, include_last: bool = False): + r"""Convert from exceedance (cumulative) probability to bins of constant probability density. + This is equivalent to the assumption of linear interpolation of exceedance points. + + .. math:: + p^\text{b}_i = p^\text{e}_{i + 1} - p^\text{e}_i + + Returns: + value_bins (ndarray), probs: The contiguous bin lower and upper values, probabilities of each bin. 
+ If value_bins is of length n then there are n-1 bins and n-1 probabilities + + """ + value_bins = self.values[:] + probs = self.probs[:-1] - self.probs[1:] # type: ignore + if include_last or len(self.values) == 1: + value_bins = np.append(value_bins, value_bins[-1]) # last bin has zero width + probs = np.append(probs, self.probs[-1]) + return value_bins, probs + + def get_samples(self, uniforms): + """Return value, v, for each probability p in uniforms such that p is the probability that the random variable + < v.""" + return np.where(uniforms > (1.0 - self.probs[0]), np.interp(uniforms, 1.0 - self.probs, self.values), 0.0) diff --git a/src/physrisk/kernel/enums.py b/src/physrisk/kernel/enums.py deleted file mode 100644 index e7002920..00000000 --- a/src/physrisk/kernel/enums.py +++ /dev/null @@ -1 +0,0 @@ -from enum import Enum diff --git a/src/physrisk/kernel/events.py b/src/physrisk/kernel/events.py index d56be071..d010b938 100644 --- a/src/physrisk/kernel/events.py +++ b/src/physrisk/kernel/events.py @@ -1,8 +1,150 @@ -class Event: pass +from typing import List, Protocol -class Drought(Event): pass +import numpy as np +from numba import float64, njit +from numba.experimental import jitclass -class HighTemperature(Event): pass -class Inundation(Event): pass +# @njit(cache=True) +def calculate_cumulative_probs(bins_lower: np.ndarray, bins_upper: np.ndarray, probs: np.ndarray): + # note: in some circumstances we could exclude the two extreme points and rely on flat extrapolation + # this implementation retains points for clarity, sacrificing some performance + assert bins_lower.size == bins_upper.size + assert probs.shape[1] == bins_lower.size + nb_bins = bins_lower.size # aka M: number of bins + n = probs.shape[0] + nb_points = bins_lower.size * 2 - np.count_nonzero(bins_lower[1:] == bins_upper[:-1]) + cum_prob = np.zeros(n) + values = np.zeros(nb_points) + cum_probs = np.zeros(shape=(n, nb_points)) + index = 0 + values[0] = bins_lower[0] + cum_probs[:, 0] = cum_prob + for i in range(nb_bins): + # index is index of last point in cumulative curve + if bins_lower[i] == values[index]: + # bin is contiguous with previous: add just upper bound + cum_prob += probs[:, i] + values[index + 1] = bins_upper[i] + cum_probs[:, index + 1] = cum_prob + index += 1 + else: + # bin not contiguous: add lower and upper bounds + values[index + 1] = bins_lower[i] + cum_probs[:, index + 1] = cum_prob + cum_prob += probs[:, i] + values[index + 2] = bins_upper[i] + cum_probs[:, index + 2] = cum_prob + index += 2 + return values, cum_probs + +@njit(cache=True) +def sample_from_cumulative_probs(values: np.ndarray, cum_probs: np.ndarray, uniforms: np.ndarray): + n = cum_probs.shape[0] + nb_samples = uniforms.shape[1] + assert uniforms.shape[0] == n + samples = np.zeros(shape=(n, nb_samples)) + for i in range(n): + samples[i, :] = np.interp(uniforms[i, :], cum_probs[i, :], values) + return samples + + +class MultivariateDistribution(Protocol): + def inv_cumulative_marginal_probs(self, cum_probs: np.ndarray): ... + + +class EmpiricalMultivariateDistribution(MultivariateDistribution): + """Stores an N dimensional empirical probability density function.""" + + def __init__(self, bins_lower: np.ndarray, bins_upper: np.ndarray, probs: np.ndarray): + """N marginal probability distributions are each represented as a set of bins of + uniform probability density. + + Args: + bins_lower (np.ndarray): Lower bounds of M probability bins (M,). + bins_upper (np.ndarray): Upper bounds of M probability bins (M,). 
+ probs (np.ndarray): Probabilities of bins (N, M). + + Raises: + ValueError: _description_ + """ + if bins_lower.ndim > 1 or bins_upper.ndim > 1 or bins_lower.size != bins_upper.size: + raise ValueError("bin upper and lower bounds must be 1-D and same size.") + if probs.ndim != 2 or probs.shape[1] != bins_upper.size: + raise ValueError("probabilities must be (N, M).") + if np.any(bins_lower[1:] < bins_lower[:-1]) or np.any(bins_upper[1:] < bins_upper[:-1]): + raise ValueError("bins must be non-decreasing.") + if np.any(bins_upper[1:] < bins_lower[:-1]): + raise ValueError("bins may not overlap.") + self.bins_lower = bins_lower + self.bins_upper = bins_upper + self.probs = probs + + def inv_cumulative_marginal_probs(self, cum_probs: np.ndarray): + """Calculate inverse cumulative probabilities for each of the N + marginal probability distributions. By definition, this is the + vectorized form of get_inv_cumulative_marginal_prob, vectorized + as a performance optimization. + + Args: + cum_probs (np.ndarray): Cumulative probabilities (N, P), P being number of samples. + axis (int): Specifies the axis of the N events. + """ + values_dist, cum_probs_dist = calculate_cumulative_probs(self.bins_lower, self.bins_upper, self.probs) + return sample_from_cumulative_probs(values_dist, cum_probs_dist, cum_probs) + + +def event_samples(impacts_bins: np.ndarray, probs: List[np.ndarray], nb_events: int, nb_samples: int): + if any([p.size != 1 and p.size != nb_events for p in probs]): + raise ValueError(f"probabilities must be scalar or vector or length {nb_events}.") + + return event_samples_numba(impacts_bins, probs, nb_events, nb_samples) + + +def find(elements: np.ndarray, value): + """In case we need a specific formulation...""" + current: int = 0 + lower: int = 0 + upper: int = elements.size - 1 + while lower != upper - 1: + current = (lower + upper) // 2 + if elements[current] >= value: + upper = current + else: + lower = current + return current + + +@njit(cache=True) +def event_samples_numba(impacts_bins: np.ndarray, probs: List[np.ndarray], nb_events: int, nb_samples: int): + samples = np.zeros(shape=(nb_samples, nb_events)) + np.random.seed(111) + cum_probs = np.zeros(len(probs)) + for i in range(nb_events): + # for each event calculate cumulative probability distribution + sum = 0.0 + for j in range(len(probs)): + sum += probs[j][i] + cum_probs[j] = sum + cum_probs[-1] = np.minimum(cum_probs[-1], 1.0) + u = np.random.rand(nb_samples) + samples[:, i] = np.interp(u, cum_probs, impacts_bins[1:]) + return samples + + +spec = [ + ("values", float64[:]), + ("cum_probs", float64[:]), +] + + +@jitclass(spec) +class CumulativeProb(object): + def __init__(self, values: np.ndarray, cum_probs: np.ndarray): + self.values = values + self.cum_probs = cum_probs + + @property + def size(self): + return self.values.size diff --git a/src/physrisk/kernel/exceedance_curve.py b/src/physrisk/kernel/exceedance_curve.py deleted file mode 100644 index 10a27af6..00000000 --- a/src/physrisk/kernel/exceedance_curve.py +++ /dev/null @@ -1,57 +0,0 @@ -import numpy as np -import physrisk.kernel.curve as cv -from typing import List, Union - -class ExceedanceCurve: - """A point on an exceedance curve comprises an value, v, and a probability, p. - p is the probability that an event occurs with event value (e.g. intensity) >= v.""" - - __slots__ = ["probs", "values"] - - def __init__(self, - probs: Union[List[float], np.ndarray], - values: Union[List[float], np.ndarray]): - """Create a new asset event distribution. 
- Args: - probs: exceedance probabilities (must be sorted and decreasing) - values: values (must be sorted and non-decreasing) - """ - - # probabilities must be sorted and decreasing - # values must be sorted and non-decreasing (intens[i + 1] >= intens[i]) - if len(probs) != len(values): - raise ValueError('same number of probabilities and values expected') - if not np.all(np.diff(probs) <= 0): - raise ValueError('probs must be sorted and decreasing') - if not np.all(np.diff(values) >= 0): - raise ValueError('values must be sorted and non-decreasing') - - self.probs = np.array(probs) - self.values = np.array(values) - - def add_value_point(self, value): - """Add a point to the curve with specified value and exceedance probability determined from existing curve by linear interpolation.""" - values, probs = cv.add_x_value_to_curve(value, self.values, self.probs) - return ExceedanceCurve(probs, values) - - def get_value(self, prob): - return np.interp(prob, self.probs[::-1], self.values[::-1]) - - def get_probability_bins(self): - r"""Convert from exceedance (cumulative) probability to bins of constant probability. - This is equivalent to the assumption of linear interpolation of exceedance points. - - .. math:: - p^\text{b}_i = p^\text{e}_{i + 1} - p^\text{e}_i - - Returns: - value_bins (ndarray), probs: the contiguous bin lower and upper values, probabilities of each bin - If value_bins is of lenth n then ther are n-1 bins and n-1 probabilities - - """ - # value bins are contiguous - value_bins = self.values[:] - probs = self.probs[:-1] - self.probs[1:] - return value_bins, probs - - diff --git a/src/physrisk/kernel/exposure.py b/src/physrisk/kernel/exposure.py new file mode 100644 index 00000000..74751799 --- /dev/null +++ b/src/physrisk/kernel/exposure.py @@ -0,0 +1,175 @@ +import logging +import math +from abc import abstractmethod +from dataclasses import dataclass +from enum import Enum +from typing import Dict, Iterable, List, Tuple + +import numpy as np + +from physrisk.data.hazard_data_provider import HazardDataHint +from physrisk.kernel.assets import Asset +from physrisk.kernel.hazard_model import ( + HazardDataRequest, + HazardDataResponse, + HazardEventDataResponse, + HazardModel, + HazardParameterDataResponse, +) +from physrisk.kernel.hazards import ChronicHeat, CombinedInundation, Drought, Fire, Hail, Wind +from physrisk.kernel.impact import _request_consolidated +from physrisk.kernel.vulnerability_model import DataRequester +from physrisk.utils.helpers import get_iterable + + +class Category(Enum): + LOWEST = 1 + LOW = 2 + MEDIUM = 3 + HIGH = 4 + HIGHEST = 5 + NODATA = 6 + + +@dataclass +class Bounds: + """Category applies if lower <= value < upper""" + + category: str + lower: float + upper: float + + +@dataclass +class AssetExposureResult: + hazard_categories: Dict[type, Tuple[Category, float]] + + +class ExposureMeasure(DataRequester): + @abstractmethod + def get_exposures( + self, asset: Asset, data_responses: Iterable[HazardDataResponse] + ) -> Dict[type, Tuple[Category, float]]: ... 
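To illustrate the ExposureMeasure contract just defined: an implementation pairs get_data_requests with get_exposures, and the responses arrive in the same order as the requests. A minimal sketch follows; the class name, the single-hazard choice and the bin thresholds are illustrative assumptions only, and the NaN handling of the measure defined below is omitted:

    import numpy as np

    from physrisk.kernel.assets import Asset
    from physrisk.kernel.exposure import Category, ExposureMeasure
    from physrisk.kernel.hazard_model import HazardDataRequest, HazardParameterDataResponse
    from physrisk.kernel.hazards import ChronicHeat


    class SingleHazardExposureMeasure(ExposureMeasure):
        """Toy measure: bins one chronic heat indicator into three categories."""

        lower_bounds = np.array([float("-inf"), 10.0, 20.0])  # illustrative thresholds
        categories = [Category.LOW, Category.MEDIUM, Category.HIGH]

        def get_data_requests(self, asset: Asset, *, scenario: str, year: int):
            return [
                HazardDataRequest(
                    ChronicHeat,
                    asset.longitude,
                    asset.latitude,
                    scenario=scenario,
                    year=year,
                    indicator_id="days/above/35c",
                )
            ]

        def get_exposures(self, asset, data_responses):
            (resp,) = data_responses  # one response, matching the single request
            assert isinstance(resp, HazardParameterDataResponse)
            param = resp.parameter
            # find the bin whose lower bound is the largest one not exceeding param
            index = np.searchsorted(self.lower_bounds, param, side="right") - 1
            return {ChronicHeat: (self.categories[index], float(param))}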
+ + +class JupterExposureMeasure(ExposureMeasure): + def __init__(self): + self.exposure_bins = self.get_exposure_bins() + + def get_data_requests(self, asset: Asset, *, scenario: str, year: int) -> Iterable[HazardDataRequest]: + return [ + HazardDataRequest( + hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=indicator_id, + # select specific model for wind for consistency with thresholds + hint=HazardDataHint(path="wind/jupiter/v1/max_1min_{scenario}_{year}") if hazard_type == Wind else None, + ) + for (hazard_type, indicator_id) in self.exposure_bins.keys() + ] + + def get_exposures(self, asset: Asset, data_responses: Iterable[HazardDataResponse]): + result: Dict[type, Tuple[Category, float]] = {} + for (k, v), resp in zip(self.exposure_bins.items(), data_responses): + if isinstance(resp, HazardParameterDataResponse): + param = resp.parameter + elif isinstance(resp, HazardEventDataResponse): + if len(resp.intensities) > 1: + raise ValueError("single-value curve expected") + param = resp.intensities[0] + (hazard_type, _) = k + (lower_bounds, categories) = v + if math.isnan(param): + result[hazard_type] = (Category.NODATA, float(param)) + else: + index = np.searchsorted(lower_bounds, param, side="right") - 1 + result[hazard_type] = (categories[index], float(param)) + return result + + def get_exposure_bins(self): + categories = {} + # specify exposure bins as dataclass in case desirable to use JSON in future + categories[(CombinedInundation, "flooded_fraction")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=0.01), + Bounds(category=Category.LOW, lower=0.01, upper=0.04), + Bounds(category=Category.MEDIUM, lower=0.04, upper=0.1), + Bounds(category=Category.HIGH, lower=0.1, upper=0.2), + Bounds(category=Category.HIGHEST, lower=0.2, upper=float("inf")), + ] + ) + categories[(ChronicHeat, "days/above/35c")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=5), + Bounds(category=Category.LOW, lower=5, upper=10), + Bounds(category=Category.MEDIUM, lower=10, upper=20), + Bounds(category=Category.HIGH, lower=20, upper=30), + Bounds(category=Category.HIGHEST, lower=30, upper=float("inf")), + ] + ) + categories[(Wind, "max_speed")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=63), + Bounds(category=Category.LOW, lower=63, upper=90), + Bounds(category=Category.MEDIUM, lower=90, upper=119), + Bounds(category=Category.HIGH, lower=119, upper=178), + Bounds(category=Category.HIGHEST, lower=178, upper=float("inf")), + ] + ) + categories[(Drought, "months/spei3m/below/-2")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=0.1), + Bounds(category=Category.LOW, lower=0.1, upper=0.25), + Bounds(category=Category.MEDIUM, lower=0.25, upper=0.5), + Bounds(category=Category.HIGH, lower=0.5, upper=1.0), + Bounds(category=Category.HIGHEST, lower=1.0, upper=float("inf")), + ] + ) + categories[(Hail, "days/above/5cm")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=0.2), + Bounds(category=Category.LOW, lower=0.2, upper=1.0), + Bounds(category=Category.MEDIUM, lower=1.0, upper=2.0), + Bounds(category=Category.HIGH, lower=2.0, upper=3.0), + Bounds(category=Category.HIGHEST, lower=3.0, upper=float("inf")), + ] + ) + categories[(Fire, "fire_probability")] = self.bounds_to_lookup( + [ + Bounds(category=Category.LOWEST, lower=float("-inf"), upper=0.1), + 
Bounds(category=Category.LOW, lower=0.1, upper=0.2), + Bounds(category=Category.MEDIUM, lower=0.2, upper=0.35), + Bounds(category=Category.HIGH, lower=0.35, upper=0.5), + Bounds(category=Category.HIGHEST, lower=0.5, upper=float("inf")), + ] + ) + return categories + + def bounds_to_lookup(self, bounds: Iterable[Bounds]): + lower_bounds = np.array([b.lower for b in bounds]) + categories = np.array([b.category for b in bounds]) + return (lower_bounds, categories) + + +def calculate_exposures( + assets: List[Asset], hazard_model: HazardModel, exposure_measure: ExposureMeasure, scenario: str, year: int +) -> Dict[Asset, AssetExposureResult]: + requester_assets: Dict[DataRequester, List[Asset]] = {exposure_measure: assets} + asset_requests, responses = _request_consolidated(hazard_model, requester_assets, scenario, year) + + logging.info( + "Applying exposure measure {0} to {1} assets of type {2}".format( + type(exposure_measure).__name__, len(assets), type(assets[0]).__name__ + ) + ) + result: Dict[Asset, AssetExposureResult] = {} + + for asset in assets: + requests = asset_requests[(exposure_measure, asset)] # (ordered) requests for a given asset + hazard_data = [responses[req] for req in get_iterable(requests)] + result[asset] = AssetExposureResult(hazard_categories=exposure_measure.get_exposures(asset, hazard_data)) + + return result diff --git a/src/physrisk/kernel/financial_model.py b/src/physrisk/kernel/financial_model.py new file mode 100644 index 00000000..64dd25b5 --- /dev/null +++ b/src/physrisk/kernel/financial_model.py @@ -0,0 +1,63 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Dict + +import numpy as np + +from .assets import Asset + + +class FinancialDataProvider(ABC): + @abstractmethod + def get_asset_value(self, asset: Asset, currency: str) -> float: + """Return the current value of the asset in specified currency.""" + ... + + @abstractmethod + def get_asset_aggregate_cashflows(self, asset: Asset, start: datetime, end: datetime, currency: str) -> float: + """Return the expected sum of the cashflows generated by the Asset between start and end, in + specified currency.""" + ... + + +class FinancialModelBase(ABC): + @abstractmethod + def damage_to_loss(self, asset: Asset, impact: np.ndarray, currency: str): + """Convert the fractional damage of the specified asset to a financial loss.""" + ... + + @abstractmethod + def disruption_to_loss(self, asset: Asset, impact: np.ndarray, year: int, currency: str): + """Convert the fractional annual disruption of the specified asset to a financial loss.""" + ... 
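Since FinancialModelBase is abstract, a worked example may help. The provider class and all figures below are illustrative assumptions; FinancialModel is the concrete class defined next in this file:

    from datetime import datetime

    import numpy as np

    from physrisk.kernel.assets import Asset
    from physrisk.kernel.financial_model import FinancialDataProvider, FinancialModel


    class FlatFinancialDataProvider(FinancialDataProvider):
        """Toy provider: fixed value and cashflow for every asset."""

        def get_asset_value(self, asset: Asset, currency: str) -> float:
            return 1_000_000.0  # illustrative asset value

        def get_asset_aggregate_cashflows(
            self, asset: Asset, start: datetime, end: datetime, currency: str
        ) -> float:
            return 50_000.0  # illustrative cashflow over the period


    model = FinancialModel(FlatFinancialDataProvider())
    asset = Asset(latitude=51.5, longitude=-0.1)
    # a 2% fractional damage scales the asset value: 20,000 in the chosen currency
    print(model.damage_to_loss(asset, np.array([0.02]), "USD"))  # [20000.]
    # a 10% annual disruption scales the year's aggregate cashflows
    print(model.disruption_to_loss(asset, np.array([0.1]), 2030, "USD"))  # [5000.]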
+ + +class FinancialModel(FinancialModelBase): + """ "Financial Model using a FinancialDataProvider as source of information.""" + + def __init__(self, data_provider: FinancialDataProvider): + self.data_provider = data_provider + + def damage_to_loss(self, asset: Asset, impact: np.ndarray, currency: str): + return self.data_provider.get_asset_value(asset, currency) * impact + + def disruption_to_loss(self, asset: Asset, impact: np.ndarray, year: int, currency: str): + return ( + self.data_provider.get_asset_aggregate_cashflows( + asset, datetime(year, 1, 1), datetime(year, 12, 31), currency + ) + * impact + ) + + +class CompositeFinancialModel(FinancialModelBase): + """Financial model split by asset type.""" + + def __init__(self, financial_models: Dict[type, FinancialModelBase]): + self.financial_models = financial_models + + def damage_to_loss(self, asset: Asset, impact: np.ndarray, currency: str): + return self.financial_models[type(asset)].damage_to_loss(asset, impact, currency) + + def disruption_to_loss(self, asset: Asset, impact: np.ndarray, year: int, currency: str): + return self.financial_models[type(asset)].disruption_to_loss(asset, impact, year, currency) diff --git a/src/physrisk/kernel/hazard_event_distrib.py b/src/physrisk/kernel/hazard_event_distrib.py new file mode 100644 index 00000000..61f73cef --- /dev/null +++ b/src/physrisk/kernel/hazard_event_distrib.py @@ -0,0 +1,40 @@ +from typing import List, Union + +import numpy as np + +from . import curve + + +class HazardEventDistrib: + """Intensity distribution of a hazard event (e.g. inundation depth, wind speed etc), + specific to an asset -- that is, at the location of the asset.""" + + __slots__ = ["__event_type", "__intensity_bins", "__prob", "__exceedance"] + + def __init__( + self, event_type: type, intensity_bins: Union[List[float], np.ndarray], prob: Union[List[float], np.ndarray] + ): + """Create a new asset event distribution. + Args: + event_type: type of event + intensity_bins: non-decreasing intensity bin edges. + e.g. bin edges [1.0, 1.5, 2.0] imply two bins: 1.0 < i <= 1.5, 1.5 < i <= 2.0 + prob: (annual) probability of occurrence for each intensity bin with size [len(intensity_bins) - 1] + """ + self.__event_type = event_type + self.__intensity_bins = np.array(intensity_bins) + self.__prob = np.array(prob) + + def intensity_bins(self): + return zip(self.__intensity_bins[0:-1], self.__intensity_bins[1:]) + + def to_exceedance_curve(self): + return curve.to_exceedance_curve(self.__intensity_bins, self.__prob) + + @property + def intensity_bin_edges(self) -> np.ndarray: + return self.__intensity_bins + + @property + def prob(self) -> np.ndarray: + return self.__prob diff --git a/src/physrisk/kernel/hazard_model.py b/src/physrisk/kernel/hazard_model.py new file mode 100644 index 00000000..f574ea99 --- /dev/null +++ b/src/physrisk/kernel/hazard_model.py @@ -0,0 +1,159 @@ +from abc import ABC, abstractmethod +from collections import defaultdict +from typing import Dict, List, Mapping, Optional, Protocol, Tuple + +import numpy as np + +from physrisk.data.hazard_data_provider import HazardDataHint + + +class HazardDataRequest: + """Request for hazard data. The event_type determines whether the hazard is acute or chronic. + An acute hazard is an event and the response will therefore comprise hazard intensities for the + different event return periods. 
A chronic hazard on the other hand is a shift in a climate parameter + and the parameter value is returned.""" + + def __init__( + self, + hazard_type: type, + longitude: float, + latitude: float, + *, + indicator_id: str, + scenario: str, + year: int, + hint: Optional[HazardDataHint] = None, + buffer: Optional[int] = None, + ): + """Create HazardDataRequest. + + Args: + event_type: type of hazard. + longitude: required longitude. + latitude: required latitude. + model: model identifier. + scenario: identifier of scenario, e.g. rcp8p5 (RCP 8.5). + year: projection year, e.g. 2080. + buffer: delimitation of the area for the hazard data expressed in metres (within [0,1000]). + """ + self.hazard_type = hazard_type + self.longitude = longitude + self.latitude = latitude + self.indicator_id = indicator_id + self.scenario = scenario + self.year = year + self.hint = hint + self.buffer = buffer + + def group_key(self): + """Key used to group EventDataRequests into batches.""" + return tuple( + ( + self.hazard_type, + self.indicator_id, + self.scenario, + self.year, + None if self.hint is None else self.hint.group_key(), + ) + ) + + +class HazardDataResponse: + pass + + +class HazardDataFailedResponse(HazardDataResponse): + def __init__(self, err: Exception): + self.error = err + + +class HazardEventDataResponse(HazardDataResponse): + """Response to HazardDataRequest for acute hazards.""" + + def __init__(self, return_periods: np.ndarray, intensities: np.ndarray): + """Create HazardEventDataResponse. + + Args: + return_periods: return periods in years. + intensities: hazard event intensity for each return period, or set of hazard event intensities corresponding to different events. # noqa: E501 + """ + + self.return_periods = return_periods + self.intensities = intensities + + +class HazardParameterDataResponse(HazardDataResponse): + """Response to HazardDataRequest.""" + + def __init__(self, parameters: np.ndarray, param_defns: np.ndarray = np.empty([])): + """Create HazardParameterDataResponse. In general the chronic parameters are an array of values. + For example, a chronic hazard may be the number of days per year with average temperature + above :math:`x' degrees for :math:`x' in [25, 30, 35, 40]°C. In this case the param_defns would + contain np.array([25, 30, 35, 40]). In some cases the hazard may be a scalar value. + Parameters will typically be a (1D) array of values where vulnerability models + require a number of parameters (e.g. to model decrease of efficiency as temperature increases). + + Args: + parameters (np.ndarray): Chronic hazard parameter values. + param_defns (np.ndarray): Chronic hazard parameter definitions. + """ + self.parameters = parameters + self.param_defns = param_defns + + @property + def parameter(self) -> float: + """Convenience function to return single parameter. + + Returns: + float: Single parameter. + """ + return self.parameters[0] + + +class HazardModelFactory(Protocol): + def hazard_model(self, interpolation: str = "floor", provider_max_requests: Dict[str, int] = {}): + """Create a HazardModel instance based on a number of options. + + Args: + interpolation (str): Interpolation type to use for sub-pixel raster interpolation (where + this is supported by hazard models). + provider_max_requests (Dict[str, int]): The maximum permitted number of permitted + requests to external providers. + """ + ... + + +class HazardModel(ABC): + """Hazard event model. 
The model accepts a set of EventDataRequests and returns the corresponding + EventDataResponses.""" + + @abstractmethod + def get_hazard_events(self, requests: List[HazardDataRequest]) -> Mapping[HazardDataRequest, HazardDataResponse]: + """Process the hazard data requests and return responses.""" + ... + + +class DataSource(Protocol): + def __call__( + self, longitudes, latitudes, *, model: str, scenario: str, year: int + ) -> Tuple[np.ndarray, np.ndarray]: ... + + +class CompositeHazardModel(HazardModel): + """Hazard Model that uses other models to process EventDataRequests.""" + + def __init__(self, hazard_models: Dict[type, HazardModel]): + self.hazard_models = hazard_models + + def get_hazard_events(self, requests: List[HazardDataRequest]) -> Mapping[HazardDataRequest, HazardDataResponse]: + requests_by_event_type = defaultdict(list) + + for request in requests: + requests_by_event_type[request.hazard_type].append(request) + + responses: Dict[HazardDataRequest, HazardDataResponse] = {} + for event_type, reqs in requests_by_event_type.items(): + events_reponses = self.hazard_models[event_type].get_hazard_events(reqs) + responses.update(events_reponses) + + return responses diff --git a/src/physrisk/kernel/hazards.py b/src/physrisk/kernel/hazards.py new file mode 100644 index 00000000..c307cc6a --- /dev/null +++ b/src/physrisk/kernel/hazards.py @@ -0,0 +1,103 @@ +import inspect +import sys +from enum import Enum +from typing import cast + + +class HazardKind(Enum): + acute = (1,) + chronic = 2 + + +class InundationType(Enum): + riverine = (1,) + coastal = 2 + + +class Hazard: + @staticmethod + def kind(hazard_type): + return cast(HazardKind, hazard_type.kind) + + +class ChronicHeat(Hazard): + kind = HazardKind.chronic + pass + + +class Inundation(Hazard): + kind = HazardKind.acute + pass + + +class AirTemperature(ChronicHeat): + pass + + +class CoastalInundation(Inundation): + kind = HazardKind.acute + pass + + +class ChronicWind(Hazard): + kind = HazardKind.chronic + pass + + +class CombinedInundation(Hazard): + kind = HazardKind.chronic + pass + + +class Drought(Hazard): + kind = HazardKind.chronic + pass + + +class Fire(Hazard): + kind = HazardKind.chronic + pass + + +class Hail(Hazard): + kind = HazardKind.chronic + pass + + +class PluvialInundation(Inundation): + kind = HazardKind.acute + pass + + +class Precipitation(Hazard): + kind = HazardKind.chronic + pass + + +class RiverineInundation(Inundation): + kind = HazardKind.acute + pass + + +class WaterRisk(Hazard): + kind = HazardKind.chronic + pass + + +class WaterTemperature(ChronicHeat): + pass + + +class Wind(Hazard): + kind = HazardKind.acute + pass + + +def all_hazards(): + return [ + obj for _, obj in inspect.getmembers(sys.modules[__name__]) if inspect.isclass(obj) and issubclass(obj, Hazard) + ] + + +def hazard_class(name: str): + return getattr(sys.modules[__name__], name) diff --git a/src/physrisk/kernel/impact.py b/src/physrisk/kernel/impact.py new file mode 100644 index 00000000..0be43f1a --- /dev/null +++ b/src/physrisk/kernel/impact.py @@ -0,0 +1,125 @@ +import logging +from collections import defaultdict +from dataclasses import dataclass +from typing import Dict, Iterable, List, NamedTuple, Optional, Tuple, Union + +from physrisk.kernel.assets import Asset +from physrisk.kernel.hazard_event_distrib import HazardEventDistrib +from physrisk.kernel.hazard_model import HazardDataFailedResponse, HazardDataRequest, HazardDataResponse, HazardModel +from physrisk.kernel.impact_distrib import EmptyImpactDistrib, 
ImpactDistrib +from physrisk.kernel.vulnerability_distrib import VulnerabilityDistrib +from physrisk.kernel.vulnerability_model import ( + DataRequester, + VulnerabilityModelAcuteBase, + VulnerabilityModelBase, + VulnerabilityModels, +) +from physrisk.utils.helpers import get_iterable + +logger = logging.getLogger(__name__) + + +class ImpactKey(NamedTuple): + asset: Asset + hazard_type: type + # these additional key items can be set to None, for example + # if the calculation is for a given scenario and year + # impact_type: Optional[str] = None # consider adding: whether damage or disruption + scenario: Optional[str] = None + key_year: Optional[int] = None # this is None for 'historical' scenario + + +@dataclass +class AssetImpactResult: + impact: ImpactDistrib + vulnerability: Optional[VulnerabilityDistrib] = None + event: Optional[HazardEventDistrib] = None + hazard_data: Optional[Iterable[HazardDataResponse]] = None # optional detailed results for drill-down + + +def calculate_impacts( # noqa: C901 + assets: Iterable[Asset], + hazard_model: HazardModel, + vulnerability_models: VulnerabilityModels, + *, + scenario: str, + year: int, +) -> Dict[ImpactKey, AssetImpactResult]: + """Calculate asset level impacts.""" + + model_assets: Dict[DataRequester, List[Asset]] = defaultdict( + list + ) # list of assets to be modelled using vulnerability model + + for asset in assets: + asset_type = type(asset) + mappings = vulnerability_models.vuln_model_for_asset_of_type(asset_type) + for mapping in mappings: + model_assets[mapping].append(asset) + results = {} + + asset_requests, responses = _request_consolidated(hazard_model, model_assets, scenario, year) + + logging.info("Calculating impacts") + for model, assets in model_assets.items(): + logging.info( + "Applying vulnerability model {0} to {1} assets of type {2}".format( + type(model).__name__, len(assets), type(assets[0]).__name__ + ) + ) + for asset in assets: + requests = asset_requests[(model, asset)] + hazard_data = [responses[req] for req in get_iterable(requests)] + if any(isinstance(hd, HazardDataFailedResponse) for hd in hazard_data): + assert isinstance(model, VulnerabilityModelBase) + if ( + ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year) + not in results + ): + results[ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year)] = ( + AssetImpactResult(EmptyImpactDistrib()) + ) + continue + try: + if isinstance(model, VulnerabilityModelAcuteBase): + impact, vul, event = model.get_impact_details(asset, hazard_data) + results[ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year)] = ( + AssetImpactResult(impact, vulnerability=vul, event=event, hazard_data=hazard_data) + ) + elif isinstance(model, VulnerabilityModelBase): + impact = model.get_impact(asset, hazard_data) + results[ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year)] = ( + AssetImpactResult(impact, hazard_data=hazard_data) + ) + except Exception as e: + logger.exception(e) + assert isinstance(model, VulnerabilityModelBase) + if ( + ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year) + not in results + ): + results[ImpactKey(asset=asset, hazard_type=model.hazard_type, scenario=scenario, key_year=year)] = ( + AssetImpactResult(EmptyImpactDistrib()) + ) + return results + + +def _request_consolidated( + hazard_model: HazardModel, requester_assets: Dict[DataRequester, List[Asset]], scenario: str, year: int 
+):
+    """As an important performance optimization, data requests are consolidated for all requesters
+    (e.g. vulnerability models), since different requesters may query the same hazard data sets.
+    Note that the key for a single request is (requester, asset).
+    """
+    # the list of requests for each requester and asset
+    asset_requests: Dict[Tuple[DataRequester, Asset], Union[HazardDataRequest, Iterable[HazardDataRequest]]] = {}
+
+    logging.info("Generating hazard data requests for requesters")
+    for requester, assets in requester_assets.items():
+        for asset in assets:
+            asset_requests[(requester, asset)] = requester.get_data_requests(asset, scenario=scenario, year=year)
+
+    logging.info("Retrieving hazard data")
+    flattened_requests = [req for requests in asset_requests.values() for req in get_iterable(requests)]
+    responses = hazard_model.get_hazard_events(flattened_requests)
+    return asset_requests, responses
diff --git a/src/physrisk/kernel/impact_distrib.py b/src/physrisk/kernel/impact_distrib.py
index 2b7cc470..b11cdc9c 100644
--- a/src/physrisk/kernel/impact_distrib.py
+++ b/src/physrisk/kernel/impact_distrib.py
@@ -1,26 +1,37 @@
-import physrisk.kernel.curve as curve
-import numpy as np
+from enum import Enum
 from typing import List, Union

-from physrisk.kernel.exceedance_curve import ExceedanceCurve
+import numpy as np
+
+from physrisk.kernel.curve import to_exceedance_curve
+
+
+class ImpactType(Enum):
+    damage = 1
+    disruption = 2
+

 class ImpactDistrib:
     """Impact distributions specific to an asset."""
-
-    __slots__ = ["__event_type", "__impact_bins", "__prob"]
-
-    def __init__(self,
-        event_type: type,
-        impact_bins: Union[List[float], np.ndarray],
-        prob: Union[List[float], np.ndarray]):
+
+    __slots__ = ["__hazard_type", "__impact_bins", "__prob", "impact_type"]
+
+    def __init__(
+        self,
+        hazard_type: type,
+        impact_bins: Union[List[float], np.ndarray],
+        prob: Union[List[float], np.ndarray],
+        impact_type: ImpactType = ImpactType.damage,
+    ):
        """Create a new asset event distribution.
Args: event_type: type of event impact_bins: non-decreasing impact bin bounds - prob: probabilities with size [len(intensity_bins) - 1] + prob: probabilities with size [len(intensity_bins) - 1] """ - self.__event_type = event_type - self.__impact_bins = np.array(impact_bins) + self.__hazard_type = hazard_type + self.__impact_bins = np.array(impact_bins) + self.impact_type = impact_type self.__prob = np.array(prob) def impact_bins_explicit(self): @@ -29,8 +40,17 @@ def impact_bins_explicit(self): def mean_impact(self): return np.sum((self.__impact_bins[:-1] + self.__impact_bins[1:]) * self.__prob / 2) + def stddev_impact(self): + mean = self.mean_impact() + bin_mids = (self.__impact_bins[:-1] + self.__impact_bins[1:]) / 2 + return np.sqrt(np.sum(self.__prob * (bin_mids - mean) * (bin_mids - mean))) + def to_exceedance_curve(self): - return curve.to_exceedance_curve(self.__impact_bins, self.__prob) + return to_exceedance_curve(self.__impact_bins, self.__prob) + + @property + def hazard_type(self) -> type: + return self.__hazard_type @property def impact_bins(self) -> np.ndarray: @@ -38,4 +58,9 @@ def impact_bins(self) -> np.ndarray: @property def prob(self) -> np.ndarray: - return self.__prob \ No newline at end of file + return self.__prob + + +class EmptyImpactDistrib(ImpactDistrib): + def __init__(self): + pass diff --git a/src/physrisk/kernel/model.py b/src/physrisk/kernel/model.py deleted file mode 100644 index 74bdc20a..00000000 --- a/src/physrisk/kernel/model.py +++ /dev/null @@ -1,16 +0,0 @@ -from abc import ABC, abstractmethod - -class Model(ABC): - """Exposure/vulnerability model that generates the vulnerability and asset event distributions of an assets for different types of - hazard event. - """ - - @abstractmethod - def get_event_data_requests(self, event): - pass - - @abstractmethod - def get_distributions(self, event_data_responses): - pass - - diff --git a/src/physrisk/kernel/risk.py b/src/physrisk/kernel/risk.py new file mode 100644 index 00000000..26855a32 --- /dev/null +++ b/src/physrisk/kernel/risk.py @@ -0,0 +1,175 @@ +import concurrent.futures +from dataclasses import dataclass +from typing import Dict, List, NamedTuple, Optional, Protocol, Sequence, Set, Tuple, Type, Union + +from physrisk.api.v1.impact_req_resp import Category, ScoreBasedRiskMeasureDefinition +from physrisk.kernel.assets import Asset +from physrisk.kernel.hazard_model import HazardModel +from physrisk.kernel.hazards import Hazard, all_hazards +from physrisk.kernel.impact import AssetImpactResult, ImpactKey, calculate_impacts +from physrisk.kernel.impact_distrib import EmptyImpactDistrib, ImpactDistrib +from physrisk.kernel.vulnerability_model import VulnerabilityModels + +# from asyncio import ALL_COMPLETED +# import concurrent.futures + + +Impact = Dict[Tuple[Asset, type], AssetImpactResult] # the key is (Asset, Hazard type) + + +class BatchId(NamedTuple): + scenario: str + key_year: Optional[int] + + +class RiskModel: + """Base class for a risk model (i.e. a calculation of risk that makes use of hazard and vulnerability + models).""" + + def __init__(self, hazard_model: HazardModel, vulnerability_models: VulnerabilityModels): + self._hazard_model = hazard_model + self._vulnerability_models = vulnerability_models + + def calculate_risk_measures(self, assets: Sequence[Asset], prosp_scens: Sequence[str], years: Sequence[int]): ... 
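The `mean_impact` and `stddev_impact` methods added to `ImpactDistrib` above compute moments from the bin midpoints, treating each bin's probability as concentrated at its centre. A minimal numerical check of those formulas (illustrative bins and probabilities, not library data):

```python
import numpy as np

# Hypothetical impact bins (fractional damage) and bin probabilities.
impact_bins = np.array([0.0, 0.1, 0.2, 0.4])  # three bins
prob = np.array([0.7, 0.2, 0.1])              # sums to 1

bin_mids = (impact_bins[:-1] + impact_bins[1:]) / 2
mean = np.sum(bin_mids * prob)                        # matches mean_impact()
std = np.sqrt(np.sum(prob * (bin_mids - mean) ** 2))  # matches stddev_impact()
print(f"{mean:.4f} {std:.4f}")  # 0.0950 0.0789
```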
+ + def _calculate_all_impacts( + self, assets: Sequence[Asset], prosp_scens: Sequence[str], years: Sequence[int], include_histo: bool = False + ): + # ensure "historical" is present, e.g. needed for risk measures + scenarios = set(["historical"] + list(prosp_scens)) if include_histo else prosp_scens + impact_results: Dict[ImpactKey, AssetImpactResult] = {} + + # in case of multiple calculation, run on separate threads + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor: + # with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor: + tagged_futures = { + executor.submit(self._calculate_single_impact, assets, scenario, year): BatchId( + scenario, None if scenario == "historical" else year + ) + for scenario in scenarios + for year in years + } + for future in concurrent.futures.as_completed(tagged_futures): + tag = tagged_futures[future] + try: + res = future.result() + # flatten to use single key + for temp_key, value in res.items(): + key = ImpactKey( + asset=temp_key.asset, + hazard_type=temp_key.hazard_type, + scenario=tag.scenario, + key_year=tag.key_year, + ) + impact_results[key] = value + + except Exception as exc: + print("%r generated an exception: %s" % (tag, exc)) + return impact_results + + def _calculate_single_impact(self, assets: Sequence[Asset], scenario: str, year: int): + """Calculate impacts for a single scenario and year.""" + return calculate_impacts(assets, self._hazard_model, self._vulnerability_models, scenario=scenario, year=year) + + +class MeasureKey(NamedTuple): + asset: Asset + prosp_scen: str # prospective scenario + year: int + hazard_type: type + + +@dataclass +class Measure: + score: Category + measure_0: float + definition: ScoreBasedRiskMeasureDefinition # reference to single instance of ScoreBasedRiskMeasureDefinition + + +class RiskMeasureCalculator(Protocol): + def calc_measure(self, hazard_type: type, base_impact: ImpactDistrib, impact: ImpactDistrib) -> Measure: ... + + def get_definition(self, hazard_type: type) -> ScoreBasedRiskMeasureDefinition: ... + + def supported_hazards(self) -> Set[type]: ... + + +class AssetLevelRiskModel(RiskModel): + def __init__( + self, + hazard_model: HazardModel, + vulnerability_models: VulnerabilityModels, + measure_calculators: Dict[type, RiskMeasureCalculator], + ): + """Risk model that calculates risk measures at the asset level for a sequence + of assets. + + Args: + hazard_model (HazardModel): The hazard model. + vulnerability_models (Dict[type, Sequence[VulnerabilityModelBase]]): Vulnerability models for asset types. + measure_calculators (Dict[type, RiskMeasureCalculator]): Risk measure calculators for asset types. 
+        """
+        super().__init__(hazard_model, vulnerability_models)
+        self._measure_calculators = measure_calculators
+
+    def calculate_impacts(self, assets: Sequence[Asset], prosp_scens: Sequence[str], years: Sequence[int]):
+        impacts = self._calculate_all_impacts(assets, prosp_scens, years)
+        return impacts
+
+    def populate_measure_definitions(
+        self, assets: Sequence[Asset]
+    ) -> Tuple[Dict[Type[Hazard], List[str]], Dict[ScoreBasedRiskMeasureDefinition, str]]:
+        hazards = all_hazards()
+        # the identifiers of the score-based risk measures used for each asset, for each hazard type
+        measure_ids_for_hazard: Dict[Type[Hazard], List[str]] = {}
+        # at most one measure calculator per asset type
+        calcs_by_asset = [self._measure_calculators.get(type(asset), None) for asset in assets]
+        used_calcs = {c for c in calcs_by_asset if c is not None}
+        # collect all distinct measure definitions in use and assign each an identifier
+        measure_id_lookup = {
+            cal: f"measure_{i}"
+            for (i, cal) in enumerate(
+                set(
+                    item
+                    for item in (
+                        cal.get_definition(hazard_type=hazard_type) for hazard_type in hazards for cal in used_calcs
+                    )
+                    if item is not None
+                )
+            )
+        }
+
+        def get_measure_id(measure_calc: Union[RiskMeasureCalculator, None], hazard_type: type):
+            if measure_calc is None:
+                return "na"
+            measure = measure_calc.get_definition(hazard_type=hazard_type)
+            return measure_id_lookup[measure] if measure is not None else "na"
+
+        for hazard_type in hazards:
+            measure_ids = [get_measure_id(calc, hazard_type) for calc in calcs_by_asset]
+            measure_ids_for_hazard[hazard_type] = measure_ids
+        return measure_ids_for_hazard, measure_id_lookup
+
+    def calculate_risk_measures(self, assets: Sequence[Asset], prosp_scens: Sequence[str], years: Sequence[int]):
+        impacts = self._calculate_all_impacts(assets, prosp_scens, years, include_histo=True)
+        measures: Dict[MeasureKey, Measure] = {}
+
+        for asset in assets:
+            if type(asset) not in self._measure_calculators:
+                continue
+            measure_calc = self._measure_calculators[type(asset)]
+            for prosp_scen in prosp_scens:
+                for year in years:
+                    for hazard_type in measure_calc.supported_hazards():
+                        base_result = impacts.get(
+                            ImpactKey(asset=asset, hazard_type=hazard_type, scenario="historical", key_year=None)
+                        )
+                        prosp_result = impacts.get(
+                            ImpactKey(asset=asset, hazard_type=hazard_type, scenario=prosp_scen, key_year=year)
+                        )
+                        # guard against hazards for which no impact was calculated
+                        if base_result is None or prosp_result is None:
+                            continue
+                        base_impact = base_result.impact
+                        prosp_impact = prosp_result.impact
+                        if not isinstance(base_impact, EmptyImpactDistrib) and not isinstance(
+                            prosp_impact, EmptyImpactDistrib
+                        ):
+                            risk_ind = measure_calc.calc_measure(hazard_type, base_impact, prosp_impact)
+                            measures[MeasureKey(asset, prosp_scen, year, hazard_type)] = risk_ind
+        return impacts, measures
diff --git a/src/physrisk/kernel/vulnerability_distrib.py b/src/physrisk/kernel/vulnerability_distrib.py
index ab182d5b..66dc372d 100644
--- a/src/physrisk/kernel/vulnerability_distrib.py
+++ b/src/physrisk/kernel/vulnerability_distrib.py
@@ -1,36 +1,37 @@
-import numpy as np
 from typing import List, Union
-from typing import TypeVar, Generic, List
+
+import numpy as np
 
 # tempting to use typing.get_args
-# and use generics for nicer syntax to capture event type, but requires Python 3.8 onwards 
+# and use generics for nicer syntax to capture event type, but requires Python 3.8 onwards
 # https://stackoverflow.com/questions/48572831/how-to-access-the-type-arguments-of-typing-generic
 
-class VulnerabilityDistrib():
+
+class VulnerabilityDistrib:
     """Vulnerability distribution as a discrete matrix."""
-
+
     __slots__ = ["_event_type", "_intensity_bins", "_impact_bins", "_prob_matrix"]
-
-    def __init__(self,
+
+    def __init__(
+        self,
         event_type: type,
-
intensity_bins: Union[List[float], np.ndarray], - impact_bins: Union[List[float], np.ndarray], - prob_matrix: Union[List[List[float]], np.ndarray]): + intensity_bins: Union[List[float], np.ndarray], + impact_bins: Union[List[float], np.ndarray], + prob_matrix: Union[List[List[float]], np.ndarray], + ): """Create a new vulnerability distribution. Args: event_type: type of event intensity_bins: non-decreasing intensity bin bounds impact_bins: non-decreasing impact bin bounds - prob_matrix: matrix of probabilities with size [len(intensity_bins) - 1, len(impact_bins) - 1] + prob_matrix: matrix of probabilities with size [len(intensity_bins) - 1, len(impact_bins) - 1] """ self._event_type = event_type - self._intensity_bins = np.array(intensity_bins) + self._intensity_bins = np.array(intensity_bins) self._impact_bins = np.array(impact_bins) self._prob_matrix = np.array(prob_matrix) - #sdef from_mean_impact(intensity_bins: List[float], impact_bins: List[float]): - + # sdef from_mean_impact(intensity_bins: List[float], impact_bins: List[float]): @property def event_type(self) -> type: @@ -52,4 +53,4 @@ def intensity_bins(self) -> np.ndarray: @property def prob_matrix(self) -> np.ndarray: - return self._prob_matrix \ No newline at end of file + return self._prob_matrix diff --git a/src/physrisk/kernel/vulnerability_matrix_provider.py b/src/physrisk/kernel/vulnerability_matrix_provider.py new file mode 100644 index 00000000..60317b97 --- /dev/null +++ b/src/physrisk/kernel/vulnerability_matrix_provider.py @@ -0,0 +1,60 @@ +from typing import Callable, Sequence + +import numpy as np + + +class Distribution: + def __init__(self, mean, std_dev): + self.mean = mean + self.std_dev = std_dev + + +class VulnMatrixProvider: + __slots__ = ["intensity_bin_centres", "impact_cdfs"] + + def __init__( + self, + intensity_bin_centres: np.ndarray, + impact_cdfs: Sequence[Callable[[np.ndarray], np.ndarray]], + ): + """Via to_prob_matrix method, provides the probability that the impact on an asset falls within + a specified bin, an impact being either a fractional damage or disruption that occurs as a result of a + hazard event of a given intensity. + + Args: + intensity_bin_centres (Iterable[float]): The centres of the intensity bins. + impact_cdfs (Iterable[Callable[[float], float]]): For each intensity bin centre, provides a function + that takes parameter, d, and returns the probability that the impact is less than d. This is used to + construct the probability matrix. + """ + + if not np.all(np.diff(intensity_bin_centres) >= 0): + raise ValueError("intensities must be sorted and increasing") + + if len(intensity_bin_centres) != len(impact_cdfs): + raise ValueError("one impact_cdf expected for each intensity_bin_centre") + + self.intensity_bin_centres = np.array(intensity_bin_centres) + self.impact_cdfs = impact_cdfs + + def to_prob_matrix(self, impact_bin_edges: np.ndarray) -> np.ndarray: + """Return probability matrix, p with dimension (number intensity bins, number impact bins) + where p[i, j] is the conditional probability that given the intensity falls in bin i, the impact is + in bin j. + + Args: + impact_bin_edges (Iterable[float]): Bin edges of the impact bins. + + Returns: + np.ndarray: Probability matrix. 
+ """ + # construct a cdf probability matrix at each intensity point + # the probability is the prob that the impact is greater than the specified + cdf_matrix = np.empty([len(self.intensity_bin_centres), len(impact_bin_edges)]) + + for i, _ in enumerate(self.intensity_bin_centres): + cdf_matrix[i, :] = self.impact_cdfs[i](impact_bin_edges) # type: ignore + + prob_matrix = cdf_matrix[:, 1:] - cdf_matrix[:, :-1] + + return prob_matrix diff --git a/src/physrisk/kernel/vulnerability_model.py b/src/physrisk/kernel/vulnerability_model.py new file mode 100644 index 00000000..b763a6db --- /dev/null +++ b/src/physrisk/kernel/vulnerability_model.py @@ -0,0 +1,315 @@ +import importlib.resources +import json +from abc import ABC, abstractmethod +from typing import Dict, Iterable, List, Optional, Protocol, Sequence, Tuple, Type, Union + +import numpy as np +from scipy import stats + +import physrisk.data.static.vulnerability +from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType + +from ..api.v1.common import VulnerabilityCurve, VulnerabilityCurves +from .assets import Asset +from .curve import ExceedanceCurve +from .hazard_event_distrib import HazardEventDistrib +from .hazard_model import HazardDataRequest, HazardDataResponse, HazardEventDataResponse +from .vulnerability_distrib import VulnerabilityDistrib +from .vulnerability_matrix_provider import VulnMatrixProvider + +PLUGINS = dict() # type:ignore + + +def repeat(num_times): + def decorator_repeat(func): ... # Create and return a wrapper function + + return decorator_repeat + + +def applies_to_events(event_types): + def decorator_events(func): + PLUGINS[func.__name__] = func + return func + + return decorator_events + + +def applies_to_assets(asset_types): + def decorator_events(func): + PLUGINS[func.__name__] = func + return func + + return decorator_events + + +def get_vulnerability_curves_from_resource(id: str) -> VulnerabilityCurves: + with importlib.resources.open_text(physrisk.data.static.vulnerability, id + ".json") as f: + curve_set = VulnerabilityCurves(**json.load(f)) + return curve_set + + +def delta_cdf(y): + return lambda x: np.where(x < y, 0, 1) + + +def checked_beta_distrib(mean, std, scaling_factor=1.0): + if std == 0 or mean == 0 or mean == scaling_factor: + return delta_cdf(mean) + return beta_distrib(mean, std, scaling_factor) + + +def beta_distrib(mean, std, scaling_factor): + cv = std / mean + a = ((scaling_factor - mean) / (cv * cv) - mean) / scaling_factor + b = a * (scaling_factor - mean) / mean + return lambda x, a=a, b=b: stats.beta.cdf(x / scaling_factor, a, b) + + +class DataRequester(Protocol): + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: ... + + +class EventBased(Protocol): + def impact_samples(self, asset: Asset, data_responses: Iterable[HazardDataResponse]) -> np.ndarray: + # event-based models generate impact samples based on events received by the hazard model + # the events may be in the form of an array of severities in the form of return periods. + ... 
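The `beta_distrib` helper above re-parameterises a beta distribution from a mean and standard deviation (with `checked_beta_distrib` falling back to the step-function `delta_cdf` in the degenerate cases). A short sketch verifying that re-parameterisation, with illustrative numbers and `scaling_factor = 1`:

```python
import numpy as np
from scipy import stats

mean, std = 0.2, 0.1  # hypothetical impact mean and standard deviation
cv = std / mean
a = (1.0 - mean) / (cv * cv) - mean  # as in beta_distrib with scaling_factor = 1
b = a * (1.0 - mean) / mean          # here a = 3.0, b = 12.0

# The fitted beta distribution recovers the requested moments:
assert np.isclose(stats.beta.mean(a, b), mean)
assert np.isclose(stats.beta.std(a, b), std)
```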
+
+
+class VulnerabilityModelBase(ABC, DataRequester):
+    def __init__(self, indicator_id: str, hazard_type: type, impact_type: ImpactType):
+        self.indicator_id = indicator_id
+        self.hazard_type = hazard_type
+        self.impact_type = impact_type
+        self._event_types: List[type] = []
+        self._asset_types: List[type] = []
+
+    @abstractmethod
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Provide the one or more hazard event data requests required in order to calculate
+        the VulnerabilityDistrib and HazardEventDistrib for the asset."""
+        ...
+
+    @abstractmethod
+    def get_impact(self, asset: Asset, event_data: List[HazardDataResponse]) -> ImpactDistrib: ...
+
+
+class VulnerabilityModels(Protocol):
+    def vuln_model_for_asset_of_type(self, type: Type[Asset]) -> Sequence[VulnerabilityModelBase]:
+        """Return, for a given asset type, the vulnerability models required for each hazard.
+
+        Returns:
+            Sequence[VulnerabilityModelBase]: Vulnerability models for the asset type.
+        """
+        ...
+
+
+class VulnerabilityModelsFactory(Protocol):
+    def vulnerability_models(self) -> VulnerabilityModels:
+        """Create a VulnerabilityModels instance. Although no options are used at present, the factory
+        is implemented this way so that options can be added in future
+        (e.g. to allow a request to specify a preferred methodology).
+
+        Returns:
+            VulnerabilityModels: Instance that provides vulnerability models for asset types.
+        """
+        ...
+
+
+class DictBasedVulnerabilityModels(VulnerabilityModels):
+    def __init__(self, models: Dict[Type[Asset], Sequence[VulnerabilityModelBase]]):
+        self.models = models
+
+    def vuln_model_for_asset_of_type(self, type: Type[Asset]):
+        return self.models[type]
+
+
+class VulnerabilityModelAcuteBase(VulnerabilityModelBase):
+    """Base class for models that generate the VulnerabilityDistrib and HazardEventDistrib of an
+    Asset.
+    """
+
+    def __init__(self, indicator_id: str, hazard_type: type, impact_type: ImpactType):
+        super().__init__(indicator_id=indicator_id, hazard_type=hazard_type, impact_type=impact_type)
+
+    @abstractmethod
+    def get_distributions(
+        self, asset: Asset, event_data_responses: Iterable[HazardDataResponse]
+    ) -> Tuple[VulnerabilityDistrib, HazardEventDistrib]:
+        """Return distributions for asset: VulnerabilityDistrib and HazardEventDistrib.
+        The hazard event data is used to do this.
+
+        Args:
+            asset: the asset.
+            event_data_responses: the responses to the requests made by get_data_requests, in the same order.
+        """
+        ...
+
+    def get_impact(self, asset: Asset, data_responses: Iterable[HazardDataResponse]):
+        impact, _, _ = self.get_impact_details(asset, data_responses)
+        return impact
+
+    def get_impact_details(
+        self, asset: Asset, data_responses: Iterable[HazardDataResponse]
+    ) -> Tuple[ImpactDistrib, VulnerabilityDistrib, HazardEventDistrib]:
+        """Return impact distribution along with the vulnerability and hazard event distributions used to infer it.
+
+        Args:
+            asset: the asset.
+            data_responses: the responses to the requests made by get_data_requests, in the same order.
+ """ + vulnerability_dist, event_dist = self.get_distributions(asset, data_responses) + impact_prob = vulnerability_dist.prob_matrix.T @ event_dist.prob + return ( + ImpactDistrib( + vulnerability_dist.event_type, vulnerability_dist.impact_bins, impact_prob, impact_type=self.impact_type + ), + vulnerability_dist, + event_dist, + ) + + def _check_event_type(self): + if self.hazard_type not in self._event_types: + raise NotImplementedError(f"model does not support events of type {self.hazard_type.__name__}") + + +class VulnerabilityModel(VulnerabilityModelAcuteBase): + """A vulnerability model that requires only specification of distributions of impacts for given intensities, + by implementing get_impact_curve.""" + + def __init__( + self, + *, + indicator_id: str = "", + hazard_type: type, + impact_type: ImpactType, + impact_bin_edges, + buffer: Optional[int] = None, + ): + super().__init__(indicator_id, hazard_type, impact_type) + self.impact_bin_edges = impact_bin_edges + self.buffer = buffer + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + return HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + buffer=self.buffer, + ) + + def get_distributions( + self, asset: Asset, event_data_responses: Iterable[HazardDataResponse] + ) -> Tuple[VulnerabilityDistrib, HazardEventDistrib]: + (event_data,) = event_data_responses + assert isinstance(event_data, HazardEventDataResponse) + + intensity_curve = ExceedanceCurve(1.0 / event_data.return_periods, event_data.intensities) + intensity_bin_edges, probs = intensity_curve.get_probability_bins() + + intensity_bin_centres = (intensity_bin_edges[1:] + intensity_bin_edges[:-1]) / 2 + vul = VulnerabilityDistrib( + self.hazard_type, + intensity_bin_edges, + self.impact_bin_edges, + # np.eye(8, 11) + self.get_impact_curve(intensity_bin_centres, asset).to_prob_matrix(self.impact_bin_edges), + ) + + event = HazardEventDistrib(self.hazard_type, intensity_bin_edges, probs) + return vul, event + + @abstractmethod + def get_impact_curve(self, intensity_bin_centres: np.ndarray, asset: Asset) -> VulnMatrixProvider: + """Defines a VulnMatrixProvider. The VulnMatrixProvider returns probabilities of specified impact bins + for the intensity bin centres.""" + ... + + +class CurveBasedVulnerabilityModel(VulnerabilityModel): + def get_impact_curve(self, intensity_bin_centres: np.ndarray, asset: Asset) -> VulnMatrixProvider: + curve: VulnerabilityCurve = self.get_vulnerability_curve(asset) + impact_means = np.interp(intensity_bin_centres, curve.intensity, curve.impact_mean) + impact_stddevs = np.interp(intensity_bin_centres, curve.intensity, curve.impact_std) + return VulnMatrixProvider( + intensity_bin_centres, + impact_cdfs=[checked_beta_distrib(m, s) for m, s in zip(impact_means, impact_stddevs)], + ) + + @abstractmethod + def get_vulnerability_curve(self, asset: Asset) -> VulnerabilityCurve: ... + + +class DeterministicVulnerabilityModel(VulnerabilityModelAcuteBase): + def __init__( + self, + *, + hazard_type: type, + damage_curve_intensities: Sequence[float], + damage_curve_impacts: Sequence[float], + indicator_id: str, + impact_type: ImpactType, + buffer: Optional[int] = None, + ): + """A vulnerability model that requires only specification of a damage/disruption curve. + This simple model contains no uncertainty around damage/disruption. 
The damage curve is passed via the + constructor. The edges of the (hazard) intensity bins are determined by the granularity of + the hazard data itself. The impact bin edges are inferred from the intensity bin edges, by + looking up the impact corresponding to the hazard indicator intensity from the damage curve. + + Args: + event_type (type): _description_ + damage_curve_intensities (Sequence[float]): Intensities + (i.e. hazard indicator values) of the damage/disruption (aka impact) curve. + damage_curve_impacts (Sequence[float]): Fractional damage to asset/disruption + to operation resulting from a hazard of the corresponding intensity. + indicator_id (str): ID of the hazard indicator to which this applies. Defaults to "". + buffer (Optional[int]): Delimitation of the area for the hazard data in metres (within [0,1000]). + """ + super().__init__(indicator_id=indicator_id, hazard_type=hazard_type, impact_type=impact_type) + self.damage_curve_intensities = damage_curve_intensities + self.damage_curve_impacts = damage_curve_impacts + self.buffer = buffer + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + return HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + buffer=self.buffer, + ) + + def get_distributions( + self, asset: Asset, event_data_responses: Iterable[HazardDataResponse] + ) -> Tuple[VulnerabilityDistrib, HazardEventDistrib]: + (event_data,) = event_data_responses + assert isinstance(event_data, HazardEventDataResponse) + + intensity_curve = ExceedanceCurve(1.0 / event_data.return_periods, event_data.intensities) + intensity_bin_edges, probs = intensity_curve.get_probability_bins() + + # look up the impact bin edges + impact_bins_edges = np.interp(intensity_bin_edges, self.damage_curve_intensities, self.damage_curve_impacts) + + # the vulnerability distribution probabilities are an identity matrix: + # we assume that if the intensity falls within a certain bin then the impacts *will* fall within the + # bin where the edges are obtained by applying the damage curve to the intensity bin edges. 
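+        # Illustrative example: with intensity_bin_edges [0.0, 0.5, 1.0] (metres, say) and a damage
+        # curve mapping intensities (0.0, 1.0) to impacts (0.0, 0.5), impact_bins_edges becomes
+        # [0.0, 0.25, 0.5] and np.eye(2) routes all probability from intensity bin i to impact bin i.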
+        vul = VulnerabilityDistrib(
+            self.hazard_type, intensity_bin_edges, impact_bins_edges, np.eye(len(impact_bins_edges) - 1)
+        )
+        event = HazardEventDistrib(self.hazard_type, intensity_bin_edges, probs)
+        return vul, event
diff --git a/src/physrisk/models/__init__.py b/src/physrisk/models/__init__.py
deleted file mode 100644
index 4f158e00..00000000
--- a/src/physrisk/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from physrisk.models.power_generating_asset_model import InundationModel
\ No newline at end of file
diff --git a/src/physrisk/models/power_generating_asset_model.py b/src/physrisk/models/power_generating_asset_model.py
deleted file mode 100644
index 3264b3d4..00000000
--- a/src/physrisk/models/power_generating_asset_model.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import numpy as np
-from physrisk.kernel.events import HighTemperature
-from typing import List
-from physrisk.kernel import Asset, PowerGeneratingAsset, Inundation, Model
-from physrisk.kernel import AssetEventDistrib, VulnerabilityDistrib
-from physrisk.data import EventDataRequest
-from physrisk.kernel import ExceedanceCurve
-
-class InundationModel(Model):
-    __asset_types = [PowerGeneratingAsset]
-    __event_types = [Inundation]
-
-    def __init__(self, model = "MIROC-ESM-CHEM"):
-        # default impact curve
-        self.__curve_depth = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1])
-        self.__curve_impact = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365])
-        self.__model = model
-        self.__base_model = "000000000WATCH"
-        pass
-
-    def get_event_data_requests(self, asset : Asset):
-        # assuming here that other specific look-ups wold be needed
-        histo = EventDataRequest(Inundation, asset.longitude, asset.latitude,
-            scenario = "historical", type = "river", year = 1980, model = self.__base_model)
-
-        future = EventDataRequest(Inundation, asset.longitude, asset.latitude,
-            scenario = "rcp8p5", type = "river", year = 2080, model = self.__model)
-
-        return histo, future
-
-    def get_distributions(self, asset, event_data_responses):
-        """Return vulnerability and asset event distributions"""
-
-        histo, future = event_data_responses
-
-        protection_return_period = 250.0
-        curve_histo = ExceedanceCurve(1.0 / histo.return_periods, histo.intensities)
-        protection_depth = curve_histo.get_value(1.0 / protection_return_period)
-
-        curve_future = ExceedanceCurve(1.0 / future.return_periods, future.intensities)
-        curve_future = curve_future.add_value_point(protection_depth)
-
-        depth_bins, probs = curve_future.get_probability_bins()
-
-        impact_bins = np.interp(depth_bins, self.__curve_depth, self.__curve_impact)
-
-        # keep all bins, but make use of vulnerability matrix to apply protection level
-        # for improved performance we could truncate (and treat identify matrix as a special case)
-        # but this general version allows model uncertainties to be added
-        probs_protected = np.where(depth_bins[1:] <= protection_depth, 0.0, 1.0)
-        n_bins = len(probs)
-        vul = VulnerabilityDistrib(type(Inundation), depth_bins, impact_bins, np.diag(probs_protected))
-        event = AssetEventDistrib(type(Inundation), depth_bins, probs, curve_future)
-
-        return vul, event
-
-
diff --git a/src/physrisk/requests.py b/src/physrisk/requests.py
new file mode 100644
index 00000000..94e98253
--- /dev/null
+++ b/src/physrisk/requests.py
@@ -0,0 +1,476 @@
+import importlib
+import json
+from importlib import import_module
+from pathlib import PosixPath
+from typing import Any, Dict, List, Optional, Sequence, Type, cast
+
+import numpy as np
+
+import
physrisk.data.static.example_portfolios +from physrisk.api.v1.common import Distribution, ExceedanceCurve, VulnerabilityDistrib +from physrisk.api.v1.exposure_req_resp import AssetExposure, AssetExposureRequest, AssetExposureResponse, Exposure +from physrisk.api.v1.hazard_image import HazardImageRequest +from physrisk.data.hazard_data_provider import HazardDataHint +from physrisk.data.inventory import expand +from physrisk.data.inventory_reader import InventoryReader +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.exposure import JupterExposureMeasure, calculate_exposures +from physrisk.kernel.hazards import all_hazards +from physrisk.kernel.impact_distrib import EmptyImpactDistrib +from physrisk.kernel.risk import AssetLevelRiskModel, Measure, MeasureKey +from physrisk.kernel.vulnerability_model import ( + DictBasedVulnerabilityModels, + VulnerabilityModels, + VulnerabilityModelsFactory, +) + +from .api.v1.hazard_data import ( + HazardAvailabilityRequest, + HazardAvailabilityResponse, + HazardDataRequest, + HazardDataResponse, + HazardDataResponseItem, + HazardDescriptionRequest, + HazardDescriptionResponse, + HazardResource, + IntensityCurve, + Scenario, +) +from .api.v1.impact_req_resp import ( + AcuteHazardCalculationDetails, + AssetImpactRequest, + AssetImpactResponse, + AssetLevelImpact, + Assets, + AssetSingleImpact, + ImpactKey, + RiskMeasureKey, + RiskMeasures, + RiskMeasuresForAssets, + ScoreBasedRiskMeasureDefinition, + ScoreBasedRiskMeasureSetDefinition, +) +from .data.image_creator import ImageCreator +from .data.inventory import EmbeddedInventory, Inventory +from .kernel import Asset, Hazard +from .kernel import calculation as calc +from .kernel.hazard_model import HazardDataRequest as hmHazardDataRequest +from .kernel.hazard_model import HazardEventDataResponse as hmHazardEventDataResponse +from .kernel.hazard_model import HazardModel, HazardModelFactory, HazardParameterDataResponse + +Colormaps = Dict[str, Any] + + +class Requester: + def __init__( + self, + hazard_model_factory: HazardModelFactory, + vulnerability_models_factory: VulnerabilityModelsFactory, + inventory: Inventory, + inventory_reader: InventoryReader, + reader: ZarrReader, + colormaps: Colormaps, + ): + self.colormaps = colormaps + self.hazard_model_factory = hazard_model_factory + self.vulnerability_models_factory = vulnerability_models_factory + self.inventory = inventory + self.inventory_reader = inventory_reader + self.zarr_reader = reader + + def get(self, *, request_id, request_dict): + # the hazard model can depend + + if request_id == "get_hazard_data": + request = HazardDataRequest(**request_dict) + hazard_model = self.hazard_model_factory.hazard_model( + interpolation=request.interpolation, provider_max_requests=request.provider_max_requests + ) + return json.dumps(_get_hazard_data(request, hazard_model=hazard_model).model_dump()) # , allow_nan=False) + elif request_id == "get_hazard_data_availability": + request = HazardAvailabilityRequest(**request_dict) + return json.dumps(_get_hazard_data_availability(request, self.inventory, self.colormaps).model_dump()) + elif request_id == "get_hazard_data_description": + request = HazardDescriptionRequest(**request_dict) + return json.dumps(_get_hazard_data_description(request).dict()) + elif request_id == "get_asset_exposure": + request = AssetExposureRequest(**request_dict) + hazard_model = self.hazard_model_factory.hazard_model( + 
interpolation=request.calc_settings.hazard_interp, provider_max_requests=request.provider_max_requests
+            )
+            return json.dumps(_get_asset_exposures(request, hazard_model).model_dump(exclude_none=True))
+        elif request_id == "get_asset_impact":
+            request = AssetImpactRequest(**request_dict)
+            hazard_model = self.hazard_model_factory.hazard_model(
+                interpolation=request.calc_settings.hazard_interp, provider_max_requests=request.provider_max_requests
+            )
+            vulnerability_models = self.vulnerability_models_factory.vulnerability_models()
+            return dumps(_get_asset_impacts(request, hazard_model, vulnerability_models).model_dump())
+        elif request_id == "get_example_portfolios":
+            return dumps(_get_example_portfolios())
+        else:
+            raise ValueError(f"request type '{request_id}' not found")
+
+    def get_image(self, *, request_dict):
+        inventory = self.inventory
+        zarr_reader = self.zarr_reader
+        request = HazardImageRequest(**request_dict)
+        if not _read_permitted(request.group_ids, inventory.resources[request.resource]):
+            raise PermissionError()
+        model = inventory.resources[request.resource]
+        path = (
+            str(PosixPath(model.path).with_name(model.map.path))
+            if len(PosixPath(model.map.path).parts) == 1
+            else model.map.path
+        ).format(scenario=request.scenario_id, year=request.year)
+        colormap = request.colormap if request.colormap is not None else model.map.colormap.name
+        creator = ImageCreator(zarr_reader)  # store=ImageCreator.test_store(path))
+        return creator.convert(
+            path, colormap=colormap, tile=request.tile, min_value=request.min_value, max_value=request.max_value
+        )
+
+
+def _create_inventory(reader: Optional[InventoryReader] = None, sources: Optional[List[str]] = None):
+    resources: List[HazardResource] = []
+    colormaps: Dict[str, Dict[str, Any]] = {}
+    request_sources = ["embedded"] if sources is None else [s.lower() for s in sources]
+    for source in request_sources:
+        if source == "embedded":
+            inventory = EmbeddedInventory()
+            for res in inventory.resources.values():
+                resources.append(res)
+            colormaps.update(inventory.colormaps())
+        elif source == "hazard" or source == "hazard_test":
+            if reader is not None:
+                for resource in reader.read(source):
+                    resources.extend(expand([resource]))
+    return Inventory(resources)
+
+
+def create_source_paths(inventory: Inventory):
+    return get_default_source_paths(inventory)
+
+
+class NumpyArrayEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        return json.JSONEncoder.default(self, obj)
+
+
+def dumps(dict):
+    return json.dumps(dict, cls=NumpyArrayEncoder)
+
+
+def _read_permitted(group_ids: List[str], resource: HazardResource):
+    """Check whether read access to the resource is permitted for the requester.
+
+    Args:
+        group_ids (List[str]): Groups to which requester belongs.
+        resource (HazardResource): Hazard resource.
+
+    Returns:
+        bool: True if the requester is permitted access to the models comprising the resource.
+    """
+    return ("osc" in group_ids) or resource.group_id == "public"
+
+
+def _get_hazard_data_availability(request: HazardAvailabilityRequest, inventory: Inventory, colormaps: dict):
+    response = HazardAvailabilityResponse(
+        models=list(inventory.resources.values()), colormaps=colormaps
+    )  # type: ignore
+    return response
+
+
+def _get_hazard_data_description(request: HazardDescriptionRequest, reader: InventoryReader):
+    descriptions = reader.read_description_markdown(request.paths)
+    return HazardDescriptionResponse(descriptions=descriptions)
+
+
+def _get_hazard_data(request: HazardDataRequest, hazard_model: HazardModel):
+    # if any(
+    #     not _read_permitted(request.group_ids, inventory.resources_by_type_id[(i.event_type, i.model)][0])
+    #     for i in request.items
+    # ):
+    #     raise PermissionError()
+
+    # get hazard event types:
+    event_types = Hazard.__subclasses__()
+    event_dict = dict((et.__name__, et) for et in event_types)
+    event_dict.update((est.__name__, est) for et in event_types for est in et.__subclasses__())
+
+    # flatten list to let event processor decide how to group
+    item_requests = []
+    all_requests = []
+    for item in request.items:
+        hazard_type = (
+            item.hazard_type if item.hazard_type is not None else item.event_type if item.event_type is not None else ""
+        )
+        event_type = event_dict[hazard_type]
+        hint = None if item.path is None else HazardDataHint(path=item.path)
+
+        data_requests = [
+            hmHazardDataRequest(
+                event_type, lon, lat, indicator_id=item.indicator_id, scenario=item.scenario, year=item.year, hint=hint
+            )
+            for (lon, lat) in zip(item.longitudes, item.latitudes)
+        ]
+
+        all_requests.extend(data_requests)
+        item_requests.append(data_requests)
+
+    response_dict = hazard_model.get_hazard_events(all_requests)
+    # responses come back as a dictionary because requests may be executed in a different order to the list,
+    # to optimise performance.
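+    # e.g. response_dict[item_requests[0][0]] is the response to the first (lon, lat) request of the
+    # first request item, regardless of the order in which the hazard model executed the requests.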
+ + response = HazardDataResponse(items=[]) + + for i, item in enumerate(request.items): + requests = item_requests[i] + resps = (response_dict[req] for req in requests) + intensity_curves = [ + ( + IntensityCurve( + intensities=list(resp.intensities), + index_values=list(resp.return_periods), + index_name="return period", + return_periods=[], + ) + if isinstance(resp, hmHazardEventDataResponse) + else ( + IntensityCurve( + intensities=list(resp.parameters), + index_values=list(resp.param_defns), + index_name="threshold", + return_periods=[], + ) + if isinstance(resp, HazardParameterDataResponse) + else IntensityCurve(intensities=[], index_values=[], index_name="", return_periods=[]) + ) + ) + for resp in resps + ] + response.items.append( + HazardDataResponseItem( + intensity_curve_set=intensity_curves, + request_item_id=item.request_item_id, + event_type=item.event_type, + model=item.indicator_id, + scenario=item.scenario, + year=item.year, + ) + ) + + return response + + +def create_assets(asset: Assets, assets: Optional[List[Asset]]): # noqa: max-complexity=11 + """Create list of Asset objects from the Assets API object:""" + if assets is not None: + if len(asset.items) != 0: + raise ValueError("Cannot provide asset items in the request while specifying an explicit asset list") + return assets + else: + module = import_module("physrisk.kernel.assets") + asset_objs = [] + for item in asset.items: + if hasattr(module, item.asset_class): + kwargs: Dict[str, Any] = {} + if item.type is not None: + kwargs["type"] = item.type + if item.location is not None: + kwargs["location"] = item.location + if item.capacity is not None: + kwargs["capacity"] = item.capacity + asset_obj = cast( + Asset, + getattr(module, item.asset_class)(item.latitude, item.longitude, **kwargs), + ) + if item.attributes is not None: + for key, value in item.attributes.items(): + if value.isdigit(): + value_as_double = float(value) + setattr( + asset_obj, + key, + int(value_as_double) if value_as_double.is_integer() else value_as_double, + ) + else: + setattr(asset_obj, key, value) + asset_objs.append(asset_obj) + else: + raise ValueError(f"asset type '{item.asset_class}' not found") + return asset_objs + + +def _get_asset_exposures( + request: AssetExposureRequest, hazard_model: HazardModel, assets: Optional[List[Asset]] = None +): + _assets = create_assets(request.assets, assets) + measure = JupterExposureMeasure() + results = calculate_exposures(_assets, hazard_model, measure, scenario="ssp585", year=2030) + return AssetExposureResponse( + items=[ + AssetExposure( + asset_id="", + exposures=dict( + (t.__name__, Exposure(category=c.name, value=v)) for (t, (c, v)) in r.hazard_categories.items() + ), + ) + for (a, r) in results.items() + ] + ) + + +def _get_asset_impacts( + request: AssetImpactRequest, + hazard_model: HazardModel, + vulnerability_models: Optional[VulnerabilityModels] = None, + assets: Optional[List[Asset]] = None, +): + vulnerability_models = ( + DictBasedVulnerabilityModels(calc.get_default_vulnerability_models()) + if vulnerability_models is None + else vulnerability_models + ) + # we keep API definition of asset separate from internal Asset class; convert by reflection + # based on asset_class: + _assets = create_assets(request.assets, assets) + measure_calcs = calc.get_default_risk_measure_calculators() + risk_model = AssetLevelRiskModel(hazard_model, vulnerability_models, measure_calcs) + + scenarios = [request.scenario] if request.scenarios is None or len(request.scenarios) == 0 else 
request.scenarios + years = [request.year] if request.years is None or len(request.years) == 0 else request.years + risk_measures = None + if request.include_measures: + impacts, measures = risk_model.calculate_risk_measures(_assets, scenarios, years) + measure_ids_for_asset, definitions = risk_model.populate_measure_definitions(_assets) + # create object for API: + risk_measures = _create_risk_measures(measures, measure_ids_for_asset, definitions, _assets, scenarios, years) + elif request.include_asset_level: + impacts = risk_model.calculate_impacts(_assets, scenarios, years) + + if request.include_asset_level: + ordered_impacts: Dict[Asset, List[AssetSingleImpact]] = {} + for asset in _assets: + ordered_impacts[asset] = [] + for k, v in impacts.items(): + if request.include_calc_details: + if v.event is not None and v.vulnerability is not None: + hazard_exceedance = v.event.to_exceedance_curve() + + vulnerability_distribution = VulnerabilityDistrib( + intensity_bin_edges=v.vulnerability.intensity_bins, + impact_bin_edges=v.vulnerability.impact_bins, + prob_matrix=v.vulnerability.prob_matrix, + ) + calc_details = AcuteHazardCalculationDetails( + hazard_exceedance=ExceedanceCurve( + values=hazard_exceedance.values, exceed_probabilities=hazard_exceedance.probs + ), + hazard_distribution=Distribution( + bin_edges=v.event.intensity_bin_edges, probabilities=v.event.prob + ), + vulnerability_distribution=vulnerability_distribution, + ) + else: + calc_details = None + + if isinstance(v.impact, EmptyImpactDistrib): + continue + + impact_exceedance = v.impact.to_exceedance_curve() + key = ImpactKey(hazard_type=k.hazard_type.__name__, scenario_id=k.scenario, year=str(k.key_year)) + hazard_impacts = AssetSingleImpact( + key=key, + impact_type=v.impact.impact_type.name, + impact_exceedance=ExceedanceCurve( + values=impact_exceedance.values, exceed_probabilities=impact_exceedance.probs + ), + impact_distribution=Distribution(bin_edges=v.impact.impact_bins, probabilities=v.impact.prob), + impact_mean=v.impact.mean_impact(), + impact_std_deviation=v.impact.stddev_impact(), + calc_details=None if v.event is None else calc_details, + ) + ordered_impacts[k.asset].append(hazard_impacts) + + # note that this does rely on ordering of dictionary (post 3.6) + asset_impacts = [AssetLevelImpact(asset_id="", impacts=a) for a in ordered_impacts.values()] + else: + asset_impacts = None + + return AssetImpactResponse(asset_impacts=asset_impacts, risk_measures=risk_measures) + + +def _create_risk_measures( + measures: Dict[MeasureKey, Measure], + measure_ids_for_asset: Dict[Type[Hazard], List[str]], + definitions: Dict[ScoreBasedRiskMeasureDefinition, str], + assets: List[Asset], + scenarios: Sequence[str], + years: Sequence[int], +) -> RiskMeasures: + """Prepare RiskMeasures object for (JSON) output from measure results. + + Args: + measures (Dict[MeasureKey, Measure]): The score-based risk measures. + measure_ids_for_asset (Dict[Type[Hazard], List[str]]): IDs of the score-based risk measures + for each asset. + definitions (Dict[ScoreBasedRiskMeasureDefinition, str]): Map of the score-based risk measures + definitions to ID. + assets (List[Asset]): Assets. + scenarios (Sequence[str]): Scenario IDs. + years (Sequence[int]): Years. + + Returns: + RiskMeasures: Output for writing to JSON. 
+ """ + nan_value = -9999.0 # Nan not part of JSON spec + hazard_types = all_hazards() + measure_set_id = "measure_set_0" + measures_for_assets: List[RiskMeasuresForAssets] = [] + for hazard_type in hazard_types: + for scenario_id in scenarios: + for year in years: + # we calculate and tag results for each scenario, year and hazard + score_key = RiskMeasureKey( + hazard_type=hazard_type.__name__, scenario_id=scenario_id, year=str(year), measure_id=measure_set_id + ) + scores = [-1] * len(assets) + # measures_0 = [float("nan")] * len(assets) + measures_0 = [nan_value] * len(assets) + for i, asset in enumerate(assets): + # look up result using the MeasureKey: + measure_key = MeasureKey(asset=asset, prosp_scen=scenario_id, year=year, hazard_type=hazard_type) + measure = measures.get(measure_key, None) + if measure is not None: + scores[i] = measure.score + measures_0[i] = measure.measure_0 + measures_for_assets.append( + RiskMeasuresForAssets(key=score_key, scores=scores, measures_0=measures_0, measures_1=None) + ) + score_based_measure_set_defn = ScoreBasedRiskMeasureSetDefinition( + measure_set_id=measure_set_id, + asset_measure_ids_for_hazard={k.__name__: v for k, v in measure_ids_for_asset.items()}, + score_definitions={v: k for (k, v) in definitions.items()}, + ) + return RiskMeasures( + measures_for_assets=measures_for_assets, + score_based_measure_set_defn=score_based_measure_set_defn, + measures_definitions=None, + scenarios=[Scenario(id=scenario, years=list(years)) for scenario in scenarios], + asset_ids=[f"asset_{i}" for i, _ in enumerate(assets)], + ) + + +def _get_example_portfolios() -> List[Assets]: + portfolios = [] + for file in importlib.resources.contents(physrisk.data.static.example_portfolios): + if not str(file).endswith(".json"): + continue + with importlib.resources.open_text(physrisk.data.static.example_portfolios, file) as f: + portfolio = Assets(**json.load(f)) + portfolios.append(portfolio) + return portfolios diff --git a/data/external/.gitkeep b/src/physrisk/risk_models/__init__.py similarity index 100% rename from data/external/.gitkeep rename to src/physrisk/risk_models/__init__.py diff --git a/src/physrisk/risk_models/loss_model.py b/src/physrisk/risk_models/loss_model.py new file mode 100644 index 00000000..87c72ef6 --- /dev/null +++ b/src/physrisk/risk_models/loss_model.py @@ -0,0 +1,94 @@ +from abc import ABC, abstractmethod +from typing import Dict, List, Optional, Sequence + +import numpy as np + +from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType + +from ..kernel.assets import Asset +from ..kernel.calculation import get_default_hazard_model, get_default_vulnerability_models +from ..kernel.financial_model import FinancialModelBase +from ..kernel.hazard_model import HazardModel +from ..kernel.impact import calculate_impacts +from ..kernel.vulnerability_model import DictBasedVulnerabilityModels, VulnerabilityModels + + +class Aggregator(ABC): + @abstractmethod + def get_aggregation_keys(self, asset: Asset, impact: ImpactDistrib) -> List: ... 
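Losses are pooled under whichever keys an `Aggregator` returns for each (asset, impact) pair. As an example, a hypothetical aggregator that also pools losses per individual asset (a sketch; the class name and key scheme are illustrative, not part of the library):

```python
from typing import List

from physrisk.kernel.assets import Asset
from physrisk.kernel.impact_distrib import ImpactDistrib
from physrisk.risk_models.loss_model import Aggregator


class PerAssetAggregator(Aggregator):
    # pool losses per hazard type, per individual asset and into an overall "root" pool
    def get_aggregation_keys(self, asset: Asset, impact: ImpactDistrib) -> List:
        return [impact.hazard_type.__name__, f"asset/{id(asset)}", "root"]


# usage sketch: LossModel().get_financial_impacts(assets, financial_model=fm,
#     scenario="rcp8p5", year=2080, aggregator=PerAssetAggregator())
```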
+ + +class DefaultAggregator(Aggregator): + def get_aggregation_keys(self, asset: Asset, impact: ImpactDistrib) -> List: + return [(impact.hazard_type.__name__), ("root")] + + +class LossModel: + def __init__( + self, + hazard_model: Optional[HazardModel] = None, + vulnerability_models: Optional[VulnerabilityModels] = None, + ): + self.hazard_model = get_default_hazard_model() if hazard_model is None else hazard_model + self.vulnerability_models = ( + DictBasedVulnerabilityModels(get_default_vulnerability_models()) + if vulnerability_models is None + else vulnerability_models + ) + + """Calculates the financial impact on a list of assets.""" + + def get_financial_impacts( + self, + assets: Sequence[Asset], + *, + financial_model: FinancialModelBase, + scenario: str, + year: int, + aggregator: Optional[Aggregator] = None, + currency: str = "EUR", + sims: int = 100000, + ): + if aggregator is None: + aggregator = DefaultAggregator() + + aggregation_pools: Dict[str, np.ndarray] = {} + + results = calculate_impacts(assets, self.hazard_model, self.vulnerability_models, scenario=scenario, year=year) + # the impacts in the results are either fractional damage or a fractional disruption + + rg = np.random.Generator(np.random.MT19937(seed=111)) + + for impact_key, result in results.items(): + # look up keys for results + impact = result.impact + keys = aggregator.get_aggregation_keys(impact_key.asset, impact) + # transform units of impact into currency for aggregation + + # Monte-Carlo approach: note that if correlations of distributions are simple and model is otherwise linear + # then calculation by closed-form expression is preferred + impact_samples = self.uncorrelated_samples(impact, sims, rg) + + if impact.impact_type == ImpactType.damage: + loss = financial_model.damage_to_loss(impact_key.asset, impact_samples, currency) + else: # impact.impact_type == ImpactType.disruption: + loss = financial_model.disruption_to_loss(impact_key.asset, impact_samples, year, currency) + + for key in keys: + if key not in aggregation_pools: + aggregation_pools[key] = np.zeros(sims) + aggregation_pools[key] += loss # type: ignore + + measures = {} + percentiles = [0, 10, 20, 40, 60, 80, 90, 95, 97.5, 99, 99.5, 99.9] + for key, loss in aggregation_pools.items(): + measures[key] = { + "percentiles": percentiles, + "percentile_values": np.percentile(loss, percentiles), + "mean": np.mean(loss), + } + + return measures + + def uncorrelated_samples(self, impact: ImpactDistrib, samples: int, generator: np.random.Generator) -> np.ndarray: + return impact.to_exceedance_curve().get_samples(generator.uniform(size=samples)) diff --git a/src/physrisk/risk_models/risk_models.py b/src/physrisk/risk_models/risk_models.py new file mode 100644 index 00000000..3924b9c1 --- /dev/null +++ b/src/physrisk/risk_models/risk_models.py @@ -0,0 +1,221 @@ +from enum import Enum +from typing import Callable, Set + +from physrisk.api.v1.impact_req_resp import ( + Category, + RiskMeasureDefinition, + RiskScoreValue, + ScoreBasedRiskMeasureDefinition, +) +from physrisk.kernel.hazards import ChronicHeat, CoastalInundation, RiverineInundation, Wind +from physrisk.kernel.impact_distrib import ImpactDistrib +from physrisk.kernel.risk import Measure, RiskMeasureCalculator + + +class Threshold(int, Enum): + ABS_HIGH = 0 + ABS_LOW = 1 + CHANGE = 2 + + +class RealEstateToyRiskMeasures(RiskMeasureCalculator): + """Toy model for calculating risk measures for real estate assets.""" + + def __init__(self): + self.model_summary = {"*Toy* model for real 
estate risk assessment."} + self.return_period = 100.0 # criteria based on 1 in 100-year flood or cyclone events + self.measure_thresholds_acute = { + Threshold.ABS_HIGH: 0.1, # fraction + Threshold.ABS_LOW: 0.03, # fraction + Threshold.CHANGE: 0.03, # fraction + } + self.measure_thresholds_cooling = { + Threshold.ABS_HIGH: 500, # kWh + Threshold.ABS_LOW: 300, # kWh + Threshold.CHANGE: 0.2, # fraction + } + + definition_acute = self._definition_acute() + definition_cooling = self._definition_cooling() + self._definition_lookup = { + RiverineInundation: definition_acute, + CoastalInundation: definition_acute, + Wind: definition_acute, + ChronicHeat: definition_cooling, + } + + def _definition_acute(self): + definition = ScoreBasedRiskMeasureDefinition( + hazard_types=[RiverineInundation.__name__, CoastalInundation.__name__, Wind.__name__], + values=self._definition_values(self._acute_description), + underlying_measures=[ + RiskMeasureDefinition( + measure_id="measures_0", + label=f"1-in-{self.return_period:0.0f} year annual loss.", + description=f"1-in-{self.return_period:0.0f} year loss as fraction of asset insured value.", + ) + ], + ) + return definition + + def _definition_cooling(self): + definition = ScoreBasedRiskMeasureDefinition( + hazard_types=[ChronicHeat.__name__], + values=self._definition_values(self._cooling_description), + underlying_measures=[ + RiskMeasureDefinition( + measure_id="measures_1", + label="Expected cooling annual energy consumption (kWh).", + description="Expected cooling annual energy consumption (kWh).", + ) + ], + ) + return definition + + def _definition_values(self, description: Callable[[Category], str]): + return [ + RiskScoreValue( + value=Category.REDFLAG, + label=( + "The asset is very significantly impacted and the impact will increase " + "as a result of climate change." + ), + description=description(Category.REDFLAG), + ), + RiskScoreValue( + value=Category.HIGH, + label="The asset is materially impacted and the impact will increase as a result of climate change.", + description=description(Category.HIGH), + ), + RiskScoreValue( + value=Category.MEDIUM, + label=( + "The asset is materially impacted but the impact will not significantly increase " + "as a result of climate change." + ), + description=description(Category.MEDIUM), + ), + RiskScoreValue( + value=Category.LOW, + label="No material impact.", + description=description(Category.LOW), + ), + RiskScoreValue(value=Category.NODATA, label="No data.", description="No data."), + ] + + def _acute_description(self, category: Category): + if category == Category.LOW: + description = ( + f"Projected 1-in-{self.return_period:0.0f} year annual loss is less than " + f"{self.measure_thresholds_acute[Threshold.ABS_LOW]*100:0.0f}% of asset value." + ) + elif category == Category.MEDIUM: + description = ( + f"Projected 1-in-{self.return_period:0.0f} year annual loss is more than " + f"{self.measure_thresholds_acute[Threshold.ABS_LOW]*100:0.0f}% but increases by less than " + f"{self.measure_thresholds_acute[Threshold.CHANGE]*100:0.0f}% of asset value over historical baseline." + ) + elif category == Category.HIGH: + description = ( + f"Projected 1-in-{self.return_period:0.0f} year annual loss is more than " + f"{self.measure_thresholds_acute[Threshold.ABS_LOW]*100:0.0f}% and increases by more than " + f"{self.measure_thresholds_acute[Threshold.CHANGE]*100:0.0f}% of asset value over historical baseline." 
+ ) + elif category == Category.REDFLAG: + description = ( + f"Projected 1-in-{self.return_period:0.0f} year annual loss is more than " + f"{self.measure_thresholds_acute[Threshold.ABS_HIGH]*100:0.0f}% and increases by more than " + f"{self.measure_thresholds_acute[Threshold.CHANGE]*100:0.0f}% of asset value over historical baseline." + ) + else: + description = "No Data" + return description + + def _cooling_description(self, category: Category): + if category == Category.LOW: + description = ( + f"Expected cooling annual energy consumption is less than " + f"{self.measure_thresholds_cooling[Threshold.ABS_LOW]}kWh." + ) + elif category == Category.MEDIUM: + description = ( + f"Expected cooling annual energy consumption is more than " + f"{self.measure_thresholds_cooling[Threshold.ABS_LOW]}kWh but increases by less than " + f"{self.measure_thresholds_cooling[Threshold.CHANGE]*100:0.0f}% over historical baseline." + ) + elif category == Category.HIGH: + description = ( + f"Expected cooling annual energy consumption is more than " + f"{self.measure_thresholds_cooling[Threshold.ABS_LOW]}kWh and increases by more than " + f"{self.measure_thresholds_cooling[Threshold.CHANGE]*100:0.0f}% over historical baseline." + ) + elif category == Category.REDFLAG: + description = ( + f"Expected cooling annual energy consumption is more than " + f"{self.measure_thresholds_cooling[Threshold.ABS_HIGH]}kWh and increases by more than " + f"{self.measure_thresholds_cooling[Threshold.CHANGE]*100:0.0f}% over historical baseline." + ) + else: + description = "No Data" + return description + + def calc_measure(self, hazard_type: type, base_impact: ImpactDistrib, impact: ImpactDistrib) -> Measure: + if hazard_type == ChronicHeat: + return self.calc_measure_cooling(hazard_type, base_impact, impact) + else: + return self.calc_measure_acute(hazard_type, base_impact, impact) + + def calc_measure_acute(self, hazard_type: type, base_impact: ImpactDistrib, impact: ImpactDistrib) -> Measure: + return_period = 100.0 # criterion based on 1 in 100-year flood events + histo_loss = base_impact.to_exceedance_curve().get_value(1.0 / return_period) + future_loss = impact.to_exceedance_curve().get_value(1.0 / return_period) + loss_change = future_loss - histo_loss + + if ( + future_loss > self.measure_thresholds_acute[Threshold.ABS_HIGH] + and loss_change > self.measure_thresholds_acute[Threshold.CHANGE] + ): + score = Category.REDFLAG + elif ( + future_loss > self.measure_thresholds_acute[Threshold.ABS_LOW] + and loss_change > self.measure_thresholds_acute[Threshold.CHANGE] + ): + score = Category.HIGH + elif ( + future_loss > self.measure_thresholds_acute[Threshold.ABS_LOW] + and loss_change <= self.measure_thresholds_acute[Threshold.CHANGE] + ): + score = Category.MEDIUM + else: + score = Category.LOW + return Measure(score=score, measure_0=future_loss, definition=self.get_definition(hazard_type)) + + def calc_measure_cooling(self, hazard_type: type, base_impact: ImpactDistrib, impact: ImpactDistrib) -> Measure: + histo_cooling = base_impact.mean_impact() + future_cooling = impact.mean_impact() + cooling_change = (future_cooling - histo_cooling) / histo_cooling + + if ( + future_cooling > self.measure_thresholds_cooling[Threshold.ABS_HIGH] + and cooling_change > self.measure_thresholds_cooling[Threshold.CHANGE] + ): + score = Category.REDFLAG + elif ( + future_cooling > self.measure_thresholds_cooling[Threshold.ABS_LOW] + and cooling_change > self.measure_thresholds_cooling[Threshold.CHANGE] + ): + score = Category.HIGH + elif ( + 
future_cooling > self.measure_thresholds_cooling[Threshold.ABS_LOW]
+            and cooling_change <= self.measure_thresholds_cooling[Threshold.CHANGE]
+        ):
+            score = Category.MEDIUM
+        else:
+            score = Category.LOW
+        return Measure(score=score, measure_0=future_cooling, definition=self.get_definition(hazard_type))
+
+    def get_definition(self, hazard_type: type):
+        return self._definition_lookup.get(hazard_type, None)
+
+    def supported_hazards(self) -> Set[type]:
+        return set([RiverineInundation, CoastalInundation, Wind, ChronicHeat])
diff --git a/data/interim/.gitkeep b/src/physrisk/utils/__init__.py
similarity index 100%
rename from data/interim/.gitkeep
rename to src/physrisk/utils/__init__.py
diff --git a/src/physrisk/utils/helpers.py b/src/physrisk/utils/helpers.py
new file mode 100644
index 00000000..b3d3699a
--- /dev/null
+++ b/src/physrisk/utils/helpers.py
@@ -0,0 +1,8 @@
+import collections.abc
+
+
+def get_iterable(x):
+    if isinstance(x, collections.abc.Iterable):
+        return x
+    else:
+        return (x,)
diff --git a/src/physrisk/utils/lazy.py b/src/physrisk/utils/lazy.py
new file mode 100644
index 00000000..0c47aabe
--- /dev/null
+++ b/src/physrisk/utils/lazy.py
@@ -0,0 +1,50 @@
+import importlib.util
+import sys
+from threading import Lock
+from types import ModuleType
+from typing import Callable, Generic
+
+from typing_extensions import TypeVar
+
+T = TypeVar("T")
+
+
+class Lazy(Generic[T]):
+    def __init__(self, provider: Callable[[], T]) -> None:
+        self._value = None
+        self._provider = provider
+        self._lock = Lock()
+
+    def set_provider(self, provider: Callable[[], T]):
+        """Update provider.
+
+        Args:
+            provider (Callable[..., T]): Provider to use.
+        """
+        with self._lock:
+            self._provider = provider
+
+    def value(self):
+        """Get value, loading as needed.
+
+        Returns:
+            T: Value.
+        """
+        with self._lock:
+            if self._value is None:
+                self._value = self._provider()
+            return self._value
+
+
+def lazy_import(name):
+    spec = importlib.util.find_spec(name)
+    if spec is not None:
+        spec_loader = spec.loader
+        assert spec_loader is not None
+        loader = importlib.util.LazyLoader(spec_loader)
+        spec.loader = loader
+        module = importlib.util.module_from_spec(spec)
+        sys.modules[name] = module
+        loader.exec_module(module)
+        return module
+    return ModuleType("not found", None)
diff --git a/data/processed/.gitkeep b/src/physrisk/vulnerability_models/__init__.py
similarity index 100%
rename from data/processed/.gitkeep
rename to src/physrisk/vulnerability_models/__init__.py
diff --git a/src/physrisk/vulnerability_models/chronic_heat_models.py b/src/physrisk/vulnerability_models/chronic_heat_models.py
new file mode 100644
index 00000000..4c1551bd
--- /dev/null
+++ b/src/physrisk/vulnerability_models/chronic_heat_models.py
@@ -0,0 +1,285 @@
+from typing import Iterable, List, Union, cast
+
+import numpy as np
+from scipy.stats import norm
+
+from physrisk.kernel.assets import Asset, IndustrialActivity
+from physrisk.kernel.hazard_model import HazardDataRequest, HazardDataResponse, HazardParameterDataResponse
+from physrisk.kernel.hazards import ChronicHeat
+from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType
+from physrisk.kernel.vulnerability_model import VulnerabilityModelBase
+
+
+class ChronicHeatGZNModel(VulnerabilityModelBase):
+    """Model which estimates the labour productivity impact of chronic heat, based on the paper "Neidell M,
+    Graff Zivin J, Sheahan M, Willwerth J, Fant C, Sarofim M, et al.
(2021) Temperature and work:
+    Time allocated to work under varying climate and labor market conditions."
+    Average annual work hours are based on USA values reported by the OECD for 2021."""
+
+    def __init__(self, indicator_id: str = "mean_degree_days/above/32c", delta=True):
+        super().__init__(
+            indicator_id=indicator_id, hazard_type=ChronicHeat, impact_type=ImpactType.disruption
+        )  # opportunity to give a model hint, but blank here
+        self.time_lost_per_degree_day = 4.671  # This comes from the paper, converted to Celsius
+        self.time_lost_per_degree_day_se = 2.2302  # This comes from the paper, converted to Celsius
+        self.total_labour_hours = 107460
+        self.delta = delta
+
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Request the hazard data needed by the vulnerability model for a specific asset
+        (this is a Google-style doc string).
+
+        Args:
+            asset: Asset for which data is requested.
+            scenario: Climate scenario of calculation.
+            year: Projection year of calculation.
+
+        Returns:
+            Baseline and scenario data requests.
+        """
+
+        return [
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario="historical",
+                year=1980,
+                indicator_id=self.indicator_id,
+            ),
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario=scenario,
+                year=year,
+                indicator_id=self.indicator_id,
+            ),
+        ]
+
+    def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib:
+        """Calculate impact (disruption) of asset based on the hazard data returned.
+
+        Args:
+            asset: Asset for which impact is calculated.
+            data_responses: responses to the hazard data requests generated in get_data_requests.
+
+        Returns:
+            Probability distribution of impacts.
+        """
+        assert isinstance(asset, IndustrialActivity)
+        baseline_dd_above_mean, scenario_dd_above_mean = data_responses
+
+        assert isinstance(baseline_dd_above_mean, HazardParameterDataResponse)
+        assert isinstance(scenario_dd_above_mean, HazardParameterDataResponse)
+
+        delta_dd_above_mean: float = scenario_dd_above_mean.parameter - baseline_dd_above_mean.parameter * self.delta
+
+        hours_worked = self.total_labour_hours
+        fraction_loss_mean = (delta_dd_above_mean * self.time_lost_per_degree_day) / hours_worked
+        fraction_loss_std = (delta_dd_above_mean * self.time_lost_per_degree_day_se) / hours_worked
+
+        return get_impact_distrib(fraction_loss_mean, fraction_loss_std, ChronicHeat, ImpactType.disruption)
+
+
+class ChronicHeatWBGTGZNModel(ChronicHeatGZNModel):
+    """Implementation of the WBGT/GZN chronic heat model. This model
+    inherits attributes from the ChronicHeatGZN model and estimates the
+    results based on applying both GZN and WBGT."""
+
+    def __init__(self, indicator_id: str = "mean_work_loss_high"):
+        super().__init__(indicator_id=indicator_id)  # opportunity to give a model hint, but blank here
+
+    def work_type_mapping(self):
+        return {"low": ["low", "medium"], "medium": ["medium", "low", "high"], "high": ["high", "medium"]}
+
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Request the hazard data needed by the vulnerability model for a specific asset
+        (this is a Google-style doc string).
+
+        Args:
+            asset: Asset for which data is requested.
+            scenario: Climate scenario of calculation.
+            year: Projection year of calculation.
+
+        Returns:
+            Single or multiple data requests.
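+
+        Example (illustrative sketch only; the coordinates, scenario name and work
+        intensity shown are assumptions, as is the IndustrialActivity constructor form):
+            >>> model = ChronicHeatWBGTGZNModel()
+            >>> plant = IndustrialActivity(45.268, 19.885, type="high")
+            >>> requests = model.get_data_requests(plant, scenario="ssp585", year=2050)
+            >>> len(requests)  # two GZN requests plus two per mapped work type
+            6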
+ """ + + work_type_mapping = self.work_type_mapping() + assert isinstance(asset, IndustrialActivity) + # specify hazard data needed. Model string is hierarchical and '/' separated. + model_gzn = "mean_degree_days/above/32c" + model_wbgt = "mean_work_loss/" + + asset_types = [type_asset for type_asset in work_type_mapping[asset.type]] + wbgt_data_requests = [] + for i_asset_types in asset_types: + wbgt_data_requests.append( + HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario="historical", + year=2010, + indicator_id=model_wbgt + i_asset_types, + ) + ) + + wbgt_data_requests.append( + HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=model_wbgt + i_asset_types, + ) + ) + + return [ + HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario="historical", + year=1980, + indicator_id=model_gzn, + ), + HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=model_gzn, + ), + ] + wbgt_data_requests + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + """ + Function to return the impact distribution of the wbgt model. + """ + + assert isinstance(asset, IndustrialActivity) + wbgt_responses = [cast(HazardParameterDataResponse, r) for r in data_responses[2:]] + + baseline_dd_above_mean = cast(HazardParameterDataResponse, data_responses[0]) + scenario_dd_above_mean = cast(HazardParameterDataResponse, data_responses[1]) + + hours_worked = self.total_labour_hours + fraction_loss_mean_base_gzn = (baseline_dd_above_mean.parameter * self.time_lost_per_degree_day) / hours_worked + + fraction_loss_mean_scenario_gzn = ( + scenario_dd_above_mean.parameter * self.time_lost_per_degree_day + ) / hours_worked + + fraction_loss_std_base = (baseline_dd_above_mean.parameter * self.time_lost_per_degree_day_se) / hours_worked + + fraction_loss_std_scenario = ( + scenario_dd_above_mean.parameter * self.time_lost_per_degree_day_se + ) / hours_worked + + baseline_work_ability = (1 - fraction_loss_mean_base_gzn) * (1 - wbgt_responses[0].parameter) + scenario_work_ability = (1 - fraction_loss_mean_scenario_gzn) * (1 - wbgt_responses[1].parameter) + + # Getting the parameters required for the uniform distribution. 
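+        # The reported work-loss value is treated as the midpoint of a uniform distribution
+        # U(a, b) whose half-width is half the distance to the neighbouring work-intensity
+        # curve(s); the variance of U(a, b) is (b - a)**2 / 12, which is computed below.
+        # For example (hypothetical values): wbgt_responses[0].parameter = 0.05 and
+        # wbgt_responses[2].parameter = 0.09 give a = 0.03, b = 0.07 and variance
+        # (0.04**2) / 12, roughly 1.3e-4.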
+ if asset.type in ["low", "high"]: + a_historical = ( + wbgt_responses[0].parameter - abs((wbgt_responses[2].parameter - wbgt_responses[0].parameter)) / 2 + ) + b_historical = ( + wbgt_responses[0].parameter + abs((wbgt_responses[2].parameter - wbgt_responses[0].parameter)) / 2 + ) + a_scenario = ( + wbgt_responses[1].parameter - abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2 + ) + b_scenario = ( + wbgt_responses[1].parameter + abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2 + ) + elif asset.type == "medium": + a_historical = wbgt_responses[0].parameter - (wbgt_responses[2].parameter - wbgt_responses[0].parameter) / 2 + b_historical = wbgt_responses[0].parameter + (wbgt_responses[4].parameter - wbgt_responses[0].parameter) / 2 + a_scenario = ( + wbgt_responses[1].parameter - abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2 + ) + b_scenario = ( + wbgt_responses[1].parameter + abs((wbgt_responses[5].parameter - wbgt_responses[1].parameter)) / 2 + ) + + # Estimation of the variance + variance_historical_uni = ((b_historical - a_historical) ** 2) / 12 + variance_scenario_uni = ((b_scenario - a_scenario) ** 2) / 12 + + variance_historical = two_variable_joint_variance( + (1 - fraction_loss_mean_base_gzn), + fraction_loss_std_base**2, + (1 - wbgt_responses[0].parameter), + variance_historical_uni, + ) + variance_scenario = two_variable_joint_variance( + (1 - fraction_loss_mean_scenario_gzn), + fraction_loss_std_scenario**2, + (1 - wbgt_responses[1].parameter), + variance_scenario_uni, + ) + + std_delta = variance_scenario ** (0.5) - variance_historical ** (0.5) + + total_work_loss_delta: float = baseline_work_ability - scenario_work_ability + + return get_impact_distrib(total_work_loss_delta, std_delta, self.hazard_type, self.impact_type) + + +def two_variable_joint_variance(ex, varx, ey, vary): + """ + Function to estimate the variance of two uncorrelated variables. + """ + return varx * vary + varx * (ey**2) + vary * (ex**2) + + +def get_impact_distrib( + fraction_loss_mean: float, fraction_loss_std: float, hazard_type: type, impact_type: ImpactType +) -> ImpactDistrib: + """Calculate impact (disruption) of asset based on the hazard data returned. + + Args: + fraction_loss_mean: mean of the impact distribution + fraction_loss_std: standard deviation of the impact distribution + hazard_type: Hazard Type. + impact_type: Impact Type. + + Returns: + Probability distribution of impacts. 
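+
+    Example (illustrative values only; a sketch rather than a prescribed usage):
+        >>> dist = get_impact_distrib(0.05, 0.01, ChronicHeat, ImpactType.disruption)
+        >>> bool(0.0 < dist.mean_impact() < 0.1)
+        True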
+ """ + impact_bins = np.concatenate( + [ + np.linspace(-0.001, 0.001, 1, endpoint=False), + np.linspace(0.001, 0.01, 9, endpoint=False), + np.linspace(0.01, 0.1, 10, endpoint=False), + np.linspace(0.1, 0.999, 10, endpoint=False), + np.linspace(0.999, 1.001, 2), + ] + ) + + probs_cumulative = np.vectorize(lambda x: norm.cdf(x, loc=fraction_loss_mean, scale=max(1e-12, fraction_loss_std)))( + impact_bins + ) + probs_cumulative[-1] = np.maximum(probs_cumulative[-1], 1.0) + probs = np.diff(probs_cumulative) + + probs_norm = np.sum(probs) + prob_differential = 1 - probs_norm + if probs_norm < 1e-8: + if fraction_loss_mean <= 0.0: + probs = np.concatenate((np.array([1.0]), np.zeros(len(impact_bins) - 2))) + elif fraction_loss_mean >= 1.0: + probs = np.concatenate((np.zeros(len(impact_bins) - 2), np.array([1.0]))) + else: + probs[0] = probs[0] + prob_differential + + return ImpactDistrib(hazard_type, impact_bins, probs, impact_type) diff --git a/src/physrisk/vulnerability_models/example_models.py b/src/physrisk/vulnerability_models/example_models.py new file mode 100644 index 00000000..d4edc16a --- /dev/null +++ b/src/physrisk/vulnerability_models/example_models.py @@ -0,0 +1,27 @@ +import numpy as np + +from ..kernel.impact_distrib import ImpactType +from ..kernel.vulnerability_matrix_provider import VulnMatrixProvider +from ..kernel.vulnerability_model import VulnerabilityModel, checked_beta_distrib + + +class ExampleCdfBasedVulnerabilityModel(VulnerabilityModel): + def __init__(self, *, indicator_id: str, hazard_type: type): + self.intensities = np.array([0, 0.01, 0.5, 1.0, 1.5, 2, 3, 4, 5, 6]) + self.impact_means = np.array([0, 0.2, 0.44, 0.58, 0.68, 0.78, 0.85, 0.92, 0.96, 1.0]) + self.impact_stddevs = np.array([0, 0.17, 0.14, 0.14, 0.17, 0.14, 0.13, 0.10, 0.06, 0]) + impact_bin_edges = np.array([0, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) + super().__init__( + indicator_id=indicator_id, + hazard_type=hazard_type, + impact_type=ImpactType.damage, + impact_bin_edges=impact_bin_edges, + ) + + def get_impact_curve(self, intensities, asset): + # we interpolate the mean and standard deviation and use this to construct distributions + impact_means = np.interp(intensities, self.intensities, self.impact_means) + impact_stddevs = np.interp(intensities, self.intensities, self.impact_stddevs) + return VulnMatrixProvider( + intensities, impact_cdfs=[checked_beta_distrib(m, s) for m, s in zip(impact_means, impact_stddevs)] + ) diff --git a/data/raw/.gitkeep b/src/physrisk/vulnerability_models/labour_models.py similarity index 100% rename from data/raw/.gitkeep rename to src/physrisk/vulnerability_models/labour_models.py diff --git a/src/physrisk/vulnerability_models/power_generating_asset_models.py b/src/physrisk/vulnerability_models/power_generating_asset_models.py new file mode 100644 index 00000000..4a171ccd --- /dev/null +++ b/src/physrisk/vulnerability_models/power_generating_asset_models.py @@ -0,0 +1,103 @@ +from typing import Iterable, Union + +import numpy as np + +from ..kernel.assets import Asset, PowerGeneratingAsset +from ..kernel.curve import ExceedanceCurve +from ..kernel.hazard_event_distrib import HazardEventDistrib +from ..kernel.hazard_model import HazardDataRequest, HazardDataResponse, HazardEventDataResponse +from ..kernel.hazards import Drought, RiverineInundation +from ..kernel.impact_distrib import ImpactType +from ..kernel.vulnerability_distrib import VulnerabilityDistrib +from ..kernel.vulnerability_model import DeterministicVulnerabilityModel, 
VulnerabilityModelAcuteBase, applies_to_assets
+
+
+@applies_to_assets([PowerGeneratingAsset])
+class InundationModel(VulnerabilityModelAcuteBase):
+    def __init__(self, indicator_id="flood_depth"):
+        # default impact curve
+        self.__curve_depth = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1])
+        self.__curve_impact = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365])
+        self.__indicator_id = indicator_id
+        super().__init__(indicator_id=indicator_id, hazard_type=RiverineInundation, impact_type=ImpactType.disruption)
+
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Provide the list of hazard event data requests required in order to calculate
+        the VulnerabilityDistrib and HazardEventDistrib for the asset."""
+
+        histo = HazardDataRequest(
+            RiverineInundation,
+            asset.longitude,
+            asset.latitude,
+            scenario="historical",
+            year=1980,
+            indicator_id="flood_depth",
+        )
+
+        future = HazardDataRequest(
+            RiverineInundation,
+            asset.longitude,
+            asset.latitude,
+            scenario=scenario,
+            year=year,
+            indicator_id=self.__indicator_id,
+        )
+
+        return histo, future
+
+    def get_distributions(self, asset: Asset, event_data_responses: Iterable[HazardDataResponse]):
+        """Return distributions for asset, based on hazard event data:
+        VulnerabilityDistrib and HazardEventDistrib."""
+
+        histo, future = event_data_responses
+        assert isinstance(histo, HazardEventDataResponse)
+        assert isinstance(future, HazardEventDataResponse)
+
+        protection_return_period = 250.0
+        curve_histo = ExceedanceCurve(1.0 / histo.return_periods, histo.intensities)
+        # the protection depth is the 250-year-return-period inundation depth at the asset location
+        protection_depth = curve_histo.get_value(1.0 / protection_return_period)
+
+        curve_future = ExceedanceCurve(1.0 / future.return_periods, future.intensities)
+        curve_future = curve_future.add_value_point(protection_depth)
+
+        depth_bins, probs = curve_future.get_probability_bins()
+
+        impact_bins = np.interp(depth_bins, self.__curve_depth, self.__curve_impact) / 365.0
+
+        # keep all bins, but make use of vulnerability matrix to apply protection level
+        # for improved performance we could truncate (and treat the identity matrix as a special case)
+        # but this general version allows model uncertainties to be added
+        probs_protected = np.where(depth_bins[1:] <= protection_depth, 0.0, 1.0)
+
+        vul = VulnerabilityDistrib(RiverineInundation, depth_bins, impact_bins, np.diag(probs_protected))
+        event = HazardEventDistrib(RiverineInundation, depth_bins, probs)
+
+        return vul, event
+
+
+class TemperatureModel(DeterministicVulnerabilityModel):
+    def __init__(self):
+        # does nothing
+        pass
+
+
+class SimpleDroughtModel(DeterministicVulnerabilityModel):
+    def __init__(self):
+        """This is a simple precursor to a model based on:
+        Luo T, Zhou L, Falzon J, Cheng Y, Christianson G, Wu Y, Habchi A. Assessing Physical Climate Risks for the
+        European Bank for Reconstruction and Development's Power Generation Project Investment Portfolio.
+        This simple model uses only a single indicator providing the number of months per year where the 3-month
+        SPEI is less than -2, as opposed to information related to SPEI -1.5, -2, -2.5, etc.
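+
+        The damage curve below is linear: an intensity of n such months per year maps to a
+        disruption of n * 0.1 / 12 of annual generation, so that, for example, 6 months with
+        3-month SPEI below -2 imply a 5% annual loss.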
+        """
+        intensities = np.array([0, 2, 4, 6, 8, 10, 12])
+        impacts = intensities * 0.1 / 12
+        super().__init__(
+            hazard_type=Drought,
+            damage_curve_intensities=intensities,
+            damage_curve_impacts=impacts,
+            indicator_id="months/spei3m/below/-2",
+        )
diff --git a/src/physrisk/vulnerability_models/real_estate_models.py b/src/physrisk/vulnerability_models/real_estate_models.py
new file mode 100644
index 00000000..a0d9a489
--- /dev/null
+++ b/src/physrisk/vulnerability_models/real_estate_models.py
@@ -0,0 +1,235 @@
+from collections import defaultdict
+from typing import Dict, List, Tuple
+
+import numpy as np
+
+from physrisk.api.v1.common import VulnerabilityCurve, VulnerabilityCurves
+from physrisk.kernel.assets import Asset, RealEstateAsset
+from physrisk.kernel.hazard_model import HazardDataRequest, HazardDataResponse, HazardParameterDataResponse
+from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType
+from physrisk.kernel.vulnerability_matrix_provider import VulnMatrixProvider
+from physrisk.kernel.vulnerability_model import VulnerabilityModel
+
+from ..kernel.hazards import ChronicHeat, CoastalInundation, PluvialInundation, RiverineInundation, Wind
+from ..kernel.vulnerability_model import (
+    DeterministicVulnerabilityModel,
+    VulnerabilityModelBase,
+    applies_to_events,
+    checked_beta_distrib,
+    get_vulnerability_curves_from_resource,
+)
+
+
+class RealEstateInundationModel(VulnerabilityModel):
+    _default_impact_bin_edges = np.array([0, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
+    _default_resource = "EU JRC global flood depth-damage functions"
+
+    def __init__(
+        self,
+        *,
+        hazard_type: type,
+        indicator_id: str,
+        resource: str = _default_resource,
+        impact_bin_edges=_default_impact_bin_edges,
+    ):
+        """
+        Inundation vulnerability model for real estate assets. Applies to both riverine and coastal inundation.
+
+        Args:
+            hazard_type: hazard type (riverine or coastal inundation).
+            indicator_id: ID of the hazard indicator to which this applies.
+            resource: embedded resource identifier used to infer vulnerability matrix.
+            impact_bin_edges: specifies the impact (fractional damage/disruption) bins.
+        """
+
+        curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource)
+
+        # for this model, key for looking up curves is (location, asset_type), e.g.
('Asian', 'Building/Industrial') + self.vulnerability_curves = dict(((c.location, c.asset_type), c) for c in curve_set.items) + self.vuln_curves_by_type = defaultdict(list) + self.proxy_curves: Dict[Tuple[str, str], VulnerabilityCurve] = {} + for item in curve_set.items: + self.vuln_curves_by_type[item.asset_type].append(item) + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + impact_type = ( + ImpactType.damage + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + super().__init__( + indicator_id=indicator_id, + hazard_type=hazard_type, + impact_type=impact_type, + impact_bin_edges=impact_bin_edges, + ) + + def get_impact_curve(self, intensity_bin_centres: np.ndarray, asset: Asset): + # we interpolate the mean and standard deviation and use this to construct distributions + assert isinstance(asset, RealEstateAsset) + + key = (asset.location, asset.type) + curve = self.vulnerability_curves[key] + + std_curve = curve + if len(curve.impact_std) == 0: + if key not in self.proxy_curves: + self.proxy_curves[key] = self.closest_curve_of_type(curve, asset) + std_curve = self.proxy_curves[key] + + impact_means = np.interp(intensity_bin_centres, curve.intensity, curve.impact_mean) + impact_stddevs = np.interp(intensity_bin_centres, std_curve.intensity, std_curve.impact_std) + + return VulnMatrixProvider( + intensity_bin_centres, + impact_cdfs=[checked_beta_distrib(m, s) for m, s in zip(impact_means, impact_stddevs)], + ) + + def closest_curve_of_type(self, curve: VulnerabilityCurve, asset: RealEstateAsset): + # we return the standard deviations of the damage curve most similar to the asset location + candidate_set = list(cand for cand in self.vuln_curves_by_type[asset.type] if (len(cand.impact_std) > 0)) + sum_square_diff = (self.sum_square_diff(curve, cand) for cand in candidate_set) + lowest = np.argmin(np.array(list(sum_square_diff))) + return candidate_set[lowest] + + def sum_square_diff(self, curve1: VulnerabilityCurve, curve2: VulnerabilityCurve): + return np.sum((curve1.impact_mean - np.interp(curve1.intensity, curve2.intensity, curve2.impact_mean)) ** 2) + + +@applies_to_events([CoastalInundation]) +class RealEstateCoastalInundationModel(RealEstateInundationModel): + def __init__( + self, + *, + indicator_id: str = "flood_depth", + resource: str = RealEstateInundationModel._default_resource, + impact_bin_edges=RealEstateInundationModel._default_impact_bin_edges, + ): + # by default include subsidence and 95% sea-level rise + super().__init__( + hazard_type=CoastalInundation, + indicator_id=indicator_id, + resource=resource, + impact_bin_edges=impact_bin_edges, + ) + + +class RealEstatePluvialInundationModel(RealEstateInundationModel): + def __init__( + self, + *, + indicator_id: str = "flood_depth", + resource: str = RealEstateInundationModel._default_resource, + impact_bin_edges=RealEstateInundationModel._default_impact_bin_edges, + ): + # by default include subsidence and 95% sea-level rise + super().__init__( + hazard_type=PluvialInundation, + indicator_id=indicator_id, + resource=resource, + impact_bin_edges=impact_bin_edges, + ) + + +@applies_to_events([RiverineInundation]) +class RealEstateRiverineInundationModel(RealEstateInundationModel): + def __init__( + self, + *, + indicator_id: str = "flood_depth", + resource: str = RealEstateInundationModel._default_resource, + impact_bin_edges=RealEstateInundationModel._default_impact_bin_edges, 
+    ):
+        super().__init__(
+            hazard_type=RiverineInundation,
+            indicator_id=indicator_id,
+            resource=resource,
+            impact_bin_edges=impact_bin_edges,
+        )
+
+
+class GenericTropicalCycloneModel(DeterministicVulnerabilityModel):
+    def __init__(self):
+        """A very simple generic tropical cyclone vulnerability model."""
+        v_half = 74.7  # m/s
+        intensities = np.arange(0, 100, 10)
+        impacts = self.wind_damage(intensities, v_half)
+        super().__init__(
+            hazard_type=Wind,
+            damage_curve_intensities=intensities,
+            damage_curve_impacts=impacts,
+            indicator_id="max_speed",
+            impact_type=ImpactType.damage,
+        )
+
+    def wind_damage(self, v: np.ndarray, v_half: float):
+        """Calculates damage based on the functional form of
+        Emanuel K. Global warming effects on US hurricane damage. Weather, Climate, and Society. 2011 Oct 1;3(4):261-8.
+        using a threshold speed of 25.7 m/s.
+        A review of the origin of the parameters is available in
+        Eberenz S, Lüthi S, Bresch DN. Regional tropical cyclone impact functions for
+        globally consistent risk assessments.
+        Natural Hazards and Earth System Sciences. 2021 Jan 29;21(1):393-415.
+        which also provides suggested region-specific variations.
+
+        Args:
+            v (np.ndarray[float]): Wind speeds at which to calculate the fractional damage.
+            v_half (float): The 'v_half' function parameter, i.e. the wind speed at which
+                fractional damage reaches one half.
+
+        Returns:
+            np.ndarray[float]: Fractional damage.
+        """
+        v_thresh = 25.7  # m/s
+        vn = np.where(v > v_thresh, v - v_thresh, 0) / (v_half - v_thresh)
+        return vn**3 / (1 + vn**3)
+
+
+class CoolingModel(VulnerabilityModelBase):
+    _default_transfer_coeff = 200  # W/K
+    _default_cooling_cop = 3  # coefficient of performance (dimensionless)
+
+    # 200 W/K is a nominal total-asset heat transfer coefficient. It is approximately the
+    # heat loss of a fairly recently built residential property.
+    # For 2000 degree days of heating required in a year, the corresponding heating requirement
+    # would be 200 * 2000 * 24 / 1000 = 9600 kWh.
+    # https://www.thegreenage.co.uk/how-much-energy-does-my-home-use/ has a gentle introduction to
+    # degree days for home cooling/heating.
+
+    def __init__(self, threshold_temp_c: float = 23):
+        """Simple degree-days-based model for calculating cooling requirements as annual kWh of
+        electricity equivalent. The main limitation of the approach is that solar radiation and
+        humidity are not taken into account. Limitations of similar approaches and ways to
+        address them are discussed, for example, in:
+
+        Berardi U, Jafarpur P. Assessing the impact of climate change on building heating
+        and cooling energy demand in Canada.
+        Renewable and Sustainable Energy Reviews. 2020 Apr 1;121:109681.
+
+        Cellura M, Guarino F, Longo S, Tumminia G. Climate change and the building sector:
+        Modelling and energy implications to an office building in southern Europe.
+        Energy for Sustainable Development. 2018 Aug 1;45:46-65.
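+
+        Worked example using the class defaults: 2000 cooling degree days with the nominal
+        heat transfer coefficient of 200 W/K require 200 * 2000 * 24 / 1000 = 9600 kWh of
+        heat removal per year; at a coefficient of performance of 3 this corresponds to
+        9600 / 3 = 3200 kWh of electricity.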
+ """ + self.indicator_id = "mean_degree_days/above/index" + self.hazard_type = ChronicHeat + self.threshold_temp_c = threshold_temp_c + + def get_data_requests(self, asset: Asset, *, scenario: str, year: int): + return HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + ) + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + (data,) = data_responses + assert isinstance(data, HazardParameterDataResponse) + # we interpolate the specific threshold from the different values + deg_days = float(np.interp(self.threshold_temp_c, data.param_defns, data.parameters)) # [0] + heat_transfer = deg_days * self._default_transfer_coeff * 24 / 1000 # kWh of heat removed from asset + annual_electricity = heat_transfer / self._default_cooling_cop # kWh of electricity required for heat removal + # this is non-probabilistic model: probability of 1 of electricity use + return ImpactDistrib(ChronicHeat, [annual_electricity, annual_electricity], [1]) diff --git a/src/physrisk/vulnerability_models/thermal_power_generation_models.py b/src/physrisk/vulnerability_models/thermal_power_generation_models.py new file mode 100644 index 00000000..c57c8f52 --- /dev/null +++ b/src/physrisk/vulnerability_models/thermal_power_generation_models.py @@ -0,0 +1,803 @@ +from collections import defaultdict +from typing import Iterable, List, Tuple, Union, cast + +import numpy as np +from scipy.stats import multivariate_normal, norm + +from physrisk.api.v1.common import VulnerabilityCurve, VulnerabilityCurves +from physrisk.kernel.assets import Asset, ThermalPowerGeneratingAsset, TurbineKind +from physrisk.kernel.impact_distrib import EmptyImpactDistrib, ImpactDistrib, ImpactType +from physrisk.kernel.vulnerability_model import DeterministicVulnerabilityModel, VulnerabilityModelBase + +from ..kernel.curve import ExceedanceCurve +from ..kernel.hazard_event_distrib import HazardEventDistrib +from ..kernel.hazard_model import ( + HazardDataRequest, + HazardDataResponse, + HazardEventDataResponse, + HazardParameterDataResponse, +) +from ..kernel.hazards import ( + AirTemperature, + ChronicHeat, + CoastalInundation, + Drought, + RiverineInundation, + WaterRisk, + WaterTemperature, +) +from ..kernel.vulnerability_distrib import VulnerabilityDistrib +from ..kernel.vulnerability_model import applies_to_assets, applies_to_events, get_vulnerability_curves_from_resource + + +class ThermalPowerGenerationInundationModel(DeterministicVulnerabilityModel): + # Number of disrupted days per year + _default_resource = "WRI thermal power plant physical climate vulnerability factors" + + # delimitation of the area for the hazard data expressed in metres (within [0,1000]). + _default_buffer = 1000 + + def __init__( + self, *, hazard_type: type, indicator_id: str, resource: str = _default_resource, buffer: int = _default_buffer + ): + """ + Inundation vulnerability model for thermal power generation. + Applies to both riverine and coastal inundation. + + Args: + hazard_type (type): _description_ + indicator_id (str): ID of the hazard indicator to which this applies. + resource (str): embedded resource identifier used to infer vulnerability table. + buffer (int): delimitation of the area for the hazard data expressed in metres (within [0,1000]). + """ + + curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource) + + # for this model, key for looking up curves is asset_type, e.g. 
'Steam/Recirculating' + self.vulnerability_curves = dict( + (c.asset_type, c) for c in curve_set.items if c.event_type == hazard_type.__base__.__name__ # type:ignore + ) + self.vuln_curves_by_type = defaultdict(list) + for key in self.vulnerability_curves: + self.vuln_curves_by_type[TurbineKind[key.split("/")[0]]].append(self.vulnerability_curves[key]) + + impact_type = ( + ImpactType.disruption + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + super().__init__( + indicator_id=indicator_id, + hazard_type=hazard_type, + impact_type=impact_type, + damage_curve_intensities=[], + damage_curve_impacts=[], + buffer=buffer, + ) + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + """Provide the list of hazard event data requests required in order to calculate + the VulnerabilityDistrib and HazardEventDistrib for the asset.""" + request_scenario = HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + buffer=self.buffer, + ) + request_baseline = HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=2030, + indicator_id=self.indicator_id, + buffer=self.buffer, + ) + return request_scenario, request_baseline + + def get_distributions( + self, asset: Asset, event_data_responses: Iterable[HazardDataResponse] + ) -> Tuple[VulnerabilityDistrib, HazardEventDistrib]: + assert isinstance(asset, ThermalPowerGeneratingAsset) + + (response_scenario, response_baseline) = event_data_responses + assert isinstance(response_scenario, HazardEventDataResponse) + assert isinstance(response_baseline, HazardEventDataResponse) + + baseline_curve = ExceedanceCurve(1.0 / response_baseline.return_periods, response_baseline.intensities) + protection_depth = ( + 0.0 + if len(response_baseline.intensities) == 0 + else baseline_curve.get_value(1.0 / asset.get_inundation_protection_return_period()) + ) + + intensity_curve = ExceedanceCurve(1.0 / response_scenario.return_periods, response_scenario.intensities) + if 0 < len(intensity_curve.values): + if intensity_curve.values[0] < protection_depth: + if protection_depth < intensity_curve.values[-1]: + intensity_curve = intensity_curve.add_value_point(protection_depth) + + intensity_bins, probability_bins = intensity_curve.get_probability_bins(include_last=True) + + curves: List[VulnerabilityCurve] = [] + if asset.turbine is None: + curves = [self.vulnerability_curves[key] for key in self.vulnerability_curves] + elif asset.cooling is not None: + key = "/".join([asset.turbine.name, asset.cooling.name]) + if key in self.vulnerability_curves: + curves = [self.vulnerability_curves[key]] + elif asset.turbine in self.vuln_curves_by_type: + curves = self.vuln_curves_by_type[asset.turbine] + + if 0 < len(curves): + impact_bins = [ + ( + np.max([np.interp(intensity, curve.intensity, curve.impact_mean) for curve in curves]) / 365.0 + if protection_depth < intensity + else 0.0 + ) + for intensity in intensity_bins + ] + else: + impact_bins = [0.0 for _ in intensity_bins] + + vul = VulnerabilityDistrib(self.hazard_type, intensity_bins, impact_bins, np.eye(len(probability_bins))) + event = HazardEventDistrib(self.hazard_type, intensity_bins, probability_bins) + return 
vul, event + + +@applies_to_events([CoastalInundation]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationCoastalInundationModel(ThermalPowerGenerationInundationModel): + def __init__( + self, + *, + indicator_id: str = "flood_depth", + resource: str = ThermalPowerGenerationInundationModel._default_resource, + ): + # by default include subsidence and 95% sea-level rise + super().__init__(hazard_type=CoastalInundation, indicator_id=indicator_id, resource=resource) + + +@applies_to_events([RiverineInundation]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationRiverineInundationModel(ThermalPowerGenerationInundationModel): + def __init__( + self, + *, + indicator_id: str = "flood_depth", + resource: str = ThermalPowerGenerationInundationModel._default_resource, + ): + # by default request HazardModel to use "MIROC-ESM-CHEM" GCM + super().__init__(hazard_type=RiverineInundation, indicator_id=indicator_id, resource=resource) + + +@applies_to_events([Drought]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationDroughtModel(VulnerabilityModelBase): + # Number of disrupted days per year + _default_resource = "WRI thermal power plant physical climate vulnerability factors" + _impact_based_on_a_single_point = False + + def __init__( + self, + *, + resource: str = _default_resource, + impact_based_on_a_single_point: bool = _impact_based_on_a_single_point, + ): + """ + Drought vulnerability model for thermal power generation. + + Args: + resource (str): embedded resource identifier used to infer vulnerability table. + impact_based_on_a_single_point (str): calculation based on a single point instead of a curve. + """ + + hazard_type = Drought + curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource) + + # for this model, key for looking up curves is asset_type, e.g. 'Steam/Recirculating' + self.vulnerability_curves = dict( + (c.asset_type, c) for c in curve_set.items if c.event_type == hazard_type.__name__ + ) + + self.vuln_curves_by_type = defaultdict(list) + for key in self.vulnerability_curves: + self.vuln_curves_by_type[TurbineKind[key.split("/")[0]]].append(self.vulnerability_curves[key]) + + impact_type = ( + ImpactType.disruption + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + super().__init__( + indicator_id="months/spei3m/below/-2" if impact_based_on_a_single_point else "months/spei12m/below/index", + hazard_type=hazard_type, + impact_type=impact_type, + ) + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + return HazardDataRequest( + self.hazard_type, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + ) + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + assert isinstance(asset, ThermalPowerGeneratingAsset) + + # The unit being number of months per year, we divide by 12 to express the result as a year fraction. 
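+        # For example, a parameter of 3 (months per year below the SPEI threshold) becomes an
+        # exceedance fraction of 3 / 12 = 0.25; where several thresholds are returned, the
+        # differences between consecutive exceedance fractions form the probability bins below.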
+ intensities = np.array(cast(HazardParameterDataResponse, data_responses[0]).parameters / 12.0) + if len(intensities) == 1: + thresholds = np.array([-2.0]) # hard-coded + probability_bins = intensities + else: + thresholds = np.array(cast(HazardParameterDataResponse, data_responses[0]).param_defns) + probability_bins = intensities[:-1] - intensities[1:] + probability_bins = np.append(probability_bins, intensities[-1]) + + curves: List[VulnerabilityCurve] = [] + if asset.turbine is None: + curves = [self.vulnerability_curves[key] for key in self.vulnerability_curves] + elif asset.cooling is not None: + key = "/".join([asset.turbine.name, asset.cooling.name]) + if key in self.vulnerability_curves: + curves = [self.vulnerability_curves[key]] + elif asset.turbine in self.vuln_curves_by_type: + curves = self.vuln_curves_by_type[asset.turbine] + + if 0 < len(curves): + if len(intensities) == 1: + impact = 0.0 + denominator = norm.cdf(thresholds[0]) + for curve in curves: + cdf = np.array([min(norm.cdf(threshold) / denominator, 1.0) for threshold in curve.intensity]) + impact = max( + impact, + curve.impact_mean[-1] * cdf[-1] + + np.sum( + (cdf[:-1] - cdf[1:]) + * (np.array(curve.impact_mean[:-1]) + np.array(curve.impact_mean[1:])) + / 2 + ), + ) + impact_bins = np.array([impact]) + else: + impact_bins = np.array( + [ + np.max( + [np.interp(threshold, curve.intensity[::-1], curve.impact_mean[::-1]) for curve in curves] + ) + for threshold in thresholds + ] + ) + else: + impact_bins = np.array([0.0 for _ in thresholds]) + + impact_bins = np.append(impact_bins, impact_bins[-1]) + + impact_distrib = ImpactDistrib( + self.hazard_type, + impact_bins, + probability_bins, + self.impact_type, + ) + return impact_distrib + + +@applies_to_events([AirTemperature]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationAirTemperatureModel(VulnerabilityModelBase): + # Number of disrupted days per year + _default_resource = "WRI thermal power plant physical climate vulnerability factors" + _default_temperatures = [25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0] + + def __init__(self, *, resource: str = _default_resource, temperatures: List[float] = _default_temperatures): + """ + Air temperature vulnerability model for thermal power generation. + + Args: + resource (str): embedded resource identifier used to infer vulnerability table. + temperatures (list[Float]): thresholds of the "days with average temperature above". + """ + curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource) + + # for this model, key for looking up curves is asset_type, e.g. 
'Steam/Recirculating' + self.vulnerability_curves = dict((c.asset_type, c) for c in curve_set.items if c.event_type == "AirTemperature") + self.vuln_curves_by_type = defaultdict(list) + for key in self.vulnerability_curves: + self.vuln_curves_by_type[TurbineKind[key.split("/")[0]]].append(self.vulnerability_curves[key]) + + impact_type = ( + ImpactType.disruption + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + + self.temperatures = temperatures + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + super().__init__(indicator_id="days_tas/above/{temp_c}c", hazard_type=AirTemperature, impact_type=impact_type) + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + data_request = [] + for temperature in self.temperatures: + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id.format(temp_c=str(int(temperature))), + ) + ) + for temperature in self.temperatures: + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario="historical", + year=2005, + indicator_id=self.indicator_id.format(temp_c=str(int(temperature))), + ) + ) + return data_request + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + assert isinstance(asset, ThermalPowerGeneratingAsset) + + assert 2 * len(self.temperatures) == len(data_responses) + + # The unit being number of days per year, we divide by 365 to express the result as a year fraction. + baseline = [ + 1.0 - cast(HazardParameterDataResponse, data_response).parameter / 365.0 + for data_response in data_responses[len(self.temperatures) :] + ] + + # Threshold when it no longer makes technical or economical sense to keep power plant running. + shutdown_air_temperature = 50 + + # Temperature at which the power plant generates electricity with the designed maximum efficiency. 
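+        # This is estimated as the temperature exceeded on 10% of days in the historical
+        # baseline, i.e. the point where 1 - (days above T) / 365 = 0.9. For example
+        # (hypothetical values), if 36.5 days per year historically exceed 30C, the design
+        # air temperature interpolates to 30C.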
+ design_air_temperature = np.interp(0.9, baseline, self.temperatures) + + intensities = np.array( + [ + cast(HazardParameterDataResponse, data_response).parameter / 365.0 + for data_response in data_responses[: len(self.temperatures)] + ] + ) + probability_bins = intensities[:-1] - intensities[1:] + probability_bins = np.append(probability_bins, intensities[-1]) + + curves: List[VulnerabilityCurve] = [] + if asset.turbine is None: + curves = [self.vulnerability_curves[key] for key in self.vulnerability_curves] + elif asset.cooling is not None: + key = "/".join([asset.turbine.name, asset.cooling.name]) + if key in self.vulnerability_curves: + curves = [self.vulnerability_curves[key]] + elif asset.turbine in self.vuln_curves_by_type: + curves = self.vuln_curves_by_type[asset.turbine] + + if 0 < len(curves): + impact_bins = np.array( + [ + ( + 1.0 + if shutdown_air_temperature < temperature + else ( + 0.0 + if temperature < design_air_temperature + else np.max( + [ + np.interp(temperature - design_air_temperature, curve.intensity, curve.impact_mean) + for curve in curves + ] + ) + ) + ) + for temperature in self.temperatures + ] + ) + else: + impact_bins = np.array([0.0 for _ in self.temperatures]) + + impact_bins = np.append(impact_bins, impact_bins[-1]) + + impact_distrib = ImpactDistrib( + self.hazard_type, + impact_bins, + probability_bins, + self.impact_type, + ) + return impact_distrib + + +@applies_to_events([WaterTemperature]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationWaterTemperatureModel(VulnerabilityModelBase): + # Number of disrupted days per year + _default_resource = "WRI thermal power plant physical climate vulnerability factors" + _default_correlation = 0.5 + + def __init__(self, *, resource: str = _default_resource, correlation: float = _default_correlation): + """ + Water temperature vulnerability model for thermal power generation. + + Args: + resource (str): embedded resource identifier used to infer vulnerability table. + correlation (float): correlation specifying the Gaussian copula which joins + the marginal distributions of water temperature and WBGT. + """ + curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource) + self.gaussian_copula = multivariate_normal( + mean=np.array([0.0, 0.0]), cov=np.array([[1.0, correlation], [correlation, 1.0]]) + ) + + # for this model, key for looking up curves is asset_type, e.g. 
'Steam/Recirculating' + self.vulnerability_curves = dict( + (c.asset_type, c) for c in curve_set.items if c.event_type == "WaterTemperature" + ) + self.vuln_curves_by_type = defaultdict(list) + for key in self.vulnerability_curves: + self.vuln_curves_by_type[TurbineKind[key.split("/")[0]]].append(self.vulnerability_curves[key]) + + self.regulatory_discharge_curves = dict( + (c.asset_type, c) for c in curve_set.items if c.event_type == "RegulatoryDischargeWaterLimit" + ) + + impact_type = ( + ImpactType.disruption + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + super().__init__(indicator_id="weeks_water_temp_above", hazard_type=WaterTemperature, impact_type=impact_type) + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + data_request = [] + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id=self.indicator_id, + ), + ) + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario="historical", + year=1991, + indicator_id=self.indicator_id, + ), + ) + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id="days_wbgt_above", + ), + ) + data_request.append( + HazardDataRequest( + ChronicHeat, + asset.longitude, + asset.latitude, + scenario="historical", + year=2005, + indicator_id="days_wbgt_above", + ), + ) + return data_request + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + assert isinstance(asset, ThermalPowerGeneratingAsset) + assert len(data_responses) == 4 + + # Water temperature below which the power plant does not experience generation losses. + design_intake_water_temperature = cast( + float, + np.interp( + 0.9, + # The unit being number of weeks per year, we divide by 52 to express the result as a year fraction. + 1.0 - cast(HazardParameterDataResponse, data_responses[1]).parameters / 52.0, + cast(HazardParameterDataResponse, data_responses[1]).param_defns, + ), + ) + + # Linear relationship between the outlet (discharge) + # water temperature and the intake water temperature. + design_intake_water_temperature_for_recirculating_steam_unit = min( + design_intake_water_temperature, (35.0 - 9.7951) / 1.0191 + ) + + # WBGT below which the recirculating steam unit does not + # experience any water-temperature-related generation losses. + design_wbgt_threshold = cast( + float, + np.interp( + 0.99, + # The unit being number of days per year, we divide by 365 to express the result as a year fraction. 
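+                # The 0.99 quantile means the threshold is the WBGT exceeded on roughly 1% of
+                # days (about 3.65 days per year) in the historical baseline.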
+ 1.0 - cast(HazardParameterDataResponse, data_responses[3]).parameters / 365.0, + cast(HazardParameterDataResponse, data_responses[3]).param_defns, + ), + ) + + impact_scale_for_recirculating_steam_unit = cast( + float, + np.interp( + design_wbgt_threshold, + cast(HazardParameterDataResponse, data_responses[2]).param_defns, + cast(HazardParameterDataResponse, data_responses[2]).parameters, + ) + / 365.0, + ) + + intake_water_temperatures = cast(HazardParameterDataResponse, data_responses[0]).param_defns + intake_water_temperature_intensities = cast(HazardParameterDataResponse, data_responses[0]).parameters / 52.0 + + if impact_scale_for_recirculating_steam_unit == 0.0: + intake_water_temperature_intensities_for_recirculating_steam_unit = intake_water_temperature_intensities + else: + gaussian_threshold: float = norm.ppf(impact_scale_for_recirculating_steam_unit) + intake_water_temperature_intensities_for_recirculating_steam_unit = np.array( + [ + ( + intake_water_temperature_intensity + if intake_water_temperature_intensity == 0.0 or intake_water_temperature_intensity == 1.0 + else self.gaussian_copula.cdf( + np.array([norm.ppf(intake_water_temperature_intensity), gaussian_threshold]) + ) + / impact_scale_for_recirculating_steam_unit + ) + for intake_water_temperature_intensity in intake_water_temperature_intensities + ] + ) + + intake_water_temperature_probability_bins = ( + intake_water_temperature_intensities[:-1] - intake_water_temperature_intensities[1:] + ) + intake_water_temperature_probability_bins = np.append( + intake_water_temperature_probability_bins, intake_water_temperature_intensities[-1] + ) + + intake_water_temperature_probability_bins_for_recirculating_steam_unit = ( + intake_water_temperature_intensities_for_recirculating_steam_unit[:-1] + - intake_water_temperature_intensities_for_recirculating_steam_unit[1:] + ) + intake_water_temperature_probability_bins_for_recirculating_steam_unit = np.append( + intake_water_temperature_probability_bins_for_recirculating_steam_unit, + intake_water_temperature_intensities_for_recirculating_steam_unit[-1], + ) + + curves: List[VulnerabilityCurve] = [] + if asset.turbine is None: + curves = [self.vulnerability_curves[key] for key in self.vulnerability_curves] + elif asset.cooling is not None: + key = "/".join([asset.turbine.name, asset.cooling.name]) + if key in self.vulnerability_curves: + curves = [self.vulnerability_curves[key]] + elif asset.turbine in self.vuln_curves_by_type: + curves = self.vuln_curves_by_type[asset.turbine] + + impact_distrib_by_curve: List[ImpactDistrib] = [] + for curve in curves: + scale = 1.0 + threshold = design_intake_water_temperature + probability_bins = intake_water_temperature_probability_bins + if curve.asset_type == "Steam/Recirculating": + scale = impact_scale_for_recirculating_steam_unit + threshold = design_intake_water_temperature_for_recirculating_steam_unit + probability_bins = intake_water_temperature_probability_bins_for_recirculating_steam_unit + impact_bins = np.array( + [ + ( + 0.0 + if intake_water_temperature < threshold + else scale + * cast( + float, + np.interp( + intake_water_temperature - threshold, + curve.intensity, + curve.impact_mean, + ), + ) + ) + for intake_water_temperature in intake_water_temperatures + ] + ) + if curve.asset_type in self.regulatory_discharge_curves: + regulatory_discharge_curve = self.regulatory_discharge_curves[curve.asset_type] + impact_bins = np.array( + [ + max( + impact, + cast( + float, + np.interp( + intake_water_temperature, + 
regulatory_discharge_curve.intensity, + regulatory_discharge_curve.impact_mean, + ), + ), + ) + for impact, intake_water_temperature in zip(impact_bins, intake_water_temperatures) + ] + ) + + impact_bins = np.append(impact_bins, impact_bins[-1]) + + impact_distrib_by_curve.append( + ImpactDistrib( + self.hazard_type, + impact_bins, + probability_bins, + self.impact_type, + ) + ) + + if 0 < len(impact_distrib_by_curve): + impact_distrib = sorted(impact_distrib_by_curve, key=lambda x: x.mean_impact())[-1] + else: + impact_distrib = ImpactDistrib( + self.hazard_type, + [0.0 for _ in range(0, len(intake_water_temperature_probability_bins) + 1)], + intake_water_temperature_probability_bins, + self.impact_type, + ) + + return impact_distrib + + +@applies_to_events([WaterRisk]) +@applies_to_assets([ThermalPowerGeneratingAsset]) +class ThermalPowerGenerationWaterStressModel(VulnerabilityModelBase): + # Number of disrupted days per year + _default_resource = "WRI thermal power plant physical climate vulnerability factors" + + def __init__(self, *, resource: str = _default_resource): + """ + Water stress vulnerability model for thermal power generation. + + Args: + resource (str): embedded resource identifier used to infer vulnerability table. + """ + curve_set: VulnerabilityCurves = get_vulnerability_curves_from_resource(resource) + + # for this model, key for looking up curves is asset_type, e.g. 'Steam/Recirculating' + self.vulnerability_curves = dict((c.asset_type, c) for c in curve_set.items if c.event_type == "WaterStress") + self.vuln_curves_by_type = defaultdict(list) + for key in self.vulnerability_curves: + self.vuln_curves_by_type[TurbineKind[key.split("/")[0]]].append(self.vulnerability_curves[key]) + + impact_type = ( + ImpactType.disruption + if len(self.vulnerability_curves) == 0 + else [ImpactType[self.vulnerability_curves[key].impact_type.lower()] for key in self.vulnerability_curves][ + 0 + ] + ) + + # global circulation parameter 'model' is a hint; can be overriden by hazard model + super().__init__(indicator_id="water_stress", hazard_type=WaterRisk, impact_type=impact_type) + + def get_data_requests( + self, asset: Asset, *, scenario: str, year: int + ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: + data_request = [] + data_request.append( + HazardDataRequest( + WaterRisk, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id="water_stress", + ), + ) + data_request.append( + HazardDataRequest( + WaterRisk, + asset.longitude, + asset.latitude, + scenario=scenario, + year=year, + indicator_id="water_supply", + ), + ) + data_request.append( + HazardDataRequest( + WaterRisk, + asset.longitude, + asset.latitude, + scenario="historical", + year=1999, + indicator_id="water_supply", + ), + ) + return data_request + + def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib: + assert isinstance(asset, ThermalPowerGeneratingAsset) + assert len(data_responses) == 3 + + if ( + len(cast(HazardParameterDataResponse, data_responses[0]).parameters) == 0 + or len(cast(HazardParameterDataResponse, data_responses[1]).parameters) == 0 + or len(cast(HazardParameterDataResponse, data_responses[2]).parameters) == 0 + ): + return EmptyImpactDistrib() + + # We (naively) assume that water stress follows a shifted uniform distribution: water_stress - 0.5 + U(0,1): + probability_water_stress_above_40pct = max( + 0.0, min(1.0, 0.1 + cast(HazardParameterDataResponse, data_responses[0]).parameter) + ) + + baseline_water_supply = 
cast(HazardParameterDataResponse, data_responses[2]).parameter + supply_reduction_rate = ( + 0.0 + if baseline_water_supply == 0.0 + else (cast(HazardParameterDataResponse, data_responses[1]).parameter / baseline_water_supply - 1.0) + ) + + curves: List[VulnerabilityCurve] = [] + if asset.turbine is None: + curves = [self.vulnerability_curves[key] for key in self.vulnerability_curves] + elif asset.cooling is not None: + key = "/".join([asset.turbine.name, asset.cooling.name]) + if key in self.vulnerability_curves: + curves = [self.vulnerability_curves[key]] + elif asset.turbine in self.vuln_curves_by_type: + curves = self.vuln_curves_by_type[asset.turbine] + + impact = ( + np.max([np.interp(-supply_reduction_rate, curve.intensity, curve.impact_mean) for curve in curves]) + if 0 < len(curves) + else 0.0 + ) + + impact_distrib = ImpactDistrib( + self.hazard_type, + [impact, impact], + [probability_water_stress_above_40pct], + self.impact_type, + ) + return impact_distrib diff --git a/src/test/data/hazard/test_events_wri.py b/src/test/data/hazard/test_events_wri.py deleted file mode 100644 index f25b523b..00000000 --- a/src/test/data/hazard/test_events_wri.py +++ /dev/null @@ -1,32 +0,0 @@ -""" Test asset impact calculations.""" -import unittest -import shutil, tempfile -import numpy as np -from physrisk.data.hazard.event_provider_wri import EventProviderWri -import boto3 - -class TestEventsWri(unittest.TestCase): - """Tests asset impact calculations.""" - - def setUp(self): - self.test_dir = tempfile.mkdtemp() - - def tearDown(self): - shutil.rmtree(self.test_dir) - - @unittest.skip("includes download of large files") - def test_wri_from_web(self): - cache_folder = self.test_dir - provider = EventProviderWri('web', cache_folder = cache_folder) - lon = 19.885738 - lat = 45.268405 - events = provider.get_inundation_depth([lon], [lat]) - print(events) - - - - - - - - \ No newline at end of file diff --git a/src/test/kernel/test_asset_impact.py b/src/test/kernel/test_asset_impact.py deleted file mode 100644 index f4f9731e..00000000 --- a/src/test/kernel/test_asset_impact.py +++ /dev/null @@ -1,85 +0,0 @@ -""" Test asset impact calculations.""" -import unittest -import numpy as np -from physrisk import AssetEventDistrib, ExceedanceCurve, VulnerabilityDistrib -from physrisk import Drought, Inundation -from physrisk import get_impact_distrib - -class TestAssetImpact(unittest.TestCase): - """Tests asset impact calculations.""" - - def test_impact_curve(self): - """Testing the generation of an asset when only an impact curve (e.g. 
damage curve is available)""" - - # exceedance curve - return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) - exceed_probs = 1.0 / return_periods - depths = np.array([0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]) - curve = ExceedanceCurve(exceed_probs, depths) - - # impact curve - vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) - vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) - - # say we need to add an extra depth point because the damage below that inundation depth is zero - cutoff_depth = 0.9406518 #0.75 - curve = curve.add_value_point(cutoff_depth) - # we could also choose ensure that all impact curve depth points are represented in exceedance curve; we do not here - - depth_bins, probs = curve.get_probability_bins() - - impact_bins = np.interp(depth_bins, vul_depths, vul_impacts) - - include_bin = depth_bins < cutoff_depth - probs[include_bin[:-1]] = 0 - - mean = np.sum((impact_bins[1:] + impact_bins[:-1]) * probs / 2) - self.assertAlmostEqual(mean, 4.8453897) - - def test_protection_level(self): - return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) - base_depth = np.array([0.0, 0.22372675, 0.3654859, 0.5393629, 0.6642473, 0.78564394, 0.9406518, 1.0539534, 1.1634114]) - future_depth = np.array([0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]) - - exceed_probs = 1.0 / return_periods - - protection_return_period = 250.0 # protection level of 250 years - protection_depth = np.interp(1.0 / protection_return_period, exceed_probs[::-1], base_depth[::-1]) - - self.assertAlmostEqual(protection_depth, 0.9406518) - - - def test_single_asset_impact(self): - # exceedance curve - return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) - exceed_probs = 1.0 / return_periods - depths = np.array([0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]) - curve = ExceedanceCurve(exceed_probs, depths) - - cutoff_depth = 0.9406518 - curve = curve.add_value_point(cutoff_depth) - - # impact curve - vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) - vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) - - depth_bins, probs = curve.get_probability_bins() - - impact_bins = np.interp(depth_bins, vul_depths, vul_impacts) - - # if upper end of bin less then cutoff then exclude - probs_w_cutoff = np.where(depth_bins[1:] <= cutoff_depth, 0.0, 1.0) - n_bins = len(probs) - vul = VulnerabilityDistrib(type(Inundation), depth_bins, impact_bins, np.diag(probs_w_cutoff)) # np.eye(n_bins, n_bins)) - event = AssetEventDistrib(type(Inundation), depth_bins, probs) - - impact = get_impact_distrib(event, vul) - mean = impact.mean_impact() - - self.assertAlmostEqual(mean, 4.8453897) - - - - - - \ No newline at end of file diff --git a/src/test/models/test_power_generating_asset_wri.py b/src/test/models/test_power_generating_asset_wri.py deleted file mode 100644 index 5b1e12f8..00000000 --- a/src/test/models/test_power_generating_asset_wri.py +++ /dev/null @@ -1,84 +0,0 @@ -""" Test asset impact calculations.""" -import unittest, os -import numpy as np -import pandas as pd -import physrisk -from physrisk import Asset, AssetEventDistrib, ExceedanceCurve, VulnerabilityDistrib -from physrisk.kernel import Drought, Inundation -from physrisk.kernel import calculate_impacts, get_impact_distrib -import physrisk.data.data_requests as 
dr -from physrisk.data import ReturnPeriodEvDataResp -from physrisk.models import InundationModel -from physrisk.data.hazard.event_provider_wri import EventProviderWri -import physrisk.data.raster_reader as rr -import time -from affine import Affine -from geotiff import GeoTiff -from tifffile import TiffFile - -from physrisk.kernel.assets import PowerGeneratingAsset - -class EventResponseMock: - - def __init__(self, return_periods, intensities): - self.return_periods = return_periods - self.intensities = intensities - -class TestPowerGeneratingAssetWri(unittest.TestCase): - """Tests World Resource Institute (WRI) models for power generating assets.""" - - def test_innundation(self): - # exceedance curve - return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) - base_depth = np.array([0.0, 0.22372675, 0.3654859, 0.5393629, 0.6642473, 0.78564394, 0.9406518, 1.0539534, 1.1634114]) - future_depth = np.array([0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]) - - # we mock the response of the data request - responses_mock = EventResponseMock(return_periods, base_depth), EventResponseMock(return_periods, future_depth) - - latitude, longitude = 45.268405, 19.885738 - assets = [Asset(latitude, longitude)] - model = InundationModel(assets) - - vul, event = model.get_distributions(assets[0], responses_mock) - - impact = get_impact_distrib(event, vul) - mean = impact.mean_impact() - - self.assertAlmostEqual(mean, 4.8453897) - - @unittest.skip("example, not test") - def test_with_data_sourcing(self): - - cache_folder = r"" - - asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) - - types = asset_list["primary_fuel"].unique() - - filtered = asset_list.loc[asset_list['primary_fuel'] == 'Gas'] # Nuclear - - interest = [3, 8, 13, 14, 22, 25, 27, 28, 33, 40, 51, 64, 65, 66, 71, 72, 80, 88, 92, 109] - - filtered = filtered[22:23] - - longitudes = np.array(filtered['longitude']) - latitudes = np.array(filtered['latitude']) - generation = np.array(filtered['estimated_generation_gwh']) - - assets = [PowerGeneratingAsset(lat, lon, generation = gen, primary_fuel = 'gas') for lon, lat, gen in zip(longitudes, latitudes, generation)] - - detailed_results = calculate_impacts(assets, cache_folder = cache_folder) - detailed_results[assets[0]].impact.to_exceedance_curve() - means = np.array([detailed_results[asset].mean_impact for asset in assets]) - - self.assertAlmostEqual(1, 1) - - - - - - - - - \ No newline at end of file diff --git a/src/visualization/.gitkeep b/src/visualization/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/src/visualization/__init__.py b/src/visualization/__init__.py deleted file mode 100644 index 66e87d39..00000000 --- a/src/visualization/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Visualizations module.""" diff --git a/src/visualization/dash/Procfile b/src/visualization/dash/Procfile deleted file mode 100644 index 38371ebb..00000000 --- a/src/visualization/dash/Procfile +++ /dev/null @@ -1 +0,0 @@ -web: gunicorn app:server diff --git a/src/visualization/dash/app.py b/src/visualization/dash/app.py deleted file mode 100644 index eb138f69..00000000 --- a/src/visualization/dash/app.py +++ /dev/null @@ -1,583 +0,0 @@ -""" -Use dash to produce a visualization for an asset-level drill-down -Dash app then migrated to React / Flask -""" - -from datetime import datetime -from typing import List -import dash, dash_table -import dash_core_components as dcc -import 
dash_html_components as html -from dash_table.Format import Format, Scheme, Trim -from matplotlib.pyplot import title, xlabel -from numpy.lib.function_base import select -import plotly.express as px -import plotly.graph_objs as go -from plotly.subplots import make_subplots -#import plotly.graph_objects as go -import pandas as pd -import numpy as np -import os, sys, json -from collections import OrderedDict -import dash_leaflet as dl -import dash_leaflet.express as dlx -import dash_bootstrap_components as dbc - - -sys.path.append("../..") - -from dash.dependencies import Output, Input, State -import physrisk -from physrisk.kernel.calculation import DetailedResultItem -from physrisk.kernel import PowerGeneratingAsset, Asset -from physrisk.kernel import calculate_impacts, curve - -cache_folder = r"" -with open(os.path.join(cache_folder, 'colormap.json')) as jf: - color_map_info = json.load(jf) - -asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) - -types = asset_list["primary_fuel"].unique() - -asset_filt = asset_list.loc[asset_list['primary_fuel'] == 'Gas'] # Nuclear - -asset_filt = asset_filt.append({'gppd_idnr' : 'WRI12541', 'name' : 'Turkana 1', 'primary_fuel' : 'Gas', 'longitude' : 36.596, 'latitude' : 2.898, 'capacity_mw' : 102, 'estimated_generation_gwh' : 394.4 }, ignore_index=True) - -defaultIndex = np.flatnonzero(asset_filt['gppd_idnr'] == 'WRI12541')[0] #GBR1000313 #WRI1023786 #WRI1023786 #WRI1006025 - -ids = np.array(asset_filt['gppd_idnr']) -names = np.array(asset_filt['name']) -longitudes = np.array(asset_filt['longitude']) -latitudes = np.array(asset_filt['latitude']) -generation = np.array(asset_filt['estimated_generation_gwh']) -capacity = np.array(asset_filt['capacity_mw']) - -all_assets = [PowerGeneratingAsset(lat, lon, generation = gen, capacity = cap, primary_fuel = 'gas', name = name, id = id) for lon, lat, gen, cap, name, id in zip(longitudes, latitudes, generation, capacity, names, ids)] - -detailed_results = calculate_impacts(all_assets[defaultIndex:defaultIndex+1], cache_folder = cache_folder) -impact_bins = detailed_results[all_assets[defaultIndex]].impact.impact_bins -impact_bins = curve.process_bin_edges_for_graph(impact_bins) - - -impacts_file = os.path.join(cache_folder, 'all_impacts.json') -if os.path.isfile(impacts_file): - with open(impacts_file, 'r') as r: - mean_loss = json.load(r) - -else: - full_detailed_results = calculate_impacts(all_assets, cache_folder = cache_folder) - mean_loss = {a.id : full_detailed_results[a].impact.mean_impact() for a in all_assets} - with open(os.path.join(cache_folder, 'all_impacts.json'), 'w') as w: - contents = json.dumps(mean_loss) - w.write(contents) - -def create_map_fig_leaflet(assets): - markers = [] - for asset in assets: - markers.append(dict( - title = asset.name, - name = asset.name, - primary_fuel = asset.primary_fuel, - tooltip="Asset: " + asset.name + '
Type: ' + asset.primary_fuel +"", - id = asset.id, - lat = asset.latitude, - lon = asset.longitude)) - - access_token = '' - - wri_tile =dl.Overlay(children=[dl.TileLayer( - url = 'https://api.mapbox.com/v4/joemoorhouse.32lvye13/{z}/{x}/{y}@2x.png?access_token=' + access_token, - attribution = "Riverine inundation")], name="Riverine inundation") - - cl = dl.Overlay(children=[dl.GeoJSON(data=dlx.dicts_to_geojson(markers), cluster=True, id='markers')], - id='clusters', name="Power generating assets (gas)") - - map = dl.Map( - dl.LayersControl( - [ dl.BaseLayer(dl.TileLayer(), name="Base Layer"), wri_tile, cl] - ), id='the-map', preferCanvas=True, center=[39, -98], zoom=4 - ) - - return (map) - - -def create_map_fig(assets, layers): - lats = [a.latitude for a in assets] - lons = [a.longitude for a in assets] - - color_discrete_sequence = px.colors.qualitative.Pastel1 - colors_map = px.colors.qualitative.T10 - magnitudes = [mean_loss[a.id] for a in assets] - - map_fig = go.Figure( - go.Scattermapbox( - lat=lats, - lon=lons, - hovertext=names, - hoverinfo='text', - marker=go.scattermapbox.Marker( - size = 15, - color = magnitudes, - #color = colors_map[0], - allowoverlap = False, - colorscale = 'Aggrnyl', - ), - selected=go.scattermapbox.Selected( - marker = go.scattermapbox.selected.Marker( - size = 20, - color = colors_map[2])), - #selectedpoints = [defaultIndex] - ), - - layout = go.Layout( - mapbox_style="stamen-terrain", - mapbox_center_lon=0, - mapbox_center_lat=10, - margin={"r":0,"t":0,"l":0,"b":0}, - clickmode='event+select' - )) - access_token = '' - map_fig.layout.mapbox.accesstoken = access_token - url = 'https://api.mapbox.com/v4/joemoorhouse.0zy9pvov/{z}/{x}/{y}@2x.png?access_token=' + access_token - map_fig.layout.mapbox.layers = [ - { - "below":"traces", - "sourcetype": "raster", - "source" : [url], - "sourceattribution": "WRI", - "visible": True - }] - - update_map_fig_layers(map_fig, layers) - - map_fig_colorbar = make_subplots(rows=1, cols=1) - min_val, min_index = color_map_info['min']['data'], color_map_info['min']['color_index'] - max_val, max_index = color_map_info['max']['data'], color_map_info['max']['color_index'] - colorscale = [] - for i in range(min_index, max_index + 1): - (r, g, b, a) = color_map_info['colormap'][str(i)] - colorscale.append([(float(i) - min_index) / (max_index - min_index) , f'rgb({r}, {g}, {b})']) - - levels = np.linspace(0.0, 2.0, 256) - - colorbar = go.Heatmap( - x = levels, - y = [0.0, 1.0], - z = [levels[:-1]], - colorscale = colorscale, #'Reds', - hoverongaps = False, - hovertemplate='') - - map_fig_colorbar.add_trace( - colorbar, - row=1, col=1 - ) - - map_fig_colorbar.update_layout(height=60, margin=dict(l=20, r=20, t=35, b=5), showlegend = False, font_family='Lato, sans-serif', font_size = 16, paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)') - map_fig_colorbar.update_xaxes(title_font = {"size": 20}, ticksuffix = 'm') #title = 'Intensity' - map_fig_colorbar.update_yaxes(visible = False, showticklabels = False, title = 'Inundation depth') - map_fig_colorbar.update_traces(showscale = False) #'rgb(158,202,225)') - return map_fig, map_fig_colorbar - -def update_map_fig_layers(map_fig, layers): - with map_fig.batch_update(): - map_fig.data[0].marker.size = 15 if "A" in layers else 0 - map_fig.data[0].selected.marker.size = 15 if "A" in layers else 0 - map_fig.layout.mapbox.layers[0]["visible"] = True if "I1000" in layers else False - return - -def get_fig_for_asset(asset, detailed_results): - - fig = make_subplots(rows=1, cols=2) - 
go1, go2 = get_fig_gos_for_asset(asset, detailed_results) - - fig.add_trace( - go1, - row=1, col=1 - ) - - fig.add_trace( - go2, - row=1, col=2 - ) - - fig.update_traces(marker_color = px.colors.qualitative.T10[0]) #'rgb(158,202,225)') - fig.update_xaxes(title = 'Impact (generation loss in days per year)', title_font = {"size": 20}, row=1, col=1) - fig.update_yaxes(title = 'Probability', title_font = {"size": 20}, ticksuffix = '%', row=1, col=1), - fig.update_xaxes(title = 'Exceedance probability', title_font = {"size": 20}, type="log", row=1, col=2) - fig.update_xaxes(autorange="reversed", ticksuffix = '%', row=1, col=2) - fig.update_yaxes(title = 'Loss (EUR)', title_font = {"size": 20}, row=1, col=2) - - headline = 'Average annual loss of {:.1f} generation days'.format(detailed_results[asset].impact.mean_impact()) - curve = detailed_results[asset].impact.to_exceedance_curve() - for p in [0.1, 0.05, 0.01, 0.005, 0.001]: - if p >= curve.probs[-1] and p <= curve.probs[0]: - loss = curve.get_value(p) - headline = '{0:.1g}% probability of annual loss greater than {1:.1f} generation days'.format(p * 100, loss) - break - - fig.update_layout(showlegend = False, font_family='Lato, sans-serif', font_size = 16, - title=headline, - title_font_size = 24, margin=dict(l=100, r=100, t=100, b=100)) - - return fig - -def get_fig_gos_for_asset(asset, detailed_results): - - res = detailed_results[asset] - - color_discrete_sequence = px.colors.qualitative.Pastel1 - - impact_bins = res.impact.impact_bins - impact_bin_probs = res.impact.prob - - impact_bins = curve.process_bin_edges_for_graph(impact_bins) - - with open(os.path.join(cache_folder, 'log.txt'), 'w') as f: - contents = json.dumps(str(impact_bins)) - f.write(contents) - - go1 = go.Bar(x = 0.5*(impact_bins[0:-1] + impact_bins[1:]), y = impact_bin_probs * 100, width = impact_bins[1:] - impact_bins[:-1]) - - exc = res.impact.to_exceedance_curve() - go2 = go.Scatter(x=exc.probs * 100, y=exc.values * asset.generation * 100 * 1000 / 365) - - return go1, go2 - -def get_fig_for_model(asset, detailed_results): - - fig = make_subplots(rows=1, cols=2) - go1, go2, headline = get_fig_gos_for_model(asset, detailed_results) - - fig.add_trace( - go1, - row=1, col=1 - ) - - fig.add_trace( - go2, - row=1, col=2 - ) - - fig.update_traces(marker_color = px.colors.qualitative.T10[0]) #'rgb(158,202,225)') - fig.update_xaxes(title = 'Inundation intensity (m)', title_font = {"size": 20}, row=1, col=1) - fig.update_yaxes(title = 'Probability', title_font = {"size": 20}, ticksuffix = "%", row=1, col=1), - fig.update_xaxes(title = 'Exceedance probability', title_font = {"size": 20}, type="log", row=1, col=2) - fig.update_xaxes(autorange="reversed", row=1, col=2, ticksuffix = "%") - fig.update_yaxes(title = 'Inundation intensity (m)', title_font = {"size": 20}, row=1, col=2) - fig.update_layout(showlegend = False, font_family='Lato, sans-serif', font_size = 16, - title=headline, # 'Inundation intensity', - title_font_size = 24, margin=dict(l=100, r=100, t=100, b=100)) - - return fig - -def get_fig_gos_for_model(asset, detailed_results): - - res : DetailedResultItem = detailed_results[asset] - - color_discrete_sequence = px.colors.qualitative.Pastel1 - - intensity_bins = res.event.intensity_bins - intensity_bin_probs = res.event.prob - - go1 = go.Bar(x = 0.5*(intensity_bins[0:-1] + intensity_bins[1:]), y = intensity_bin_probs * 100, width = intensity_bins[1:] - intensity_bins[:-1]) - - exc = res.event.exceedance # to_exceedance_curve() - go2 = go.Scatter(x=exc.probs * 100, 
y=exc.values) - - headline = '10% probability of event with intensity greater than {0:.2f}m in a single year'.format(exc.get_value(0.1)) - - return go1, go2, headline - -def get_fig_for_vulnerability(asset, detailed_results): - - fig = make_subplots(rows=1, cols=1) - go1 = get_fig_gos_for_vulnerability(asset, detailed_results) - - fig.add_trace( - go1, - row=1, col=1 - ) - - fig.update_xaxes(title = 'Inundation intensity (m)', title_font = {"size": 20}, row=1, col=1) - fig.update_yaxes(title = 'Impact (generation loss)', title_font = {"size": 20}, row=1, col=1), - fig.update_layout(showlegend = False, font_family='Lato, sans-serif', font_size = 16, - title='Vulnerability distribution', - title_font_size = 24, margin=dict(l=100, r=100, t=100, b=100)) - - return fig - -def get_fig_gos_for_vulnerability(asset, detailed_results): - - res : DetailedResultItem = detailed_results[asset] - - go1=go.Heatmap( - z=res.vulnerability.prob_matrix, - x=res.vulnerability.intensity_bins, - y=res.vulnerability.impact_bins, - colorscale = 'Reds', - hoverongaps = False, - hovertemplate='') - return go1 - - -asset_categories = np.array(["Power generating assets (gas)"]) -hazards = np.array(["Inundation"]) -date_start = datetime(2080, 1, 1) -date_end = datetime(2080, 12, 31) - -#map_fig = create_map_fig_leaflet(all_assets) -map_fig, map_fig_colorbar = create_map_fig(all_assets, ["I1000"]) - -external_stylesheets = [ - { - "href": "https://fonts.googleapis.com/css2?" - "family=Lato:wght@400;700&display=swap", - "rel": "stylesheet", - }, -] - -app = dash.Dash(__name__, external_stylesheets=external_stylesheets) -server = app.server -app.title = "Asset loss drill-down" - -app.layout = html.Div( - children=[ - html.Div( - children=[ - html.P(children="OS-Climate", className="header-subtitle"), - html.H1( - children="Asset loss drill-down", className="header-title" - ), - html.P( - children="Drill into financial loss" - " at asset level: loss, hazard intensity" - " and vulnerability", - className="header-description", - ), - ], - style = { 'background-image' : 'url("/assets/banner3.jpg', - 'background-position' : 'center', - 'background-repeat' : 'no-repeat', - 'background-size' : 2600 }, - className="header", - ), - html.Div( - children=[ - html.Div( - children=[ - html.Div(children="Asset Category", className="menu-title"), - dcc.Dropdown( - id="asset-filter", - options=[ - {"label": asset, "value": asset} - for asset in np.sort(asset_categories) - ], - value=asset_categories[0], - clearable=False, - className="dropdown", - ), - ], - style= {'width': '25%'}#, 'height': '100%' }, - ), - html.Div( - children=[ - html.Div(children="Hazard", className="menu-title"), - dcc.Dropdown( - id="hazard-filter", - options=[ - {"label": hazard, "value": hazard} - for hazard in np.sort(hazards) - ], - value="Inundation", - clearable=False, - searchable=False, - className="dropdown", - ), - ], - style= {'width': '20%' }#, 'height': '100%' }, - ), - html.Div( - children=[ - html.Div( - children="Dates", className="menu-title" - ), - dcc.Dropdown( - id="dates", - options=[ - {"label": date, "value": date} - for date in ["Today", "2030", "2050", "2080"] - ], - multi=True, - value=["2080"], - clearable=False, - searchable=False, - className="dropdown", - ), - ], - ), - html.Div( - children=[ - html.Div(children="Scenarios", className="menu-title"), - dcc.Dropdown( - id="scenario-filter", - options=[ - {"label": scenario, "value": scenario} - for scenario in ["Histo", "RCP4.5", "RCP8.5"] - ], - multi=True, - value=["RCP8.5"], - 
clearable=False, - searchable=False, - className="dropdown", - ), - ], - #className="custom-dropdown", - #style= {'width': '15%' } #, 'height': '100%' }, - ), - ], - className="menu", - ), - html.Div( - children=[ - html.Div( - children=html.H1(id="asset-category-header", children=asset_categories[0] - ), - ), - html.Div( - dcc.Checklist( - options=[ - {'label': 'Assets', 'value': 'A'}, - {'label': 'Inundation 10-year', 'value': 'I10'}, - {'label': 'Inundation 100-year', 'value': 'I100'}, - {'label': 'Inundation 1000-year', 'value': 'I1000'} - ], - id='layer-select', - value=[], - labelStyle={'display': 'inline-block'} - ), - style = {'width': '100%', 'display': 'flex', 'align-items': 'right', 'justify-content': 'right'}, - ), - html.Div( - children=[ - dcc.Graph( - id="map-chart", - figure=map_fig, - style={'width': '100%', 'height': '40vh'} - ), - ], - style={'margin': '2px'}, - className="card", - ), - html.Div([ - html.Div( - [html.P(children="Inundation depth (m)")], - #className="column", - style={'flex': '20%', 'margin': '0px 0px 0px 20px', 'font-size': 20, 'vertical-align': 'top'}, - ), - html.Div( - children=dcc.Graph( - id="map-chart-colorbar", - config={"displayModeBar": False}, - figure=map_fig_colorbar, - ), - style={'flex': '80%', 'padding': '0px 0px 0px 0px'}, - ), - ], - id="map-chart-colorbar-container", - style={'display':'flex'}, - className="card" - ), - html.Div(id = 'drilldown', style = {'display' : 'none'}, className = "wrapper", children=[ - html.Div( - children=html.H1(id="asset-header", children="Asset name" - ), - ), - html.Div( - children=dash_table.DataTable( - id='asset-data-table', - data = asset_filt.iloc[defaultIndex:defaultIndex + 1].to_dict('records'), - columns = [ - dict(id='gppd_idnr', name='ID'), - dict(id='name', name='Name'), - dict(id='primary_fuel', name='Primary Fuel'), - dict(id='capacity_mw', name='Capacity (MW)'), - dict(id='estimated_generation_gwh', name='Est. 
annual Generation (GWh)', type='numeric', format=Format(precision=1, scheme=Scheme.fixed)), - ], - style_cell={'textAlign': 'center', 'font-family': "Lato, sans-serif"}, - style_header={ 'fontWeight': 'bold' }, - style_as_list_view=True - ), - className="card", - ), - html.Div( - children=dcc.Graph( - id="exceedance-chart", - config={"displayModeBar": False} - ), - className="card", - ), - html.Div( - children=dcc.Graph( - id="intensity-chart", - config={"displayModeBar": False} - ), - className="card", - ), - html.Div( - children=dcc.Graph( - id="vulnerability-chart", - config={"displayModeBar": False} - ), - className="card", - ) - ]), - ], - className="wrapper", - ), - ] -) - -@app.callback( - [Output('drilldown', 'style'), Output('asset-header', 'children'), Output('asset-data-table', 'data'), - Output('exceedance-chart', 'figure'), Output('intensity-chart', 'figure'), Output('vulnerability-chart', 'figure')], - #[Input("markers", "click_feature")]) - [Input('map-chart', 'clickData')]) -def display_click_data(clickData): - visible = 'none' if clickData is None else 'block' - #index = defaultIndex if click_feature is None else np.flatnonzero(asset_filt['gppd_idnr'] == click_feature["properties"]["id"])[0] - index = defaultIndex if clickData is None else clickData['points'][0]['pointIndex'] - data = asset_filt.iloc[index:index + 1].to_dict('records') - asset = all_assets[index] - detailed_results = calculate_impacts([asset], cache_folder = cache_folder) - fig1 = get_fig_for_asset(asset, detailed_results) - fig2 = get_fig_for_model(asset, detailed_results) - fig3 = get_fig_for_vulnerability(asset, detailed_results) - - fig_plot = get_fig_for_model(asset, detailed_results) - fig_plot.update_layout(width=900) - fig_plot.write_image("C:/Users/joemo/Code/Repos/physrisk/docs/methodology/plots/fig_intensity.pdf") - return {'display':visible }, names[index], data, fig1, fig2, fig3 #json.dumps(clickData, indent=2) - -@app.callback( - #[Output('drilldown', 'style'), - [Output('map-chart-colorbar-container', 'style'), Output('map-chart', 'figure')],#, Output('map-chart-colorbar', 'figure')], - [Input('layer-select', 'value'), State('map-chart', 'figure')]) -def update_map_layers(value, fig): - - lon = fig["layout"]["mapbox"]["center"]["lon"] - lat = fig["layout"]["mapbox"]["center"]["lat"] - zoom = fig["layout"]["mapbox"].get("zoom", 1.0) - update_map_fig_layers(map_fig, value) - display_colorbar = any(["I" in v for v in value]) - display_asset = any(["A" in v for v in value]) - map_fig.update_layout(mapbox_center_lon=lon, - mapbox_center_lat=lat, - mapbox_zoom=zoom) - #{'display':'block'} if display_asset else {'display':'none'}, - return {'display':'flex'} if display_colorbar else {'display':'none'}, map_fig #, map_fig_colorbar - -if __name__ == "__main__": - app.run_server(debug=False) - diff --git a/src/visualization/dash/assets/banner1.jpg b/src/visualization/dash/assets/banner1.jpg deleted file mode 100644 index 085ad963..00000000 Binary files a/src/visualization/dash/assets/banner1.jpg and /dev/null differ diff --git a/src/visualization/dash/assets/banner2.jpg b/src/visualization/dash/assets/banner2.jpg deleted file mode 100644 index 94127f29..00000000 Binary files a/src/visualization/dash/assets/banner2.jpg and /dev/null differ diff --git a/src/visualization/dash/assets/banner3.jpg b/src/visualization/dash/assets/banner3.jpg deleted file mode 100644 index 9187f9e3..00000000 Binary files a/src/visualization/dash/assets/banner3.jpg and /dev/null differ diff --git 
a/src/visualization/dash/assets/style.css b/src/visualization/dash/assets/style.css deleted file mode 100644 index c41167ed..00000000 --- a/src/visualization/dash/assets/style.css +++ /dev/null @@ -1,79 +0,0 @@ -body { - font-family: "Lato", sans-serif; - margin: 0; - background-color: #F7F7F7; -} - -.header { - background-color: #303030; - height: 288px; - padding: 16px 0 0 0; -} - -.header-subtitle { - color: #707070; - font-size: 48px; - margin: 0 auto; - text-align: center; -} - -.header-title { - color: #FFFFFF; - font-size: 48px; - font-weight: bold; - text-align: center; - margin: 0 auto; -} - -.header-description { - color: #CFCFCF; - margin: 4px auto; - text-align: center; - max-width: 384px; -} - -.wrapper { - margin-right: auto; - margin-left: auto; - max-width: 1024px; - padding-right: 10px; - padding-left: 10px; - margin-top: 32px; -} - -.card { - margin-bottom: 24px; - box-shadow: 0 4px 6px 0 rgba(0, 0, 0, 0.18); -} - -.menu { - height: 102px; - width: 1012px; - display: flex; - justify-content: space-evenly; - padding-top: 24px; - padding-bottom: 4px; - margin: -60px auto 0 auto; - background-color: #FFFFFF; - box-shadow: 0 4px 6px 0 rgba(0, 0, 0, 0.18); -} - -.Select-control { - width: 256px; - height: 48px; -} - -.Select--single > .Select-control .Select-value, .Select-placeholder { - line-height: 48px; -} - -.Select--multi .Select-value-label { - line-height: 32px; -} - -.menu-title { - margin-bottom: 6px; - font-weight: bold; -} - - diff --git a/src/visualization/visualize.py b/src/visualization/visualize.py deleted file mode 100644 index 67bb6105..00000000 --- a/src/visualization/visualize.py +++ /dev/null @@ -1 +0,0 @@ -"""Visualizations code.""" diff --git a/test_environment.py b/test_environment.py deleted file mode 100644 index 53818506..00000000 --- a/test_environment.py +++ /dev/null @@ -1,26 +0,0 @@ -import sys - -REQUIRED_PYTHON = "python3" - - -def main(): - system_major = sys.version_info.major - if REQUIRED_PYTHON == "python": - required_major = 2 - elif REQUIRED_PYTHON == "python3": - required_major = 3 - else: - raise ValueError("Unrecognized python interpreter: {}".format(REQUIRED_PYTHON)) - - if system_major != required_major: - raise TypeError( - "This project requires Python {}. 
Found: Python {}".format( - required_major, sys.version - ) - ) - else: - print(">>> Development environment passes all tests!") - - -if __name__ == "__main__": - main() diff --git a/models/.gitkeep b/tests/__init__.py similarity index 100% rename from models/.gitkeep rename to tests/__init__.py diff --git a/references/.gitkeep b/tests/api/__init__.py similarity index 100% rename from references/.gitkeep rename to tests/api/__init__.py diff --git a/tests/api/container_test.py b/tests/api/container_test.py new file mode 100644 index 00000000..819b6434 --- /dev/null +++ b/tests/api/container_test.py @@ -0,0 +1,17 @@ +import fsspec.implementations.local as local +from dependency_injector import containers, providers + +from physrisk.container import Container +from physrisk.data.inventory_reader import InventoryReader + +from ..data.hazard_model_store_test import mock_hazard_model_store_heat + + +class TestContainer(containers.DeclarativeContainer): + __test__ = False + + config = providers.Configuration(default={"zarr_sources": ["embedded"]}) + + inventory_reader = providers.Singleton(lambda: InventoryReader(fs=local.LocalFileSystem(), base_path="")) + + zarr_store = providers.Singleton(lambda: mock_hazard_model_store_heat([0], [0])) diff --git a/tests/api/data_requests_test.py b/tests/api/data_requests_test.py new file mode 100644 index 00000000..59dd2854 --- /dev/null +++ b/tests/api/data_requests_test.py @@ -0,0 +1,194 @@ +import unittest + +import numpy as np +import numpy.testing + +from physrisk import requests +from physrisk.container import Container +from physrisk.data.hazard_data_provider import HazardDataHint +from physrisk.data.inventory import EmbeddedInventory +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.hazards import ChronicHeat, RiverineInundation + +from ..api.container_test import TestContainer +from ..base_test import TestWithCredentials +from ..data.hazard_model_store_test import ( + TestData, + get_mock_hazard_model_store_single_curve, + mock_hazard_model_store_heat, +) + + +class TestDataRequests(TestWithCredentials): + def setUp(self): + super().setUp() + + def tearDown(self): + super().tearDown() + + def test_hazard_data_availability(self): + # test that validation passes: + container = Container() + container.override(TestContainer()) + requester = container.requester() + _ = requester.get(request_id="get_hazard_data_availability", request_dict={}) + + @unittest.skip("requires mocking.") + def test_hazard_data_description(self): + # test that validation passes: + container = Container() + requester = container.requester + _ = requester.get(request_id="get_hazard_data_description", request_dict={"paths": ["test_path.md"]}) + + def test_generic_source_path(self): + inventory = EmbeddedInventory() + source_paths = get_default_source_paths(inventory) + result_heat = source_paths[ChronicHeat](indicator_id="mean_degree_days/above/32c", scenario="rcp8p5", year=2050) + result_flood = source_paths[RiverineInundation](indicator_id="flood_depth", scenario="rcp8p5", year=2050) + result_flood_hist = source_paths[RiverineInundation]( + indicator_id="flood_depth", scenario="historical", year=2080 + ) + result_heat_hint = source_paths[ChronicHeat]( + indicator_id="mean_degree_days/above/32c", + scenario="rcp8p5", + year=2050, + 
hint=HazardDataHint(path="chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_{scenario}_{year}"), + ) + + assert result_heat == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_ACCESS-CM2_rcp8p5_2050" + assert result_flood == "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050" + assert result_flood_hist == "inundation/wri/v2/inunriver_historical_000000000WATCH_1980" + assert result_heat_hint == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_rcp8p5_2050" + + def test_zarr_reading(self): + request_dict = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "RiverineInundation", + "longitudes": TestData.longitudes[0:3], # coords['longitudes'][0:100], + "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], + "year": 2080, + "scenario": "rcp8p5", + "indicator_id": "flood_depth", + "indicator_model_gcm": "MIROC-ESM-CHEM", + } + ], + } + # validate request + request = requests.HazardDataRequest(**request_dict) # type: ignore + + store = get_mock_hazard_model_store_single_curve() + + result = requests._get_hazard_data( + request, + ZarrHazardModel(source_paths=get_default_source_paths(EmbeddedInventory()), reader=ZarrReader(store=store)), + ) + + numpy.testing.assert_array_almost_equal_nulp(result.items[0].intensity_curve_set[0].intensities, np.zeros((9))) + numpy.testing.assert_array_almost_equal_nulp( + result.items[0].intensity_curve_set[1].intensities, np.linspace(0.1, 1.0, 9, dtype="f4") + ) + numpy.testing.assert_array_almost_equal_nulp(result.items[0].intensity_curve_set[2].intensities, np.zeros((9))) + + def test_zarr_reading_chronic(self): + request_dict = { + "group_ids": ["osc"], + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "ChronicHeat", + "longitudes": TestData.longitudes[0:3], # coords['longitudes'][0:100], + "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], + "year": 2050, + "scenario": "ssp585", + "indicator_id": "mean_degree_days/above/32c", + } + ], + } + # validate request + request = requests.HazardDataRequest(**request_dict) # type: ignore + + store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) + + source_paths = get_default_source_paths(EmbeddedInventory()) + result = requests._get_hazard_data( + request, ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)) + ) + numpy.testing.assert_array_almost_equal_nulp(result.items[0].intensity_curve_set[0].intensities[0], 600.0) + + # request_with_hint = request.copy() + # request_with_hint.items[0].path = "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_rcp8p5_2050" + # result = requests._get_hazard_data( + # request_with_hint, ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)) + # ) + + @unittest.skip("requires OSC environment variables set") + def test_zarr_reading_live(self): + # needs valid OSC_S3_BUCKET, OSC_S3_ACCESS_KEY, OSC_S3_SECRET_KEY + container = Container() + requester = container.requester() + + import json + from zipfile import ZipFile + + with ZipFile("src/test/api/test_lat_lons.json.zip") as z: + with z.open("test_lat_lons.json") as f: + data = json.loads(f.read()) + + request1 = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "ChronicHeat", + "longitudes": TestData.longitudes, + "latitudes": TestData.latitudes, + "year": 2030, + "scenario": "ssp585", + "indicator_id": "mean_work_loss/high", + } + ], + } + + request1 = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "ChronicHeat", + 
"longitudes": data["longitudes"], + "latitudes": data["latitudes"], + "year": 2030, + "scenario": "ssp585", + "indicator_id": "mean_work_loss/high", + } + ], + } + + response_floor = requester.get(request_id="get_hazard_data", request_dict=request1) + request1["interpolation"] = "linear" # type: ignore + response_linear = requester.get(request_id="get_hazard_data", request_dict=request1) + print(response_linear) + + floor = json.loads(response_floor)["items"][0]["intensity_curve_set"][5]["intensities"] + linear = json.loads(response_linear)["items"][0]["intensity_curve_set"][5]["intensities"] + + print(floor) + print(linear) + + request2 = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "CoastalInundation", + "longitudes": TestData.coastal_longitudes, + "latitudes": TestData.coastal_latitudes, + "year": 2080, + "scenario": "rcp8p5", + "model": "wtsub/95", + } + ], + } + response = requester.get(request_type="get_hazard_data", request_dict=request2) + print(response) diff --git a/tests/api/impact_requests_test.py b/tests/api/impact_requests_test.py new file mode 100644 index 00000000..c2d7306d --- /dev/null +++ b/tests/api/impact_requests_test.py @@ -0,0 +1,699 @@ +import json +import unittest + +import numpy as np +from pydantic import TypeAdapter + +from physrisk import requests +from physrisk.api.v1.common import Asset, Assets +from physrisk.api.v1.impact_req_resp import RiskMeasures, RiskMeasuresHelper +from physrisk.container import Container +from physrisk.data.inventory import EmbeddedInventory +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.assets import PowerGeneratingAsset, RealEstateAsset, ThermalPowerGeneratingAsset +from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels +from physrisk.vulnerability_models.power_generating_asset_models import InundationModel +from physrisk.vulnerability_models.real_estate_models import ( + RealEstateCoastalInundationModel, + RealEstateRiverineInundationModel, +) +from physrisk.vulnerability_models.thermal_power_generation_models import ( + ThermalPowerGenerationAirTemperatureModel, + ThermalPowerGenerationDroughtModel, + ThermalPowerGenerationRiverineInundationModel, + ThermalPowerGenerationWaterStressModel, + ThermalPowerGenerationWaterTemperatureModel, +) + +from ..base_test import TestWithCredentials +from ..data.hazard_model_store_test import ( + TestData, + add_curves, + mock_hazard_model_store_inundation, + shape_transform_21600_43200, + zarr_memory_store, +) + +# from physrisk.api.v1.impact_req_resp import AssetImpactResponse +# from physrisk.data.static.world import get_countries_and_continents + + +class TestImpactRequests(TestWithCredentials): + def test_asset_list_json(self): + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": 69.4787, + "latitude": 34.556, + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": -70.9157, + "latitude": -39.2145, + }, + ], + } + assets_obj = Assets(**assets) + self.assertIsNotNone(assets_obj) + + def test_impact_request(self): + """Runs short asset-level impact request.""" + + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": TestData.longitudes[0], + "latitude": 
TestData.latitudes[0], + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": TestData.longitudes[1], + "latitude": TestData.latitudes[1], + }, + ], + } + + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_measures": False, + "include_calc_details": True, + "years": [2080], + "scenarios": ["rcp8p5"], + } + + request = requests.AssetImpactRequest(**request_dict) # type: ignore + + curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163]) + store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + PowerGeneratingAsset: [InundationModel()], + RealEstateAsset: [RealEstateCoastalInundationModel(), RealEstateRiverineInundationModel()], + } + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + ) + + self.assertEqual(response.asset_impacts[0].impacts[0].hazard_type, "CoastalInundation") + + def test_risk_model_impact_request(self): + """Tests the risk model functionality of the impact request.""" + + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": TestData.longitudes[0], + "latitude": TestData.latitudes[0], + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": TestData.longitudes[1], + "latitude": TestData.latitudes[1], + }, + ], + } + + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_measures": False, + "include_calc_details": True, + "years": [2080], + "scenarios": ["rcp8p5"], + } + + request = requests.AssetImpactRequest(**request_dict) # type: ignore + + curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163]) + store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + PowerGeneratingAsset: [InundationModel()], + RealEstateAsset: [RealEstateCoastalInundationModel(), RealEstateRiverineInundationModel()], + } + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + ) + + self.assertEqual(response.asset_impacts[0].impacts[0].hazard_type, "CoastalInundation") + + def test_thermal_power_generation(self): + + latitudes = np.array([32.6017]) + longitudes = np.array([-87.7811]) + + assets = [ + ThermalPowerGeneratingAsset( + latitude=latitudes[0], + longitude=longitudes[0], + location="North America", + capacity=1288.4, + type=archetype, + ) + for archetype in [ + "Gas", + "Gas/Gas", + "Gas/Steam", + "Gas/Steam/Dry", + "Gas/Steam/OnceThrough", + "Gas/Steam/Recirculating", + ] + ] + + assets_provided_in_the_request = False + + request_dict = { + "assets": Assets( + items=( + [ + Asset( + asset_class=asset.__class__.__name__, + latitude=asset.latitude, + longitude=asset.longitude, + type=asset.type, + capacity=asset.capacity, + location=asset.location, + ) + for asset in assets + ] + if assets_provided_in_the_request + else [] + ) + ), + "include_asset_level": True, + "include_calc_details": True, + "years": [2050], + "scenarios": ["ssp585"], + } + + 
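+        # The vulnerability models exercised below request riverine inundation, drought,
+        # water-related, chronic heat, water temperature and WBGT indicators. An in-memory
+        # zarr store is therefore populated with deterministic curves under the array paths
+        # that the default source-path lookup is expected to resolve for the requested
+        # scenarios and years, including the historical baselines.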
request = requests.AssetImpactRequest(**request_dict) # type: ignore + + store, root = zarr_memory_store() + + # Add mock riverine inundation data: + return_periods = [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2030", + shape, + np.array( + [ + 8.378922939300537e-05, + 0.3319014310836792, + 0.7859689593315125, + 1.30947744846344, + 1.6689927577972412, + 2.002290964126587, + 2.416414737701416, + 2.7177860736846924, + 3.008821725845337, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050", + shape, + np.array( + [ + 0.001158079132437706, + 0.3938717246055603, + 0.8549619913101196, + 1.3880255222320557, + 1.7519289255142212, + 2.0910017490386963, + 2.5129663944244385, + 2.8202412128448486, + 3.115604877471924, + ] + ), + return_periods, + t, + ) + + # Add mock drought data: + return_periods = [0.0, -1.0, -1.5, -2.0, -2.5, -3.0, -3.6] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "drought/osc/v1/months_spei12m_below_index_MIROC6_ssp585_2050", + shape, + np.array( + [ + 6.900000095367432, + 1.7999999523162842, + 0.44999998807907104, + 0.06584064255906408, + 0.06584064255906408, + 0.0, + 0.0, + ] + ), + return_periods, + t, + ) + + return_periods = [0.0] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + + # Add mock drought (Jupiter) data: + add_curves( + root, + longitudes, + latitudes, + "drought/jupiter/v1/months_spei3m_below_-2_ssp585_2050", + shape, + np.array([0.06584064255906408]), + return_periods, + t, + ) + + # Add mock water-related risk data: + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_stress_ssp585_2050", + shape, + np.array([0.14204320311546326]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_supply_ssp585_2050", + shape, + np.array([76.09415435791016]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_supply_historical_1999", + shape, + np.array([88.62285614013672]), + return_periods, + t, + ) + + # Add mock chronic heat data: + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_ssp585_2050", + shape, + np.array([148.55369567871094]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_ssp585_2050", + shape, + np.array([65.30751037597656]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.6000000238418579]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + 
"chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_historical_2005", + shape, + np.array([120.51940155029297]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_historical_2005", + shape, + np.array([14.839207649230957]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_historical_2005", + shape, + np.array([0.049863386899232864]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + + # Add mock water temperature data: + return_periods = [5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30, 32.5, 35, 37.5, 40] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_historical_1991", + shape, + np.array( + [ + 52.0, + 51.9, + 49.666668, + 45.066666, + 38.0, + 31.1, + 26.0, + 21.066668, + 14.233334, + 8.0333338, + 5.0999999, + 2.3666666, + 6.6666669, + 3.3333335, + 0.0, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_rcp8p5_2050", + shape, + np.array([51.85, 51.5, 50.25, 46.75, 41.95, 35.35, 29.4, 24.55, 20.15, 13.85, 6.75, 3.5, 1.3, 0.25, 0.1]), + return_periods, + t, + ) + + # Add mock WBGT data: + return_periods = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_ssp585_2050", + shape, + np.array( + [ + 363.65054, + 350.21094, + 303.6388, + 240.48442, + 181.82924, + 128.46844, + 74.400276, + 1.3997267, + 0.0, + 0.0, + 0.0, + 0.0, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_historical_2005", + shape, + np.array( + [361.95273, 342.51804, 278.8146, 213.5123, 157.4511, 101.78238, 12.6897545, 0.0, 0.0, 0.0, 0.0, 0.0] + ), + return_periods, + t, + ) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationWaterStressModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ] + } + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + assets=None if 
assets_provided_in_the_request else assets, + ) + + # Air Temperature + self.assertAlmostEqual(response.asset_impacts[0].impacts[0].impact_mean, 0.0075618606988512764) + self.assertAlmostEqual(response.asset_impacts[1].impacts[0].impact_mean, 0.0075618606988512764) + self.assertAlmostEqual(response.asset_impacts[2].impacts[0].impact_mean, 0.0025192163596997963) + self.assertAlmostEqual(response.asset_impacts[3].impacts[0].impact_mean, 0.0025192163596997963) + self.assertAlmostEqual(response.asset_impacts[4].impacts[0].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[5].impacts[0].impact_mean, 0.0) + + # Drought + self.assertAlmostEqual(response.asset_impacts[0].impacts[1].impact_mean, 0.0008230079663917424) + self.assertAlmostEqual(response.asset_impacts[1].impacts[1].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[2].impacts[1].impact_mean, 0.0008230079663917424) + self.assertAlmostEqual(response.asset_impacts[3].impacts[1].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[4].impacts[1].impact_mean, 0.0008230079663917424) + self.assertAlmostEqual(response.asset_impacts[5].impacts[1].impact_mean, 0.0008230079663917424) + + # Riverine Inundation + self.assertAlmostEqual(response.asset_impacts[0].impacts[2].impact_mean, 0.0046864436945997625) + self.assertAlmostEqual(response.asset_impacts[1].impacts[2].impact_mean, 0.0046864436945997625) + self.assertAlmostEqual(response.asset_impacts[2].impacts[2].impact_mean, 0.0046864436945997625) + self.assertAlmostEqual(response.asset_impacts[3].impacts[2].impact_mean, 0.0046864436945997625) + self.assertAlmostEqual(response.asset_impacts[4].impacts[2].impact_mean, 0.0046864436945997625) + self.assertAlmostEqual(response.asset_impacts[5].impacts[2].impact_mean, 0.0046864436945997625) + + # Water Stress + self.assertAlmostEqual(response.asset_impacts[0].impacts[3].impact_mean, 0.010181435900296947) + self.assertAlmostEqual(response.asset_impacts[1].impacts[3].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[2].impacts[3].impact_mean, 0.010181435900296947) + self.assertAlmostEqual(response.asset_impacts[3].impacts[3].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[4].impacts[3].impact_mean, 0.010181435900296947) + self.assertAlmostEqual(response.asset_impacts[5].impacts[3].impact_mean, 0.010181435900296947) + + # Water Temperature + self.assertAlmostEqual(response.asset_impacts[0].impacts[4].impact_mean, 0.1448076958069578) + self.assertAlmostEqual(response.asset_impacts[1].impacts[4].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[2].impacts[4].impact_mean, 0.1448076958069578) + self.assertAlmostEqual(response.asset_impacts[3].impacts[4].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[4].impacts[4].impact_mean, 0.1448076958069578) + self.assertAlmostEqual(response.asset_impacts[5].impacts[4].impact_mean, 0.005896707722257193) + + vulnerability_models = DictBasedVulnerabilityModels( + { + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationDroughtModel(impact_based_on_a_single_point=True), + ] + } + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + assets=None if assets_provided_in_the_request else assets, + ) + + # Drought (Jupiter) + self.assertAlmostEqual(response.asset_impacts[0].impacts[0].impact_mean, 0.0005859470850072303) + self.assertAlmostEqual(response.asset_impacts[1].impacts[0].impact_mean, 0.0) 
+ self.assertAlmostEqual(response.asset_impacts[2].impacts[0].impact_mean, 0.0005859470850072303) + self.assertAlmostEqual(response.asset_impacts[3].impacts[0].impact_mean, 0.0) + self.assertAlmostEqual(response.asset_impacts[4].impacts[0].impact_mean, 0.0005859470850072303) + self.assertAlmostEqual(response.asset_impacts[5].impacts[0].impact_mean, 0.0005859470850072303) + + @unittest.skip("example, not test") + def test_example_portfolios(self): + example_portfolios = requests._get_example_portfolios() + for assets in example_portfolios: + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_calc_details": False, + "years": [2030, 2040, 2050], + "scenarios": ["ssp585"], + } + container = Container() + requester = container.requester() + response = requester.get(request_id="get_asset_impact", request_dict=request_dict) + with open("out.json", "w") as f: + f.write(response) + assert response is not None + + @unittest.skip("example, not test") + def test_example_portfolios_risk_measures(self): + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Commercial", + "location": "Europe", + "longitude": 11.5391, + "latitude": 48.1485, + } + ], + } + # 48.1485°, 11.5391° + # 48.1537°, 11.5852° + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_calc_details": True, + "include_measures": True, + "years": [2030, 2040, 2050], + "scenarios": ["ssp245", "ssp585"], # ["ssp126", "ssp245", "ssp585"], + } + container = Container() + requester = container.requester() + response = requester.get(request_id="get_asset_impact", request_dict=request_dict) + risk_measures_dict = json.loads(response)["risk_measures"] + helper = RiskMeasuresHelper(TypeAdapter(RiskMeasures).validate_python(risk_measures_dict)) + for hazard_type in ["RiverineInundation", "CoastalInundation", "ChronicHeat", "Wind"]: + scores, measure_values, measure_defns = helper.get_measure(hazard_type, "ssp585", 2050) + label, description = helper.get_score_details(scores[0], measure_defns[0]) + print(label) diff --git a/tests/api/test_lat_lons.json.zip b/tests/api/test_lat_lons.json.zip new file mode 100644 index 00000000..a8529d3d Binary files /dev/null and b/tests/api/test_lat_lons.json.zip differ diff --git a/tests/base_test.py b/tests/base_test.py new file mode 100644 index 00000000..5e72175a --- /dev/null +++ b/tests/base_test.py @@ -0,0 +1,21 @@ +import os +import pathlib +import shutil +import tempfile +import unittest + +from dotenv import load_dotenv + + +class TestWithCredentials(unittest.TestCase): + """Test that attempts to load contents of credentials.env into environment variables (if present)""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + dotenv_dir = os.environ.get("CREDENTIAL_DOTENV_DIR", os.getcwd()) + dotenv_path = pathlib.Path(dotenv_dir) / "credentials.env" + if os.path.exists(dotenv_path): + load_dotenv(dotenv_path=dotenv_path, override=True) + + def tearDown(self): + shutil.rmtree(self.test_dir) diff --git a/reports/.gitkeep b/tests/data/__init__.py similarity index 100% rename from reports/.gitkeep rename to tests/data/__init__.py diff --git a/tests/data/events_retrieval_test.py b/tests/data/events_retrieval_test.py new file mode 100644 index 00000000..c179b751 --- /dev/null +++ b/tests/data/events_retrieval_test.py @@ -0,0 +1,230 @@ +import os +import unittest + +# import fsspec.implementations.local as local # type: ignore +import numpy as np +import numpy.testing +import scipy.interpolate +import zarr +from 
fsspec.implementations.memory import MemoryFileSystem +from shapely import Polygon + +from physrisk.api.v1.hazard_data import HazardAvailabilityRequest, HazardResource, Scenario +from physrisk.data.inventory import EmbeddedInventory, Inventory +from physrisk.data.inventory_reader import InventoryReader +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.kernel.hazard_model import HazardDataRequest +from physrisk.kernel.hazards import RiverineInundation +from physrisk.requests import _get_hazard_data_availability + +# from pathlib import PurePosixPath +from ..base_test import TestWithCredentials +from ..data.hazard_model_store_test import ZarrStoreMocker, mock_hazard_model_store_inundation + + +class TestEventRetrieval(TestWithCredentials): + @unittest.skip("S3 access needed") + def test_inventory_change(self): + # check validation passes calling in service-like way + embedded = EmbeddedInventory() + resources1 = embedded.to_resources() + inventory = Inventory(resources1).json_ordered() + with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "inventory.json"), "w") as f: + f.write(inventory) + + def test_hazard_data_availability_summary(self): + # check validation passes calling in service-like way + inventory = EmbeddedInventory() + response = _get_hazard_data_availability( + HazardAvailabilityRequest(sources=["embedded"]), inventory, inventory.colormaps() + ) # , "hazard_test"]) + assert len(response.models) > 0 # rely on Pydantic validation for test + + def test_set_get_inventory(self): + fs = MemoryFileSystem() + reader = InventoryReader(fs=fs) + reader.append("hazard_test", [self._test_hazard_model()]) + assert reader.read("hazard_test")[0].indicator_id == "test_indicator_id" + + @unittest.skip("S3 access needed") + def test_set_get_inventory_s3(self): + reader = InventoryReader() + reader.append("hazard_test", [self._test_hazard_model()]) + assert reader.read("hazard_test")[0].id == "test_indicator_id" + + def _test_hazard_model(self): + return HazardResource( + hazard_type="TestHazardType", + indicator_id="test_indicator_id", + indicator_model_gcm="test_gcm", + path="test_array_path", + display_name="Test hazard indicator", + description="Description of test hazard indicator", + scenarios=[Scenario(id="historical", years=[2010])], + units="K", + ) + + def test_zarr_bilinear(self): + # create suitable asymmetric data set and compare with scipy + + xt, yt = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10)) + data0 = np.exp(-(xt**2 / 25.0 + yt**2 / 16.0)) + data1 = np.exp(-(xt**2 / 36.0 + yt**2 / 25.0)) + + data = np.stack([data0, data1], axis=0) + + # note that zarr array has index [z, y, x], e.g. 
9, 21600, 43200 or [index, lat, lon] + y = np.array([1.4, 2.8, 3.4]) # row indices + x = np.array([3.2, 6.7, 7.9]) # column indices + image_coords = np.stack([x, y]) + data_zarr = zarr.array(data) + candidate_lin = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="linear" + ) + candidate_max = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="max" + ) + candidate_min = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="min" + ) + + image_coords_surr = np.stack( + [ + np.concatenate([np.floor(x), np.floor(x) + 1, np.floor(x), np.floor(x) + 1]), + np.concatenate([np.floor(y), np.floor(y), np.floor(y) + 1, np.floor(y) + 1]), + ] + ) + values_surr = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords_surr, np.array([0, 1]), interpolation="linear" + ).reshape((4, 3, 2)) + interp_scipy0 = scipy.interpolate.RectBivariateSpline( + np.linspace(0, 9, 10), np.linspace(0, 9, 10), data0.T, kx=1, ky=1 + ) + interp_scipy1 = scipy.interpolate.RectBivariateSpline( + np.linspace(0, 9, 10), np.linspace(0, 9, 10), data1.T, kx=1, ky=1 + ) + expected0_lin = interp_scipy0(x, y).diagonal().reshape(len(y)) + expected1_lin = interp_scipy1(x, y).diagonal().reshape(len(y)) + expected0_max = np.max(values_surr[:, :, 0], axis=0) + expected1_max = np.max(values_surr[:, :, 1], axis=0) + expected0_min = np.min(values_surr[:, :, 0], axis=0) + expected1_min = np.min(values_surr[:, :, 1], axis=0) + + numpy.testing.assert_allclose(candidate_lin[:, 0], expected0_lin, rtol=1e-6) + numpy.testing.assert_allclose(candidate_lin[:, 1], expected1_lin, rtol=1e-6) + numpy.testing.assert_allclose(candidate_max[:, 0], expected0_max, rtol=1e-6) + numpy.testing.assert_allclose(candidate_max[:, 1], expected1_max, rtol=1e-6) + numpy.testing.assert_allclose(candidate_min[:, 0], expected0_min, rtol=1e-6) + numpy.testing.assert_allclose(candidate_min[:, 1], expected1_min, rtol=1e-6) + + def test_zarr_bilinear_with_bad_data(self): + # create suitable asymmetric data set and compare with scipy + + xt, yt = np.meshgrid(np.linspace(0, 1, 2), np.linspace(0, 1, 2)) + data = np.array([[[1.0, -9999.0], [2.0, 0.0]]]) + + # note that zarr array has index [z, y, x], e.g. 
9, 21600, 43200 or [index, lat, lon]
+        y = np.array([0.4, 0.5, 0.8])  # row indices
+        x = np.array([0.1, 0.6, 0.7])  # column indices
+        image_coords = np.stack([x, y])
+        data_zarr = zarr.array(data)
+
+        candidate_lin = ZarrReader._linear_interp_frac_coordinates(
+            data_zarr, image_coords, np.array([0]), interpolation="linear"
+        ).flatten()
+        candidate_max = ZarrReader._linear_interp_frac_coordinates(
+            data_zarr, image_coords, np.array([0]), interpolation="max"
+        ).flatten()
+        candidate_min = ZarrReader._linear_interp_frac_coordinates(
+            data_zarr, image_coords, np.array([0]), interpolation="min"
+        ).flatten()
+
+        expected_lin = np.array([1.34042553, 0.85714286, 0.62790698])
+        expected_max = np.array([2.0, 2.0, 2.0])
+        expected_min = np.array([0.0, 0.0, 0.0])
+
+        numpy.testing.assert_allclose(candidate_lin, expected_lin, rtol=1e-6)
+        numpy.testing.assert_allclose(candidate_max, expected_max, rtol=1e-6)
+        numpy.testing.assert_allclose(candidate_min, expected_min, rtol=1e-6)
+
+    def test_zarr_geomax_on_grid(self):
+        lons_ = np.array([3.92783])
+        lats_ = np.array([50.882394])
+        curve = np.array(
+            [0.00, 0.06997928, 0.2679602, 0.51508933, 0.69842442, 0.88040525, 1.11911115, 1.29562478, 1.47200677]
+        )
+        set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080"
+        interpolation = "linear"
+        delta_km = 0.100
+        n_grid = 11
+        store_ = mock_hazard_model_store_inundation(lons_, lats_, curve)
+        zarrreader_ = ZarrReader(store_)
+
+        lons_ = np.array([3.92916667, 3.925] + list(lons_))
+        lats_ = np.array([50.87916667, 50.88333333] + list(lats_))
+        curves_max_candidate, _ = zarrreader_.get_max_curves_on_grid(
+            set_id, lons_, lats_, interpolation=interpolation, delta_km=delta_km, n_grid=n_grid
+        )
+
+        curves_max_expected = np.array(
+            [
+                curve,
+                [0.0, 0.02272942, 0.08703404, 0.16730212, 0.22684974, 0.28595751, 0.3634897, 0.42082168, 0.47811095],
+                [0.0, 0.0432026, 0.16542863, 0.31799695, 0.43118118, 0.54352937, 0.69089751, 0.7998704, 0.90876211],
+            ]
+        )
+        numpy.testing.assert_allclose(curves_max_candidate, curves_max_expected, rtol=1e-6)
+
+    def test_zarr_geomax(self):
+        longitudes = np.array([3.926])
+        latitudes = np.array([50.878])
+        curve = np.array(
+            [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4],
+        )
+        set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080"
+        delta_deg = 0.1
+        shapes = [
+            Polygon(
+                (
+                    (x - 0.5 * delta_deg, y - 0.5 * delta_deg),
+                    (x - 0.5 * delta_deg, y + 0.5 * delta_deg),
+                    (x + 0.5 * delta_deg, y + 0.5 * delta_deg),
+                    (x + 0.5 * delta_deg, y - 0.5 * delta_deg),
+                )
+            )
+            for x, y in zip(longitudes, latitudes)
+        ]
+        store = mock_hazard_model_store_inundation(longitudes, latitudes, curve)
+        zarr_reader = ZarrReader(store)
+        curves_max_expected = np.array([[0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]])
+
+        curves_max_candidate, _ = zarr_reader.get_max_curves(set_id, shapes, interpolation="floor")
+        numpy.testing.assert_allclose(curves_max_candidate, curves_max_expected, rtol=1e-6)
+
+        curves_max_candidate, _ = zarr_reader.get_max_curves(set_id, shapes, interpolation="linear")
+        numpy.testing.assert_allclose(curves_max_candidate, curves_max_expected / 4, rtol=1e-6)
+
+    def test_reproject(self):
+        """Test adding data in a non-EPSG-4326 coordinate reference system.
+        Check that the transform attributes result in the correct conversion."""
+        mocker = ZarrStoreMocker()
+        lons = [1.1, -0.31]
+        lats = [47.0, 52.0]
+        mocker._add_curves(
+            "test",
+            lons,
+            lats,
+            "epsg:3035",
+            [3, 39420, 38371],
+            [100.0, 0.0, 2648100.0, 0.0, -100.0, 5404500],
+            [10.0, 100.0, 1000.0],
+            [1.0, 2.0, 3.0],
+        )
+
+        source_paths = {RiverineInundation: lambda indicator_id, scenario, year, hint: "test"}
+        hazard_model = ZarrHazardModel(source_paths=source_paths, store=mocker.store)
+        response = hazard_model.get_hazard_events(
+            [HazardDataRequest(RiverineInundation, lons[0], lats[0], indicator_id="", scenario="", year=2050)]
+        )
+        numpy.testing.assert_equal(next(iter(response.values())).intensities, [1.0, 2.0, 3.0])
diff --git a/tests/data/hazard_model_store_test.py b/tests/data/hazard_model_store_test.py
new file mode 100644
index 00000000..a85b8d5f
--- /dev/null
+++ b/tests/data/hazard_model_store_test.py
@@ -0,0 +1,339 @@
+import os
+from typing import Dict, List, Tuple, Union
+
+import numpy as np
+import numpy.typing as npt
+import zarr
+import zarr.storage
+from affine import Affine
+from pyproj import Transformer
+
+from physrisk.hazard_models.core_hazards import cmip6_scenario_to_rcp
+
+
+class TestData:
+    # fmt: off
+    longitudes = [69.4787, 68.71, 20.1047, 19.8936, 19.6359, 0.5407, 6.9366, 6.935, 13.7319, 13.7319, 14.4809, -68.3556, -68.3556, -68.9892, -70.9157]  # noqa
+    latitudes = [34.556, 35.9416, 39.9116, 41.6796, 42.0137, 35.7835, 36.8789, 36.88, -12.4706, -12.4706, -9.7523, -38.9368, -38.9368, -34.5792, -39.2145]  # noqa
+
+    coastal_longitudes = [12.2, 50.5919, 90.3473, 90.4295, 90.4804, 90.3429, 90.5153, 90.6007]
+    coastal_latitudes = [-5.55, 26.1981, 23.6473, 23.6783, 23.5699, 23.9904, 23.59, 23.6112]
+    # fmt: on
+
+    # fmt: off
+    wind_return_periods = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]  # noqa
+    wind_intensities_1 = [34.314999, 40.843750, 44.605000, 46.973751, 48.548752, 49.803749, 51.188751, 52.213749, 52.902500, 53.576248, 57.552502, 59.863750, 60.916248, 61.801250, 62.508751, 63.082500, 63.251251, 63.884998, 64.577499]  # noqa
+    wind_intensities_2 = [37.472500, 44.993752, 49.049999, 51.957500, 53.796249, 55.478748, 56.567501, 57.572498, 58.661251, 59.448750, 63.724998, 65.940002, 66.842499, 67.614998, 68.110001, 68.547501, 68.807503, 69.529999, 70.932503]  # noqa
+    # fmt: on
+
+    # fmt: off
+    temperature_thresholds = [10, 20, 30, 40, 50]  # noqa
+    degree_days_above_index_1 = [6000, 3000, 100, 20, 10]  # noqa
+    degree_days_above_index_2 = [7000, 4000, 150, 30, 12]  # noqa
+    # fmt: on
+
+
+class ZarrStoreMocker:
+    def __init__(self):
+        self.store, self._root = zarr_memory_store()
+
+    def add_curves_global(
+        self,
+        array_path: str,
+        longitudes: List[float],
+        latitudes: List[float],
+        return_periods: Union[List[float], npt.NDArray],
+        intensities: Union[List[float], npt.NDArray],
+        width: int = 43200,
+        height: int = 21600,
+    ):
+        crs, shape, trans = self._crs_shape_transform_global(return_periods=return_periods, width=width, height=height)
+        self._add_curves(array_path, longitudes, latitudes, crs, shape, trans, return_periods, intensities)
+
+    def _crs_shape_transform_global(
+        self, width: int = 43200, height: int = 21600, return_periods: Union[List[float], npt.NDArray] = [0.0]
+    ):
+        return self._crs_shape_transform(width, height, return_periods)
+
+    def _add_curves(
+        self,
+        array_path: str,
+        longitudes: List[float],
+        latitudes: List[float],
+        crs: str,
+        shape: Tuple[int, int, int],
+        trans: 
List[float], + return_periods: Union[List[float], npt.NDArray], + intensities: Union[List[float], npt.NDArray], + ): + z = self._root.create_dataset( # type: ignore + array_path, shape=(shape[0], shape[1], shape[2]), chunks=(shape[0], 1000, 1000), dtype="f4" + ) + z.attrs["transform_mat3x3"] = trans + z.attrs["index_values"] = return_periods + z.attrs["crs"] = crs + + if crs.lower() != "epsg:4326": + transproj = Transformer.from_crs( + "epsg:4326", + crs, + always_xy=True, + ) + x, y = transproj.transform(longitudes, latitudes) + else: + x, y = longitudes, latitudes + + transform = Affine(trans[0], trans[1], trans[2], trans[3], trans[4], trans[5]) + coords = np.vstack((x, y, np.ones(len(x)))) + inv_trans = ~transform + mat = np.array(inv_trans).reshape(3, 3) + frac_image_coords = mat @ coords + image_coords = np.floor(frac_image_coords).astype(int) + for j in range(len(x)): + z[:, image_coords[1, j], image_coords[0, j]] = intensities + + def _crs_shape_transform(self, width: int, height: int, return_periods: Union[List[float], npt.NDArray] = [0.0]): + t = [360.0 / width, 0.0, -180.0, 0.0, -180.0 / height, 90.0, 0.0, 0.0, 1.0] + return "epsg:4326", (len(return_periods), height, width), t + + +def shape_transform_21600_43200( + width: int = 43200, height: int = 21600, return_periods: Union[List[float], npt.NDArray] = [0.0] +): + t = [360.0 / width, 0.0, -180.0, 0.0, -180.0 / height, 90.0, 0.0, 0.0, 1.0] + return (len(return_periods), height, width), t + + +def zarr_memory_store(path="hazard.zarr"): + store = zarr.storage.MemoryStore(root=path) + return store, zarr.open(store=store, mode="w") + + +def add_curves( + root: zarr.Group, + longitudes, + latitudes, + array_path: str, + shape: Tuple[int, int, int], + curve: np.ndarray, + return_periods: List[float], + trans: List[float], +): + z = root.create_dataset( # type: ignore + array_path, shape=(shape[0], shape[1], shape[2]), chunks=(shape[0], 1000, 1000), dtype="f4" + ) + z.attrs["transform_mat3x3"] = trans + z.attrs["index_values"] = return_periods + + trans = z.attrs["transform_mat3x3"] + transform = Affine(trans[0], trans[1], trans[2], trans[3], trans[4], trans[5]) + + coords = np.vstack((longitudes, latitudes, np.ones(len(longitudes)))) + inv_trans = ~transform + mat = np.array(inv_trans).reshape(3, 3) + frac_image_coords = mat @ coords + image_coords = np.floor(frac_image_coords).astype(int) + for j in range(len(longitudes)): + z[:, image_coords[1, j], image_coords[0, j]] = curve + + +def get_mock_hazard_model_store_single_curve(): + """Create a test MemoryStore for creation of Zarr hazard model for unit testing. 
A single curve + is applied at all locations.""" + + return_periods = inundation_return_periods() + t = [0.008333333333333333, 0.0, -180.0, 0.0, -0.008333333333333333, 90.0, 0.0, 0.0, 1.0] + shape = (len(return_periods), 21600, 43200) + store = zarr.storage.MemoryStore(root="hazard.zarr") + root = zarr.open(store=store, mode="w") + array_path = get_source_path_wri_riverine_inundation(model="MIROC-ESM-CHEM", scenario="rcp8p5", year=2080) + z = root.create_dataset( # type: ignore + array_path, shape=(shape[0], shape[1], shape[2]), chunks=(shape[0], 1000, 1000), dtype="f4" + ) + z.attrs["transform_mat3x3"] = t + z.attrs["index_values"] = return_periods + + longitudes = TestData.longitudes + latitudes = TestData.latitudes + t = z.attrs["transform_mat3x3"] + transform = Affine(t[0], t[1], t[2], t[3], t[4], t[5]) + + coords = np.vstack((longitudes, latitudes, np.ones(len(longitudes)))) + inv_trans = ~transform + mat = np.array(inv_trans).reshape(3, 3) + frac_image_coords = mat @ coords + image_coords = np.floor(frac_image_coords).astype(int) + z[:, image_coords[1, 1], image_coords[0, 1]] = np.linspace(0.1, 1.0, z.shape[0]) + + return store + + +def mock_hazard_model_store_heat(longitudes, latitudes): + return mock_hazard_model_store_for_parameter_sets(longitudes, latitudes, degree_day_heat_parameter_set()) + + +def mock_hazard_model_store_heat_wbgt(longitudes, latitudes): + return mock_hazard_model_store_for_parameter_sets(longitudes, latitudes, wbgt_gzn_joint_parameter_set()) + + +def mock_hazard_model_store_inundation(longitudes, latitudes, curve): + return mock_hazard_model_store_single_curve_for_paths(longitudes, latitudes, curve, inundation_paths) + + +def mock_hazard_model_store_for_parameter_sets(longitudes, latitudes, path_parameters): + """Create a MemoryStore for creation of Zarr hazard model to be used with unit tests, + with the specified longitudes and latitudes set to the curve supplied.""" + + return_periods = None + shape = (1, 21600, 43200) + + t = [0.008333333333333333, 0.0, -180.0, 0.0, -0.008333333333333333, 90.0, 0.0, 0.0, 1.0] + store = zarr.storage.MemoryStore(root="hazard.zarr") + root = zarr.open(store=store, mode="w") + + for path, parameter in path_parameters.items(): + add_curves(root, longitudes, latitudes, path, shape, parameter, return_periods, t) + + return store + + +def mock_hazard_model_store_single_curve_for_paths(longitudes, latitudes, curve, paths): + """Create a MemoryStore for creation of Zarr hazard model to be used with unit tests, + with the specified longitudes and latitudes set to the curve supplied.""" + + return_periods = [0.0] if len(curve) == 1 else [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] + if len(curve) != len(return_periods): + raise ValueError(f"curve must be single value or of length {len(return_periods)}") + + shape, t = shape_transform_21600_43200(return_periods=return_periods) + store, root = zarr_memory_store() + + for path in paths(): + add_curves(root, longitudes, latitudes, path, shape, curve, return_periods, t) + + return store + + +def inundation_return_periods(): + return [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] + + +def mock_hazard_model_store_path_curves(longitudes, latitudes, path_curves: Dict[str, np.ndarray]): + """Create a MemoryStore for creation of Zarr hazard model to be used with unit tests, + with the specified longitudes and latitudes set to the curve supplied.""" + + t = [0.008333333333333333, 0.0, -180.0, 0.0, -0.008333333333333333, 90.0, 0.0, 0.0, 1.0] + store = 
zarr.storage.MemoryStore(root="hazard.zarr") + root = zarr.open(store=store, mode="w") + + for path, curve in path_curves.items(): + if len(curve) == 1: + return_periods = [0.0] + shape = (1, 21600, 43200) + else: + return_periods = [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] + if len(curve) != len(return_periods): + raise ValueError(f"curve must be single value or of length {len(return_periods)}") + shape = (len(return_periods), 21600, 43200) + + add_curves(root, longitudes, latitudes, path, shape, curve, return_periods, t) + + return store + + +def degree_day_heat_parameter_set(): + paths = [] + + for model, scenario, year in [ + ("mean_degree_days/above/32c/ACCESS-CM2", "historical", 2005), # 2005 + ("mean_degree_days/below/32c/ACCESS-CM2", "historical", 2005), + ("mean_degree_days/above/32c/ACCESS-CM2", "ssp585", 2050), + ("mean_degree_days/below/32c/ACCESS-CM2", "ssp585", 2050), + ]: + paths.append(get_source_path_osc_chronic_heat(model=model, scenario=scenario, year=year)) + parameters = [300, 300, 600, -200] + return dict(zip(paths, parameters)) + + +def wbgt_gzn_joint_parameter_set(): + paths = [] + # Getting paths for both hazards. + for model, scenario, year in [ + ("mean_degree_days/above/32c/ACCESS-CM2", "historical", 2005), # 2005 + ("mean_degree_days/below/32c/ACCESS-CM2", "historical", 2005), + ("mean_degree_days/above/32c/ACCESS-CM2", "ssp585", 2050), + ("mean_degree_days/below/32c/ACCESS-CM2", "ssp585", 2050), + ]: + paths.append(get_source_path_osc_chronic_heat(model=model, scenario=scenario, year=year)) + for model, scenario, year in [ + ("mean_work_loss/high/ACCESS-CM2", "historical", 2005), # 2005 + ("mean_work_loss/medium/ACCESS-CM2", "historical", 2005), + ("mean_work_loss/high/ACCESS-CM2", "ssp585", 2050), + ("mean_work_loss/medium/ACCESS-CM2", "ssp585", 2050), + ]: + paths.append(get_source_path_osc_chronic_heat(model=model, scenario=scenario, year=year)) + parameters = [300, 300, 600, -200, 0.05, 0.003, 0.11, 0.013] + return dict(zip(paths, parameters)) + + +def inundation_paths(): + paths = [] + for model, scenario, year in [("MIROC-ESM-CHEM", "rcp8p5", 2080), ("000000000WATCH", "historical", 1980)]: + paths.append(get_source_path_wri_riverine_inundation(model=model, scenario=scenario, year=year)) + for model, scenario, year in [ + ("wtsub/95", "rcp8p5", "2080"), + ("wtsub", "historical", "hist"), + ("nosub", "historical", "hist"), + ]: + paths.append(get_source_path_wri_coastal_inundation(model=model, scenario=scenario, year=year)) + return paths + + +def _wri_inundation_prefix(): + return "inundation/wri/v2" + + +_percentiles_map = {"95": "0", "5": "0_perc_05", "50": "0_perc_50"} +_subsidence_set = {"wtsub", "nosub"} + + +def get_source_path_wri_coastal_inundation(*, model: str, scenario: str, year: str): + type = "coast" + # model is expected to be of the form subsidence/percentile, e.g. wtsub/95 + # if percentile is omitted then 95th percentile is used + model_components = model.split("/") + sub = model_components[0] + if sub not in _subsidence_set: + raise ValueError("expected model input of the form {subsidence/percentile}, e.g. 
wtsub/95, nosub/5, wtsub/50")
+    perc = "95" if len(model_components) == 1 else model_components[1]
+    return os.path.join(
+        _wri_inundation_prefix(), f"inun{type}_{cmip6_scenario_to_rcp(scenario)}_{sub}_{year}_{_percentiles_map[perc]}"
+    )
+
+
+def get_source_path_wri_riverine_inundation(*, model: str, scenario: str, year: int):
+    type = "river"
+    return os.path.join(_wri_inundation_prefix(), f"inun{type}_{cmip6_scenario_to_rcp(scenario)}_{model}_{year}")
+
+
+def _osc_chronic_heat_prefix():
+    return "chronic_heat/osc/v2"
+
+
+def get_source_path_osc_chronic_heat(*, model: str, scenario: str, year: int):
+    type, *levels = model.split("/")
+
+    if type == "mean_degree_days":
+        assert levels[0] in ["above", "below"]  # above or below
+        assert levels[1] in ["18c", "32c"]  # threshold temperature
+        assert levels[2] in ["ACCESS-CM2"]  # gcms
+        return _osc_chronic_heat_prefix() + "/" + f"{type}_v2_{levels[0]}_{levels[1]}_{levels[2]}_{scenario}_{year}"
+
+    elif type == "mean_work_loss":
+        assert levels[0] in ["low", "medium", "high"]  # work intensity
+        assert levels[1] in ["ACCESS-CM2"]  # gcms
+        return _osc_chronic_heat_prefix() + "/" + f"{type}_{levels[0]}_{levels[1]}_{scenario}_{year}"
+
+    else:
+        raise ValueError("valid types are mean_degree_days and mean_work_loss")
diff --git a/tests/data/static_data_test.py b/tests/data/static_data_test.py
new file mode 100644
index 00000000..af4a4e9f
--- /dev/null
+++ b/tests/data/static_data_test.py
@@ -0,0 +1,21 @@
+import unittest
+
+from physrisk.data.static.world import World, get_countries_and_continents, get_countries_json
+
+from ..data.hazard_model_store_test import TestData
+
+
+class TestStaticDate(unittest.TestCase):
+    @unittest.skip("example that requires geopandas (consider adding for running tests only)")
+    def test_get_countries_and_continents(self):
+        countries, continents = get_countries_and_continents(TestData.longitudes, TestData.latitudes)
+        self.assertEqual(countries[0:3], ["Afghanistan", "Afghanistan", "Albania"])
+
+    @unittest.skip("not really a test; just showing how world.json was generated")
+    def test_get_countries_json(self):
+        with open("world.json", "w") as f:
+            world_json = get_countries_json()
+            f.write(world_json)
+
+    def test_get_load_world(self):
+        self.assertEqual(World.countries["United Kingdom"].continent, "Europe")
diff --git a/reports/figures/.gitkeep b/tests/kernel/__init__.py
similarity index 100%
rename from reports/figures/.gitkeep
rename to tests/kernel/__init__.py
diff --git a/tests/kernel/asset_impact_test.py b/tests/kernel/asset_impact_test.py
new file mode 100644
index 00000000..9af95470
--- /dev/null
+++ b/tests/kernel/asset_impact_test.py
@@ -0,0 +1,138 @@
+""" Test asset impact calculations."""
+
+import unittest
+
+import numpy as np
+
+from physrisk.kernel.assets import RealEstateAsset
+from physrisk.kernel.curve import ExceedanceCurve
+from physrisk.kernel.hazard_event_distrib import HazardEventDistrib
+from physrisk.kernel.hazard_model import HazardDataRequest
+from physrisk.kernel.hazards import RiverineInundation
+from physrisk.kernel.impact import ImpactDistrib
+from physrisk.kernel.vulnerability_distrib import VulnerabilityDistrib
+from physrisk.vulnerability_models.real_estate_models import (
+    RealEstateCoastalInundationModel,
+    RealEstateRiverineInundationModel,
+)
+
+
+class TestAssetImpact(unittest.TestCase):
+    """Tests asset impact calculations."""
+
+    def test_impact_curve(self):
+        """Testing the generation of an asset when only an impact curve (e.g.
damage curve) is available."""
+
+        # exceedance curve
+        return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0])
+        exceed_probs = 1.0 / return_periods
+        depths = np.array(
+            [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]
+        )
+        curve = ExceedanceCurve(exceed_probs, depths)
+
+        # impact curve
+        vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1])
+        vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365])
+
+        # say we need to add an extra depth point because the damage below that inundation depth is zero
+        cutoff_depth = 0.9406518  # 0.75
+        curve = curve.add_value_point(cutoff_depth)
+        # we could also choose to ensure that all impact curve depth points are
+        # represented in the exceedance curve; we do not do so here
+
+        depth_bins, probs = curve.get_probability_bins()
+
+        impact_bins = np.interp(depth_bins, vul_depths, vul_impacts)
+
+        include_bin = depth_bins < cutoff_depth
+        probs[include_bin[:-1]] = 0  # type: ignore
+
+        mean = np.sum((impact_bins[1:] + impact_bins[:-1]) * probs / 2)  # type: ignore
+        self.assertAlmostEqual(mean, 4.8453897)
+
+    def test_protection_level(self):
+        return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0])
+        base_depth = np.array(
+            [0.0, 0.22372675, 0.3654859, 0.5393629, 0.6642473, 0.78564394, 0.9406518, 1.0539534, 1.1634114]
+        )
+        # future_depth = np.array(
+        #     [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]
+        # )
+
+        exceed_probs = 1.0 / return_periods
+
+        protection_return_period = 250.0  # protection level of 250 years
+        protection_depth = np.interp(1.0 / protection_return_period, exceed_probs[::-1], base_depth[::-1])
+
+        self.assertAlmostEqual(protection_depth, 0.9406518)  # type: ignore
+
+    def test_single_asset_impact(self):
+        # exceedance curve
+        return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0])
+        exceed_probs = 1.0 / return_periods
+        depths = np.array(
+            [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114]
+        )
+        curve = ExceedanceCurve(exceed_probs, depths)
+
+        cutoff_depth = 0.9406518
+        curve = curve.add_value_point(cutoff_depth)
+
+        # impact curve
+        vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1])
+        vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365])
+
+        depth_bins, probs = curve.get_probability_bins()
+
+        impact_bins = np.interp(depth_bins, vul_depths, vul_impacts)
+
+        # if the upper end of the bin is less than the cutoff then exclude
+        probs_w_cutoff = np.where(depth_bins[1:] <= cutoff_depth, 0.0, 1.0)
+        # n_bins = len(probs)  # type: ignore
+        vul = VulnerabilityDistrib(
+            type(RiverineInundation), depth_bins, impact_bins, np.diag(probs_w_cutoff)
+        )  # np.eye(n_bins, n_bins))
+        event = HazardEventDistrib(type(RiverineInundation), depth_bins, probs)  # type: ignore
+
+        impact_prob = vul.prob_matrix.T @ event.prob
+        impact = ImpactDistrib(vul.event_type, vul.impact_bins, impact_prob)
+
+        mean = impact.mean_impact()
+
+        self.assertAlmostEqual(mean, 4.8453897)
+
+    def test_performance_hazardlookup(self):
+        """Just for reference: not a true test."""
+        asset_requests = {}
+        import time
+
+        start = time.time()
+
+        assets = [RealEstateAsset(latitude=0, longitude=0, location="", type="") for _ in range(10000)]
+
+        vulnerability_models = [RealEstateCoastalInundationModel(), RealEstateRiverineInundationModel()]
+
+        time_assets = time.time() - start
+        print(f"Time for asset generation {time_assets}s ")
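+        # Note: asset_requests below is keyed by (vulnerability model, asset) tuples;
+        # both classes appear to rely on Python's default identity-based hashing
+        # (an assumption: neither is expected to override __hash__/__eq__), so each
+        # look-up should be O(1).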
+        start = time.time()
+        # we key requests via model and assets; let's check dictionary look-up is fast enough
+        # (there are less simple alternatives)
+
+        # create requests:
+        for v in vulnerability_models:
+            for a in assets:
+                asset_requests[(v, a)] = [
+                    HazardDataRequest(RiverineInundation, 0, 0, indicator_id="", scenario="", year=2030)
+                ]
+
+        time_requests = time.time() - start
+        print(f"Time for requests dictionary creation {time_requests}s ")
+        start = time.time()
+        # read requests:
+        for key in asset_requests:
+            if asset_requests[key][0].longitude != 0:
+                raise Exception()
+
+        time_responses = time.time() - start
+        print(f"Time for response dictionary creation {time_responses}s ")
diff --git a/tests/kernel/chronic_asset_impact_test.py b/tests/kernel/chronic_asset_impact_test.py
new file mode 100644
index 00000000..3ecbfd10
--- /dev/null
+++ b/tests/kernel/chronic_asset_impact_test.py
@@ -0,0 +1,208 @@
+import unittest
+from typing import Iterable, List, Union
+
+import numpy as np
+from scipy.stats import norm
+
+from physrisk.data.pregenerated_hazard_model import ZarrHazardModel
+from physrisk.hazard_models.core_hazards import get_default_source_paths
+from physrisk.kernel.assets import Asset, IndustrialActivity
+from physrisk.kernel.hazard_model import HazardDataRequest, HazardDataResponse, HazardParameterDataResponse
+from physrisk.kernel.hazards import ChronicHeat
+from physrisk.kernel.impact import calculate_impacts
+from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType
+from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels, VulnerabilityModelBase
+from physrisk.vulnerability_models.chronic_heat_models import ChronicHeatGZNModel
+
+from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_heat
+
+
+class ExampleChronicHeatModel(VulnerabilityModelBase):
+    """Example chronic vulnerability model for extreme heat (summary should fit on one line).
+
+    More description below as per
+    https://www.sphinx-doc.org/en/master/usage/extensions/example_google.html
+    """
+
+    def __init__(self, indicator_id: str = "mean_degree_days_above_32c", delta: bool = True):
+        super().__init__(
+            indicator_id=indicator_id, hazard_type=ChronicHeat, impact_type=ImpactType.disruption
+        )  # opportunity to give a model hint, but blank here
+
+        self.time_lost_per_degree_day = 4.671  # This comes from the paper, converted to Celsius
+        self.time_lost_per_degree_day_se = 2.2302  # This comes from the paper, converted to Celsius
+        self.total_labour_hours = 107460  # OECD average hours worked within the USA in one year.
+        self.delta = delta
+
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Request the hazard data needed by the vulnerability model for a specific asset
+        (this is a Google-style docstring).
+
+        Args:
+            asset: Asset for which data is requested.
+            scenario: Climate scenario of calculation.
+            year: Projection year of calculation.
+
+        Returns:
+            Single or multiple data requests.
+        """
+
+        # specify hazard data needed. Model string is hierarchical and '/' separated.
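+        # For example (an illustration of the naming convention used in these tests),
+        # "mean_degree_days/above/32c" reads as indicator / direction / threshold:
+        # the mean count of degree days above 32C. Two requests follow, so that the
+        # scenario value can be compared against the 1980 historical baseline.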
+        indicator_id = "mean_degree_days/above/32c"
+
+        return [
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario="historical",
+                year=1980,
+                indicator_id=indicator_id,
+            ),
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario=scenario,
+                year=year,
+                indicator_id=indicator_id,
+            ),
+        ]
+
+    def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib:
+        """Calculate the impact (disruption) of the asset based on the hazard data returned.
+
+        Args:
+            asset: Asset for which impact is calculated.
+            data_responses: responses to the hazard data requests generated in get_data_requests.
+
+        Returns:
+            Probability distribution of impacts.
+        """
+        assert isinstance(asset, IndustrialActivity)
+        baseline_dd_above_mean, scenario_dd_above_mean = data_responses
+
+        # check expected type; can maybe do this more nicely
+        assert isinstance(baseline_dd_above_mean, HazardParameterDataResponse)
+        assert isinstance(scenario_dd_above_mean, HazardParameterDataResponse)
+        # Ensure that the values are non-negative; they should be by definition.
+        assert scenario_dd_above_mean.parameter >= 0
+        assert baseline_dd_above_mean.parameter >= 0
+
+        # Allow for either a delta approach or a level estimate.
+        delta_dd_above_mean: float = scenario_dd_above_mean.parameter - baseline_dd_above_mean.parameter * self.delta
+        hours_worked = self.total_labour_hours
+        fraction_loss_mean = (delta_dd_above_mean * self.time_lost_per_degree_day) / hours_worked
+        fraction_loss_std = (delta_dd_above_mean * self.time_lost_per_degree_day_se) / hours_worked
+
+        return get_impact_distrib(fraction_loss_mean, fraction_loss_std, ChronicHeat, ImpactType.disruption)
+
+
+def get_impact_distrib(
+    fraction_loss_mean: float, fraction_loss_std: float, hazard_type: type, impact_type: ImpactType
+) -> ImpactDistrib:
+    """Calculate impact (disruption) of asset based on the hazard data returned.
+
+    Args:
+        fraction_loss_mean: mean of the impact distribution
+        fraction_loss_std: standard deviation of the impact distribution
+        hazard_type: Hazard Type.
+        impact_type: Impact Type.
+
+    Returns:
+        Probability distribution of impacts.
+    """
+    impact_bins = np.concatenate(
+        [
+            np.linspace(-0.001, 0.001, 1, endpoint=False),
+            np.linspace(0.001, 0.01, 9, endpoint=False),
+            np.linspace(0.01, 0.1, 10, endpoint=False),
+            np.linspace(0.1, 0.999, 10, endpoint=False),
+            np.linspace(0.999, 1.001, 2),
+        ]
+    )
+
+    probs_cumulative = np.vectorize(lambda x: norm.cdf(x, loc=fraction_loss_mean, scale=max(1e-12, fraction_loss_std)))(
+        impact_bins
+    )
+    probs_cumulative[-1] = np.maximum(probs_cumulative[-1], 1.0)
+    probs = np.diff(probs_cumulative)
+
+    probs_norm = np.sum(probs)
+    prob_differential = 1 - probs_norm
+    if probs_norm < 1e-8:
+        if fraction_loss_mean <= 0.0:
+            probs = np.concatenate((np.array([1.0]), np.zeros(len(impact_bins) - 2)))
+        elif fraction_loss_mean >= 1.0:
+            probs = np.concatenate((np.zeros(len(impact_bins) - 2), np.array([1.0])))
+    else:
+        probs[0] = probs[0] + prob_differential
+
+    return ImpactDistrib(hazard_type, impact_bins, probs, impact_type)
+
+
+class TestChronicAssetImpact(unittest.TestCase):
+    """Tests the impact on an asset of a chronic hazard model."""
+
+    def test_chronic_vulnerability_model(self):
+        """Testing the generation of an asset when only an impact curve (e.g. 
damage curve is available)""" + + store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + # to run a live calculation, we omit the store parameter + + scenario = "ssp585" + year = 2050 + + vulnerability_models = DictBasedVulnerabilityModels({IndustrialActivity: [ChronicHeatGZNModel()]}) + + assets = [ + IndustrialActivity(lat, lon, type="Construction") + for lon, lat in zip(TestData.longitudes, TestData.latitudes) + ][:1] + + results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year) + + value_test = list(results.values())[0].impact.mean_impact() + value_test = list(results.values())[0].impact.prob + value_exp = np.array( + [ + 0.02656777935, + 0.01152965908, + 0.01531928095, + 0.01983722513, + 0.02503479879, + 0.03079129430, + 0.03690901485, + 0.04311790414, + 0.04909118572, + 0.05447159590, + 0.51810304973, + 0.16109092806, + 0.00807680527, + 0.00005941883, + 0.00000005990, + 0.00000000001, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + ] + ) + value_diff = np.sum(np.abs(value_test - value_exp)) + self.assertAlmostEqual(value_diff, 0.0, places=8) diff --git a/src/test/kernel/test_curves.py b/tests/kernel/curves_test.py similarity index 76% rename from src/test/kernel/test_curves.py rename to tests/kernel/curves_test.py index 6b55104a..f8c22a19 100644 --- a/src/test/kernel/test_curves.py +++ b/tests/kernel/curves_test.py @@ -1,9 +1,11 @@ """ Test asset impact calculations.""" + import unittest + import numpy as np -from physrisk import AssetEventDistrib, ExceedanceCurve, VulnerabilityDistrib -from physrisk import Drought, Inundation -from physrisk import get_impact_distrib + +from physrisk.kernel.curve import ExceedanceCurve + class TestAssetImpact(unittest.TestCase): """Tests asset impact calculations.""" @@ -11,17 +13,12 @@ class TestAssetImpact(unittest.TestCase): def test_return_period_data(self): return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) depths = np.array([0.059, 0.33, 0.51, 0.71, 0.86, 1.00, 1.15, 1.16, 1.16]) - + # say we need to add an extra depth point because the damage below that point is zero - extra_depth = 0.75 + # extra_depth = 0.75 exceed_probs = 1.0 / return_periods curve = ExceedanceCurve(exceed_probs, depths) curve = curve.add_value_point(0.75) self.assertAlmostEqual(curve.probs[4], 0.03466667) - - - - - \ No newline at end of file diff --git a/tests/kernel/exposure_test.py b/tests/kernel/exposure_test.py new file mode 100644 index 00000000..de150dbd --- /dev/null +++ b/tests/kernel/exposure_test.py @@ -0,0 +1,95 @@ +import json + +import fsspec.implementations.local as local +import numpy as np + +import physrisk.api.v1.common +from physrisk.api.v1.exposure_req_resp import AssetExposureRequest, AssetExposureResponse +from physrisk.container import ZarrHazardModelFactory +from physrisk.data.inventory import EmbeddedInventory +from physrisk.data.inventory_reader import InventoryReader +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.assets import Asset +from physrisk.kernel.exposure import Category, JupterExposureMeasure, calculate_exposures +from physrisk.kernel.hazards 
import ChronicHeat, CombinedInundation, Drought, Fire, Hail, Wind +from physrisk.requests import Requester + +from ..base_test import TestWithCredentials +from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_path_curves + + +class TestExposureMeasures(TestWithCredentials): + def test_jupiter_exposure_service(self): + assets, store, hazard_model_factory, expected = self._get_components() + inventory = EmbeddedInventory() + requester = Requester( + hazard_model_factory=hazard_model_factory, + vulnerability_models_factory=None, + inventory=inventory, + inventory_reader=InventoryReader(fs=local.LocalFileSystem(), base_path=""), + reader=ZarrReader(store=store), + colormaps=inventory.colormaps(), + ) + assets_api = physrisk.api.v1.common.Assets( + items=[ + physrisk.api.v1.common.Asset(asset_class="Asset", latitude=a.latitude, longitude=a.longitude) + for a in assets[0:1] + ] + ) + request = AssetExposureRequest(assets=assets_api, scenario="ssp585", year=2050) + response = requester.get(request_id="get_asset_exposure", request_dict=request.model_dump()) + result = AssetExposureResponse(**json.loads(response)).items[0] + expected = dict((k.__name__, v) for (k, v) in expected.items()) + for key in result.exposures.keys(): + assert result.exposures[key].category == expected[key].name + + def test_jupiter_exposure(self): + assets, _, hazard_model_factory, expected = self._get_components() + asset = assets[0] + measure = JupterExposureMeasure() + + results = calculate_exposures( + [asset], hazard_model_factory.hazard_model(), measure, scenario="ssp585", year=2030 + ) + categories = results[asset].hazard_categories + for k, v in expected.items(): + assert categories[k][0] == v + + def _get_components(self): + # "precipitation/jupiter/v1/max_daily_water_equivalent_{scenario}_{year}" + paths = [ + "combined_flood/jupiter/v1/fraction_{scenario}_{year}", + "chronic_heat/jupiter/v1/days_above_35c_{scenario}_{year}", + "wind/jupiter/v1/max_1min_{scenario}_{year}", + "drought/jupiter/v1/months_spei3m_below_-2_{scenario}_{year}", + "hail/jupiter/v1/days_above_5cm_{scenario}_{year}", + "fire/jupiter/v1/fire_probability_{scenario}_{year}", + ] + + all_resources = EmbeddedInventory().resources + resources = [all_resources[p] for p in paths] + + values = [np.array([v]) for v in [0.02, 15, 100, 0.7, 0.1, 0.9]] + + expected = { + CombinedInundation: Category.LOW, + ChronicHeat: Category.MEDIUM, + Wind: Category.MEDIUM, + Drought: Category.HIGH, + Hail: Category.LOWEST, + Fire: Category.HIGHEST, + } + + def path_curves(): + return dict((r.path.format(scenario="ssp585", year=2030), v) for (r, v) in zip(resources, values)) + + assets = [Asset(lat, lon) for (lat, lon) in zip(TestData.latitudes, TestData.longitudes)] + + store = mock_hazard_model_store_path_curves(TestData.longitudes, TestData.latitudes, path_curves()) + + hazard_model_factory = ZarrHazardModelFactory( + source_paths=get_default_source_paths(EmbeddedInventory()), store=store + ) + + return assets, store, hazard_model_factory, expected diff --git a/tests/kernel/financial_model_test.py b/tests/kernel/financial_model_test.py new file mode 100644 index 00000000..9914eb24 --- /dev/null +++ b/tests/kernel/financial_model_test.py @@ -0,0 +1,52 @@ +import unittest +from datetime import datetime + +import numpy as np + +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.assets import Asset, PowerGeneratingAsset +from 
physrisk.kernel.financial_model import FinancialDataProvider, FinancialModel +from physrisk.risk_models.loss_model import LossModel + +from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_inundation + + +class MockFinancialDataProvider(FinancialDataProvider): + def get_asset_value(self, asset: Asset, currency: str) -> float: + return 1000 + + def get_asset_aggregate_cashflows(self, asset: Asset, start: datetime, end: datetime, currency: str) -> float: + return 1000 + + +class TestAssetImpact(unittest.TestCase): + """Tests asset impact calculations.""" + + def test_financial_model(self): + curve = np.array( + [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114] + ) + store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve) + + # we need to define + # 1) The hazard models + # 2) The vulnerability models + # 3) The financial models + + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + + model = LossModel(hazard_model=hazard_model) + + data_provider = MockFinancialDataProvider() + financial_model = FinancialModel(data_provider) + + assets = [PowerGeneratingAsset(lat, lon) for lon, lat in zip(TestData.longitudes, TestData.latitudes)] + measures = model.get_financial_impacts( + assets, financial_model=financial_model, scenario="ssp585", year=2080, sims=100000 + ) + + np.testing.assert_array_almost_equal_nulp( + measures["RiverineInundation"]["percentile_values"], + np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 1000.0, 1000.0, 2000.0]), + ) diff --git a/tests/kernel/hazard_models_test.py b/tests/kernel/hazard_models_test.py new file mode 100644 index 00000000..d1ca89e3 --- /dev/null +++ b/tests/kernel/hazard_models_test.py @@ -0,0 +1,96 @@ +from dataclasses import dataclass +from typing import Dict, List, Mapping, NamedTuple, Sequence, Tuple + +import numpy as np + +import tests.data.hazard_model_store_test as hms +from physrisk.kernel.assets import RealEstateAsset +from physrisk.kernel.hazard_model import ( + HazardDataRequest, + HazardDataResponse, + HazardEventDataResponse, + HazardModel, + HazardParameterDataResponse, +) +from physrisk.kernel.hazards import ChronicHeat, Wind +from physrisk.kernel.impact import calculate_impacts +from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels +from physrisk.vulnerability_models.real_estate_models import GenericTropicalCycloneModel + + +@dataclass +class SinglePointData: + latitude: float + longitude: float + scenario: str + year: int + wind_return_periods: np.ndarray # years + wind_intensities: np.ndarray # m/s + chronic_heat_intensity: float # days over 35C + # etc + + +class PointsKey(NamedTuple): + latitude: float + longitude: float + scenario: str + year: int + + +class PointBasedHazardModel(HazardModel): + def __init__(self, points: Sequence[SinglePointData]): + """HazardModel suitable for storing relatively small number (<~ million say) of individual hazard + data points. + + Args: + points (Sequence[SinglePointData]): List of points. 
+ """ + self.points: Dict[Tuple[PointsKey, float, float], SinglePointData] = { + self._get_key(p.latitude, p.longitude, p.scenario, p.year): p for p in points + } + + def _get_key(self, latitude: float, longitude: float, scenario: str, year: int): + return PointsKey(latitude=round(latitude, 3), longitude=round(longitude, 3), scenario=scenario, year=year) + + def get_hazard_events(self, requests: List[HazardDataRequest]) -> Mapping[HazardDataRequest, HazardDataResponse]: + response: Dict[HazardDataRequest, HazardDataResponse] = {} + for request in requests: + point = self.points[self._get_key(request.latitude, request.longitude, request.scenario, request.year)] + if request.hazard_type == Wind and request.indicator_id == "max_speed": + response[request] = HazardEventDataResponse( + return_periods=point.wind_return_periods, intensities=point.wind_intensities + ) + elif request.hazard_type == ChronicHeat and request.indicator_id == "days/above/35c": + response[request] = HazardParameterDataResponse(np.array(point.chronic_heat_intensity)) + # etc + return response + + +def test_using_point_based_hazard_model(): + # test that shows how data already present for a number of points can be used in a HazardModel + scenario = "rcp8p5" + year = 2080 + assets = [ + RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") + for lon, lat in zip(hms.TestData.longitudes[0:1], hms.TestData.latitudes[0:1]) + ] + # fmt: off + wind_return_periods = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0]) # noqa + wind_intensities = np.array([37.279999, 44.756248, 48.712502, 51.685001, 53.520000, 55.230000, 56.302502, 57.336250, 58.452499, 59.283749, 63.312500, 65.482498, 66.352501, 67.220001, 67.767502, 68.117500, 68.372498, 69.127502, 70.897499 ]) # noqa + # fmt: on + point = SinglePointData( + hms.TestData.latitudes[0], + hms.TestData.longitudes[0], + scenario=scenario, + year=year, + wind_return_periods=wind_return_periods, + wind_intensities=wind_intensities, + chronic_heat_intensity=0, + ) + + hazard_model = PointBasedHazardModel([point]) + vulnerability_models = DictBasedVulnerabilityModels({RealEstateAsset: [GenericTropicalCycloneModel()]}) + results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year) + impact_distrib = results[(assets[0], Wind, scenario, year)].impact + mean_impact = impact_distrib.mean_impact() + np.testing.assert_almost_equal(mean_impact, 0.009909858317497338) diff --git a/tests/kernel/image_creation_test.py b/tests/kernel/image_creation_test.py new file mode 100644 index 00000000..1e2638e7 --- /dev/null +++ b/tests/kernel/image_creation_test.py @@ -0,0 +1,46 @@ +import os +import unittest + +import numpy as np +import zarr +import zarr.storage + +from physrisk.data import colormap_provider +from physrisk.data.image_creator import ImageCreator +from physrisk.data.zarr_reader import ZarrReader + +from ..base_test import TestWithCredentials + + +class TestImageCreation(TestWithCredentials): + def test_image_creation(self): + path = "test_array" + store = zarr.storage.MemoryStore(root="hazard.zarr") + root = zarr.open(store=store, mode="w") + + im = np.array([[1.2, 0.8], [0.5, 0.4]]) + z = root.create_dataset( # type: ignore + path, shape=(1, im.shape[0], im.shape[1]), chunks=(1, im.shape[0], im.shape[1]), dtype="f4" + ) + z[0, :, :] = im + converter = ImageCreator(reader=ZarrReader(store)) + colormap = colormap_provider.colormap("test") + + def 
get_colors(index: int): + return colormap[str(index)] + + result = converter._to_rgba(im, get_colors) + # Max should be 255, min should be 1. Other values span the 253 elements from 2 to 254. + expected = np.array([[255, 2 + (0.8 - 0.4) * 253 / (1.2 - 0.4)], [2 + (0.5 - 0.4) * 253 / (1.2 - 0.4), 1]]) + converter.convert(path, colormap="test") # check no error running through mocked example. + np.testing.assert_equal(result, expected.astype(np.uint8)) + + @unittest.skip("just example") + def test_write_file(self): + # show how to create image from zarr array + # useful for testing image generation + test_output_dir = "{set me}" + test_path = "wildfire/jupiter/v1/wildfire_probability_ssp585_2050_map" + store = zarr.DirectoryStore(os.path.join(test_output_dir, "hazard_test", "hazard.zarr")) + creator = ImageCreator(ZarrReader(store)) + creator.to_file(os.path.join(test_output_dir, "test.png"), test_path) diff --git a/tests/kernel/live_services_test.py b/tests/kernel/live_services_test.py new file mode 100644 index 00000000..4750d0d6 --- /dev/null +++ b/tests/kernel/live_services_test.py @@ -0,0 +1,45 @@ +import pytest +import requests + +url = "https://physrisk-api2-sandbox.apps.odh-cl1.apps.os-climate.org" +# url = "http://127.0.0.1:5000" + + +@pytest.mark.skip("only as example") +def test_live_exposure(): + request = { + "assets": { + "items": [ + {"asset_class": "Asset", "type": None, "location": None, "latitude": 34.556, "longitude": 69.4787} + ] + }, + "calc_settings": {"hazard_interp": "floor"}, + "scenario": "ssp585", + "year": 2050, + } + result = requests.post(url + "/api/get_asset_exposure", json=request) + print(result.json()) + + +@pytest.mark.skip("only as example") +def test_live_impacts(): # "latitude": 34.556, "longitude": 69.4787 + request = { + "assets": { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "latitude": 23.1577, + "longitude": 113.8306, + } + ] + }, + "include_asset_level": False, + "include_measures": True, + "include_calc_details": False, + "scenarios": ["ssp585"], + "years": [2050], + } + result = requests.post(url + "/api/get_asset_impact", json=request) + print(result.json()) diff --git a/src/data/.gitkeep b/tests/models/__init__.py similarity index 100% rename from src/data/.gitkeep rename to tests/models/__init__.py diff --git a/tests/models/example_models_test.py b/tests/models/example_models_test.py new file mode 100644 index 00000000..f3525b10 --- /dev/null +++ b/tests/models/example_models_test.py @@ -0,0 +1,98 @@ +import unittest + +import numpy as np +from scipy import stats + +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel.assets import Asset, RealEstateAsset +from physrisk.kernel.hazard_model import HazardEventDataResponse +from physrisk.kernel.hazards import Inundation, RiverineInundation +from physrisk.kernel.impact import calculate_impacts +from physrisk.kernel.impact_distrib import ImpactType +from physrisk.kernel.vulnerability_matrix_provider import VulnMatrixProvider +from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels, VulnerabilityModel +from physrisk.vulnerability_models.example_models import ExampleCdfBasedVulnerabilityModel +from tests.data.hazard_model_store_test import TestData, mock_hazard_model_store_inundation + + +class ExampleRealEstateInundationModel(VulnerabilityModel): + def __init__(self): + self.intensities = np.array([0, 
0.01, 0.5, 1.0, 1.5, 2, 3, 4, 5, 6]) + self.impact_means = np.array([0, 0.2, 0.44, 0.58, 0.68, 0.78, 0.85, 0.92, 0.96, 1.0]) + self.impact_stddevs = np.array([0, 0.17, 0.14, 0.14, 0.17, 0.14, 0.13, 0.10, 0.06, 0]) + impact_bin_edges = np.array([0, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) + super().__init__( + indicator_id="flood_depth", + hazard_type=RiverineInundation, + impact_bin_edges=impact_bin_edges, + impact_type=ImpactType.damage, + ) + + def get_impact_curve(self, intensities, asset): + # we interpolate the mean and standard deviation and use this to construct distributions + impact_means = np.interp(intensities, self.intensities, self.impact_means) + impact_stddevs = np.interp(intensities, self.intensities, self.impact_stddevs) + return VulnMatrixProvider( + intensities, impact_cdfs=[checked_beta_distrib(m, s) for m, s in zip(impact_means, impact_stddevs)] + ) + + +def delta_cdf(y): + return lambda x: np.where(x >= y, 1, 0) + + +def checked_beta_distrib(mean, std): + if mean == 0: + return delta_cdf(0) + if mean == 1.0: + return delta_cdf(1) + else: + return beta_distrib(mean, std) + + +def beta_distrib(mean, std): + cv = std / mean + a = (1 - mean) / (cv * cv) - mean + b = a * (1 - mean) / mean + return lambda x, a=a, b=b: stats.beta.cdf(x, a, b) + + +class TestExampleModels(unittest.TestCase): + def test_pdf_based_vulnerability_model(self): + model = ExampleCdfBasedVulnerabilityModel(indicator_id="", hazard_type=Inundation) + + latitude, longitude = 45.268405, 19.885738 + asset = Asset(latitude, longitude) + + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + intensities = np.array( + [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114] + ) + + mock_response = HazardEventDataResponse(return_periods, intensities) + + vul, event = model.get_distributions(asset, [mock_response]) + + def test_user_supplied_model(self): + curve = np.array( + [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114] + ) + store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + + scenario = "rcp8p5" + year = 2080 + + vulnerability_models = DictBasedVulnerabilityModels({RealEstateAsset: [ExampleRealEstateInundationModel()]}) + + assets = [ + RealEstateAsset(lat, lon, location="Asia", type="Building/Industrial") + for lon, lat in zip(TestData.longitudes, TestData.latitudes) + ] + + results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year) + + self.assertAlmostEqual( + results[assets[0], RiverineInundation, scenario, year].impact.to_exceedance_curve().probs[0], 0.499 + ) diff --git a/src/features/.gitkeep b/tests/models/multiple_assets_test.py similarity index 100% rename from src/features/.gitkeep rename to tests/models/multiple_assets_test.py diff --git a/tests/models/power_generating_asset_models_test.py b/tests/models/power_generating_asset_models_test.py new file mode 100644 index 00000000..bc525f3f --- /dev/null +++ b/tests/models/power_generating_asset_models_test.py @@ -0,0 +1,185 @@ +""" Test asset impact calculations.""" + +import os +import unittest +from typing import List + +import numpy as np + +import physrisk.api.v1.common +import physrisk.data.static.world as wd +from physrisk.kernel import Asset, PowerGeneratingAsset, calculation +from physrisk.kernel.assets import 
IndustrialActivity, RealEstateAsset, ThermalPowerGeneratingAsset +from physrisk.kernel.hazard_model import HazardEventDataResponse +from physrisk.kernel.impact import calculate_impacts +from physrisk.kernel.impact_distrib import EmptyImpactDistrib +from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels +from physrisk.utils.lazy import lazy_import +from physrisk.vulnerability_models.power_generating_asset_models import InundationModel +from tests.base_test import TestWithCredentials + +pd = lazy_import("pandas") + + +class TestPowerGeneratingAssetModels(TestWithCredentials): + """Tests World Resource Institute (WRI) models for power generating assets.""" + + def test_inundation(self): + # exceedance curve + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + base_depth = np.array( + [0.0, 0.22372675, 0.3654859, 0.5393629, 0.6642473, 0.78564394, 0.9406518, 1.0539534, 1.1634114] + ) + future_depth = np.array( + [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114] + ) + + # we mock the response of the data request + responses_mock = [ + HazardEventDataResponse(return_periods, base_depth), + HazardEventDataResponse(return_periods, future_depth), + ] + + latitude, longitude = 45.268405, 19.885738 + assets = [Asset(latitude, longitude)] + model = InundationModel(assets) + + impact, _, _ = model.get_impact_details(assets[0], responses_mock) + mean = impact.mean_impact() + + self.assertAlmostEqual(mean, 4.8453897 / 365.0) + + @unittest.skip("example, not test") + def test_create_synthetic_portfolios_and_test(self): + # cache_folder = r"" + + cache_folder = r"/users/joemoorhouse/code/data" + + asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) + # types = asset_list["primary_fuel"].unique() + # interesting = [3, 8, 13, 14, 22, 25, 27, 28, 33, 40, 51, 64, 65, 66, 71, 72, 80, 88, 92, 109] + + filtered = asset_list[0:1000] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + primary_fuel = np.array(filtered["primary_fuel"]) + generation = np.array(filtered["estimated_generation_gwh"]) + + _, continents = wd.get_countries_and_continents(latitudes=latitudes, longitudes=longitudes) + + # Power generating assets that are of interest + assets = [ + PowerGeneratingAsset(lat, lon, generation=gen, location=continent, type=prim_fuel) + for lon, lat, gen, prim_fuel, continent in zip(longitudes, latitudes, generation, primary_fuel, continents) + ] + detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) + keys = list(detailed_results.keys()) + # detailed_results[keys[0]].impact.to_exceedance_curve() + means = np.array([detailed_results[key].impact.mean_impact() for key in keys]) + interesting = [k for (k, m) in zip(keys, means) if m > 0] + assets_out = self.api_assets(item[0] for item in interesting[0:10]) + with open(os.path.join(cache_folder, "assets_example_power_generating_small.json"), "w") as f: + f.write(assets_out.model_dump_json(indent=4)) + + # Synthetic portfolio; industrial activity at different locations + assets = [ + IndustrialActivity(lat, lon, type="Construction", location=continent) + for lon, lat, continent in zip(longitudes, latitudes, continents) + ] + assets = [assets[i] for i in [0, 100, 200, 300, 400, 500, 600, 700, 800, 900]] + detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) + keys = list(detailed_results.keys()) + means = np.array([detailed_results[key].impact.mean_impact() 
for key in detailed_results.keys()]) + interesting = [k for (k, m) in zip(keys, means) if m > 0] + assets_out = self.api_assets(item[0] for item in interesting[0:10]) + with open(os.path.join(cache_folder, "assets_example_industrial_activity_small.json"), "w") as f: + f.write(assets_out.model_dump_json(indent=4)) + + # Synthetic portfolio; real estate assets at different locations + assets = [ + RealEstateAsset(lat, lon, location=continent, type="Buildings/Industrial") + for lon, lat, continent in zip(longitudes, latitudes, continents) + if isinstance(continent, str) and continent != "Oceania" + ] + detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) + keys = list(detailed_results.keys()) + means = np.array([detailed_results[key].impact.mean_impact() for key in detailed_results.keys()]) + interesting = [k for (k, m) in zip(keys, means) if m > 0] + assets_out = self.api_assets(item[0] for item in interesting[0:10]) + with open(os.path.join(cache_folder, "assets_example_real_estate_small.json"), "w") as f: + f.write(assets_out.model_dump_json(indent=4)) + self.assertAlmostEqual(1, 1) + + @unittest.skip("example, not test") + def test_thermal_power_generation_portfolio(self): + cache_folder = os.environ.get("CREDENTIAL_DOTENV_DIR", os.getcwd()) + + asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) + filtered = asset_list.loc[asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"])] + filtered = filtered[-60 < filtered["latitude"]] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + + primary_fuels = np.array( + [primary_fuel.replace(" and ", "And").replace(" ", "") for primary_fuel in filtered["primary_fuel"]] + ) + + # Capacity describes a maximum electric power rate. + # Generation describes the actual electricity output of the plant over a period of time. 
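+        # (Illustrative arithmetic: a 1 MW plant running continuously for a year would
+        # generate 1 MW x 8760 h = 8760 MWh; only the capacity column is used below.)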
+        capacities = np.array(filtered["capacity_mw"])
+
+        _, continents = wd.get_countries_and_continents(latitudes=latitudes, longitudes=longitudes)
+
+        # Power generating assets that are of interest
+        assets = [
+            ThermalPowerGeneratingAsset(latitude, longitude, type=primary_fuel, location=continent, capacity=capacity)
+            for latitude, longitude, capacity, primary_fuel, continent in zip(
+                latitudes,
+                longitudes,
+                capacities,
+                primary_fuels,
+                continents,
+            )
+        ]
+
+        scenario = "ssp585"
+        year = 2030
+
+        hazard_model = calculation.get_default_hazard_model()
+        vulnerability_models = DictBasedVulnerabilityModels(calculation.get_default_vulnerability_models())
+
+        results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+        # results maps each ImpactKey to its result; iterate key/value pairs explicitly
+        out = [
+            {
+                "asset": type(key.asset).__name__,
+                "type": getattr(key.asset, "type", None),
+                "capacity": getattr(key.asset, "capacity", None),
+                "location": getattr(key.asset, "location", None),
+                "latitude": key.asset.latitude,
+                "longitude": key.asset.longitude,
+                "impact_mean": (
+                    None if isinstance(result.impact, EmptyImpactDistrib) else result.impact.mean_impact()
+                ),
+                "hazard_type": key.hazard_type.__name__,
+            }
+            for key, result in results.items()
+        ]
+        pd.DataFrame.from_dict(out).to_csv(
+            os.path.join(cache_folder, "thermal_power_generation_example_" + scenario + "_" + str(year) + ".csv")
+        )
+        self.assertAlmostEqual(1, 1)
+
+    def api_assets(self, assets: List[Asset]):
+        items = [
+            physrisk.api.v1.common.Asset(
+                asset_class=type(a).__name__,
+                type=getattr(a, "type", None),
+                location=getattr(a, "location", None),
+                latitude=a.latitude,
+                longitude=a.longitude,
+            )
+            for a in assets
+        ]
+        return physrisk.api.v1.common.Assets(items=items)
diff --git a/tests/models/real_estate_models_test.py b/tests/models/real_estate_models_test.py
new file mode 100644
index 00000000..18f194d4
--- /dev/null
+++ b/tests/models/real_estate_models_test.py
@@ -0,0 +1,224 @@
+"""Test asset impact calculations."""
+
+import unittest
+
+import numpy as np
+
+from physrisk.data.pregenerated_hazard_model import ZarrHazardModel
+from physrisk.hazard_models.core_hazards import get_default_source_paths
+from physrisk.kernel.assets import RealEstateAsset
+from physrisk.kernel.hazards import CoastalInundation, RiverineInundation
+from physrisk.kernel.impact import ImpactKey, calculate_impacts
+from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels
+from physrisk.vulnerability_models.real_estate_models import (
+    RealEstateCoastalInundationModel,
+    RealEstateRiverineInundationModel,
+)
+
+from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_inundation
+
+
+class TestRealEstateModels(unittest.TestCase):
+    """Tests RealEstateInundationModel."""
+
+    def test_real_estate_model_details(self):
+        curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163])
+        store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve)
+        hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store)
+
+        # location="Europe", type="Buildings/Residential"
+        assets = [
+            RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial")
+            for lon, lat in zip(TestData.longitudes[0:1], TestData.latitudes[0:1])
+        ]
+
+        scenario = "rcp8p5"
+        year = 2080
+
+        vulnerability_models = DictBasedVulnerabilityModels({RealEstateAsset: [RealEstateRiverineInundationModel()]})
+
+        results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+
+        hazard_bin_edges = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].event.intensity_bin_edges
+        hazard_bin_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].event.prob
+
+        # check one:
+        # the probability of inundation greater than 0.505m in a year is 1/10.0
+        # the probability of inundation greater than 0.333m in a year is 1/5.0
+        # therefore the probability of an inundation between 0.333 and 0.505 in a year is 1/5.0 - 1/10.0
+        np.testing.assert_almost_equal(hazard_bin_edges[1:3], np.array([0.333, 0.505]))
+        np.testing.assert_almost_equal(hazard_bin_probs[1], 0.1)
+
+        # check that intensity bin edges for vulnerability matrix are same as for hazard
+        vulnerability_intensity_bin_edges = results[
+            ImpactKey(assets[0], RiverineInundation, scenario, year)
+        ].vulnerability.intensity_bins
+        np.testing.assert_almost_equal(vulnerability_intensity_bin_edges, hazard_bin_edges)
+
+        # check the impact distribution; the prob_matrix has size [len(intensity_bins) - 1, len(impact_bins) - 1]
+        cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].vulnerability.prob_matrix[1, :]
+        # check conditional prob for inundation intensity 0.333..0.505
+        mean, std = np.mean(cond_probs), np.std(cond_probs)
+        np.testing.assert_almost_equal(cond_probs.sum(), 1)
+        np.testing.assert_allclose([mean, std], [0.09090909, 0.08184968], rtol=1e-6)
+
+        # probability that impact occurs between impact bin edge 1 and impact bin edge 2
+        prob_impact = np.dot(
+            hazard_bin_probs,
+            results[ImpactKey(assets[0], RiverineInundation, scenario, year)].vulnerability.prob_matrix[:, 1],
+        )
+        np.testing.assert_almost_equal(prob_impact, 0.19350789547968042)
+
+        # now check against pre-calculated values for the others:
+        np.testing.assert_allclose(
+            results[ImpactKey(assets[0], RiverineInundation, scenario, year)].impact.prob,
+            np.array(
+                [
+                    0.02815762,
+                    0.1935079,
+                    0.11701139,
+                    0.06043065,
+                    0.03347816,
+                    0.02111368,
+                    0.01504522,
+                    0.01139892,
+                    0.00864469,
+                    0.00626535,
+                    0.00394643,
+                ]
+            ),
+            rtol=2e-6,
+        )
+
+    def test_coastal_real_estate_model(self):
+        curve = np.array([0.223, 0.267, 0.29, 0.332, 0.359, 0.386, 0.422, 0.449, 0.476])
+
+        store = mock_hazard_model_store_inundation(TestData.coastal_longitudes, TestData.coastal_latitudes, curve)
+        hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store)
+
+        # location="Europe", type="Buildings/Residential"
+        assets = [
+            RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial")
+            for lon, lat in zip(TestData.coastal_longitudes[0:1], TestData.coastal_latitudes[0:1])
+        ]
+
+        scenario = "rcp8p5"
+        year = 2080
+
+        vulnerability_models = DictBasedVulnerabilityModels({RealEstateAsset: [RealEstateCoastalInundationModel()]})
+
+        results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+
+        np.testing.assert_allclose(
+            results[ImpactKey(assets[0], CoastalInundation, scenario, year)].impact.prob,
+            np.array(
+                [
+                    2.78081230e-02,
+                    1.96296619e-01,
+                    1.32234770e-01,
+                    7.36581177e-02,
+                    3.83434609e-02,
+                    1.83916914e-02,
+                    7.97401009e-03,
+                    3.04271878e-03,
+                    9.79400125e-04,
+                    2.41250436e-04,
+                    2.98387241e-05,
+                ]
+            ),
+            rtol=2e-6,
+        )
+
+    def test_commercial_real_estate_model_details(self):
+        curve = np.array(
+            [2.8302893e-06, 0.09990284, 0.21215445, 0.531271, 0.7655724, 0.99438345, 1.2871761, 1.502281, 1.7134278]
+        )
+        store = mock_hazard_model_store_inundation(TestData.longitudes, TestData.latitudes, curve)
+        hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store)
+
+        # location="South America", type="Buildings/Commercial"
+        assets = [
+            RealEstateAsset(lat, lon, location="South America", type="Buildings/Commercial")
+            for lon, lat in zip(TestData.longitudes[-4:-3], TestData.latitudes[-4:-3])
+        ]
+
+        scenario = "rcp8p5"
+        year = 2080
+
+        # impact bin edges are calibrated so that hazard_bin_probs == impact_bin_probs
+        # when the impact standard deviation is negligible:
+        vulnerability_models = DictBasedVulnerabilityModels(
+            {
+                RealEstateAsset: [
+                    RealEstateRiverineInundationModel(
+                        impact_bin_edges=np.array(
+                            [
+                                0,
+                                0.030545039098059,
+                                0.125953058445539,
+                                0.322702019487674,
+                                0.566880882840096,
+                                0.731980974578735,
+                                0.823993215529066,
+                                0.884544511664047,
+                                0.922115133960502,
+                                0.969169745946688,
+                                1.0,
+                            ]
+                        )
+                    )
+                ]
+            }
+        )
+
+        results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+
+        hazard_bin_edges = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].event.intensity_bin_edges
+        hazard_bin_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].event.prob
+
+        # check one:
+        # the probability of inundation greater than 0.531271m in a year is 1/25
+        # the probability of inundation greater than 0.21215445m in a year is 1/10
+        # therefore the probability of an inundation between 0.21215445 and 0.531271 in a year is 1/10 - 1/25
+        np.testing.assert_almost_equal(hazard_bin_edges[2:4], np.array([0.21215445, 0.531271]))
+        np.testing.assert_almost_equal(hazard_bin_probs[2], 0.06)
+
+        # check that intensity bin edges for vulnerability matrix are same as for hazard
+        vulnerability_intensity_bin_edges = results[
+            ImpactKey(assets[0], RiverineInundation, scenario, year)
+        ].vulnerability.intensity_bins
+        np.testing.assert_almost_equal(vulnerability_intensity_bin_edges, hazard_bin_edges)
+
+        # check the impact distribution; the prob_matrix has size [len(intensity_bins) - 1, len(impact_bins) - 1]
+        cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)].vulnerability.prob_matrix[2, :]
+        # check conditional prob for inundation intensity at 0.371712725m
+        mean, std = np.mean(cond_probs), np.std(cond_probs)
+        np.testing.assert_almost_equal(cond_probs.sum(), 1)
+        np.testing.assert_allclose([mean, std], [0.1, 0.2884275164878624], rtol=1e-6)
+
+        # probability that impact occurs between impact bin edge 2 and impact bin edge 3
+        prob_impact = np.dot(
+            hazard_bin_probs,
+            results[ImpactKey(assets[0], RiverineInundation, scenario, year)].vulnerability.prob_matrix[:, 2],
+        )
+        np.testing.assert_almost_equal(prob_impact, 0.10040196672295522)
+
+        # now check against pre-calculated values for the others:
+        np.testing.assert_allclose(
+            results[ImpactKey(assets[0], RiverineInundation, scenario, year)].impact.prob,
+            np.array(
+                [
+                    2.009085e-07,
+                    3.001528e-01,
+                    1.004020e-01,
+                    5.885136e-02,
+                    1.760415e-02,
+                    1.159864e-02,
+                    6.130639e-03,
+                    2.729225e-03,
+                    1.446537e-03,
+                    8.450993e-05,
+                ]
+            ),
+            rtol=2e-6,
+        )
diff --git a/tests/models/wbgt_model_test.py b/tests/models/wbgt_model_test.py
new file mode 100644
index 00000000..3c44e745
--- /dev/null
+++ b/tests/models/wbgt_model_test.py
@@ -0,0 +1,232 @@
+import unittest
+from typing import Iterable, List, Union, cast
+
+import numpy as np
+
+from physrisk.data.pregenerated_hazard_model import ZarrHazardModel
+from physrisk.hazard_models.core_hazards import get_default_source_paths
+from physrisk.kernel.assets import Asset, IndustrialActivity
+from physrisk.kernel.hazard_model import HazardDataRequest, HazardDataResponse, HazardParameterDataResponse
+from physrisk.kernel.hazards import ChronicHeat
+from physrisk.kernel.impact import calculate_impacts
+from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType
+from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels
+from physrisk.vulnerability_models.chronic_heat_models import ChronicHeatGZNModel, get_impact_distrib
+
+from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_heat_wbgt
+
+
+class ExampleWBGTGZNJointModel(ChronicHeatGZNModel):
+    """Example implementation of the WBGT chronic heat model. This model inherits attributes
+    from the ChronicHeatGZN model and estimates results by applying both the GZN and WBGT
+    methodologies."""
+
+    def __init__(self, indicator_id: str = "mean_work_loss_high"):
+        super().__init__(indicator_id, ChronicHeat)  # opportunity to give a model hint, but blank here
+
+    def work_type_mapping(self):
+        return {"low": ["low", "medium"], "medium": ["medium", "low", "high"], "high": ["high", "medium"]}
+
+    def get_data_requests(
+        self, asset: Asset, *, scenario: str, year: int
+    ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]:
+        """Request the hazard data needed by the vulnerability model for a specific asset
+        (this is a Google-style doc string)
+
+        Args:
+            asset: Asset for which data is requested.
+            scenario: Climate scenario of calculation.
+            year: Projection year of calculation.
+
+        Returns:
+            Single or multiple data requests.
+        """
+
+        work_type_mapping = self.work_type_mapping()
+        assert isinstance(asset, IndustrialActivity)
+        # specify hazard data needed; the model string is hierarchical and '/' separated
+        model_gzn = "mean_degree_days/above/32c"
+        model_wbgt = "mean_work_loss/"
+
+        asset_types = list(work_type_mapping[asset.type])
+        wbgt_data_requests = []
+        for asset_type in asset_types:
+            wbgt_data_requests.append(
+                HazardDataRequest(
+                    self.hazard_type,
+                    asset.longitude,
+                    asset.latitude,
+                    scenario="historical",
+                    year=2010,
+                    indicator_id=model_wbgt + asset_type,
+                )
+            )
+
+            wbgt_data_requests.append(
+                HazardDataRequest(
+                    self.hazard_type,
+                    asset.longitude,
+                    asset.latitude,
+                    scenario=scenario,
+                    year=year,
+                    indicator_id=model_wbgt + asset_type,
+                )
+            )
+
+        return [
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario="historical",
+                year=1980,
+                indicator_id=model_gzn,
+            ),
+            HazardDataRequest(
+                self.hazard_type,
+                asset.longitude,
+                asset.latitude,
+                scenario=scenario,
+                year=year,
+                indicator_id=model_gzn,
+            ),
+        ] + wbgt_data_requests
+
+    def get_impact(self, asset: Asset, data_responses: List[HazardDataResponse]) -> ImpactDistrib:
+        """
+        Return the impact distribution of the WBGT model.
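+
+        Args:
+            asset: Asset for which the impact is calculated; assumed to be an IndustrialActivity.
+            data_responses: Responses to the requests made in get_data_requests, in the same
+                order: two GZN degree-day responses followed by the WBGT work-loss responses.
+
+        Returns:
+            ImpactDistrib: Impact (fractional work loss) distribution.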
+        """
+
+        assert isinstance(asset, IndustrialActivity)
+        wbgt_responses = [cast(HazardParameterDataResponse, r) for r in data_responses[2:]]
+
+        baseline_dd_above_mean = cast(HazardParameterDataResponse, data_responses[0])
+        scenario_dd_above_mean = cast(HazardParameterDataResponse, data_responses[1])
+
+        hours_worked = self.total_labour_hours
+        fraction_loss_mean_base_gzn = (baseline_dd_above_mean.parameter * self.time_lost_per_degree_day) / hours_worked
+
+        fraction_loss_mean_scenario_gzn = (
+            scenario_dd_above_mean.parameter * self.time_lost_per_degree_day
+        ) / hours_worked
+
+        fraction_loss_std_base = (baseline_dd_above_mean.parameter * self.time_lost_per_degree_day_se) / hours_worked
+
+        fraction_loss_std_scenario = (
+            scenario_dd_above_mean.parameter * self.time_lost_per_degree_day_se
+        ) / hours_worked
+
+        baseline_work_ability = (1 - fraction_loss_mean_base_gzn) * (1 - wbgt_responses[0].parameter)
+        scenario_work_ability = (1 - fraction_loss_mean_scenario_gzn) * (1 - wbgt_responses[1].parameter)
+
+        # Get the parameters required for the uniform distribution.
+        if asset.type in ["low", "high"]:
+            a_historical = (
+                wbgt_responses[0].parameter - abs((wbgt_responses[2].parameter - wbgt_responses[0].parameter)) / 2
+            )
+            b_historical = (
+                wbgt_responses[0].parameter + abs((wbgt_responses[2].parameter - wbgt_responses[0].parameter)) / 2
+            )
+            a_scenario = (
+                wbgt_responses[1].parameter - abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2
+            )
+            b_scenario = (
+                wbgt_responses[1].parameter + abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2
+            )
+        elif asset.type == "medium":
+            a_historical = wbgt_responses[0].parameter - (wbgt_responses[2].parameter - wbgt_responses[0].parameter) / 2
+            b_historical = wbgt_responses[0].parameter + (wbgt_responses[4].parameter - wbgt_responses[0].parameter) / 2
+            a_scenario = (
+                wbgt_responses[1].parameter - abs((wbgt_responses[3].parameter - wbgt_responses[1].parameter)) / 2
+            )
+            b_scenario = (
+                wbgt_responses[1].parameter + abs((wbgt_responses[5].parameter - wbgt_responses[1].parameter)) / 2
+            )
+        else:
+            raise ValueError(f"unexpected work type {asset.type}")
+
+        # Estimation of the variance
+        variance_historical_uni = ((b_historical - a_historical) ** 2) / 12
+        variance_scenario_uni = ((b_scenario - a_scenario) ** 2) / 12
+
+        variance_historical = two_variable_joint_variance(
+            (1 - fraction_loss_mean_base_gzn),
+            fraction_loss_std_base**2,
+            (1 - wbgt_responses[0].parameter),
+            variance_historical_uni,
+        )
+        variance_scenario = two_variable_joint_variance(
+            (1 - fraction_loss_mean_scenario_gzn),
+            fraction_loss_std_scenario**2,
+            (1 - wbgt_responses[1].parameter),
+            variance_scenario_uni,
+        )
+
+        std_delta = variance_scenario ** (0.5) - variance_historical ** (0.5)
+
+        total_work_loss_delta: float = baseline_work_ability - scenario_work_ability
+
+        return get_impact_distrib(total_work_loss_delta, std_delta, ChronicHeat, ImpactType.disruption)
+
+
+def two_variable_joint_variance(ex, varx, ey, vary):
+    """
+    Estimate the variance of the product of two uncorrelated variables:
+    Var(XY) = Var(X)Var(Y) + Var(X)E[Y]^2 + Var(Y)E[X]^2.
+    """
+    return varx * vary + varx * (ey**2) + vary * (ex**2)
+
+
+class TestChronicAssetImpact(unittest.TestCase):
+    """Tests the impact on an asset of a chronic hazard model."""
+
+    def test_wbgt_vulnerability(self):
+        store = mock_hazard_model_store_heat_wbgt(TestData.longitudes, TestData.latitudes)
+        hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store)
+        # 'chronic_heat/osc/v2/mean_work_loss_high_ACCESS-CM2_historical_2005'
+        scenario = "ssp585"
+        year = 2050
+
+        vulnerability_models = DictBasedVulnerabilityModels({IndustrialActivity: [ExampleWBGTGZNJointModel()]})
+
+        assets = [
+            IndustrialActivity(lat, lon, type="high") for lon, lat in zip(TestData.longitudes, TestData.latitudes)
+        ][:1]
+
+        results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+
+        value_test = list(results.values())[0].impact.prob
+
+        value_exp = np.array(
+            [
+                0.00000119194,
+                0.00000046573,
+                0.00000063758,
+                0.00000086889,
+                0.00000117871,
+                0.00000159172,
+                0.00000213966,
+                0.00000286314,
+                0.00000381379,
+                0.00000505696,
+                0.00021143251,
+                0.00167372506,
+                0.00924050344,
+                0.03560011430,
+                0.09575512509,
+                0.17988407024,
+                0.23607703667,
+                0.21646814108,
+                0.13867487025,
+                0.06205630207,
+                0.02433887116,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+                0.00000000000,
+            ]
+        )
+        np.testing.assert_almost_equal(value_test, value_exp, decimal=8)
diff --git a/tests/models/wind_models_test.py b/tests/models/wind_models_test.py
new file mode 100644
index 00000000..36cad386
--- /dev/null
+++ b/tests/models/wind_models_test.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+import tests.data.hazard_model_store_test as hms
+from physrisk.data.pregenerated_hazard_model import ZarrHazardModel
+from physrisk.hazard_models.core_hazards import ResourceSubset, get_default_source_path_provider
+from physrisk.kernel.assets import RealEstateAsset
+from physrisk.kernel.hazards import Wind
+from physrisk.kernel.impact import calculate_impacts
+from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels
+from physrisk.vulnerability_models.real_estate_models import GenericTropicalCycloneModel
+
+
+def test_wind_real_estate_model():
+    scenario = "rcp8p5"
+    year = 2080
+    # mock some IRIS data for the calculation:
+    store, root = hms.zarr_memory_store()
+    # fmt: off
+    return_periods = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0]  # noqa
+    intensity = np.array([37.279999, 44.756248, 48.712502, 51.685001, 53.520000, 55.230000, 56.302502, 57.336250, 58.452499, 59.283749, 63.312500, 65.482498, 66.352501, 67.220001, 67.767502, 68.117500, 68.372498, 69.127502, 70.897499])  # noqa
+    # fmt: on
+    shape, transform = hms.shape_transform_21600_43200(return_periods=return_periods)
+    path = f"wind/iris/v1/max_speed_{scenario}_{year}"
+    hms.add_curves(
+        root, hms.TestData.longitudes, hms.TestData.latitudes, path, shape, intensity, return_periods, transform
+    )
+
+    provider = get_default_source_path_provider()
+
+    def select_iris_osc(candidates: ResourceSubset, scenario: str, year: int, hint=None):
+        return candidates.with_group_id("iris_osc").first()
+
+    # specify use of IRIS (OSC contribution)
+    provider.add_selector(Wind, "max_speed", select_iris_osc)
+
+    hazard_model = ZarrHazardModel(source_paths=provider.source_paths(), store=store)
+    assets = [
+        RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial")
+        for lon, lat in zip(hms.TestData.longitudes[0:1], hms.TestData.latitudes[0:1])
+    ]
+    vulnerability_models = DictBasedVulnerabilityModels({RealEstateAsset: [GenericTropicalCycloneModel()]})
+    results = calculate_impacts(assets, hazard_model, vulnerability_models, scenario=scenario, year=year)
+    # check the calculation by computing the expected mean impact directly from the exceedance curve
+    cum_probs = 1.0 / np.array(return_periods)
+    probs = cum_probs[:-1] - cum_probs[1:]
+    model = GenericTropicalCycloneModel()
+    edges = np.interp(intensity, model.damage_curve_intensities, model.damage_curve_impacts)
+    centres = (edges[1:] + edges[:-1]) / 2
+    mean_check = np.sum(probs * centres)
+
+    impact_distrib = results[(assets[0], Wind, scenario, year)].impact
+    mean_impact = impact_distrib.mean_impact()
+    np.testing.assert_allclose(mean_impact, mean_check)
diff --git a/tests/models/wind_turbine_models_test.py b/tests/models/wind_turbine_models_test.py
new file mode 100644
index 00000000..c2da4c10
--- /dev/null
+++ b/tests/models/wind_turbine_models_test.py
@@ -0,0 +1,164 @@
+import typing
+import unittest
+
+import numpy as np
+
+from physrisk.kernel.assets import WindTurbine
+from physrisk.kernel.events import (
+    EmpiricalMultivariateDistribution,
+    MultivariateDistribution,
+    calculate_cumulative_probs,
+)
+from physrisk.kernel.hazard_model import HazardEventDataResponse
+
+TAsset = typing.TypeVar("TAsset", contravariant=True)
+
+
+class SupportsEventImpact(typing.Protocol[TAsset]):
+    def get_impact(self, asset: TAsset, event_data: HazardEventDataResponse) -> MultivariateDistribution:
+        pass
+
+
+class WindTurbineModel(SupportsEventImpact[WindTurbine]):
+    """Placeholder wind turbine model to be populated."""
+
+    def prob_collapse(self, turbine: WindTurbine, wind_speed_hub: np.ndarray):
+        """Calculates probability of turbine collapse for a number of events given the wind speed
+        at the hub per event and characteristics of the turbine.
+
+        Args:
+            turbine (WindTurbine): Wind turbine asset.
+            wind_speed_hub (np.ndarray): 1-D array of wind speeds at hub height for each event (length of array is number of events).  # noqa: E501
+
+        Returns:
+            np.ndarray: 1-D array of probabilities of collapse for each event (length of array is number of events).
+        """
+        # just a placeholder model that returns a probability of 0.3 for all events, regardless of wind speed!
+        return np.ones_like(wind_speed_hub) * 0.3
+
+    def get_impact(self, asset: WindTurbine, event_data: HazardEventDataResponse):
+        """Returns the probability distributions of fractional damage to the turbine for each event.
+
+        Args:
+            asset (WindTurbine): Wind turbine asset.
+            event_data (HazardEventDataResponse): Provides wind speeds for different events with different probabilities.  # noqa: E501
+
+        Raises:
+            NotImplementedError: Supports only the case of a single probability bin: i.e. for each event the wind speed is deterministic.  # noqa: E501
+
+        Returns:
+            MultivariateDistribution: Probability distribution of impacts associated with events.
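+
+        Note:
+            The placeholder implementation returns, for each event, a two-point distribution:
+            total loss (impact 1.0) with the collapse probability and no loss (impact 0.0)
+            otherwise.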
+        """
+        intens = event_data.intensities
+        # shape is (nb_prob_bins, nb_events)
+        if intens.ndim > 1 and intens.shape[0] != 1:
+            # only the single probability bin case is implemented
+            raise NotImplementedError()
+        wind_speed = intens.squeeze(axis=0)  # vector of wind speeds, one per event
+        pc = self.prob_collapse(asset, wind_speed)
+        bins_lower = np.array([0.0, 1.0])
+        bins_upper = np.array([0.0, 1.0])
+        probs = np.stack([1.0 - pc, pc], axis=1)
+        return EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs)
+
+
+class TestWindTurbineModels:
+    def test_cumulative_probs(self):
+        """Test calculation of cumulative probability from a combination of lumped probability and uniform probability
+        density bins.
+        """
+        bins_lower = np.array([2.0, 3.0])
+        bins_upper = np.array([2.5, 3.5])
+        probs = np.array([[0.5, 0.5], [0.2, 0.8]])
+        values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs)
+        np.testing.assert_almost_equal(values, [2.0, 2.5, 3.0, 3.5])
+        np.testing.assert_almost_equal(cum_probs[0, :], [0, 0.5, 0.5, 1.0])
+        np.testing.assert_almost_equal(cum_probs[1, :], [0, 0.2, 0.2, 1.0])
+
+        bins_lower = np.array([2.0])
+        bins_upper = np.array([2.0])
+        probs = np.array([[1.0], [1.0]])
+        values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs)
+        np.testing.assert_almost_equal(values, [2.0, 2.0])
+        np.testing.assert_almost_equal(cum_probs[0, :], [0, 1.0])
+        np.testing.assert_almost_equal(cum_probs[1, :], [0, 1.0])
+
+        bins_lower = np.array([2.0, 2.0])
+        bins_upper = np.array([2.0, 3.0])
+        probs = np.array([[0.5, 0.5], [0.1, 0.9]])
+        values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs)
+        np.testing.assert_almost_equal(values, [2.0, 2.0, 3.0])
+        np.testing.assert_almost_equal(cum_probs[0, :], [0.0, 0.5, 1.0])
+        np.testing.assert_almost_equal(cum_probs[1, :], [0, 0.1, 1.0])
+
+        bins_lower = np.array([2.0, 3.0, 4.0, 5.0, 6.0])
+        bins_upper = np.array([2.0, 3.0, 5.0, 5.5, 6.0])
+        probs = np.array([[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.1, 0.1, 0.1, 0.6]])
+        values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs)
+        np.testing.assert_almost_equal(values, [2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.5, 6.0, 6.0])
+        np.testing.assert_almost_equal(cum_probs[0, :], [0, 0.1, 0.1, 0.3, 0.3, 0.6, 0.8, 0.8, 1.0])
+        np.testing.assert_almost_equal(cum_probs[1, :], [0, 0.1, 0.1, 0.2, 0.2, 0.3, 0.4, 0.4, 1.0])
+
+    def test_sampling(self):
+        """Test sampling from probability distributions comprising two lumped probabilities."""
+        bins_lower = np.array([2.0, 3.0])
+        bins_upper = np.array([2.0, 3.0])
+        probs = np.array([[0.3, 0.7], [0.4, 0.6]])
+        pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs)
+        gen = np.random.Generator(np.random.MT19937(111))
+        samples = pdf.inv_cumulative_marginal_probs(gen.random(size=(2, 10000)))
+        check_2 = np.count_nonzero(samples[0, :] == 2.0)
+        check_3 = np.count_nonzero(samples[0, :] == 3.0)
+        assert check_2 == 3062
+        assert check_3 == 6938
+        check_2 = np.count_nonzero(samples[1, :] == 2.0)
+        check_3 = np.count_nonzero(samples[1, :] == 3.0)
+        assert check_2 == 3924
+        assert check_3 == 6076
+
+    @unittest.skip("Performance test: slow.")
+    def test_performance(self):
+        nb_events = 10000
+        nb_samples = 10
+        bins_lower = np.array([0.0, 1.0])
+        bins_upper = np.array([0.0, 1.0])
+        probs = np.tile(np.array([[0.3, 0.7]]), reps=(nb_events, 1))
+        pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs)
+        gen = np.random.Generator(np.random.MT19937(111))
+        uniforms = gen.random(size=(nb_events, nb_samples))
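+        # draw an initial set of samples, then repeatedly redraw to exercise the sampling code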
+        samples = pdf.inv_cumulative_marginal_probs(uniforms)
+        for _ in range(1000):
+            uniforms = gen.random(size=(nb_events, nb_samples))
+            samples = pdf.inv_cumulative_marginal_probs(uniforms)
+        print(samples)
+
+    def test_rotor_damage_event_based(self):
+        """Test demonstrating how WindTurbineModel can be used in an event-based calculation."""
+        # The data response for a single asset will be a HazardEventDataResponse.
+        # The format supports a distribution of hazard intensities *per event*,
+        # but here we simply have a single realisation per event.
+
+        # The hazard model is responsible for sourcing the events. Here we provide output from
+        # that model directly.
+
+        rng = np.random.Generator(np.random.MT19937(111))
+        asset1, asset2 = WindTurbine(), WindTurbine()
+        nb_events = 20
+        response_asset1 = HazardEventDataResponse(np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])))
+        response_asset2 = HazardEventDataResponse(np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])))
+
+        turbine_model = WindTurbineModel()
+
+        # provides the impact distributions for each asset
+        impacts_asset1 = turbine_model.get_impact(asset1, response_asset1)
+        impacts_asset2 = turbine_model.get_impact(asset2, response_asset2)
+
+        # we sample 10 times for each event for each asset
+        uniforms = rng.random(size=(nb_events, 10))
+        samples_asset1 = impacts_asset1.inv_cumulative_marginal_probs(uniforms)
+        samples_asset2 = impacts_asset2.inv_cumulative_marginal_probs(uniforms)
+
+        # we can then combine samples and calculate measures...
+        # for now just sanity-check that we get approx. 0.3 of total loss in events from the placeholder.
+        np.testing.assert_almost_equal(np.count_nonzero(samples_asset1 == 1.0) / samples_asset1.size, 0.31)
+        np.testing.assert_almost_equal(np.count_nonzero(samples_asset2 == 1.0) / samples_asset2.size, 0.31)
diff --git a/src/models/.gitkeep b/tests/risk_models/__init__.py
similarity index 100%
rename from src/models/.gitkeep
rename to tests/risk_models/__init__.py
diff --git a/tests/risk_models/risk_models_test.py b/tests/risk_models/risk_models_test.py
new file mode 100644
index 00000000..1c4bee7d
--- /dev/null
+++ b/tests/risk_models/risk_models_test.py
@@ -0,0 +1,177 @@
+"""Test asset impact calculations."""
+
+from typing import Sequence
+
+import numpy as np
+
+from physrisk import requests
+from physrisk.api.v1.impact_req_resp import RiskMeasureKey, RiskMeasuresHelper
+from physrisk.data.pregenerated_hazard_model import ZarrHazardModel
+from physrisk.hazard_models.core_hazards import get_default_source_paths
+from physrisk.kernel.assets import RealEstateAsset
+from physrisk.kernel.calculation import get_default_vulnerability_models
+from physrisk.kernel.hazards import ChronicHeat, CoastalInundation, RiverineInundation, Wind
+from physrisk.kernel.risk import AssetLevelRiskModel, MeasureKey
+from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels
+from physrisk.requests import _create_risk_measures
+from physrisk.risk_models.risk_models import RealEstateToyRiskMeasures
+
+from ..base_test import TestWithCredentials
+from ..data.hazard_model_store_test import TestData, ZarrStoreMocker, inundation_return_periods
+
+
+class TestRiskModels(TestWithCredentials):
+    def test_risk_indicator_model(self):
+        scenarios = ["rcp8p5"]
+        years = [2050]
+
+        assets = self._create_assets()
+        hazard_model = self._create_hazard_model(scenarios, years)
+
+        model = AssetLevelRiskModel(
+            hazard_model,
+            DictBasedVulnerabilityModels(get_default_vulnerability_models()),
+            {RealEstateAsset: RealEstateToyRiskMeasures()},
+        )
+        measure_ids_for_asset, definitions = model.populate_measure_definitions(assets)
+        _, measures = model.calculate_risk_measures(assets, prosp_scens=scenarios, years=years)
+
+        # how to get a score using the MeasureKey
+        measure = measures[MeasureKey(assets[0], scenarios[0], years[0], RiverineInundation)]
+        score = measure.score
+        measure_0 = measure.measure_0
+        np.testing.assert_allclose([measure_0], [0.89306593179])
+
+        # packing up the risk measures, e.g. for JSON transmission:
+        risk_measures = _create_risk_measures(measures, measure_ids_for_asset, definitions, assets, scenarios, years)
+        # we still have a key, but no asset:
+        key = RiskMeasureKey(
+            hazard_type="RiverineInundation",
+            scenario_id=scenarios[0],
+            year=str(years[0]),
+            measure_id=risk_measures.score_based_measure_set_defn.measure_set_id,
+        )
+        item = next(m for m in risk_measures.measures_for_assets if m.key == key)
+        score2 = item.scores[0]
+        measure_0_2 = item.measures_0[0]
+        assert score == score2
+        assert measure_0 == measure_0_2
+
+        helper = RiskMeasuresHelper(risk_measures)
+        asset_scores, measures, definitions = helper.get_measure("ChronicHeat", scenarios[0], years[0])
+        label, description = helper.get_score_details(asset_scores[0], definitions[0])
+        assert asset_scores[0] == 4
+
+    def _create_assets(self):
+        assets = [
+            RealEstateAsset(TestData.latitudes[0], TestData.longitudes[0], location="Asia", type="Buildings/Industrial")
+            for _ in range(2)
+        ]
+        return assets
+
+    def _create_assets_json(self, assets: Sequence[RealEstateAsset]):
+        assets_dict = {
+            "items": [
+                {
+                    "asset_class": type(asset).__name__,
+                    "type": asset.type,
+                    "location": asset.location,
+                    "longitude": asset.longitude,
+                    "latitude": asset.latitude,
+                    "attributes": {
+                        "number_of_storeys": "2",
+                        "structure_type": "concrete",
+                    },
+                }
+                for asset in assets
+            ],
+        }
+        return assets_dict
+
+    def _create_hazard_model(self, scenarios, years):
+        source_paths = get_default_source_paths()
+
+        def sp_riverine(scenario, year):
+            return source_paths[RiverineInundation](indicator_id="flood_depth", scenario=scenario, year=year)
+
+        def sp_coastal(scenario, year):
+            return source_paths[CoastalInundation](indicator_id="flood_depth", scenario=scenario, year=year)
+
+        def sp_wind(scenario, year):
+            return source_paths[Wind](indicator_id="max_speed", scenario=scenario, year=year)
+
+        def sp_heat(scenario, year):
+            return source_paths[ChronicHeat](indicator_id="mean_degree_days/above/index", scenario=scenario, year=year)
+
+        mocker = ZarrStoreMocker()
+        return_periods = inundation_return_periods()
+        flood_histo_curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163])
+        flood_projected_curve = np.array([0.0596, 0.333, 0.605, 0.915, 1.164, 1.503, 1.649, 1.763, 1.963])
+
+        for path in [sp_riverine("historical", 1980), sp_coastal("historical", 1980)]:
+            mocker.add_curves_global(path, TestData.longitudes, TestData.latitudes, return_periods, flood_histo_curve)
+
+        for path in [sp_riverine("rcp8p5", 2050), sp_coastal("rcp8p5", 2050)]:
+            mocker.add_curves_global(
+                path, TestData.longitudes, TestData.latitudes, return_periods, flood_projected_curve
+            )
+
+        mocker.add_curves_global(
+            sp_wind("historical", -1),
+            TestData.longitudes,
+            TestData.latitudes,
+            TestData.wind_return_periods,
+            TestData.wind_intensities_1,
+        )
+        mocker.add_curves_global(
+            sp_wind("rcp8p5", 2050),
+            TestData.longitudes,
+            TestData.latitudes,
+            TestData.wind_return_periods,
+            TestData.wind_intensities_2,
+        )
+        mocker.add_curves_global(
+            sp_heat("historical", -1),
+            TestData.longitudes,
+            TestData.latitudes,
+            TestData.temperature_thresholds,
+            TestData.degree_days_above_index_1,
+        )
+        mocker.add_curves_global(
+            sp_heat("rcp8p5", 2050),
+            TestData.longitudes,
+            TestData.latitudes,
+            TestData.temperature_thresholds,
+            TestData.degree_days_above_index_2,
+        )
+
+        return ZarrHazardModel(source_paths=source_paths, store=mocker.store)
+
+    def test_via_requests(self):
+        scenarios = ["ssp585"]
+        years = [2050]
+
+        assets = self._create_assets()
+        # hazard_model = ZarrHazardModel(source_paths=get_default_source_paths())
+        hazard_model = self._create_hazard_model(scenarios, years)
+
+        request_dict = {
+            "assets": self._create_assets_json(assets),
+            "include_asset_level": False,
+            "include_measures": True,
+            "include_calc_details": False,
+            "years": years,
+            "scenarios": scenarios,
+        }
+
+        request = requests.AssetImpactRequest(**request_dict)
+        response = requests._get_asset_impacts(
+            request,
+            hazard_model,
+            vulnerability_models=DictBasedVulnerabilityModels(get_default_vulnerability_models()),
+        )
+        res = next(
+            ma for ma in response.risk_measures.measures_for_assets if ma.key.hazard_type == "RiverineInundation"
+        )
+        np.testing.assert_allclose(res.measures_0, [0.89306593179, 0.89306593179])
+        # json_str = json.dumps(response.model_dump(), cls=NumpyArrayEncoder)
diff --git a/tox.ini b/tox.ini
index 7b509277..9ccadae3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,3 +1,35 @@
+[tox]
+envlist = py38, static
+
+[testenv]
+deps =
+    pytest
+commands = pytest {posargs}
+
+[testenv:static]
+deps =
+    mypy
+    isort
+    black
+    flake8
+commands =
+    mypy --install-types --non-interactive src
+    isort --check .
+    black --check .
+    flake8 src
+
+[testenv:cov]
+usedevelop = True
+deps =
+    pytest-cov
+commands = pytest --cov-report=html {posargs}
+
 [flake8]
+count = True
 max-line-length = 120
 max-complexity = 10
+# Allow __init__ files to have unused imports.
+per-file-ignores = __init__.py:F401
+extend-ignore =
+    # Allow spacing before colon (to favor Black).
+    E203
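+# Usage note (assuming tox is installed): plain `tox` runs the py38 and static
+# environments from envlist above; `tox -e cov` runs the tests with an HTML coverage report.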