diff --git a/.github/workflows/_disabled/gt4py-sphinx.yml b/.github/workflows/_disabled/gt4py-sphinx.yml
index d862ab7321..cb3b275787 100644
--- a/.github/workflows/_disabled/gt4py-sphinx.yml
+++ b/.github/workflows/_disabled/gt4py-sphinx.yml
@@ -22,7 +22,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v1
         with:
-          python-version: 3.8
+          python-version: "3.10"
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
diff --git a/.github/workflows/daily-ci.yml b/.github/workflows/daily-ci.yml
index 7ece5a4d5e..28512a18ac 100644
--- a/.github/workflows/daily-ci.yml
+++ b/.github/workflows/daily-ci.yml
@@ -15,7 +15,7 @@ jobs:
   daily-ci:
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         tox-module-factor: ["cartesian", "eve", "next", "storage"]
         os: ["ubuntu-latest"]
         requirements-file: ["requirements-dev.txt", "min-requirements-test.txt", "min-extra-requirements-test.txt"]
diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml
index 45bbdf271a..76fd898159 100644
--- a/.github/workflows/test-cartesian-fallback.yml
+++ b/.github/workflows/test-cartesian-fallback.yml
@@ -17,7 +17,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         tox-factor: [internal, dace]

     steps:
diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml
index f7e78ee6c1..fd896c3d89 100644
--- a/.github/workflows/test-cartesian.yml
+++ b/.github/workflows/test-cartesian.yml
@@ -25,7 +25,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
        tox-factor: [internal, dace]
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/test-eve-fallback.yml b/.github/workflows/test-eve-fallback.yml
index 661118e71d..461400423f 100644
--- a/.github/workflows/test-eve-fallback.yml
+++ b/.github/workflows/test-eve-fallback.yml
@@ -18,7 +18,7 @@ jobs:
   test-eve:
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         os: ["ubuntu-latest"]

     runs-on: ${{ matrix.os }}
diff --git a/.github/workflows/test-eve.yml b/.github/workflows/test-eve.yml
index bfd6d8e481..e83c4c563b 100644
--- a/.github/workflows/test-eve.yml
+++ b/.github/workflows/test-eve.yml
@@ -22,7 +22,7 @@ jobs:
   test-eve:
     strategy:
      matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         os: ["ubuntu-latest"]
      fail-fast: false
diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml
index df861c6468..022c66b1f1 100644
--- a/.github/workflows/test-storage-fallback.yml
+++ b/.github/workflows/test-storage-fallback.yml
@@ -19,7 +19,7 @@ jobs:
   test-storage:
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         tox-factor: [internal, dace]
         os: ["ubuntu-latest"]
diff --git a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml
index 2f85670eeb..bfe6e49d23 100644
--- a/.github/workflows/test-storage.yml
+++ b/.github/workflows/test-storage.yml
@@ -23,7 +23,7 @@ jobs:
   test-storage:
     strategy:
      matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11"]
         tox-factor: [internal, dace]
         os: ["ubuntu-latest"]
      fail-fast: false
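A note on the version matrices above: the entries are quoted because YAML parses a bare `3.10` as the float `3.1`, which `actions/setup-python` would then try to resolve as Python 3.1 (this is why the sphinx workflow's single `python-version` value is quoted as well). A minimal sketch of the pitfall, assuming PyYAML is available:

```python
import yaml

# A bare 3.10 is a YAML float and silently loses its trailing zero.
print(yaml.safe_load("python-version: 3.10"))    # {'python-version': 3.1}
# Quoting keeps it a string, which is what setup-python expects.
print(yaml.safe_load('python-version: "3.10"'))  # {'python-version': '3.10'}
```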
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e383112310..051781ea49 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -50,7 +50,7 @@ repos:
   ## version = re.search('ruff==([0-9\.]*)', open("constraints.txt").read())[1]
   ## print(f"rev: v{version}")
   ##]]]
-  rev: v0.8.2
+  rev: v0.8.6
   ##[[[end]]]
   hooks:
     # Run the linter.
@@ -72,9 +72,9 @@ repos:
   ## version = re.search('mypy==([0-9\.]*)', open("constraints.txt").read())[1]
   ## print(f"#========= FROM constraints.txt: v{version} =========")
   ##]]]
-  #========= FROM constraints.txt: v1.13.0 =========
+  #========= FROM constraints.txt: v1.14.1 =========
   ##[[[end]]]
-  rev: v1.13.0  # MUST match version ^^^^ in constraints.txt (if the mirror is up-to-date)
+  rev: v1.14.1  # MUST match version ^^^^ in constraints.txt (if the mirror is up-to-date)
   hooks:
     - id: mypy
       additional_dependencies:  # versions from constraints.txt
@@ -90,31 +90,29 @@ repos:
   ## for pkg in packages:
   ##   print(f"- {pkg}==" + str(re.search(f'\n{pkg}==([0-9\.]*)', constraints)[1]))
   ##]]]
-      - astunparse==1.6.3
-      - attrs==24.2.0
-      - black==24.8.0
+      - attrs==24.3.0
+      - black==24.10.0
       - boltons==24.1.0
       - cached-property==2.0.1
-      - click==8.1.7
-      - cmake==3.31.1
-      - cytoolz==1.0.0
-      - deepdiff==8.0.1
+      - click==8.1.8
+      - cmake==3.31.2
+      - cytoolz==1.0.1
+      - deepdiff==8.1.1
       - devtools==0.12.2
       - diskcache==5.6.3
       - factory-boy==3.3.1
       - filelock==3.16.1
       - frozendict==2.4.6
       - gridtools-cpp==2.3.8
-      - importlib-resources==6.4.5
-      - jinja2==3.1.4
+      - jinja2==3.1.5
       - lark==1.2.2
       - mako==1.3.8
       - nanobind==2.4.0
-      - ninja==1.11.1.2
-      - numpy==1.24.4
+      - ninja==1.11.1.3
+      - numpy==1.26.4
       - packaging==24.2
       - pybind11==2.13.6
-      - setuptools==75.3.0
+      - setuptools==75.8.0
       - tabulate==0.9.0
       - typing-extensions==4.12.2
       - xxhash==3.0.0
diff --git a/README.md b/README.md
index b782e20f63..07e0e1cdee 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
 GT4Py is a Python library for generating high performance implementations of stencil kernels from a high-level definition using regular Python functions. GT4Py is part of the GridTools framework, a set of libraries and utilities to develop performance portable applications in the area of weather and climate modeling.

-**NOTE:** The `gt4py.next` subpackage contains a new version of GT4Py which is not compatible with the current _stable_ version defined in `gt4py.cartesian`. The new version is highly experimental, it only works with unstructured meshes and it requires `python >= 3.10`.
+**NOTE:** The `gt4py.next` subpackage contains a new version of GT4Py which is not compatible with the current _stable_ version defined in `gt4py.cartesian`. The new version is still experimental.

 ## 📃 Description
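The `rev:` bumps above are not edited by hand: the `##[[[cog ... ]]]` blocks regenerate them from the pins in constraints.txt. A sketch of what the embedded generator does, using the same regex as the comments above:

```python
import re

# Scrape the pinned version of a tool (e.g. mypy) out of constraints.txt
# and emit the matching pre-commit mirror revision.
constraints = open("constraints.txt").read()
version = re.search(r"mypy==([0-9\.]*)", constraints)[1]
print(f"rev: v{version}")  # -> "rev: v1.14.1" after this change
```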
diff --git a/ci/cscs-ci.yml b/ci/cscs-ci.yml
index 7adb88459e..c2a872c1c4 100644
--- a/ci/cscs-ci.yml
+++ b/ci/cscs-ci.yml
@@ -9,13 +9,6 @@ include:
     PYVERSION_PREFIX: py310
     PYVERSION: 3.10.9

-.py39: &py39
-  PYVERSION_PREFIX: py39
-  PYVERSION: 3.9.1
-
-.py38: &py38
-  PYVERSION_PREFIX: py38
-  PYVERSION: 3.8.5

 stages:
   - baseimage
@@ -78,20 +71,6 @@ build_py310_baseimage_aarch64:
   variables:
     <<: *py310

-build_py39_baseimage_x86_64:
-  extends: .build_baseimage_x86_64
-  variables:
-    <<: *py39
-build_py39_baseimage_aarch64:
-  extends: .build_baseimage_aarch64
-  variables:
-    <<: *py39
-
-build_py38_baseimage_x86_64:
-  extends: .build_baseimage_x86_64
-  variables:
-    <<: *py38
-

 .build_image:
   stage: image
@@ -128,23 +107,6 @@ build_py310_image_aarch64:
   variables:
     <<: *py310

-build_py39_image_x86_64:
-  extends: .build_image_x86_64
-  needs: [build_py39_baseimage_x86_64]
-  variables:
-    <<: *py39
-build_py39_image_aarch64:
-  extends: .build_image_aarch64
-  needs: [build_py39_baseimage_aarch64]
-  variables:
-    <<: *py39
-
-build_py38_image_x86_64:
-  extends: .build_image_x86_64
-  needs: [build_py38_baseimage_x86_64]
-  variables:
-    <<: *py38
-

 .test_helper:
   stage: test
@@ -210,20 +172,3 @@ test_py310_aarch64:
   needs: [build_py310_image_aarch64]
   variables:
     <<: *py310
-
-test_py39_x86_64:
-  extends: [.test_helper_x86_64]
-  needs: [build_py39_image_x86_64]
-  variables:
-    <<: *py39
-test_py39_aarch64:
-  extends: [.test_helper_aarch64]
-  needs: [build_py39_image_aarch64]
-  variables:
-    <<: *py39
-
-test_py38_x86_64:
-  extends: [.test_helper_x86_64]
-  needs: [build_py38_image_x86_64]
-  variables:
-    <<: *py38
diff --git a/constraints.txt b/constraints.txt
index fbdfb6e267..8b3e5e697f 100644
--- a/constraints.txt
+++ b/constraints.txt
@@ -1,182 +1,178 @@
 #
-# This file is autogenerated by pip-compile with Python 3.8
+# This file is autogenerated by pip-compile with Python 3.10
 # by the following command:
 #
 #    "tox run -e requirements-base"
 #
 aenum==3.1.15  # via dace
-alabaster==0.7.13  # via sphinx
+alabaster==1.0.0  # via sphinx
 annotated-types==0.7.0  # via pydantic
 asttokens==2.4.1  # via devtools, stack-data
-astunparse==1.6.3 ; python_version < "3.9"  # via dace, gt4py (pyproject.toml)
-attrs==24.2.0  # via gt4py (pyproject.toml), hypothesis, jsonschema, referencing
+astunparse==1.6.3  # via dace
+attrs==24.3.0  # via gt4py (pyproject.toml), hypothesis, jsonschema, referencing
 babel==2.16.0  # via sphinx
-backcall==0.2.0  # via ipython
-black==24.8.0  # via gt4py (pyproject.toml)
+black==24.10.0  # via gt4py (pyproject.toml)
 boltons==24.1.0  # via gt4py (pyproject.toml)
 bracex==2.5.post1  # via wcmatch
 build==1.2.2.post1  # via pip-tools
-bump-my-version==0.28.1  # via -r requirements-dev.in
+bump-my-version==0.29.0  # via -r requirements-dev.in
 cached-property==2.0.1  # via gt4py (pyproject.toml)
 cachetools==5.5.0  # via tox
-certifi==2024.8.30  # via requests
+certifi==2024.12.14  # via requests
 cfgv==3.4.0  # via pre-commit
 chardet==5.2.0  # via tox
-charset-normalizer==3.4.0  # via requests
-clang-format==19.1.4  # via -r requirements-dev.in, gt4py (pyproject.toml)
-click==8.1.7  # via black, bump-my-version, gt4py (pyproject.toml), pip-tools, rich-click
-cmake==3.31.1  # via gt4py (pyproject.toml)
+charset-normalizer==3.4.1  # via requests
+clang-format==19.1.6  # via -r requirements-dev.in, gt4py (pyproject.toml)
+click==8.1.8  # via black, bump-my-version, gt4py (pyproject.toml), pip-tools, rich-click
+cmake==3.31.2  # via gt4py (pyproject.toml)
 cogapp==3.4.1  # via -r requirements-dev.in
 colorama==0.4.6  # via tox
 comm==0.2.2  # via ipykernel
-contourpy==1.1.1  # via matplotlib
-coverage==7.6.1  # via -r requirements-dev.in, pytest-cov
+contourpy==1.3.1  # via matplotlib
+coverage==7.6.10  # via -r requirements-dev.in, pytest-cov
 cycler==0.12.1  # via matplotlib
-cytoolz==1.0.0  # via gt4py (pyproject.toml)
+cytoolz==1.0.1  # via gt4py (pyproject.toml)
 dace==1.0.0  # via gt4py (pyproject.toml)
 darglint==1.8.1  # via -r requirements-dev.in
-debugpy==1.8.9  # via ipykernel
+debugpy==1.8.11  # via ipykernel
 decorator==5.1.1  # via ipython
-deepdiff==8.0.1  # via gt4py (pyproject.toml)
+deepdiff==8.1.1  # via gt4py (pyproject.toml)
 devtools==0.12.2  # via gt4py (pyproject.toml)
 dill==0.3.9  # via dace
 diskcache==5.6.3  # via gt4py (pyproject.toml)
 distlib==0.3.9  # via virtualenv
-docutils==0.20.1  # via sphinx, sphinx-rtd-theme
-exceptiongroup==1.2.2  # via hypothesis, pytest
+docutils==0.21.2  # via sphinx, sphinx-rtd-theme
+exceptiongroup==1.2.2  # via hypothesis, ipython, pytest
 execnet==2.1.1  # via pytest-cache, pytest-xdist
 executing==2.1.0  # via devtools, stack-data
 factory-boy==3.3.1  # via gt4py (pyproject.toml), pytest-factoryboy
-faker==33.1.0  # via factory-boy
+faker==33.3.0  # via factory-boy
 fastjsonschema==2.21.1  # via nbformat
 filelock==3.16.1  # via gt4py (pyproject.toml), tox, virtualenv
-fonttools==4.55.2  # via matplotlib
+fonttools==4.55.3  # via matplotlib
 fparser==0.2.0  # via dace
 frozendict==2.4.6  # via gt4py (pyproject.toml)
-gitdb==4.0.11  # via gitpython
-gitpython==3.1.43  # via tach
+gitdb==4.0.12  # via gitpython
+gitpython==3.1.44  # via tach
 gridtools-cpp==2.3.8  # via gt4py (pyproject.toml)
-hypothesis==6.113.0  # via -r requirements-dev.in, gt4py (pyproject.toml)
-identify==2.6.1  # via pre-commit
+hypothesis==6.123.11  # via -r requirements-dev.in, gt4py (pyproject.toml)
+identify==2.6.5  # via pre-commit
 idna==3.10  # via requests
 imagesize==1.4.1  # via sphinx
-importlib-metadata==8.5.0  # via build, jupyter-client, sphinx
-importlib-resources==6.4.5 ; python_version < "3.9"  # via gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib
 inflection==0.5.1  # via pytest-factoryboy
 iniconfig==2.0.0  # via pytest
 ipykernel==6.29.5  # via nbmake
-ipython==8.12.3  # via ipykernel
+ipython==8.31.0  # via ipykernel
+jax==0.4.38  # via gt4py (pyproject.toml)
+jaxlib==0.4.38  # via jax
 jedi==0.19.2  # via ipython
-jinja2==3.1.4  # via gt4py (pyproject.toml), sphinx
+jinja2==3.1.5  # via gt4py (pyproject.toml), sphinx
 jsonschema==4.23.0  # via nbformat
-jsonschema-specifications==2023.12.1  # via jsonschema
+jsonschema-specifications==2024.10.1  # via jsonschema
 jupyter-client==8.6.3  # via ipykernel, nbclient
-jupyter-core==5.7.2  # via ipykernel, jupyter-client, nbformat
-jupytext==1.16.4  # via -r requirements-dev.in
-kiwisolver==1.4.7  # via matplotlib
+jupyter-core==5.7.2  # via ipykernel, jupyter-client, nbclient, nbformat
+jupytext==1.16.6  # via -r requirements-dev.in
+kiwisolver==1.4.8  # via matplotlib
 lark==1.2.2  # via gt4py (pyproject.toml)
 mako==1.3.8  # via gt4py (pyproject.toml)
 markdown-it-py==3.0.0  # via jupytext, mdit-py-plugins, rich
-markupsafe==2.1.5  # via jinja2, mako
-matplotlib==3.7.5  # via -r requirements-dev.in
+markupsafe==3.0.2  # via jinja2, mako
+matplotlib==3.10.0  # via -r requirements-dev.in
 matplotlib-inline==0.1.7  # via ipykernel, ipython
 mdit-py-plugins==0.4.2  # via jupytext
 mdurl==0.1.2  # via markdown-it-py
+ml-dtypes==0.5.1  # via jax, jaxlib
 mpmath==1.3.0  # via sympy
-mypy==1.13.0  # via -r requirements-dev.in
+mypy==1.14.1  # via -r requirements-dev.in
 mypy-extensions==1.0.0  # via black, mypy
 nanobind==2.4.0  # via gt4py (pyproject.toml)
-nbclient==0.6.8  # via nbmake
+nbclient==0.10.2  # via nbmake
 nbformat==5.10.4  # via jupytext, nbclient, nbmake
-nbmake==1.5.4  # via -r requirements-dev.in
-nest-asyncio==1.6.0  # via ipykernel, nbclient
-networkx==3.1  # via dace, tach
-ninja==1.11.1.2  # via gt4py (pyproject.toml)
+nbmake==1.5.5  # via -r requirements-dev.in
+nest-asyncio==1.6.0  # via ipykernel
+networkx==3.4.2  # via dace, tach
+ninja==1.11.1.3  # via gt4py (pyproject.toml)
 nodeenv==1.9.1  # via pre-commit
-numpy==1.24.4  # via contourpy, dace, gt4py (pyproject.toml), matplotlib, scipy
-orderly-set==5.2.2  # via deepdiff
+numpy==1.26.4  # via contourpy, dace, gt4py (pyproject.toml), jax, jaxlib, matplotlib, ml-dtypes, scipy
+opt-einsum==3.4.0  # via jax
+orderly-set==5.2.3  # via deepdiff
 packaging==24.2  # via black, build, dace, gt4py (pyproject.toml), ipykernel, jupytext, matplotlib, pipdeptree, pyproject-api, pytest, pytest-factoryboy, setuptools-scm, sphinx, tox
 parso==0.8.4  # via jedi
 pathspec==0.12.1  # via black
 pexpect==4.9.0  # via ipython
-pickleshare==0.7.5  # via ipython
-pillow==10.4.0  # via matplotlib
+pillow==11.1.0  # via matplotlib
 pip-tools==7.4.1  # via -r requirements-dev.in
 pipdeptree==2.24.0  # via -r requirements-dev.in
-pkgutil-resolve-name==1.3.10  # via jsonschema
 platformdirs==4.3.6  # via black, jupyter-core, tox, virtualenv
 pluggy==1.5.0  # via pytest, tox
 ply==3.11  # via dace
-pre-commit==3.5.0  # via -r requirements-dev.in
-prompt-toolkit==3.0.36  # via ipython, questionary, tach
-psutil==6.1.0  # via -r requirements-dev.in, ipykernel, pytest-xdist
+pre-commit==4.0.1  # via -r requirements-dev.in
+prompt-toolkit==3.0.48  # via ipython, questionary, tach
+psutil==6.1.1  # via -r requirements-dev.in, ipykernel, pytest-xdist
 ptyprocess==0.7.0  # via pexpect
 pure-eval==0.2.3  # via stack-data
 pybind11==2.13.6  # via gt4py (pyproject.toml)
-pydantic==2.10.3  # via bump-my-version, pydantic-settings
-pydantic-core==2.27.1  # via pydantic
-pydantic-settings==2.6.1  # via bump-my-version
-pydot==3.0.3  # via tach
-pygments==2.18.0  # via -r requirements-dev.in, devtools, ipython, nbmake, rich, sphinx
-pyparsing==3.1.4  # via matplotlib, pydot
+pydantic==2.10.4  # via bump-my-version, pydantic-settings
+pydantic-core==2.27.2  # via pydantic
+pydantic-settings==2.7.1  # via bump-my-version
+pydot==3.0.4  # via tach
+pygments==2.19.1  # via -r requirements-dev.in, devtools, ipython, nbmake, rich, sphinx
+pyparsing==3.2.1  # via matplotlib, pydot
 pyproject-api==1.8.0  # via tox
 pyproject-hooks==1.2.0  # via build, pip-tools
 pytest==8.3.4  # via -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, pytest-xdist
 pytest-cache==1.0  # via -r requirements-dev.in
-pytest-cov==5.0.0  # via -r requirements-dev.in
+pytest-cov==6.0.0  # via -r requirements-dev.in
 pytest-custom-exit-code==0.3.0  # via -r requirements-dev.in
 pytest-factoryboy==2.7.0  # via -r requirements-dev.in
 pytest-instafail==0.5.0  # via -r requirements-dev.in
 pytest-xdist==3.6.1  # via -r requirements-dev.in
 python-dateutil==2.9.0.post0  # via faker, jupyter-client, matplotlib
 python-dotenv==1.0.1  # via pydantic-settings
-pytz==2024.2  # via babel
 pyyaml==6.0.2  # via dace, jupytext, pre-commit, tach
 pyzmq==26.2.0  # via ipykernel, jupyter-client
-questionary==2.0.1  # via bump-my-version
+questionary==2.1.0  # via bump-my-version
 referencing==0.35.1  # via jsonschema, jsonschema-specifications
 requests==2.32.3  # via sphinx
 rich==13.9.4  # via bump-my-version, rich-click, tach
 rich-click==1.8.5  # via bump-my-version
-rpds-py==0.20.1  # via jsonschema, referencing
-ruff==0.8.2  # via -r requirements-dev.in
-scipy==1.10.1  # via gt4py (pyproject.toml)
+rpds-py==0.22.3  # via jsonschema, referencing
+ruff==0.8.6  # via -r requirements-dev.in
+scipy==1.15.0  # via gt4py (pyproject.toml), jax, jaxlib
 setuptools-scm==8.1.0  # via fparser
 six==1.17.0  # via asttokens, astunparse, python-dateutil
-smmap==5.0.1  # via gitdb
+smmap==5.0.2  # via gitdb
 snowballstemmer==2.2.0  # via sphinx
 sortedcontainers==2.4.0  # via hypothesis
-sphinx==7.1.2  # via -r requirements-dev.in, sphinx-rtd-theme, sphinxcontrib-jquery
+sphinx==8.1.3  # via -r requirements-dev.in, sphinx-rtd-theme, sphinxcontrib-jquery
 sphinx-rtd-theme==3.0.2  # via -r requirements-dev.in
-sphinxcontrib-applehelp==1.0.4  # via sphinx
-sphinxcontrib-devhelp==1.0.2  # via sphinx
-sphinxcontrib-htmlhelp==2.0.1  # via sphinx
+sphinxcontrib-applehelp==2.0.0  # via sphinx
+sphinxcontrib-devhelp==2.0.0  # via sphinx
+sphinxcontrib-htmlhelp==2.1.0  # via sphinx
 sphinxcontrib-jquery==4.1  # via sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1  # via sphinx
-sphinxcontrib-qthelp==1.0.3  # via sphinx
-sphinxcontrib-serializinghtml==1.1.5  # via sphinx
+sphinxcontrib-qthelp==2.0.0  # via sphinx
+sphinxcontrib-serializinghtml==2.0.0  # via sphinx
 stack-data==0.6.3  # via ipython
-stdlib-list==0.10.0  # via tach
 sympy==1.13.3  # via dace
 tabulate==0.9.0  # via gt4py (pyproject.toml)
-tach==0.16.5  # via -r requirements-dev.in
-tomli==2.2.1 ; python_version < "3.11"  # via -r requirements-dev.in, black, build, coverage, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, tach, tox
-tomli-w==1.0.0  # via tach
+tach==0.19.5  # via -r requirements-dev.in
+tomli==2.2.1 ; python_version < "3.11"  # via -r requirements-dev.in, black, build, coverage, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, sphinx, tach, tox
+tomli-w==1.1.0  # via tach
 tomlkit==0.13.2  # via bump-my-version
 toolz==1.0.0  # via cytoolz
 tornado==6.4.2  # via ipykernel, jupyter-client
 tox==4.23.2  # via -r requirements-dev.in
 traitlets==5.14.3  # via comm, ipykernel, ipython, jupyter-client, jupyter-core, matplotlib-inline, nbclient, nbformat
 types-tabulate==0.9.0.20241207  # via -r requirements-dev.in
-typing-extensions==4.12.2  # via annotated-types, black, faker, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, setuptools-scm, tox
-urllib3==2.2.3  # via requests
-virtualenv==20.28.0  # via pre-commit, tox
+typing-extensions==4.12.2  # via black, faker, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, tox
+urllib3==2.3.0  # via requests
+virtualenv==20.28.1  # via pre-commit, tox
 wcmatch==10.0  # via bump-my-version
 wcwidth==0.2.13  # via prompt-toolkit
 wheel==0.45.1  # via astunparse, pip-tools
 xxhash==3.0.0  # via gt4py (pyproject.toml)
-zipp==3.20.2  # via importlib-metadata, importlib-resources

 # The following packages are considered to be unsafe in a requirements file:
 pip==24.3.1  # via pip-tools, pipdeptree
-setuptools==75.3.0  # via gt4py (pyproject.toml), pip-tools, setuptools-scm
+setuptools==75.8.0  # via gt4py (pyproject.toml), pip-tools, setuptools-scm
diff --git a/min-extra-requirements-test.txt b/min-extra-requirements-test.txt
index 6d75415181..a4924cc09c 100644
--- a/min-extra-requirements-test.txt
+++ b/min-extra-requirements-test.txt
@@ -49,7 +49,6 @@
 ##   result.append(str(make_min_req(r)))
 ## print("\n".join(sorted(result)))
 ##]]]
-astunparse==1.6.3; python_version < "3.9"
 attrs==21.3
 black==22.3
 boltons==20.1
@@ -71,8 +70,7 @@ filelock==3.16.1
 frozendict==2.3
 gridtools-cpp==2.3.8
 hypothesis==6.0.0
-importlib-resources==5.0; python_version < "3.9"
-jax[cpu]==0.4.18; python_version >= "3.10"
+jax[cpu]==0.4.18
 jinja2==3.0.0
 jupytext==1.14
 lark==1.1.2
diff --git a/min-requirements-test.txt b/min-requirements-test.txt
index 991b7a6941..4b24385410 100644
--- a/min-requirements-test.txt
+++ b/min-requirements-test.txt
@@ -46,7 +46,6 @@
 ##   result.append(str(make_min_req(r)))
 ## print("\n".join(sorted(result)))
 ##]]]
-astunparse==1.6.3; python_version < "3.9"
 attrs==21.3
 black==22.3
 boltons==20.1
@@ -67,7 +66,6 @@ filelock==3.16.1
 frozendict==2.3
 gridtools-cpp==2.3.8
 hypothesis==6.0.0
-importlib-resources==5.0; python_version < "3.9"
 jinja2==3.0.0
 jupytext==1.14
 lark==1.1.2
diff --git a/pyproject.toml b/pyproject.toml
index d086363ec4..878bc33484 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,8 +14,6 @@ classifiers = [
   'License :: OSI Approved :: BSD License',
   'Operating System :: POSIX',
   'Programming Language :: Python',
-  'Programming Language :: Python :: 3.8',
-  'Programming Language :: Python :: 3.9',
   'Programming Language :: Python :: 3.10',
   'Programming Language :: Python :: 3.11',
   'Programming Language :: Python :: Implementation :: CPython',
@@ -24,7 +22,6 @@ classifiers = [
   'Topic :: Scientific/Engineering :: Physics'
 ]
 dependencies = [
-  "astunparse>=1.6.3;python_version<'3.9'",
   'attrs>=21.3',
   'black>=22.3',
   'boltons>=20.1',
@@ -39,7 +36,6 @@ dependencies = [
   'filelock>=3.16.1',
   'frozendict>=2.3',
   'gridtools-cpp>=2.3.8,==2.*',
-  "importlib-resources>=5.0;python_version<'3.9'",
   'jinja2>=3.0.0',
   'lark>=1.1.2',
   'mako>=1.1',
@@ -67,7 +63,7 @@ keywords = [
 license = {file = 'LICENSE.txt'}
 name = 'gt4py'
 readme = 'README.md'
-requires-python = '>=3.8'
+requires-python = '>=3.10'

 [project.optional-dependencies]
 # Bundles
@@ -80,9 +76,9 @@ cuda12 = ['cupy-cuda12x>=12.0']
 dace = ['dace>=1.0.0,<1.1.0']  # v1.x will contain breaking changes, see https://github.com/spcl/dace/milestone/4
 formatting = ['clang-format>=9.0']
 gpu = ['cupy>=12.0']
-jax-cpu = ['jax[cpu]>=0.4.18; python_version>="3.10"']
-jax-cuda11 = ['jax[cuda11_pip]>=0.4.18; python_version>="3.10"']
-jax-cuda12 = ['jax[cuda12_pip]>=0.4.18; python_version>="3.10"']
+jax-cpu = ['jax[cpu]>=0.4.18']
+jax-cuda11 = ['jax[cuda11_pip]>=0.4.18']
+jax-cuda12 = ['jax[cuda12_pip]>=0.4.18']
 performance = ['scipy>=1.9.2']
 rocm-43 = ['cupy-rocm-4-3']
 testing = ['hypothesis>=6.0.0', 'pytest>=7.0']
@@ -239,32 +235,7 @@ markers = [
   'all: special marker that skips all tests',
   'requires_atlas: tests that require `atlas4py` bindings package',
   'requires_dace: tests that require `dace` package',
-  'requires_gpu: tests that require a NVidia GPU (`cupy` and `cudatoolkit` are required)',
-  'uses_applied_shifts: tests that require backend support for applied-shifts',
-  'uses_constant_fields: tests that require backend support for constant fields',
-  'uses_dynamic_offsets: tests that require backend support for dynamic offsets',
-  'uses_floordiv: tests that require backend support for floor division',
-  'uses_if_stmts: tests that require backend support for if-statements',
-  'uses_index_fields: tests that require backend support for index fields',
-  'uses_negative_modulo: tests that require backend support for modulo on negative numbers',
-  'uses_origin: tests that require backend support for domain origin',
-  'uses_reduction_with_only_sparse_fields: tests that require backend support for reductions with sparse fields',
-  'uses_scan: tests that uses scan',
-  'uses_scan_in_field_operator: tests that require backend support for scan in field operator',
-  'uses_scan_without_field_args: tests that require calls to scan that do not have any fields as arguments',
-  'uses_scan_nested: tests that use nested scans',
-  'uses_scan_requiring_projector: tests need a projector implementation in gtfn',
-  'uses_sparse_fields: tests that require backend support for sparse fields',
-  'uses_sparse_fields_as_output: tests that require backend support for writing sparse fields',
-  'uses_strided_neighbor_offset: tests that require backend support for strided neighbor offset',
-  'uses_tuple_args: tests that require backend support for tuple arguments',
-  'uses_tuple_returns: tests that require backend support for tuple results',
-  'uses_zero_dimensional_fields: tests that require backend support for zero-dimensional fields',
-  'uses_cartesian_shift: tests that use a Cartesian connectivity',
-  'uses_unstructured_shift: tests that use a unstructured connectivity',
-  'uses_max_over: tests that use the max_over builtin',
-  'uses_mesh_with_skip_values: tests that use a mesh with skip values',
-  'checks_specific_error: tests that rely on the backend to produce a specific error message'
+  'requires_gpu: tests that require a NVidia GPU (`cupy` and `cudatoolkit` are required)'
 ]
 norecursedirs = ['dist', 'build', 'cpp_backend_tests/build*', '_local/*', '.*']
 testpaths = 'tests'
@@ -275,7 +246,7 @@
 line-length = 100  # It should be the same as in `tool.black.line-length` above
 respect-gitignore = true
 show-fixes = true
 # show-source = true
-target-version = 'py38'
+target-version = 'py310'

 [tool.ruff.format]
 docstring-code-format = true
@@ -292,7 +263,8 @@ docstring-code-format = true
 # NPY: NumPy-specific rules
 # RUF: Ruff-specific rules
 ignore = [
-  'E501'  # [line-too-long]
+  'E501',  # [line-too-long]
+  'B905'   # [zip-without-explicit-strict]  # TODO(egparedes): Reevaluate this rule
 ]
 select = ['E', 'F', 'I', 'B', 'A', 'T10', 'ERA', 'NPY', 'RUF']
 typing-modules = ['gt4py.eve.extended_typing']
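Ruff's B905 rule, newly ignored above, flags `zip()` calls that do not pass `strict=`; the parameter only exists on Python 3.10+, so the rule becomes applicable exactly when `target-version = 'py310'` is set. A small illustration of the behavior the rule nudges towards:

```python
a, b = [1, 2, 3], ["x", "y"]

# Default zip() silently truncates to the shorter input.
print(list(zip(a, b)))  # [(1, 'x'), (2, 'y')]

# Python 3.10+: strict=True turns a length mismatch into an error.
try:
    list(zip(a, b, strict=True))
except ValueError as exc:
    print(exc)  # zip() argument 2 is shorter than argument 1
```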
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 40554cef13..463b1bc6ac 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,181 +1,178 @@
 #
-# This file is autogenerated by pip-compile with Python 3.8
+# This file is autogenerated by pip-compile with Python 3.10
 # by the following command:
 #
 #    "tox run -e requirements-base"
 #
 aenum==3.1.15  # via -c constraints.txt, dace
-alabaster==0.7.13  # via -c constraints.txt, sphinx
+alabaster==1.0.0  # via -c constraints.txt, sphinx
 annotated-types==0.7.0  # via -c constraints.txt, pydantic
 asttokens==2.4.1  # via -c constraints.txt, devtools, stack-data
-astunparse==1.6.3 ; python_version < "3.9"  # via -c constraints.txt, dace, gt4py (pyproject.toml)
-attrs==24.2.0  # via -c constraints.txt, gt4py (pyproject.toml), hypothesis, jsonschema, referencing
+astunparse==1.6.3  # via -c constraints.txt, dace
+attrs==24.3.0  # via -c constraints.txt, gt4py (pyproject.toml), hypothesis, jsonschema, referencing
 babel==2.16.0  # via -c constraints.txt, sphinx
-backcall==0.2.0  # via -c constraints.txt, ipython
-black==24.8.0  # via -c constraints.txt, gt4py (pyproject.toml)
+black==24.10.0  # via -c constraints.txt, gt4py (pyproject.toml)
 boltons==24.1.0  # via -c constraints.txt, gt4py (pyproject.toml)
 bracex==2.5.post1  # via -c constraints.txt, wcmatch
 build==1.2.2.post1  # via -c constraints.txt, pip-tools
-bump-my-version==0.28.1  # via -c constraints.txt, -r requirements-dev.in
+bump-my-version==0.29.0  # via -c constraints.txt, -r requirements-dev.in
 cached-property==2.0.1  # via -c constraints.txt, gt4py (pyproject.toml)
 cachetools==5.5.0  # via -c constraints.txt, tox
-certifi==2024.8.30  # via -c constraints.txt, requests
+certifi==2024.12.14  # via -c constraints.txt, requests
 cfgv==3.4.0  # via -c constraints.txt, pre-commit
 chardet==5.2.0  # via -c constraints.txt, tox
-charset-normalizer==3.4.0  # via -c constraints.txt, requests
-clang-format==19.1.4  # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml)
-click==8.1.7  # via -c constraints.txt, black, bump-my-version, gt4py (pyproject.toml), pip-tools, rich-click
-cmake==3.31.1  # via -c constraints.txt, gt4py (pyproject.toml)
+charset-normalizer==3.4.1  # via -c constraints.txt, requests
+clang-format==19.1.6  # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml)
+click==8.1.8  # via -c constraints.txt, black, bump-my-version, gt4py (pyproject.toml), pip-tools, rich-click
+cmake==3.31.2  # via -c constraints.txt, gt4py (pyproject.toml)
 cogapp==3.4.1  # via -c constraints.txt, -r requirements-dev.in
 colorama==0.4.6  # via -c constraints.txt, tox
 comm==0.2.2  # via -c constraints.txt, ipykernel
-contourpy==1.1.1  # via -c constraints.txt, matplotlib
-coverage[toml]==7.6.1  # via -c constraints.txt, -r requirements-dev.in, pytest-cov
+contourpy==1.3.1  # via -c constraints.txt, matplotlib
+coverage[toml]==7.6.10  # via -c constraints.txt, -r requirements-dev.in, pytest-cov
 cycler==0.12.1  # via -c constraints.txt, matplotlib
-cytoolz==1.0.0  # via -c constraints.txt, gt4py (pyproject.toml)
+cytoolz==1.0.1  # via -c constraints.txt, gt4py (pyproject.toml)
 dace==1.0.0  # via -c constraints.txt, gt4py (pyproject.toml)
 darglint==1.8.1  # via -c constraints.txt, -r requirements-dev.in
-debugpy==1.8.9  # via -c constraints.txt, ipykernel
+debugpy==1.8.11  # via -c constraints.txt, ipykernel
 decorator==5.1.1  # via -c constraints.txt, ipython
-deepdiff==8.0.1  # via -c constraints.txt, gt4py (pyproject.toml)
+deepdiff==8.1.1  # via -c constraints.txt, gt4py (pyproject.toml)
 devtools==0.12.2  # via -c constraints.txt, gt4py (pyproject.toml)
 dill==0.3.9  # via -c constraints.txt, dace
 diskcache==5.6.3  # via -c constraints.txt, gt4py (pyproject.toml)
 distlib==0.3.9  # via -c constraints.txt, virtualenv
-docutils==0.20.1  # via -c constraints.txt, sphinx, sphinx-rtd-theme
-exceptiongroup==1.2.2  # via -c constraints.txt, hypothesis, pytest
+docutils==0.21.2  # via -c constraints.txt, sphinx, sphinx-rtd-theme
+exceptiongroup==1.2.2  # via -c constraints.txt, hypothesis, ipython, pytest
 execnet==2.1.1  # via -c constraints.txt, pytest-cache, pytest-xdist
 executing==2.1.0  # via -c constraints.txt, devtools, stack-data
 factory-boy==3.3.1  # via -c constraints.txt, gt4py (pyproject.toml), pytest-factoryboy
-faker==33.1.0  # via -c constraints.txt, factory-boy
+faker==33.3.0  # via -c constraints.txt, factory-boy
 fastjsonschema==2.21.1  # via -c constraints.txt, nbformat
 filelock==3.16.1  # via -c constraints.txt, gt4py (pyproject.toml), tox, virtualenv
-fonttools==4.55.2  # via -c constraints.txt, matplotlib
+fonttools==4.55.3  # via -c constraints.txt, matplotlib
 fparser==0.2.0  # via -c constraints.txt, dace
 frozendict==2.4.6  # via -c constraints.txt, gt4py (pyproject.toml)
-gitdb==4.0.11  # via -c constraints.txt, gitpython
-gitpython==3.1.43  # via -c constraints.txt, tach
+gitdb==4.0.12  # via -c constraints.txt, gitpython
+gitpython==3.1.44  # via -c constraints.txt, tach
 gridtools-cpp==2.3.8  # via -c constraints.txt, gt4py (pyproject.toml)
-hypothesis==6.113.0  # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml)
-identify==2.6.1  # via -c constraints.txt, pre-commit
+hypothesis==6.123.11  # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml)
+identify==2.6.5  # via -c constraints.txt, pre-commit
 idna==3.10  # via -c constraints.txt, requests
 imagesize==1.4.1  # via -c constraints.txt, sphinx
-importlib-metadata==8.5.0  # via -c constraints.txt, build, jupyter-client, sphinx
-importlib-resources==6.4.5 ; python_version < "3.9"  # via -c constraints.txt, gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib
 inflection==0.5.1  # via -c constraints.txt, pytest-factoryboy
 iniconfig==2.0.0  # via -c constraints.txt, pytest
 ipykernel==6.29.5  # via -c constraints.txt, nbmake
-ipython==8.12.3  # via -c constraints.txt, ipykernel
+ipython==8.31.0  # via -c constraints.txt, ipykernel
+jax[cpu]==0.4.38  # via -c constraints.txt, gt4py (pyproject.toml)
+jaxlib==0.4.38  # via -c constraints.txt, jax
 jedi==0.19.2  # via -c constraints.txt, ipython
-jinja2==3.1.4  # via -c constraints.txt, gt4py (pyproject.toml), sphinx
+jinja2==3.1.5  # via -c constraints.txt, gt4py (pyproject.toml), sphinx
 jsonschema==4.23.0  # via -c constraints.txt, nbformat
-jsonschema-specifications==2023.12.1  # via -c constraints.txt, jsonschema
+jsonschema-specifications==2024.10.1  # via -c constraints.txt, jsonschema
 jupyter-client==8.6.3  # via -c constraints.txt, ipykernel, nbclient
-jupyter-core==5.7.2  # via -c constraints.txt, ipykernel, jupyter-client, nbformat
-jupytext==1.16.4  # via -c constraints.txt, -r requirements-dev.in
-kiwisolver==1.4.7  # via -c constraints.txt, matplotlib
+jupyter-core==5.7.2  # via -c constraints.txt, ipykernel, jupyter-client, nbclient, nbformat
+jupytext==1.16.6  # via -c constraints.txt, -r requirements-dev.in
+kiwisolver==1.4.8  # via -c constraints.txt, matplotlib
 lark==1.2.2  # via -c constraints.txt, gt4py (pyproject.toml)
 mako==1.3.8  # via -c constraints.txt, gt4py (pyproject.toml)
 markdown-it-py==3.0.0  # via -c constraints.txt, jupytext, mdit-py-plugins, rich
-markupsafe==2.1.5  # via -c constraints.txt, jinja2, mako
-matplotlib==3.7.5  # via -c constraints.txt, -r requirements-dev.in
+markupsafe==3.0.2  # via -c constraints.txt, jinja2, mako
+matplotlib==3.10.0  # via -c constraints.txt, -r requirements-dev.in
 matplotlib-inline==0.1.7  # via -c constraints.txt, ipykernel, ipython
 mdit-py-plugins==0.4.2  # via -c constraints.txt, jupytext
 mdurl==0.1.2  # via -c constraints.txt, markdown-it-py
+ml-dtypes==0.5.1  # via -c constraints.txt, jax, jaxlib
 mpmath==1.3.0  # via -c constraints.txt, sympy
-mypy==1.13.0  # via -c constraints.txt, -r requirements-dev.in
+mypy==1.14.1  # via -c constraints.txt, -r requirements-dev.in
 mypy-extensions==1.0.0  # via -c constraints.txt, black, mypy
 nanobind==2.4.0  # via -c constraints.txt, gt4py (pyproject.toml)
-nbclient==0.6.8  # via -c constraints.txt, nbmake
+nbclient==0.10.2  # via -c constraints.txt, nbmake
 nbformat==5.10.4  # via -c constraints.txt, jupytext, nbclient, nbmake
-nbmake==1.5.4  # via -c constraints.txt, -r requirements-dev.in
-nest-asyncio==1.6.0  # via -c constraints.txt, ipykernel, nbclient
-networkx==3.1  # via -c constraints.txt, dace, tach
-ninja==1.11.1.2  # via -c constraints.txt, gt4py (pyproject.toml)
+nbmake==1.5.5  # via -c constraints.txt, -r requirements-dev.in
+nest-asyncio==1.6.0  # via -c constraints.txt, ipykernel
+networkx==3.4.2  # via -c constraints.txt, dace, tach
+ninja==1.11.1.3  # via -c constraints.txt, gt4py (pyproject.toml)
 nodeenv==1.9.1  # via -c constraints.txt, pre-commit
-numpy==1.24.4  # via -c constraints.txt, contourpy, dace, gt4py (pyproject.toml), matplotlib
-orderly-set==5.2.2  # via -c constraints.txt, deepdiff
+numpy==1.26.4  # via -c constraints.txt, contourpy, dace, gt4py (pyproject.toml), jax, jaxlib, matplotlib, ml-dtypes, scipy
+opt-einsum==3.4.0  # via -c constraints.txt, jax
+orderly-set==5.2.3  # via -c constraints.txt, deepdiff
 packaging==24.2  # via -c constraints.txt, black, build, dace, gt4py (pyproject.toml), ipykernel, jupytext, matplotlib, pipdeptree, pyproject-api, pytest, pytest-factoryboy, setuptools-scm, sphinx, tox
 parso==0.8.4  # via -c constraints.txt, jedi
 pathspec==0.12.1  # via -c constraints.txt, black
 pexpect==4.9.0  # via -c constraints.txt, ipython
-pickleshare==0.7.5  # via -c constraints.txt, ipython
-pillow==10.4.0  # via -c constraints.txt, matplotlib
+pillow==11.1.0  # via -c constraints.txt, matplotlib
 pip-tools==7.4.1  # via -c constraints.txt, -r requirements-dev.in
 pipdeptree==2.24.0  # via -c constraints.txt, -r requirements-dev.in
-pkgutil-resolve-name==1.3.10  # via -c constraints.txt, jsonschema
 platformdirs==4.3.6  # via -c constraints.txt, black, jupyter-core, tox, virtualenv
 pluggy==1.5.0  # via -c constraints.txt, pytest, tox
 ply==3.11  # via -c constraints.txt, dace
-pre-commit==3.5.0  # via -c constraints.txt, -r requirements-dev.in
-prompt-toolkit==3.0.36  # via -c constraints.txt, ipython, questionary, tach
-psutil==6.1.0  # via -c constraints.txt, -r requirements-dev.in, ipykernel, pytest-xdist
+pre-commit==4.0.1  # via -c constraints.txt, -r requirements-dev.in
+prompt-toolkit==3.0.48  # via -c constraints.txt, ipython, questionary, tach
+psutil==6.1.1  # via -c constraints.txt, -r requirements-dev.in, ipykernel, pytest-xdist
 ptyprocess==0.7.0  # via -c constraints.txt, pexpect
 pure-eval==0.2.3  # via -c constraints.txt, stack-data
 pybind11==2.13.6  # via -c constraints.txt, gt4py (pyproject.toml)
-pydantic==2.10.3  # via -c constraints.txt, bump-my-version, pydantic-settings
-pydantic-core==2.27.1  # via -c constraints.txt, pydantic
-pydantic-settings==2.6.1  # via -c constraints.txt, bump-my-version
-pydot==3.0.3  # via -c constraints.txt, tach
-pygments==2.18.0  # via -c constraints.txt, -r requirements-dev.in, devtools, ipython, nbmake, rich, sphinx
-pyparsing==3.1.4  # via -c constraints.txt, matplotlib, pydot
+pydantic==2.10.4  # via -c constraints.txt, bump-my-version, pydantic-settings
+pydantic-core==2.27.2  # via -c constraints.txt, pydantic
+pydantic-settings==2.7.1  # via -c constraints.txt, bump-my-version
+pydot==3.0.4  # via -c constraints.txt, tach
+pygments==2.19.1  # via -c constraints.txt, -r requirements-dev.in, devtools, ipython, nbmake, rich, sphinx
+pyparsing==3.2.1  # via -c constraints.txt, matplotlib, pydot
 pyproject-api==1.8.0  # via -c constraints.txt, tox
 pyproject-hooks==1.2.0  # via -c constraints.txt, build, pip-tools
 pytest==8.3.4  # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, pytest-xdist
 pytest-cache==1.0  # via -c constraints.txt, -r requirements-dev.in
-pytest-cov==5.0.0  # via -c constraints.txt, -r requirements-dev.in
+pytest-cov==6.0.0  # via -c constraints.txt, -r requirements-dev.in
 pytest-custom-exit-code==0.3.0  # via -c constraints.txt, -r requirements-dev.in
 pytest-factoryboy==2.7.0  # via -c constraints.txt, -r requirements-dev.in
 pytest-instafail==0.5.0  # via -c constraints.txt, -r requirements-dev.in
 pytest-xdist[psutil]==3.6.1  # via -c constraints.txt, -r requirements-dev.in
 python-dateutil==2.9.0.post0  # via -c constraints.txt, faker, jupyter-client, matplotlib
 python-dotenv==1.0.1  # via -c constraints.txt, pydantic-settings
-pytz==2024.2  # via -c constraints.txt, babel
 pyyaml==6.0.2  # via -c constraints.txt, dace, jupytext, pre-commit, tach
 pyzmq==26.2.0  # via -c constraints.txt, ipykernel, jupyter-client
-questionary==2.0.1  # via -c constraints.txt, bump-my-version
+questionary==2.1.0  # via -c constraints.txt, bump-my-version
 referencing==0.35.1  # via -c constraints.txt, jsonschema, jsonschema-specifications
 requests==2.32.3  # via -c constraints.txt, sphinx
 rich==13.9.4  # via -c constraints.txt, bump-my-version, rich-click, tach
 rich-click==1.8.5  # via -c constraints.txt, bump-my-version
-rpds-py==0.20.1  # via -c constraints.txt, jsonschema, referencing
-ruff==0.8.2  # via -c constraints.txt, -r requirements-dev.in
+rpds-py==0.22.3  # via -c constraints.txt, jsonschema, referencing
+ruff==0.8.6  # via -c constraints.txt, -r requirements-dev.in
+scipy==1.15.0  # via -c constraints.txt, jax, jaxlib
 setuptools-scm==8.1.0  # via -c constraints.txt, fparser
 six==1.17.0  # via -c constraints.txt, asttokens, astunparse, python-dateutil
-smmap==5.0.1  # via -c constraints.txt, gitdb
+smmap==5.0.2  # via -c constraints.txt, gitdb
 snowballstemmer==2.2.0  # via -c constraints.txt, sphinx
 sortedcontainers==2.4.0  # via -c constraints.txt, hypothesis
-sphinx==7.1.2  # via -c constraints.txt, -r requirements-dev.in, sphinx-rtd-theme, sphinxcontrib-jquery
+sphinx==8.1.3  # via -c constraints.txt, -r requirements-dev.in, sphinx-rtd-theme, sphinxcontrib-jquery
 sphinx-rtd-theme==3.0.2  # via -c constraints.txt, -r requirements-dev.in
-sphinxcontrib-applehelp==1.0.4  # via -c constraints.txt, sphinx
-sphinxcontrib-devhelp==1.0.2  # via -c constraints.txt, sphinx
-sphinxcontrib-htmlhelp==2.0.1  # via -c constraints.txt, sphinx
+sphinxcontrib-applehelp==2.0.0  # via -c constraints.txt, sphinx
+sphinxcontrib-devhelp==2.0.0  # via -c constraints.txt, sphinx
+sphinxcontrib-htmlhelp==2.1.0  # via -c constraints.txt, sphinx
 sphinxcontrib-jquery==4.1  # via -c constraints.txt, sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1  # via -c constraints.txt, sphinx
-sphinxcontrib-qthelp==1.0.3  # via -c constraints.txt, sphinx
-sphinxcontrib-serializinghtml==1.1.5  # via -c constraints.txt, sphinx
+sphinxcontrib-qthelp==2.0.0  # via -c constraints.txt, sphinx
+sphinxcontrib-serializinghtml==2.0.0  # via -c constraints.txt, sphinx
 stack-data==0.6.3  # via -c constraints.txt, ipython
-stdlib-list==0.10.0  # via -c constraints.txt, tach
 sympy==1.13.3  # via -c constraints.txt, dace
 tabulate==0.9.0  # via -c constraints.txt, gt4py (pyproject.toml)
-tach==0.16.5  # via -c constraints.txt, -r requirements-dev.in
-tomli==2.2.1 ; python_version < "3.11"  # via -c constraints.txt, -r requirements-dev.in, black, build, coverage, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, tach, tox
-tomli-w==1.0.0  # via -c constraints.txt, tach
+tach==0.19.5  # via -c constraints.txt, -r requirements-dev.in
+tomli==2.2.1 ; python_version < "3.11"  # via -c constraints.txt, -r requirements-dev.in, black, build, coverage, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, sphinx, tach, tox
+tomli-w==1.1.0  # via -c constraints.txt, tach
 tomlkit==0.13.2  # via -c constraints.txt, bump-my-version
 toolz==1.0.0  # via -c constraints.txt, cytoolz
 tornado==6.4.2  # via -c constraints.txt, ipykernel, jupyter-client
 tox==4.23.2  # via -c constraints.txt, -r requirements-dev.in
 traitlets==5.14.3  # via -c constraints.txt, comm, ipykernel, ipython, jupyter-client, jupyter-core, matplotlib-inline, nbclient, nbformat
 types-tabulate==0.9.0.20241207  # via -c constraints.txt, -r requirements-dev.in
-typing-extensions==4.12.2  # via -c constraints.txt, annotated-types, black, faker, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, setuptools-scm, tox
-urllib3==2.2.3  # via -c constraints.txt, requests
-virtualenv==20.28.0  # via -c constraints.txt, pre-commit, tox
+typing-extensions==4.12.2  # via -c constraints.txt, black, faker, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, tox
+urllib3==2.3.0  # via -c constraints.txt, requests
+virtualenv==20.28.1  # via -c constraints.txt, pre-commit, tox
 wcmatch==10.0  # via -c constraints.txt, bump-my-version
 wcwidth==0.2.13  # via -c constraints.txt, prompt-toolkit
 wheel==0.45.1  # via -c constraints.txt, astunparse, pip-tools
 xxhash==3.0.0  # via -c constraints.txt, gt4py (pyproject.toml)
-zipp==3.20.2  # via -c constraints.txt, importlib-metadata, importlib-resources

 # The following packages are considered to be unsafe in a requirements file:
 pip==24.3.1  # via -c constraints.txt, pip-tools, pipdeptree
-setuptools==75.3.0  # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm
+setuptools==75.8.0  # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm
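The next hunk drops a runtime guard in the gtscript frontend that rejected assignments to non-zero K offsets on the GPU backends for CUDA < 12. For reference, the removed check queried the CUDA runtime through cupy; a sketch using the same call the deleted code used:

```python
import cupy as cp  # only meaningful on a machine with the CUDA toolkit installed

# runtimeGetVersion() encodes the CUDA version as major*1000 + minor*10,
# e.g. CUDA 12.0 -> 12000, which is the threshold the removed guard checked.
if cp.cuda.runtime.runtimeGetVersion() < 12000:
    raise RuntimeError("CUDA >= 12 required for assignments to non-zero K offsets")
```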
diff --git a/src/gt4py/cartesian/frontend/gtscript_frontend.py b/src/gt4py/cartesian/frontend/gtscript_frontend.py
index f155ea6209..4d8ac98529 100644
--- a/src/gt4py/cartesian/frontend/gtscript_frontend.py
+++ b/src/gt4py/cartesian/frontend/gtscript_frontend.py
@@ -1451,21 +1451,6 @@ def visit_Assign(self, node: ast.Assign) -> list:
                         message="Assignment to non-zero offsets in K is not available in PARALLEL. Choose FORWARD or BACKWARD.",
                         loc=nodes.Location.from_ast_node(t),
                     )
-                if self.backend_name in ["gt:gpu", "dace:gpu"]:
-                    import cupy as cp
-
-                    if cp.cuda.runtime.runtimeGetVersion() < 12000:
-                        raise GTScriptSyntaxError(
-                            message=f"Assignment to non-zero offsets in K is not available in {self.backend_name} for CUDA<12. Please update CUDA.",
-                            loc=nodes.Location.from_ast_node(t),
-                        )
-
-                if self.backend_name in ["gt:gpu"]:
-                    raise GTScriptSyntaxError(
-                        message=f"Assignment to non-zero offsets in K is not available in {self.backend_name} as an unsolved bug remains."
- "Please refer to https://github.com/GridTools/gt4py/issues/1754.", - loc=nodes.Location.from_ast_node(t), - ) if not self._is_known(name): if name in self.temp_decls: diff --git a/src/gt4py/cartesian/gtc/common.py b/src/gt4py/cartesian/gtc/common.py index dcb01db7ca..8c3c731c75 100644 --- a/src/gt4py/cartesian/gtc/common.py +++ b/src/gt4py/cartesian/gtc/common.py @@ -229,8 +229,8 @@ class LevelMarker(eve.StrEnum): @enum.unique class ExprKind(eve.IntEnum): - SCALAR: ExprKind = typing.cast("ExprKind", enum.auto()) - FIELD: ExprKind = typing.cast("ExprKind", enum.auto()) + SCALAR = typing.cast("ExprKind", enum.auto()) + FIELD = typing.cast("ExprKind", enum.auto()) class LocNode(eve.Node): diff --git a/src/gt4py/eve/datamodels/core.py b/src/gt4py/eve/datamodels/core.py index 1b0e995156..31e63bdf9f 100644 --- a/src/gt4py/eve/datamodels/core.py +++ b/src/gt4py/eve/datamodels/core.py @@ -16,6 +16,7 @@ import dataclasses import functools import sys +import types import typing import warnings @@ -1254,8 +1255,11 @@ def _make_concrete_with_cache( if not is_generic_datamodel_class(datamodel_cls): raise TypeError(f"'{datamodel_cls.__name__}' is not a generic model class.") for t in type_args: + _accepted_types: tuple[type, ...] = (type, type(None), xtyping.StdGenericAliasType) + if sys.version_info >= (3, 10): + _accepted_types = (*_accepted_types, types.UnionType) if not ( - isinstance(t, (type, type(None), xtyping.StdGenericAliasType)) + isinstance(t, _accepted_types) or (getattr(type(t), "__module__", None) in ("typing", "typing_extensions")) ): raise TypeError( diff --git a/src/gt4py/eve/type_validation.py b/src/gt4py/eve/type_validation.py index e150832295..695ab69dc3 100644 --- a/src/gt4py/eve/type_validation.py +++ b/src/gt4py/eve/type_validation.py @@ -14,6 +14,8 @@ import collections.abc import dataclasses import functools +import sys +import types import typing from . import exceptions, extended_typing as xtyping, utils @@ -193,6 +195,12 @@ def __call__( if type_annotation is None: type_annotation = type(None) + if sys.version_info >= (3, 10): + if isinstance( + type_annotation, types.UnionType + ): # see https://github.com/python/cpython/issues/105499 + type_annotation = typing.Union[type_annotation.__args__] + # Non-generic types if xtyping.is_actual_type(type_annotation): assert not xtyping.get_args(type_annotation) @@ -277,6 +285,7 @@ def __call__( if issubclass(origin_type, (collections.abc.Sequence, collections.abc.Set)): assert len(type_args) == 1 + make_recursive(type_args[0]) if (member_validator := make_recursive(type_args[0])) is None: raise exceptions.EveValueError( f"{type_args[0]} type annotation is not supported." diff --git a/src/gt4py/eve/utils.py b/src/gt4py/eve/utils.py index 2c66d39290..96e41a7bd8 100644 --- a/src/gt4py/eve/utils.py +++ b/src/gt4py/eve/utils.py @@ -440,8 +440,8 @@ def content_hash(*args: Any, hash_algorithm: str | xtyping.HashlibAlgorithm | No return result -ddiff = deepdiff.DeepDiff -"""Shortcut for deepdiff.DeepDiff. +ddiff = deepdiff.diff.DeepDiff +"""Shortcut for deepdiff.diff.DeepDiff. Check https://zepworks.com/deepdiff/current/diff.html for more info. """ @@ -458,13 +458,13 @@ def dhash(obj: Any, **kwargs: Any) -> str: def pprint_ddiff( old: Any, new: Any, *, pprint_opts: Optional[Dict[str, Any]] = None, **kwargs: Any ) -> None: - """Pretty printing of deepdiff.DeepDiff objects. + """Pretty printing of deepdiff.diff.DeepDiff objects. Keyword Arguments: pprint_opts: kwargs dict with options for pprint.pprint. 
""" pprint_opts = pprint_opts or {"indent": 2} - pprint.pprint(deepdiff.DeepDiff(old, new, **kwargs), **pprint_opts) + pprint.pprint(deepdiff.diff.DeepDiff(old, new, **kwargs), **pprint_opts) AnyWordsIterable = Union[str, Iterable[str]] diff --git a/src/gt4py/next/ffront/foast_passes/type_deduction.py b/src/gt4py/next/ffront/foast_passes/type_deduction.py index d334487ae1..6b40cbb77f 100644 --- a/src/gt4py/next/ffront/foast_passes/type_deduction.py +++ b/src/gt4py/next/ffront/foast_passes/type_deduction.py @@ -6,7 +6,7 @@ # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause -from typing import Any, Optional, TypeVar, cast +from typing import Any, Optional, TypeAlias, TypeVar, cast import gt4py.next.ffront.field_operator_ast as foast from gt4py.eve import NodeTranslator, NodeVisitor, traits @@ -48,7 +48,7 @@ def with_altered_scalar_kind( if isinstance(type_spec, ts.FieldType): return ts.FieldType( dims=type_spec.dims, - dtype=ts.ScalarType(kind=new_scalar_kind, shape=type_spec.dtype.shape), + dtype=with_altered_scalar_kind(type_spec.dtype, new_scalar_kind), ) elif isinstance(type_spec, ts.ScalarType): return ts.ScalarType(kind=new_scalar_kind, shape=type_spec.shape) @@ -68,13 +68,18 @@ def construct_tuple_type( >>> mask_type = ts.FieldType( ... dims=[Dimension(value="I")], dtype=ts.ScalarType(kind=ts.ScalarKind.BOOL) ... ) - >>> true_branch_types = [ts.ScalarType(kind=ts.ScalarKind), ts.ScalarType(kind=ts.ScalarKind)] + >>> true_branch_types = [ + ... ts.ScalarType(kind=ts.ScalarKind.FLOAT64), + ... ts.ScalarType(kind=ts.ScalarKind.FLOAT64), + ... ] >>> false_branch_types = [ - ... ts.FieldType(dims=[Dimension(value="I")], dtype=ts.ScalarType(kind=ts.ScalarKind)), - ... ts.ScalarType(kind=ts.ScalarKind), + ... ts.FieldType( + ... dims=[Dimension(value="I")], dtype=ts.ScalarType(kind=ts.ScalarKind.FLOAT64) + ... ), + ... ts.ScalarType(kind=ts.ScalarKind.FLOAT64), ... ] >>> print(construct_tuple_type(true_branch_types, false_branch_types, mask_type)) - [FieldType(dims=[Dimension(value='I', kind=)], dtype=ScalarType(kind=, shape=None)), FieldType(dims=[Dimension(value='I', kind=)], dtype=ScalarType(kind=, shape=None))] + [FieldType(dims=[Dimension(value='I', kind=)], dtype=ScalarType(kind=, shape=None)), FieldType(dims=[Dimension(value='I', kind=)], dtype=ScalarType(kind=, shape=None))] """ element_types_new = true_branch_types for i, element in enumerate(true_branch_types): @@ -105,8 +110,8 @@ def promote_to_mask_type( >>> I, J = (Dimension(value=dim) for dim in ["I", "J"]) >>> bool_type = ts.ScalarType(kind=ts.ScalarKind.BOOL) >>> dtype = ts.ScalarType(kind=ts.ScalarKind.FLOAT64) - >>> promote_to_mask_type(ts.FieldType(dims=[I, J], dtype=bool_type), ts.ScalarType(kind=dtype)) - FieldType(dims=[Dimension(value='I', kind=), Dimension(value='J', kind=)], dtype=ScalarType(kind=ScalarType(kind=, shape=None), shape=None)) + >>> promote_to_mask_type(ts.FieldType(dims=[I, J], dtype=bool_type), dtype) + FieldType(dims=[Dimension(value='I', kind=), Dimension(value='J', kind=)], dtype=ScalarType(kind=, shape=None)) >>> promote_to_mask_type( ... ts.FieldType(dims=[I, J], dtype=bool_type), ts.FieldType(dims=[I], dtype=dtype) ... 
) @@ -360,7 +365,7 @@ def visit_Assign(self, node: foast.Assign, **kwargs: Any) -> foast.Assign: def visit_TupleTargetAssign( self, node: foast.TupleTargetAssign, **kwargs: Any ) -> foast.TupleTargetAssign: - TargetType = list[foast.Starred | foast.Symbol] + TargetType: TypeAlias = list[foast.Starred | foast.Symbol] values = self.visit(node.value, **kwargs) if isinstance(values.type, ts.TupleType): @@ -374,7 +379,7 @@ def visit_TupleTargetAssign( ) new_targets: TargetType = [] - new_type: ts.TupleType | ts.DataType + new_type: ts.DataType for i, index in enumerate(indices): old_target = targets[i] @@ -391,7 +396,8 @@ def visit_TupleTargetAssign( location=old_target.location, ) else: - new_type = values.type.types[index] + new_type = values.type.types[index] # type: ignore[assignment] # see check in next line + assert isinstance(new_type, ts.DataType) new_target = self.visit( old_target, refine_type=new_type, location=old_target.location, **kwargs ) diff --git a/src/gt4py/next/ffront/foast_to_gtir.py b/src/gt4py/next/ffront/foast_to_gtir.py index 3c65695aec..4519b4e571 100644 --- a/src/gt4py/next/ffront/foast_to_gtir.py +++ b/src/gt4py/next/ffront/foast_to_gtir.py @@ -236,6 +236,7 @@ def visit_TupleExpr(self, node: foast.TupleExpr, **kwargs: Any) -> itir.Expr: def visit_UnaryOp(self, node: foast.UnaryOp, **kwargs: Any) -> itir.Expr: # TODO(tehrengruber): extend iterator ir to support unary operators dtype = type_info.extract_dtype(node.type) + assert isinstance(dtype, ts.ScalarType) if node.op in [dialect_ast_enums.UnaryOperator.NOT, dialect_ast_enums.UnaryOperator.INVERT]: if dtype.kind != ts.ScalarKind.BOOL: raise NotImplementedError(f"'{node.op}' is only supported on 'bool' arguments.") @@ -417,12 +418,14 @@ def _visit_neighbor_sum(self, node: foast.Call, **kwargs: Any) -> itir.Expr: def _visit_max_over(self, node: foast.Call, **kwargs: Any) -> itir.Expr: dtype = type_info.extract_dtype(node.type) + assert isinstance(dtype, ts.ScalarType) min_value, _ = type_info.arithmetic_bounds(dtype) init_expr = self._make_literal(str(min_value), dtype) return self._make_reduction_expr(node, "maximum", init_expr, **kwargs) def _visit_min_over(self, node: foast.Call, **kwargs: Any) -> itir.Expr: dtype = type_info.extract_dtype(node.type) + assert isinstance(dtype, ts.ScalarType) _, max_value = type_info.arithmetic_bounds(dtype) init_expr = self._make_literal(str(max_value), dtype) return self._make_reduction_expr(node, "minimum", init_expr, **kwargs) diff --git a/src/gt4py/next/ffront/past_passes/type_deduction.py b/src/gt4py/next/ffront/past_passes/type_deduction.py index 92f7327218..9355273588 100644 --- a/src/gt4py/next/ffront/past_passes/type_deduction.py +++ b/src/gt4py/next/ffront/past_passes/type_deduction.py @@ -104,6 +104,15 @@ def visit_Program(self, node: past.Program, **kwargs: Any) -> past.Program: location=node.location, ) + def visit_Slice(self, node: past.Slice, **kwargs: Any) -> past.Slice: + return past.Slice( + lower=self.visit(node.lower, **kwargs), + upper=self.visit(node.upper, **kwargs), + step=self.visit(node.step, **kwargs), + type=ts.DeferredType(constraint=None), + location=node.location, + ) + def visit_Subscript(self, node: past.Subscript, **kwargs: Any) -> past.Subscript: value = self.visit(node.value, **kwargs) return past.Subscript( diff --git a/src/gt4py/next/ffront/past_process_args.py b/src/gt4py/next/ffront/past_process_args.py index 7958b7a8d3..1add668791 100644 --- a/src/gt4py/next/ffront/past_process_args.py +++ b/src/gt4py/next/ffront/past_process_args.py @@ 
-109,6 +109,7 @@ def _field_constituents_shape_and_dims( match arg_type: case ts.TupleType(): for el, el_type in zip(arg, arg_type.types): + assert isinstance(el_type, ts.DataType) yield from _field_constituents_shape_and_dims(el, el_type) case ts.FieldType(): dims = type_info.extract_dims(arg_type) diff --git a/src/gt4py/next/ffront/type_info.py b/src/gt4py/next/ffront/type_info.py index 8160a2c42d..83ecf92839 100644 --- a/src/gt4py/next/ffront/type_info.py +++ b/src/gt4py/next/ffront/type_info.py @@ -169,7 +169,9 @@ def _scan_param_promotion(param: ts.TypeSpec, arg: ts.TypeSpec) -> ts.FieldType -------- >>> _scan_param_promotion( ... ts.ScalarType(kind=ts.ScalarKind.INT64), - ... ts.FieldType(dims=[common.Dimension("I")], dtype=ts.ScalarKind.FLOAT64), + ... ts.FieldType( + ... dims=[common.Dimension("I")], dtype=ts.ScalarType(kind=ts.ScalarKind.FLOAT64) + ... ), ... ) FieldType(dims=[Dimension(value='I', kind=)], dtype=ScalarType(kind=, shape=None)) """ @@ -252,8 +254,8 @@ def function_signature_incompatibilities_scanop( # build a function type to leverage the already existing signature checking capabilities function_type = ts.FunctionType( pos_only_args=[], - pos_or_kw_args=promoted_params, # type: ignore[arg-type] # dict is invariant, but we don't care here. - kw_only_args=promoted_kwparams, # type: ignore[arg-type] # same as above + pos_or_kw_args=promoted_params, + kw_only_args=promoted_kwparams, returns=ts.DeferredType(constraint=None), ) diff --git a/src/gt4py/next/ffront/type_specifications.py b/src/gt4py/next/ffront/type_specifications.py index e4f6c826fe..b76a116297 100644 --- a/src/gt4py/next/ffront/type_specifications.py +++ b/src/gt4py/next/ffront/type_specifications.py @@ -6,23 +6,19 @@ # Please, refer to the LICENSE file in the root directory. 
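One small but easy-to-miss change in the `visit_TupleTargetAssign` hunk further up: the local alias gains an explicit `TypeAlias` annotation. Inside a function body, mypy otherwise treats the assignment as an ordinary variable and rejects its use in later annotations. A minimal sketch of the distinction, with hypothetical names:

```python
from typing import TypeAlias

def build() -> None:
    Plain = list[int | str]              # mypy: just a variable holding a type object
    Marked: TypeAlias = list[int | str]  # mypy: a real type alias

    # xs: Plain   # error: Variable "Plain" is not valid as a type
    xs: Marked = [1, "a"]                # OK
    print(xs)
```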
diff --git a/src/gt4py/next/ffront/type_specifications.py b/src/gt4py/next/ffront/type_specifications.py
index e4f6c826fe..b76a116297 100644
--- a/src/gt4py/next/ffront/type_specifications.py
+++ b/src/gt4py/next/ffront/type_specifications.py
@@ -6,23 +6,19 @@
 # Please, refer to the LICENSE file in the root directory.
 # SPDX-License-Identifier: BSD-3-Clause

-from dataclasses import dataclass

 import gt4py.next.type_system.type_specifications as ts
-from gt4py.next import common as func_common
+from gt4py.next import common


-@dataclass(frozen=True)
 class ProgramType(ts.TypeSpec, ts.CallableType):
     definition: ts.FunctionType


-@dataclass(frozen=True)
 class FieldOperatorType(ts.TypeSpec, ts.CallableType):
     definition: ts.FunctionType


-@dataclass(frozen=True)
 class ScanOperatorType(ts.TypeSpec, ts.CallableType):
-    axis: func_common.Dimension
+    axis: common.Dimension
     definition: ts.FunctionType
diff --git a/src/gt4py/next/iterator/embedded.py b/src/gt4py/next/iterator/embedded.py
index 13c64e264e..5949d29432 100644
--- a/src/gt4py/next/iterator/embedded.py
+++ b/src/gt4py/next/iterator/embedded.py
@@ -54,7 +54,6 @@
 )
 from gt4py.next.ffront import fbuiltins
 from gt4py.next.iterator import builtins, runtime
-from gt4py.next.iterator.type_system import type_specifications as itir_ts
 from gt4py.next.otf import arguments
 from gt4py.next.type_system import type_specifications as ts, type_translation
@@ -1460,7 +1459,7 @@ class _List(Generic[DT]):
     def __getitem__(self, i: int):
         return self.values[i]

-    def __gt_type__(self) -> itir_ts.ListType:
+    def __gt_type__(self) -> ts.ListType:
         offset_tag = self.offset.value
         assert isinstance(offset_tag, str)
         element_type = type_translation.from_value(self.values[0])
@@ -1470,7 +1469,7 @@ def __gt_type__(self) -> itir_ts.ListType:
         connectivity = offset_provider[offset_tag]
         assert common.is_neighbor_connectivity(connectivity)
         local_dim = connectivity.__gt_type__().neighbor_dim
-        return itir_ts.ListType(element_type=element_type, offset_type=local_dim)
+        return ts.ListType(element_type=element_type, offset_type=local_dim)


 @dataclasses.dataclass(frozen=True)
@@ -1480,10 +1479,10 @@ class _ConstList(Generic[DT]):
     def __getitem__(self, _):
         return self.value

-    def __gt_type__(self) -> itir_ts.ListType:
+    def __gt_type__(self) -> ts.ListType:
         element_type = type_translation.from_value(self.value)
         assert isinstance(element_type, ts.DataType)
-        return itir_ts.ListType(
+        return ts.ListType(
             element_type=element_type,
             offset_type=_CONST_DIM,
         )
@@ -1801,7 +1800,7 @@ def _fieldspec_list_to_value(
     domain: common.Domain, type_: ts.TypeSpec
 ) -> tuple[common.Domain, ts.TypeSpec]:
     """Translate the list element type into the domain."""
-    if isinstance(type_, itir_ts.ListType):
+    if isinstance(type_, ts.ListType):
         if type_.offset_type == _CONST_DIM:
             return domain.insert(
                 len(domain), common.named_range((_CONST_DIM, 1))
diff --git a/src/gt4py/next/iterator/transforms/fuse_as_fieldop.py b/src/gt4py/next/iterator/transforms/fuse_as_fieldop.py
index b7087472e0..cc42896f2b 100644
--- a/src/gt4py/next/iterator/transforms/fuse_as_fieldop.py
+++ b/src/gt4py/next/iterator/transforms/fuse_as_fieldop.py
@@ -20,10 +20,7 @@
     inline_lifts,
     trace_shifts,
 )
-from gt4py.next.iterator.type_system import (
-    inference as type_inference,
-    type_specifications as it_ts,
-)
+from gt4py.next.iterator.type_system import inference as type_inference
 from gt4py.next.type_system import type_info, type_specifications as ts
@@ -140,7 +137,7 @@ def fuse_as_fieldop(
         if arg.type and not isinstance(arg.type, ts.DeferredType):
             assert isinstance(arg.type, ts.TypeSpec)
             dtype = type_info.apply_to_primitive_constituents(type_info.extract_dtype, arg.type)
-            assert not isinstance(dtype, it_ts.ListType)
+            assert not isinstance(dtype, ts.ListType)
             new_param: str
             if isinstance(
                 arg, itir.SymRef
@@ -246,7 +243,7 @@ def visit_FunCall(self, node: itir.FunCall):
                     )
                     or cpm.is_call_to(arg, "if_")
                 )
-                and (isinstance(dtype, it_ts.ListType) or len(arg_shifts) <= 1)
+                and (isinstance(dtype, ts.ListType) or len(arg_shifts) <= 1)
             )
         )
diff --git a/src/gt4py/next/iterator/transforms/global_tmps.py b/src/gt4py/next/iterator/transforms/global_tmps.py
index 334fb330d7..ac7fcb8f1c 100644
--- a/src/gt4py/next/iterator/transforms/global_tmps.py
+++ b/src/gt4py/next/iterator/transforms/global_tmps.py
@@ -98,12 +98,12 @@ def _transform_by_pattern(
             tmp_expr.type,
             tuple_constructor=lambda *elements: tuple(elements),
         )
-        tmp_dtypes: ts.ScalarType | tuple[ts.ScalarType | tuple, ...] = (
-            type_info.apply_to_primitive_constituents(
-                type_info.extract_dtype,
-                tmp_expr.type,
-                tuple_constructor=lambda *elements: tuple(elements),
-            )
+        tmp_dtypes: (
+            ts.ScalarType | ts.ListType | tuple[ts.ScalarType | ts.ListType | tuple, ...]
+        ) = type_info.apply_to_primitive_constituents(
+            type_info.extract_dtype,
+            tmp_expr.type,
+            tuple_constructor=lambda *elements: tuple(elements),
        )

         # allocate temporary for all tuple elements
diff --git a/src/gt4py/next/iterator/type_system/inference.py b/src/gt4py/next/iterator/type_system/inference.py
index 1b980783fa..1da59546c0 100644
--- a/src/gt4py/next/iterator/type_system/inference.py
+++ b/src/gt4py/next/iterator/type_system/inference.py
@@ -275,8 +275,8 @@ def _get_dimensions(obj: Any):
     if isinstance(obj, common.Dimension):
         yield obj
     elif isinstance(obj, ts.TypeSpec):
-        for field in dataclasses.fields(obj.__class__):
-            yield from _get_dimensions(getattr(obj, field.name))
+        for field in obj.__datamodel_fields__.keys():
+            yield from _get_dimensions(getattr(obj, field))
     elif isinstance(obj, collections.abc.Mapping):
         for el in obj.values():
             yield from _get_dimensions(el)
@@ -479,7 +479,7 @@ def visit_Temporary(self, node: itir.Temporary, *, ctx) -> ts.FieldType | ts.Tup
         assert domain.dims != "unknown"
         assert node.dtype
         return type_info.apply_to_primitive_constituents(
-            lambda dtype: ts.FieldType(dims=domain.dims, dtype=dtype),  # type: ignore[arg-type]  # ensured by domain.dims != "unknown" above
+            lambda dtype: ts.FieldType(dims=domain.dims, dtype=dtype),
             node.dtype,
         )
# SPDX-License-Identifier: BSD-3-Clause -import dataclasses -from typing import Literal, Optional +from typing import Literal from gt4py.next import common from gt4py.next.type_system import type_specifications as ts -@dataclasses.dataclass(frozen=True) class NamedRangeType(ts.TypeSpec): dim: common.Dimension -@dataclasses.dataclass(frozen=True) class DomainType(ts.DataType): dims: list[common.Dimension] | Literal["unknown"] -@dataclasses.dataclass(frozen=True) class OffsetLiteralType(ts.TypeSpec): value: ts.ScalarType | common.Dimension -@dataclasses.dataclass(frozen=True) -class ListType(ts.DataType): - element_type: ts.DataType - # TODO(havogt): the `offset_type` is not yet used in type_inference, - # it is meant to describe the neighborhood (via the local dimension) - offset_type: Optional[common.Dimension] = None - - -@dataclasses.dataclass(frozen=True) class IteratorType(ts.DataType, ts.CallableType): position_dims: list[common.Dimension] | Literal["unknown"] defined_dims: list[common.Dimension] element_type: ts.DataType -@dataclasses.dataclass(frozen=True) class ProgramType(ts.TypeSpec): params: dict[str, ts.DataType] diff --git a/src/gt4py/next/iterator/type_system/type_synthesizer.py b/src/gt4py/next/iterator/type_system/type_synthesizer.py index 5be9ed7438..22a04ec04a 100644 --- a/src/gt4py/next/iterator/type_system/type_synthesizer.py +++ b/src/gt4py/next/iterator/type_system/type_synthesizer.py @@ -155,18 +155,18 @@ def if_(pred: ts.ScalarType, true_branch: ts.DataType, false_branch: ts.DataType @_register_builtin_type_synthesizer -def make_const_list(scalar: ts.ScalarType) -> it_ts.ListType: +def make_const_list(scalar: ts.ScalarType) -> ts.ListType: assert isinstance(scalar, ts.ScalarType) - return it_ts.ListType(element_type=scalar) + return ts.ListType(element_type=scalar) @_register_builtin_type_synthesizer -def list_get(index: ts.ScalarType | it_ts.OffsetLiteralType, list_: it_ts.ListType) -> ts.DataType: +def list_get(index: ts.ScalarType | it_ts.OffsetLiteralType, list_: ts.ListType) -> ts.DataType: if isinstance(index, it_ts.OffsetLiteralType): assert isinstance(index.value, ts.ScalarType) index = index.value assert isinstance(index, ts.ScalarType) and type_info.is_integral(index) - assert isinstance(list_, it_ts.ListType) + assert isinstance(list_, ts.ListType) return list_.element_type @@ -198,14 +198,14 @@ def index(arg: ts.DimensionType) -> ts.FieldType: @_register_builtin_type_synthesizer -def neighbors(offset_literal: it_ts.OffsetLiteralType, it: it_ts.IteratorType) -> it_ts.ListType: +def neighbors(offset_literal: it_ts.OffsetLiteralType, it: it_ts.IteratorType) -> ts.ListType: assert ( isinstance(offset_literal, it_ts.OffsetLiteralType) and isinstance(offset_literal.value, common.Dimension) and offset_literal.value.kind == common.DimensionKind.LOCAL ) assert isinstance(it, it_ts.IteratorType) - return it_ts.ListType(element_type=it.element_type) + return ts.ListType(element_type=it.element_type) @_register_builtin_type_synthesizer @@ -270,7 +270,7 @@ def _convert_as_fieldop_input_to_iterator( else: defined_dims.append(dim) if is_nb_field: - element_type = it_ts.ListType(element_type=element_type) + element_type = ts.ListType(element_type=element_type) return it_ts.IteratorType( position_dims=domain.dims, defined_dims=defined_dims, element_type=element_type @@ -342,14 +342,14 @@ def apply_scan( def map_(op: TypeSynthesizer) -> TypeSynthesizer: @TypeSynthesizer def applied_map( - *args: it_ts.ListType, offset_provider_type: common.OffsetProviderType - ) -> 
it_ts.ListType: + *args: ts.ListType, offset_provider_type: common.OffsetProviderType + ) -> ts.ListType: assert len(args) > 0 - assert all(isinstance(arg, it_ts.ListType) for arg in args) + assert all(isinstance(arg, ts.ListType) for arg in args) arg_el_types = [arg.element_type for arg in args] el_type = op(*arg_el_types, offset_provider_type=offset_provider_type) assert isinstance(el_type, ts.DataType) - return it_ts.ListType(element_type=el_type) + return ts.ListType(element_type=el_type) return applied_map @@ -357,8 +357,8 @@ def applied_map( @_register_builtin_type_synthesizer def reduce(op: TypeSynthesizer, init: ts.TypeSpec) -> TypeSynthesizer: @TypeSynthesizer - def applied_reduce(*args: it_ts.ListType, offset_provider_type: common.OffsetProviderType): - assert all(isinstance(arg, it_ts.ListType) for arg in args) + def applied_reduce(*args: ts.ListType, offset_provider_type: common.OffsetProviderType): + assert all(isinstance(arg, ts.ListType) for arg in args) return op( init, *(arg.element_type for arg in args), offset_provider_type=offset_provider_type ) diff --git a/src/gt4py/next/otf/binding/nanobind.py b/src/gt4py/next/otf/binding/nanobind.py index 24913a1365..edd56fad48 100644 --- a/src/gt4py/next/otf/binding/nanobind.py +++ b/src/gt4py/next/otf/binding/nanobind.py @@ -86,6 +86,7 @@ def _type_string(type_: ts.TypeSpec) -> str: return f"std::tuple<{','.join(_type_string(t) for t in type_.types)}>" elif isinstance(type_, ts.FieldType): ndims = len(type_.dims) + assert isinstance(type_.dtype, ts.ScalarType) dtype = cpp_interface.render_scalar_type(type_.dtype) shape = f"nanobind::shape<{', '.join(['gridtools::nanobind::dynamic_size'] * ndims)}>" buffer_t = f"nanobind::ndarray<{dtype}, {shape}>" diff --git a/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py b/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py index d5b34fd5b9..f7bb1805e0 100644 --- a/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py +++ b/src/gt4py/next/program_processors/codegens/gtfn/itir_to_gtfn_ir.py @@ -701,7 +701,7 @@ def visit_Temporary( def dtype_to_cpp(x: ts.DataType) -> str: if isinstance(x, ts.TupleType): assert all(isinstance(i, ts.ScalarType) for i in x.types) - return "::gridtools::tuple<" + ", ".join(dtype_to_cpp(i) for i in x.types) + ">" + return "::gridtools::tuple<" + ", ".join(dtype_to_cpp(i) for i in x.types) + ">" # type: ignore[arg-type] # ensured by assert assert isinstance(x, ts.ScalarType) res = pytype_to_cpptype(x) assert isinstance(res, str) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_builtin_translators.py b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_builtin_translators.py index 2dc8318769..3195e656d1 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_builtin_translators.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_builtin_translators.py @@ -23,7 +23,6 @@ domain_utils, ir_makers as im, ) -from gt4py.next.iterator.type_system import type_specifications as itir_ts from gt4py.next.program_processors.runners.dace_common import utility as dace_utils from gt4py.next.program_processors.runners.dace_fieldview import ( gtir_dataflow, @@ -120,7 +119,7 @@ def get_local_view( ) elif len(local_dims) == 1: - field_dtype = itir_ts.ListType( + field_dtype = ts.ListType( element_type=self.gt_type.dtype, offset_type=local_dims[0] ) field_domain = [ @@ -289,7 +288,6 @@ def _create_field_operator( input_edges: Iterable[gtir_dataflow.DataflowInputEdge], 
output_edges: gtir_dataflow.DataflowOutputEdge | tuple[gtir_dataflow.DataflowOutputEdge | tuple[Any, ...], ...], - scan_dim: Optional[gtx_common.Dimension] = None, ) -> FieldopResult: """ Helper method to allocate a temporary field to store the output of a field operator. @@ -302,70 +300,27 @@ def _create_field_operator( sdfg_builder: The object used to build the map scope in the provided SDFG. input_edges: List of edges to pass input data into the dataflow. output_edges: Single edge or tuple of edges representing the dataflow output data. - scan_dim: Column dimension used in scan field operators. Returns: The field data descriptor, which includes the field access node in the given `state` and the field domain offset. """ - domain_dims, domain_offset, domain_shape = _get_field_layout(domain) - domain_indices = _get_domain_indices(domain_dims, domain_offset) - domain_subset = dace_subsets.Range.from_indices(domain_indices) - - scan_dim_index: Optional[int] = None - if scan_dim is not None: - scan_dim_index = domain_dims.index(scan_dim) - # we construct the field operator only on the horizontal domain - domain_subset = dace_subsets.Range( - domain_subset[:scan_dim_index] + domain_subset[scan_dim_index + 1 :] - ) - - # now check, after removal of the vertical dimension, whether the domain is empty - if len(domain_subset) == 0: - # no need to create a map scope, the field operator domain is empty - me, mx = (None, None) - else: - # create map range corresponding to the field operator domain - me, mx = sdfg_builder.add_map( - "fieldop", - state, - ndrange={ - dace_gtir_utils.get_map_variable(dim): f"{lower_bound}:{upper_bound}" - for dim, lower_bound, upper_bound in domain - if dim != scan_dim - }, - ) - # here we setup the edges passing through the map entry node - for edge in input_edges: - edge.connect(me) - - def create_field(output_edge: gtir_dataflow.DataflowOutputEdge, sym: gtir.Sym) -> FieldopData: + def _create_field_operator_impl( + output_edge: gtir_dataflow.DataflowOutputEdge, mx: dace.nodes.MapExit, sym: gtir.Sym + ) -> FieldopData: assert isinstance(sym.type, ts.FieldType) dataflow_output_desc = output_edge.result.dc_node.desc(sdfg) if isinstance(output_edge.result.gt_dtype, ts.ScalarType): assert output_edge.result.gt_dtype == sym.type.dtype - assert dataflow_output_desc.dtype == dace_utils.as_dace_type(sym.type.dtype) field_dtype = output_edge.result.gt_dtype field_dims, field_shape, field_offset = (domain_dims, domain_shape, domain_offset) - if scan_dim is not None: - # the scan field operator produces a 1D vertical field - assert isinstance(dataflow_output_desc, dace.data.Array) - assert len(dataflow_output_desc.shape) == 1 - # the vertical dimension should not belong to the field operator domain - # but we need to write it to the output field - field_subset = ( - dace_subsets.Range(domain_subset[:scan_dim_index]) - + dace_subsets.Range.from_array(dataflow_output_desc) - + dace_subsets.Range(domain_subset[scan_dim_index:]) - ) - else: - assert isinstance(dataflow_output_desc, dace.data.Scalar) - field_subset = domain_subset + assert isinstance(dataflow_output_desc, dace.data.Scalar) + field_subset = domain_subset else: - assert isinstance(sym.type.dtype, itir_ts.ListType) - assert output_edge.result.gt_dtype.element_type == sym.type.dtype.element_type + assert isinstance(sym.type.dtype, ts.ListType) assert isinstance(output_edge.result.gt_dtype.element_type, ts.ScalarType) + assert output_edge.result.gt_dtype.element_type == sym.type.dtype.element_type field_dtype = 
output_edge.result.gt_dtype.element_type assert isinstance(dataflow_output_desc, dace.data.Array) assert len(dataflow_output_desc.shape) == 1 @@ -377,16 +332,10 @@ def create_field(output_edge: gtir_dataflow.DataflowOutputEdge, sym: gtir.Sym) - field_subset = domain_subset + dace_subsets.Range.from_array(dataflow_output_desc) # allocate local temporary storage - field_name, field_desc = sdfg.add_temp_transient(field_shape, dataflow_output_desc.dtype) + assert dataflow_output_desc.dtype == dace_utils.as_dace_type(field_dtype) + field_name, _ = sdfg.add_temp_transient(field_shape, dataflow_output_desc.dtype) field_node = state.add_access(field_name) - if scan_dim is not None: - # By default, we leave `strides=None` which corresponds to use DaCe default memory layout - # for transient arrays. However, for scan field operators we need to ensure that the same - # stride is used for the vertical dimension in inner and outer array. - scan_output_stride = field_desc.strides[scan_dim_index] - dataflow_output_desc.strides = (scan_output_stride,) - # and here the edge writing the dataflow result data through the map exit node output_edge.connect(mx, field_node, field_subset) @@ -396,15 +345,33 @@ def create_field(output_edge: gtir_dataflow.DataflowOutputEdge, sym: gtir.Sym) - offset=(field_offset if set(field_offset) != {0} else None), ) + domain_dims, domain_offset, domain_shape = _get_field_layout(domain) + domain_indices = _get_domain_indices(domain_dims, domain_offset) + domain_subset = dace_subsets.Range.from_indices(domain_indices) + + # create map range corresponding to the field operator domain + me, mx = sdfg_builder.add_map( + "fieldop", + state, + ndrange={ + dace_gtir_utils.get_map_variable(dim): f"{lower_bound}:{upper_bound}" + for dim, lower_bound, upper_bound in domain + }, + ) + + # here we setup the edges passing through the map entry node + for edge in input_edges: + edge.connect(me) + if isinstance(output_edges, gtir_dataflow.DataflowOutputEdge): assert isinstance(node_type, ts.FieldType) - return create_field(output_edges, im.sym("x", node_type)) + return _create_field_operator_impl(output_edges, mx, im.sym("x", node_type)) else: # handle tuples of fields assert isinstance(node_type, ts.TupleType) - return gtx_utils.tree_map(create_field)( - output_edges, dace_gtir_utils.make_symbol_tuple("x", node_type) - ) + return gtx_utils.tree_map( + lambda output_edge, sym: _create_field_operator_impl(output_edge, mx, sym) + )(output_edges, dace_gtir_utils.make_symbol_tuple("x", node_type)) def extract_domain(node: gtir.Node) -> FieldopDomain: @@ -469,9 +436,6 @@ def translate_as_fieldop( assert len(fun_node.args) == 2 fieldop_expr, domain_expr = fun_node.args - if cpm.is_call_to(fieldop_expr, "scan"): - return translate_scan(node, sdfg, state, sdfg_builder) - if cpm.is_ref_to(fieldop_expr, "deref"): # Special usage of 'deref' as argument to fieldop expression, to pass a scalar # value to 'as_fieldop' function. 
It results in broadcasting the scalar value @@ -1175,6 +1139,5 @@ def translate_symbol_ref( translate_make_tuple, translate_tuple_get, translate_scalar_expr, - translate_scan, translate_symbol_ref, ] diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_dataflow.py b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_dataflow.py index f41788d340..1373859c57 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_dataflow.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_dataflow.py @@ -32,7 +32,6 @@ from gt4py.next.iterator import ir as gtir from gt4py.next.iterator.ir_utils import common_pattern_matcher as cpm, ir_makers as im from gt4py.next.iterator.transforms import symbol_ref_utils -from gt4py.next.iterator.type_system import type_specifications as itir_ts from gt4py.next.program_processors.runners.dace_common import utility as dace_utils from gt4py.next.program_processors.runners.dace_fieldview import ( gtir_python_codegen, @@ -65,7 +64,7 @@ class ValueExpr: """ dc_node: dace.nodes.AccessNode - gt_dtype: itir_ts.ListType | ts.ScalarType + gt_dtype: ts.ListType | ts.ScalarType @dataclasses.dataclass(frozen=True) @@ -80,7 +79,7 @@ class MemletExpr: """ dc_node: dace.nodes.AccessNode - gt_dtype: itir_ts.ListType | ts.ScalarType + gt_dtype: ts.ListType | ts.ScalarType subset: dace_subsets.Range @@ -113,7 +112,7 @@ class IteratorExpr: """ field: dace.nodes.AccessNode - gt_dtype: itir_ts.ListType | ts.ScalarType + gt_dtype: ts.ListType | ts.ScalarType field_domain: list[tuple[gtx_common.Dimension, dace.symbolic.SymExpr]] indices: dict[gtx_common.Dimension, DataExpr] @@ -122,7 +121,7 @@ def get_memlet_subset(self, sdfg: dace.SDFG) -> dace_subsets.Range: raise ValueError(f"Cannot deref iterator {self}.") field_desc = self.field.desc(sdfg) - if isinstance(self.gt_dtype, itir_ts.ListType): + if isinstance(self.gt_dtype, ts.ListType): assert len(field_desc.shape) == len(self.field_domain) + 1 assert self.gt_dtype.offset_type is not None field_domain = [*self.field_domain, (self.gt_dtype.offset_type, 0)] @@ -460,7 +459,7 @@ def _construct_tasklet_result( return ValueExpr( dc_node=temp_node, gt_dtype=( - itir_ts.ListType(element_type=data_type, offset_type=_CONST_DIM) + ts.ListType(element_type=data_type, offset_type=_CONST_DIM) if use_array else data_type ), @@ -803,7 +802,7 @@ def connect_output(inner_value: ValueExpr) -> ValueExpr: ) def _visit_neighbors(self, node: gtir.FunCall) -> ValueExpr: - assert isinstance(node.type, itir_ts.ListType) + assert isinstance(node.type, ts.ListType) assert len(node.args) == 2 assert isinstance(node.args[0], gtir.OffsetLiteral) @@ -906,7 +905,7 @@ def _visit_neighbors(self, node: gtir.FunCall) -> ValueExpr: ) return ValueExpr( - dc_node=neighbors_node, gt_dtype=itir_ts.ListType(node.type.element_type, offset_type) + dc_node=neighbors_node, gt_dtype=ts.ListType(node.type.element_type, offset_type) ) def _visit_list_get(self, node: gtir.FunCall) -> ValueExpr: @@ -914,7 +913,7 @@ def _visit_list_get(self, node: gtir.FunCall) -> ValueExpr: index_arg = self.visit(node.args[0]) list_arg = self.visit(node.args[1]) assert isinstance(list_arg, ValueExpr) - assert isinstance(list_arg.gt_dtype, itir_ts.ListType) + assert isinstance(list_arg.gt_dtype, ts.ListType) assert isinstance(list_arg.gt_dtype.element_type, ts.ScalarType) list_desc = list_arg.dc_node.desc(self.sdfg) @@ -974,7 +973,7 @@ def _visit_map(self, node: gtir.FunCall) -> ValueExpr: In above example, the result would be an array with 
size V2E.max_neighbors, containing the V2E neighbor values incremented by 1.0. """ - assert isinstance(node.type, itir_ts.ListType) + assert isinstance(node.type, ts.ListType) assert isinstance(node.fun, gtir.FunCall) assert len(node.fun.args) == 1 # the operation to be mapped on the arguments @@ -994,7 +993,7 @@ def _visit_map(self, node: gtir.FunCall) -> ValueExpr: gtx_common.Dimension, gtx_common.NeighborConnectivityType ] = {} for input_arg in input_args: - assert isinstance(input_arg.gt_dtype, itir_ts.ListType) + assert isinstance(input_arg.gt_dtype, ts.ListType) assert input_arg.gt_dtype.offset_type is not None offset_type = input_arg.gt_dtype.offset_type if offset_type == _CONST_DIM: @@ -1064,7 +1063,7 @@ def _visit_map(self, node: gtir.FunCall) -> ValueExpr: connectivity_slice = self._construct_local_view( MemletExpr( dc_node=self.state.add_access(connectivity), - gt_dtype=itir_ts.ListType( + gt_dtype=ts.ListType( element_type=node.type.element_type, offset_type=offset_type ), subset=dace_subsets.Range.from_string( @@ -1103,7 +1102,7 @@ def _visit_map(self, node: gtir.FunCall) -> ValueExpr: return ValueExpr( dc_node=result_node, - gt_dtype=itir_ts.ListType(node.type.element_type, offset_type), + gt_dtype=ts.ListType(node.type.element_type, offset_type), ) def _make_reduce_with_skip_values( @@ -1130,7 +1129,7 @@ def _make_reduce_with_skip_values( origin_map_index = dace_gtir_utils.get_map_variable(offset_provider_type.source_dim) assert ( - isinstance(input_expr.gt_dtype, itir_ts.ListType) + isinstance(input_expr.gt_dtype, ts.ListType) and input_expr.gt_dtype.offset_type is not None ) offset_type = input_expr.gt_dtype.offset_type @@ -1243,7 +1242,7 @@ def _visit_reduce(self, node: gtir.FunCall) -> ValueExpr: input_expr = self.visit(node.args[0]) assert isinstance(input_expr, (MemletExpr, ValueExpr)) assert ( - isinstance(input_expr.gt_dtype, itir_ts.ListType) + isinstance(input_expr.gt_dtype, ts.ListType) and input_expr.gt_dtype.offset_type is not None ) offset_type = input_expr.gt_dtype.offset_type @@ -1537,7 +1536,7 @@ def _visit_generic_builtin(self, node: gtir.FunCall) -> ValueExpr: connector, ) - if isinstance(node.type, itir_ts.ListType): + if isinstance(node.type, ts.ListType): # The only builtin function (so far) handled here that returns a list # is 'make_const_list'. There are other builtin functions (map_, neighbors) # that return a list but they are handled in specialized visit methods. diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_sdfg.py b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_sdfg.py index f932058433..91a2c27cde 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_sdfg.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/gtir_sdfg.py @@ -22,7 +22,6 @@ from typing import Any, Dict, Iterable, List, Optional, Protocol, Sequence, Set, Tuple, Union import dace -from dace.sdfg import utils as dace_sdfg_utils from gt4py import eve from gt4py.eve import concepts @@ -112,21 +111,6 @@ def get_symbol_type(self, symbol_name: str) -> ts.DataType: """Retrieve the GT4Py type of a symbol used in the SDFG.""" ... - @abc.abstractmethod - def is_column_dimension(self, dim: gtx_common.Dimension) -> bool: - """Check if the given dimension is the column dimension.""" - ... 
- - @abc.abstractmethod - def nested_context( - self, - sdfg: dace.SDFG, - global_symbols: dict[str, ts.DataType], - field_offsets: dict[str, Optional[list[dace.symbolic.SymExpr]]], - ) -> SDFGBuilder: - """Create a new empty context, useful to build a nested SDFG.""" - ... - @abc.abstractmethod def visit(self, node: concepts.RootNode, **kwargs: Any) -> Any: """Visit a node of the GT4Py IR.""" @@ -180,7 +164,6 @@ class GTIRToSDFG(eve.NodeVisitor, SDFGBuilder): """ offset_provider_type: gtx_common.OffsetProviderType - column_dim: Optional[gtx_common.Dimension] global_symbols: dict[str, ts.DataType] = dataclasses.field(default_factory=lambda: {}) field_offsets: dict[str, Optional[list[dace.symbolic.SymExpr]]] = dataclasses.field( default_factory=lambda: {} @@ -207,25 +190,6 @@ def make_field( def get_symbol_type(self, symbol_name: str) -> ts.DataType: return self.global_symbols[symbol_name] - def is_column_dimension(self, dim: gtx_common.Dimension) -> bool: - assert self.column_dim - return dim == self.column_dim - - def nested_context( - self, - sdfg: dace.SDFG, - global_symbols: dict[str, ts.DataType], - field_offsets: dict[str, Optional[list[dace.symbolic.SymExpr]]], - ) -> SDFGBuilder: - nsdfg_builder = GTIRToSDFG( - self.offset_provider_type, self.column_dim, global_symbols, field_offsets - ) - nsdfg_params = [ - gtir.Sym(id=p_name, type=p_type) for p_name, p_type in global_symbols.items() - ] - nsdfg_builder._add_sdfg_params(sdfg, node_params=nsdfg_params, symbolic_arguments=None) - return nsdfg_builder - def unique_nsdfg_name(self, sdfg: dace.SDFG, prefix: str) -> str: nsdfg_list = [ nsdfg.label for nsdfg in sdfg.all_sdfgs_recursive() if nsdfg.label.startswith(prefix) @@ -314,6 +278,7 @@ def _add_storage( # represent zero-dimensional fields as scalar arguments return self._add_storage(sdfg, symbolic_arguments, name, gt_type.dtype, transient) # handle default case: field with one or more dimensions + assert isinstance(gt_type.dtype, ts.ScalarType) dc_dtype = dace_utils.as_dace_type(gt_type.dtype) # Use symbolic shape, which allows to invoke the program with fields of different size; # and symbolic strides, which enables decoupling the memory layout from generated code. @@ -396,7 +361,7 @@ def _add_sdfg_params( self, sdfg: dace.SDFG, node_params: Sequence[gtir.Sym], - symbolic_arguments: Optional[set[str]], + symbolic_arguments: set[str], ) -> list[str]: """ Helper function to add storage for node parameters and connectivity tables. @@ -406,9 +371,6 @@ def _add_sdfg_params( except when they are listed in 'symbolic_arguments', in which case they will be represented in the SDFG as DaCe symbols. """ - if symbolic_arguments is None: - symbolic_arguments = set() - # add non-transient arrays and/or SDFG symbols for the program arguments sdfg_args = [] for param in node_params: @@ -687,7 +649,7 @@ def get_field_domain_offset( # lower let-statement lambda node as a nested SDFG lambda_translator = GTIRToSDFG( - self.offset_provider_type, self.column_dim, lambda_symbols, lambda_field_offsets + self.offset_provider_type, lambda_symbols, lambda_field_offsets ) nsdfg = dace.SDFG(name=self.unique_nsdfg_name(sdfg, "lambda")) nsdfg.debuginfo = dace_utils.debug_info(node, default=sdfg.debuginfo) @@ -864,7 +826,6 @@ def visit_SymRef( def build_sdfg_from_gtir( ir: gtir.Program, offset_provider_type: gtx_common.OffsetProviderType, - column_dim: Optional[gtx_common.Dimension] = None, ) -> dace.SDFG: """ Receives a GTIR program and lowers it to a DaCe SDFG. 
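Note: as the hunks above and below show, `build_sdfg_from_gtir` no longer accepts a `column_dim` argument. A minimal sketch of the updated call site, modeled on the `workflow.py` change further down in this diff (`ir` and `offset_provider` are placeholder names for a GTIR program node and its offset providers):

    from gt4py.next import common
    from gt4py.next.program_processors.runners.dace_fieldview import gtir_sdfg

    # Offset providers are reduced to their type description before lowering;
    # there is no column_dim parameter anymore.
    sdfg = gtir_sdfg.build_sdfg_from_gtir(
        ir, offset_provider_type=common.offset_provider_to_type(offset_provider)
    )
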
@@ -875,7 +836,6 @@ def build_sdfg_from_gtir( Args: ir: The GTIR program node to be lowered to SDFG offset_provider_type: The definitions of offset providers used by the program node - column_dim: Vertical dimension used for scan expressions. Returns: An SDFG in the DaCe canonical form (simplified) @@ -883,11 +843,8 @@ def build_sdfg_from_gtir( ir = gtir_type_inference.infer(ir, offset_provider_type=offset_provider_type) ir = ir_prune_casts.PruneCasts().visit(ir) - sdfg_genenerator = GTIRToSDFG(offset_provider_type, column_dim) + sdfg_genenerator = GTIRToSDFG(offset_provider_type) sdfg = sdfg_genenerator.visit(ir) assert isinstance(sdfg, dace.SDFG) - # TODO(edopao): remove inlining when DaCe transformations support LoopRegion construct - dace_sdfg_utils.inline_loop_blocks(sdfg) - return sdfg diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/utility.py b/src/gt4py/next/program_processors/runners/dace_fieldview/utility.py index ad120e2502..985d6a8a35 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/utility.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/utility.py @@ -42,6 +42,7 @@ def make_symbol_tuple(tuple_name: str, tuple_type: ts.TupleType) -> tuple[gtir.S ... (im.sym("a_1_0", fty), im.sym("a_1_1", sty)), ... ) """ + assert all(isinstance(t, ts.DataType) for t in tuple_type.types) fields = [(f"{tuple_name}_{i}", field_type) for i, field_type in enumerate(tuple_type.types)] return tuple( make_symbol_tuple(field_name, field_type) # type: ignore[misc] diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py b/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py index 407faf7ec1..a38a50d886 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py @@ -47,13 +47,13 @@ def generate_sdfg( self, ir: itir.Program, offset_provider: common.OffsetProvider, - column_dim: Optional[common.Dimension], + column_axis: Optional[common.Dimension], auto_opt: bool, on_gpu: bool, ) -> dace.SDFG: ir = itir_transforms.apply_fieldview_transforms(ir, offset_provider=offset_provider) sdfg = gtir_sdfg.build_sdfg_from_gtir( - ir, common.offset_provider_to_type(offset_provider), column_dim + ir, offset_provider_type=common.offset_provider_to_type(offset_provider) ) if auto_opt: diff --git a/src/gt4py/next/type_system/type_info.py b/src/gt4py/next/type_system/type_info.py index 66f8937dc5..983063a9cb 100644 --- a/src/gt4py/next/type_system/type_info.py +++ b/src/gt4py/next/type_system/type_info.py @@ -78,15 +78,15 @@ def type_class(symbol_type: ts.TypeSpec) -> Type[ts.TypeSpec]: >>> type_class(ts.TupleType(types=[])).__name__ 'TupleType' """ - match symbol_type: - case ts.DeferredType(constraint): - if constraint is None: - raise ValueError(f"No type information available for '{symbol_type}'.") - elif isinstance(constraint, tuple): - raise ValueError(f"Not sufficient type information available for '{symbol_type}'.") - return constraint - case ts.TypeSpec() as concrete_type: - return concrete_type.__class__ + if isinstance(symbol_type, ts.DeferredType): + constraint = symbol_type.constraint + if constraint is None: + raise ValueError(f"No type information available for '{symbol_type}'.") + elif isinstance(constraint, tuple): + raise ValueError(f"Not sufficient type information available for '{symbol_type}'.") + return constraint + if isinstance(symbol_type, ts.TypeSpec): + return symbol_type.__class__ raise ValueError( 
f"Invalid type for TypeInfo: requires '{ts.TypeSpec}', got '{type(symbol_type)}'." ) @@ -197,7 +197,7 @@ def apply_to_primitive_constituents( return fun(*symbol_types) -def extract_dtype(symbol_type: ts.TypeSpec) -> ts.ScalarType: +def extract_dtype(symbol_type: ts.TypeSpec) -> ts.ScalarType | ts.ListType: """ Extract the data type from ``symbol_type`` if it is either `FieldType` or `ScalarType`. @@ -234,7 +234,10 @@ def is_floating_point(symbol_type: ts.TypeSpec) -> bool: >>> is_floating_point(ts.FieldType(dims=[], dtype=ts.ScalarType(kind=ts.ScalarKind.FLOAT32))) True """ - return extract_dtype(symbol_type).kind in [ts.ScalarKind.FLOAT32, ts.ScalarKind.FLOAT64] + return isinstance(dtype := extract_dtype(symbol_type), ts.ScalarType) and dtype.kind in [ + ts.ScalarKind.FLOAT32, + ts.ScalarKind.FLOAT64, + ] def is_integer(symbol_type: ts.TypeSpec) -> bool: @@ -295,7 +298,10 @@ def is_number(symbol_type: ts.TypeSpec) -> bool: def is_logical(symbol_type: ts.TypeSpec) -> bool: - return extract_dtype(symbol_type).kind is ts.ScalarKind.BOOL + return ( + isinstance(dtype := extract_dtype(symbol_type), ts.ScalarType) + and dtype.kind is ts.ScalarKind.BOOL + ) def is_arithmetic(symbol_type: ts.TypeSpec) -> bool: @@ -385,11 +391,10 @@ def extract_dims(symbol_type: ts.TypeSpec) -> list[common.Dimension]: >>> extract_dims(ts.FieldType(dims=[I, J], dtype=ts.ScalarType(kind=ts.ScalarKind.INT64))) [Dimension(value='I', kind=), Dimension(value='J', kind=)] """ - match symbol_type: - case ts.ScalarType(): - return [] - case ts.FieldType(dims): - return dims + if isinstance(symbol_type, ts.ScalarType): + return [] + if isinstance(symbol_type, ts.FieldType): + return symbol_type.dims raise ValueError(f"Can not extract dimensions from '{symbol_type}'.") @@ -502,7 +507,9 @@ def promote( return types[0] elif all(isinstance(type_, (ts.ScalarType, ts.FieldType)) for type_ in types): dims = common.promote_dims(*(extract_dims(type_) for type_ in types)) - dtype = cast(ts.ScalarType, promote(*(extract_dtype(type_) for type_ in types))) + extracted_dtypes = [extract_dtype(type_) for type_ in types] + assert all(isinstance(dtype, ts.ScalarType) for dtype in extracted_dtypes) + dtype = cast(ts.ScalarType, promote(*extracted_dtypes)) # type: ignore[arg-type] # checked is `ScalarType` return ts.FieldType(dims=dims, dtype=dtype) raise TypeError("Expected a 'FieldType' or 'ScalarType'.") diff --git a/src/gt4py/next/type_system/type_specifications.py b/src/gt4py/next/type_system/type_specifications.py index fa8c9b9ab1..060d56aea2 100644 --- a/src/gt4py/next/type_system/type_specifications.py +++ b/src/gt4py/next/type_system/type_specifications.py @@ -6,21 +6,13 @@ # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import dataclass from typing import Iterator, Optional, Sequence, Union -from gt4py.eve.type_definitions import IntEnum -from gt4py.eve.utils import content_hash -from gt4py.next import common as func_common +from gt4py.eve import datamodels as eve_datamodels, type_definitions as eve_types +from gt4py.next import common -@dataclass(frozen=True) -class TypeSpec: - def __hash__(self) -> int: - return hash(content_hash(self)) - - def __init_subclass__(cls) -> None: - cls.__hash__ = TypeSpec.__hash__ # type: ignore[method-assign] +class TypeSpec(eve_datamodels.DataModel, kw_only=False, frozen=True): ... 
# type: ignore[call-arg] class DataType(TypeSpec): @@ -40,14 +32,12 @@ class CallableType: """ -@dataclass(frozen=True) class DeferredType(TypeSpec): """Dummy used to represent a type not yet inferred.""" constraint: Optional[type[TypeSpec] | tuple[type[TypeSpec], ...]] -@dataclass(frozen=True) class VoidType(TypeSpec): """ Return type of a function without return values. @@ -56,22 +46,20 @@ class VoidType(TypeSpec): """ -@dataclass(frozen=True) class DimensionType(TypeSpec): - dim: func_common.Dimension + dim: common.Dimension -@dataclass(frozen=True) class OffsetType(TypeSpec): # TODO(havogt): replace by ConnectivityType - source: func_common.Dimension - target: tuple[func_common.Dimension] | tuple[func_common.Dimension, func_common.Dimension] + source: common.Dimension + target: tuple[common.Dimension] | tuple[common.Dimension, common.Dimension] def __str__(self) -> str: return f"Offset[{self.source}, {self.target}]" -class ScalarKind(IntEnum): +class ScalarKind(eve_types.IntEnum): BOOL = 1 INT32 = 32 INT64 = 64 @@ -80,7 +68,6 @@ class ScalarKind(IntEnum): STRING = 3001 -@dataclass(frozen=True) class ScalarType(DataType): kind: ScalarKind shape: Optional[list[int]] = None @@ -92,31 +79,43 @@ def __str__(self) -> str: return f"{kind_str}{self.shape}" -@dataclass(frozen=True) -class TupleType(DataType): - types: list[DataType] - - def __str__(self) -> str: - return f"tuple[{', '.join(map(str, self.types))}]" +class ListType(DataType): + """Represents a neighbor list in the ITIR representation. - def __iter__(self) -> Iterator[DataType]: - yield from self.types + Note: not used in the frontend. + """ - def __len__(self) -> int: - return len(self.types) + element_type: DataType + # TODO(havogt): the `offset_type` is not yet used in type_inference, + # it is meant to describe the neighborhood (via the local dimension) + offset_type: Optional[common.Dimension] = None -@dataclass(frozen=True) class FieldType(DataType, CallableType): - dims: list[func_common.Dimension] - dtype: ScalarType + dims: list[common.Dimension] + dtype: ScalarType | ListType def __str__(self) -> str: dims = "..." if self.dims is Ellipsis else f"[{', '.join(dim.value for dim in self.dims)}]" return f"Field[{dims}, {self.dtype}]" -@dataclass(frozen=True) +class TupleType(DataType): + # TODO(tehrengruber): Remove `DeferredType` again. This was erroneously + # introduced before we checked the annotations at runtime. All attributes of + # a type that are types themselves must be concrete. 
+ types: list[DataType | DimensionType | DeferredType] + + def __str__(self) -> str: + return f"tuple[{', '.join(map(str, self.types))}]" + + def __iter__(self) -> Iterator[DataType | DimensionType | DeferredType]: + yield from self.types + + def __len__(self) -> int: + return len(self.types) + + class FunctionType(TypeSpec, CallableType): pos_only_args: Sequence[TypeSpec] pos_or_kw_args: dict[str, TypeSpec] diff --git a/src/gt4py/next/type_system/type_translation.py b/src/gt4py/next/type_system/type_translation.py index 62a6781316..e601556e55 100644 --- a/src/gt4py/next/type_system/type_translation.py +++ b/src/gt4py/next/type_system/type_translation.py @@ -10,7 +10,6 @@ import builtins import collections.abc -import dataclasses import functools import types import typing @@ -105,7 +104,7 @@ def from_type_hint( raise ValueError(f"Unbound tuples '{type_hint}' are not allowed.") tuple_types = [recursive_make_symbol(arg) for arg in args] assert all(isinstance(elem, ts.DataType) for elem in tuple_types) - return ts.TupleType(types=tuple_types) # type: ignore[arg-type] # checked in assert + return ts.TupleType(types=tuple_types) case common.Field: if (n_args := len(args)) != 2: @@ -168,7 +167,6 @@ def from_type_hint( raise ValueError(f"'{type_hint}' type is not supported.") -@dataclasses.dataclass(frozen=True) class UnknownPythonObject(ts.TypeSpec): _object: Any @@ -217,9 +215,9 @@ def from_value(value: Any) -> ts.TypeSpec: # not needed anymore. elems = [from_value(el) for el in value] assert all(isinstance(elem, ts.DataType) for elem in elems) - return ts.TupleType(types=elems) # type: ignore[arg-type] # checked in assert + return ts.TupleType(types=elems) elif isinstance(value, types.ModuleType): - return UnknownPythonObject(_object=value) + return UnknownPythonObject(value) else: type_ = xtyping.infer_type(value, annotate_callable_kwargs=True) symbol_type = from_type_hint(type_) diff --git a/tests/cartesian_tests/integration_tests/multi_feature_tests/test_code_generation.py b/tests/cartesian_tests/integration_tests/multi_feature_tests/test_code_generation.py index 4609184547..5a43144b4b 100644 --- a/tests/cartesian_tests/integration_tests/multi_feature_tests/test_code_generation.py +++ b/tests/cartesian_tests/integration_tests/multi_feature_tests/test_code_generation.py @@ -582,17 +582,6 @@ def test_K_offset_write(backend): # Cuda generates bad code for the K offset if backend == "cuda": pytest.skip("cuda K-offset write generates bad code") - if backend in ["dace:gpu"]: - import cupy as cp - - if cp.cuda.runtime.runtimeGetVersion() < 12000: - pytest.skip( - f"{backend} backend with CUDA 11 and/or GCC 10.3 is not capable of K offset write, update CUDA/GCC if possible" - ) - if backend in ["gt:gpu"]: - pytest.skip( - f"{backend} backend is not capable of K offset write, bug remains unsolved: https://github.com/GridTools/gt4py/issues/1754" - ) arraylib = get_array_library(backend) array_shape = (1, 1, 4) @@ -664,17 +653,6 @@ def backward(A: Field[np.float64], B: Field[np.float64], scalar: np.float64): def test_K_offset_write_conditional(backend): if backend == "cuda": pytest.skip("Cuda backend is not capable of K offset write") - if backend in ["dace:gpu"]: - import cupy as cp - - if cp.cuda.runtime.runtimeGetVersion() < 12000: - pytest.skip( - f"{backend} backend with CUDA 11 and/or GCC 10.3 is not capable of K offset write, update CUDA/GCC if possible" - ) - if backend in ["gt:gpu"]: - pytest.skip( - f"{backend} backend is not capable of K offset write, bug remains unsolved: 
https://github.com/GridTools/gt4py/issues/1754" - ) arraylib = get_array_library(backend) array_shape = (1, 1, 4) @@ -682,7 +660,7 @@ def test_K_offset_write_conditional(backend): @gtscript.stencil(backend=backend) def column_physics_conditional(A: Field[np.float64], B: Field[np.float64], scalar: np.float64): - with computation(BACKWARD), interval(1, None): + with computation(BACKWARD), interval(1, -1): if A > 0 and B > 0: A[0, 0, -1] = scalar B[0, 0, 1] = A @@ -700,6 +678,42 @@ def column_physics_conditional(A: Field[np.float64], B: Field[np.float64], scala backend=backend, aligned_index=(0, 0, 0), shape=array_shape, dtype=np.float64 ) column_physics_conditional(A, B, 2.0) + # Manual unroll of the above + # Starts with + # - A[...] = [40, 41, 42, 43] + # - B[...] = [1, 1, 1, 1] + # Now in-stencil + # ITERATION k = 2 of [2:1] + # if condition + # - A[2] == 42 && B[2] == 1 => True + # - A[1] = 2.0 + # - B[3] = A[2] = 42 + # while + # - lev = 1 + # - A[2] == 42 && B[2] == 1 => True + # - A[3] = -1 + # - B[2] = -1 + # - lev = 2 + # - A[2] == 42 && B[2] == -1 => False + # End of iteration state + # - A[...] = A[40, 2.0, 2.0, -1] + # - B[...] = A[1, 1, -1, 42] + # ITERATION k = 1 of [2:1] + # if condition + # - A[1] == 2.0 && B[1] == 1 => True + # - A[0] = 2.0 + # - B[2] = A[1] = 2.0 + # while + # - lev = 1 + # - A[1] == 2.0 && B[1] == 1 => True + # - A[2] = -1 + # - B[1] = -1 + # - lev = 2 + # - A[1] == 2.0 && B[2] == -1 => False + # End of stencil state + # - A[...] = A[2.0, 2.0, -1, -1] + # - B[...] = A[1, -1, 2.0, 42] + assert (A[0, 0, :] == arraylib.array([2, 2, -1, -1])).all() assert (B[0, 0, :] == arraylib.array([1, -1, 2, 42])).all() diff --git a/tests/eve_tests/unit_tests/test_datamodels.py b/tests/eve_tests/unit_tests/test_datamodels.py index 05be5f3db0..75b07fd8a0 100644 --- a/tests/eve_tests/unit_tests/test_datamodels.py +++ b/tests/eve_tests/unit_tests/test_datamodels.py @@ -10,9 +10,9 @@ import enum import numbers +import sys import types import typing -from typing import Set # noqa: F401 [unused-import] used in exec() context from typing import ( Any, Callable, @@ -26,6 +26,7 @@ MutableSequence, Optional, Sequence, + Set, # noqa: F401 [unused-import] used in exec() context Tuple, Type, TypeVar, @@ -555,6 +556,18 @@ class WrongModel: ("typing.MutableSequence[int]", ([1, 2, 3], []), ((1, 2, 3), tuple(), 1, [1.0], {1})), ("typing.Set[int]", ({1, 2, 3}, set()), (1, [1], (1,), {1: None})), ("typing.Union[int, float, str]", [1, 3.0, "one"], [[1], [], 1j]), + pytest.param( + "int | float | str", + [1, 3.0, "one"], + [[1], [], 1j], + marks=pytest.mark.skipif(sys.version_info < (3, 10), reason="| union syntax not supported"), + ), + pytest.param( + "typing.List[int|float]", + [[1, 2.0], []], + [1, 2.0, [1, "2.0"]], + marks=pytest.mark.skipif(sys.version_info < (3, 10), reason="| union syntax not supported"), + ), ("typing.Optional[int]", [1, None], [[1], [], 1j]), ( "typing.Dict[Union[int, float, str], Union[Tuple[int, Optional[float]], Set[int]]]", diff --git a/tests/next_tests/definitions.py b/tests/next_tests/definitions.py index 3fca8460e5..942318a8c7 100644 --- a/tests/next_tests/definitions.py +++ b/tests/next_tests/definitions.py @@ -104,7 +104,6 @@ class ProgramFormatterId(_PythonObjectIdMixin, str, enum.Enum): USES_SCAN_WITHOUT_FIELD_ARGS = "uses_scan_without_field_args" USES_SCAN_NESTED = "uses_scan_nested" USES_SCAN_REQUIRING_PROJECTOR = "uses_scan_requiring_projector" -USES_SCAN_1D_FIELD = "uses_scan_1d_field" USES_SPARSE_FIELDS = "uses_sparse_fields" 
USES_SPARSE_FIELDS_AS_OUTPUT = "uses_sparse_fields_as_output" USES_REDUCTION_WITH_ONLY_SPARSE_FIELDS = "uses_reduction_with_only_sparse_fields" @@ -184,17 +183,9 @@ class ProgramFormatterId(_PythonObjectIdMixin, str, enum.Enum): EmbeddedIds.NUMPY_EXECUTION: EMBEDDED_SKIP_LIST, EmbeddedIds.CUPY_EXECUTION: EMBEDDED_SKIP_LIST, OptionalProgramBackendId.DACE_CPU: DACE_SKIP_TEST_LIST, - OptionalProgramBackendId.DACE_GPU: DACE_SKIP_TEST_LIST - + [ - # dace issue https://github.com/spcl/dace/issues/1773 - (USES_SCAN_1D_FIELD, XFAIL, UNSUPPORTED_MESSAGE), - ], + OptionalProgramBackendId.DACE_GPU: DACE_SKIP_TEST_LIST, OptionalProgramBackendId.DACE_CPU_NO_OPT: DACE_SKIP_TEST_LIST, - OptionalProgramBackendId.DACE_GPU_NO_OPT: DACE_SKIP_TEST_LIST - + [ - # dace issue https://github.com/spcl/dace/issues/1773 - (USES_SCAN_1D_FIELD, XFAIL, UNSUPPORTED_MESSAGE), - ], + OptionalProgramBackendId.DACE_GPU_NO_OPT: DACE_SKIP_TEST_LIST, ProgramBackendId.GTFN_CPU: GTFN_SKIP_TEST_LIST + [(USES_SCAN_NESTED, XFAIL, UNSUPPORTED_MESSAGE)], ProgramBackendId.GTFN_CPU_IMPERATIVE: GTFN_SKIP_TEST_LIST diff --git a/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py b/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py index b13e7c3f60..2e40cb897a 100644 --- a/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py +++ b/tests/next_tests/integration_tests/feature_tests/ffront_tests/test_execution.py @@ -819,7 +819,6 @@ def testee(a: cases.EField, b: cases.EField) -> cases.VField: @pytest.mark.uses_scan -@pytest.mark.uses_scan_1d_field def test_ternary_scan(cartesian_case): @gtx.scan_operator(axis=KDim, forward=True, init=0.0) def simple_scan_operator(carry: float, a: float) -> float: diff --git a/tests/next_tests/unit_tests/iterator_tests/test_type_inference.py b/tests/next_tests/unit_tests/iterator_tests/test_type_inference.py index 7eb4e86adb..b6b70af07c 100644 --- a/tests/next_tests/unit_tests/iterator_tests/test_type_inference.py +++ b/tests/next_tests/unit_tests/iterator_tests/test_type_inference.py @@ -46,8 +46,8 @@ bool_type = ts.ScalarType(kind=ts.ScalarKind.BOOL) int_type = ts.ScalarType(kind=ts.ScalarKind.INT32) float64_type = ts.ScalarType(kind=ts.ScalarKind.FLOAT64) -float64_list_type = it_ts.ListType(element_type=float64_type) -int_list_type = it_ts.ListType(element_type=int_type) +float64_list_type = ts.ListType(element_type=float64_type) +int_list_type = ts.ListType(element_type=int_type) float_i_field = ts.FieldType(dims=[IDim], dtype=float64_type) float_vertex_k_field = ts.FieldType(dims=[Vertex, KDim], dtype=float64_type) @@ -77,8 +77,8 @@ def expression_test_cases(): (im.deref(im.ref("it", it_on_e_of_e_type)), it_on_e_of_e_type.element_type), (im.call("can_deref")(im.ref("it", it_on_e_of_e_type)), bool_type), (im.if_(True, 1, 2), int_type), - (im.call("make_const_list")(True), it_ts.ListType(element_type=bool_type)), - (im.call("list_get")(0, im.ref("l", it_ts.ListType(element_type=bool_type))), bool_type), + (im.call("make_const_list")(True), ts.ListType(element_type=bool_type)), + (im.call("list_get")(0, im.ref("l", ts.ListType(element_type=bool_type))), bool_type), ( im.call("named_range")(itir.AxisLiteral(value="Vertex"), 0, 1), it_ts.NamedRangeType(dim=Vertex), @@ -110,7 +110,7 @@ def expression_test_cases(): # neighbors ( im.neighbors("E2V", im.ref("a", it_on_e_of_e_type)), - it_ts.ListType(element_type=it_on_e_of_e_type.element_type), + ts.ListType(element_type=it_on_e_of_e_type.element_type), ), # cast 
(im.call("cast_")(1, "int32"), int_type), diff --git a/tox.ini b/tox.ini index 8da0e45810..e7bfd4a3e4 100644 --- a/tox.ini +++ b/tox.ini @@ -9,19 +9,14 @@ envlist = storage-py{310}-{internal,dace}-{cpu} # docs labels = - test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-internal-py39-cpu, \ - cartesian-internal-py310-cpu, cartesian-py311-internal-cpu, \ - cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu - test-eve-cpu = eve-py38, eve-py39, eve-py310, eve-py311 + test-cartesian-cpu = cartesian-internal-py310-cpu, cartesian-py311-internal-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu + test-eve-cpu = eve-py310, eve-py311 test-next-cpu = next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu - test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ - storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu - test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, cartesian-py311-internal-cpu, \ - cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu, \ - eve-py38, eve-py39, eve-py310, eve-py311, \ + test-storage-cpu = storage-py310-internal-cpu, storage-py311-internal-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu + test-cpu = cartesian-py310-internal-cpu, cartesian-py311-internal-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu, \ + eve-py310, eve-py311, \ next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ - storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ - storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu + storage-py310-internal-cpu, storage-py311-internal-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu [testenv] deps = -r {tox_root}{/}{env:ENV_REQUIREMENTS_FILE:requirements-dev.txt} @@ -42,7 +37,7 @@ set_env = PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program:UserWarning} # -- Primary tests -- -[testenv:cartesian-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:cartesian-py{310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.cartesian' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH, CXX, CC, OPENMP_CPPFLAGS, OPENMP_LDFLAGS, PIP_USER, PYTHONUSERBASE allowlist_externals = @@ -65,7 +60,7 @@ commands = # coverage json --rcfile=setup.cfg # coverage html --rcfile=setup.cfg --show-contexts -[testenv:eve-py{38,39,310,311}] +[testenv:eve-py{310,311}] description = Run 'gt4py.eve' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} {posargs} tests{/}eve_tests @@ -89,7 +84,7 @@ commands = " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next -[testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:storage-py{310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ @@ -112,7 +107,7 @@ commands = python -m pytest --nbmake examples -v -n {env:NUM_PROCESSES:1} # -- Other artefacts -- -[testenv:dev-py{38,39,310,311}{-atlas,}] +[testenv:dev-py{310,311}{-atlas,}] description = 
Initialize development environment for gt4py deps = -r {tox_root}{/}requirements-dev.txt @@ -141,17 +136,13 @@ set_env = # git add _static # commands_post = -[testenv:requirements-{base,py38,py39,py310,py311}] +[testenv:requirements-{base,py310,py311}] description = base: Update pinned development requirements - py38: Update requirements for testing a specific python version - py39: Update requirements for testing a specific python version py310: Update requirements for testing a specific python version py311: Update requirements for testing a specific python version base_python = - base: py38 - py38: py38 - py39: py39 + base: py310 py310: py310 py311: py311 deps =
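Note: the substitution that recurs throughout this diff moves `ListType` from the iterator-specific module (`gt4py.next.iterator.type_system.type_specifications`, imported as `it_ts`) into the core type system (`gt4py.next.type_system.type_specifications`, imported as `ts`). At the same time `TypeSpec` becomes an eve datamodel (`frozen=True`, `kw_only=False`), so its subclasses drop their explicit `@dataclass(frozen=True)` decorators while keeping keyword construction and value-based equality. A minimal usage sketch under these assumptions:

    from gt4py.next.type_system import type_specifications as ts

    # ListType now lives in the core type system; offset_type is optional and
    # is meant to describe the neighborhood via the local dimension.
    bool_list = ts.ListType(element_type=ts.ScalarType(kind=ts.ScalarKind.BOOL))

    # Datamodel instances compare by value, like the frozen dataclasses they replace.
    assert bool_list == ts.ListType(element_type=ts.ScalarType(kind=ts.ScalarKind.BOOL))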
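Relatedly, `extract_dtype` is widened to return `ts.ScalarType | ts.ListType`, and the scalar predicates in `type_info` now guard on the scalar case, so a list-valued dtype makes them evaluate to `False` instead of failing on a missing `kind`. A small sketch of the implied behavior (illustrative values only):

    from gt4py.next import common
    from gt4py.next.type_system import type_info, type_specifications as ts

    I = common.Dimension("I")
    f64 = ts.ScalarType(kind=ts.ScalarKind.FLOAT64)

    # Plain float field: extract_dtype returns a ScalarType, the kind check applies.
    assert type_info.is_floating_point(ts.FieldType(dims=[I], dtype=f64))

    # Field of neighbor lists: extract_dtype returns a ListType, so the
    # isinstance guard short-circuits and the predicate is False.
    nb_field = ts.FieldType(dims=[I], dtype=ts.ListType(element_type=f64))
    assert not type_info.is_floating_point(nb_field)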