From cf8a8d3ed17727f90e3306bbcc1a7d94a23b68b8 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Thu, 25 Jul 2024 21:30:25 +0200 Subject: [PATCH 01/24] Simplify tox generative environment names and related CI actions --- .github/workflows/test-cartesian.yml | 4 +- .github/workflows/test-next.yml | 4 +- tox.ini | 70 +++++++++++++++------------- 3 files changed, 42 insertions(+), 36 deletions(-) diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml index 5da467e436..7b95c632d2 100644 --- a/.github/workflows/test-cartesian.yml +++ b/.github/workflows/test-cartesian.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - backends: [internal-cpu, dace-cpu] + tox-factor: [cpu, dace-cpu] steps: - uses: actions/checkout@v2 - name: Install boost @@ -59,4 +59,4 @@ jobs: run: | pyversion=${{ matrix.python-version }} pyversion_no_dot=${pyversion//./} - tox run -e cartesian-py${pyversion_no_dot}-${{ matrix.backends }} + tox run -e cartesian-py${pyversion_no_dot}-${{ matrix.tox-factor }} diff --git a/.github/workflows/test-next.yml b/.github/workflows/test-next.yml index 0eda38e2cc..5f204f9eaa 100644 --- a/.github/workflows/test-next.yml +++ b/.github/workflows/test-next.yml @@ -21,7 +21,7 @@ jobs: strategy: matrix: python-version: ["3.10", "3.11"] - tox-env-factor: ["nomesh", "atlas"] + tox-factor: ["cpu", "atlas-cpu"] os: ["ubuntu-latest"] fail-fast: false @@ -59,7 +59,7 @@ jobs: run: | pyversion=${{ matrix.python-version }} pyversion_no_dot=${pyversion//./} - tox run -e next-py${pyversion_no_dot}-${{ matrix.tox-env-factor }}-cpu + tox run -e next-py${pyversion_no_dot}-${{ matrix.tox-factor }}-cpu # mv coverage.json coverage-py${{ matrix.python-version }}-${{ matrix.os }}-${{ matrix.tox-env-factor }}-cpu.json # - name: Upload coverage.json artifact # uses: actions/upload-artifact@v3 diff --git a/tox.ini b/tox.ini index e6a5ab36c6..3cd7bb1e02 100644 --- a/tox.ini +++ b/tox.ini @@ -3,31 +3,31 @@ 
requires = tox>=4.2 virtualenv>20.2 envlist = - cartesian-py{310}-{internal,dace}-{cpu} + cartesian-py{310}{-,-dace-}{cpu} eve-py{310} - next-py{310}-{nomesh,atlas} - storage-py{310}-{internal,dace}-{cpu} + next-py{310}{-,-atlas-}{cpu} + storage-py{310}{-,-dace-}{cpu} linters-py{310} -# docs + # docs labels = - test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ + test-cartesian-cpu = cartesian-py38-cpu, cartesian-py39-cpu, cartesian-py310-cpu, \ + cartesian-py311-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ cartesian-py311-dace-cpu test-eve-cpu = eve-py38, eve-py39, eve-py310, eve-py311 - test-next-cpu = next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas + test-next-cpu = next-py310-cpu, next-py311-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu - test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, \ - storage-py311-internal-cpu, storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, \ + test-storage-cpu = storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, \ + storage-py311-cpu, storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, \ storage-py311-dace-cpu - test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ + test-cpu = cartesian-py38-cpu, cartesian-py39-cpu, cartesian-py310-cpu, \ + cartesian-py311-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ cartesian-py311-dace-cpu, \ eve-py38, eve-py39, eve-py310, eve-py311, \ - next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas, \ - storage-py38-internal-cpu, storage-py39-internal-cpu, 
storage-py310-internal-cpu, storage-py311-internal-cpu, \ + next-py310-cpu, next-py311-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ + storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, storage-py311-internal-cpu, \ storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu [testenv] @@ -46,10 +46,10 @@ wheel_build_env = .pkg pass_env = CUDAARCHS, NUM_PROCESSES, GT4PY_* set_env = PYTEST_ADDOPTS = --color=auto --instafail - PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program ':UserWarning} + PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program:UserWarning} # -- Primary tests -- -[testenv:cartesian-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:cartesian-py{38,39,310,311}{-,-dace-}{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.cartesian' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH, CXX, CC, OPENMP_CPPFLAGS, OPENMP_LDFLAGS, PIP_USER, PYTHONUSERBASE allowlist_externals = @@ -59,16 +59,18 @@ allowlist_externals = ldd rm commands = - internal-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu and not requires_dace" {posargs} tests{/}cartesian_tests - internal-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu and not requires_dace" {posargs} tests{/}cartesian_tests - dace-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu and requires_dace" {posargs} tests{/}cartesian_tests - dace-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu and requires_dace" {posargs} tests{/}cartesian_tests + python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + dace: requires_dace \ + !dace: not requires_dace \ + 
cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* -;commands_post = -; coverage json --rcfile=setup.cfg -; coverage html --rcfile=setup.cfg --show-contexts +#commands_post = +# coverage json --rcfile=setup.cfg +# coverage html --rcfile=setup.cfg --show-contexts [testenv:eve-py{38,39,310,311}] description = Run 'gt4py.eve' tests @@ -76,7 +78,7 @@ commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} {posargs} tests{/}eve_tests python -m pytest --doctest-modules src{/}gt4py{/}eve -[testenv:next-py{310,311}-{nomesh,atlas}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:next-py{310,311}{-,-atlas-}{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.next' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH deps = @@ -86,18 +88,22 @@ set_env = {[testenv]set_env} PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = - nomesh-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_atlas and not requires_gpu" {posargs} tests{/}next_tests - nomesh-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_atlas and requires_gpu" {posargs} tests{/}next_tests - atlas-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_atlas and not requires_gpu" {posargs} tests{/}next_tests - # atlas-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_atlas and requires_gpu" {posargs} tests{/}next_tests # TODO(ricoh): activate when such tests exist + python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + atlas: requires_atlas \ + !atlas: not requires_atlas \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}next_tests pytest 
--doctest-modules src{/}gt4py{/}next -[testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:storage-py{38,39,310,311}{-,-dace-}{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = - cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu" {posargs} tests{/}storage_tests - {cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu" {posargs} tests{/}storage_tests - #pytest doctest-modules {posargs} src{/}gt4py{/}storage + python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + cpu: not requires_gpu \ + {cuda,cuda11x,cuda12x}: requires_gpu + " {posargs} tests{/}storage_tests + pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- [testenv:linters-py{38,39,310,311}] @@ -119,7 +125,7 @@ commands = python -m pytest --nbmake examples -v -n {env:NUM_PROCESSES:1} # -- Other artefacts -- -[testenv:dev-py{38,39,310,311}{-atlas,}] +[testenv:dev-py{38,39,310,311}{-atlas}] description = Initialize development environment for gt4py deps = -r {tox_root}{/}requirements-dev.txt From c3c70f46f259de9bb8f8883347a6736564692857 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Thu, 25 Jul 2024 21:35:11 +0200 Subject: [PATCH 02/24] Fix format --- tox.ini | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tox.ini b/tox.ini index 3cd7bb1e02..13d1cb10d9 100644 --- a/tox.ini +++ b/tox.ini @@ -60,11 +60,11 @@ allowlist_externals = rm commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - dace: requires_dace \ - !dace: not requires_dace \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}cartesian_tests + dace: requires_dace \ + !dace: not requires_dace \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules 
--doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* @@ -89,20 +89,20 @@ set_env = PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - atlas: requires_atlas \ - !atlas: not requires_atlas \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}next_tests + atlas: requires_atlas \ + !atlas: not requires_atlas \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next [testenv:storage-py{38,39,310,311}{-,-dace-}{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - cpu: not requires_gpu \ - {cuda,cuda11x,cuda12x}: requires_gpu - " {posargs} tests{/}storage_tests + cpu: not requires_gpu \ + {cuda,cuda11x,cuda12x}: requires_gpu + " {posargs} tests{/}storage_tests pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- From 91b6548095288e0ed465a31df16413d0d5b06c28 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Thu, 25 Jul 2024 21:36:39 +0200 Subject: [PATCH 03/24] Fix dev env factor --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 13d1cb10d9..e9faf2cb2f 100644 --- a/tox.ini +++ b/tox.ini @@ -125,7 +125,7 @@ commands = python -m pytest --nbmake examples -v -n {env:NUM_PROCESSES:1} # -- Other artefacts -- -[testenv:dev-py{38,39,310,311}{-atlas}] +[testenv:dev-py{38,39,310,311}{,-atlas}] description = Initialize development environment for gt4py deps = -r {tox_root}{/}requirements-dev.txt From d10cc42d69f03ad4c5b4025e6d8a0f20407d9357 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Thu, 25 Jul 2024 21:39:41 +0200 Subject: [PATCH 04/24] Fix 'internal' renaming leftovers --- 
.github/workflows/test-cartesian-fallback.yml | 2 +- .github/workflows/test-storage-fallback.yml | 2 +- .github/workflows/test-storage.yml | 4 ++-- CONTRIBUTING.md | 2 +- tox.ini | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml index ed38a4f4b0..fdfff29cd7 100644 --- a/.github/workflows/test-cartesian-fallback.yml +++ b/.github/workflows/test-cartesian-fallback.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - backends: [internal-cpu, dace-cpu] + tox-factor: cpu, dace-cpu] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml index a296e55e59..03fc8e6092 100644 --- a/.github/workflows/test-storage-fallback.yml +++ b/.github/workflows/test-storage-fallback.yml @@ -20,7 +20,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - backends: [internal-cpu, dace-cpu] + tox-factor: cpu, dace-cpu] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml index c9f280e651..c9c4b090f2 100644 --- a/.github/workflows/test-storage.yml +++ b/.github/workflows/test-storage.yml @@ -24,7 +24,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - backends: [internal-cpu, dace-cpu] + tox-factor: cpu, dace-cpu] os: ["ubuntu-latest"] fail-fast: false @@ -51,7 +51,7 @@ jobs: run: | pyversion=${{ matrix.python-version }} pyversion_no_dot=${pyversion//./} - tox run -e storage-py${pyversion_no_dot}-${{ matrix.backends }} + tox run -e storage-py${pyversion_no_dot}-${{ matrix.tox-factor }} # mv coverage.json coverage-py${{ matrix.python-version }}-${{ matrix.os }}.json # - name: Upload coverage.json artifact # uses: actions/upload-artifact@v3 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 537a3b5651..2e9e3f8a09 100644 --- 
a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -150,7 +150,7 @@ We recommended you to use `tox` for most development-related tasks, like running tox list # Run a specific task environment -tox run -e cartesian-py38-internal-cpu +tox run -e cartesian-py38-cpu ``` Check `tox` documentation (`tox --help`) for the complete reference. diff --git a/tox.ini b/tox.ini index e9faf2cb2f..a23d5f8dc0 100644 --- a/tox.ini +++ b/tox.ini @@ -27,7 +27,7 @@ labels = cartesian-py311-dace-cpu, \ eve-py38, eve-py39, eve-py310, eve-py311, \ next-py310-cpu, next-py311-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ - storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, storage-py311-internal-cpu, \ + storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, storage-py311-cpu, \ storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu [testenv] From 458ff9c7e114cf2754855984aaa83bad26442c96 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Thu, 25 Jul 2024 21:59:55 +0200 Subject: [PATCH 05/24] More leftovers of the removal of dummy markers --- ci/cscs-ci.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ci/cscs-ci.yml b/ci/cscs-ci.yml index b8902f37f7..e37546d0cc 100644 --- a/ci/cscs-ci.yml +++ b/ci/cscs-ci.yml @@ -159,23 +159,23 @@ build_py38_image_x86_64: parallel: matrix: - SUBPACKAGE: [cartesian, storage] - VARIANT: [-internal, -dace] - SUBVARIANT: [-cuda11x, -cpu] + VARIANT: ['-', -dace-] + SUBVARIANT: [cuda11x, cpu] - SUBPACKAGE: eve - SUBPACKAGE: next - VARIANT: [-nomesh, -atlas] - SUBVARIANT: [-cuda11x, -cpu] + VARIANT: ['-', -atlas-] + SUBVARIANT: [cuda11x, cpu] .test_helper_aarch64: extends: [.container-runner-todi-gh200, .test_helper] parallel: matrix: - SUBPACKAGE: [cartesian, storage] - VARIANT: [-internal, -dace] - SUBVARIANT: [-cuda12x, -cpu] + VARIANT: ['-', -dace-] + SUBVARIANT: [cuda12x, cpu] - SUBPACKAGE: eve - SUBPACKAGE: next - VARIANT: [-nomesh, -atlas] - SUBVARIANT: [-cuda12x, -cpu] + 
VARIANT: ['-', -atlas-] + SUBVARIANT: [cuda12x, cpu] before_script: # TODO: remove start of CUDA MPS daemon once CI-CD can handle CRAY_CUDA_MPS - CUDA_MPS_PIPE_DIRECTORY="/tmp/nvidia-mps" nvidia-cuda-mps-control -d From 99a33cd3957d07601adf8d700e938a025a8312b9 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:16:11 +0200 Subject: [PATCH 06/24] Put back the removed internal and nomesh factors --- .github/workflows/test-cartesian-fallback.yml | 2 +- .github/workflows/test-cartesian.yml | 2 +- .github/workflows/test-next-fallback.yml | 2 +- .github/workflows/test-next.yml | 2 +- .github/workflows/test-storage-fallback.yml | 2 +- .github/workflows/test-storage.yml | 2 +- .pre-commit-config.yaml | 1 + ci/cscs-ci.yml | 16 ++--- tox.ini | 68 +++++++++---------- 9 files changed, 46 insertions(+), 51 deletions(-) diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml index fdfff29cd7..c3649a2bab 100644 --- a/.github/workflows/test-cartesian-fallback.yml +++ b/.github/workflows/test-cartesian-fallback.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: cpu, dace-cpu] + tox-factor: [interal-cpu, dace-cpu] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml index 7b95c632d2..9cd9bea92c 100644 --- a/.github/workflows/test-cartesian.yml +++ b/.github/workflows/test-cartesian.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [cpu, dace-cpu] + tox-factor: [interal-cpu, dace-cpu] steps: - uses: actions/checkout@v2 - name: Install boost diff --git a/.github/workflows/test-next-fallback.yml b/.github/workflows/test-next-fallback.yml index 913acc2e08..0efa7e0527 100644 --- a/.github/workflows/test-next-fallback.yml +++ b/.github/workflows/test-next-fallback.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: 
python-version: ["3.10", "3.11"] - tox-env-factor: ["nomesh", "atlas"] + tox-factor: ["nomesh-cpu", "atlas-cpu"] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-next.yml b/.github/workflows/test-next.yml index 5f204f9eaa..d3b5a7e245 100644 --- a/.github/workflows/test-next.yml +++ b/.github/workflows/test-next.yml @@ -21,7 +21,7 @@ jobs: strategy: matrix: python-version: ["3.10", "3.11"] - tox-factor: ["cpu", "atlas-cpu"] + tox-factor: ["nomesh-cpu", "atlas-cpu"] os: ["ubuntu-latest"] fail-fast: false diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml index 03fc8e6092..74bd55e130 100644 --- a/.github/workflows/test-storage-fallback.yml +++ b/.github/workflows/test-storage-fallback.yml @@ -20,7 +20,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: cpu, dace-cpu] + tox-factor: [interal-cpu, dace-cpu] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml index c9c4b090f2..769e5a3b22 100644 --- a/.github/workflows/test-storage.yml +++ b/.github/workflows/test-storage.yml @@ -24,7 +24,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: cpu, dace-cpu] + tox-factor: [interal-cpu, dace-cpu] os: ["ubuntu-latest"] fail-fast: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c3976a96fb..a1d69d18de 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,7 @@ repos: hooks: - id: pretty-format-ini args: [--autofix] + exclude: tox.ini - id: pretty-format-toml args: [--autofix] - id: pretty-format-yaml diff --git a/ci/cscs-ci.yml b/ci/cscs-ci.yml index e37546d0cc..b8902f37f7 100644 --- a/ci/cscs-ci.yml +++ b/ci/cscs-ci.yml @@ -159,23 +159,23 @@ build_py38_image_x86_64: parallel: matrix: - SUBPACKAGE: [cartesian, storage] - VARIANT: ['-', -dace-] - SUBVARIANT: [cuda11x, cpu] + VARIANT: 
[-internal, -dace] + SUBVARIANT: [-cuda11x, -cpu] - SUBPACKAGE: eve - SUBPACKAGE: next - VARIANT: ['-', -atlas-] - SUBVARIANT: [cuda11x, cpu] + VARIANT: [-nomesh, -atlas] + SUBVARIANT: [-cuda11x, -cpu] .test_helper_aarch64: extends: [.container-runner-todi-gh200, .test_helper] parallel: matrix: - SUBPACKAGE: [cartesian, storage] - VARIANT: ['-', -dace-] - SUBVARIANT: [cuda12x, cpu] + VARIANT: [-internal, -dace] + SUBVARIANT: [-cuda12x, -cpu] - SUBPACKAGE: eve - SUBPACKAGE: next - VARIANT: ['-', -atlas-] - SUBVARIANT: [cuda12x, cpu] + VARIANT: [-nomesh, -atlas] + SUBVARIANT: [-cuda12x, -cpu] before_script: # TODO: remove start of CUDA MPS daemon once CI-CD can handle CRAY_CUDA_MPS - CUDA_MPS_PIPE_DIRECTORY="/tmp/nvidia-mps" nvidia-cuda-mps-control -d diff --git a/tox.ini b/tox.ini index a23d5f8dc0..8317a53ce6 100644 --- a/tox.ini +++ b/tox.ini @@ -3,32 +3,26 @@ requires = tox>=4.2 virtualenv>20.2 envlist = - cartesian-py{310}{-,-dace-}{cpu} + cartesian-py{310}-{internal,dace}-{cpu} eve-py{310} - next-py{310}{-,-atlas-}{cpu} - storage-py{310}{-,-dace-}{cpu} + next-py{310}-{nomesh,atlas}-{cpu} + storage-py{310}-{internal,dace}-{cpu} linters-py{310} # docs labels = - test-cartesian-cpu = cartesian-py38-cpu, cartesian-py39-cpu, cartesian-py310-cpu, \ - cartesian-py311-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ - cartesian-py311-dace-cpu - + test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-internal-py39-cpu, \ + cartesian-internal-py310-cpu, cartesian-py311-internal-cpu, \ + cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu test-eve-cpu = eve-py38, eve-py39, eve-py310, eve-py311 - - test-next-cpu = next-py310-cpu, next-py311-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu - - test-storage-cpu = storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, \ - storage-py311-cpu, storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, \ - storage-py311-dace-cpu - - 
test-cpu = cartesian-py38-cpu, cartesian-py39-cpu, cartesian-py310-cpu, \ - cartesian-py311-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ - cartesian-py311-dace-cpu, \ - eve-py38, eve-py39, eve-py310, eve-py311, \ - next-py310-cpu, next-py311-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ - storage-py38-cpu, storage-py39-cpu, storage-py310-cpu, storage-py311-cpu, \ - storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu + test-next-cpu = next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu + test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ + storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu + test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, cartesian-py311-internal-cpu, \ + cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu, \ + eve-py38, eve-py39, eve-py310, eve-py311, \ + next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ + storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ + storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu [testenv] deps = -r {tox_root}{/}{env:ENV_REQUIREMENTS_FILE:requirements-dev.txt} @@ -49,7 +43,7 @@ set_env = PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program:UserWarning} # -- Primary tests -- -[testenv:cartesian-py{38,39,310,311}{-,-dace-}{cpu,cuda,cuda11x,cuda12x}] +[testenv:cartesian-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.cartesian' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH, 
CXX, CC, OPENMP_CPPFLAGS, OPENMP_LDFLAGS, PIP_USER, PYTHONUSERBASE allowlist_externals = @@ -60,11 +54,11 @@ allowlist_externals = rm commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - dace: requires_dace \ - !dace: not requires_dace \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}cartesian_tests + dace: requires_dace \ + internal: not requires_dace \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* @@ -78,7 +72,7 @@ commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} {posargs} tests{/}eve_tests python -m pytest --doctest-modules src{/}gt4py{/}eve -[testenv:next-py{310,311}{-,-atlas-}{cpu,cuda,cuda11x,cuda12x}] +[testenv:next-py{310,311}-{nomesh,atlas}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.next' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH deps = @@ -89,20 +83,20 @@ set_env = PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - atlas: requires_atlas \ - !atlas: not requires_atlas \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}next_tests + atlas: requires_atlas \ + nomesh: not requires_atlas \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next -[testenv:storage-py{38,39,310,311}{-,-dace-}{cpu,cuda,cuda11x,cuda12x}] +[testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - cpu: not requires_gpu \ - {cuda,cuda11x,cuda12x}: 
requires_gpu - " {posargs} tests{/}storage_tests + cpu: not requires_gpu \ + {cuda,cuda11x,cuda12x}: requires_gpu + " {posargs} tests{/}storage_tests pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- From 18c27f927db1c9780314db56429937559da32c09 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:17:03 +0200 Subject: [PATCH 07/24] Fix typos --- .github/workflows/test-cartesian-fallback.yml | 2 +- .github/workflows/test-cartesian.yml | 2 +- .github/workflows/test-storage-fallback.yml | 2 +- .github/workflows/test-storage.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml index c3649a2bab..a0d64a2f46 100644 --- a/.github/workflows/test-cartesian-fallback.yml +++ b/.github/workflows/test-cartesian-fallback.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [interal-cpu, dace-cpu] + tox-factor: [internal-cpu, dace-cpu] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml index 9cd9bea92c..19bef8fe75 100644 --- a/.github/workflows/test-cartesian.yml +++ b/.github/workflows/test-cartesian.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [interal-cpu, dace-cpu] + tox-factor: [internal-cpu, dace-cpu] steps: - uses: actions/checkout@v2 - name: Install boost diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml index 74bd55e130..2587fe217e 100644 --- a/.github/workflows/test-storage-fallback.yml +++ b/.github/workflows/test-storage-fallback.yml @@ -20,7 +20,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [interal-cpu, dace-cpu] + tox-factor: [internal-cpu, dace-cpu] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git 
a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml index 769e5a3b22..debabe75b4 100644 --- a/.github/workflows/test-storage.yml +++ b/.github/workflows/test-storage.yml @@ -24,7 +24,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [interal-cpu, dace-cpu] + tox-factor: [internal-cpu, dace-cpu] os: ["ubuntu-latest"] fail-fast: false From 01a795c60c9d5e03c93d2ee7ddcf3401d0125713 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:19:58 +0200 Subject: [PATCH 08/24] Hardcode cpu factor in CI tests --- .github/workflows/test-cartesian-fallback.yml | 2 +- .github/workflows/test-cartesian.yml | 4 ++-- .github/workflows/test-next-fallback.yml | 2 +- .github/workflows/test-next.yml | 2 +- .github/workflows/test-storage-fallback.yml | 2 +- .github/workflows/test-storage.yml | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml index a0d64a2f46..45bbdf271a 100644 --- a/.github/workflows/test-cartesian-fallback.yml +++ b/.github/workflows/test-cartesian-fallback.yml @@ -18,7 +18,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [internal-cpu, dace-cpu] + tox-factor: [internal, dace] steps: - run: 'echo "No build required"' diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml index 19bef8fe75..5d23577bc9 100644 --- a/.github/workflows/test-cartesian.yml +++ b/.github/workflows/test-cartesian.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [internal-cpu, dace-cpu] + tox-factor: [internal, dace] steps: - uses: actions/checkout@v2 - name: Install boost @@ -59,4 +59,4 @@ jobs: run: | pyversion=${{ matrix.python-version }} pyversion_no_dot=${pyversion//./} - tox run -e cartesian-py${pyversion_no_dot}-${{ matrix.tox-factor }} + tox run -e 
cartesian-py${pyversion_no_dot}-${{ matrix.tox-factor }}-cpu diff --git a/.github/workflows/test-next-fallback.yml b/.github/workflows/test-next-fallback.yml index 0efa7e0527..b8c39dc0e6 100644 --- a/.github/workflows/test-next-fallback.yml +++ b/.github/workflows/test-next-fallback.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: python-version: ["3.10", "3.11"] - tox-factor: ["nomesh-cpu", "atlas-cpu"] + tox-factor: ["nomesh", "atlas"] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-next.yml b/.github/workflows/test-next.yml index d3b5a7e245..8e05bbc86a 100644 --- a/.github/workflows/test-next.yml +++ b/.github/workflows/test-next.yml @@ -21,7 +21,7 @@ jobs: strategy: matrix: python-version: ["3.10", "3.11"] - tox-factor: ["nomesh-cpu", "atlas-cpu"] + tox-factor: ["nomesh", "atlas"] os: ["ubuntu-latest"] fail-fast: false diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml index 2587fe217e..df861c6468 100644 --- a/.github/workflows/test-storage-fallback.yml +++ b/.github/workflows/test-storage-fallback.yml @@ -20,7 +20,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [internal-cpu, dace-cpu] + tox-factor: [internal, dace] os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml index debabe75b4..e76526c296 100644 --- a/.github/workflows/test-storage.yml +++ b/.github/workflows/test-storage.yml @@ -24,7 +24,7 @@ jobs: strategy: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] - tox-factor: [internal-cpu, dace-cpu] + tox-factor: [internal, dace] os: ["ubuntu-latest"] fail-fast: false @@ -51,7 +51,7 @@ jobs: run: | pyversion=${{ matrix.python-version }} pyversion_no_dot=${pyversion//./} - tox run -e storage-py${pyversion_no_dot}-${{ matrix.tox-factor }} + tox run -e storage-py${pyversion_no_dot}-${{ matrix.tox-factor }}-cpu # mv coverage.json coverage-py${{ 
matrix.python-version }}-${{ matrix.os }}.json # - name: Upload coverage.json artifact # uses: actions/upload-artifact@v3 From c8ff4e3512157354762923bd5d3c96e4fb2306fa Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:21:31 +0200 Subject: [PATCH 09/24] Revert back to internal factor in docs --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e9e3f8a09..537a3b5651 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -150,7 +150,7 @@ We recommended you to use `tox` for most development-related tasks, like running tox list # Run a specific task environment -tox run -e cartesian-py38-cpu +tox run -e cartesian-py38-internal-cpu ``` Check `tox` documentation (`tox --help`) for the complete reference. From 665e5636ba6b34da508c2fc8d779425c4142be20 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:23:23 +0200 Subject: [PATCH 10/24] Fix format --- tox.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 8317a53ce6..aba53d1f19 100644 --- a/tox.ini +++ b/tox.ini @@ -58,7 +58,7 @@ commands = internal: not requires_dace \ cpu: and not requires_gpu \ {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}cartesian_tests + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* @@ -87,7 +87,7 @@ commands = nomesh: not requires_atlas \ cpu: and not requires_gpu \ {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}next_tests + " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next [testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] @@ -96,7 +96,7 @@ commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ cpu: not requires_gpu \ {cuda,cuda11x,cuda12x}: requires_gpu - " {posargs} 
tests{/}storage_tests + " {posargs} tests{/}storage_tests pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- From d1942f4ea56fcc76e718cd6582d0ab1b2cd95de6 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:24:49 +0200 Subject: [PATCH 11/24] Reorder conditions --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index aba53d1f19..d4cef71c7a 100644 --- a/tox.ini +++ b/tox.ini @@ -54,8 +54,8 @@ allowlist_externals = rm commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - dace: requires_dace \ internal: not requires_dace \ + dace: requires_dace \ cpu: and not requires_gpu \ {cuda,cuda11x,cuda12x}: and requires_gpu \ " {posargs} tests{/}cartesian_tests @@ -83,8 +83,8 @@ set_env = PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - atlas: requires_atlas \ nomesh: not requires_atlas \ + atlas: requires_atlas \ cpu: and not requires_gpu \ {cuda,cuda11x,cuda12x}: and requires_gpu \ " {posargs} tests{/}next_tests From 4da05fdcd59923de34e695fcc1c8d441afab4afa Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:26:07 +0200 Subject: [PATCH 12/24] Fix typo --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index d4cef71c7a..bd1de20f0b 100644 --- a/tox.ini +++ b/tox.ini @@ -95,7 +95,7 @@ description = Run 'gt4py.storage' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ cpu: not requires_gpu \ - {cuda,cuda11x,cuda12x}: requires_gpu + {cuda,cuda11x,cuda12x}: requires_gpu \ " {posargs} tests{/}storage_tests pytest doctest-modules {posargs} src{/}gt4py{/}storage From 690cdb48893aad76d3090e8c2d4825c5677d8929 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:41:07 +0200 Subject: [PATCH 13/24] Comment out again 
doctests in storage --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index bd1de20f0b..73bc3629b8 100644 --- a/tox.ini +++ b/tox.ini @@ -97,7 +97,7 @@ commands = cpu: not requires_gpu \ {cuda,cuda11x,cuda12x}: requires_gpu \ " {posargs} tests{/}storage_tests - pytest doctest-modules {posargs} src{/}gt4py{/}storage + # pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- [testenv:linters-py{38,39,310,311}] From 1e16e90558a9a076129886478d4afa79da863cd0 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:46:55 +0200 Subject: [PATCH 14/24] Format issues --- tox.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 73bc3629b8..ec0f4546b6 100644 --- a/tox.ini +++ b/tox.ini @@ -62,9 +62,9 @@ commands = python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* -#commands_post = -# coverage json --rcfile=setup.cfg -# coverage html --rcfile=setup.cfg --show-contexts +# commands_post = +# coverage json --rcfile=setup.cfg +# coverage html --rcfile=setup.cfg --show-contexts [testenv:eve-py{38,39,310,311}] description = Run 'gt4py.eve' tests From 1bff3cbf6e7c1a8731b3c8c24091c4347f03225f Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Fri, 26 Jul 2024 14:47:46 +0200 Subject: [PATCH 15/24] Restore dev factors --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ec0f4546b6..0e7a6caa55 100644 --- a/tox.ini +++ b/tox.ini @@ -119,7 +119,7 @@ commands = python -m pytest --nbmake examples -v -n {env:NUM_PROCESSES:1} # -- Other artefacts -- -[testenv:dev-py{38,39,310,311}{,-atlas}] +[testenv:dev-py{38,39,310,311}{-atlas,}] description = Initialize development environment for gt4py deps = -r {tox_root}{/}requirements-dev.txt From daec4cb3c993aead500c8009c880310ccde49cae Mon Sep 17 00:00:00 2001 
From: Enrique Gonzalez Paredes Date: Mon, 2 Sep 2024 16:59:08 +0200 Subject: [PATCH 16/24] Add custom pytest option to avoid returning an exit code error if the collection of tests is empty. --- tests/conftest.py | 19 +++++++++++++++++++ tox.ini | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 285ccda2b0..3cbfa56fde 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,5 +8,24 @@ """Global configuration of pytest for collecting and running tests.""" +import pytest + + # Ignore hidden folders and disabled tests collect_ignore_glob = [".*", "_disabled*"] + + +def pytest_addoption(parser): + group = parser.getgroup("This project") + group.addoption( + "--ignore-no-tests-collected", + action="store_true", + default=False, + help='Suppress the "no tests were collected" exit code.', + ) + + +def pytest_sessionfinish(session, exitstatus): + if session.config.getoption("--ignore-no-tests-collected"): + if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED: + session.exitstatus = pytest.ExitCode.OK diff --git a/tox.ini b/tox.ini index 0e7a6caa55..befddf22f8 100644 --- a/tox.ini +++ b/tox.ini @@ -82,7 +82,7 @@ set_env = {[testenv]set_env} PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = - python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + python -m pytest --ignore-no-tests-collected --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ nomesh: not requires_atlas \ atlas: requires_atlas \ cpu: and not requires_gpu \ {cuda,cuda11x,cuda12x}: and requires_gpu \ From 77e579e0de373a17a1815ede70992876485a4f10 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 09:54:16 +0200 Subject: [PATCH 17/24] Use pytest plugin to suppress no-tests-collected errors instead of custom hook.
--- .pre-commit-config.yaml | 20 +- constraints.txt | 57 ++--- .../exercises/1_simple_addition.ipynb | 14 +- .../1_simple_addition_solution.ipynb | 12 +- .../exercises/2_divergence_exercise.ipynb | 15 +- .../2_divergence_exercise_solution.ipynb | 15 +- .../exercises/3_gradient_exercise.ipynb | 10 +- .../3_gradient_exercise_solution.ipynb | 10 +- .../workshop/exercises/4_curl_exercise.ipynb | 20 +- .../exercises/4_curl_exercise_solution.ipynb | 20 +- .../exercises/5_vector_laplace_exercise.ipynb | 59 ++--- .../5_vector_laplace_exercise_solution.ipynb | 64 +++--- .../workshop/exercises/6_where_domain.ipynb | 19 +- .../exercises/6_where_domain_solutions.ipynb | 26 ++- .../workshop/exercises/7_scan_operator.ipynb | 13 +- .../exercises/7_scan_operator_solutions.ipynb | 7 +- .../8_diffusion_exercise_solution.ipynb | 62 +++--- docs/user/next/workshop/jupyter_intro.ipynb | 2 +- docs/user/next/workshop/slides/slides_1.ipynb | 18 +- docs/user/next/workshop/slides/slides_2.ipynb | 81 ++++--- docs/user/next/workshop/slides/slides_3.ipynb | 30 ++- docs/user/next/workshop/slides/slides_4.ipynb | 11 +- examples/cartesian/demo_burgers.ipynb | 207 +++++++++++------- .../cartesian/demo_horizontal_diffusion.ipynb | 27 +-- .../demo_isentropic_diagnostics.ipynb | 87 ++++---- examples/lap_cartesian_vs_next.ipynb | 16 +- min-extra-requirements-test.txt | 1 + min-requirements-test.txt | 1 + requirements-dev.in | 1 + requirements-dev.txt | 57 ++--- tests/conftest.py | 19 -- tox.ini | 2 +- 32 files changed, 521 insertions(+), 482 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c0f29e75e6..fcdef06ca9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,7 +50,7 @@ repos: ## version = re.search('ruff==([0-9\.]*)', open("constraints.txt").read())[1] ## print(f"rev: v{version}") ##]]] - rev: v0.5.6 + rev: v0.6.3 ##[[[end]]] hooks: # Run the linter. 
@@ -67,9 +67,9 @@ repos: ## version = re.search('mypy==([0-9\.]*)', open("constraints.txt").read())[1] ## print(f"#========= FROM constraints.txt: v{version} =========") ##]]] - #========= FROM constraints.txt: v1.11.1 ========= + #========= FROM constraints.txt: v1.11.2 ========= ##[[[end]]] - rev: v1.11.1 # MUST match version ^^^^ in constraints.txt (if the mirror is up-to-date) + rev: v1.11.2 # MUST match version ^^^^ in constraints.txt (if the mirror is up-to-date) hooks: - id: mypy additional_dependencies: # versions from constraints.txt @@ -93,21 +93,21 @@ repos: - click==8.1.7 - cmake==3.30.2 - cytoolz==0.12.3 - - deepdiff==7.0.1 + - deepdiff==8.0.1 - devtools==0.12.2 - - factory-boy==3.3.0 + - factory-boy==3.3.1 - frozendict==2.4.4 - gridtools-cpp==2.3.4 - - importlib-resources==6.4.0 + - importlib-resources==6.4.4 - jinja2==3.1.4 - - lark==1.1.9 + - lark==1.2.2 - mako==1.3.5 - - nanobind==2.0.0 + - nanobind==2.1.0 - ninja==1.11.1.1 - numpy==1.24.4 - packaging==24.1 - - pybind11==2.13.1 - - setuptools==72.1.0 + - pybind11==2.13.5 + - setuptools==74.1.0 - tabulate==0.9.0 - typing-extensions==4.12.2 - xxhash==3.0.0 diff --git a/constraints.txt b/constraints.txt index e5027a922a..7e6c4e7359 100644 --- a/constraints.txt +++ b/constraints.txt @@ -10,16 +10,16 @@ annotated-types==0.7.0 # via pydantic asttokens==2.4.1 # via devtools, stack-data astunparse==1.6.3 ; python_version < "3.9" # via dace, gt4py (pyproject.toml) attrs==24.2.0 # via flake8-bugbear, flake8-eradicate, gt4py (pyproject.toml), hypothesis, jsonschema, referencing -babel==2.15.0 # via sphinx +babel==2.16.0 # via sphinx backcall==0.2.0 # via ipython black==24.8.0 # via gt4py (pyproject.toml) boltons==24.0.0 # via gt4py (pyproject.toml) bracex==2.5 # via wcmatch build==1.2.1 # via pip-tools -bump-my-version==0.25.0 # via -r requirements-dev.in +bump-my-version==0.26.0 # via -r requirements-dev.in cached-property==1.5.2 # via gt4py (pyproject.toml) -cachetools==5.4.0 # via tox -certifi==2024.7.4 # 
via requests +cachetools==5.5.0 # via tox +certifi==2024.8.30 # via requests cfgv==3.4.0 # via pre-commit chardet==5.2.0 # via tox charset-normalizer==3.3.2 # via requests @@ -37,7 +37,7 @@ dace==0.16.1 # via gt4py (pyproject.toml) darglint==1.8.1 # via -r requirements-dev.in debugpy==1.8.5 # via ipykernel decorator==5.1.1 # via ipython -deepdiff==7.0.1 # via gt4py (pyproject.toml) +deepdiff==8.0.1 # via gt4py (pyproject.toml) devtools==0.12.2 # via gt4py (pyproject.toml) dill==0.3.8 # via dace distlib==0.3.8 # via virtualenv @@ -45,13 +45,13 @@ docutils==0.20.1 # via restructuredtext-lint, sphinx, sphinx-rtd-theme eradicate==2.3.0 # via flake8-eradicate exceptiongroup==1.2.2 # via hypothesis, pytest execnet==2.1.1 # via pytest-cache, pytest-xdist -executing==2.0.1 # via devtools, stack-data -factory-boy==3.3.0 # via gt4py (pyproject.toml), pytest-factoryboy -faker==26.2.0 # via factory-boy +executing==2.1.0 # via devtools, stack-data +factory-boy==3.3.1 # via gt4py (pyproject.toml), pytest-factoryboy +faker==28.1.0 # via factory-boy fastjsonschema==2.20.0 # via nbformat filelock==3.15.4 # via tox, virtualenv flake8==7.1.1 # via -r requirements-dev.in, flake8-bugbear, flake8-builtins, flake8-debugger, flake8-docstrings, flake8-eradicate, flake8-mutable, flake8-pyproject, flake8-rst-docstrings -flake8-bugbear==24.4.26 # via -r requirements-dev.in +flake8-bugbear==24.8.19 # via -r requirements-dev.in flake8-builtins==2.5.0 # via -r requirements-dev.in flake8-debugger==4.1.2 # via -r requirements-dev.in flake8-docstrings==1.7.0 # via -r requirements-dev.in @@ -63,12 +63,12 @@ fonttools==4.53.1 # via matplotlib fparser==0.1.4 # via dace frozendict==2.4.4 # via gt4py (pyproject.toml) gridtools-cpp==2.3.4 # via gt4py (pyproject.toml) -hypothesis==6.109.0 # via -r requirements-dev.in, gt4py (pyproject.toml) +hypothesis==6.111.2 # via -r requirements-dev.in, gt4py (pyproject.toml) identify==2.6.0 # via pre-commit -idna==3.7 # via requests +idna==3.8 # via requests 
imagesize==1.4.1 # via sphinx -importlib-metadata==8.2.0 # via build, jupyter-client, sphinx -importlib-resources==6.4.0 ; python_version < "3.9" # via gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib +importlib-metadata==8.4.0 # via build, jupyter-client, sphinx +importlib-resources==6.4.4 ; python_version < "3.9" # via gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib inflection==0.5.1 # via pytest-factoryboy iniconfig==2.0.0 # via pytest ipykernel==6.29.5 # via nbmake @@ -82,7 +82,7 @@ jupyter-client==8.6.2 # via ipykernel, nbclient jupyter-core==5.7.2 # via ipykernel, jupyter-client, nbformat jupytext==1.16.4 # via -r requirements-dev.in kiwisolver==1.4.5 # via matplotlib -lark==1.1.9 # via gt4py (pyproject.toml) +lark==1.2.2 # via gt4py (pyproject.toml) mako==1.3.5 # via gt4py (pyproject.toml) markdown-it-py==3.0.0 # via jupytext, mdit-py-plugins, rich markupsafe==2.1.5 # via jinja2, mako @@ -92,9 +92,9 @@ mccabe==0.7.0 # via flake8 mdit-py-plugins==0.4.1 # via jupytext mdurl==0.1.2 # via markdown-it-py mpmath==1.3.0 # via sympy -mypy==1.11.1 # via -r requirements-dev.in +mypy==1.11.2 # via -r requirements-dev.in mypy-extensions==1.0.0 # via black, mypy -nanobind==2.0.0 # via gt4py (pyproject.toml) +nanobind==2.1.0 # via gt4py (pyproject.toml) nbclient==0.6.8 # via nbmake nbformat==5.10.4 # via jupytext, nbclient, nbmake nbmake==1.5.4 # via -r requirements-dev.in @@ -103,7 +103,7 @@ networkx==3.1 # via dace ninja==1.11.1.1 # via gt4py (pyproject.toml) nodeenv==1.9.1 # via pre-commit numpy==1.24.4 # via contourpy, dace, gt4py (pyproject.toml), matplotlib, scipy -ordered-set==4.1.0 # via deepdiff +orderly-set==5.2.2 # via deepdiff packaging==24.1 # via black, build, gt4py (pyproject.toml), ipykernel, jupytext, matplotlib, pipdeptree, pyproject-api, pytest, pytest-factoryboy, setuptools-scm, sphinx, tox parso==0.8.4 # via jedi pathspec==0.12.1 # via black @@ -121,7 +121,7 @@ prompt-toolkit==3.0.36 # via ipython, 
questionary psutil==6.0.0 # via -r requirements-dev.in, ipykernel, pytest-xdist ptyprocess==0.7.0 # via pexpect pure-eval==0.2.3 # via stack-data -pybind11==2.13.1 # via gt4py (pyproject.toml) +pybind11==2.13.5 # via gt4py (pyproject.toml) pycodestyle==2.12.1 # via flake8, flake8-debugger pydantic==2.8.2 # via bump-my-version, pydantic-settings pydantic-core==2.20.1 # via pydantic @@ -129,12 +129,13 @@ pydantic-settings==2.4.0 # via bump-my-version pydocstyle==6.3.0 # via flake8-docstrings pyflakes==3.2.0 # via flake8 pygments==2.18.0 # via -r requirements-dev.in, devtools, flake8-rst-docstrings, ipython, nbmake, rich, sphinx -pyparsing==3.1.2 # via matplotlib +pyparsing==3.1.4 # via matplotlib pyproject-api==1.7.1 # via tox pyproject-hooks==1.1.0 # via build, pip-tools -pytest==8.3.2 # via -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-factoryboy, pytest-instafail, pytest-xdist +pytest==8.3.2 # via -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, pytest-xdist pytest-cache==1.0 # via -r requirements-dev.in pytest-cov==5.0.0 # via -r requirements-dev.in +pytest-custom-exit-code==0.3.0 # via -r requirements-dev.in pytest-factoryboy==2.7.0 # via -r requirements-dev.in pytest-instafail==0.5.0 # via -r requirements-dev.in pytest-xdist==3.6.1 # via -r requirements-dev.in @@ -142,15 +143,15 @@ python-dateutil==2.9.0.post0 # via faker, jupyter-client, matplotlib python-dotenv==1.0.1 # via pydantic-settings pytz==2024.1 # via babel pyyaml==6.0.2 # via dace, jupytext, pre-commit -pyzmq==26.1.0 # via ipykernel, jupyter-client +pyzmq==26.2.0 # via ipykernel, jupyter-client questionary==2.0.1 # via bump-my-version referencing==0.35.1 # via jsonschema, jsonschema-specifications requests==2.32.3 # via sphinx restructuredtext-lint==1.4.0 # via flake8-rst-docstrings -rich==13.7.1 # via bump-my-version, rich-click +rich==13.8.0 # via 
bump-my-version, rich-click rich-click==1.8.3 # via bump-my-version rpds-py==0.20.0 # via jsonschema, referencing -ruff==0.5.6 # via -r requirements-dev.in +ruff==0.6.3 # via -r requirements-dev.in scipy==1.10.1 # via gt4py (pyproject.toml) setuptools-scm==8.1.0 # via fparser six==1.16.0 # via asttokens, astunparse, python-dateutil @@ -169,10 +170,10 @@ stack-data==0.6.3 # via ipython sympy==1.12.1 # via dace, gt4py (pyproject.toml) tabulate==0.9.0 # via gt4py (pyproject.toml) tomli==2.0.1 ; python_version < "3.11" # via -r requirements-dev.in, black, build, coverage, flake8-pyproject, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, tox -tomlkit==0.13.0 # via bump-my-version +tomlkit==0.13.2 # via bump-my-version toolz==0.12.1 # via cytoolz tornado==6.4.1 # via ipykernel, jupyter-client -tox==4.17.0 # via -r requirements-dev.in +tox==4.18.0 # via -r requirements-dev.in traitlets==5.14.3 # via comm, ipykernel, ipython, jupyter-client, jupyter-core, matplotlib-inline, nbclient, nbformat types-tabulate==0.9.0.20240106 # via -r requirements-dev.in typing-extensions==4.12.2 # via annotated-types, black, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, setuptools-scm @@ -180,11 +181,11 @@ urllib3==2.2.2 # via requests virtualenv==20.26.3 # via pre-commit, tox wcmatch==9.0 # via bump-my-version wcwidth==0.2.13 # via prompt-toolkit -websockets==12.0 # via dace +websockets==13.0.1 # via dace wheel==0.44.0 # via astunparse, pip-tools xxhash==3.0.0 # via gt4py (pyproject.toml) -zipp==3.19.2 # via importlib-metadata, importlib-resources +zipp==3.20.1 # via importlib-metadata, importlib-resources # The following packages are considered to be unsafe in a requirements file: pip==24.2 # via pip-tools, pipdeptree -setuptools==72.1.0 # via gt4py (pyproject.toml), pip-tools, setuptools-scm +setuptools==74.1.0 # via gt4py (pyproject.toml), pip-tools, setuptools-scm diff --git 
a/docs/user/next/workshop/exercises/1_simple_addition.ipynb b/docs/user/next/workshop/exercises/1_simple_addition.ipynb index 918e72b084..7f42f2b9d8 100644 --- a/docs/user/next/workshop/exercises/1_simple_addition.ipynb +++ b/docs/user/next/workshop/exercises/1_simple_addition.ipynb @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "def addition ... # TODO fix this cell" + "def addition(): ... # TODO fix this cell" ] }, { @@ -89,14 +89,12 @@ " print(\"Result:\")\n", " print(c)\n", " print(c.asnumpy())\n", - " \n", + "\n", " # Plots\n", - " fig, ax = plt.subplot_mosaic([\n", - " ['a', 'b', 'c']\n", - " ])\n", - " ax['a'].imshow(a.asnumpy())\n", - " ax['b'].imshow(b.asnumpy())\n", - " ax['c'].imshow(c.asnumpy())\n", + " fig, ax = plt.subplot_mosaic([[\"a\", \"b\", \"c\"]])\n", + " ax[\"a\"].imshow(a.asnumpy())\n", + " ax[\"b\"].imshow(b.asnumpy())\n", + " ax[\"c\"].imshow(c.asnumpy())\n", "\n", " print(\"\\nTest successful!\")" ] diff --git a/docs/user/next/workshop/exercises/1_simple_addition_solution.ipynb b/docs/user/next/workshop/exercises/1_simple_addition_solution.ipynb index 59505f142a..dd1a30cc6b 100644 --- a/docs/user/next/workshop/exercises/1_simple_addition_solution.ipynb +++ b/docs/user/next/workshop/exercises/1_simple_addition_solution.ipynb @@ -120,14 +120,12 @@ " print(\"Result:\")\n", " print(c)\n", " print(c.asnumpy())\n", - " \n", + "\n", " # Plots\n", - " fig, ax = plt.subplot_mosaic([\n", - " ['a', 'b', 'c']\n", - " ])\n", - " ax['a'].imshow(a.asnumpy())\n", - " ax['b'].imshow(b.asnumpy())\n", - " ax['c'].imshow(c.asnumpy())\n", + " fig, ax = plt.subplot_mosaic([[\"a\", \"b\", \"c\"]])\n", + " ax[\"a\"].imshow(a.asnumpy())\n", + " ax[\"b\"].imshow(b.asnumpy())\n", + " ax[\"c\"].imshow(c.asnumpy())\n", "\n", " print(\"\\nTest successful!\")" ] diff --git a/docs/user/next/workshop/exercises/2_divergence_exercise.ipynb b/docs/user/next/workshop/exercises/2_divergence_exercise.ipynb index 86baf90901..50349e52b0 100644 --- 
a/docs/user/next/workshop/exercises/2_divergence_exercise.ipynb +++ b/docs/user/next/workshop/exercises/2_divergence_exercise.ipynb @@ -53,12 +53,7 @@ " A: np.array,\n", " edge_orientation: np.array,\n", ") -> np.array:\n", - " uv_div = (\n", - " np.sum(\n", - " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n", - " )\n", - " / A\n", - " )\n", + " uv_div = np.sum((u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1) / A\n", " return uv_div" ] }, @@ -105,9 +100,7 @@ " ny = random_field(edge_domain, allocator=backend)\n", " L = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " divergence_ref = divergence_numpy(\n", " c2e_table,\n", @@ -120,9 +113,7 @@ " edge_orientation.asnumpy(),\n", " )\n", "\n", - " c2e_connectivity = gtx.NeighborTableOffsetProvider(\n", - " c2e_table, C, E, 3, has_skip_values=False\n", - " )\n", + " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n", "\n", " divergence_gt4py = gtx.zeros(cell_domain, allocator=backend)\n", "\n", diff --git a/docs/user/next/workshop/exercises/2_divergence_exercise_solution.ipynb b/docs/user/next/workshop/exercises/2_divergence_exercise_solution.ipynb index eda22846eb..6baac2b8c0 100644 --- a/docs/user/next/workshop/exercises/2_divergence_exercise_solution.ipynb +++ b/docs/user/next/workshop/exercises/2_divergence_exercise_solution.ipynb @@ -53,12 +53,7 @@ " A: np.array,\n", " edge_orientation: np.array,\n", ") -> np.array:\n", - " uv_div = (\n", - " np.sum(\n", - " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n", - " )\n", - " / A\n", - " )\n", + " uv_div = np.sum((u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, 
axis=1) / A\n", " return uv_div" ] }, @@ -110,9 +105,7 @@ " ny = random_field(edge_domain, allocator=backend)\n", " L = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " divergence_ref = divergence_numpy(\n", " c2e_table,\n", @@ -125,9 +118,7 @@ " edge_orientation.asnumpy(),\n", " )\n", "\n", - " c2e_connectivity = gtx.NeighborTableOffsetProvider(\n", - " c2e_table, C, E, 3, has_skip_values=False\n", - " )\n", + " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n", "\n", " divergence_gt4py = gtx.zeros(cell_domain, allocator=backend)\n", "\n", diff --git a/docs/user/next/workshop/exercises/3_gradient_exercise.ipynb b/docs/user/next/workshop/exercises/3_gradient_exercise.ipynb index a0e5580eb6..c8914120d3 100644 --- a/docs/user/next/workshop/exercises/3_gradient_exercise.ipynb +++ b/docs/user/next/workshop/exercises/3_gradient_exercise.ipynb @@ -92,15 +92,13 @@ "\n", " cell_domain = gtx.domain({C: n_cells})\n", " edge_domain = gtx.domain({E: n_edges})\n", - " \n", + "\n", " f = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", " ny = random_field(edge_domain, allocator=backend)\n", " L = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " gradient_numpy_x, gradient_numpy_y = gradient_numpy(\n", " c2e_table,\n", @@ -114,8 +112,8 @@ "\n", " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n", "\n", 
- " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend) \n", - " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend) \n", + " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend)\n", + " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend)\n", "\n", " gradient(\n", " f,\n", diff --git a/docs/user/next/workshop/exercises/3_gradient_exercise_solution.ipynb b/docs/user/next/workshop/exercises/3_gradient_exercise_solution.ipynb index 64550d9b58..5e940a4b71 100644 --- a/docs/user/next/workshop/exercises/3_gradient_exercise_solution.ipynb +++ b/docs/user/next/workshop/exercises/3_gradient_exercise_solution.ipynb @@ -105,15 +105,13 @@ "\n", " cell_domain = gtx.domain({C: n_cells})\n", " edge_domain = gtx.domain({E: n_edges})\n", - " \n", + "\n", " f = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", " ny = random_field(edge_domain, allocator=backend)\n", " L = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " gradient_numpy_x, gradient_numpy_y = gradient_numpy(\n", " c2e_table,\n", @@ -127,8 +125,8 @@ "\n", " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n", "\n", - " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend) \n", - " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend) \n", + " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend)\n", + " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend)\n", "\n", " gradient(\n", " f,\n", diff --git a/docs/user/next/workshop/exercises/4_curl_exercise.ipynb b/docs/user/next/workshop/exercises/4_curl_exercise.ipynb index 54806b4a3a..4a6b37baf7 100644 --- 
a/docs/user/next/workshop/exercises/4_curl_exercise.ipynb +++ b/docs/user/next/workshop/exercises/4_curl_exercise.ipynb @@ -114,16 +114,14 @@ "\n", " edge_domain = gtx.domain({E: n_edges})\n", " vertex_domain = gtx.domain({V: n_vertices})\n", - " \n", + "\n", " u = random_field(edge_domain, allocator=backend)\n", " v = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", " ny = random_field(edge_domain, allocator=backend)\n", " dualL = random_field(edge_domain, allocator=backend)\n", " dualA = random_field(vertex_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend)\n", "\n", " divergence_ref = curl_numpy(\n", " v2e_table,\n", @@ -138,12 +136,20 @@ "\n", " v2e_connectivity = gtx.NeighborTableOffsetProvider(v2e_table, V, E, 6, has_skip_values=False)\n", "\n", - " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend) \n", + " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend)\n", "\n", " curl(\n", - " u, v, nx, ny, dualL, dualA, edge_orientation, out = curl_gt4py, offset_provider = {V2E.value: v2e_connectivity}\n", + " u,\n", + " v,\n", + " nx,\n", + " ny,\n", + " dualL,\n", + " dualA,\n", + " edge_orientation,\n", + " out=curl_gt4py,\n", + " offset_provider={V2E.value: v2e_connectivity},\n", " )\n", - " \n", + "\n", " assert np.allclose(curl_gt4py.asnumpy(), divergence_ref)" ] }, diff --git a/docs/user/next/workshop/exercises/4_curl_exercise_solution.ipynb b/docs/user/next/workshop/exercises/4_curl_exercise_solution.ipynb index 2649c5e2cd..065cf02de7 100644 --- a/docs/user/next/workshop/exercises/4_curl_exercise_solution.ipynb +++ b/docs/user/next/workshop/exercises/4_curl_exercise_solution.ipynb @@ -119,16 +119,14 @@ "\n", " edge_domain = gtx.domain({E: n_edges})\n", " vertex_domain = gtx.domain({V: n_vertices})\n", - " 
\n", + "\n", " u = random_field(edge_domain, allocator=backend)\n", " v = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", " ny = random_field(edge_domain, allocator=backend)\n", " dualL = random_field(edge_domain, allocator=backend)\n", " dualA = random_field(vertex_domain, allocator=backend)\n", - " edge_orientation = random_sign(\n", - " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n", - " )\n", + " edge_orientation = random_sign(gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend)\n", "\n", " divergence_ref = curl_numpy(\n", " v2e_table,\n", @@ -143,12 +141,20 @@ "\n", " v2e_connectivity = gtx.NeighborTableOffsetProvider(v2e_table, V, E, 6, has_skip_values=False)\n", "\n", - " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend) \n", + " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend)\n", "\n", " curl(\n", - " u, v, nx, ny, dualL, dualA, edge_orientation, out = curl_gt4py, offset_provider = {V2E.value: v2e_connectivity}\n", + " u,\n", + " v,\n", + " nx,\n", + " ny,\n", + " dualL,\n", + " dualA,\n", + " edge_orientation,\n", + " out=curl_gt4py,\n", + " offset_provider={V2E.value: v2e_connectivity},\n", " )\n", - " \n", + "\n", " assert np.allclose(curl_gt4py.asnumpy(), divergence_ref)" ] }, diff --git a/docs/user/next/workshop/exercises/5_vector_laplace_exercise.ipynb b/docs/user/next/workshop/exercises/5_vector_laplace_exercise.ipynb index b976b214c3..832375a86b 100644 --- a/docs/user/next/workshop/exercises/5_vector_laplace_exercise.ipynb +++ b/docs/user/next/workshop/exercises/5_vector_laplace_exercise.ipynb @@ -79,12 +79,7 @@ " A: np.array,\n", " edge_orientation: np.array,\n", ") -> np.array:\n", - " uv_div = (\n", - " np.sum(\n", - " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n", - " )\n", - " / A\n", - " )\n", + " uv_div = np.sum((u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1) / A\n", " return uv_div" ] }, @@ 
-140,22 +135,22 @@ " edge_orientation_vertex: np.array,\n", " edge_orientation_cell: np.array,\n", ") -> np.array:\n", - " # compute curl (on vertices)\n", - " uv_curl = curl_numpy(v2e, u, v, nx, ny, dualL, dualA, edge_orientation_vertex)\n", + " # compute curl (on vertices)\n", + " uv_curl = curl_numpy(v2e, u, v, nx, ny, dualL, dualA, edge_orientation_vertex)\n", "\n", - " # compute divergence (on cells)\n", - " uv_div = divergence_numpy(c2e, u, v, nx, ny, L, A, edge_orientation_cell)\n", - " \n", - " # first term of of nabla2 (gradient of curl)\n", - " grad_of_curl = (uv_curl[e2v[:, 1]] - uv_curl[e2v[:, 0]])*tangent_orientation/L\n", + " # compute divergence (on cells)\n", + " uv_div = divergence_numpy(c2e, u, v, nx, ny, L, A, edge_orientation_cell)\n", "\n", - " # second term of of nabla2 (gradient of divergence)\n", - " grad_of_div = (uv_div[e2c[:, 1]] - uv_div[e2c[:, 0]])/dualL \n", + " # first term of of nabla2 (gradient of curl)\n", + " grad_of_curl = (uv_curl[e2v[:, 1]] - uv_curl[e2v[:, 0]]) * tangent_orientation / L\n", "\n", - " # finalize nabla2 (difference between the two gradients)\n", - " uv_nabla2 = grad_of_div - grad_of_curl\n", + " # second term of of nabla2 (gradient of divergence)\n", + " grad_of_div = (uv_div[e2c[:, 1]] - uv_div[e2c[:, 0]]) / dualL\n", "\n", - " return uv_nabla2" + " # finalize nabla2 (difference between the two gradients)\n", + " uv_nabla2 = grad_of_div - grad_of_curl\n", + "\n", + " return uv_nabla2" ] }, { @@ -177,7 +172,7 @@ ") -> gtx.Field[Dims[C], float]:\n", " # compute divergence\n", " uv_div = A\n", - " \n", + "\n", " return uv_div" ] }, @@ -239,7 +234,6 @@ "outputs": [], "source": [ "def test_laplacian():\n", - "\n", " backend = None\n", " # backend = gtfn_cpu\n", " # backend = gtfn_gpu\n", @@ -248,7 +242,6 @@ " vertex_domain = gtx.domain({V: n_vertices})\n", " cell_domain = gtx.domain({C: n_cells})\n", "\n", - "\n", " u = random_field(edge_domain, allocator=backend)\n", " v = random_field(edge_domain, 
allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", @@ -258,12 +251,8 @@ " tangent_orientation = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", " dualA = random_field(vertex_domain, allocator=backend)\n", - " edge_orientation_vertex = random_sign(\n", - " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n", - " )\n", - " edge_orientation_cell = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation_vertex = random_sign(gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend)\n", + " edge_orientation_cell = random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " laplacian_ref = laplacian_numpy(\n", " c2e_table,\n", @@ -288,7 +277,6 @@ " e2v_connectivity = gtx.NeighborTableOffsetProvider(e2v_table, E, V, 2, has_skip_values=False)\n", " e2c_connectivity = gtx.NeighborTableOffsetProvider(e2c_table, E, C, 2, has_skip_values=False)\n", "\n", - "\n", " laplacian_gt4py = gtx.zeros(edge_domain, allocator=backend)\n", "\n", " laplacian_fvm(\n", @@ -303,14 +291,15 @@ " dualA,\n", " edge_orientation_vertex,\n", " edge_orientation_cell,\n", - " out = laplacian_gt4py,\n", - " offset_provider = {C2E.value: c2e_connectivity,\n", - " V2E.value: v2e_connectivity,\n", - " E2V.value: e2v_connectivity,\n", - " E2C.value: e2c_connectivity,\n", - " },\n", + " out=laplacian_gt4py,\n", + " offset_provider={\n", + " C2E.value: c2e_connectivity,\n", + " V2E.value: v2e_connectivity,\n", + " E2V.value: e2v_connectivity,\n", + " E2C.value: e2c_connectivity,\n", + " },\n", " )\n", - " \n", + "\n", " assert np.allclose(laplacian_gt4py.asnumpy(), laplacian_ref)" ] }, diff --git a/docs/user/next/workshop/exercises/5_vector_laplace_exercise_solution.ipynb b/docs/user/next/workshop/exercises/5_vector_laplace_exercise_solution.ipynb index 927d56d639..be846d199d 100644 --- 
a/docs/user/next/workshop/exercises/5_vector_laplace_exercise_solution.ipynb +++ b/docs/user/next/workshop/exercises/5_vector_laplace_exercise_solution.ipynb @@ -79,12 +79,7 @@ " A: np.array,\n", " edge_orientation: np.array,\n", ") -> np.array:\n", - " uv_div = (\n", - " np.sum(\n", - " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n", - " )\n", - " / A\n", - " )\n", + " uv_div = np.sum((u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1) / A\n", " return uv_div" ] }, @@ -140,22 +135,22 @@ " edge_orientation_vertex: np.array,\n", " edge_orientation_cell: np.array,\n", ") -> np.array:\n", - " # compute curl (on vertices)\n", - " uv_curl = curl_numpy(v2e, u, v, nx, ny, dualL, dualA, edge_orientation_vertex)\n", + " # compute curl (on vertices)\n", + " uv_curl = curl_numpy(v2e, u, v, nx, ny, dualL, dualA, edge_orientation_vertex)\n", "\n", - " # compute divergence (on cells)\n", - " uv_div = divergence_numpy(c2e, u, v, nx, ny, L, A, edge_orientation_cell)\n", - " \n", - " # first term of of nabla2 (gradient of curl)\n", - " grad_of_curl = (uv_curl[e2v[:, 1]] - uv_curl[e2v[:, 0]])*tangent_orientation/L\n", + " # compute divergence (on cells)\n", + " uv_div = divergence_numpy(c2e, u, v, nx, ny, L, A, edge_orientation_cell)\n", "\n", - " # second term of of nabla2 (gradient of divergence)\n", - " grad_of_div = (uv_div[e2c[:, 1]] - uv_div[e2c[:, 0]])/dualL \n", + " # first term of of nabla2 (gradient of curl)\n", + " grad_of_curl = (uv_curl[e2v[:, 1]] - uv_curl[e2v[:, 0]]) * tangent_orientation / L\n", + "\n", + " # second term of of nabla2 (gradient of divergence)\n", + " grad_of_div = (uv_div[e2c[:, 1]] - uv_div[e2c[:, 0]]) / dualL\n", "\n", - " # finalize nabla2 (difference between the two gradients)\n", - " uv_nabla2 = grad_of_div - grad_of_curl\n", + " # finalize nabla2 (difference between the two gradients)\n", + " uv_nabla2 = grad_of_div - grad_of_curl\n", "\n", - " return uv_nabla2" + " return uv_nabla2" ] }, { @@ 
-234,18 +229,17 @@ " edge_orientation_vertex: gtx.Field[Dims[V, V2EDim], float],\n", " edge_orientation_cell: gtx.Field[Dims[C, C2EDim], float],\n", ") -> gtx.Field[Dims[E], float]:\n", - " \n", " # compute curl (on vertices)\n", " uv_curl = curl(u, v, nx, ny, dualL, dualA, edge_orientation_vertex)\n", "\n", " # compute divergence (on cells)\n", " uv_div = divergence(u, v, nx, ny, L, A, edge_orientation_cell)\n", - " \n", + "\n", " # first term of of nabla2 (gradient of curl)\n", - " grad_of_curl = (uv_curl(E2V[1]) - uv_curl(E2V[0]))*tangent_orientation/L\n", + " grad_of_curl = (uv_curl(E2V[1]) - uv_curl(E2V[0])) * tangent_orientation / L\n", "\n", " # second term of of nabla2 (gradient of divergence)\n", - " grad_of_div = (uv_div(E2C[1]) - uv_div(E2C[0]))/dualL \n", + " grad_of_div = (uv_div(E2C[1]) - uv_div(E2C[0])) / dualL\n", "\n", " # finalize nabla2 (difference between the two gradients)\n", " uv_nabla2 = grad_of_div - grad_of_curl\n", @@ -261,7 +255,6 @@ "outputs": [], "source": [ "def test_laplacian():\n", - "\n", " backend = None\n", " # backend = gtfn_cpu\n", " # backend = gtfn_gpu\n", @@ -270,7 +263,6 @@ " vertex_domain = gtx.domain({V: n_vertices})\n", " cell_domain = gtx.domain({C: n_cells})\n", "\n", - "\n", " u = random_field(edge_domain, allocator=backend)\n", " v = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", @@ -280,12 +272,8 @@ " tangent_orientation = random_field(edge_domain, allocator=backend)\n", " A = random_field(cell_domain, allocator=backend)\n", " dualA = random_field(vertex_domain, allocator=backend)\n", - " edge_orientation_vertex = random_sign(\n", - " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n", - " )\n", - " edge_orientation_cell = random_sign(\n", - " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n", - " )\n", + " edge_orientation_vertex = random_sign(gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend)\n", + " edge_orientation_cell = 
random_sign(gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend)\n", "\n", " laplacian_ref = laplacian_numpy(\n", " c2e_table,\n", @@ -310,7 +298,6 @@ " e2v_connectivity = gtx.NeighborTableOffsetProvider(e2v_table, E, V, 2, has_skip_values=False)\n", " e2c_connectivity = gtx.NeighborTableOffsetProvider(e2c_table, E, C, 2, has_skip_values=False)\n", "\n", - "\n", " laplacian_gt4py = gtx.zeros(edge_domain, allocator=backend)\n", "\n", " laplacian_fvm(\n", @@ -325,14 +312,15 @@ " dualA,\n", " edge_orientation_vertex,\n", " edge_orientation_cell,\n", - " out = laplacian_gt4py,\n", - " offset_provider = {C2E.value: c2e_connectivity,\n", - " V2E.value: v2e_connectivity,\n", - " E2V.value: e2v_connectivity,\n", - " E2C.value: e2c_connectivity,\n", - " },\n", + " out=laplacian_gt4py,\n", + " offset_provider={\n", + " C2E.value: c2e_connectivity,\n", + " V2E.value: v2e_connectivity,\n", + " E2V.value: e2v_connectivity,\n", + " E2C.value: e2c_connectivity,\n", + " },\n", " )\n", - " \n", + "\n", " assert np.allclose(laplacian_gt4py.asnumpy(), laplacian_ref)" ] }, diff --git a/docs/user/next/workshop/exercises/6_where_domain.ipynb b/docs/user/next/workshop/exercises/6_where_domain.ipynb index c23e8c121a..3c50da2245 100644 --- a/docs/user/next/workshop/exercises/6_where_domain.ipynb +++ b/docs/user/next/workshop/exercises/6_where_domain.ipynb @@ -84,7 +84,7 @@ ], "source": [ "a_np = np.arange(10.0)\n", - "b_np = np.where(a_np < 6.0, a_np, a_np*10.0)\n", + "b_np = np.where(a_np < 6.0, a_np, a_np * 10.0)\n", "print(\"a_np array: {}\".format(a_np))\n", "print(\"b_np array: {}\".format(b_np))" ] @@ -153,7 +153,7 @@ " a = gtx.as_field([K], np.arange(10.0), allocator=backend)\n", " b = gtx.as_field([K], np.zeros(shape=10), allocator=backend)\n", " program_where(a, b, offset_provider={})\n", - " \n", + "\n", " assert np.allclose(b_np, b.asnumpy())" ] }, @@ -236,8 +236,9 @@ "\n", "\n", "@gtx.program(backend=backend)\n", - "def program_domain(a: gtx.Field[Dims[K], float], b: 
gtx.Field[Dims[K], float]):\n", - " ... # TODO write the call to fieldop_domain" + "def program_domain(\n", + " a: gtx.Field[Dims[K], float], b: gtx.Field[Dims[K], float]\n", + "): ... # TODO write the call to fieldop_domain" ] }, { @@ -366,11 +367,11 @@ "source": [ "@gtx.field_operator\n", "def fieldop_domain_where(a: gtx.Field[Dims[K], float]) -> gtx.Field[Dims[K], float]:\n", - " return # TODO\n", + " return # TODO\n", + "\n", "\n", "@gtx.program(backend=backend)\n", - "def program_domain_where(a: gtx.Field[Dims[K], float], b: gtx.Field[Dims[K], float]):\n", - " ... # TODO " + "def program_domain_where(a: gtx.Field[Dims[K], float], b: gtx.Field[Dims[K], float]): ... # TODO" ] }, { @@ -380,11 +381,11 @@ "metadata": {}, "outputs": [], "source": [ - "def test_domain_where(): \n", + "def test_domain_where():\n", " a = gtx.as_field([K], np.arange(10.0), allocator=backend)\n", " b = gtx.as_field([K], np.zeros(shape=10), allocator=backend)\n", " program_domain_where(a, b, offset_provider={\"Koff\": K})\n", - " \n", + "\n", " assert np.allclose(a_np_result, b.asnumpy())" ] }, diff --git a/docs/user/next/workshop/exercises/6_where_domain_solutions.ipynb b/docs/user/next/workshop/exercises/6_where_domain_solutions.ipynb index f42701473c..790e0d6cae 100644 --- a/docs/user/next/workshop/exercises/6_where_domain_solutions.ipynb +++ b/docs/user/next/workshop/exercises/6_where_domain_solutions.ipynb @@ -84,7 +84,7 @@ ], "source": [ "a_np = np.arange(10.0)\n", - "b_np = np.where(a_np < 6.0, a_np, a_np*10.0)\n", + "b_np = np.where(a_np < 6.0, a_np, a_np * 10.0)\n", "print(\"a_np array: {}\".format(a_np))\n", "print(\"b_np array: {}\".format(b_np))" ] @@ -106,11 +106,12 @@ "source": [ "@gtx.field_operator\n", "def fieldop_where(a: gtx.Field[Dims[K], float]) -> gtx.Field[Dims[K], float]:\n", - " return where(a < 6.0, a, a*10.0)\n", + " return where(a < 6.0, a, a * 10.0)\n", + "\n", "\n", "@gtx.program(backend=backend)\n", "def program_where(a: gtx.Field[Dims[K], float], b: 
gtx.Field[Dims[K], float]):\n", - " fieldop_where(a, out=b) " + " fieldop_where(a, out=b)" ] }, { @@ -124,7 +125,7 @@ " a = gtx.as_field([K], np.arange(10.0), allocator=backend)\n", " b = gtx.as_field([K], np.zeros(shape=10), allocator=backend)\n", " program_where(a, b, offset_provider={})\n", - " \n", + "\n", " assert np.allclose(b_np, b.asnumpy())" ] }, @@ -182,12 +183,12 @@ "source": [ "@gtx.field_operator\n", "def fieldop_domain(a: gtx.Field[Dims[K], float]) -> gtx.Field[Dims[K], float]:\n", - " return a*10.0\n", + " return a * 10.0\n", + "\n", "\n", "@gtx.program(backend=backend)\n", - "def program_domain(a: gtx.Field[Dims[K], float],\n", - " b: gtx.Field[Dims[K], float]):\n", - " fieldop_domain(a, out=b, domain={K: (6, 10)}) " + "def program_domain(a: gtx.Field[Dims[K], float], b: gtx.Field[Dims[K], float]):\n", + " fieldop_domain(a, out=b, domain={K: (6, 10)})" ] }, { @@ -297,11 +298,12 @@ "source": [ "@gtx.field_operator\n", "def fieldop_domain_where(a: gtx.Field[Dims[K], float]) -> gtx.Field[Dims[K], float]:\n", - " return where(a<8.0, a(Koff[1])+a, a)\n", + " return where(a < 8.0, a(Koff[1]) + a, a)\n", + "\n", "\n", "@gtx.program(backend=backend)\n", "def program_domain_where(a: gtx.Field[Dims[K], float], b: gtx.Field[Dims[K], float]):\n", - " fieldop_domain_where(a, out=b, domain={K: (0, 9)}) " + " fieldop_domain_where(a, out=b, domain={K: (0, 9)})" ] }, { @@ -311,11 +313,11 @@ "metadata": {}, "outputs": [], "source": [ - "def test_domain_where(): \n", + "def test_domain_where():\n", " a = gtx.as_field([K], np.arange(10.0), allocator=backend)\n", " b = gtx.as_field([K], np.zeros(shape=10), allocator=backend)\n", " program_domain_where(a, b, offset_provider={\"Koff\": K})\n", - " \n", + "\n", " assert np.allclose(a_np_result, b.asnumpy())" ] }, diff --git a/docs/user/next/workshop/exercises/7_scan_operator.ipynb b/docs/user/next/workshop/exercises/7_scan_operator.ipynb index 90982c352d..626fd4ecd9 100644 --- 
a/docs/user/next/workshop/exercises/7_scan_operator.ipynb +++ b/docs/user/next/workshop/exercises/7_scan_operator.ipynb @@ -183,17 +183,17 @@ "\n", " # unpack state of previous iteration\n", " # TODO\n", - " \n", + "\n", " # Autoconversion: Cloud Drops -> Rain Drops\n", " # TODO\n", - " \n", + "\n", " ## Add sedimentation flux from level above\n", " # TODO\n", "\n", " # Remove mass due to sedimentation flux\n", " # TODO\n", "\n", - " return # TODO" + " return # TODO" ] }, { @@ -243,10 +243,7 @@ "@gtx.field_operator(backend=backend)\n", "def graupel_toy_scan(\n", " qc: gtx.Field[Dims[C, K], float], qr: gtx.Field[Dims[C, K], float]\n", - ") -> tuple[\n", - " gtx.Field[Dims[C, K], float],\n", - " gtx.Field[Dims[C, K], float]\n", - "]:\n", + ") -> tuple[gtx.Field[Dims[C, K], float], gtx.Field[Dims[C, K], float]]:\n", " qc, qr, _ = _graupel_toy_scan(qc, qr)\n", "\n", " return qc, qr" @@ -261,7 +258,7 @@ "source": [ "def test_scan_operator():\n", " cell_k_domain = gtx.domain({C: n_cells, K: n_levels})\n", - " \n", + "\n", " qc = random_field(cell_k_domain, allocator=backend)\n", " qr = random_field(cell_k_domain, allocator=backend)\n", "\n", diff --git a/docs/user/next/workshop/exercises/7_scan_operator_solutions.ipynb b/docs/user/next/workshop/exercises/7_scan_operator_solutions.ipynb index e7831f3687..335c26a2a2 100644 --- a/docs/user/next/workshop/exercises/7_scan_operator_solutions.ipynb +++ b/docs/user/next/workshop/exercises/7_scan_operator_solutions.ipynb @@ -187,10 +187,7 @@ "@gtx.field_operator(backend=backend)\n", "def graupel_toy_scan(\n", " qc: gtx.Field[Dims[C, K], float], qr: gtx.Field[Dims[C, K], float]\n", - ") -> tuple[\n", - " gtx.Field[Dims[C, K], float],\n", - " gtx.Field[Dims[C, K], float]\n", - "]:\n", + ") -> tuple[gtx.Field[Dims[C, K], float], gtx.Field[Dims[C, K], float]]:\n", " qc, qr, _ = _graupel_toy_scan(qc, qr)\n", "\n", " return qc, qr" @@ -205,7 +202,7 @@ "source": [ "def test_scan_operator():\n", " cell_k_domain = gtx.domain({C: n_cells, 
K: n_levels})\n", - " \n", + "\n", " qc = random_field(cell_k_domain, allocator=backend)\n", " qr = random_field(cell_k_domain, allocator=backend)\n", "\n", diff --git a/docs/user/next/workshop/exercises/8_diffusion_exercise_solution.ipynb b/docs/user/next/workshop/exercises/8_diffusion_exercise_solution.ipynb index e12c51973a..d4bcdb33d5 100644 --- a/docs/user/next/workshop/exercises/8_diffusion_exercise_solution.ipynb +++ b/docs/user/next/workshop/exercises/8_diffusion_exercise_solution.ipynb @@ -31,39 +31,36 @@ " kappa: float,\n", " dt: float,\n", ") -> tuple[np.array, np.array]:\n", - "\n", " # initialize\n", " TEinit = TE\n", " inv_primal_edge_length = inv_primal_edge_length[:, np.newaxis]\n", " inv_vert_vert_length = inv_vert_vert_length[:, np.newaxis]\n", "\n", " # predict\n", - " TE = TEinit + 0.5*dt*TE_t\n", + " TE = TEinit + 0.5 * dt * TE_t\n", "\n", " # interpolate temperature from edges to vertices\n", " TV = np.sum(TE[v2e], axis=1) / nnbhV\n", "\n", " # compute nabla2 using the finite differences\n", " TEnabla2 = np.sum(\n", - " TV[e2c2v] * inv_primal_edge_length ** 2\n", - " + TV[e2c2v] * inv_vert_vert_length ** 2,\n", + " TV[e2c2v] * inv_primal_edge_length**2 + TV[e2c2v] * inv_vert_vert_length**2,\n", " axis=1,\n", " )\n", "\n", " TEnabla2 = TEnabla2 - (\n", - " (2.0 * TE * inv_primal_edge_length ** 2)\n", - " + (2.0 * TE * inv_vert_vert_length ** 2)\n", + " (2.0 * TE * inv_primal_edge_length**2) + (2.0 * TE * inv_vert_vert_length**2)\n", " )\n", "\n", " # build ODEs\n", " TE_t = np.where(\n", " boundary_edge,\n", - " 0.,\n", - " kappa*TEnabla2,\n", + " 0.0,\n", + " kappa * TEnabla2,\n", " )\n", "\n", " # correct\n", - " TE = TEinit + dt*TE_t\n", + " TE = TEinit + dt * TE_t\n", " return TE_t, TE" ] }, @@ -88,38 +85,34 @@ " gtx.Field[Dims[E], float],\n", " gtx.Field[Dims[E], float],\n", "]:\n", - "\n", " # initialize\n", " TEinit = TE\n", "\n", " # predict\n", - " TE = TEinit + 0.5*dt*TE_t\n", + " TE = TEinit + 0.5 * dt * TE_t\n", "\n", " # 
interpolate temperature from edges to vertices\n", " TV = neighbor_sum(TE(V2E), axis=V2EDim) / nnbhV\n", "\n", " # compute nabla2 using the finite differences\n", " TEnabla2 = neighbor_sum(\n", - " (TV(E2C2V) * inv_primal_edge_length ** 2\n", - " + TV(E2C2V) * inv_vert_vert_length ** 2),\n", - " axis=E2C2VDim\n", + " (TV(E2C2V) * inv_primal_edge_length**2 + TV(E2C2V) * inv_vert_vert_length**2), axis=E2C2VDim\n", " )\n", "\n", " TEnabla2 = TEnabla2 - (\n", - " (2.0 * TE * inv_primal_edge_length ** 2)\n", - " + (2.0 * TE * inv_vert_vert_length ** 2)\n", + " (2.0 * TE * inv_primal_edge_length**2) + (2.0 * TE * inv_vert_vert_length**2)\n", " )\n", "\n", " # build ODEs\n", " TE_t = where(\n", " boundary_edge,\n", - " 0.,\n", - " kappa*TEnabla2,\n", + " 0.0,\n", + " kappa * TEnabla2,\n", " )\n", "\n", " # correct\n", - " TE = TEinit + dt*TE_t\n", - " \n", + " TE = TEinit + dt * TE_t\n", + "\n", " return TE_t, TE" ] }, @@ -134,23 +127,23 @@ " backend = None\n", " # backend = gtfn_cpu\n", " # backend = gtfn_gpu\n", - " \n", + "\n", " cell_domain = gtx.domain({C: n_cells})\n", " edge_domain = gtx.domain({E: n_edges})\n", " vertex_domain = gtx.domain({V: n_vertices})\n", - " \n", + "\n", " u = random_field(edge_domain, allocator=backend)\n", " v = random_field(edge_domain, allocator=backend)\n", " nx = random_field(edge_domain, allocator=backend)\n", " ny = random_field(edge_domain, allocator=backend)\n", " L = random_field(edge_domain, allocator=backend)\n", " dualL = random_field(vertex_domain, allocator=backend)\n", - " divergence_gt4py_1 = gtx.zeros(edge_domain, allocator=backend)\n", - " divergence_gt4py_2 = gtx.zeros(edge_domain, allocator=backend)\n", + " divergence_gt4py_1 = gtx.zeros(edge_domain, allocator=backend)\n", + " divergence_gt4py_2 = gtx.zeros(edge_domain, allocator=backend)\n", " kappa = 1.0\n", " dt = 1.0\n", "\n", - " divergence_ref_1, divergence_ref_2 = diffusion_step_numpy( \n", + " divergence_ref_1, divergence_ref_2 = diffusion_step_numpy(\n", " 
e2c2v_table,\n", " v2e_table,\n", " u.asnumpy(),\n", @@ -160,16 +153,27 @@ " dualL.asnumpy(),\n", " L.asnumpy(),\n", " kappa,\n", - " dt\n", + " dt,\n", " )\n", "\n", - " e2c2v_connectivity = gtx.NeighborTableOffsetProvider(e2c2v_table, E, V, 4, has_skip_values=False)\n", + " e2c2v_connectivity = gtx.NeighborTableOffsetProvider(\n", + " e2c2v_table, E, V, 4, has_skip_values=False\n", + " )\n", " v2e_connectivity = gtx.NeighborTableOffsetProvider(v2e_table, V, E, 6, has_skip_values=False)\n", "\n", " diffusion_step(\n", - " u, v, nx, ny, dualL, L, kappa, dt, out=(divergence_gt4py_1, divergence_gt4py_2), offset_provider = {E2C2V.value: e2c2v_connectivity, V2E.value: v2e_connectivity}\n", + " u,\n", + " v,\n", + " nx,\n", + " ny,\n", + " dualL,\n", + " L,\n", + " kappa,\n", + " dt,\n", + " out=(divergence_gt4py_1, divergence_gt4py_2),\n", + " offset_provider={E2C2V.value: e2c2v_connectivity, V2E.value: v2e_connectivity},\n", " )\n", - " \n", + "\n", " assert np.allclose(divergence_gt4py_1.asnumpy(), divergence_ref_1)\n", " assert np.allclose(divergence_gt4py_2.asnumpy(), divergence_ref_2)" ] diff --git a/docs/user/next/workshop/jupyter_intro.ipynb b/docs/user/next/workshop/jupyter_intro.ipynb index c1100223a1..e63b750b67 100644 --- a/docs/user/next/workshop/jupyter_intro.ipynb +++ b/docs/user/next/workshop/jupyter_intro.ipynb @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"\".join(map(lambda c: chr(ord(c) - 2), \"Kv\\\"yqtmgf#\")))" + "print(\"\".join(map(lambda c: chr(ord(c) - 2), 'Kv\"yqtmgf#')))" ] }, { diff --git a/docs/user/next/workshop/slides/slides_1.ipynb b/docs/user/next/workshop/slides/slides_1.ipynb index ff3ca764e4..f1e0229d4f 100644 --- a/docs/user/next/workshop/slides/slides_1.ipynb +++ b/docs/user/next/workshop/slides/slides_1.ipynb @@ -96,7 +96,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore')" + "\n", + "warnings.filterwarnings(\"ignore\")" ] }, { @@ -213,7 +214,7 @@ "\n", "a = 
gtx.zeros(domain, dtype=float64)\n", "b = gtx.full(domain, fill_value=3.0, dtype=float64)\n", - "c = gtx.as_field([Cell, K], np.fromfunction(lambda c, k: c*10+k, shape=domain.shape))\n", + "c = gtx.as_field([Cell, K], np.fromfunction(lambda c, k: c * 10 + k, shape=domain.shape))\n", "\n", "print(\"a definition: \\n {}\".format(a))\n", "print(\"a array: \\n {}\".format(a.asnumpy()))\n", @@ -241,8 +242,9 @@ "outputs": [], "source": [ "@gtx.field_operator\n", - "def add(a: gtx.Field[Dims[Cell, K], float64],\n", - " b: gtx.Field[Dims[Cell, K], float64]) -> gtx.Field[Dims[Cell, K], float64]:\n", + "def add(\n", + " a: gtx.Field[Dims[Cell, K], float64], b: gtx.Field[Dims[Cell, K], float64]\n", + ") -> gtx.Field[Dims[Cell, K], float64]:\n", " return a + b" ] }, @@ -311,9 +313,11 @@ "outputs": [], "source": [ "@gtx.program\n", - "def run_add(a : gtx.Field[Dims[Cell, K], float64],\n", - " b : gtx.Field[Dims[Cell, K], float64],\n", - " result : gtx.Field[Dims[Cell, K], float64]):\n", + "def run_add(\n", + " a: gtx.Field[Dims[Cell, K], float64],\n", + " b: gtx.Field[Dims[Cell, K], float64],\n", + " result: gtx.Field[Dims[Cell, K], float64],\n", + "):\n", " add(a, b, out=result)\n", " add(b, result, out=result)" ] diff --git a/docs/user/next/workshop/slides/slides_2.ipynb b/docs/user/next/workshop/slides/slides_2.ipynb index 903182b7bc..1e8925087f 100644 --- a/docs/user/next/workshop/slides/slides_2.ipynb +++ b/docs/user/next/workshop/slides/slides_2.ipynb @@ -17,7 +17,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore')" + "\n", + "warnings.filterwarnings(\"ignore\")" ] }, { @@ -122,10 +123,12 @@ "source": [ "Koff = gtx.FieldOffset(\"Koff\", source=K, target=(K,))\n", "\n", + "\n", "@gtx.field_operator\n", "def a_offset(a_off: gtx.Field[Dims[K], float64]) -> gtx.Field[Dims[K], float64]:\n", " return a_off(Koff[1])\n", "\n", + "\n", "result = gtx.zeros(gtx.domain({K: 6}))\n", "\n", "a_offset(a_off, out=result[:-1], 
offset_provider={\"Koff\": K})\n", @@ -177,29 +180,33 @@ "metadata": {}, "outputs": [], "source": [ - "e2c_table = np.array([\n", - " [0, -1], # edge 0 (neighbours: cell 0)\n", - " [2, -1], # edge 1\n", - " [2, -1], # edge 2\n", - " [3, -1], # edge 3\n", - " [4, -1], # edge 4\n", - " [5, -1], # edge 5\n", - " [0, 5], # edge 6 (neighbours: cell 0, cell 5)\n", - " [0, 1], # edge 7\n", - " [1, 2], # edge 8\n", - " [1, 3], # edge 9\n", - " [3, 4], # edge 10\n", - " [4, 5] # edge 11\n", - "])\n", - "\n", - "c2e_table = np.array([\n", - " [0, 6, 7], # cell 0 (neighbors: edge 0, edge 6, edge 7)\n", - " [7, 8, 9], # cell 1\n", - " [1, 2, 8], # cell 2\n", - " [3, 9, 10], # cell 3\n", - " [4, 10, 11], # cell 4\n", - " [5, 6, 11], # cell 5\n", - "])" + "e2c_table = np.array(\n", + " [\n", + " [0, -1], # edge 0 (neighbours: cell 0)\n", + " [2, -1], # edge 1\n", + " [2, -1], # edge 2\n", + " [3, -1], # edge 3\n", + " [4, -1], # edge 4\n", + " [5, -1], # edge 5\n", + " [0, 5], # edge 6 (neighbours: cell 0, cell 5)\n", + " [0, 1], # edge 7\n", + " [1, 2], # edge 8\n", + " [1, 3], # edge 9\n", + " [3, 4], # edge 10\n", + " [4, 5], # edge 11\n", + " ]\n", + ")\n", + "\n", + "c2e_table = np.array(\n", + " [\n", + " [0, 6, 7], # cell 0 (neighbors: edge 0, edge 6, edge 7)\n", + " [7, 8, 9], # cell 1\n", + " [1, 2, 8], # cell 2\n", + " [3, 9, 10], # cell 3\n", + " [4, 10, 11], # cell 4\n", + " [5, 6, 11], # cell 5\n", + " ]\n", + ")" ] }, { @@ -298,13 +305,19 @@ ], "source": [ "@gtx.field_operator\n", - "def nearest_cell_to_edge(cell_field: gtx.Field[Dims[Cell], float64]) -> gtx.Field[Dims[Edge], float64]:\n", - " return cell_field(E2C[0]) # 0th index to isolate edge dimension\n", + "def nearest_cell_to_edge(\n", + " cell_field: gtx.Field[Dims[Cell], float64],\n", + ") -> gtx.Field[Dims[Edge], float64]:\n", + " return cell_field(E2C[0]) # 0th index to isolate edge dimension\n", "\n", - "@gtx.program # uses skip_values, therefore we cannot use embedded\n", - "def 
run_nearest_cell_to_edge(cell_field: gtx.Field[Dims[Cell], float64], edge_field: gtx.Field[Dims[Edge], float64]):\n", + "\n", + "@gtx.program # uses skip_values, therefore we cannot use embedded\n", + "def run_nearest_cell_to_edge(\n", + " cell_field: gtx.Field[Dims[Cell], float64], edge_field: gtx.Field[Dims[Edge], float64]\n", + "):\n", " nearest_cell_to_edge(cell_field, out=edge_field)\n", "\n", + "\n", "run_nearest_cell_to_edge(cell_field, edge_field, offset_provider={\"E2C\": E2C_offset_provider})\n", "\n", "print(\"0th adjacent cell's value: {}\".format(edge_field.asnumpy()))" @@ -367,13 +380,19 @@ ], "source": [ "@gtx.field_operator\n", - "def sum_adjacent_cells(cell_field : gtx.Field[Dims[Cell], float64]) -> gtx.Field[Dims[Edge], float64]:\n", + "def sum_adjacent_cells(\n", + " cell_field: gtx.Field[Dims[Cell], float64],\n", + ") -> gtx.Field[Dims[Edge], float64]:\n", " return neighbor_sum(cell_field(E2C), axis=E2CDim)\n", "\n", - "@gtx.program # uses skip_values, therefore we cannot use embedded\n", - "def run_sum_adjacent_cells(cell_field : gtx.Field[Dims[Cell], float64], edge_field: gtx.Field[Dims[Edge], float64]):\n", + "\n", + "@gtx.program # uses skip_values, therefore we cannot use embedded\n", + "def run_sum_adjacent_cells(\n", + " cell_field: gtx.Field[Dims[Cell], float64], edge_field: gtx.Field[Dims[Edge], float64]\n", + "):\n", " sum_adjacent_cells(cell_field, out=edge_field)\n", "\n", + "\n", "run_sum_adjacent_cells(cell_field, edge_field, offset_provider={\"E2C\": E2C_offset_provider})\n", "\n", "print(\"sum of adjacent cells: {}\".format(edge_field.asnumpy()))" diff --git a/docs/user/next/workshop/slides/slides_3.ipynb b/docs/user/next/workshop/slides/slides_3.ipynb index 362a169322..bd85f5027b 100644 --- a/docs/user/next/workshop/slides/slides_3.ipynb +++ b/docs/user/next/workshop/slides/slides_3.ipynb @@ -17,7 +17,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore')" + "\n", + 
"warnings.filterwarnings(\"ignore\")" ] }, { @@ -95,13 +96,18 @@ "true_Field = gtx.as_field([Cell], np.asarray([11.0, 12.0, 13.0, 14.0, 15.0]))\n", "false_Field = gtx.as_field([Cell], np.asarray([21.0, 22.0, 23.0, 24.0, 25.0]))\n", "\n", - "result = gtx.zeros(gtx.domain({Cell:5}))\n", + "result = gtx.zeros(gtx.domain({Cell: 5}))\n", + "\n", "\n", "@gtx.field_operator\n", - "def conditional(mask: gtx.Field[Dims[Cell], bool], true_Field: gtx.Field[Dims[Cell], gtx.float64], false_Field: gtx.Field[Dims[Cell], gtx.float64]\n", + "def conditional(\n", + " mask: gtx.Field[Dims[Cell], bool],\n", + " true_Field: gtx.Field[Dims[Cell], gtx.float64],\n", + " false_Field: gtx.Field[Dims[Cell], gtx.float64],\n", ") -> gtx.Field[Dims[Cell], gtx.float64]:\n", " return where(mask, true_Field, false_Field)\n", "\n", + "\n", "conditional(mask, true_Field, false_Field, out=result, offset_provider={})\n", "print(\"mask array: {}\".format(mask.asnumpy()))\n", "print(\"true_Field array: {}\".format(true_Field.asnumpy()))\n", @@ -127,15 +133,19 @@ "outputs": [], "source": [ "@gtx.field_operator\n", - "def add(a: gtx.Field[Dims[Cell, K], gtx.float64],\n", - " b: gtx.Field[Dims[Cell, K], gtx.float64]) -> gtx.Field[Dims[Cell, K], gtx.float64]:\n", - " return a + b # 2.0 + 3.0\n", + "def add(\n", + " a: gtx.Field[Dims[Cell, K], gtx.float64], b: gtx.Field[Dims[Cell, K], gtx.float64]\n", + ") -> gtx.Field[Dims[Cell, K], gtx.float64]:\n", + " return a + b # 2.0 + 3.0\n", + "\n", "\n", "@gtx.program\n", - "def run_add_domain(a : gtx.Field[Dims[Cell, K], gtx.float64],\n", - " b : gtx.Field[Dims[Cell, K], gtx.float64],\n", - " result : gtx.Field[Dims[Cell, K], gtx.float64]):\n", - " add(a, b, out=result, domain={Cell: (1, 3), K: (1, 4)}) " + "def run_add_domain(\n", + " a: gtx.Field[Dims[Cell, K], gtx.float64],\n", + " b: gtx.Field[Dims[Cell, K], gtx.float64],\n", + " result: gtx.Field[Dims[Cell, K], gtx.float64],\n", + "):\n", + " add(a, b, out=result, domain={Cell: (1, 3), K: (1, 4)})" ] }, { 
diff --git a/docs/user/next/workshop/slides/slides_4.ipynb b/docs/user/next/workshop/slides/slides_4.ipynb index 324e30fe29..a5b8aaf78f 100644 --- a/docs/user/next/workshop/slides/slides_4.ipynb +++ b/docs/user/next/workshop/slides/slides_4.ipynb @@ -17,7 +17,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore')" + "\n", + "warnings.filterwarnings(\"ignore\")" ] }, { @@ -85,12 +86,16 @@ ], "source": [ "x = np.asarray([1.0, 2.0, 4.0, 6.0, 0.0, 2.0, 5.0])\n", + "\n", + "\n", "def partial_sum(x):\n", " for i in range(len(x)):\n", " if i > 0:\n", - " x[i] = x[i-1] + x[i]\n", + " x[i] = x[i - 1] + x[i]\n", " return x\n", - "print(f\"input:\\n {x}\") \n", + "\n", + "\n", + "print(f\"input:\\n {x}\")\n", "print(f\"partial sum:\\n {partial_sum(x)}\")" ] }, diff --git a/examples/cartesian/demo_burgers.ipynb b/examples/cartesian/demo_burgers.ipynb index 5e1a698cad..3521a845ea 100644 --- a/examples/cartesian/demo_burgers.ipynb +++ b/examples/cartesian/demo_burgers.ipynb @@ -103,8 +103,8 @@ "ny = 10 * 2**factor + 1\n", "\n", "# time\n", - "cfl = 1.\n", - "timestep = cfl / (nx-1)**2\n", + "cfl = 1.0\n", + "timestep = cfl / (nx - 1) ** 2\n", "niter = 4**factor * 100\n", "\n", "# output\n", @@ -127,84 +127,90 @@ "source": [ "@gtscript.function\n", "def absolute_value(phi):\n", - " abs_phi = phi[0, 0, 0] * (phi[0, 0, 0] >= 0.) - phi[0, 0, 0] * (phi[0, 0, 0] < 0.)\n", + " abs_phi = phi[0, 0, 0] * (phi[0, 0, 0] >= 0.0) - phi[0, 0, 0] * (phi[0, 0, 0] < 0.0)\n", " return abs_phi\n", "\n", + "\n", "@gtscript.function\n", "def advection_x(dx, u, abs_u, phi):\n", - " adv_phi_x = u[0, 0, 0] / (60. * dx) * (\n", - " + 45. * (phi[1, 0, 0] - phi[-1, 0, 0])\n", - " - 9. * (phi[2, 0, 0] - phi[-2, 0, 0])\n", - " + (phi[3, 0, 0] - phi[-3, 0, 0])\n", - " ) - abs_u[0, 0, 0] / (60. * dx) * (\n", - " + (phi[3, 0, 0] + phi[-3, 0, 0])\n", - " - 6. * (phi[2, 0, 0] + phi[-2, 0, 0])\n", - " + 15. * (phi[1, 0, 0] + phi[-1, 0, 0])\n", - " - 20. 
* phi[0, 0, 0]\n", + " adv_phi_x = u[0, 0, 0] / (60.0 * dx) * (\n", + " +45.0 * (phi[1, 0, 0] - phi[-1, 0, 0])\n", + " - 9.0 * (phi[2, 0, 0] - phi[-2, 0, 0])\n", + " + (phi[3, 0, 0] - phi[-3, 0, 0])\n", + " ) - abs_u[0, 0, 0] / (60.0 * dx) * (\n", + " +(phi[3, 0, 0] + phi[-3, 0, 0])\n", + " - 6.0 * (phi[2, 0, 0] + phi[-2, 0, 0])\n", + " + 15.0 * (phi[1, 0, 0] + phi[-1, 0, 0])\n", + " - 20.0 * phi[0, 0, 0]\n", " )\n", " return adv_phi_x\n", "\n", + "\n", "@gtscript.function\n", "def advection_y(dy, v, abs_v, phi):\n", - " adv_phi_y = v[0, 0, 0] / (60. * dy) * (\n", - " + 45. * (phi[0, 1, 0] - phi[0, -1, 0])\n", - " - 9. * (phi[0, 2, 0] - phi[0, -2, 0])\n", - " + (phi[0, 3, 0] - phi[0, -3, 0])\n", - " ) - abs_v[0, 0, 0] / (60. * dy) * (\n", - " + (phi[0, 3, 0] + phi[0, -3, 0])\n", - " - 6. * (phi[0, 2, 0] + phi[0, -2, 0])\n", - " + 15. * (phi[0, 1, 0] + phi[0, -1, 0])\n", - " - 20. * phi[0, 0, 0]\n", + " adv_phi_y = v[0, 0, 0] / (60.0 * dy) * (\n", + " +45.0 * (phi[0, 1, 0] - phi[0, -1, 0])\n", + " - 9.0 * (phi[0, 2, 0] - phi[0, -2, 0])\n", + " + (phi[0, 3, 0] - phi[0, -3, 0])\n", + " ) - abs_v[0, 0, 0] / (60.0 * dy) * (\n", + " +(phi[0, 3, 0] + phi[0, -3, 0])\n", + " - 6.0 * (phi[0, 2, 0] + phi[0, -2, 0])\n", + " + 15.0 * (phi[0, 1, 0] + phi[0, -1, 0])\n", + " - 20.0 * phi[0, 0, 0]\n", " )\n", " return adv_phi_y\n", "\n", + "\n", "@gtscript.function\n", "def advection(dx, dy, u, v):\n", " abs_u = absolute_value(phi=u)\n", " abs_v = absolute_value(phi=v)\n", - " \n", + "\n", " adv_u_x = advection_x(dx=dx, u=u, abs_u=abs_u, phi=u)\n", " adv_u_y = advection_y(dy=dy, v=v, abs_v=abs_v, phi=u)\n", " adv_u = adv_u_x[0, 0, 0] + adv_u_y[0, 0, 0]\n", - " \n", + "\n", " adv_v_x = advection_x(dx=dx, u=u, abs_u=abs_u, phi=v)\n", " adv_v_y = advection_y(dy=dy, v=v, abs_v=abs_v, phi=v)\n", " adv_v = adv_v_x[0, 0, 0] + adv_v_y[0, 0, 0]\n", - " \n", + "\n", " return adv_u, adv_v\n", "\n", + "\n", "@gtscript.function\n", "def diffusion_x(dx, phi):\n", " diff_phi = (\n", - " - phi[-2, 
0, 0]\n", - " + 16. * phi[-1, 0, 0]\n", - " - 30. * phi[0, 0, 0]\n", - " + 16. * phi[1, 0, 0]\n", - " - phi[2, 0, 0]\n", - " ) / (12. * dx**2)\n", + " -phi[-2, 0, 0]\n", + " + 16.0 * phi[-1, 0, 0]\n", + " - 30.0 * phi[0, 0, 0]\n", + " + 16.0 * phi[1, 0, 0]\n", + " - phi[2, 0, 0]\n", + " ) / (12.0 * dx**2)\n", " return diff_phi\n", "\n", + "\n", "@gtscript.function\n", "def diffusion_y(dy, phi):\n", " diff_phi = (\n", - " - phi[0, -2, 0]\n", - " + 16. * phi[0, -1, 0]\n", - " - 30. * phi[0, 0, 0]\n", - " + 16. * phi[0, 1, 0]\n", - " - phi[0, 2, 0]\n", - " ) / (12. * dy**2)\n", + " -phi[0, -2, 0]\n", + " + 16.0 * phi[0, -1, 0]\n", + " - 30.0 * phi[0, 0, 0]\n", + " + 16.0 * phi[0, 1, 0]\n", + " - phi[0, 2, 0]\n", + " ) / (12.0 * dy**2)\n", " return diff_phi\n", "\n", + "\n", "@gtscript.function\n", "def diffusion(dx, dy, u, v):\n", " diff_u_x = diffusion_x(dx=dx, phi=u)\n", " diff_u_y = diffusion_y(dy=dy, phi=u)\n", " diff_u = diff_u_x[0, 0, 0] + diff_u_y[0, 0, 0]\n", - " \n", + "\n", " diff_v_x = diffusion_x(dx=dx, phi=v)\n", " diff_v_y = diffusion_y(dy=dy, phi=v)\n", " diff_v = diff_v_x[0, 0, 0] + diff_v_y[0, 0, 0]\n", - " \n", + "\n", " return diff_u, diff_v" ] }, @@ -232,24 +238,27 @@ ], "source": [ "# gt4py settings\n", - "backend = \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", + "backend = (\n", + " \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", + ")\n", "backend_opts = {\"verbose\": True} if backend.startswith(\"gt\") else {}\n", "dtype = np.float64\n", "origin = (3, 3, 0)\n", "rebuild = False\n", "\n", - "externals={\n", + "externals = {\n", " \"absolute_value\": absolute_value,\n", " \"advection_x\": advection_x,\n", " \"advection_y\": advection_y,\n", " \"advection\": advection,\n", " \"diffusion_x\": diffusion_x,\n", " \"diffusion_y\": diffusion_y,\n", - " \"diffusion\": diffusion\n", + " \"diffusion\": diffusion,\n", "}\n", 
"\n", "start_time = time.time()\n", "\n", + "\n", "@gtscript.stencil(backend=backend, externals=externals, rebuild=rebuild, **backend_opts)\n", "def rk_stage(\n", " in_u_now: gtscript.Field[dtype],\n", @@ -262,21 +271,22 @@ " dt: float,\n", " dx: float,\n", " dy: float,\n", - " mu: float\n", + " mu: float,\n", "):\n", " with computation(PARALLEL), interval(...):\n", " adv_u, adv_v = advection(dx=dx, dy=dy, u=in_u_tmp, v=in_v_tmp)\n", " diff_u, diff_v = diffusion(dx=dx, dy=dy, u=in_u_tmp, v=in_v_tmp)\n", - " out_u = in_u_now[0, 0, 0] + dt * (- adv_u[0, 0, 0] + mu * diff_u[0, 0, 0])\n", - " out_v = in_v_now[0, 0, 0] + dt * (- adv_v[0, 0, 0] + mu * diff_v[0, 0, 0])\n", + " out_u = in_u_now[0, 0, 0] + dt * (-adv_u[0, 0, 0] + mu * diff_u[0, 0, 0])\n", + " out_v = in_v_now[0, 0, 0] + dt * (-adv_v[0, 0, 0] + mu * diff_v[0, 0, 0])\n", + "\n", "\n", - " \n", "@gtscript.stencil(backend=backend)\n", "def copy(in_phi: gtscript.Field[dtype], out_phi: gtscript.Field[dtype]):\n", " with computation(PARALLEL), interval(...):\n", " out_phi = in_phi[0, 0, 0]\n", - " \n", - "print(\"\\nCompilation time: \", time.time() - start_time )" + "\n", + "\n", + "print(\"\\nCompilation time: \", time.time() - start_time)" ] }, { @@ -295,49 +305,67 @@ "source": [ "def solution_factory(t, x, y, slice_x=None, slice_y=None):\n", " nx, ny = x.shape[0], y.shape[0]\n", - " \n", + "\n", " slice_x = slice_x or slice(0, nx)\n", " slice_y = slice_y or slice(0, ny)\n", - " \n", + "\n", " mi = slice_x.stop - slice_x.start\n", " mj = slice_y.stop - slice_y.start\n", - " \n", + "\n", " x2d = np.tile(x[slice_x, np.newaxis, np.newaxis], (1, mj, 1))\n", " y2d = np.tile(y[np.newaxis, slice_y, np.newaxis], (mi, 1, 1))\n", - " \n", + "\n", " if use_case == \"zhao\":\n", - " u = - 4. * mu * np.pi * np.exp(- 5. * np.pi**2 * mu * t) * \\\n", - " np.cos(2. * np.pi * x2d) * np.sin(np.pi * y2d) / \\\n", - " (2. + np.exp(- 5. * np.pi**2 * mu * t) * np.sin(2. * np.pi * x2d) * np.sin(np.pi * y2d))\n", - " v = - 2. 
* mu * np.pi * np.exp(- 5.0 * np.pi**2 * mu * t) * \\\n", - " np.sin(2. * np.pi * x2d) * np.cos(np.pi * y2d) / \\\n", - " (2. + np.exp(- 5. * np.pi**2 * mu * t) * np.sin(2. * np.pi * x2d) * np.sin(np.pi * y2d))\n", + " u = (\n", + " -4.0\n", + " * mu\n", + " * np.pi\n", + " * np.exp(-5.0 * np.pi**2 * mu * t)\n", + " * np.cos(2.0 * np.pi * x2d)\n", + " * np.sin(np.pi * y2d)\n", + " / (\n", + " 2.0\n", + " + np.exp(-5.0 * np.pi**2 * mu * t) * np.sin(2.0 * np.pi * x2d) * np.sin(np.pi * y2d)\n", + " )\n", + " )\n", + " v = (\n", + " -2.0\n", + " * mu\n", + " * np.pi\n", + " * np.exp(-5.0 * np.pi**2 * mu * t)\n", + " * np.sin(2.0 * np.pi * x2d)\n", + " * np.cos(np.pi * y2d)\n", + " / (\n", + " 2.0\n", + " + np.exp(-5.0 * np.pi**2 * mu * t) * np.sin(2.0 * np.pi * x2d) * np.sin(np.pi * y2d)\n", + " )\n", + " )\n", " elif use_case == \"hopf_cole\":\n", - " u = .75 - 1. / (4. * (1. + np.exp(- t - 4.*x2d + 4.*y2d) / (32.*mu)))\n", - " v = .75 + 1. / (4. * (1. + np.exp(- t - 4.*x2d + 4.*y2d) / (32.*mu)))\n", + " u = 0.75 - 1.0 / (4.0 * (1.0 + np.exp(-t - 4.0 * x2d + 4.0 * y2d) / (32.0 * mu)))\n", + " v = 0.75 + 1.0 / (4.0 * (1.0 + np.exp(-t - 4.0 * x2d + 4.0 * y2d) / (32.0 * mu)))\n", " else:\n", " raise NotImplementedError()\n", - " \n", + "\n", " return u, v\n", "\n", "\n", "def set_initial_solution(x, y, u, v):\n", - " u[...], v[...] = solution_factory(0., x, y)\n", - " \n", - " \n", + " u[...], v[...] 
= solution_factory(0.0, x, y)\n", + "\n", + "\n", "def enforce_boundary_conditions(t, x, y, u, v):\n", " nx, ny = x.shape[0], y.shape[0]\n", - " \n", + "\n", " slice_x, slice_y = slice(0, 3), slice(0, ny)\n", " u[slice_x, slice_y], v[slice_x, slice_y] = solution_factory(t, x, y, slice_x, slice_y)\n", - " \n", - " slice_x, slice_y = slice(nx-3, nx), slice(0, ny)\n", + "\n", + " slice_x, slice_y = slice(nx - 3, nx), slice(0, ny)\n", " u[slice_x, slice_y], v[slice_x, slice_y] = solution_factory(t, x, y, slice_x, slice_y)\n", - " \n", - " slice_x, slice_y = slice(3, nx-3), slice(0, 3)\n", + "\n", + " slice_x, slice_y = slice(3, nx - 3), slice(0, 3)\n", " u[slice_x, slice_y], v[slice_x, slice_y] = solution_factory(t, x, y, slice_x, slice_y)\n", - " \n", - " slice_x, slice_y = slice(3, nx-3), slice(ny-3, ny)\n", + "\n", + " slice_x, slice_y = slice(3, nx - 3), slice(ny - 3, ny)\n", " u[slice_x, slice_y], v[slice_x, slice_y] = solution_factory(t, x, y, slice_x, slice_y)" ] }, @@ -372,10 +400,10 @@ } ], "source": [ - "x = np.linspace(0., 1., nx)\n", - "dx = 1. / (nx - 1)\n", - "y = np.linspace(0., 1., ny)\n", - "dy = 1. 
/ (ny - 1)\n", + "x = np.linspace(0.0, 1.0, nx)\n", + "dx = 1.0 / (nx - 1)\n", + "y = np.linspace(0.0, 1.0, ny)\n", + "dy = 1.0 / (ny - 1)\n", "\n", "u_now = gt4py.storage.zeros((nx, ny, 1), dtype, backend=backend, aligned_index=origin)\n", "v_now = gt4py.storage.zeros((nx, ny, 1), dtype, backend=backend, aligned_index=origin)\n", @@ -384,29 +412,38 @@ "\n", "set_initial_solution(x, y, u_new, v_new)\n", "\n", - "rk_fraction = (1./3., .5, 1.)\n", + "rk_fraction = (1.0 / 3.0, 0.5, 1.0)\n", "\n", - "t = 0.\n", + "t = 0.0\n", "\n", "start_time = time.time()\n", "\n", "for i in range(niter):\n", " copy(in_phi=u_new, out_phi=u_now, origin=(0, 0, 0), domain=(nx, ny, 1))\n", " copy(in_phi=v_new, out_phi=v_now, origin=(0, 0, 0), domain=(nx, ny, 1))\n", - " \n", + "\n", " for k in range(3):\n", " dt = rk_fraction[k] * timestep\n", - " \n", + "\n", " rk_stage(\n", - " in_u_now=u_now, in_v_now=v_now, in_u_tmp=u_new, in_v_tmp=v_new,\n", - " out_u=u_new, out_v=v_new, dt=dt, dx=dx, dy=dy, mu=mu,\n", - " origin=(3, 3, 0), domain=(nx-6, ny-6, 1)\n", + " in_u_now=u_now,\n", + " in_v_now=v_now,\n", + " in_u_tmp=u_new,\n", + " in_v_tmp=v_new,\n", + " out_u=u_new,\n", + " out_v=v_new,\n", + " dt=dt,\n", + " dx=dx,\n", + " dy=dy,\n", + " mu=mu,\n", + " origin=(3, 3, 0),\n", + " domain=(nx - 6, ny - 6, 1),\n", " )\n", - " \n", + "\n", " enforce_boundary_conditions(t + dt, x, y, u_new, v_new)\n", - " \n", + "\n", " t += timestep\n", - " if print_period > 0 and ((i+1) % print_period == 0 or i+1 == niter):\n", + " if print_period > 0 and ((i + 1) % print_period == 0 or i + 1 == niter):\n", " u_ex, v_ex = solution_factory(t, x, y)\n", " err_u = np.linalg.norm(u_new[3:-3, 3:-3] - u_ex[3:-3, 3:-3]) * np.sqrt(dx * dy)\n", " err_v = np.linalg.norm(v_new[3:-3, 3:-3] - v_ex[3:-3, 3:-3]) * np.sqrt(dx * dy)\n", @@ -416,7 +453,7 @@ " )\n", " )\n", "\n", - "print(\"\\n- Running time: \", time.time() - start_time )\n" + "print(\"\\n- Running time: \", time.time() - start_time)" ] }, { diff --git 
a/examples/cartesian/demo_horizontal_diffusion.ipynb b/examples/cartesian/demo_horizontal_diffusion.ipynb index 8e77614457..d1410da2db 100644 --- a/examples/cartesian/demo_horizontal_diffusion.ipynb +++ b/examples/cartesian/demo_horizontal_diffusion.ipynb @@ -67,7 +67,9 @@ "metadata": {}, "outputs": [], "source": [ - "backend = \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", + "backend = (\n", + " \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", + ")\n", "dtype = np.float64" ] }, @@ -84,8 +86,7 @@ "metadata": {}, "outputs": [], "source": [ - "\n", - "@gtscript.stencil(backend) # this decorator triggers compilation of the stencil\n", + "@gtscript.stencil(backend) # this decorator triggers compilation of the stencil\n", "def horizontal_diffusion(\n", " in_field: gtscript.Field[dtype],\n", " out_field: gtscript.Field[dtype],\n", @@ -101,7 +102,7 @@ " fly_field = 0 if (res * (in_field[0, 1, 0] - in_field[0, 0, 0])) > 0 else res\n", " out_field = in_field[0, 0, 0] - coeff[0, 0, 0] * (\n", " flx_field[0, 0, 0] - flx_field[-1, 0, 0] + fly_field[0, 0, 0] - fly_field[0, -1, 0]\n", - " )\n" + " )" ] }, { @@ -152,13 +153,15 @@ "yy = jj / N\n", "zz = kk / N\n", "\n", - "in_data = 5. + 8. * (2. 
+ np.cos(np.pi * (xx + 1.5 * yy)) + np.sin(2 * np.pi * (xx + 1.5 * yy))) / 4.\n", + "in_data = (\n", + " 5.0 + 8.0 * (2.0 + np.cos(np.pi * (xx + 1.5 * yy)) + np.sin(2 * np.pi * (xx + 1.5 * yy))) / 4.0\n", + ")\n", "out_data = np.zeros(shape)\n", "coeff_data = 0.025 * np.ones(shape)\n", "\n", "# Plot initialization\n", "projection = np.array(np.sum(in_data, axis=2))\n", - "plt.imshow(projection)\n" + "plt.imshow(projection)" ] }, { @@ -174,15 +177,9 @@ "metadata": {}, "outputs": [], "source": [ - "in_storage = gt4py.storage.from_array(\n", - " in_data, dtype, backend=backend, aligned_index=origin\n", - ")\n", - "out_storage = gt4py.storage.from_array(\n", - " out_data, dtype, backend=backend, aligned_index=origin\n", - ")\n", - "coeff_storage = gt4py.storage.from_array(\n", - " coeff_data, dtype, backend=backend, aligned_index=origin\n", - ")" + "in_storage = gt4py.storage.from_array(in_data, dtype, backend=backend, aligned_index=origin)\n", + "out_storage = gt4py.storage.from_array(out_data, dtype, backend=backend, aligned_index=origin)\n", + "coeff_storage = gt4py.storage.from_array(coeff_data, dtype, backend=backend, aligned_index=origin)" ] }, { diff --git a/examples/cartesian/demo_isentropic_diagnostics.ipynb b/examples/cartesian/demo_isentropic_diagnostics.ipynb index 56bd483fe0..8c0cfc7aa9 100644 --- a/examples/cartesian/demo_isentropic_diagnostics.ipynb +++ b/examples/cartesian/demo_isentropic_diagnostics.ipynb @@ -94,13 +94,13 @@ "nz = 64\n", "\n", "# brunt-vaisala frequency\n", - "bv = .01\n", + "bv = 0.01\n", "\n", "# physical constants\n", "rd = 287.05\n", "g = 9.81\n", "p_ref = 1.0e5\n", - "cp = 1004." 
+ "cp = 1004.0" ] }, { @@ -127,8 +127,10 @@ ], "source": [ "# gridtools4py settings\n", - "backend = \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", - "backend_opts = {'verbose': True} if backend.startswith('gt') else {}\n", + "backend = (\n", + " \"numpy\" # options: \"numpy\", \"gt:cpu_ifirst\", \"gt:cpu_kfirst\", \"gt:gpu\", \"dace:cpu\", \"dace:gpu\"\n", + ")\n", + "backend_opts = {\"verbose\": True} if backend.startswith(\"gt\") else {}\n", "dtype = np.float64\n", "origin = (3, 3, 0)\n", "rebuild = True\n", @@ -137,6 +139,7 @@ "\n", "start_time = time.time()\n", "\n", + "\n", "@gtscript.stencil(backend=backend, externals=externals, **backend_opts)\n", "def diagnostic_step(\n", " in_theta: gtscript.Field[dtype],\n", @@ -148,10 +151,10 @@ " inout_h: gtscript.Field[dtype],\n", " *,\n", " dtheta: float,\n", - " pt: float\n", + " pt: float,\n", "):\n", " # retrieve the pressure\n", - " with computation(FORWARD), interval(0,1):\n", + " with computation(FORWARD), interval(0, 1):\n", " inout_p = pt\n", " with computation(FORWARD), interval(1, None):\n", " inout_p = inout_p[0, 0, -1] + g * dtheta * in_s[0, 0, -1]\n", @@ -161,10 +164,10 @@ " out_exn = cp * (inout_p[0, 0, 0] / p_ref) ** (rd / cp)\n", "\n", " # compute the Montgomery potential\n", - " with computation(BACKWARD), interval(-2,-1):\n", + " with computation(BACKWARD), interval(-2, -1):\n", " mtg_s = in_theta[0, 0, 1] * out_exn[0, 0, 1] + g * in_hs[0, 0, 1]\n", " inout_mtg = mtg_s + 0.5 * dtheta * out_exn[0, 0, 1]\n", - " with computation(BACKWARD), interval(0,-2):\n", + " with computation(BACKWARD), interval(0, -2):\n", " inout_mtg = inout_mtg[0, 0, 1] + dtheta * out_exn[0, 0, 1]\n", "\n", " # compute the geometric height of the isentropes\n", @@ -172,13 +175,11 @@ " inout_h = in_hs[0, 0, 0]\n", " with computation(BACKWARD), interval(0, -1):\n", " inout_h = inout_h[0, 0, 1] - rd * (\n", - " in_theta[0, 0, 0] * out_exn[0, 0, 0]\n", - " + 
in_theta[0, 0, 1] * out_exn[0, 0, 1]\n", - " ) * (inout_p[0, 0, 0] - inout_p[0, 0, 1]) / (\n", - " cp * g * (inout_p[0, 0, 0] + inout_p[0, 0, 1])\n", - " )\n", - " \n", - "print(\"\\n- Compilation time: \", time.time() - start_time ) " + " in_theta[0, 0, 0] * out_exn[0, 0, 0] + in_theta[0, 0, 1] * out_exn[0, 0, 1]\n", + " ) * (inout_p[0, 0, 0] - inout_p[0, 0, 1]) / (cp * g * (inout_p[0, 0, 0] + inout_p[0, 0, 1]))\n", + "\n", + "\n", + "print(\"\\n- Compilation time: \", time.time() - start_time)" ] }, { @@ -196,47 +197,53 @@ "outputs": [], "source": [ "# define the vertical grid\n", - "theta1d = np.linspace(340., 280., nz+1)\n", - "theta = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", + "theta1d = np.linspace(340.0, 280.0, nz + 1)\n", + "theta = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", "theta[...] = theta1d[np.newaxis, np.newaxis, :]\n", "\n", "# the vertical grid spacing\n", - "dtheta = 60. / nz\n", + "dtheta = 60.0 / nz\n", "\n", "# let us assume the topography consists of a bell-shaped isolated mountain\n", - "hs = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", + "hs = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", "x1d = np.linspace(-150e3, 150e3, nx)\n", "y1d = np.linspace(-150e3, 150e3, ny)\n", "x, y = np.meshgrid(x1d, y1d, indexing=\"ij\")\n", - "hs[:, :, -1] = 1000. 
* np.exp(- (x / 50e3)**2 - (y / 50e3)**2)\n", + "hs[:, :, -1] = 1000.0 * np.exp(-((x / 50e3) ** 2) - (y / 50e3) ** 2)\n", "\n", "# initialize the Exner function (needed to compute the isentropic density)\n", - "exn = np.zeros((nx, ny, nz+1), dtype=dtype)\n", + "exn = np.zeros((nx, ny, nz + 1), dtype=dtype)\n", "exn[:, :, -1] = cp\n", "for k in range(nz - 1, -1, -1):\n", - " exn[:, :, k] = exn[:, :, k + 1] - dtheta * (g ** 2) / (\n", - " (bv ** 2) * (theta[:, :, k] ** 2)\n", - " )\n", + " exn[:, :, k] = exn[:, :, k + 1] - dtheta * (g**2) / ((bv**2) * (theta[:, :, k] ** 2))\n", "\n", "# retrieve the air pressure (needed to compute the isentropic density)\n", "p = p_ref * ((exn / cp) ** (cp / rd))\n", "\n", "# diagnose the isentropic density\n", - "s = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", + "s = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", "s[:, :, :-1] = -(p[:, :, :-1] - p[:, :, 1:]) / (g * dtheta)\n", "\n", "# allocate the output storages\n", - "out_p = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", - "out_exn = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", - "out_mtg = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", - "out_h = gt4py.storage.zeros((nx, ny, nz+1), dtype, backend=backend, aligned_index=origin)\n", + "out_p = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", + "out_exn = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", + "out_mtg = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", + "out_h = gt4py.storage.zeros((nx, ny, nz + 1), dtype, backend=backend, aligned_index=origin)\n", "\n", "# compute all the diagnostic variables\n", "diagnostic_step(\n", - " in_theta=theta, in_hs=hs, in_s=s, inout_p=out_p, out_exn=out_exn, \n", - " 
inout_mtg=out_mtg, inout_h=out_h, dtheta=dtheta, pt=p[0, 0, 0],\n", - " origin=(0, 0, 0), domain=(nx, ny, nz+1)\n", - ")\n" + " in_theta=theta,\n", + " in_hs=hs,\n", + " in_s=s,\n", + " inout_p=out_p,\n", + " out_exn=out_exn,\n", + " inout_mtg=out_mtg,\n", + " inout_h=out_h,\n", + " dtheta=dtheta,\n", + " pt=p[0, 0, 0],\n", + " origin=(0, 0, 0),\n", + " domain=(nx, ny, nz + 1),\n", + ")" ] }, { @@ -264,18 +271,18 @@ } ], "source": [ - "out_p =np.asarray(out_p)\n", - "out_exn =np.asarray(out_exn)\n", - "out_mtg =np.asarray(out_mtg)\n", + "out_p = np.asarray(out_p)\n", + "out_exn = np.asarray(out_exn)\n", + "out_mtg = np.asarray(out_mtg)\n", "\n", "j = int(ny / 2)\n", - "xx = 1e-3 * np.repeat(x[:, j, np.newaxis], nz+1, axis=1)\n", + "xx = 1e-3 * np.repeat(x[:, j, np.newaxis], nz + 1, axis=1)\n", "yy = 1e-3 * np.asarray(out_h[:, j, :])\n", "\n", "fig = plt.figure(figsize=(10, 10))\n", "\n", "ax00 = fig.add_subplot(2, 2, 1)\n", - "surf = ax00.contourf(xx, yy, 1e-2*out_p[:, j, :], cmap=\"Blues\")\n", + "surf = ax00.contourf(xx, yy, 1e-2 * out_p[:, j, :], cmap=\"Blues\")\n", "plt.colorbar(surf, orientation=\"vertical\")\n", "ax00.plot(xx[:, -1], yy[:, -1], color=\"black\", linewidth=1.5)\n", "ax00.set_xlim((-100, 100))\n", @@ -295,7 +302,7 @@ "ax01.set_title(\"Exner function [J kg$^{-1}$ K$^{-1}$]\")\n", "\n", "ax10 = fig.add_subplot(2, 2, 3)\n", - "surf = ax10.contourf(xx, yy, 1e-3*out_mtg[:, j, :], cmap=\"Reds\")\n", + "surf = ax10.contourf(xx, yy, 1e-3 * out_mtg[:, j, :], cmap=\"Reds\")\n", "ax10.plot(xx[:, -1], yy[:, -1], color=\"black\", linewidth=1.5)\n", "plt.colorbar(surf, orientation=\"vertical\")\n", "ax10.set_xlim((-100, 100))\n", @@ -325,7 +332,7 @@ "outputs": [], "source": [ "# This cell only works in Jupyter Lab with itkwidgets extension.\n", - "# Installation commands: \n", + "# Installation commands:\n", "#!pip install jupyterlab itkwidgets\n", "#!jupyter labextension install @jupyter-widgets/jupyterlab-manager itkwidgets\n", "\n", diff --git 
a/examples/lap_cartesian_vs_next.ipynb b/examples/lap_cartesian_vs_next.ipynb index 28837c2154..f415e02f91 100644 --- a/examples/lap_cartesian_vs_next.ipynb +++ b/examples/lap_cartesian_vs_next.ipynb @@ -75,7 +75,7 @@ "source": [ "import gt4py.next as gtx\n", "\n", - "allocator = gtx.itir_python # should match the executor\n", + "allocator = gtx.itir_python # should match the executor\n", "# allocator = gtx.gtfn_cpu\n", "# allocator = gtx.gtfn_gpu\n", "\n", @@ -86,7 +86,12 @@ "\n", "domain = gtx.domain({I: nx, J: ny, K: nz})\n", "\n", - "inp = gtx.as_field(domain, np.fromfunction(lambda x, y, z: x**2+y**2, shape=(nx, ny, nz)), dtype, allocator=allocator)\n", + "inp = gtx.as_field(\n", + " domain,\n", + " np.fromfunction(lambda x, y, z: x**2 + y**2, shape=(nx, ny, nz)),\n", + " dtype,\n", + " allocator=allocator,\n", + ")\n", "out_cartesian = gtx.zeros(domain, dtype, allocator=allocator)\n", "out_next = gtx.zeros(domain, dtype, allocator=allocator)" ] @@ -112,6 +117,7 @@ "# cartesian_backend = \"gt:cpu_ifirst\"\n", "# cartesian_backend = \"gt:gpu\"\n", "\n", + "\n", "@gtscript.stencil(backend=cartesian_backend)\n", "def lap_cartesian(\n", " inp: gtscript.Field[dtype],\n", @@ -120,7 +126,8 @@ " with computation(PARALLEL), interval(...):\n", " out = -4.0 * inp[0, 0, 0] + inp[-1, 0, 0] + inp[1, 0, 0] + inp[0, -1, 0] + inp[0, 1, 0]\n", "\n", - "lap_cartesian(inp=inp, out=out_cartesian, origin=(1, 1, 0), domain=(nx-2, ny-2, nz))" + "\n", + "lap_cartesian(inp=inp, out=out_cartesian, origin=(1, 1, 0), domain=(nx - 2, ny - 2, nz))" ] }, { @@ -138,14 +145,17 @@ "Ioff = gtx.FieldOffset(\"I\", source=I, target=(I,))\n", "Joff = gtx.FieldOffset(\"J\", source=J, target=(J,))\n", "\n", + "\n", "@gtx.field_operator\n", "def lap_next(inp: Field[[I, J, K], dtype]) -> Field[[I, J, K], dtype]:\n", " return -4.0 * inp + inp(Ioff[-1]) + inp(Ioff[1]) + inp(Joff[-1]) + inp(Joff[1])\n", "\n", + "\n", "@gtx.program(backend=next_backend)\n", "def lap_next_program(inp: Field[[I, J, K], 
dtype], out: Field[[I, J, K], dtype]):\n", " lap_next(inp, out=out[1:-1, 1:-1, :])\n", "\n", + "\n", "lap_next_program(inp, out_next, offset_provider={\"Ioff\": I, \"Joff\": J})" ] }, diff --git a/min-extra-requirements-test.txt b/min-extra-requirements-test.txt index 44535bb890..6e17e33081 100644 --- a/min-extra-requirements-test.txt +++ b/min-extra-requirements-test.txt @@ -100,6 +100,7 @@ pybind11==2.10.1 pygments==2.7.3 pytest-cache==1.0 pytest-cov==2.8 +pytest-custom-exit-code==0.3.0 pytest-factoryboy==2.0.3 pytest-instafail==0.5.0 pytest-xdist[psutil]==2.4 diff --git a/min-requirements-test.txt b/min-requirements-test.txt index 17b07db9c9..3cd85ec441 100644 --- a/min-requirements-test.txt +++ b/min-requirements-test.txt @@ -95,6 +95,7 @@ pybind11==2.10.1 pygments==2.7.3 pytest-cache==1.0 pytest-cov==2.8 +pytest-custom-exit-code==0.3.0 pytest-factoryboy==2.0.3 pytest-instafail==0.5.0 pytest-xdist[psutil]==2.4 diff --git a/requirements-dev.in b/requirements-dev.in index a1530ee731..07ec89d447 100644 --- a/requirements-dev.in +++ b/requirements-dev.in @@ -33,6 +33,7 @@ pygments>=2.7.3 pytest # constraints in gt4py['testing'] pytest-cache>=1.0 pytest-cov>=2.8 +pytest-custom-exit-code>=0.3.0 pytest-factoryboy>=2.0.3 pytest-xdist[psutil]>=2.4 pytest-instafail>=0.5.0 diff --git a/requirements-dev.txt b/requirements-dev.txt index de42f3e24e..81b4a725e9 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,16 +10,16 @@ annotated-types==0.7.0 # via -c constraints.txt, pydantic asttokens==2.4.1 # via -c constraints.txt, devtools, stack-data astunparse==1.6.3 ; python_version < "3.9" # via -c constraints.txt, dace, gt4py (pyproject.toml) attrs==24.2.0 # via -c constraints.txt, flake8-bugbear, flake8-eradicate, gt4py (pyproject.toml), hypothesis, jsonschema, referencing -babel==2.15.0 # via -c constraints.txt, sphinx +babel==2.16.0 # via -c constraints.txt, sphinx backcall==0.2.0 # via -c constraints.txt, ipython black==24.8.0 # via -c constraints.txt, gt4py 
(pyproject.toml) boltons==24.0.0 # via -c constraints.txt, gt4py (pyproject.toml) bracex==2.5 # via -c constraints.txt, wcmatch build==1.2.1 # via -c constraints.txt, pip-tools -bump-my-version==0.25.0 # via -c constraints.txt, -r requirements-dev.in +bump-my-version==0.26.0 # via -c constraints.txt, -r requirements-dev.in cached-property==1.5.2 # via -c constraints.txt, gt4py (pyproject.toml) -cachetools==5.4.0 # via -c constraints.txt, tox -certifi==2024.7.4 # via -c constraints.txt, requests +cachetools==5.5.0 # via -c constraints.txt, tox +certifi==2024.8.30 # via -c constraints.txt, requests cfgv==3.4.0 # via -c constraints.txt, pre-commit chardet==5.2.0 # via -c constraints.txt, tox charset-normalizer==3.3.2 # via -c constraints.txt, requests @@ -37,7 +37,7 @@ dace==0.16.1 # via -c constraints.txt, gt4py (pyproject.toml) darglint==1.8.1 # via -c constraints.txt, -r requirements-dev.in debugpy==1.8.5 # via -c constraints.txt, ipykernel decorator==5.1.1 # via -c constraints.txt, ipython -deepdiff==7.0.1 # via -c constraints.txt, gt4py (pyproject.toml) +deepdiff==8.0.1 # via -c constraints.txt, gt4py (pyproject.toml) devtools==0.12.2 # via -c constraints.txt, gt4py (pyproject.toml) dill==0.3.8 # via -c constraints.txt, dace distlib==0.3.8 # via -c constraints.txt, virtualenv @@ -45,13 +45,13 @@ docutils==0.20.1 # via -c constraints.txt, restructuredtext-lint, sphin eradicate==2.3.0 # via -c constraints.txt, flake8-eradicate exceptiongroup==1.2.2 # via -c constraints.txt, hypothesis, pytest execnet==2.1.1 # via -c constraints.txt, pytest-cache, pytest-xdist -executing==2.0.1 # via -c constraints.txt, devtools, stack-data -factory-boy==3.3.0 # via -c constraints.txt, gt4py (pyproject.toml), pytest-factoryboy -faker==26.2.0 # via -c constraints.txt, factory-boy +executing==2.1.0 # via -c constraints.txt, devtools, stack-data +factory-boy==3.3.1 # via -c constraints.txt, gt4py (pyproject.toml), pytest-factoryboy +faker==28.1.0 # via -c constraints.txt, factory-boy 
fastjsonschema==2.20.0 # via -c constraints.txt, nbformat filelock==3.15.4 # via -c constraints.txt, tox, virtualenv flake8==7.1.1 # via -c constraints.txt, -r requirements-dev.in, flake8-bugbear, flake8-builtins, flake8-debugger, flake8-docstrings, flake8-eradicate, flake8-mutable, flake8-pyproject, flake8-rst-docstrings -flake8-bugbear==24.4.26 # via -c constraints.txt, -r requirements-dev.in +flake8-bugbear==24.8.19 # via -c constraints.txt, -r requirements-dev.in flake8-builtins==2.5.0 # via -c constraints.txt, -r requirements-dev.in flake8-debugger==4.1.2 # via -c constraints.txt, -r requirements-dev.in flake8-docstrings==1.7.0 # via -c constraints.txt, -r requirements-dev.in @@ -63,12 +63,12 @@ fonttools==4.53.1 # via -c constraints.txt, matplotlib fparser==0.1.4 # via -c constraints.txt, dace frozendict==2.4.4 # via -c constraints.txt, gt4py (pyproject.toml) gridtools-cpp==2.3.4 # via -c constraints.txt, gt4py (pyproject.toml) -hypothesis==6.109.0 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml) +hypothesis==6.111.2 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml) identify==2.6.0 # via -c constraints.txt, pre-commit -idna==3.7 # via -c constraints.txt, requests +idna==3.8 # via -c constraints.txt, requests imagesize==1.4.1 # via -c constraints.txt, sphinx -importlib-metadata==8.2.0 # via -c constraints.txt, build, jupyter-client, sphinx -importlib-resources==6.4.0 ; python_version < "3.9" # via -c constraints.txt, gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib +importlib-metadata==8.4.0 # via -c constraints.txt, build, jupyter-client, sphinx +importlib-resources==6.4.4 ; python_version < "3.9" # via -c constraints.txt, gt4py (pyproject.toml), jsonschema, jsonschema-specifications, matplotlib inflection==0.5.1 # via -c constraints.txt, pytest-factoryboy iniconfig==2.0.0 # via -c constraints.txt, pytest ipykernel==6.29.5 # via -c constraints.txt, nbmake @@ -82,7 +82,7 @@ 
jupyter-client==8.6.2 # via -c constraints.txt, ipykernel, nbclient jupyter-core==5.7.2 # via -c constraints.txt, ipykernel, jupyter-client, nbformat jupytext==1.16.4 # via -c constraints.txt, -r requirements-dev.in kiwisolver==1.4.5 # via -c constraints.txt, matplotlib -lark==1.1.9 # via -c constraints.txt, gt4py (pyproject.toml) +lark==1.2.2 # via -c constraints.txt, gt4py (pyproject.toml) mako==1.3.5 # via -c constraints.txt, gt4py (pyproject.toml) markdown-it-py==3.0.0 # via -c constraints.txt, jupytext, mdit-py-plugins, rich markupsafe==2.1.5 # via -c constraints.txt, jinja2, mako @@ -92,9 +92,9 @@ mccabe==0.7.0 # via -c constraints.txt, flake8 mdit-py-plugins==0.4.1 # via -c constraints.txt, jupytext mdurl==0.1.2 # via -c constraints.txt, markdown-it-py mpmath==1.3.0 # via -c constraints.txt, sympy -mypy==1.11.1 # via -c constraints.txt, -r requirements-dev.in +mypy==1.11.2 # via -c constraints.txt, -r requirements-dev.in mypy-extensions==1.0.0 # via -c constraints.txt, black, mypy -nanobind==2.0.0 # via -c constraints.txt, gt4py (pyproject.toml) +nanobind==2.1.0 # via -c constraints.txt, gt4py (pyproject.toml) nbclient==0.6.8 # via -c constraints.txt, nbmake nbformat==5.10.4 # via -c constraints.txt, jupytext, nbclient, nbmake nbmake==1.5.4 # via -c constraints.txt, -r requirements-dev.in @@ -103,7 +103,7 @@ networkx==3.1 # via -c constraints.txt, dace ninja==1.11.1.1 # via -c constraints.txt, gt4py (pyproject.toml) nodeenv==1.9.1 # via -c constraints.txt, pre-commit numpy==1.24.4 # via -c constraints.txt, contourpy, dace, gt4py (pyproject.toml), matplotlib -ordered-set==4.1.0 # via -c constraints.txt, deepdiff +orderly-set==5.2.2 # via -c constraints.txt, deepdiff packaging==24.1 # via -c constraints.txt, black, build, gt4py (pyproject.toml), ipykernel, jupytext, matplotlib, pipdeptree, pyproject-api, pytest, pytest-factoryboy, setuptools-scm, sphinx, tox parso==0.8.4 # via -c constraints.txt, jedi pathspec==0.12.1 # via -c constraints.txt, black @@ -121,7 
+121,7 @@ prompt-toolkit==3.0.36 # via -c constraints.txt, ipython, questionary psutil==6.0.0 # via -c constraints.txt, -r requirements-dev.in, ipykernel, pytest-xdist ptyprocess==0.7.0 # via -c constraints.txt, pexpect pure-eval==0.2.3 # via -c constraints.txt, stack-data -pybind11==2.13.1 # via -c constraints.txt, gt4py (pyproject.toml) +pybind11==2.13.5 # via -c constraints.txt, gt4py (pyproject.toml) pycodestyle==2.12.1 # via -c constraints.txt, flake8, flake8-debugger pydantic==2.8.2 # via -c constraints.txt, bump-my-version, pydantic-settings pydantic-core==2.20.1 # via -c constraints.txt, pydantic @@ -129,12 +129,13 @@ pydantic-settings==2.4.0 # via -c constraints.txt, bump-my-version pydocstyle==6.3.0 # via -c constraints.txt, flake8-docstrings pyflakes==3.2.0 # via -c constraints.txt, flake8 pygments==2.18.0 # via -c constraints.txt, -r requirements-dev.in, devtools, flake8-rst-docstrings, ipython, nbmake, rich, sphinx -pyparsing==3.1.2 # via -c constraints.txt, matplotlib +pyparsing==3.1.4 # via -c constraints.txt, matplotlib pyproject-api==1.7.1 # via -c constraints.txt, tox pyproject-hooks==1.1.0 # via -c constraints.txt, build, pip-tools -pytest==8.3.2 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-factoryboy, pytest-instafail, pytest-xdist +pytest==8.3.2 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, pytest-xdist pytest-cache==1.0 # via -c constraints.txt, -r requirements-dev.in pytest-cov==5.0.0 # via -c constraints.txt, -r requirements-dev.in +pytest-custom-exit-code==0.3.0 # via -c constraints.txt, -r requirements-dev.in pytest-factoryboy==2.7.0 # via -c constraints.txt, -r requirements-dev.in pytest-instafail==0.5.0 # via -c constraints.txt, -r requirements-dev.in pytest-xdist[psutil]==3.6.1 # via -c constraints.txt, -r requirements-dev.in @@ -142,15 +143,15 
@@ python-dateutil==2.9.0.post0 # via -c constraints.txt, faker, jupyter-client, m python-dotenv==1.0.1 # via -c constraints.txt, pydantic-settings pytz==2024.1 # via -c constraints.txt, babel pyyaml==6.0.2 # via -c constraints.txt, dace, jupytext, pre-commit -pyzmq==26.1.0 # via -c constraints.txt, ipykernel, jupyter-client +pyzmq==26.2.0 # via -c constraints.txt, ipykernel, jupyter-client questionary==2.0.1 # via -c constraints.txt, bump-my-version referencing==0.35.1 # via -c constraints.txt, jsonschema, jsonschema-specifications requests==2.32.3 # via -c constraints.txt, sphinx restructuredtext-lint==1.4.0 # via -c constraints.txt, flake8-rst-docstrings -rich==13.7.1 # via -c constraints.txt, bump-my-version, rich-click +rich==13.8.0 # via -c constraints.txt, bump-my-version, rich-click rich-click==1.8.3 # via -c constraints.txt, bump-my-version rpds-py==0.20.0 # via -c constraints.txt, jsonschema, referencing -ruff==0.5.6 # via -c constraints.txt, -r requirements-dev.in +ruff==0.6.3 # via -c constraints.txt, -r requirements-dev.in setuptools-scm==8.1.0 # via -c constraints.txt, fparser six==1.16.0 # via -c constraints.txt, asttokens, astunparse, python-dateutil snowballstemmer==2.2.0 # via -c constraints.txt, pydocstyle, sphinx @@ -168,10 +169,10 @@ stack-data==0.6.3 # via -c constraints.txt, ipython sympy==1.12.1 # via -c constraints.txt, dace, gt4py (pyproject.toml) tabulate==0.9.0 # via -c constraints.txt, gt4py (pyproject.toml) tomli==2.0.1 ; python_version < "3.11" # via -c constraints.txt, -r requirements-dev.in, black, build, coverage, flake8-pyproject, jupytext, mypy, pip-tools, pyproject-api, pytest, setuptools-scm, tox -tomlkit==0.13.0 # via -c constraints.txt, bump-my-version +tomlkit==0.13.2 # via -c constraints.txt, bump-my-version toolz==0.12.1 # via -c constraints.txt, cytoolz tornado==6.4.1 # via -c constraints.txt, ipykernel, jupyter-client -tox==4.17.0 # via -c constraints.txt, -r requirements-dev.in +tox==4.18.0 # via -c constraints.txt, -r 
requirements-dev.in traitlets==5.14.3 # via -c constraints.txt, comm, ipykernel, ipython, jupyter-client, jupyter-core, matplotlib-inline, nbclient, nbformat types-tabulate==0.9.0.20240106 # via -c constraints.txt, -r requirements-dev.in typing-extensions==4.12.2 # via -c constraints.txt, annotated-types, black, gt4py (pyproject.toml), ipython, mypy, pydantic, pydantic-core, pytest-factoryboy, rich, rich-click, setuptools-scm @@ -179,11 +180,11 @@ urllib3==2.2.2 # via -c constraints.txt, requests virtualenv==20.26.3 # via -c constraints.txt, pre-commit, tox wcmatch==9.0 # via -c constraints.txt, bump-my-version wcwidth==0.2.13 # via -c constraints.txt, prompt-toolkit -websockets==12.0 # via -c constraints.txt, dace +websockets==13.0.1 # via -c constraints.txt, dace wheel==0.44.0 # via -c constraints.txt, astunparse, pip-tools xxhash==3.0.0 # via -c constraints.txt, gt4py (pyproject.toml) -zipp==3.19.2 # via -c constraints.txt, importlib-metadata, importlib-resources +zipp==3.20.1 # via -c constraints.txt, importlib-metadata, importlib-resources # The following packages are considered to be unsafe in a requirements file: pip==24.2 # via -c constraints.txt, pip-tools, pipdeptree -setuptools==72.1.0 # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm +setuptools==74.1.0 # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm diff --git a/tests/conftest.py b/tests/conftest.py index 3cbfa56fde..285ccda2b0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,24 +8,5 @@ """Global configuration of pytest for collecting and running tests.""" -import pytest - - # Ignore hidden folders and disabled tests collect_ignore_glob = [".*", "_disabled*"] - - -def pytest_addoption(parser): - group = parser.getgroup("This project") - group.addoption( - "--ignore-no-tests-collected", - action="store_true", - default=False, - help='Suppress the "no tests were collected" exit code.', - ) - - -def pytest_sessionfinish(session, 
exitstatus): - if session.config.getoption("--ignore-no-tests-collected"): - if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED: - session.exitstatus = pytest.ExitCode.OK diff --git a/tox.ini b/tox.ini index befddf22f8..9cdf95010c 100644 --- a/tox.ini +++ b/tox.ini @@ -82,7 +82,7 @@ set_env = {[testenv]set_env} PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = - python -m pytest --ignore-no-tests-collected --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + python -m pytest --suppress-no-test-exit-code --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ nomesh: not requires_atlas \ atlas: requires_atlas \ cpu: and not requires_gpu \ From c51dc75952f44633bc9b9859872e724c3ddc5c5b Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 14:52:19 +0200 Subject: [PATCH 18/24] Update dependencies again --- .pre-commit-config.yaml | 2 +- constraints.txt | 5 +++-- requirements-dev.txt | 5 +++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0e47535a46..9bb9917263 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -106,7 +106,7 @@ repos: - numpy==1.24.4 - packaging==24.1 - pybind11==2.13.5 - - setuptools==74.0.0 + - setuptools==74.1.0 - tabulate==0.9.0 - typing-extensions==4.12.2 - xxhash==3.0.0 diff --git a/constraints.txt b/constraints.txt index 1a8971fbdb..cb4864a2ee 100644 --- a/constraints.txt +++ b/constraints.txt @@ -118,9 +118,10 @@ pygments==2.18.0 # via -r requirements-dev.in, devtools, ipython, nbmak pyparsing==3.1.4 # via matplotlib pyproject-api==1.7.1 # via tox pyproject-hooks==1.1.0 # via build, pip-tools -pytest==8.3.2 # via -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-factoryboy, pytest-instafail, pytest-xdist +pytest==8.3.2 # via -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, 
pytest-xdist pytest-cache==1.0 # via -r requirements-dev.in pytest-cov==5.0.0 # via -r requirements-dev.in +pytest-custom-exit-code==0.3.0 # via -r requirements-dev.in pytest-factoryboy==2.7.0 # via -r requirements-dev.in pytest-instafail==0.5.0 # via -r requirements-dev.in pytest-xdist==3.6.1 # via -r requirements-dev.in @@ -172,4 +173,4 @@ zipp==3.20.1 # via importlib-metadata, importlib-resources # The following packages are considered to be unsafe in a requirements file: pip==24.2 # via pip-tools, pipdeptree -setuptools==74.0.0 # via gt4py (pyproject.toml), pip-tools, setuptools-scm +setuptools==74.1.0 # via gt4py (pyproject.toml), pip-tools, setuptools-scm diff --git a/requirements-dev.txt b/requirements-dev.txt index 770de33c63..a00bbd584c 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -118,9 +118,10 @@ pygments==2.18.0 # via -c constraints.txt, -r requirements-dev.in, devt pyparsing==3.1.4 # via -c constraints.txt, matplotlib pyproject-api==1.7.1 # via -c constraints.txt, tox pyproject-hooks==1.1.0 # via -c constraints.txt, build, pip-tools -pytest==8.3.2 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-factoryboy, pytest-instafail, pytest-xdist +pytest==8.3.2 # via -c constraints.txt, -r requirements-dev.in, gt4py (pyproject.toml), nbmake, pytest-cache, pytest-cov, pytest-custom-exit-code, pytest-factoryboy, pytest-instafail, pytest-xdist pytest-cache==1.0 # via -c constraints.txt, -r requirements-dev.in pytest-cov==5.0.0 # via -c constraints.txt, -r requirements-dev.in +pytest-custom-exit-code==0.3.0 # via -c constraints.txt, -r requirements-dev.in pytest-factoryboy==2.7.0 # via -c constraints.txt, -r requirements-dev.in pytest-instafail==0.5.0 # via -c constraints.txt, -r requirements-dev.in pytest-xdist[psutil]==3.6.1 # via -c constraints.txt, -r requirements-dev.in @@ -171,4 +172,4 @@ zipp==3.20.1 # via -c constraints.txt, importlib-metadata, importli # The following 
packages are considered to be unsafe in a requirements file: pip==24.2 # via -c constraints.txt, pip-tools, pipdeptree -setuptools==74.0.0 # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm +setuptools==74.1.0 # via -c constraints.txt, gt4py (pyproject.toml), pip-tools, setuptools-scm From 1cf88a720f0e544baf31f69ed7db918a6401d159 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 14:55:14 +0200 Subject: [PATCH 19/24] Recover updated tox.ini --- tox.ini | 70 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 32 deletions(-) diff --git a/tox.ini b/tox.ini index 861ec02c0a..ecea644e14 100644 --- a/tox.ini +++ b/tox.ini @@ -5,27 +5,21 @@ requires = envlist = cartesian-py{310}-{internal,dace}-{cpu} eve-py{310} - next-py{310}-{nomesh,atlas} + next-py{310}-{nomesh,atlas}-{cpu} storage-py{310}-{internal,dace}-{cpu} -# docs + # docs labels = - test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ - cartesian-py311-dace-cpu - + test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-internal-py39-cpu, \ + cartesian-internal-py310-cpu, cartesian-py311-internal-cpu, \ + cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu test-eve-cpu = eve-py38, eve-py39, eve-py310, eve-py311 - - test-next-cpu = next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas - - test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, \ - storage-py311-internal-cpu, storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, \ - storage-py311-dace-cpu - - test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, 
cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ - cartesian-py311-dace-cpu, \ + test-next-cpu = next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu + test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ + storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu + test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, cartesian-py311-internal-cpu, \ + cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, cartesian-py311-dace-cpu, \ eve-py38, eve-py39, eve-py310, eve-py311, \ - next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas, \ + next-py310-nomesh-cpu, next-py311-nomesh-cpu, next-py310-atlas-cpu, next-py311-atlas-cpu, \ storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu @@ -45,7 +39,7 @@ wheel_build_env = .pkg pass_env = CUDAARCHS, NUM_PROCESSES, GT4PY_* set_env = PYTEST_ADDOPTS = --color=auto --instafail - PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program ':UserWarning} + PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning,ignore:Field View Program:UserWarning} # -- Primary tests -- [testenv:cartesian-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] @@ -58,16 +52,18 @@ allowlist_externals = ldd rm commands = - internal-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu and not requires_dace" {posargs} tests{/}cartesian_tests - internal-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu and not requires_dace" 
{posargs} tests{/}cartesian_tests - dace-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu and requires_dace" {posargs} tests{/}cartesian_tests - dace-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu and requires_dace" {posargs} tests{/}cartesian_tests + python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + internal: not requires_dace \ + dace: requires_dace \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* -;commands_post = -; coverage json --rcfile=setup.cfg -; coverage html --rcfile=setup.cfg --show-contexts +# commands_post = +# coverage json --rcfile=setup.cfg +# coverage html --rcfile=setup.cfg --show-contexts [testenv:eve-py{38,39,310,311}] description = Run 'gt4py.eve' tests @@ -85,20 +81,30 @@ set_env = {[testenv]set_env} PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = - nomesh-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_atlas and not requires_gpu" {posargs} tests{/}next_tests - nomesh-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_atlas and requires_gpu" {posargs} tests{/}next_tests - atlas-cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_atlas and not requires_gpu" {posargs} tests{/}next_tests - # atlas-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_atlas and requires_gpu" {posargs} tests{/}next_tests # TODO(ricoh): activate when such tests exist + python -m pytest --suppress-no-test-exit-code --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + nomesh: not requires_atlas \ + atlas: requires_atlas \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: 
and requires_gpu \ + " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next [testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = - cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu" {posargs} tests{/}storage_tests - {cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu" {posargs} tests{/}storage_tests - #pytest doctest-modules {posargs} src{/}gt4py{/}storage + python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ + cpu: not requires_gpu \ + {cuda,cuda11x,cuda12x}: requires_gpu \ + " {posargs} tests{/}storage_tests + # pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- +[testenv:linters-py{38,39,310,311}] +description = Run linters +commands = + flake8 .{/}src + mypy .{/}src + [testenv:notebooks-py{310,311}] description = Run notebooks commands_pre = From 985cfa314a33c3535d759a1b64f6e984ab4d4fef Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 16:08:42 +0200 Subject: [PATCH 20/24] Update links in the development docs. --- docs/development/tools/ci-infrastructure.md | 2 +- docs/development/tools/cscs-ci.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/development/tools/ci-infrastructure.md b/docs/development/tools/ci-infrastructure.md index 1a4786e6e6..242bea50bd 100644 --- a/docs/development/tools/ci-infrastructure.md +++ b/docs/development/tools/ci-infrastructure.md @@ -95,7 +95,7 @@ The general idea is to run workflows only when needed. In this monorepo structur ### Daily CI -There is an extra CI workflow on GitHub scheduled to run daily and testing `main` with different sets of requirements: newest dependencies, lowest dependencies versions and lowest dependencies versions including extras. 
Failures are accessible in [GitHub web interface](https://github.com/GridTools/gt4py/actions/workflows/daily-ci.yml) and as the 'Daily CI' badge in the main [README.md](../../../README.md) file. Additionally, in case of failure a message _might_ be posted in the [#jenkins](https://app.slack.com/client/T0A5HP547/C0E145U65) channel of the GridTols slack, but those notifications do not work reliably. +There is an extra CI workflow on GitHub scheduled to run daily and testing `main` with different sets of requirements: newest dependencies, lowest dependencies versions and lowest dependencies versions including extras. Failures are accessible in [GitHub web interface](https://github.com/GridTools/gt4py/actions/workflows/daily-ci.yml) and as the 'Daily CI' badge in the main [README.md](../../../README.md) file. Additionally, in case of failure a message _might_ be posted in the [#ci-notifications](https://app.slack.com/client/T0A5HP547/C0E145U65) channel of the GridTools slack, but those notifications do not work reliably. ## CSCS-CI diff --git a/docs/development/tools/cscs-ci.md b/docs/development/tools/cscs-ci.md index 4d84f311ff..8fde4276f1 100644 --- a/docs/development/tools/cscs-ci.md +++ b/docs/development/tools/cscs-ci.md @@ -4,7 +4,7 @@ CSCS provides a way of running CI on it's machines. This is currently only avail ## Initial Setup -Follow the steps in the [CSCS-CI documentation](https://gitlab.com/cscs-ci/ci-testing/containerised_ci_doc). As mentioned in the documentation, you will require the help of someone who can register the project to be allowed to run CI on CSCS machines. +Follow the steps in the [CSCS-CI documentation](https://confluence.cscs.ch/pages/viewpage.action?pageId=868812112). As mentioned in the documentation, you will require the help of someone who can register the project to be allowed to run CI on CSCS machines. 
## Current Configuration From 0b4a6b743b2f57195fe344679187ada601b4c523 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 16:13:38 +0200 Subject: [PATCH 21/24] Update link URL --- docs/development/tools/cscs-ci.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/development/tools/cscs-ci.md b/docs/development/tools/cscs-ci.md index 8fde4276f1..264a96196e 100644 --- a/docs/development/tools/cscs-ci.md +++ b/docs/development/tools/cscs-ci.md @@ -4,7 +4,7 @@ CSCS provides a way of running CI on it's machines. This is currently only avail ## Initial Setup -Follow the steps in the [CSCS-CI documentation](https://confluence.cscs.ch/pages/viewpage.action?pageId=868812112). As mentioned in the documentation, you will require the help of someone who can register the project to be allowed to run CI on CSCS machines. +Follow the steps in the [CSCS-CI documentation](https://confluence.cscs.ch/x/UAXJMw). As mentioned in the documentation, you will require the help of someone who can register the project to be allowed to run CI on CSCS machines. 
## Current Configuration From 59ff62e7d8f7fed654fcf18308395bea5ac90779 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 16:18:06 +0200 Subject: [PATCH 22/24] Remove linters again --- tox.ini | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tox.ini b/tox.ini index ecea644e14..c77fe5940e 100644 --- a/tox.ini +++ b/tox.ini @@ -99,12 +99,6 @@ commands = # pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- -[testenv:linters-py{38,39,310,311}] -description = Run linters -commands = - flake8 .{/}src - mypy .{/}src - [testenv:notebooks-py{310,311}] description = Run notebooks commands_pre = From 5242d809021bd1314892d9ebf3f5d6deb9019bb8 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 16:21:12 +0200 Subject: [PATCH 23/24] Format issues --- tox.ini | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tox.ini b/tox.ini index c77fe5940e..8da0e45810 100644 --- a/tox.ini +++ b/tox.ini @@ -53,11 +53,11 @@ allowlist_externals = rm commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - internal: not requires_dace \ - dace: requires_dace \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}cartesian_tests + internal: not requires_dace \ + dace: requires_dace \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}cartesian_tests python -m pytest --doctest-modules --doctest-ignore-import-errors src{/}gt4py{/}cartesian # commands_pre = # rm -Rf tests/_reports/coverage* @@ -82,20 +82,20 @@ set_env = PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://test.pypi.org/simple/} commands = python -m pytest --suppress-no-test-exit-code --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - nomesh: not requires_atlas \ - atlas: requires_atlas \ - cpu: and not requires_gpu \ - {cuda,cuda11x,cuda12x}: and requires_gpu \ - " {posargs} tests{/}next_tests + 
nomesh: not requires_atlas \ + atlas: requires_atlas \ + cpu: and not requires_gpu \ + {cuda,cuda11x,cuda12x}: and requires_gpu \ + " {posargs} tests{/}next_tests pytest --doctest-modules src{/}gt4py{/}next [testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "\ - cpu: not requires_gpu \ - {cuda,cuda11x,cuda12x}: requires_gpu \ - " {posargs} tests{/}storage_tests + cpu: not requires_gpu \ + {cuda,cuda11x,cuda12x}: requires_gpu \ + " {posargs} tests{/}storage_tests # pytest doctest-modules {posargs} src{/}gt4py{/}storage # -- Secondary tests -- From 922fb25fa49f788ba6a5c86ebb168950300186d4 Mon Sep 17 00:00:00 2001 From: Enrique Gonzalez Paredes Date: Tue, 3 Sep 2024 16:21:40 +0200 Subject: [PATCH 24/24] F --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9bb9917263..fcdef06ca9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,7 @@ repos: hooks: - id: pretty-format-ini args: [--autofix] + exclude: tox.ini - id: pretty-format-toml args: [--autofix] - id: pretty-format-yaml