From 4c41f02a5e8c87f235485defc6efbe3f95042a70 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 29 Oct 2024 16:27:23 +0100 Subject: [PATCH 01/22] [wip] start adding sdfg convertible Program for dace-fieldview --- .../runners/dace_fieldview/program.py | 116 ++++++++++++++++++ .../runners/dace_iterator/__init__.py | 5 +- 2 files changed, 117 insertions(+), 4 deletions(-) create mode 100644 src/gt4py/next/program_processors/runners/dace_fieldview/program.py diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py new file mode 100644 index 0000000000..dbed1baf3f --- /dev/null +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -0,0 +1,116 @@ +# GT4Py - GridTools Framework +# +# Copyright (c) 2014-2024, ETH Zurich +# All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause + +import dataclasses +import itertools +import typing +from typing import Any, Optional, Sequence + +import dace + +from gt4py._core import definitions as core_defs +from gt4py.next import allocators, backend as next_backend +from gt4py.next.ffront import decorator +from gt4py.next.otf import arguments, toolchain +from gt4py.next.program_processors.runners.dace_common import utility as dace_utils + + +@dataclasses.dataclass(frozen=True) +class Program(decorator.Program, dace.frontend.python.common.SDFGConvertible): + """Extension of GT4Py Program implementing the SDFGConvertible interface via GTIR.""" + + sdfg_closure_cache: dict[str, Any] = dataclasses.field(default_factory=dict) + + def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: + if (self.backend is None) or "dace" not in self.backend.name.lower(): + raise ValueError("The SDFG can be generated only for the DaCe backend.") + + offset_provider = (self.connectivities or {}) | self._implicit_offset_provider + column_axis = kwargs.get("column_axis", None) + + gtir_stage = typing.cast(next_backend.Transforms, self.backend.transforms).past_to_itir( + toolchain.CompilableProgram( + data=self.past_stage, + args=arguments.CompileTimeArgs( + args=tuple(p.type for p in self.past_stage.past_node.definition.params), + kwargs={}, + column_axis=column_axis, + offset_provider=offset_provider, + ), + ) + ) + + on_gpu = not allocators.is_field_allocator_for( + self.backend.allocator, core_defs.DeviceType.CPU + ) and not allocators.is_field_allocator_for( + self.backend.allocator, core_defs.DeviceType.CPU_PINNED + ) + + sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[attr-defined] # we can assume to get a DaCeTranslationStep here + gtir_stage.data, + offset_provider=offset_provider, + column_axis=gtir_stage.args.column_axis, + auto_opt=True, + on_gpu=on_gpu, + ) + + self.sdfg_closure_cache["arrays"] = sdfg.arrays + + return sdfg + + def __sdfg_closure__(self, reevaluate: Optional[dict[str, str]] = None) -> dict[str, Any]: + """ + Return the closure arrays of the SDFG represented by this object + as a mapping between array name and the corresponding value. + + The connectivity tables are defined symbolically, i.e. table sizes & strides are DaCe symbols. + The need to define the connectivity tables in the `__sdfg_closure__` arises from the fact that + the offset providers are not part of GT4Py Program's arguments. + Keep in mind, that `__sdfg_closure__` is called after `__sdfg__` method. 
+ """ + symbols = {} + + if self.connectivities: + with_table = ( + name for name, conn in self.connectivities.items() if hasattr(conn, "table") + ) + in_arrays = ( + name + for name in with_table + if dace_utils.connectivity_identifier(name) in self.sdfg_closure_cache["arrays"] + ) + name_axis = list(itertools.product(in_arrays, [0, 1])) + + def size_symbol_name(name: str, axis: int) -> str: + return dace_utils.field_size_symbol_name( + dace_utils.connectivity_identifier(name), axis + ) + + connectivity_tables_size_symbols = { + (sname := size_symbol_name(name, axis)): dace.symbol(sname) + for name, axis in name_axis + } + + def stride_symbol_name(name: str, axis: int) -> str: + return dace_utils.field_stride_symbol_name( + dace_utils.connectivity_identifier(name), axis + ) + + connectivity_table_stride_symbols = { + (sname := stride_symbol_name(name, axis)): dace.symbol(sname) + for name, axis in name_axis + } + + symbols = connectivity_tables_size_symbols | connectivity_table_stride_symbols + + closure_dict: dict[str, Any] = {} + + return closure_dict + + def __sdfg_signature__(self) -> tuple[Sequence[str], Sequence[str]]: + return [p.id for p in self.past_stage.past_node.params], [] diff --git a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py index fc2772027e..54cfe91dcf 100644 --- a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py +++ b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py @@ -332,10 +332,7 @@ def __sdfg_closure__(self, reevaluate: Optional[dict[str, str]] = None) -> dict[ return closure_dict def __sdfg_signature__(self) -> tuple[Sequence[str], Sequence[str]]: - args = [] - for arg in self.past_stage.past_node.params: - args.append(arg.id) - return (args, []) + return [p.id for p in self.past_stage.past_node.params], [] def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: list[Any]) -> bool: From 481af34a550860b7edb4c6b3aceaac4592ab5477 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 19 Nov 2024 11:53:10 +0100 Subject: [PATCH 02/22] add SDFGConvertible Program replacement to dace_fieldview --- src/gt4py/next/ffront/decorator.py | 2 +- .../runners/dace_fieldview/program.py | 58 +++++++++++++++---- .../feature_tests/dace/test_orchestration.py | 11 ++-- 3 files changed, 54 insertions(+), 17 deletions(-) diff --git a/src/gt4py/next/ffront/decorator.py b/src/gt4py/next/ffront/decorator.py index dc2421e1d2..41e510d86f 100644 --- a/src/gt4py/next/ffront/decorator.py +++ b/src/gt4py/next/ffront/decorator.py @@ -310,7 +310,7 @@ def __call__( try: - from gt4py.next.program_processors.runners.dace_iterator import Program + from gt4py.next.program_processors.runners.dace_fieldview.program import Program except ImportError: pass diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index dbed1baf3f..4dbccd12ac 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -9,9 +9,10 @@ import dataclasses import itertools import typing -from typing import Any, Optional, Sequence +from typing import Any, ClassVar, Optional, Sequence import dace +import numpy as np from gt4py._core import definitions as core_defs from gt4py.next import allocators, backend as next_backend @@ -25,6 +26,12 @@ class Program(decorator.Program, 
dace.frontend.python.common.SDFGConvertible): """Extension of GT4Py Program implementing the SDFGConvertible interface via GTIR.""" sdfg_closure_cache: dict[str, Any] = dataclasses.field(default_factory=dict) + # Being a ClassVar ensures that in an SDFG with multiple nested GT4Py Programs, + # there is no name mangling of the connectivity tables used across the nested SDFGs + # since they share the same memory address. + connectivity_tables_data_descriptors: ClassVar[ + dict[str, dace.data.Array] + ] = {} # symbolically defined def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: if (self.backend is None) or "dace" not in self.backend.name.lower(): @@ -37,7 +44,7 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: toolchain.CompilableProgram( data=self.past_stage, args=arguments.CompileTimeArgs( - args=tuple(p.type for p in self.past_stage.past_node.definition.params), + args=tuple(p.type for p in self.past_stage.past_node.params), kwargs={}, column_axis=column_axis, offset_provider=offset_provider, @@ -53,8 +60,8 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[attr-defined] # we can assume to get a DaCeTranslationStep here gtir_stage.data, - offset_provider=offset_provider, - column_axis=gtir_stage.args.column_axis, + offset_provider=gtir_stage.args.offset_provider, + column_axis=kwargs.get("column_axis", gtir_stage.args.column_axis), auto_opt=True, on_gpu=on_gpu, ) @@ -73,17 +80,20 @@ def __sdfg_closure__(self, reevaluate: Optional[dict[str, str]] = None) -> dict[ the offset providers are not part of GT4Py Program's arguments. Keep in mind, that `__sdfg_closure__` is called after `__sdfg__` method. """ - symbols = {} + closure_dict: dict[str, Any] = {} if self.connectivities: - with_table = ( + symbols = {} + with_table = [ name for name, conn in self.connectivities.items() if hasattr(conn, "table") - ) - in_arrays = ( - name + ] + in_arrays_with_id = [ + (name, conn_id) for name in with_table - if dace_utils.connectivity_identifier(name) in self.sdfg_closure_cache["arrays"] - ) + if (conn_id := dace_utils.connectivity_identifier(name)) + in self.sdfg_closure_cache["arrays"] + ] + in_arrays = (name for name, _ in in_arrays_with_id) name_axis = list(itertools.product(in_arrays, [0, 1])) def size_symbol_name(name: str, axis: int) -> str: @@ -108,7 +118,31 @@ def stride_symbol_name(name: str, axis: int) -> str: symbols = connectivity_tables_size_symbols | connectivity_table_stride_symbols - closure_dict: dict[str, Any] = {} + # Define the storage location (e.g. 
CPU, GPU) of the connectivity tables + if "storage" not in self.connectivity_tables_data_descriptors: + for _, conn_id in in_arrays_with_id: + self.connectivity_tables_data_descriptors["storage"] = self.sdfg_closure_cache[ + "arrays" + ][conn_id].storage + break + + # Build the closure dictionary + for name, conn_id in in_arrays_with_id: + if conn_id not in self.connectivity_tables_data_descriptors: + conn = self.connectivities[name] + self.connectivity_tables_data_descriptors[conn_id] = dace.data.Array( + dtype=dace.int64 if conn.index_type == np.int64 else dace.int32, + shape=[ + symbols[dace_utils.field_size_symbol_name(conn_id, 0)], + symbols[dace_utils.field_size_symbol_name(conn_id, 1)], + ], + strides=[ + symbols[dace_utils.field_stride_symbol_name(conn_id, 0)], + symbols[dace_utils.field_stride_symbol_name(conn_id, 1)], + ], + storage=Program.connectivity_tables_data_descriptors["storage"], + ) + closure_dict[conn_id] = self.connectivity_tables_data_descriptors[conn_id] return closure_dict diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index 1da34db3c0..00505d928d 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -33,8 +33,8 @@ try: import dace from gt4py.next.program_processors.runners.dace import ( - itir_cpu as run_dace_cpu, - itir_gpu as run_dace_gpu, + gtir_cpu as run_dace_cpu, + gtir_gpu as run_dace_gpu, ) except ImportError: dace: Optional[ModuleType] = None # type:ignore[no-redef] @@ -134,8 +134,11 @@ def sdfg( xp.asarray([[0, 1], [1, 2], [2, 0]]), Edge, Vertex, 2, False ) connectivities = {} - connectivities["E2V"] = arguments.CompileTimeConnectivity( - e2v.max_neighbors, e2v.has_skip_values, e2v.origin_axis, e2v.neighbor_axis, e2v.table.dtype + # connectivities["E2V"] = arguments.CompileTimeConnectivity( + # e2v.max_neighbors, e2v.has_skip_values, e2v.origin_axis, e2v.neighbor_axis, e2v.table.dtype + # ) + connectivities["E2V"] = ( + e2v # TODO(ricoh): replace with compile time only info (as soon as possible) ) offset_provider = OffsetProvider_t.dtype._typeclass.as_ctypes()(E2V=e2v.data_ptr()) From ad0a7b23c3923669d5599f44d8650897f9388428 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 19 Nov 2024 15:09:05 +0100 Subject: [PATCH 03/22] improve generate_sdfg args, remove old Program replacement --- .../runners/dace_fieldview/program.py | 4 +- .../runners/dace_iterator/__init__.py | 218 +----------------- 2 files changed, 3 insertions(+), 219 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 4dbccd12ac..38b5dc7c79 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -61,8 +61,8 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[attr-defined] # we can assume to get a DaCeTranslationStep here gtir_stage.data, offset_provider=gtir_stage.args.offset_provider, - column_axis=kwargs.get("column_axis", gtir_stage.args.column_axis), - auto_opt=True, + column_axis=gtir_stage.args.column_axis, + auto_opt=False, on_gpu=on_gpu, ) diff --git a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py 
b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py index 54cfe91dcf..e575afba62 100644 --- a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py +++ b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py @@ -6,31 +6,23 @@ # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause -import dataclasses import warnings -from collections import OrderedDict from collections.abc import Callable, Mapping, Sequence -from dataclasses import field from inspect import currentframe, getframeinfo from pathlib import Path -from typing import Any, ClassVar, Optional +from typing import Any, Optional import dace -import numpy as np from dace.sdfg import utils as sdutils from dace.transformation.auto import auto_optimize as autoopt import gt4py.next.iterator.ir as itir from gt4py.next import common -from gt4py.next.ffront import decorator -from gt4py.next.iterator import transforms as itir_transforms -from gt4py.next.iterator.ir import SymRef from gt4py.next.iterator.transforms import ( pass_manager_legacy as legacy_itir_transforms, program_to_fencil, ) from gt4py.next.iterator.type_system import inference as itir_type_inference -from gt4py.next.program_processors.runners.dace_common import utility as dace_utils from gt4py.next.type_system import type_specifications as ts from .itir_to_sdfg import ItirToSDFG @@ -155,211 +147,3 @@ def build_sdfg_from_itir( sdfg.save(sdfg_filename) return sdfg - - -@dataclasses.dataclass(frozen=True) -class Program(decorator.Program, dace.frontend.python.common.SDFGConvertible): - """Extension of GT4Py Program implementing the SDFGConvertible interface.""" - - sdfg_closure_vars: dict[str, Any] = field(default_factory=dict) - - # Being a ClassVar ensures that in an SDFG with multiple nested GT4Py Programs, - # there is no name mangling of the connectivity tables used across the nested SDFGs - # since they share the same memory address. - connectivity_tables_data_descriptors: ClassVar[ - dict[str, dace.data.Array] - ] = {} # symbolically defined - - def __sdfg__(self, *args, **kwargs) -> dace.sdfg.sdfg.SDFG: - if "dace" not in self.backend.name.lower(): # type: ignore[union-attr] - raise ValueError("The SDFG can be generated only for the DaCe backend.") - - params = {str(p.id): p.type for p in self.itir.params} - fields = {str(p.id): p.type for p in self.itir.params if hasattr(p.type, "dims")} - arg_types = [*params.values()] - - dace_parsed_args = [*args, *kwargs.values()] - gt4py_program_args = [*params.values()] - _crosscheck_dace_parsing(dace_parsed_args, gt4py_program_args) - - if self.connectivities is None: - raise ValueError( - "[DaCe Orchestration] Connectivities -at compile time- are required to generate the SDFG. Use `with_connectivities` method." - ) - offset_provider = ( - self.connectivities | self._implicit_offset_provider - ) # tables are None at this point - - sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[union-attr] - self.itir, - arg_types, - offset_provider=offset_provider, - column_axis=kwargs.get("column_axis", None), - ) - self.sdfg_closure_vars["sdfg.arrays"] = sdfg.arrays # use it in __sdfg_closure__ - - # Halo exchange related metadata, i.e. 
gt4py_program_input_fields, gt4py_program_output_fields, offset_providers_per_input_field - # Add them as dynamic properties to the SDFG - - assert all( - isinstance(in_field, SymRef) - for closure in self.itir.closures - for in_field in closure.inputs - ) # backend only supports SymRef inputs, not `index` calls - input_fields = [ - str(in_field.id) # type: ignore[union-attr] # ensured by assert - for closure in self.itir.closures - for in_field in closure.inputs - if str(in_field.id) in fields # type: ignore[union-attr] # ensured by assert - ] - sdfg.gt4py_program_input_fields = { - in_field: dim - for in_field in input_fields - for dim in fields[in_field].dims # type: ignore[union-attr] - if dim.kind == common.DimensionKind.HORIZONTAL - } - - output_fields = [] - for closure in self.itir.closures: - output = closure.output - if isinstance(output, itir.SymRef): - if str(output.id) in fields: - output_fields.append(str(output.id)) - else: - for arg in output.args: - if str(arg.id) in fields: # type: ignore[attr-defined] - output_fields.append(str(arg.id)) # type: ignore[attr-defined] - sdfg.gt4py_program_output_fields = { - output: dim - for output in output_fields - for dim in fields[output].dims # type: ignore[union-attr] - if dim.kind == common.DimensionKind.HORIZONTAL - } - - sdfg.offset_providers_per_input_field = {} - itir_tmp = legacy_itir_transforms.apply_common_transforms( - self.itir, offset_provider=offset_provider - ) - itir_tmp_fencil = program_to_fencil.program_to_fencil(itir_tmp) - for closure in itir_tmp_fencil.closures: - params_shifts = itir_transforms.trace_shifts.trace_stencil( - closure.stencil, num_args=len(closure.inputs) - ) - for param, shifts in zip(closure.inputs, params_shifts): - assert isinstance( - param, SymRef - ) # backend only supports SymRef inputs, not `index` calls - if not isinstance(param.id, str): - continue - if param.id not in sdfg.gt4py_program_input_fields: - continue - sdfg.offset_providers_per_input_field.setdefault(param.id, []).extend(list(shifts)) - - return sdfg - - def __sdfg_closure__(self, reevaluate: Optional[dict[str, str]] = None) -> dict[str, Any]: - """ - Returns the closure arrays of the SDFG represented by this object - as a mapping between array name and the corresponding value. - - The connectivity tables are defined symbolically, i.e. table sizes & strides are DaCe symbols. - The need to define the connectivity tables in the `__sdfg_closure__` arises from the fact that - the offset providers are not part of GT4Py Program's arguments. - Keep in mind, that `__sdfg_closure__` is called after `__sdfg__` method. 
- """ - offset_provider = self.connectivities - - # Define DaCe symbols - connectivity_table_size_symbols = { - dace_utils.field_size_symbol_name( - dace_utils.connectivity_identifier(k), axis - ): dace.symbol( - dace_utils.field_size_symbol_name(dace_utils.connectivity_identifier(k), axis) - ) - for k, v in offset_provider.items() # type: ignore[union-attr] - for axis in [0, 1] - if hasattr(v, "table") - and dace_utils.connectivity_identifier(k) in self.sdfg_closure_vars["sdfg.arrays"] - } - - connectivity_table_stride_symbols = { - dace_utils.field_stride_symbol_name( - dace_utils.connectivity_identifier(k), axis - ): dace.symbol( - dace_utils.field_stride_symbol_name(dace_utils.connectivity_identifier(k), axis) - ) - for k, v in offset_provider.items() # type: ignore[union-attr] - for axis in [0, 1] - if hasattr(v, "table") - and dace_utils.connectivity_identifier(k) in self.sdfg_closure_vars["sdfg.arrays"] - } - - symbols = {**connectivity_table_size_symbols, **connectivity_table_stride_symbols} - - # Define the storage location (e.g. CPU, GPU) of the connectivity tables - if "storage" not in Program.connectivity_tables_data_descriptors: - for k, v in offset_provider.items(): # type: ignore[union-attr] - if not hasattr(v, "table"): - continue - if dace_utils.connectivity_identifier(k) in self.sdfg_closure_vars["sdfg.arrays"]: - Program.connectivity_tables_data_descriptors["storage"] = ( - self.sdfg_closure_vars[ - "sdfg.arrays" - ][dace_utils.connectivity_identifier(k)].storage - ) - break - - # Build the closure dictionary - closure_dict = {} - for k, v in offset_provider.items(): # type: ignore[union-attr] - conn_id = dace_utils.connectivity_identifier(k) - if hasattr(v, "table") and conn_id in self.sdfg_closure_vars["sdfg.arrays"]: - if conn_id not in Program.connectivity_tables_data_descriptors: - Program.connectivity_tables_data_descriptors[conn_id] = dace.data.Array( - dtype=dace.int64 if v.index_type == np.int64 else dace.int32, - shape=[ - symbols[dace_utils.field_size_symbol_name(conn_id, 0)], - symbols[dace_utils.field_size_symbol_name(conn_id, 1)], - ], - strides=[ - symbols[dace_utils.field_stride_symbol_name(conn_id, 0)], - symbols[dace_utils.field_stride_symbol_name(conn_id, 1)], - ], - storage=Program.connectivity_tables_data_descriptors["storage"], - ) - closure_dict[conn_id] = Program.connectivity_tables_data_descriptors[conn_id] - - return closure_dict - - def __sdfg_signature__(self) -> tuple[Sequence[str], Sequence[str]]: - return [p.id for p in self.past_stage.past_node.params], [] - - -def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: list[Any]) -> bool: - for dace_parsed_arg, gt4py_program_arg in zip(dace_parsed_args, gt4py_program_args): - if isinstance(dace_parsed_arg, dace.data.Scalar): - assert dace_parsed_arg.dtype == dace_utils.as_dace_type(gt4py_program_arg) - elif isinstance( - dace_parsed_arg, (bool, int, float, str, np.bool_, np.integer, np.floating, np.str_) - ): # compile-time constant scalar - assert isinstance(gt4py_program_arg, ts.ScalarType) - if isinstance(dace_parsed_arg, (bool, np.bool_)): - assert gt4py_program_arg.kind == ts.ScalarKind.BOOL - elif isinstance(dace_parsed_arg, (int, np.integer)): - assert gt4py_program_arg.kind in [ts.ScalarKind.INT32, ts.ScalarKind.INT64] - elif isinstance(dace_parsed_arg, (float, np.floating)): - assert gt4py_program_arg.kind in [ts.ScalarKind.FLOAT32, ts.ScalarKind.FLOAT64] - elif isinstance(dace_parsed_arg, (str, np.str_)): - assert gt4py_program_arg.kind == ts.ScalarKind.STRING 
- elif isinstance(dace_parsed_arg, dace.data.Array): - assert isinstance(gt4py_program_arg, ts.FieldType) - assert len(dace_parsed_arg.shape) == len(gt4py_program_arg.dims) - assert dace_parsed_arg.dtype == dace_utils.as_dace_type(gt4py_program_arg.dtype) - elif isinstance( - dace_parsed_arg, (dace.data.Structure, dict, OrderedDict) - ): # offset_provider - continue - else: - raise ValueError(f"Unresolved case for {dace_parsed_arg} (==, !=) {gt4py_program_arg}") - - return True From 7c4d1975883ae48456bde2f4b39f57843adcc1d6 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 19 Nov 2024 15:33:49 +0100 Subject: [PATCH 04/22] turn auto_optimize back on in __sdfg__ --- .../next/program_processors/runners/dace_fieldview/program.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 38b5dc7c79..d54a96e081 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -62,7 +62,7 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: gtir_stage.data, offset_provider=gtir_stage.args.offset_provider, column_axis=gtir_stage.args.column_axis, - auto_opt=False, + auto_opt=True, on_gpu=on_gpu, ) From 3c56151fbb24ffa4c83a47b22b864609566274c0 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 19 Nov 2024 16:22:41 +0100 Subject: [PATCH 05/22] disable `auto_opt` once again in `__sdfg__` --- .../next/program_processors/runners/dace_fieldview/program.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index d54a96e081..38b5dc7c79 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -62,7 +62,7 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: gtir_stage.data, offset_provider=gtir_stage.args.offset_provider, column_axis=gtir_stage.args.column_axis, - auto_opt=True, + auto_opt=False, on_gpu=on_gpu, ) From c5b4c434549fc81a9fef947192d803139320cac6 Mon Sep 17 00:00:00 2001 From: DropD Date: Wed, 20 Nov 2024 09:43:34 +0100 Subject: [PATCH 06/22] support only CUDA device type in dace_fieldview Program --- .../program_processors/runners/dace_fieldview/program.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 38b5dc7c79..02b898d689 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -52,10 +52,8 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: ) ) - on_gpu = not allocators.is_field_allocator_for( - self.backend.allocator, core_defs.DeviceType.CPU - ) and not allocators.is_field_allocator_for( - self.backend.allocator, core_defs.DeviceType.CPU_PINNED + on_gpu = allocators.is_field_allocator_for( + self.backend.allocator, core_defs.DeviceType.CUDA ) sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[attr-defined] # we can assume to get a DaCeTranslationStep here From 835c1158afe55e3086428a694cd92531ef800186 Mon Sep 17 00:00:00 2001 From: DropD Date: Fri, 22 Nov 
2024 17:14:52 +0100 Subject: [PATCH 07/22] [wip] bring back extra sdfg attributes for halo placement --- .../runners/dace_fieldview/program.py | 131 ++++++++++++++++-- 1 file changed, 117 insertions(+), 14 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 02b898d689..1a92680733 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -14,11 +14,13 @@ import dace import numpy as np -from gt4py._core import definitions as core_defs -from gt4py.next import allocators, backend as next_backend +from gt4py import eve +from gt4py.next import backend as next_backend, common from gt4py.next.ffront import decorator -from gt4py.next.otf import arguments, toolchain +from gt4py.next.iterator import ir as itir +from gt4py.next.otf import arguments, recipes, toolchain from gt4py.next.program_processors.runners.dace_common import utility as dace_utils +from gt4py.next.type_system import type_specifications as ts @dataclasses.dataclass(frozen=True) @@ -52,20 +54,48 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: ) ) - on_gpu = allocators.is_field_allocator_for( - self.backend.allocator, core_defs.DeviceType.CUDA - ) - - sdfg = self.backend.executor.step.translation.generate_sdfg( # type: ignore[attr-defined] # we can assume to get a DaCeTranslationStep here - gtir_stage.data, - offset_provider=gtir_stage.args.offset_provider, - column_axis=gtir_stage.args.column_axis, - auto_opt=False, - on_gpu=on_gpu, - ) + compile_workflow = typing.cast( + recipes.OTFCompileWorkflow, + self.backend.executor + if not hasattr(self.backend.executor, "step") + else self.backend.executor.step, + ) # We know which backend we are using, but we don't know if the compile workflow is cached. + sdfg = dace.SDFG.from_json(compile_workflow.translation(gtir_stage).source_code) self.sdfg_closure_cache["arrays"] = sdfg.arrays + # Halo exchange related metadata, i.e. gt4py_program_input_fields, gt4py_program_output_fields, offset_providers_per_input_field + # Add them as dynamic properties to the SDFG + program = typing.cast( + itir.Program, gtir_stage.data + ) # we already checked that our backend uses GTIR + field_params = { + str(param.id): param for param in program.params if isinstance(param.type, ts.FieldType) + } + + input_fields = (field_params[name] for name in InputNamesExtractor.only_fields(program)) + # TODO (ricoh): This will associate the last horizontal dimension of each field with it's name + # as in dace_iterator. Check if that was really the intention. + sdfg.gt4py_program_input_fields = { + str(field.id): dim + for field in input_fields + for dim in field.type.dims # type: ignore[union-attr] # we know the type is a FieldType + if dim.kind is common.DimensionKind.HORIZONTAL + } + + output_fields = (field_params[name] for name in OutputNamesExtractor.only_fields(program)) + # TODO (ricoh): This will associate the last horizontal dimension of each field with it's name + # as in dace_iterator. Check if that was really the intention. + sdfg.gt4py_program_output_fields = { + str(field.id): dim + for field in output_fields + for dim in field.type.dims # type: ignore[union-attr] # we know the type is a FieldType + if dim.kind is common.DimensionKind.HORIZONTAL + } + + # TODO (ricoh): bring back sdfg.offset_providers_per_input_field. 
+ # This will require an equivalent to 'itir_transforms.trace_shifts' under GTIR + return sdfg def __sdfg_closure__(self, reevaluate: Optional[dict[str, str]] = None) -> dict[str, Any]: @@ -146,3 +176,76 @@ def stride_symbol_name(name: str, axis: int) -> str: def __sdfg_signature__(self) -> tuple[Sequence[str], Sequence[str]]: return [p.id for p in self.past_stage.past_node.params], [] + + +class SymbolNameSetExtractor(eve.NodeVisitor): + """Extract a set of symbol names""" + + def generic_visitor(self, node: itir.Node) -> set[str]: + input_fields: set[str] = set() + for child in eve.trees.iter_children_values(node): + input_fields |= self.visit(child) + return input_fields + + @classmethod + def only_fields(cls, program: itir.Program) -> set[str]: + field_param_names = [ + str(param.id) for param in program.params if isinstance(param.type, ts.FieldType) + ] + return {name for name in cls().visit(program) if name in field_param_names} + + +class InputNamesExtractor(SymbolNameSetExtractor): + """Extract the set of symbol names passed into field operators within a program.""" + + def visit_Program(self, node: itir.Program) -> set[str]: + input_fields = set() + for stmt in node.body: + input_fields |= self.visit(stmt) + return input_fields + + def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: + input_fields = set() + for stmt in node.true_branch + node.false_branch: + input_fields |= self.visit(stmt) + return input_fields + + def visit_Temporary(self, node: itir.Temporary) -> set[str]: + return set() + + def visit_SetAt(self, node: itir.SetAt) -> set[str]: + return self.visit(node.expr) + + def visit_FunCall(self, node: itir.FunCall) -> set[str]: + input_fields = set() + for arg in node.args: + input_fields |= self.visit(arg) + return input_fields + + def visit_SymRef(self, node: itir.SymRef) -> set[str]: + return {str(node.id)} + + +class OutputNamesExtractor(SymbolNameSetExtractor): + """Extract the set of symbol names written to within a program""" + + def visit_Program(self, node: itir.Program) -> set[str]: + output_fields = set() + for stmt in node.body: + output_fields |= self.visit(stmt) + return output_fields + + def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: + output_fields = set() + for stmt in node.true_branch + node.false_branch: + output_fields |= self.visit(stmt) + return output_fields + + def visit_Temporary(self, node: itir.Temporary) -> set[str]: + return set() + + def visit_SetAt(self, node: itir.SetAt) -> set[str]: + return self.visit(node.target) + + def visit_SymRef(self, node: itir.SymRef) -> set[str]: + return {str(node.id)} From c126cb2a7dbee28af6d9f12d7e02b279ce5d9e4a Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 26 Nov 2024 11:16:30 +0100 Subject: [PATCH 08/22] partially add halo exchange helper attrs with tests --- .../runners/dace_fieldview/program.py | 37 ++-- .../feature_tests/dace/test_orchestration.py | 1 + .../feature_tests/dace/test_program.py | 181 ++++++++++++++++++ 3 files changed, 202 insertions(+), 17 deletions(-) create mode 100644 tests/next_tests/integration_tests/feature_tests/dace/test_program.py diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 1a92680733..a757b41280 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -73,28 +73,31 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: str(param.id): param 
for param in program.params if isinstance(param.type, ts.FieldType) } + def single_horizontal_dim_per_field( + fields: typing.Iterable[itir.Sym], + ) -> typing.Iterator[tuple[str, common.Dimension]]: + for field in fields: + assert isinstance(field.type, ts.FieldType) + horizontal_dims = [ + dim for dim in field.type.dims if dim.kind is common.DimensionKind.HORIZONTAL + ] + # do nothing for fields with multiple horizontal dimensions + # or without horizontal dimensions + # this is only meant for use with unstructured grids + if len(horizontal_dims) == 1: + yield str(field.id), horizontal_dims[0] + input_fields = (field_params[name] for name in InputNamesExtractor.only_fields(program)) - # TODO (ricoh): This will associate the last horizontal dimension of each field with it's name - # as in dace_iterator. Check if that was really the intention. - sdfg.gt4py_program_input_fields = { - str(field.id): dim - for field in input_fields - for dim in field.type.dims # type: ignore[union-attr] # we know the type is a FieldType - if dim.kind is common.DimensionKind.HORIZONTAL - } + sdfg.gt4py_program_input_fields = dict(single_horizontal_dim_per_field(input_fields)) output_fields = (field_params[name] for name in OutputNamesExtractor.only_fields(program)) - # TODO (ricoh): This will associate the last horizontal dimension of each field with it's name - # as in dace_iterator. Check if that was really the intention. - sdfg.gt4py_program_output_fields = { - str(field.id): dim - for field in output_fields - for dim in field.type.dims # type: ignore[union-attr] # we know the type is a FieldType - if dim.kind is common.DimensionKind.HORIZONTAL - } + sdfg.gt4py_program_output_fields = dict(single_horizontal_dim_per_field(output_fields)) # TODO (ricoh): bring back sdfg.offset_providers_per_input_field. - # This will require an equivalent to 'itir_transforms.trace_shifts' under GTIR + # A starting point would be to use the "trace_shifts" pass on GTIR + # and associate the extracted shifts with each input field. + # Analogous to the version in `runners.dace_iterator.__init__`, which + # was removed when merging #1742. return sdfg diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index 00505d928d..bd26462f1b 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -143,6 +143,7 @@ def sdfg( offset_provider = OffsetProvider_t.dtype._typeclass.as_ctypes()(E2V=e2v.data_ptr()) SDFG = sdfg.to_sdfg(connectivities=connectivities) + cSDFG = SDFG.compile() a = gtx.as_field([Vertex], xp.asarray([0.0, 1.0, 2.0]), allocator=allocator) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py new file mode 100644 index 0000000000..0524af1267 --- /dev/null +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py @@ -0,0 +1,181 @@ +# GT4Py - GridTools Framework +# +# Copyright (c) 2014-2024, ETH Zurich +# All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. 
+# SPDX-License-Identifier: BSD-3-Clause + +import pytest + +from gt4py import next as gtx +from gt4py.next import common +from gt4py.next.program_processors.runners.dace_fieldview import program as dace_prg + +from next_tests.integration_tests import cases +from next_tests.integration_tests.feature_tests.ffront_tests.ffront_test_utils import ( + Cell, + Edge, + IDim, + JDim, + KDim, + Vertex, + mesh_descriptor, +) + + +try: + import dace + from gt4py.next.program_processors.runners.dace import gtir_cpu, gtir_gpu +except ImportError: + from typing import Optional + from types import ModuleType + from gt4py.next import backend as next_backend + + dace: Optional[ModuleType] = None + gtir_cpu: Optional[next_backend.Backend] = None + gtir_gpu: Optional[next_backend.Backend] = None + + +@pytest.fixture( + params=[ + pytest.param(gtir_cpu, marks=pytest.mark.requires_dace), + pytest.param(gtir_gpu, marks=(pytest.mark.requires_gpu, pytest.mark.requires_dace)), + ] +) +def gtir_dace_backend(request): + yield request.param + + +@pytest.fixture +def cartesian(request, gtir_dace_backend): + if gtir_dace_backend is None: + yield None + + yield cases.Case( + backend=gtir_dace_backend, + offset_provider={ + "Ioff": IDim, + "Joff": JDim, + "Koff": KDim, + }, + default_sizes={IDim: 10, JDim: 10, KDim: 10}, + grid_type=common.GridType.CARTESIAN, + allocator=gtir_dace_backend.allocator, + ) + + +@pytest.fixture +def unstructured(request, gtir_dace_backend, mesh_descriptor): + if gtir_dace_backend is None: + yield None + + yield cases.Case( + backend=gtir_dace_backend, + offset_provider=mesh_descriptor.offset_provider, + default_sizes={ + Vertex: mesh_descriptor.num_vertices, + Edge: mesh_descriptor.num_edges, + Cell: mesh_descriptor.num_cells, + KDim: 10, + }, + grid_type=common.GridType.UNSTRUCTURED, + allocator=gtir_dace_backend.allocator, + ) + + +@pytest.mark.skipif(dace is None, reason="DaCe not found") +def test_input_names_extractor_cartesian(cartesian): + @gtx.field_operator(backend=cartesian.backend) + def testee_op( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: + return a + + @gtx.program(backend=cartesian.backend) + def testee( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + b: gtx.Field[[IDim, JDim, KDim], gtx.int], + c: gtx.Field[[IDim, JDim, KDim], gtx.int], + ): + testee_op(b, out=c) + testee_op(a, out=b) + + input_field_names = dace_prg.InputNamesExtractor.only_fields(testee.itir) + assert input_field_names == {"a", "b"} + + +@pytest.mark.skipif(dace is None, reason="DaCe not found") +def test_output_names_extractor(cartesian): + @gtx.field_operator(backend=cartesian.backend) + def testee_op( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: + return a + + @gtx.program(backend=cartesian.backend) + def testee( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + b: gtx.Field[[IDim, JDim, KDim], gtx.int], + c: gtx.Field[[IDim, JDim, KDim], gtx.int], + ): + testee_op(a, out=b) + testee_op(a, out=c) + + output_field_names = dace_prg.OutputNamesExtractor.only_fields(testee.itir) + assert output_field_names == {"b", "c"} + + +@pytest.mark.skipif(dace is None, reason="DaCe not found") +def test_halo_exchange_helper_attrs(unstructured): + @gtx.field_operator(backend=unstructured.backend) + def testee_op( + a: gtx.Field[[Vertex, KDim], gtx.int], + ) -> gtx.Field[[Vertex, KDim], gtx.int]: + return a + + @gtx.program(backend=unstructured.backend) + def testee_prog( + a: gtx.Field[[Vertex, KDim], gtx.int], + b: 
gtx.Field[[Vertex, KDim], gtx.int], + c: gtx.Field[[Vertex, KDim], gtx.int], + ): + testee_op(b, out=c) + testee_op(a, out=b) + + dace_storage_type = ( + dace.StorageType.GPU_Global + if unstructured.backend == gtir_gpu + else dace.StorageType.Default + ) + + rows = dace.symbol("rows") + cols = dace.symbol("cols") + OffsetProvider_t = dace.data.Structure( + { + key: dace.data.Array(dtype=dace.int64, shape=[rows, cols], storage=dace_storage_type) + for key in unstructured.offset_provider + }, + name="OffsetProvider", + ) + + @dace.program + def testee_dace( + a: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), + b: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), + c: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), + offset_provider: OffsetProvider_t, + connectivities: dace.compiletime, + ): + testee_prog.with_grid_type(unstructured.grid_type).with_connectivities(connectivities)( + a, b, c, offset_provider=offset_provider + ) + + sdfg = testee_dace.to_sdfg(connectivities=unstructured.offset_provider) + + testee = next( + subgraph for subgraph in sdfg.all_sdfgs_recursive() if subgraph.name == "testee_prog" + ) + + assert testee.gt4py_program_input_fields == {"a": Vertex, "b": Vertex} + assert testee.gt4py_program_output_fields == {"b": Vertex, "c": Vertex} From 0b90583a66d0c4834fd523da5e36acaced44480f Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 26 Nov 2024 11:40:47 +0100 Subject: [PATCH 09/22] add dace/gt4py type parsing crosscheck --- .../runners/dace_fieldview/program.py | 50 +++++++++++++++++-- 1 file changed, 45 insertions(+), 5 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index a757b41280..b139fc0272 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -6,6 +6,7 @@ # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause +import collections import dataclasses import itertools import typing @@ -53,6 +54,14 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: ), ) ) + program = typing.cast( + itir.Program, gtir_stage.data + ) # we already checked that our backend uses GTIR + + _crosscheck_dace_parsing( + dace_parsed_args=[*args, *kwargs.values()], + gt4py_program_args=[p.type for p in program.params], + ) compile_workflow = typing.cast( recipes.OTFCompileWorkflow, @@ -64,11 +73,8 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: self.sdfg_closure_cache["arrays"] = sdfg.arrays - # Halo exchange related metadata, i.e. gt4py_program_input_fields, gt4py_program_output_fields, offset_providers_per_input_field - # Add them as dynamic properties to the SDFG - program = typing.cast( - itir.Program, gtir_stage.data - ) # we already checked that our backend uses GTIR + # Halo exchange related metadata, i.e. gt4py_program_input_fields, gt4py_program_output_fields, + # offset_providers_per_input_field. 
Add them as dynamic attributes to the SDFG field_params = { str(param.id): param for param in program.params if isinstance(param.type, ts.FieldType) } @@ -252,3 +258,37 @@ def visit_SetAt(self, node: itir.SetAt) -> set[str]: def visit_SymRef(self, node: itir.SymRef) -> set[str]: return {str(node.id)} + + +def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: list[Any]) -> None: + for dace_parsed_arg, gt4py_program_arg in zip( + dace_parsed_args, + gt4py_program_args, + strict=False, # dace does not see implicit size args + ): + match dace_parsed_arg: + case dace.data.Scalar(): + assert dace_parsed_arg.type == dace_utils.as_dace_type(gt4py_program_arg) + case bool() | np.bool_(): + assert isinstance(gt4py_program_arg, ts.ScalarType) + assert gt4py_program_arg.kind == ts.ScalarKind.BOOL + case int() | np.integer(): + assert isinstance(gt4py_program_arg, ts.ScalarType) + assert gt4py_program_arg.kind in [ts.ScalarKind.INT32, ts.ScalarKind.INT64] + case float() | np.floating(): + assert isinstance(gt4py_program_arg, ts.ScalarType) + assert gt4py_program_arg.kind in [ts.ScalarKind.FLOAT32, ts.ScalarKind.FLOAT64] + case str() | np.str_(): + assert isinstance(gt4py_program_arg, ts.ScalarType) + assert gt4py_program_arg.kind == ts.ScalarKind.STRING + case dace.data.Array(): + assert isinstance(gt4py_program_arg, ts.FieldType) + assert len(dace_parsed_arg.shape) == len(gt4py_program_arg.dims) + assert dace_parsed_arg.dtype == dace_utils.as_dace_type(gt4py_program_arg.dtype) + case dace.data.Structure() | dict() | collections.OrderedDict(): + # offset provider + pass + case _: + raise ValueError( + f"Unresolved case for {dace_parsed_arg} (==, !=) {gt4py_program_arg}" + ) From 77c250b6190774b4e598c8e01f64bbeccbf62eac Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 26 Nov 2024 14:02:42 +0100 Subject: [PATCH 10/22] fix dace_fieldview.program tests --- .../feature_tests/dace/test_program.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py index 0524af1267..8b8f206ef7 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py @@ -151,27 +151,19 @@ def testee_prog( rows = dace.symbol("rows") cols = dace.symbol("cols") - OffsetProvider_t = dace.data.Structure( - { - key: dace.data.Array(dtype=dace.int64, shape=[rows, cols], storage=dace_storage_type) - for key in unstructured.offset_provider - }, - name="OffsetProvider", - ) @dace.program def testee_dace( a: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), b: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), c: dace.data.Array(dtype=dace.int64, shape=(rows, cols), storage=dace_storage_type), - offset_provider: OffsetProvider_t, - connectivities: dace.compiletime, ): - testee_prog.with_grid_type(unstructured.grid_type).with_connectivities(connectivities)( - a, b, c, offset_provider=offset_provider - ) + testee_prog(a, b, c) - sdfg = testee_dace.to_sdfg(connectivities=unstructured.offset_provider) + # if simplify=True, DaCe might inline the nested SDFG coming from Program.__sdfg__, + # effectively erasing the attributes we want to test for here + sdfg = testee_dace.to_sdfg(simplify=False) + sdfg.view() testee = next( subgraph for subgraph in sdfg.all_sdfgs_recursive() if subgraph.name == 
"testee_prog" From f8ec8f5a319a3558309a8356dfd17b985583c6f1 Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 3 Dec 2024 14:25:19 +0100 Subject: [PATCH 11/22] refactor extractors and remove debuginfo warning --- .../next/iterator/transforms/extractors.py | 84 ++++++++++++++ .../runners/dace_fieldview/program.py | 83 ++------------ .../runners/dace_iterator/__init__.py | 9 +- .../feature_tests/dace/test_program.py | 51 +-------- .../iterator_tests/test_extractors.py | 103 ++++++++++++++++++ 5 files changed, 200 insertions(+), 130 deletions(-) create mode 100644 src/gt4py/next/iterator/transforms/extractors.py create mode 100644 tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py diff --git a/src/gt4py/next/iterator/transforms/extractors.py b/src/gt4py/next/iterator/transforms/extractors.py new file mode 100644 index 0000000000..68f74970eb --- /dev/null +++ b/src/gt4py/next/iterator/transforms/extractors.py @@ -0,0 +1,84 @@ +# GT4Py - GridTools Framework +# +# Copyright (c) 2014-2024, ETH Zurich +# All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause + +from gt4py import eve +from gt4py.next.iterator import ir as itir +from gt4py.next.type_system import type_specifications as ts + + +class SymbolNameSetExtractor(eve.NodeVisitor): + """Extract a set of symbol names""" + + def generic_visitor(self, node: itir.Node) -> set[str]: + input_fields: set[str] = set() + for child in eve.trees.iter_children_values(node): + input_fields |= self.visit(child) + return input_fields + + @classmethod + def only_fields(cls, program: itir.Program) -> set[str]: + field_param_names = [ + str(param.id) for param in program.params if isinstance(param.type, ts.FieldType) + ] + return {name for name in cls().visit(program) if name in field_param_names} + + +class InputNamesExtractor(SymbolNameSetExtractor): + """Extract the set of symbol names passed into field operators within a program.""" + + def visit_Program(self, node: itir.Program) -> set[str]: + input_fields = set() + for stmt in node.body: + input_fields |= self.visit(stmt) + return input_fields + + def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: + input_fields = set() + for stmt in node.true_branch + node.false_branch: + input_fields |= self.visit(stmt) + return input_fields + + def visit_Temporary(self, node: itir.Temporary) -> set[str]: + return set() + + def visit_SetAt(self, node: itir.SetAt) -> set[str]: + return self.visit(node.expr) + + def visit_FunCall(self, node: itir.FunCall) -> set[str]: + input_fields = set() + for arg in node.args: + input_fields |= self.visit(arg) + return input_fields + + def visit_SymRef(self, node: itir.SymRef) -> set[str]: + return {str(node.id)} + + +class OutputNamesExtractor(SymbolNameSetExtractor): + """Extract the set of symbol names written to within a program""" + + def visit_Program(self, node: itir.Program) -> set[str]: + output_fields = set() + for stmt in node.body: + output_fields |= self.visit(stmt) + return output_fields + + def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: + output_fields = set() + for stmt in node.true_branch + node.false_branch: + output_fields |= self.visit(stmt) + return output_fields + + def visit_Temporary(self, node: itir.Temporary) -> set[str]: + return set() + + def visit_SetAt(self, node: itir.SetAt) -> set[str]: + return self.visit(node.target) + + def visit_SymRef(self, node: itir.SymRef) -> set[str]: + return {str(node.id)} diff --git 
a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index ea04a430b9..803ae866fb 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -15,10 +15,10 @@ import dace import numpy as np -from gt4py import eve from gt4py.next import backend as next_backend, common from gt4py.next.ffront import decorator from gt4py.next.iterator import ir as itir +from gt4py.next.iterator.transforms import extractors as extractors from gt4py.next.otf import arguments, recipes, toolchain from gt4py.next.program_processors.runners.dace_common import utility as dace_utils from gt4py.next.type_system import type_specifications as ts @@ -96,10 +96,14 @@ def single_horizontal_dim_per_field( if len(horizontal_dims) == 1: yield str(field.id), horizontal_dims[0] - input_fields = (field_params[name] for name in InputNamesExtractor.only_fields(program)) + input_fields = ( + field_params[name] for name in extractors.InputNamesExtractor.only_fields(program) + ) sdfg.gt4py_program_input_fields = dict(single_horizontal_dim_per_field(input_fields)) - output_fields = (field_params[name] for name in OutputNamesExtractor.only_fields(program)) + output_fields = ( + field_params[name] for name in extractors.OutputNamesExtractor.only_fields(program) + ) sdfg.gt4py_program_output_fields = dict(single_horizontal_dim_per_field(output_fields)) # TODO (ricoh): bring back sdfg.offset_providers_per_input_field. @@ -191,79 +195,6 @@ def __sdfg_signature__(self) -> tuple[Sequence[str], Sequence[str]]: return [p.id for p in self.past_stage.past_node.params], [] -class SymbolNameSetExtractor(eve.NodeVisitor): - """Extract a set of symbol names""" - - def generic_visitor(self, node: itir.Node) -> set[str]: - input_fields: set[str] = set() - for child in eve.trees.iter_children_values(node): - input_fields |= self.visit(child) - return input_fields - - @classmethod - def only_fields(cls, program: itir.Program) -> set[str]: - field_param_names = [ - str(param.id) for param in program.params if isinstance(param.type, ts.FieldType) - ] - return {name for name in cls().visit(program) if name in field_param_names} - - -class InputNamesExtractor(SymbolNameSetExtractor): - """Extract the set of symbol names passed into field operators within a program.""" - - def visit_Program(self, node: itir.Program) -> set[str]: - input_fields = set() - for stmt in node.body: - input_fields |= self.visit(stmt) - return input_fields - - def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: - input_fields = set() - for stmt in node.true_branch + node.false_branch: - input_fields |= self.visit(stmt) - return input_fields - - def visit_Temporary(self, node: itir.Temporary) -> set[str]: - return set() - - def visit_SetAt(self, node: itir.SetAt) -> set[str]: - return self.visit(node.expr) - - def visit_FunCall(self, node: itir.FunCall) -> set[str]: - input_fields = set() - for arg in node.args: - input_fields |= self.visit(arg) - return input_fields - - def visit_SymRef(self, node: itir.SymRef) -> set[str]: - return {str(node.id)} - - -class OutputNamesExtractor(SymbolNameSetExtractor): - """Extract the set of symbol names written to within a program""" - - def visit_Program(self, node: itir.Program) -> set[str]: - output_fields = set() - for stmt in node.body: - output_fields |= self.visit(stmt) - return output_fields - - def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: - 
output_fields = set() - for stmt in node.true_branch + node.false_branch: - output_fields |= self.visit(stmt) - return output_fields - - def visit_Temporary(self, node: itir.Temporary) -> set[str]: - return set() - - def visit_SetAt(self, node: itir.SetAt) -> set[str]: - return self.visit(node.target) - - def visit_SymRef(self, node: itir.SymRef) -> set[str]: - return {str(node.id)} - - def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: list[Any]) -> None: for dace_parsed_arg, gt4py_program_arg in zip( dace_parsed_args, diff --git a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py index ba24e1f7df..2a3946a77e 100644 --- a/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py +++ b/src/gt4py/next/program_processors/runners/dace_iterator/__init__.py @@ -6,7 +6,6 @@ # Please, refer to the LICENSE file in the root directory. # SPDX-License-Identifier: BSD-3-Clause -import warnings from collections.abc import Callable, Sequence from inspect import currentframe, getframeinfo from pathlib import Path @@ -120,13 +119,7 @@ def build_sdfg_from_itir( for nested_sdfg in sdfg.all_sdfgs_recursive(): if not nested_sdfg.debuginfo: - _, frameinfo = ( - warnings.warn( - f"{nested_sdfg.label} does not have debuginfo. Consider adding them in the corresponding nested sdfg.", - stacklevel=2, - ), - getframeinfo(currentframe()), # type: ignore[arg-type] - ) + frameinfo = getframeinfo(currentframe()) # type: ignore[arg-type] nested_sdfg.debuginfo = dace.dtypes.DebugInfo( start_line=frameinfo.lineno, end_line=frameinfo.lineno, filename=frameinfo.filename ) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py index 8b8f206ef7..46a908e6f6 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py @@ -10,7 +10,6 @@ from gt4py import next as gtx from gt4py.next import common -from gt4py.next.program_processors.runners.dace_fieldview import program as dace_prg from next_tests.integration_tests import cases from next_tests.integration_tests.feature_tests.ffront_tests.ffront_test_utils import ( @@ -20,16 +19,18 @@ JDim, KDim, Vertex, - mesh_descriptor, + mesh_descriptor, # noqa: F401 ) try: import dace + from gt4py.next.program_processors.runners.dace import gtir_cpu, gtir_gpu except ImportError: - from typing import Optional from types import ModuleType + from typing import Optional + from gt4py.next import backend as next_backend dace: Optional[ModuleType] = None @@ -66,7 +67,7 @@ def cartesian(request, gtir_dace_backend): @pytest.fixture -def unstructured(request, gtir_dace_backend, mesh_descriptor): +def unstructured(request, gtir_dace_backend, mesh_descriptor): # noqa: F811 if gtir_dace_backend is None: yield None @@ -84,48 +85,6 @@ def unstructured(request, gtir_dace_backend, mesh_descriptor): ) -@pytest.mark.skipif(dace is None, reason="DaCe not found") -def test_input_names_extractor_cartesian(cartesian): - @gtx.field_operator(backend=cartesian.backend) - def testee_op( - a: gtx.Field[[IDim, JDim, KDim], gtx.int], - ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: - return a - - @gtx.program(backend=cartesian.backend) - def testee( - a: gtx.Field[[IDim, JDim, KDim], gtx.int], - b: gtx.Field[[IDim, JDim, KDim], gtx.int], - c: gtx.Field[[IDim, JDim, KDim], gtx.int], - ): - testee_op(b, out=c) 
- testee_op(a, out=b) - - input_field_names = dace_prg.InputNamesExtractor.only_fields(testee.itir) - assert input_field_names == {"a", "b"} - - -@pytest.mark.skipif(dace is None, reason="DaCe not found") -def test_output_names_extractor(cartesian): - @gtx.field_operator(backend=cartesian.backend) - def testee_op( - a: gtx.Field[[IDim, JDim, KDim], gtx.int], - ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: - return a - - @gtx.program(backend=cartesian.backend) - def testee( - a: gtx.Field[[IDim, JDim, KDim], gtx.int], - b: gtx.Field[[IDim, JDim, KDim], gtx.int], - c: gtx.Field[[IDim, JDim, KDim], gtx.int], - ): - testee_op(a, out=b) - testee_op(a, out=c) - - output_field_names = dace_prg.OutputNamesExtractor.only_fields(testee.itir) - assert output_field_names == {"b", "c"} - - @pytest.mark.skipif(dace is None, reason="DaCe not found") def test_halo_exchange_helper_attrs(unstructured): @gtx.field_operator(backend=unstructured.backend) diff --git a/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py b/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py new file mode 100644 index 0000000000..48d7b04dda --- /dev/null +++ b/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py @@ -0,0 +1,103 @@ +# GT4Py - GridTools Framework +# +# Copyright (c) 2014-2024, ETH Zurich +# All rights reserved. +# +# Please, refer to the LICENSE file in the root directory. +# SPDX-License-Identifier: BSD-3-Clause + +import typing + +import pytest + +from gt4py import next as gtx +from gt4py.next import common +from gt4py.next.iterator.transforms import extractors + +from next_tests.integration_tests import cases +from next_tests.integration_tests.feature_tests.ffront_tests.ffront_test_utils import ( + IDim, + JDim, + KDim, +) + + +if typing.TYPE_CHECKING: + from types import ModuleType + from typing import Optional + +try: + import dace + + from gt4py.next.program_processors.runners.dace import gtir_cpu, gtir_gpu +except ImportError: + from gt4py.next import backend as next_backend + + dace: Optional[ModuleType] = None + gtir_cpu: Optional[next_backend.Backend] = None + gtir_gpu: Optional[next_backend.Backend] = None + + +@pytest.fixture(params=[pytest.param(gtir_cpu, marks=pytest.mark.requires_dace), gtx.gtfn_cpu]) +def gtir_dace_backend(request): + yield request.param + + +@pytest.fixture +def cartesian(request, gtir_dace_backend): + if gtir_dace_backend is None: + yield None + + yield cases.Case( + backend=gtir_dace_backend, + offset_provider={ + "Ioff": IDim, + "Joff": JDim, + "Koff": KDim, + }, + default_sizes={IDim: 10, JDim: 10, KDim: 10}, + grid_type=common.GridType.CARTESIAN, + allocator=gtir_dace_backend.allocator, + ) + + +@pytest.mark.skipif(dace is None, reason="DaCe not found") +def test_input_names_extractor_cartesian(cartesian): + @gtx.field_operator(backend=cartesian.backend) + def testee_op( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: + return a + + @gtx.program(backend=cartesian.backend) + def testee( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + b: gtx.Field[[IDim, JDim, KDim], gtx.int], + c: gtx.Field[[IDim, JDim, KDim], gtx.int], + ): + testee_op(b, out=c) + testee_op(a, out=b) + + input_field_names = extractors.InputNamesExtractor.only_fields(testee.itir) + assert input_field_names == {"a", "b"} + + +@pytest.mark.skipif(dace is None, reason="DaCe not found") +def test_output_names_extractor(cartesian): + @gtx.field_operator(backend=cartesian.backend) + def 
testee_op( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + ) -> gtx.Field[[IDim, JDim, KDim], gtx.int]: + return a + + @gtx.program(backend=cartesian.backend) + def testee( + a: gtx.Field[[IDim, JDim, KDim], gtx.int], + b: gtx.Field[[IDim, JDim, KDim], gtx.int], + c: gtx.Field[[IDim, JDim, KDim], gtx.int], + ): + testee_op(a, out=b) + testee_op(a, out=c) + + output_field_names = extractors.OutputNamesExtractor.only_fields(testee.itir) + assert output_field_names == {"b", "c"} From 19c76ad27be03be1338b7fab60dfb1c28429d0af Mon Sep 17 00:00:00 2001 From: DropD Date: Tue, 17 Dec 2024 11:39:35 +0100 Subject: [PATCH 12/22] work around gtir transforms requiring connectivity tables --- .../runners/dace_fieldview/program.py | 24 +++++- .../runners/dace_fieldview/workflow.py | 4 +- .../feature_tests/dace/test_orchestration.py | 78 ++++++++++--------- 3 files changed, 66 insertions(+), 40 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 803ae866fb..4c325db931 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -17,7 +17,7 @@ from gt4py.next import backend as next_backend, common from gt4py.next.ffront import decorator -from gt4py.next.iterator import ir as itir +from gt4py.next.iterator import ir as itir, transforms as itir_transforms from gt4py.next.iterator.transforms import extractors as extractors from gt4py.next.otf import arguments, recipes, toolchain from gt4py.next.program_processors.runners.dace_common import utility as dace_utils @@ -46,6 +46,7 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: } column_axis = kwargs.get("column_axis", None) + # TODO(ricoh): connectivity tables required here for now. gtir_stage = typing.cast(next_backend.Transforms, self.backend.transforms).past_to_itir( toolchain.CompilableProgram( data=self.past_stage, @@ -60,6 +61,20 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: program = typing.cast( itir.Program, gtir_stage.data ) # we already checked that our backend uses GTIR + program = itir_transforms.apply_fieldview_transforms( # run the transforms separately because they require the runtime info + program, offset_provider=offset_provider + ) + object.__setattr__( + gtir_stage, + "data", + program, + ) + object.__setattr__( + gtir_stage.args, "offset_provider", gtir_stage.args.offset_provider_type + ) # TODO(ricoh): currently this is circumventing the frozenness of CompileTimeArgs + # in order to isolate DaCe from the runtime tables in connectivities.offset_provider. + # These are needed at the time of writing for mandatory GTIR passes. + # Remove this as soon as Program does not expect connectivity tables anymore. _crosscheck_dace_parsing( dace_parsed_args=[*args, *kwargs.values()], @@ -72,7 +87,12 @@ def __sdfg__(self, *args: Any, **kwargs: Any) -> dace.sdfg.sdfg.SDFG: if not hasattr(self.backend.executor, "step") else self.backend.executor.step, ) # We know which backend we are using, but we don't know if the compile workflow is cached. - sdfg = dace.SDFG.from_json(compile_workflow.translation(gtir_stage).source_code) + # TODO(ricoh): switch 'itir_transforms_off=True' because we ran them separately previously + # and so we can ensure the SDFG does not know any runtime info it shouldn't know. Remove with + # the other parts of the workaround when possible. 
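# A minimal, self-contained sketch of the replace() pattern relied on below,
# assuming only that the translation step is a frozen dataclass whose replace()
# hands back a modified copy instead of mutating the cached workflow (the class
# and field names here are illustrative, not the real DaCeTranslator API):
import dataclasses

@dataclasses.dataclass(frozen=True)
class _StepSketch:
    itir_transforms_off: bool = False

# replace() leaves the original step untouched and returns a copy with the flag set.
assert dataclasses.replace(_StepSketch(), itir_transforms_off=True).itir_transforms_off
assert not _StepSketch().itir_transforms_off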
+ sdfg = dace.SDFG.from_json( + compile_workflow.translation.replace(itir_transforms_off=True)(gtir_stage).source_code + ) self.sdfg_closure_cache["arrays"] = sdfg.arrays diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py b/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py index 40d44f5ab0..e9a5305344 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/workflow.py @@ -37,6 +37,7 @@ class DaCeTranslator( ): device_type: core_defs.DeviceType auto_optimize: bool + itir_transforms_off: bool = False def _language_settings(self) -> languages.LanguageSettings: return languages.LanguageSettings( @@ -51,7 +52,8 @@ def generate_sdfg( auto_opt: bool, on_gpu: bool, ) -> dace.SDFG: - ir = itir_transforms.apply_fieldview_transforms(ir, offset_provider=offset_provider) + if not self.itir_transforms_off: + ir = itir_transforms.apply_fieldview_transforms(ir, offset_provider=offset_provider) sdfg = gtir_sdfg.build_sdfg_from_gtir( ir, offset_provider_type=common.offset_provider_to_type(offset_provider) ) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index 9bde9acadc..da20045f3b 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -16,14 +16,14 @@ from gt4py.next import backend as next_backend, common from next_tests.integration_tests import cases -from next_tests.integration_tests.cases import cartesian_case, unstructured_case +from next_tests.integration_tests.cases import cartesian_case, unstructured_case # noqa: F401 from next_tests.integration_tests.feature_tests.ffront_tests.ffront_test_utils import ( E2V, E2VDim, Edge, Vertex, - exec_alloc_descriptor, - mesh_descriptor, + exec_alloc_descriptor, # noqa: F401 + mesh_descriptor, # noqa: F401 ) from next_tests.integration_tests.multi_feature_tests.ffront_tests.test_laplacian import ( lap_program, @@ -34,6 +34,7 @@ try: import dace + from gt4py.next.program_processors.runners.dace import ( gtir_cpu as run_dace_cpu, gtir_gpu as run_dace_gpu, @@ -46,7 +47,7 @@ pytestmark = pytest.mark.requires_dace -def test_sdfgConvertible_laplap(cartesian_case): +def test_sdfgConvertible_laplap(cartesian_case): # noqa: F811 # TODO(kotsaloscv): Temporary solution until the `requires_dace` marker is fully functional if cartesian_case.backend not in [run_dace_cpu, run_dace_gpu]: pytest.skip("DaCe-related test: Test SDFGConvertible interface for GT4Py programs") @@ -93,7 +94,7 @@ def testee(a: gtx.Field[gtx.Dims[Vertex], gtx.float64], b: gtx.Field[gtx.Dims[Ed @pytest.mark.uses_unstructured_shift -def test_sdfgConvertible_connectivities(unstructured_case): +def test_sdfgConvertible_connectivities(unstructured_case): # noqa: F811 # TODO(kotsaloscv): Temporary solution until the `requires_dace` marker is fully functional if unstructured_case.backend not in [run_dace_cpu, run_dace_gpu]: pytest.skip("DaCe-related test: Test SDFGConvertible interface for GT4Py programs") @@ -116,6 +117,23 @@ def test_sdfgConvertible_connectivities(unstructured_case): name="OffsetProvider", ) + e2v = gtx.as_connectivity( + [Edge, E2VDim], + codomain=Vertex, + data=xp.asarray([[0, 1], [1, 2], [2, 0]]), + allocator=allocator, + ) + e2v_ndarray_copy = ( + e2v.ndarray.copy() + ) # otherwise DaCe complains about the gt4py custom 
allocated view + # This is a low level interface to call the compiled SDFG. + # It is not supposed to be used in user code. + # The high level interface should be provided by a DaCe Orchestrator, + # i.e. decorator that hides the low level operations. + # This test checks only that the SDFGConvertible interface works correctly. + + testee2 = testee.with_backend(backend).with_connectivities({"E2V": e2v}) + @dace.program def sdfg( a: dace.data.Array(dtype=dace.float64, shape=(rows,), storage=dace_storage_type), @@ -123,17 +141,10 @@ def sdfg( offset_provider: OffsetProvider_t, connectivities: dace.compiletime, ): - testee.with_backend(backend).with_connectivities(connectivities)( - a, out, offset_provider=offset_provider - ) + testee2.with_connectivities(connectivities)(a, out, offset_provider=offset_provider) + return out - e2v = gtx.as_connectivity( - [Edge, E2VDim], - codomain=Vertex, - data=xp.asarray([[0, 1], [1, 2], [2, 0]]), - allocator=allocator, - ) - connectivities = {"E2V": e2v.__gt_type__()} + connectivities = {"E2V": e2v} # replace 'e2v' with 'e2v.__gt_type__()' when GTIR is AOT offset_provider = OffsetProvider_t.dtype._typeclass.as_ctypes()(E2V=e2v.data_ptr()) SDFG = sdfg.to_sdfg(connectivities=connectivities) @@ -142,23 +153,15 @@ def sdfg( a = gtx.as_field([Vertex], xp.asarray([0.0, 1.0, 2.0]), allocator=allocator) out = gtx.zeros({Edge: 3}, allocator=allocator) - e2v_ndarray_copy = ( - e2v.ndarray.copy() - ) # otherwise DaCe complains about the gt4py custom allocated view - # This is a low level interface to call the compiled SDFG. - # It is not supposed to be used in user code. - # The high level interface should be provided by a DaCe Orchestrator, - # i.e. decorator that hides the low level operations. - # This test checks only that the SDFGConvertible interface works correctly. 
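# (Context for the call below: the compiled SDFG is invoked directly, so next
# to the two GT4Py fields it receives the ctypes offset_provider struct built
# above, the domain symbols `rows`/`cols`, the E2V table under its closure
# name `connectivity_E2V`, and that table's stride symbols, which the SDFG
# treats as free DaCe symbols and expects in elements rather than bytes.)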
cSDFG( a, out, offset_provider, rows=3, cols=2, - connectivity_E2V=e2v_ndarray_copy, - __connectivity_E2V_stride_0=get_stride_from_numpy_to_dace(e2v_ndarray_copy, 0), - __connectivity_E2V_stride_1=get_stride_from_numpy_to_dace(e2v_ndarray_copy, 1), + connectivity_E2V=e2v, + __connectivity_E2V_stride_0=2, + __connectivity_E2V_stride_1=1, ) e2v_np = e2v.asnumpy() @@ -170,18 +173,19 @@ def sdfg( data=xp.asarray([[1, 0], [2, 1], [0, 2]]), allocator=allocator, ) - e2v_ndarray_copy = e2v.ndarray.copy() offset_provider = OffsetProvider_t.dtype._typeclass.as_ctypes()(E2V=e2v.data_ptr()) - cSDFG( - a, - out, - offset_provider, - rows=3, - cols=2, - connectivity_E2V=e2v_ndarray_copy, - __connectivity_E2V_stride_0=get_stride_from_numpy_to_dace(e2v_ndarray_copy, 0), - __connectivity_E2V_stride_1=get_stride_from_numpy_to_dace(e2v_ndarray_copy, 1), - ) + with dace.config.temporary_config(): + dace.config.Config.set("compiler", "allow_view_arguments", value=True) + cSDFG( + a, + out, + offset_provider, + rows=3, + cols=2, + connectivity_E2V=e2v, + __connectivity_E2V_stride_0=2, + __connectivity_E2V_stride_1=1, + ) e2v_np = e2v.asnumpy() assert np.allclose(out.asnumpy(), a.asnumpy()[e2v_np[:, 0]]) From fd7f472c77d49172b7e416ae03204764c46d5ed7 Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Wed, 18 Dec 2024 12:07:55 +0100 Subject: [PATCH 13/22] Add visitor fir Literal node --- src/gt4py/next/iterator/transforms/extractors.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/gt4py/next/iterator/transforms/extractors.py b/src/gt4py/next/iterator/transforms/extractors.py index 68f74970eb..1a06939af8 100644 --- a/src/gt4py/next/iterator/transforms/extractors.py +++ b/src/gt4py/next/iterator/transforms/extractors.py @@ -14,6 +14,9 @@ class SymbolNameSetExtractor(eve.NodeVisitor): """Extract a set of symbol names""" + def visit_Literal(self, node: itir.Literal) -> set[str]: + return set() + def generic_visitor(self, node: itir.Node) -> set[str]: input_fields: set[str] = set() for child in eve.trees.iter_children_values(node): From 71e54fdb6b6aad6a371b01cf080550cfa7d28f42 Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Wed, 18 Dec 2024 14:48:26 +0100 Subject: [PATCH 14/22] Fix for attribute rename itir -> gtir on latest main --- .../feature_tests/iterator_tests/test_extractors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py b/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py index 5e39f93815..7358ab3d8f 100644 --- a/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py +++ b/tests/next_tests/integration_tests/feature_tests/iterator_tests/test_extractors.py @@ -77,7 +77,7 @@ def testee( testee_op(b, out=c) testee_op(a, out=b) - input_field_names = extractors.InputNamesExtractor.only_fields(testee.itir) + input_field_names = extractors.InputNamesExtractor.only_fields(testee.gtir) assert input_field_names == {"a", "b"} @@ -98,5 +98,5 @@ def testee( testee_op(a, out=b) testee_op(a, out=c) - output_field_names = extractors.OutputNamesExtractor.only_fields(testee.itir) + output_field_names = extractors.OutputNamesExtractor.only_fields(testee.gtir) assert output_field_names == {"b", "c"} From 99030d03d0edfaf0b5c0da56dfb1a8c1ee15cd7f Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Wed, 18 Dec 2024 15:25:36 +0100 Subject: [PATCH 15/22] Fix error on main (is_field_allocator_factory_for -> is_field_allocator_for) --- 
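Note: the renamed helper only feeds the CuPy-vs-NumPy dispatch in these tests.
A minimal sketch of that dispatch, independent of gt4py and safe to run without
a GPU (the try/except variant here is a simplification of the allocator check
used in the tests, not the gt4py API itself):

    try:
        import cupy as xp  # GPU runs of the DaCe backends use CuPy buffers
    except ImportError:
        import numpy as xp  # CPU fallback keeps the sketch runnable anywhere

    table = xp.asarray([[0, 1], [1, 2], [2, 0]])
    assert table.shape == (3, 2)
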
.../feature_tests/dace/test_orchestration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index 8c9f17b08f..22d9957633 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -43,7 +43,7 @@ def test_sdfgConvertible_laplap(cartesian_case): # noqa: F811 allocator, backend = cartesian_case.allocator, cartesian_case.backend - if gtx_allocators.is_field_allocator_factory_for(allocator, gtx_allocators.CUPY_DEVICE): + if gtx_allocators.is_field_allocator_for(allocator, gtx_allocators.CUPY_DEVICE): import cupy as xp else: import numpy as xp @@ -91,7 +91,7 @@ def test_sdfgConvertible_connectivities(unstructured_case): # noqa: F811 allocator, backend = unstructured_case.allocator, unstructured_case.backend - if gtx_allocators.is_field_allocator_factory_for(allocator, gtx_allocators.CUPY_DEVICE): + if gtx_allocators.is_field_allocator_for(allocator, gtx_allocators.CUPY_DEVICE): import cupy as xp dace_storage_type = dace.StorageType.GPU_Global From 439ec4352983b774791361bcdda1dc1a860fec76 Mon Sep 17 00:00:00 2001 From: DropD Date: Mon, 6 Jan 2025 09:57:19 +0100 Subject: [PATCH 16/22] cover addition in the program tests --- src/gt4py/next/iterator/transforms/extractors.py | 3 +++ .../integration_tests/feature_tests/dace/test_program.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/gt4py/next/iterator/transforms/extractors.py b/src/gt4py/next/iterator/transforms/extractors.py index 68f74970eb..d95058743a 100644 --- a/src/gt4py/next/iterator/transforms/extractors.py +++ b/src/gt4py/next/iterator/transforms/extractors.py @@ -20,6 +20,9 @@ def generic_visitor(self, node: itir.Node) -> set[str]: input_fields |= self.visit(child) return input_fields + def visit_Node(self, node: itir.Node) -> set[str]: + return set() + @classmethod def only_fields(cls, program: itir.Program) -> set[str]: field_param_names = [ diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py index 46a908e6f6..8d706c26fd 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py @@ -91,7 +91,7 @@ def test_halo_exchange_helper_attrs(unstructured): def testee_op( a: gtx.Field[[Vertex, KDim], gtx.int], ) -> gtx.Field[[Vertex, KDim], gtx.int]: - return a + return a + 10 @gtx.program(backend=unstructured.backend) def testee_prog( From 6c6a4e3d0b4bcf1ef4cdbde0b87343f43f3eb26a Mon Sep 17 00:00:00 2001 From: DropD Date: Mon, 6 Jan 2025 10:29:45 +0100 Subject: [PATCH 17/22] clean up dace-program tests and extractors --- .../next/iterator/transforms/extractors.py | 54 +++++++------------ .../feature_tests/dace/test_program.py | 18 ++++--- 2 files changed, 28 insertions(+), 44 deletions(-) diff --git a/src/gt4py/next/iterator/transforms/extractors.py b/src/gt4py/next/iterator/transforms/extractors.py index 51c9b3f927..04c2b09139 100644 --- a/src/gt4py/next/iterator/transforms/extractors.py +++ b/src/gt4py/next/iterator/transforms/extractors.py @@ -26,6 +26,24 @@ def generic_visitor(self, node: itir.Node) -> set[str]: def visit_Node(self, node: itir.Node) -> set[str]: return set() + def visit_Program(self, node: itir.Program) -> 
set[str]: + names = set() + for stmt in node.body: + names |= self.visit(stmt) + return names + + def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: + names = set() + for stmt in node.true_branch + node.false_branch: + names |= self.visit(stmt) + return names + + def visit_Temporary(self, node: itir.Temporary) -> set[str]: + return set() + + def visit_SymRef(self, node: itir.SymRef) -> set[str]: + return {str(node.id)} + @classmethod def only_fields(cls, program: itir.Program) -> set[str]: field_param_names = [ @@ -37,21 +55,6 @@ def only_fields(cls, program: itir.Program) -> set[str]: class InputNamesExtractor(SymbolNameSetExtractor): """Extract the set of symbol names passed into field operators within a program.""" - def visit_Program(self, node: itir.Program) -> set[str]: - input_fields = set() - for stmt in node.body: - input_fields |= self.visit(stmt) - return input_fields - - def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: - input_fields = set() - for stmt in node.true_branch + node.false_branch: - input_fields |= self.visit(stmt) - return input_fields - - def visit_Temporary(self, node: itir.Temporary) -> set[str]: - return set() - def visit_SetAt(self, node: itir.SetAt) -> set[str]: return self.visit(node.expr) @@ -61,30 +64,9 @@ def visit_FunCall(self, node: itir.FunCall) -> set[str]: input_fields |= self.visit(arg) return input_fields - def visit_SymRef(self, node: itir.SymRef) -> set[str]: - return {str(node.id)} - class OutputNamesExtractor(SymbolNameSetExtractor): """Extract the set of symbol names written to within a program""" - def visit_Program(self, node: itir.Program) -> set[str]: - output_fields = set() - for stmt in node.body: - output_fields |= self.visit(stmt) - return output_fields - - def visit_IfStmt(self, node: itir.IfStmt) -> set[str]: - output_fields = set() - for stmt in node.true_branch + node.false_branch: - output_fields |= self.visit(stmt) - return output_fields - - def visit_Temporary(self, node: itir.Temporary) -> set[str]: - return set() - def visit_SetAt(self, node: itir.SetAt) -> set[str]: return self.visit(node.target) - - def visit_SymRef(self, node: itir.SymRef) -> set[str]: - return {str(node.id)} diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py index 8d706c26fd..db0f90b409 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_program.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_program.py @@ -26,7 +26,7 @@ try: import dace - from gt4py.next.program_processors.runners.dace import gtir_cpu, gtir_gpu + from gt4py.next.program_processors.runners import dace as dace_backends except ImportError: from types import ModuleType from typing import Optional @@ -34,14 +34,15 @@ from gt4py.next import backend as next_backend dace: Optional[ModuleType] = None - gtir_cpu: Optional[next_backend.Backend] = None - gtir_gpu: Optional[next_backend.Backend] = None + dace_backends: Optional[ModuleType] = None @pytest.fixture( params=[ - pytest.param(gtir_cpu, marks=pytest.mark.requires_dace), - pytest.param(gtir_gpu, marks=(pytest.mark.requires_gpu, pytest.mark.requires_dace)), + pytest.param(dace_backends.run_dace_cpu, marks=pytest.mark.requires_dace), + pytest.param( + dace_backends.run_dace_gpu, marks=(pytest.mark.requires_gpu, pytest.mark.requires_dace) + ), ] ) def gtir_dace_backend(request): @@ -87,11 +88,13 @@ def unstructured(request, gtir_dace_backend, mesh_descriptor): # noqa: F811 
@pytest.mark.skipif(dace is None, reason="DaCe not found") def test_halo_exchange_helper_attrs(unstructured): + local_int = gtx.int + @gtx.field_operator(backend=unstructured.backend) def testee_op( a: gtx.Field[[Vertex, KDim], gtx.int], ) -> gtx.Field[[Vertex, KDim], gtx.int]: - return a + 10 + return a + local_int(10) @gtx.program(backend=unstructured.backend) def testee_prog( @@ -104,7 +107,7 @@ def testee_prog( dace_storage_type = ( dace.StorageType.GPU_Global - if unstructured.backend == gtir_gpu + if unstructured.backend == dace_backends.run_dace_gpu else dace.StorageType.Default ) @@ -122,7 +125,6 @@ def testee_dace( # if simplify=True, DaCe might inline the nested SDFG coming from Program.__sdfg__, # effectively erasing the attributes we want to test for here sdfg = testee_dace.to_sdfg(simplify=False) - sdfg.view() testee = next( subgraph for subgraph in sdfg.all_sdfgs_recursive() if subgraph.name == "testee_prog" From f799bc06b6b8d2a4cf980433e57d876c669fe393 Mon Sep 17 00:00:00 2001 From: DropD Date: Mon, 6 Jan 2025 11:37:24 +0100 Subject: [PATCH 18/22] cleanup orchestration tests --- .../feature_tests/dace/test_orchestration.py | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index 22d9957633..5af2cf6d18 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -41,16 +41,13 @@ def test_sdfgConvertible_laplap(cartesian_case): # noqa: F811 if not cartesian_case.backend or "dace" not in cartesian_case.backend.name: pytest.skip("DaCe-related test: Test SDFGConvertible interface for GT4Py programs") - allocator, backend = cartesian_case.allocator, cartesian_case.backend - - if gtx_allocators.is_field_allocator_for(allocator, gtx_allocators.CUPY_DEVICE): - import cupy as xp - else: - import numpy as xp + backend = cartesian_case.backend in_field = cases.allocate(cartesian_case, laplap_program, "in_field")() out_field = cases.allocate(cartesian_case, laplap_program, "out_field")() + xp = in_field.array_ns + # Test DaCe closure support @dace.program def sdfg(): @@ -113,14 +110,6 @@ def test_sdfgConvertible_connectivities(unstructured_case): # noqa: F811 data=xp.asarray([[0, 1], [1, 2], [2, 0]]), allocator=allocator, ) - e2v_ndarray_copy = ( - e2v.ndarray.copy() - ) # otherwise DaCe complains about the gt4py custom allocated view - # This is a low level interface to call the compiled SDFG. - # It is not supposed to be used in user code. - # The high level interface should be provided by a DaCe Orchestrator, - # i.e. decorator that hides the low level operations. - # This test checks only that the SDFGConvertible interface works correctly. 
testee2 = testee.with_backend(backend).with_connectivities({"E2V": e2v}) From 9c884e330803e8b81d967433cab5098ea07995a2 Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Mon, 13 Jan 2025 11:29:22 +0100 Subject: [PATCH 19/22] Fix attribute dtype error --- .../next/program_processors/runners/dace_fieldview/program.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 7e59dec50a..6e5bbdebb7 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -221,7 +221,7 @@ def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: li ): match dace_parsed_arg: case dace.data.Scalar(): - assert dace_parsed_arg.type == dace_utils.as_dace_type(gt4py_program_arg) + assert dace_parsed_arg.dtype == dace_utils.as_dace_type(gt4py_program_arg) case bool() | np.bool_(): assert isinstance(gt4py_program_arg, ts.ScalarType) assert gt4py_program_arg.kind == ts.ScalarKind.BOOL From 3ecaab49e322a1532149c72c4a393d55cf78645d Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Mon, 13 Jan 2025 14:13:31 +0100 Subject: [PATCH 20/22] fix mypy error --- .../next/program_processors/runners/dace_fieldview/program.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index 6e5bbdebb7..c1ef00f106 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -236,6 +236,7 @@ def _crosscheck_dace_parsing(dace_parsed_args: list[Any], gt4py_program_args: li assert gt4py_program_arg.kind == ts.ScalarKind.STRING case dace.data.Array(): assert isinstance(gt4py_program_arg, ts.FieldType) + assert isinstance(gt4py_program_arg.dtype, ts.ScalarType) assert len(dace_parsed_arg.shape) == len(gt4py_program_arg.dims) assert dace_parsed_arg.dtype == dace_utils.as_dace_type(gt4py_program_arg.dtype) case dace.data.Structure() | dict() | collections.OrderedDict(): From 8976caa7a6661beadc10354aeb3807dfcad39e35 Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Mon, 13 Jan 2025 18:22:02 +0100 Subject: [PATCH 21/22] fix stride problem --- .../program_processors/runners/dace_fieldview/program.py | 2 +- .../feature_tests/dace/test_orchestration.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py index c1ef00f106..7f809152c5 100644 --- a/src/gt4py/next/program_processors/runners/dace_fieldview/program.py +++ b/src/gt4py/next/program_processors/runners/dace_fieldview/program.py @@ -194,7 +194,7 @@ def stride_symbol_name(name: str, axis: int) -> str: conn = self.connectivities[name] assert common.is_neighbor_table(conn) self.connectivity_tables_data_descriptors[conn_id] = dace.data.Array( - dtype=dace.int64 if conn.dtype == np.int64 else dace.int32, + dtype=dace.dtypes.dtype_to_typeclass(conn.dtype.dtype.type), shape=[ symbols[dace_utils.field_size_symbol_name(conn_id, 0)], symbols[dace_utils.field_size_symbol_name(conn_id, 1)], diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py 
index 5af2cf6d18..e262147369 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -139,8 +139,8 @@ def sdfg( rows=3, cols=2, connectivity_E2V=e2v, - __connectivity_E2V_stride_0=2, - __connectivity_E2V_stride_1=1, + __connectivity_E2V_stride_0=e2v.ndarray.strides[0] // e2v.ndarray.itemsize, + __connectivity_E2V_stride_1=e2v.ndarray.strides[1] // e2v.ndarray.itemsize, ) e2v_np = e2v.asnumpy() @@ -162,8 +162,8 @@ def sdfg( rows=3, cols=2, connectivity_E2V=e2v, - __connectivity_E2V_stride_0=2, - __connectivity_E2V_stride_1=1, + __connectivity_E2V_stride_0=e2v.ndarray.strides[0] // e2v.ndarray.itemsize, + __connectivity_E2V_stride_1=e2v.ndarray.strides[1] // e2v.ndarray.itemsize, ) e2v_np = e2v.asnumpy() From d89383d6d35927f4d1aaa562c96bcdfdfaabda41 Mon Sep 17 00:00:00 2001 From: Edoardo Paone Date: Mon, 13 Jan 2025 18:29:25 +0100 Subject: [PATCH 22/22] fix stride problem (1) --- .../feature_tests/dace/test_orchestration.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py index e262147369..cd71c306eb 100644 --- a/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py +++ b/tests/next_tests/integration_tests/feature_tests/dace/test_orchestration.py @@ -12,6 +12,7 @@ import gt4py.next as gtx from gt4py.next import allocators as gtx_allocators, common as gtx_common +from gt4py._core import definitions as core_defs from next_tests.integration_tests import cases from next_tests.integration_tests.cases import cartesian_case, unstructured_case # noqa: F401 from next_tests.integration_tests.feature_tests.ffront_tests.ffront_test_utils import ( @@ -127,11 +128,16 @@ def sdfg( offset_provider = OffsetProvider_t.dtype._typeclass.as_ctypes()(E2V=e2v.data_ptr()) SDFG = sdfg.to_sdfg(connectivities=connectivities) - cSDFG = SDFG.compile() a = gtx.as_field([Vertex], xp.asarray([0.0, 1.0, 2.0]), allocator=allocator) out = gtx.zeros({Edge: 3}, allocator=allocator) + + def get_stride_from_numpy_to_dace(arg: core_defs.NDArrayObject, axis: int) -> int: + # NumPy strides: number of bytes to jump + # DaCe strides: number of elements to jump + return arg.strides[axis] // arg.itemsize + cSDFG( a, out, @@ -139,8 +145,8 @@ def sdfg( rows=3, cols=2, connectivity_E2V=e2v, - __connectivity_E2V_stride_0=e2v.ndarray.strides[0] // e2v.ndarray.itemsize, - __connectivity_E2V_stride_1=e2v.ndarray.strides[1] // e2v.ndarray.itemsize, + __connectivity_E2V_stride_0=get_stride_from_numpy_to_dace(e2v.ndarray, 0), + __connectivity_E2V_stride_1=get_stride_from_numpy_to_dace(e2v.ndarray, 1), ) e2v_np = e2v.asnumpy() @@ -162,8 +168,8 @@ def sdfg( rows=3, cols=2, connectivity_E2V=e2v, - __connectivity_E2V_stride_0=e2v.ndarray.strides[0] // e2v.ndarray.itemsize, - __connectivity_E2V_stride_1=e2v.ndarray.strides[1] // e2v.ndarray.itemsize, + __connectivity_E2V_stride_0=get_stride_from_numpy_to_dace(e2v.ndarray, 0), + __connectivity_E2V_stride_1=get_stride_from_numpy_to_dace(e2v.ndarray, 1), ) e2v_np = e2v.asnumpy()
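
Note: the last two patches hinge on the distinction between NumPy strides
(bytes) and the values the DaCe stride symbols expect (elements), as spelled
out in get_stride_from_numpy_to_dace above. A short standalone check of that
conversion, with a plain NumPy array standing in for the E2V connectivity
buffer:

    import numpy as np

    def elem_strides(arr: np.ndarray) -> tuple[int, ...]:
        # NumPy reports strides in bytes; dividing by the item size yields the
        # element strides bound to the __connectivity_E2V_stride_* symbols.
        return tuple(s // arr.itemsize for s in arr.strides)

    e2v_table = np.asarray([[0, 1], [1, 2], [2, 0]], dtype=np.int64)
    assert e2v_table.strides == (16, 8)       # bytes per step
    assert elem_strides(e2v_table) == (2, 1)  # elements per step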