From bfa78104f6fac812197b913d285b914b37210ddf Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Fri, 8 Dec 2023 17:05:18 +0000 Subject: [PATCH 1/6] sparsity: fix some tests --- test/unit/test_api.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 9de89cb04..b89ac09cd 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -1087,21 +1087,25 @@ def test_sparsity_mmap_iter(self, ms): def test_sparsity_mmap_getitem(self, ms): """Sparsity block i, j should be defined on the corresponding row and column DataSets and Maps.""" - for i, (rds, rm) in enumerate(zip(ms.dsets[0], ms.rmaps)): - for j, (cds, cm) in enumerate(zip(ms.dsets[1], ms.cmaps)): + rmaps, = ms.rmaps + cmaps, = ms.cmaps + for i, (rds, rm) in enumerate(zip(ms.dsets[0], rmaps)): + for j, (cds, cm) in enumerate(zip(ms.dsets[1], cmaps)): block = ms[i, j] # Indexing with a tuple and double index is equivalent assert block == ms[i][j] assert (block.dsets == (rds, cds) - and block.maps == [(rm.split[i], cm.split[j])]) + and block.maps == [(rm, cm)]) def test_sparsity_mmap_getrow(self, ms): """Indexing a Sparsity with a single index should yield a row of blocks.""" - for i, (rds, rm) in enumerate(zip(ms.dsets[0], ms.rmaps)): - for j, (s, cds, cm) in enumerate(zip(ms[i], ms.dsets[1], ms.cmaps)): + rmaps, = ms.rmaps + cmaps, = ms.cmaps + for i, (rds, rm) in enumerate(zip(ms.dsets[0], rmaps)): + for j, (s, cds, cm) in enumerate(zip(ms[i], ms.dsets[1], cmaps)): assert (s.dsets == (rds, cds) - and s.maps == [(rm.split[i], cm.split[j])]) + and s.maps == [(rm, cm)]) def test_sparsity_mmap_shape(self, ms): "Sparsity shape of should be the sizes of the mixed space." From 09968a4b75d8bf23e48f9601a3984656f4d3c331 Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Fri, 8 Dec 2023 17:05:24 +0000 Subject: [PATCH 2/6] sparsity: repack rmaps, cmaps, and iteration_regions in maps_and_regions - maps_and_regions[(i, j)] = list of (rmap, cmap, iteration_regions) - virtually noop --- pyop2/sparsity.pyx | 11 +++-- pyop2/types/mat.py | 99 +++++++++++++++++-------------------------- test/unit/test_api.py | 46 ++++++-------------- 3 files changed, 57 insertions(+), 99 deletions(-) diff --git a/pyop2/sparsity.pyx b/pyop2/sparsity.pyx index d88bab456..1d02c1e1a 100644 --- a/pyop2/sparsity.pyx +++ b/pyop2/sparsity.pyx @@ -141,17 +141,16 @@ def build_sparsity(sparsity): bsize=1) preallocator.setUp() - iteration_regions = sparsity.iteration_regions if mixed: for i, r in enumerate(rset): for j, c in enumerate(cset): - maps = list(zip((m.split[i] for m in sparsity.rmaps), - (m.split[j] for m in sparsity.cmaps))) + maps = sparsity.rcmaps[(i, j)] + iter_regions = sparsity.iteration_regions[(i, j)] mat = preallocator.getLocalSubMatrix(isrow=rset.local_ises[i], iscol=cset.local_ises[j]) fill_with_zeros(mat, (r.cdim, c.cdim), maps, - iteration_regions, + iter_regions, set_diag=((i == j) and sparsity._has_diagonal)) mat.assemble() preallocator.restoreLocalSubMatrix(isrow=rset.local_ises[i], @@ -160,8 +159,8 @@ def build_sparsity(sparsity): preallocator.assemble() nnz, onnz = get_preallocation(preallocator, nrows) else: - fill_with_zeros(preallocator, (1, 1), sparsity.maps, - iteration_regions, set_diag=sparsity._has_diagonal) + fill_with_zeros(preallocator, (1, 1), sparsity.rcmaps[(0, 0)], + sparsity.iteration_regions[(0, 0)], set_diag=sparsity._has_diagonal) preallocator.assemble() nnz, onnz = get_preallocation(preallocator, nrows) if not (sparsity._block_sparse and rset.cdim == 
cset.cdim): diff --git a/pyop2/types/mat.py b/pyop2/types/mat.py index b96594a1e..3b4472e09 100644 --- a/pyop2/types/mat.py +++ b/pyop2/types/mat.py @@ -55,48 +55,41 @@ def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, # Protect against re-initialization when retrieved from cache if self._initialized: return - - self._block_sparse = block_sparse - # Split into a list of row maps and a list of column maps - maps, iteration_regions = zip(*maps) - self._rmaps, self._cmaps = zip(*maps) self._dsets = dsets - + maps, iteration_regions = zip(*maps) + _rmaps, _cmaps = zip(*maps) + _maps_and_regions = {} + if isinstance(dsets[0], MixedDataSet) or isinstance(dsets[1], MixedDataSet): + rset, cset = dsets + for i, _ in enumerate(rset): + for j, _ in enumerate(cset): + _maps_and_regions[(i, j)] = list(zip((rm.split[i] for rm in _rmaps), + (cm.split[j] for cm in _cmaps), + iteration_regions)) + else: + _maps_and_regions[(0, 0)] = list(zip(_rmaps, _cmaps, iteration_regions)) + self._maps_and_regions = _maps_and_regions + self._block_sparse = block_sparse + self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) + self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) if isinstance(dsets[0], GlobalDataSet) or isinstance(dsets[1], GlobalDataSet): self._dims = (((1, 1),),) self._d_nnz = None self._o_nnz = None - self.lcomm = mpi.internal_comm( - dsets[0].comm if isinstance(dsets[0], GlobalDataSet) else self._rmaps[0].comm, - self - ) - self.rcomm = mpi.internal_comm( - dsets[1].comm if isinstance(dsets[1], GlobalDataSet) else self._cmaps[0].comm, - self - ) else: - self.lcomm = mpi.internal_comm(self._rmaps[0].comm, self) - self.rcomm = mpi.internal_comm(self._cmaps[0].comm, self) - rset, cset = self.dsets - self._has_diagonal = (rset == cset) - - tmp = itertools.product([x.cdim for x in self._dsets[0]], - [x.cdim for x in self._dsets[1]]) - + tmp = itertools.product([x.cdim for x in self.dsets[0]], + [x.cdim for x in self.dsets[1]]) dims = [[None for _ in range(self.shape[1])] for _ in range(self.shape[0])] for r in range(self.shape[0]): for c in range(self.shape[1]): dims[r][c] = next(tmp) - self._dims = tuple(tuple(d) for d in dims) - if self.lcomm != self.rcomm: raise ValueError("Haven't thought hard enough about different left and right communicators") self.comm = mpi.internal_comm(self.lcomm, self) self._name = name or "sparsity_#x%x" % id(self) - self.iteration_regions = iteration_regions # If the Sparsity is defined on MixedDataSets, we need to build each # block separately if (isinstance(dsets[0], MixedDataSet) or isinstance(dsets[1], MixedDataSet)) \ @@ -106,9 +99,8 @@ def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, for i, rds in enumerate(dsets[0]): row = [] for j, cds in enumerate(dsets[1]): - row.append(Sparsity((rds, cds), [(rm.split[i], cm.split[j]) for - rm, cm in maps], - iteration_regions=iteration_regions, + row.append(Sparsity((rds, cds), self.rcmaps[(i, j)], + iteration_regions=self.iteration_regions[(i, j)], block_sparse=block_sparse)) self._blocks.append(row) self._d_nnz = tuple(s._d_nnz for s in self) @@ -233,26 +225,12 @@ def dsets(self): return self._dsets @utils.cached_property - def maps(self): - """A list of pairs (rmap, cmap) where each pair of - :class:`Map` objects will later be used to assemble into this - matrix. The iterset of each of the maps in a pair must be the - same, while the toset of all the maps which appear first - must be common, this will form the row :class:`Set` of the - sparsity. 
Similarly, the toset of all the maps which appear - second must be common and will form the column :class:`Set` of - the ``Sparsity``.""" - return list(zip(self._rmaps, self._cmaps)) + def rcmaps(self): + return {key: [(_rmap, _cmap) for _rmap, _cmap, _ in val] for key, val in self._maps_and_regions.items()} @utils.cached_property - def cmaps(self): - """The list of column maps this sparsity is assembled from.""" - return self._cmaps - - @utils.cached_property - def rmaps(self): - """The list of row maps this sparsity is assembled from.""" - return self._rmaps + def iteration_regions(self): + return {key: [_iteration_regions for _, _, _iteration_regions in val] for key, val in self._maps_and_regions.items()} @utils.cached_property def dims(self): @@ -296,11 +274,10 @@ def __iter__(self): yield s def __str__(self): - return "OP2 Sparsity: dsets %s, rmaps %s, cmaps %s, name %s" % \ - (self._dsets, self._rmaps, self._cmaps, self._name) + raise NotImplementedError def __repr__(self): - return "Sparsity(%r, %r, %r)" % (self.dsets, self.maps, self.name) + raise NotImplementedError @utils.cached_property def nnz(self): @@ -332,12 +309,14 @@ def __contains__(self, other): """Return true if other is a pair of maps in self.maps(). This will also return true if the elements of other have parents in self.maps().""" - - for maps in self.maps: - if tuple(other) <= maps: - return True - - return False + for i, rm in enumerate(other[0]): + for j, cm in enumerate(other[1]): + for maps in self.rcmaps[(i, j)]: + if (rm, cm) <= maps: + break + else: + return False + return True class SparsityBlock(Sparsity): @@ -357,13 +336,11 @@ def __init__(self, parent, i, j): return self._dsets = (parent.dsets[0][i], parent.dsets[1][j]) - self._rmaps = tuple(m.split[i] for m in parent.rmaps) - self._cmaps = tuple(m.split[j] for m in parent.cmaps) + self._maps_and_regions = {(0, 0): parent._maps_and_regions[(i, j)]} self._has_diagonal = i == j and parent._has_diagonal self._parent = parent self._dims = tuple([tuple([parent.dims[i][j]])]) self._blocks = [[self]] - self.iteration_regions = parent.iteration_regions self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) # TODO: think about lcomm != rcomm @@ -695,8 +672,8 @@ def _init_monolithic(self): for j in range(cols): sparsity.fill_with_zeros(self[i, j].handle, self[i, j].sparsity.dims[0][0], - self[i, j].sparsity.maps, - self[i, j].sparsity.iteration_regions, + self[i, j].sparsity.rcmaps[(0, 0)], + self[i, j].sparsity.iteration_regions[(0, 0)], set_diag=self[i, j].sparsity._has_diagonal) self[i, j].handle.assemble() @@ -769,7 +746,7 @@ def _init_block(self): # Put zeros in all the places we might eventually put a value. 
with profiling.timed_region("MatZeroInitial"): sparsity.fill_with_zeros(mat, self.sparsity.dims[0][0], - self.sparsity.maps, self.sparsity.iteration_regions, + self.sparsity.rcmaps[(0, 0)], self.sparsity.iteration_regions[(0, 0)], set_diag=self.sparsity._has_diagonal) mat.assemble() mat.setOption(mat.Option.NEW_NONZERO_LOCATION_ERR, True) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index b89ac09cd..6d9a8d4df 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -1011,44 +1011,44 @@ def test_sparsity_illegal_name(self, di, mi): def test_sparsity_single_dset(self, di, mi): "Sparsity constructor should accept single Map and turn it into tuple" s = op2.Sparsity(di, mi, name="foo") - assert (s.maps[0] == (mi, mi) and s.dims[0][0] == (1, 1) + assert (s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) and s.name == "foo" and s.dsets == (di, di)) def test_sparsity_set_not_dset(self, di, mi): "If we pass a Set, not a DataSet, it default to dimension 1." s = op2.Sparsity(mi.toset, mi) - assert s.maps[0] == (mi, mi) and s.dims[0][0] == (1, 1) \ + assert s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) \ and s.dsets == (di, di) def test_sparsity_map_pair(self, di, mi): "Sparsity constructor should accept a pair of maps" s = op2.Sparsity((di, di), (mi, mi), name="foo") - assert (s.maps[0] == (mi, mi) and s.dims[0][0] == (1, 1) + assert (s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) and s.name == "foo" and s.dsets == (di, di)) def test_sparsity_map_pair_different_dataset(self, mi, md, di, dd, m_iterset_toset): """Sparsity can be built from different row and column maps as long as the tosets match the row and column DataSet.""" s = op2.Sparsity((di, dd), (m_iterset_toset, md), name="foo") - assert (s.maps[0] == (m_iterset_toset, md) and s.dims[0][0] == (1, 1) + assert (s.rcmaps[(0, 0)][0] == (m_iterset_toset, md) and s.dims[0][0] == (1, 1) and s.name == "foo" and s.dsets == (di, dd)) def test_sparsity_unique_map_pairs(self, mi, di): "Sparsity constructor should filter duplicate tuples of pairs of maps." s = op2.Sparsity((di, di), ((mi, mi), (mi, mi)), name="foo") - assert s.maps == [(mi, mi)] and s.dims[0][0] == (1, 1) + assert s.rcmaps[(0, 0)] == [(mi, mi)] and s.dims[0][0] == (1, 1) def test_sparsity_map_pairs_different_itset(self, mi, di, dd, m_iterset_toset): "Sparsity constructor should accept maps with different iteration sets" maps = ((m_iterset_toset, m_iterset_toset), (mi, mi)) s = op2.Sparsity((di, di), maps, name="foo") - assert frozenset(s.maps) == frozenset(maps) and s.dims[0][0] == (1, 1) + assert frozenset(s.rcmaps[(0, 0)]) == frozenset(maps) and s.dims[0][0] == (1, 1) def test_sparsity_map_pairs_sorted(self, mi, di, dd, m_iterset_toset): "Sparsity maps should have a deterministic order." 
s1 = op2.Sparsity((di, di), [(m_iterset_toset, m_iterset_toset), (mi, mi)]) s2 = op2.Sparsity((di, di), [(mi, mi), (m_iterset_toset, m_iterset_toset)]) - assert s1.maps == s2.maps + assert s1.rcmaps[(0, 0)] == s2.rcmaps[(0, 0)] def test_sparsity_illegal_itersets(self, mi, md, di, dd): "Both maps in a (rmap,cmap) tuple must have same iteration set" @@ -1087,25 +1087,21 @@ def test_sparsity_mmap_iter(self, ms): def test_sparsity_mmap_getitem(self, ms): """Sparsity block i, j should be defined on the corresponding row and column DataSets and Maps.""" - rmaps, = ms.rmaps - cmaps, = ms.cmaps - for i, (rds, rm) in enumerate(zip(ms.dsets[0], rmaps)): - for j, (cds, cm) in enumerate(zip(ms.dsets[1], cmaps)): + for i, rds in enumerate(ms.dsets[0]): + for j, cds in enumerate(ms.dsets[1]): block = ms[i, j] # Indexing with a tuple and double index is equivalent assert block == ms[i][j] assert (block.dsets == (rds, cds) - and block.maps == [(rm, cm)]) + and block.rcmaps[(0, 0)] == ms.rcmaps[(i, j)]) def test_sparsity_mmap_getrow(self, ms): """Indexing a Sparsity with a single index should yield a row of blocks.""" - rmaps, = ms.rmaps - cmaps, = ms.cmaps - for i, (rds, rm) in enumerate(zip(ms.dsets[0], rmaps)): - for j, (s, cds, cm) in enumerate(zip(ms[i], ms.dsets[1], cmaps)): + for i, rds in enumerate(ms.dsets[0]): + for j, (s, cds) in enumerate(zip(ms[i], ms.dsets[1])): assert (s.dsets == (rds, cds) - and s.maps == [(rm, cm)]) + and s.rcmaps[(0, 0)] == ms.rcmaps[(i, j)]) def test_sparsity_mmap_shape(self, ms): "Sparsity shape of should be the sizes of the mixed space." @@ -1133,20 +1129,6 @@ def test_sparsity_mmap_illegal_col_datasets(self, m_iterset_toset, op2.Sparsity((mds, mds), (op2.MixedMap((m_set_toset, m_set_toset)), op2.MixedMap((m_iterset_toset, m_iterset_set)))) - def test_sparsity_repr(self, sparsity): - "Sparsity should have the expected repr." - - # Note: We can't actually reproduce a Sparsity from its repr because - # the Sparsity constructor checks that the maps are populated - r = "Sparsity(%r, %r, %r)" % (sparsity.dsets, sparsity.maps, sparsity.name) - assert repr(sparsity) == r - - def test_sparsity_str(self, sparsity): - "Sparsity should have the expected string representation." 
- s = "OP2 Sparsity: dsets %s, rmaps %s, cmaps %s, name %s" % \ - (sparsity.dsets, sparsity.rmaps, sparsity.cmaps, sparsity.name) - assert str(sparsity) == s - class TestMatAPI: @@ -1600,7 +1582,7 @@ def test_illegal_mat_iterset(self, sparsity): set from the par_loop's.""" set1 = op2.Set(2) m = op2.Mat(sparsity) - rmap, cmap = sparsity.maps[0] + rmap, cmap = sparsity.rcmaps[(0, 0)][0] kernel = op2.Kernel("static void k() { }", "k") with pytest.raises(exceptions.MapValueError): op2.par_loop( From 3e436eef2d5808cbdb910e154346e7800e08f81c Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Mon, 11 Dec 2023 20:54:49 +0000 Subject: [PATCH 3/6] sparsity: change interface to directly accept maps_and_regions - deprecate some convenience Sparsity construction features --- pyop2/sparsity.pyx | 14 ++-- pyop2/types/mat.py | 155 ++++++++++++++---------------------- test/unit/test_api.py | 92 +++++++++++---------- test/unit/test_caching.py | 60 +++++++------- test/unit/test_extrusion.py | 3 +- test/unit/test_matrices.py | 22 ++--- test/unit/test_subset.py | 2 +- 7 files changed, 159 insertions(+), 189 deletions(-) diff --git a/pyop2/sparsity.pyx b/pyop2/sparsity.pyx index 1d02c1e1a..3b41e4c15 100644 --- a/pyop2/sparsity.pyx +++ b/pyop2/sparsity.pyx @@ -144,8 +144,8 @@ def build_sparsity(sparsity): if mixed: for i, r in enumerate(rset): for j, c in enumerate(cset): - maps = sparsity.rcmaps[(i, j)] - iter_regions = sparsity.iteration_regions[(i, j)] + maps = sparsity.rcmaps.get((i, j), []) + iter_regions = sparsity.iteration_regions.get((i, j), []) mat = preallocator.getLocalSubMatrix(isrow=rset.local_ises[i], iscol=cset.local_ises[j]) fill_with_zeros(mat, (r.cdim, c.cdim), @@ -159,8 +159,10 @@ def build_sparsity(sparsity): preallocator.assemble() nnz, onnz = get_preallocation(preallocator, nrows) else: - fill_with_zeros(preallocator, (1, 1), sparsity.rcmaps[(0, 0)], - sparsity.iteration_regions[(0, 0)], set_diag=sparsity._has_diagonal) + fill_with_zeros(preallocator, (1, 1), + sparsity.rcmaps[(0, 0)], + sparsity.iteration_regions[(0, 0)], + set_diag=sparsity._has_diagonal) preallocator.assemble() nnz, onnz = get_preallocation(preallocator, nrows) if not (sparsity._block_sparse and rset.cdim == cset.cdim): @@ -215,8 +217,10 @@ def fill_with_zeros(PETSc.Mat mat not None, dims, maps, iteration_regions, set_d if i < ncol // cdim: CHKERR(MatSetValuesBlockedLocal(mat.mat, 1, &i, 1, &i, diag_values, PETSC_INSERT_VALUES)) CHKERR(PetscFree(diag_values)) + if len(maps) == 0: + return extruded = maps[0][0].iterset._extruded - for iteration_region, pair in zip(iteration_regions, maps): + for pair, iteration_region in zip(maps, iteration_regions): # Iterate over row map values including value entries set_size = pair[0].iterset.size if set_size == 0: diff --git a/pyop2/types/mat.py b/pyop2/types/mat.py index 3b4472e09..23574ee51 100644 --- a/pyop2/types/mat.py +++ b/pyop2/types/mat.py @@ -1,6 +1,7 @@ import abc import ctypes import itertools +from collections.abc import Sequence import numpy as np from petsc4py import PETSc @@ -19,34 +20,33 @@ from pyop2.types.data_carrier import DataCarrier from pyop2.types.dataset import DataSet, GlobalDataSet, MixedDataSet from pyop2.types.map import Map, ComposedMap -from pyop2.types.set import MixedSet, Set, Subset +from pyop2.types.set import MixedSet, Subset class Sparsity(caching.ObjectCached): - """OP2 Sparsity, the non-zero structure a matrix derived from the union of - the outer product of pairs of :class:`Map` objects. 
+ """OP2 Sparsity, the non-zero structure of a matrix derived from the block-wise specified pairs of :class:`Map` objects. Examples of constructing a Sparsity: :: - Sparsity(single_dset, single_map, 'mass') - Sparsity((row_dset, col_dset), (single_rowmap, single_colmap)) Sparsity((row_dset, col_dset), - [(first_rowmap, first_colmap), (second_rowmap, second_colmap)]) + [(first_rowmap, first_colmap), (second_rowmap, second_colmap), None]) .. _MatMPIAIJSetPreallocation: http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatMPIAIJSetPreallocation.html """ - def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, block_sparse=None): + def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=None): r""" :param dsets: :class:`DataSet`\s for the left and right function spaces this :class:`Sparsity` maps between - :param maps: :class:`Map`\s to build the :class:`Sparsity` from - :type maps: a pair of :class:`Map`\s specifying a row map and a column - map, or an iterable of pairs of :class:`Map`\s specifying multiple - row and column maps - if a single :class:`Map` is passed, it is - used as both a row map and a column map - :param iteration_regions: regions that select subsets of extruded maps to iterate over. + :param maps_and_regions: `dict` to build the :class:`Sparsity` from. + ``maps_and_regions`` must be keyed by the block index pair (i, j). + ``maps_and_regions[(i, j)]`` must be a list of tuples of + ``(rmap, cmap, iteration_regions)``, where ``rmap`` and ``cmap`` + is a pair of :class:`Map`\s specifying a row map and a column map, + and ``iteration_regions`` represent regions that select subsets + of extruded maps to iterate over. If the matrix only has a single + block, one can altenatively pass the value ``maps_and_regions[(0, 0)]``. :param string name: user-defined label (optional) :param nest: Should the sparsity over mixed set be built as nested blocks? 
:param block_sparse: Should the sparsity for datasets with @@ -56,19 +56,7 @@ def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, if self._initialized: return self._dsets = dsets - maps, iteration_regions = zip(*maps) - _rmaps, _cmaps = zip(*maps) - _maps_and_regions = {} - if isinstance(dsets[0], MixedDataSet) or isinstance(dsets[1], MixedDataSet): - rset, cset = dsets - for i, _ in enumerate(rset): - for j, _ in enumerate(cset): - _maps_and_regions[(i, j)] = list(zip((rm.split[i] for rm in _rmaps), - (cm.split[j] for cm in _cmaps), - iteration_regions)) - else: - _maps_and_regions[(0, 0)] = list(zip(_rmaps, _cmaps, iteration_regions)) - self._maps_and_regions = _maps_and_regions + self._maps_and_regions = maps_and_regions self._block_sparse = block_sparse self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) @@ -99,8 +87,7 @@ def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, for i, rds in enumerate(dsets[0]): row = [] for j, cds in enumerate(dsets[1]): - row.append(Sparsity((rds, cds), self.rcmaps[(i, j)], - iteration_regions=self.iteration_regions[(i, j)], + row.append(Sparsity((rds, cds), tuple(self._maps_and_regions[(i, j)]) if (i, j) in self._maps_and_regions else (), block_sparse=block_sparse)) self._blocks.append(row) self._d_nnz = tuple(s._d_nnz for s in self) @@ -125,69 +112,49 @@ def __init__(self, dsets, maps, *, iteration_regions=None, name=None, nest=None, _cache = {} @classmethod - @utils.validate_type(('dsets', (Set, DataSet, tuple, list), ex.DataSetTypeError), - ('maps', (Map, tuple, list), ex.MapTypeError)) - def _process_args(cls, dsets, maps, *, iteration_regions=None, name=None, nest=None, block_sparse=None): - "Turn maps argument into a canonical tuple of pairs." + @utils.validate_type(('name', str, ex.NameTypeError)) + def _process_args(cls, dsets, maps_and_regions, name=None, nest=None, block_sparse=None): from pyop2.types import IterationRegion - # A single data set becomes a pair of identical data sets - dsets = [dsets, dsets] if isinstance(dsets, (Set, DataSet)) else list(dsets) - # Upcast Sets to DataSets - dsets = [s ** 1 if isinstance(s, Set) else s for s in dsets] - - # Check data sets are valid + if len(dsets) != 2: + raise RuntimeError(f"dsets must be a tuple of two DataSets: got {dsets}") for dset in dsets: if not isinstance(dset, DataSet) and dset is not None: raise ex.DataSetTypeError("All data sets must be of type DataSet, not type %r" % type(dset)) - - # A single map becomes a pair of identical maps - maps = (maps, maps) if isinstance(maps, Map) else maps - # A single pair becomes a tuple of one pair - maps = (maps,) if isinstance(maps[0], Map) else maps - - # Check maps are sane - for pair in maps: - if pair[0] is None or pair[1] is None: - # None of this checking makes sense if one of the - # matrix operands is a Global. 
- continue - for m in pair: - if not isinstance(m, Map): - raise ex.MapTypeError( - "All maps must be of type map, not type %r" % type(m)) - if not isinstance(m, ComposedMap) and len(m.values_with_halo) == 0 and m.iterset.total_size > 0: - raise ex.MapValueError( - "Unpopulated map values when trying to build sparsity.") - # Make sure that the "to" Set of each map in a pair is the set of - # the corresponding DataSet set - if not (pair[0].toset == dsets[0].set - and pair[1].toset == dsets[1].set): - raise RuntimeError("Map to set must be the same as corresponding DataSet set") - - # Each pair of maps must have the same from-set (iteration set) - if not pair[0].iterset == pair[1].iterset: - raise RuntimeError("Iterset of both maps in a pair must be the same") - - rmaps, cmaps = zip(*maps) - if iteration_regions is None: - iteration_regions = tuple((IterationRegion.ALL, ) for _ in maps) - else: - iteration_regions = tuple(tuple(sorted(region)) for region in iteration_regions) - if not len(rmaps) == len(cmaps): - raise RuntimeError("Must pass equal number of row and column maps") - - if rmaps[0] is not None and cmaps[0] is not None: - # Each row map must have the same to-set (data set) - if not all(m.toset == rmaps[0].toset for m in rmaps): - raise RuntimeError("To set of all row maps must be the same") - - # Each column map must have the same to-set (data set) - if not all(m.toset == cmaps[0].toset for m in cmaps): - raise RuntimeError("To set of all column maps must be the same") - + if isinstance(maps_and_regions, Sequence): + # Convert short-hand notation to generic one. + maps_and_regions = {(0, 0): maps_and_regions} + elif not isinstance(maps_and_regions, dict): + raise TypeError(f"maps_and_regions must be dict or Sequence: got {type(maps_and_regions)}") + processed_maps_and_regions = {(i, j): frozenset() for i, _ in enumerate(dsets[0]) for j, _ in enumerate(dsets[1])} + for key, val in maps_and_regions.items(): + i, j = key # block indices: (0, 0) if not mixed + if i >= len(dsets[0]) or j >= len(dsets[1]): + raise RuntimeError(f"(i, j) must be < {(len(dsets[0]), len(dsets[1]))}: got {(i, j)}") + processed_val = set() + for rmap, cmap, iteration_regions in set(val): + if not isinstance(dsets[0][i], GlobalDataSet) and not isinstance(dsets[1][j], GlobalDataSet): + for m in [rmap, cmap]: + if not isinstance(m, Map): + raise ex.MapTypeError( + "All maps must be of type map, not type %r" % type(m)) + if not isinstance(m, ComposedMap) and len(m.values_with_halo) == 0 and m.iterset.total_size > 0: + raise ex.MapValueError( + "Unpopulated map values when trying to build sparsity.") + if rmap.toset is not dsets[0][i].set or cmap.toset is not dsets[1][j].set: + raise RuntimeError("Map toset must be the same as DataSet set") + if rmap.iterset is not cmap.iterset: + raise RuntimeError("Iterset of both maps in a pair must be the same") + if iteration_regions is None: + iteration_regions = (IterationRegion.ALL, ) + else: + iteration_regions = tuple(sorted(iteration_regions)) + processed_val.update(((rmap, cmap, iteration_regions), )) + if len(processed_val) > 0: + processed_maps_and_regions[key] = frozenset(processed_val) + processed_maps_and_regions = dict(sorted(processed_maps_and_regions.items())) # Need to return the caching object, a tuple of the processed - # arguments and a dict of kwargs (empty in this case) + # arguments and a dict of kwargs. 
if isinstance(dsets[0], GlobalDataSet): cache = None elif isinstance(dsets[0].set, MixedSet): @@ -198,16 +165,14 @@ def _process_args(cls, dsets, maps, *, iteration_regions=None, name=None, nest=N nest = conf.configuration["matnest"] if block_sparse is None: block_sparse = conf.configuration["block_sparsity"] - - maps = frozenset(zip(maps, iteration_regions)) kwargs = {"name": name, "nest": nest, "block_sparse": block_sparse} - return (cache,) + (tuple(dsets), maps), kwargs + return (cache,) + (tuple(dsets), processed_maps_and_regions), kwargs @classmethod - def _cache_key(cls, dsets, maps, name, nest, block_sparse, *args, **kwargs): - return (dsets, maps, nest, block_sparse) + def _cache_key(cls, dsets, maps_and_regions, name, nest, block_sparse, *args, **kwargs): + return (dsets, tuple(maps_and_regions.items()), nest, block_sparse) def __getitem__(self, idx): """Return :class:`Sparsity` block with row and column given by ``idx`` @@ -274,10 +239,11 @@ def __iter__(self): yield s def __str__(self): - raise NotImplementedError + return "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s" % \ + (self._dsets, self._maps_and_regions, self._name, self._nested, self._block_sparse) def __repr__(self): - raise NotImplementedError + return "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r)" % (self.dsets, self._maps_and_regions, self.name, self._nested, self._block_sparse) @utils.cached_property def nnz(self): @@ -336,7 +302,7 @@ def __init__(self, parent, i, j): return self._dsets = (parent.dsets[0][i], parent.dsets[1][j]) - self._maps_and_regions = {(0, 0): parent._maps_and_regions[(i, j)]} + self._maps_and_regions = {(0, 0): tuple(parent._maps_and_regions[(i, j)]) if (i, j) in parent._maps_and_regions else ()} self._has_diagonal = i == j and parent._has_diagonal self._parent = parent self._dims = tuple([tuple([parent.dims[i][j]])]) @@ -746,7 +712,8 @@ def _init_block(self): # Put zeros in all the places we might eventually put a value. 
with profiling.timed_region("MatZeroInitial"): sparsity.fill_with_zeros(mat, self.sparsity.dims[0][0], - self.sparsity.rcmaps[(0, 0)], self.sparsity.iteration_regions[(0, 0)], + self.sparsity.rcmaps[(0, 0)], + self.sparsity.iteration_regions[(0, 0)], set_diag=self.sparsity._has_diagonal) mat.assemble() mat.setOption(mat.Option.NEW_NONZERO_LOCATION_ERR, True) diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 6d9a8d4df..1185c965f 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -155,13 +155,13 @@ def mds(dtoset, set): ('mds', 'dtoset', 'mmap', 'm_iterset_toset'), ('dtoset', 'mds', 'm_iterset_toset', 'mmap')]) def ms(request): - rds, cds, rm, cm = [request.getfixturevalue(p) for p in request.param] - return op2.Sparsity((rds, cds), (rm, cm)) + rds, cds, rmm, cmm = [request.getfixturevalue(p) for p in request.param] + return op2.Sparsity((rds, cds), {(i, j): [(rm, cm, None)] for i, rm in enumerate(rmm) for j, cm in enumerate(cmm)}) @pytest.fixture def sparsity(m_iterset_toset, dtoset): - return op2.Sparsity((dtoset, dtoset), (m_iterset_toset, m_iterset_toset)) + return op2.Sparsity((dtoset, dtoset), [(m_iterset_toset, m_iterset_toset, None)]) @pytest.fixture @@ -171,7 +171,9 @@ def mat(sparsity): @pytest.fixture def diag_mat(toset): - return op2.Mat(op2.Sparsity(toset, op2.Map(toset, toset, 1, np.arange(toset.size)))) + _d = toset ** 1 + _m = op2.Map(toset, toset, 1, np.arange(toset.size)) + return op2.Mat(op2.Sparsity((_d, _d), [(_m, _m, None)])) @pytest.fixture @@ -973,97 +975,82 @@ def dd(cls, dataset2): @pytest.fixture def s(cls, di, mi): - return op2.Sparsity(di, mi) + return op2.Sparsity((di, di), [(mi, mi, None)]) @pytest.fixture def mixed_row_sparsity(cls, dtoset, mds, m_iterset_toset, mmap): - return op2.Sparsity((mds, dtoset), (mmap, m_iterset_toset)) + return op2.Sparsity((mds, dtoset), {(0, 0): [(mmap[0], m_iterset_toset, None)], + (1, 0): [(mmap[1], m_iterset_toset, None)]}) @pytest.fixture def mixed_col_sparsity(cls, dtoset, mds, m_iterset_toset, mmap): - return op2.Sparsity((dtoset, mds), (m_iterset_toset, mmap)) + return op2.Sparsity((dtoset, mds), {(0, 0): [(m_iterset_toset, mmap[0], None)], + (0, 1): [(m_iterset_toset, mmap[1], None)]}) def test_sparsity_illegal_rdset(self, di, mi): "Sparsity rdset should be a DataSet" with pytest.raises(TypeError): - op2.Sparsity(('illegalrmap', di), (mi, mi)) + op2.Sparsity(('illegalrmap', di), [(mi, mi, None)]) def test_sparsity_illegal_cdset(self, di, mi): "Sparsity cdset should be a DataSet" with pytest.raises(TypeError): - op2.Sparsity((di, 'illegalrmap'), (mi, mi)) + op2.Sparsity((di, 'illegalrmap'), [(mi, mi, None)]) def test_sparsity_illegal_rmap(self, di, mi): "Sparsity rmap should be a Map" with pytest.raises(TypeError): - op2.Sparsity((di, di), ('illegalrmap', mi)) + op2.Sparsity((di, di), [('illegalrmap', mi, None)]) def test_sparsity_illegal_cmap(self, di, mi): "Sparsity cmap should be a Map" with pytest.raises(TypeError): - op2.Sparsity((di, di), (mi, 'illegalcmap')) + op2.Sparsity((di, di), [(mi, 'illegalcmap', None)]) def test_sparsity_illegal_name(self, di, mi): "Sparsity name should be a string." 
with pytest.raises(TypeError): - op2.Sparsity(di, mi, 0) - - def test_sparsity_single_dset(self, di, mi): - "Sparsity constructor should accept single Map and turn it into tuple" - s = op2.Sparsity(di, mi, name="foo") - assert (s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) - and s.name == "foo" and s.dsets == (di, di)) - - def test_sparsity_set_not_dset(self, di, mi): - "If we pass a Set, not a DataSet, it default to dimension 1." - s = op2.Sparsity(mi.toset, mi) - assert s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) \ - and s.dsets == (di, di) - - def test_sparsity_map_pair(self, di, mi): - "Sparsity constructor should accept a pair of maps" - s = op2.Sparsity((di, di), (mi, mi), name="foo") - assert (s.rcmaps[(0, 0)][0] == (mi, mi) and s.dims[0][0] == (1, 1) - and s.name == "foo" and s.dsets == (di, di)) + op2.Sparsity((di, di), [(mi, mi, None)], 0) def test_sparsity_map_pair_different_dataset(self, mi, md, di, dd, m_iterset_toset): """Sparsity can be built from different row and column maps as long as the tosets match the row and column DataSet.""" - s = op2.Sparsity((di, dd), (m_iterset_toset, md), name="foo") + s = op2.Sparsity((di, dd), [(m_iterset_toset, md, None)], name="foo") assert (s.rcmaps[(0, 0)][0] == (m_iterset_toset, md) and s.dims[0][0] == (1, 1) and s.name == "foo" and s.dsets == (di, dd)) def test_sparsity_unique_map_pairs(self, mi, di): "Sparsity constructor should filter duplicate tuples of pairs of maps." - s = op2.Sparsity((di, di), ((mi, mi), (mi, mi)), name="foo") + s = op2.Sparsity((di, di), [(mi, mi, None), (mi, mi, None)], name="foo") assert s.rcmaps[(0, 0)] == [(mi, mi)] and s.dims[0][0] == (1, 1) def test_sparsity_map_pairs_different_itset(self, mi, di, dd, m_iterset_toset): "Sparsity constructor should accept maps with different iteration sets" maps = ((m_iterset_toset, m_iterset_toset), (mi, mi)) - s = op2.Sparsity((di, di), maps, name="foo") + s = op2.Sparsity((di, di), [(*maps[0], None), + (*maps[1], None)], name="foo") assert frozenset(s.rcmaps[(0, 0)]) == frozenset(maps) and s.dims[0][0] == (1, 1) def test_sparsity_map_pairs_sorted(self, mi, di, dd, m_iterset_toset): "Sparsity maps should have a deterministic order." - s1 = op2.Sparsity((di, di), [(m_iterset_toset, m_iterset_toset), (mi, mi)]) - s2 = op2.Sparsity((di, di), [(mi, mi), (m_iterset_toset, m_iterset_toset)]) + s1 = op2.Sparsity((di, di), [(m_iterset_toset, m_iterset_toset, None), (mi, mi, None)]) + s2 = op2.Sparsity((di, di), [(mi, mi, None), (m_iterset_toset, m_iterset_toset, None)]) assert s1.rcmaps[(0, 0)] == s2.rcmaps[(0, 0)] def test_sparsity_illegal_itersets(self, mi, md, di, dd): "Both maps in a (rmap,cmap) tuple must have same iteration set" with pytest.raises(RuntimeError): - op2.Sparsity((dd, di), (md, mi)) + op2.Sparsity((dd, di), [(md, mi, None)]) def test_sparsity_illegal_row_datasets(self, mi, md, di): "All row maps must share the same data set" with pytest.raises(RuntimeError): - op2.Sparsity((di, di), ((mi, mi), (md, mi))) + op2.Sparsity((di, di), [(mi, mi, None), (md, mi, None)]) def test_sparsity_illegal_col_datasets(self, mi, md, di, dd): "All column maps must share the same data set" with pytest.raises(RuntimeError): - op2.Sparsity((di, di), ((mi, mi), (mi, md))) + op2.Sparsity((di, di), [(mi, mi, None), (mi, md, None)]) def test_sparsity_shape(self, s): "Sparsity shape of a single block should be (1, 1)." 
@@ -1111,23 +1098,40 @@ def test_sparsity_mmap_illegal_itersets(self, m_iterset_toset, m_iterset_set, m_set_toset, m_set_set, mds): "Both maps in a (rmap,cmap) tuple must have same iteration set." + rmm = op2.MixedMap((m_iterset_toset, m_iterset_set)) + cmm = op2.MixedMap((m_set_toset, m_set_set)) with pytest.raises(RuntimeError): - op2.Sparsity((mds, mds), (op2.MixedMap((m_iterset_toset, m_iterset_set)), - op2.MixedMap((m_set_toset, m_set_set)))) + op2.Sparsity((mds, mds), {(i, j): [(rm, cm, None)] for i, rm in enumerate(rmm) for j, cm in enumerate(cmm)}) def test_sparsity_mmap_illegal_row_datasets(self, m_iterset_toset, m_iterset_set, m_set_toset, mds): "All row maps must share the same data set." + rmm = op2.MixedMap((m_iterset_toset, m_iterset_set)) + cmm = op2.MixedMap((m_set_toset, m_set_toset)) with pytest.raises(RuntimeError): - op2.Sparsity((mds, mds), (op2.MixedMap((m_iterset_toset, m_iterset_set)), - op2.MixedMap((m_set_toset, m_set_toset)))) + op2.Sparsity((mds, mds), {(i, j): [(rm, cm, None)] for i, rm in enumerate(rmm) for j, cm in enumerate(cmm)}) def test_sparsity_mmap_illegal_col_datasets(self, m_iterset_toset, m_iterset_set, m_set_toset, mds): "All column maps must share the same data set." + rmm = op2.MixedMap((m_set_toset, m_set_toset)) + cmm = op2.MixedMap((m_iterset_toset, m_iterset_set)) with pytest.raises(RuntimeError): - op2.Sparsity((mds, mds), (op2.MixedMap((m_set_toset, m_set_toset)), - op2.MixedMap((m_iterset_toset, m_iterset_set)))) + op2.Sparsity((mds, mds), {(i, j): [(rm, cm, None)] for i, rm in enumerate(rmm) for j, cm in enumerate(cmm)}) + + def test_sparsity_repr(self, sparsity): + "Sparsity should have the expected repr." + + # Note: We can't actually reproduce a Sparsity from its repr because + # the Sparsity constructor checks that the maps are populated + r = "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r)" % (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse) + assert repr(sparsity) == r + + def test_sparsity_str(self, sparsity): + "Sparsity should have the expected string representation." 
+ s = "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s" % \ + (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse) + assert str(sparsity) == s class TestMatAPI: diff --git a/test/unit/test_caching.py b/test/unit/test_caching.py index 1b95abebc..40c4256fb 100644 --- a/test/unit/test_caching.py +++ b/test/unit/test_caching.py @@ -233,44 +233,46 @@ def test_mixeddataset_cache_miss(self, base_set, base_set2): assert not mds2 == mds3 def test_sparsity_cache_hit(self, base_set, base_map): - dsets = (base_set, base_set) + dsets = (base_set ** 1, base_set ** 1) maps = (base_map, base_map) - sp = op2.Sparsity(dsets, maps) - sp2 = op2.Sparsity(dsets, maps) + sp = op2.Sparsity(dsets, [(*maps, None)]) + sp2 = op2.Sparsity(dsets, [(*maps, None)]) assert sp is sp2 assert not sp != sp2 assert sp == sp2 - dsets = op2.MixedSet([base_set, base_set]) + mixed_set = op2.MixedSet([base_set, base_set]) + dsets = (mixed_set ** 1, mixed_set ** 1) maps = op2.MixedMap([base_map, base_map]) - sp = op2.Sparsity(dsets, maps) + sp = op2.Sparsity(dsets, {(i, j): [(rm, cm, None)] for i, rm in enumerate(maps) for j, cm in enumerate(maps)}) - dsets2 = op2.MixedSet([base_set, base_set]) + mixed_set2 = op2.MixedSet([base_set, base_set]) + dsets2 = (mixed_set2 ** 1, mixed_set2 ** 1) maps2 = op2.MixedMap([base_map, base_map]) - sp2 = op2.Sparsity(dsets2, maps2) + sp2 = op2.Sparsity(dsets2, {(i, j): [(rm, cm, None)] for i, rm in enumerate(maps2) for j, cm in enumerate(maps2)}) assert sp is sp2 assert not sp != sp2 assert sp == sp2 def test_sparsity_cache_miss(self, base_set, base_set2, base_map, base_map2): - dsets = (base_set, base_set) + dsets = (base_set ** 1, base_set ** 1) maps = (base_map, base_map) - sp = op2.Sparsity(dsets, maps, iteration_regions=[(op2.ALL, )]) + sp = op2.Sparsity(dsets, [(*maps, (op2.ALL, ))]) - dsets2 = op2.MixedSet([base_set, base_set]) + mixed_set = op2.MixedSet([base_set, base_set]) + dsets2 = (mixed_set ** 1, mixed_set ** 1) maps2 = op2.MixedMap([base_map, base_map]) - sp2 = op2.Sparsity(dsets2, maps2, iteration_regions=[(op2.ALL, )]) + sp2 = op2.Sparsity(dsets2, {(i, j): [(rm, cm, (op2.ALL, ))] for i, rm in enumerate(maps2) for j, cm in enumerate(maps2)}) assert sp is not sp2 assert sp != sp2 assert not sp == sp2 - dsets2 = (base_set, base_set2) + dsets2 = (base_set ** 1, base_set2 ** 1) maps2 = (base_map, base_map2) - - sp2 = op2.Sparsity(dsets2, maps2, iteration_regions=[(op2.ALL, )]) + sp2 = op2.Sparsity(dsets2, [(*maps2, (op2.ALL, ))]) assert sp is not sp2 assert sp != sp2 assert not sp == sp2 @@ -486,44 +488,38 @@ def m2(cls, s1, s2): def test_sparsities_differing_maps_not_cached(self, m1, m2, ds2): """Sparsities with different maps should not share a C handle.""" - sp1 = op2.Sparsity(ds2, m1) - sp2 = op2.Sparsity(ds2, m2) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m1, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m2, m2, None)]) assert sp1 is not sp2 def test_sparsities_differing_map_pairs_not_cached(self, m1, m2, ds2): """Sparsities with different maps should not share a C handle.""" - sp1 = op2.Sparsity((ds2, ds2), (m1, m2)) - sp2 = op2.Sparsity((ds2, ds2), (m2, m1)) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m2, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m2, m1, None)]) assert sp1 is not sp2 def test_sparsities_differing_map_tuples_not_cached(self, m1, m2, ds2): """Sparsities with different maps should not share a C handle.""" - sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2))) - sp2 = op2.Sparsity((ds2, ds2), 
((m2, m2), (m2, m2))) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m1, None), (m2, m2, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m2, m2, None), (m2, m2, None)]) assert sp1 is not sp2 - def test_sparsities_same_map_cached(self, m1, ds2): - """Sparsities with the same map should share a C handle.""" - sp1 = op2.Sparsity(ds2, m1) - sp2 = op2.Sparsity(ds2, m1) - assert sp1 is sp2 - def test_sparsities_same_map_pair_cached(self, m1, ds2): """Sparsities with the same map pair should share a C handle.""" - sp1 = op2.Sparsity((ds2, ds2), (m1, m1)) - sp2 = op2.Sparsity((ds2, ds2), (m1, m1)) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m1, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m1, m1, None)]) assert sp1 is sp2 def test_sparsities_same_map_tuple_cached(self, m1, m2, ds2): "Sparsities with the same tuple of map pairs should share a C handle." - sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2))) - sp2 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2))) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m1, None), (m2, m2, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m1, m1, None), (m2, m2, None)]) assert sp1 is sp2 def test_sparsities_different_ordered_map_tuple_cached(self, m1, m2, ds2): "Sparsities with the same tuple of map pairs should share a C handle." - sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2))) - sp2 = op2.Sparsity((ds2, ds2), ((m2, m2), (m1, m1))) + sp1 = op2.Sparsity((ds2, ds2), [(m1, m1, None), (m2, m2, None)]) + sp2 = op2.Sparsity((ds2, ds2), [(m2, m2, None), (m1, m1, None)]) assert sp1 is sp2 diff --git a/test/unit/test_extrusion.py b/test/unit/test_extrusion.py index 69ee5bf1f..7a24d581b 100644 --- a/test/unit/test_extrusion.py +++ b/test/unit/test_extrusion.py @@ -295,8 +295,7 @@ def xtr_elem_node(xtr_elements, xtr_nodes): @pytest.fixture def xtr_mat(xtr_elem_node, xtr_dnodes): - sparsity = op2.Sparsity((xtr_dnodes, xtr_dnodes), ( - xtr_elem_node, xtr_elem_node), "xtr_sparsity") + sparsity = op2.Sparsity((xtr_dnodes, xtr_dnodes), {(0, 0): [(xtr_elem_node, xtr_elem_node, None, None)]}, "xtr_sparsity") return op2.Mat(sparsity, valuetype, "xtr_mat") diff --git a/test/unit/test_matrices.py b/test/unit/test_matrices.py index 6ae7fb284..306637ee7 100644 --- a/test/unit/test_matrices.py +++ b/test/unit/test_matrices.py @@ -89,7 +89,7 @@ def elem_node(elements, nodes): @pytest.fixture def mat(elem_node, dnodes): - sparsity = op2.Sparsity((dnodes, dnodes), (elem_node, elem_node), name="sparsity") + sparsity = op2.Sparsity((dnodes, dnodes), [(elem_node, elem_node, None)], name="sparsity") return op2.Mat(sparsity, valuetype, "mat") @@ -525,17 +525,17 @@ def mmap(mset): @pytest.fixture def msparsity(mset, mmap): - return op2.Sparsity(mset, mmap) + return op2.Sparsity((mset ** 1, mset ** 1), {(i, j): [(rm, cm, None)] for i, rm in enumerate(mmap) for j, cm in enumerate(mmap)}) @pytest.fixture def non_nest_mixed_sparsity(mset, mmap): - return op2.Sparsity(mset, mmap, nest=False) + return op2.Sparsity((mset ** 1, mset ** 1), {(i, j): [(rm, cm, None)] for i, rm in enumerate(mmap) for j, cm in enumerate(mmap)}, nest=False) @pytest.fixture def mvsparsity(mset, mmap): - return op2.Sparsity(mset ** 2, mmap) + return op2.Sparsity((mset ** 2, mset ** 2), {(i, j): [(rm, cm, None)] for i, rm in enumerate(mmap) for j, cm in enumerate(mmap)}) class TestSparsity: @@ -549,7 +549,7 @@ def test_sparsity_null_maps(self): s = op2.Set(5) with pytest.raises(MapValueError): m = op2.Map(s, s, 1) - op2.Sparsity((s, s), (m, m)) + op2.Sparsity((s ** 1, s ** 1), [(m, m, None)]) def test_sparsity_has_diagonal_space(self): # A sparsity 
should have space for diagonal entries if rmap==cmap @@ -558,8 +558,8 @@ def test_sparsity_has_diagonal_space(self): m = op2.Map(s, d, 2, [1, 3]) d2 = op2.Set(4) m2 = op2.Map(s, d2, 3, [1, 2, 3]) - sparsity = op2.Sparsity((d, d), (m, m)) - sparsity2 = op2.Sparsity((d, d2), (m, m2)) + sparsity = op2.Sparsity((d ** 1, d ** 1), [(m, m, None)]) + sparsity2 = op2.Sparsity((d ** 1, d2 ** 1), [(m, m2, None)]) assert all(sparsity.nnz == [1, 2, 1, 2]) assert all(sparsity2.nnz == [0, 3, 0, 3]) @@ -581,7 +581,7 @@ def test_invalid_mode(self, elements, elem_node, mat, mode): @pytest.mark.parametrize('n', [1, 2]) def test_mat_set_diagonal(self, nodes, elem_node, n): "Set the diagonal of the entire matrix to 1.0" - mat = op2.Mat(op2.Sparsity(nodes**n, elem_node), valuetype) + mat = op2.Mat(op2.Sparsity((nodes ** n, nodes ** n), [(elem_node, elem_node, None)]), valuetype) nrows = mat.nblock_rows mat.set_local_diagonal_entries(list(range(nrows))) mat.assemble() @@ -590,7 +590,7 @@ def test_mat_set_diagonal(self, nodes, elem_node, n): @pytest.mark.parametrize('n', [1, 2]) def test_mat_repeated_set_diagonal(self, nodes, elem_node, n): "Set the diagonal of the entire matrix to 1.0" - mat = op2.Mat(op2.Sparsity(nodes**n, elem_node), valuetype) + mat = op2.Mat(op2.Sparsity((nodes ** n, nodes ** n), [(elem_node, elem_node, None)]), valuetype) nrows = mat.nblock_rows mat.set_local_diagonal_entries(list(range(nrows))) mat.assemble() @@ -606,7 +606,7 @@ def test_mat_always_has_diagonal_space(self): m = op2.Map(s, d, 1, [2]) d2 = op2.Set(3) m2 = op2.Map(s, d2, 1, [1]) - sparsity = op2.Sparsity((d, d2), (m, m2)) + sparsity = op2.Sparsity((d ** 1, d2 ** 1), [(m, m2, None)]) from petsc4py import PETSc # petsc4py default error handler swallows SETERRQ, so just @@ -628,7 +628,7 @@ def test_minimal_zero_mat(self): nelems = 128 set = op2.Set(nelems) map = op2.Map(set, set, 1, np.array(list(range(nelems)), np.uint32)) - sparsity = op2.Sparsity((set, set), (map, map)) + sparsity = op2.Sparsity((set ** 1, set ** 1), [(map, map, None)]) mat = op2.Mat(sparsity, np.float64) kernel = op2.Kernel(zero_mat_code, "zero_mat") op2.par_loop(kernel, set, diff --git a/test/unit/test_subset.py b/test/unit/test_subset.py index 33936df7d..ebd824a31 100644 --- a/test/unit/test_subset.py +++ b/test/unit/test_subset.py @@ -217,7 +217,7 @@ def test_matrix(self): dat = op2.Dat(idset ** 1, data=[0, 1], dtype=np.float64) map = op2.Map(iterset, indset, 4, [0, 1, 2, 3, 0, 1, 2, 3]) idmap = op2.Map(iterset, idset, 1, [0, 1]) - sparsity = op2.Sparsity((indset, indset), (map, map)) + sparsity = op2.Sparsity((indset ** 1, indset ** 1), {(0, 0): [(map, map, None)]}) mat = op2.Mat(sparsity, np.float64) mat01 = op2.Mat(sparsity, np.float64) mat10 = op2.Mat(sparsity, np.float64) From dc513fbc0d263d860e492fa7519757eaa830beee Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Wed, 20 Dec 2023 18:16:35 +0000 Subject: [PATCH 4/6] sparsity: add diagonal_block to kwargs --- pyop2/types/mat.py | 23 ++++++++++++++--------- test/unit/test_api.py | 6 +++--- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/pyop2/types/mat.py b/pyop2/types/mat.py index 23574ee51..94a34564e 100644 --- a/pyop2/types/mat.py +++ b/pyop2/types/mat.py @@ -35,7 +35,7 @@ class Sparsity(caching.ObjectCached): .. 
_MatMPIAIJSetPreallocation: http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatMPIAIJSetPreallocation.html """ - def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=None): + def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=None, diagonal_block=True): r""" :param dsets: :class:`DataSet`\s for the left and right function spaces this :class:`Sparsity` maps between @@ -51,6 +51,8 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N :param nest: Should the sparsity over mixed set be built as nested blocks? :param block_sparse: Should the sparsity for datasets with cdim > 1 be built as a block sparsity? + :param diagonal_block: Flag indicating whether this sparsity is for + a matrix/submatrix located on the diagonal. """ # Protect against re-initialization when retrieved from cache if self._initialized: @@ -58,6 +60,7 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N self._dsets = dsets self._maps_and_regions = maps_and_regions self._block_sparse = block_sparse + self._diagonal_block = diagonal_block self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) if isinstance(dsets[0], GlobalDataSet) or isinstance(dsets[1], GlobalDataSet): @@ -66,7 +69,7 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N self._o_nnz = None else: rset, cset = self.dsets - self._has_diagonal = (rset == cset) + self._has_diagonal = (rset == cset) and diagonal_block tmp = itertools.product([x.cdim for x in self.dsets[0]], [x.cdim for x in self.dsets[1]]) dims = [[None for _ in range(self.shape[1])] for _ in range(self.shape[0])] @@ -88,7 +91,8 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N row = [] for j, cds in enumerate(dsets[1]): row.append(Sparsity((rds, cds), tuple(self._maps_and_regions[(i, j)]) if (i, j) in self._maps_and_regions else (), - block_sparse=block_sparse)) + block_sparse=block_sparse, + diagonal_block=(dsets[0] is dsets[1] and i == j))) self._blocks.append(row) self._d_nnz = tuple(s._d_nnz for s in self) self._o_nnz = tuple(s._o_nnz for s in self) @@ -113,7 +117,7 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N @classmethod @utils.validate_type(('name', str, ex.NameTypeError)) - def _process_args(cls, dsets, maps_and_regions, name=None, nest=None, block_sparse=None): + def _process_args(cls, dsets, maps_and_regions, name=None, nest=None, block_sparse=None, diagonal_block=True): from pyop2.types import IterationRegion if len(dsets) != 2: @@ -167,11 +171,12 @@ def _process_args(cls, dsets, maps_and_regions, name=None, nest=None, block_spar block_sparse = conf.configuration["block_sparsity"] kwargs = {"name": name, "nest": nest, - "block_sparse": block_sparse} + "block_sparse": block_sparse, + "diagonal_block": diagonal_block} return (cache,) + (tuple(dsets), processed_maps_and_regions), kwargs @classmethod - def _cache_key(cls, dsets, maps_and_regions, name, nest, block_sparse, *args, **kwargs): + def _cache_key(cls, dsets, maps_and_regions, name, nest, block_sparse, diagonal_block, *args, **kwargs): return (dsets, tuple(maps_and_regions.items()), nest, block_sparse) def __getitem__(self, idx): @@ -239,11 +244,11 @@ def __iter__(self): yield s def __str__(self): - return "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s" % \ - (self._dsets, self._maps_and_regions, self._name, 
self._nested, self._block_sparse) + return "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s, diagonal_block %s" % \ + (self._dsets, self._maps_and_regions, self._name, self._nested, self._block_sparse, self._diagonal_block) def __repr__(self): - return "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r)" % (self.dsets, self._maps_and_regions, self.name, self._nested, self._block_sparse) + return "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r, diagonal_block=%r)" % (self.dsets, self._maps_and_regions, self.name, self._nested, self._block_sparse, self._diagonal_block) @utils.cached_property def nnz(self): diff --git a/test/unit/test_api.py b/test/unit/test_api.py index 1185c965f..066d4aa9b 100644 --- a/test/unit/test_api.py +++ b/test/unit/test_api.py @@ -1124,13 +1124,13 @@ def test_sparsity_repr(self, sparsity): # Note: We can't actually reproduce a Sparsity from its repr because # the Sparsity constructor checks that the maps are populated - r = "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r)" % (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse) + r = "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r, diagonal_block=%r)" % (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse, sparsity._diagonal_block) assert repr(sparsity) == r def test_sparsity_str(self, sparsity): "Sparsity should have the expected string representation." - s = "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s" % \ - (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse) + s = "OP2 Sparsity: dsets %s, maps_and_regions %s, name %s, nested %s, block_sparse %s, diagonal_block %s" % \ + (sparsity.dsets, sparsity._maps_and_regions, sparsity.name, sparsity._nested, sparsity._block_sparse, sparsity._diagonal_block) assert str(sparsity) == s From 48a54d3dd3ffb693a8a625a673c363719edec47a Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Wed, 20 Dec 2023 23:51:39 +0000 Subject: [PATCH 5/6] sparsity: ignore negative map indices --- pyop2/sparsity.pyx | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pyop2/sparsity.pyx b/pyop2/sparsity.pyx index 3b41e4c15..131e91888 100644 --- a/pyop2/sparsity.pyx +++ b/pyop2/sparsity.pyx @@ -267,8 +267,16 @@ def fill_with_zeros(PETSc.Mat mat not None, dims, maps, iteration_regions, set_d cset_entry = set_entry for i in range(nrcomposedmaps): rset_entry = rcomposedmaps[nrcomposedmaps - 1 - i][rset_entry] + if rset_entry < 0: + break + if rset_entry < 0: + continue for i in range(nccomposedmaps): cset_entry = ccomposedmaps[nccomposedmaps - 1 - i][cset_entry] + if cset_entry < 0: + break + if cset_entry < 0: + continue CHKERR(MatSetValuesBlockedLocal(mat.mat, rarity, &rmap[rset_entry, 0], carity, &cmap[cset_entry, 0], values, PETSC_INSERT_VALUES)) @@ -321,8 +329,16 @@ def fill_with_zeros(PETSc.Mat mat not None, dims, maps, iteration_regions, set_d cset_entry = set_entry for i in range(nrcomposedmaps): rset_entry = rcomposedmaps[nrcomposedmaps - 1 - i][rset_entry] + if rset_entry < 0: + break + if rset_entry < 0: + continue for i in range(nccomposedmaps): cset_entry = ccomposedmaps[nccomposedmaps - 1 - i][cset_entry] + if cset_entry < 0: + break + if cset_entry < 0: + continue if constant_layers: layer_start = layers[0, 0] layer_end = layers[0, 1] - 1 From d60101ba6fb6b2572f18fea9172875e53180df25 Mon Sep 17 00:00:00 2001 From: ksagiyam Date: Wed, 20 Dec 2023 
23:51:50 +0000 Subject: [PATCH 6/6] sparsity: add test --- test/unit/test_matrices.py | 40 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/test/unit/test_matrices.py b/test/unit/test_matrices.py index 306637ee7..4f8ab1d1e 100644 --- a/test/unit/test_matrices.py +++ b/test/unit/test_matrices.py @@ -39,6 +39,7 @@ from pyop2 import op2 from pyop2.exceptions import MapValueError, ModeValueError from pyop2.mpi import COMM_WORLD +from pyop2.datatypes import IntType from petsc4py.PETSc import ScalarType @@ -941,6 +942,45 @@ def test_assemble_mixed_rhs_vector(self, mset, mmap, mvdat): assert_allclose(dat[1].data_ro, exp, eps) +def test_matrices_sparsity_blockwise_specification(): + # + # 0 1 2 3 nodesetA + # x----x----x----x + # 0 1 2 setA + # + # 0 1 2 nodesetB + # x----x----x + # 0 1 setB + # + # 0 1 2 3 | 0 1 2 + # 0 x | + # 1 x | x x + # 2 x | x x x + # 3 x | x x sparsity + # ----------+------ + # 0 x x | x + # 1 x x x | x + # 2 x x | x + # + arity = 2 + setA = op2.Set(3) + nodesetA = op2.Set(4) + setB = op2.Set(2) + nodesetB = op2.Set(3) + nodesetAB = op2.MixedSet((nodesetA, nodesetB)) + datasetAB = nodesetAB ** 1 + mapA = op2.Map(setA, nodesetA, arity, values=[[0, 1], [1, 2], [2, 3]]) + mapB = op2.Map(setB, nodesetB, arity, values=[[0, 1], [1, 2]]) + mapBA = op2.Map(setB, setA, 1, values=[1, 2]) + mapAB = op2.Map(setA, setB, 1, values=[-1, 0, 1]) # "inverse" map + s = op2.Sparsity((datasetAB, datasetAB), {(1, 0): [(mapB, op2.ComposedMap(mapA, mapBA), None)], + (0, 1): [(mapA, op2.ComposedMap(mapB, mapAB), None)]}) + assert np.all(s._blocks[0][0].nnz == np.array([1, 1, 1, 1], dtype=IntType)) + assert np.all(s._blocks[0][1].nnz == np.array([0, 2, 3, 2], dtype=IntType)) + assert np.all(s._blocks[1][0].nnz == np.array([2, 3, 2], dtype=IntType)) + assert np.all(s._blocks[1][1].nnz == np.array([1, 1, 1], dtype=IntType)) + + if __name__ == '__main__': import os pytest.main(os.path.abspath(__file__))
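For reference, the interface introduced by this series can be exercised as in the minimal sketch below. It is based on the updated unit tests (e.g. test_minimal_zero_mat, test_sparsity_unique_map_pairs) and the Sparsity.__init__ docstring from PATCH 3/6; the set size and map values here are illustrative only, not taken from the patches.

    import numpy as np
    from pyop2 import op2

    nelems = 4
    s = op2.Set(nelems)
    d = s ** 1                                            # DataSet of cdim 1
    m = op2.Map(s, s, 1, np.array(list(range(nelems)), np.uint32))

    # Single-block shorthand: pass the value of maps_and_regions[(0, 0)] directly,
    # i.e. a sequence of (rmap, cmap, iteration_regions) triples; None selects
    # the default IterationRegion.ALL.
    sp = op2.Sparsity((d, d), [(m, m, None)])
    assert sp.rcmaps[(0, 0)] == [(m, m)]

    # Generic block-wise form: a dict keyed by the block index pair (i, j).
    sp2 = op2.Sparsity((d, d), {(0, 0): [(m, m, None)]})
    mat = op2.Mat(sp2, np.float64)

The dict form is the one mixed (block) sparsities use, pairing each block (i, j) only with the map pairs that actually contribute to it, as demonstrated by test_matrices_sparsity_blockwise_specification in PATCH 6/6.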