Merge pull request #521 from OP2/int64
* int64:
  Mark subblocks of monolithic matrices when calling assemble
  Introduce IntType and ScalarType and use everywhere
wence- committed Feb 24, 2017
2 parents 741a21b + 84d84ca commit afff500
Showing 6 changed files with 135 additions and 67 deletions.
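The substance of the change is that PyOP2's index arrays now use whatever integer width the installed PETSc was built with, instead of a hard-coded np.int32. A minimal illustrative check (not part of the patch; it assumes only that petsc4py and numpy are importable) of where that type comes from:

    import numpy
    from petsc4py import PETSc

    # PETSc.IntType is "int32" on a default PETSc build and "int64" on a
    # --with-64-bit-indices build; the new pyop2.datatypes module simply
    # wraps it in a numpy dtype (see the new file below).
    IntType = numpy.dtype(PETSc.IntType)
    print(IntType, "=>", IntType.itemsize * 8, "bit indices")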
31 changes: 11 additions & 20 deletions pyop2/base.py
@@ -47,6 +47,7 @@
import types
from hashlib import md5

from pyop2.datatypes import IntType, as_cstr
from pyop2.configuration import configuration
from pyop2.caching import Cached, ObjectCached
from pyop2.exceptions import *
@@ -916,7 +917,7 @@ def __init__(self, superset, indices):
'Subset construction failed, should not happen'

self._superset = superset
self._indices = verify_reshape(indices, np.int32, (len(indices),))
self._indices = verify_reshape(indices, IntType, (len(indices),))

if len(self._indices) > 0 and (self._indices[0] < 0 or
self._indices[-1] >= self._superset.total_size):
@@ -1650,21 +1651,7 @@ def dtype(self):
@cached_property
def ctype(self):
"""The c type of the data."""
# FIXME: Complex and float16 not supported
typemap = {"bool": "unsigned char",
"int": "int",
"int8": "char",
"int16": "short",
"int32": "int",
"int64": "long long",
"uint8": "unsigned char",
"uint16": "unsigned short",
"uint32": "unsigned int",
"uint64": "unsigned long",
"float": "double",
"float32": "float",
"float64": "double"}
return typemap[self.dtype.name]
return as_cstr(self.dtype)

@cached_property
def name(self):
@@ -2838,10 +2825,14 @@ def __init__(self, iterset, toset, arity, values=None, name=None, offset=None, p
self._toset = toset
self.comm = toset.comm
self._arity = arity
self._values = verify_reshape(values, np.int32, (iterset.total_size, arity),
self._values = verify_reshape(values, IntType,
(iterset.total_size, arity),
allow_none=True)
self._name = name or "map_%d" % Map._globalcount
self._offset = offset
if offset is None or len(offset) == 0:
self._offset = None
else:
self._offset = verify_reshape(offset, IntType, (arity, ))
# This is intended to be used for modified maps, for example
# where a boundary condition is imposed by setting some map
# entries negative.
@@ -2855,9 +2846,9 @@ def __init__(self, iterset, toset, arity, values=None, name=None, offset=None, p

if offset is not None and bt_masks is not None:
for name, mask in six.iteritems(bt_masks):
self._bottom_mask[name] = np.zeros(len(offset))
self._bottom_mask[name] = np.zeros(len(offset), dtype=IntType)
self._bottom_mask[name][mask[0]] = -1
self._top_mask[name] = np.zeros(len(offset))
self._top_mask[name] = np.zeros(len(offset), dtype=IntType)
self._top_mask[name][mask[1]] = -1
Map._globalcount += 1
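For reference, a hedged sketch of the offset normalisation introduced above, using plain numpy and a hypothetical helper name (the real code goes through PyOP2's verify_reshape): a missing or empty offset becomes None, anything else is coerced to a one-dimensional IntType array of length arity.

    import numpy as np
    from petsc4py import PETSc

    IntType = np.dtype(PETSc.IntType)

    def normalise_offset(offset, arity):
        # Hypothetical stand-in for the verify_reshape(offset, IntType, (arity,)) call.
        if offset is None or len(offset) == 0:
            return None
        return np.asarray(offset, dtype=IntType).reshape(arity)

    print(normalise_offset(None, 3))       # None
    print(normalise_offset([1, 1, 1], 3))  # array([1, 1, 1]) with PETSc's int dtype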

42 changes: 42 additions & 0 deletions pyop2/datatypes.py
@@ -0,0 +1,42 @@
from __future__ import absolute_import, print_function, division

import ctypes

import numpy
from petsc4py.PETSc import IntType, RealType, ScalarType

IntType = numpy.dtype(IntType)
RealType = numpy.dtype(RealType)
ScalarType = numpy.dtype(ScalarType)


def as_cstr(dtype):
"""Convert a numpy dtype like object to a C type as a string."""
return {"bool": "unsigned char",
"int": "int",
"int8": "int8_t",
"int16": "int16_t",
"int32": "int32_t",
"int64": "int64_t",
"uint8": "uint8_t",
"uint16": "uint16_t",
"uint32": "uint32_t",
"uint64": "uint64_t",
"float32": "float",
"float64": "double"}[numpy.dtype(dtype).name]


def as_ctypes(dtype):
"""Convert a numpy dtype like object to a ctypes type."""
return {"bool": ctypes.c_bool,
"int": ctypes.c_int,
"int8": ctypes.c_char,
"int16": ctypes.c_int16,
"int32": ctypes.c_int32,
"int64": ctypes.c_int64,
"uint8": ctypes.c_ubyte,
"uint16": ctypes.c_uint16,
"uint32": ctypes.c_uint32,
"uint64": ctypes.c_uint64,
"float32": ctypes.c_float,
"float64": ctypes.c_double}[numpy.dtype(dtype).name]
25 changes: 15 additions & 10 deletions pyop2/petsc_base.py
@@ -37,6 +37,7 @@
from functools import partial
import numpy as np

from pyop2.datatypes import IntType
from pyop2 import base
from pyop2 import mpi
from pyop2 import sparsity
@@ -55,7 +56,7 @@ def lgmap(self):
"""
lgmap = PETSc.LGMap()
if self.comm.size == 1:
lgmap.create(indices=np.arange(self.size, dtype=PETSc.IntType),
lgmap.create(indices=np.arange(self.size, dtype=IntType),
bsize=self.cdim, comm=self.comm)
else:
lgmap.create(indices=self.halo.global_to_petsc_numbering,
@@ -134,7 +135,7 @@ def lgmap(self):
indices for this :class:`DataSet`.
"""
lgmap = PETSc.LGMap()
lgmap.create(indices=np.arange(1, dtype=PETSc.IntType),
lgmap.create(indices=np.arange(1, dtype=IntType),
bsize=self.cdim, comm=self.comm)
return lgmap
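The same pattern in isolation, as a small petsc4py sketch (serial communicator, toy size): the local-to-global map's index array is allocated directly with PETSc's integer dtype, so it needs no conversion on 64-bit-index builds.

    import numpy as np
    from petsc4py import PETSc

    IntType = np.dtype(PETSc.IntType)
    lgmap = PETSc.LGMap()
    # Identity numbering for 10 local entries, block size 1.
    lgmap.create(indices=np.arange(10, dtype=IntType), bsize=1, comm=PETSc.COMM_SELF)
    lgmap.view()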

@@ -237,7 +238,7 @@ def lgmap(self):
lgmap = PETSc.LGMap()
if self.comm.size == 1:
size = sum(s.size * s.cdim for s in self)
lgmap.create(indices=np.arange(size, dtype=PETSc.IntType),
lgmap.create(indices=np.arange(size, dtype=IntType),
bsize=1, comm=self.comm)
return lgmap
# Compute local to global maps for a monolithic mixed system
@@ -264,18 +265,19 @@ def lgmap(self):
# Finally, we need to shift the field-local entry by the
# current field offset.
idx_size = sum(s.total_size*s.cdim for s in self)
indices = np.full(idx_size, -1, dtype=PETSc.IntType)
owned_sz = np.array([sum(s.size * s.cdim for s in self)], dtype=PETSc.IntType)
indices = np.full(idx_size, -1, dtype=IntType)
owned_sz = np.array([sum(s.size * s.cdim for s in self)],
dtype=IntType)
field_offset = np.empty_like(owned_sz)
self.comm.Scan(owned_sz, field_offset)
field_offset -= owned_sz

all_field_offsets = np.empty(self.comm.size, dtype=PETSc.IntType)
all_field_offsets = np.empty(self.comm.size, dtype=IntType)
self.comm.Allgather(field_offset, all_field_offsets)

start = 0
all_local_offsets = np.zeros(self.comm.size, dtype=PETSc.IntType)
current_offsets = np.zeros(self.comm.size + 1, dtype=PETSc.IntType)
all_local_offsets = np.zeros(self.comm.size, dtype=IntType)
current_offsets = np.zeros(self.comm.size + 1, dtype=IntType)
for s in self:
idx = indices[start:start + s.total_size * s.cdim]
owned_sz[0] = s.size * s.cdim
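The Scan-then-subtract pair above is the usual MPI idiom for an exclusive prefix sum. A standalone mpi4py sketch of the same field-offset computation (toy sizes, meant to be run under mpiexec):

    import numpy as np
    from mpi4py import MPI
    from petsc4py import PETSc

    IntType = np.dtype(PETSc.IntType)
    comm = MPI.COMM_WORLD

    owned_sz = np.array([4 * (comm.rank + 1)], dtype=IntType)  # locally owned dofs (toy)
    field_offset = np.empty_like(owned_sz)
    comm.Scan(owned_sz, field_offset)  # inclusive prefix sum across ranks
    field_offset -= owned_sz           # minus our own contribution = exclusive scan
    print(comm.rank, "starts at global offset", field_offset[0])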
@@ -557,7 +559,7 @@ def _flush_assembly(self):
self._parent._flush_assembly()

def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):
rows = np.asarray(rows, dtype=PETSc.IntType)
rows = np.asarray(rows, dtype=IntType)
rbs, _ = self.dims[0][0]
if len(rows) == 0:
# No need to set anything if we didn't get any rows, but
@@ -844,7 +846,7 @@ def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None):
The indices in ``rows`` should index the process-local rows of
the matrix (no mapping to global indexes is applied).
"""
rows = np.asarray(rows, dtype=PETSc.IntType)
rows = np.asarray(rows, dtype=IntType)
rbs, _ = self.dims[0][0]
if len(rows) == 0:
# No need to set anything if we didn't get any rows, but
@@ -877,6 +879,9 @@ def _assemble(self):
if self.assembly_state is not Mat.ASSEMBLED:
self.handle.assemble()
self.assembly_state = Mat.ASSEMBLED
# Mark blocks as assembled as well.
for m in self:
m.handle.assemble()

def addto_values(self, rows, cols, values):
"""Add a block of values to the :class:`Mat`."""
[Diffs for the remaining three changed files were not loaded.]
