From f66d8c8ebff824a6405d8b161c9d689685953102 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 21 Jan 2023 21:14:08 -0600 Subject: [PATCH] More linting (#373) --- .pre-commit-config.yaml | 2 +- graphblas/__init__.py | 2 +- graphblas/binary/numpy.py | 2 +- graphblas/core/agg.py | 2 +- graphblas/core/automethods.py | 2 +- graphblas/core/base.py | 6 +- graphblas/core/expr.py | 7 +- graphblas/core/mask.py | 62 +++++++-------- graphblas/core/matrix.py | 34 +++----- graphblas/core/operator.py | 2 +- graphblas/core/recorder.py | 2 +- graphblas/core/scalar.py | 22 +++-- graphblas/core/ss/descriptor.py | 2 +- graphblas/core/ss/matrix.py | 64 +++++++-------- graphblas/core/ss/vector.py | 30 +++---- graphblas/core/utils.py | 6 +- graphblas/core/vector.py | 22 +---- graphblas/dtypes.py | 11 ++- graphblas/io.py | 10 +-- graphblas/monoid/numpy.py | 2 +- graphblas/select/__init__.py | 2 +- graphblas/semiring/numpy.py | 2 +- graphblas/ss/_core.py | 8 +- graphblas/tests/conftest.py | 6 +- graphblas/tests/test_descriptor.py | 4 +- graphblas/tests/test_dtype.py | 2 +- graphblas/tests/test_infix.py | 124 ++++++++++++++--------------- graphblas/tests/test_io.py | 8 +- graphblas/tests/test_matrix.py | 36 ++++----- graphblas/tests/test_op.py | 12 +-- graphblas/tests/test_scalar.py | 5 +- graphblas/tests/test_ss_utils.py | 2 +- graphblas/tests/test_vector.py | 16 ++-- graphblas/unary/numpy.py | 2 +- graphblas/viz.py | 4 +- pyproject.toml | 28 ++++++- scripts/autogenerate.py | 2 +- scripts/create_pickle.py | 2 +- 38 files changed, 274 insertions(+), 283 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9f4e9d72..9499ceeb5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -71,7 +71,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.227 + rev: v0.0.229 hooks: - id: ruff args: [--force-exclude] diff --git a/graphblas/__init__.py 
b/graphblas/__init__.py index b7e1f498d..879571eab 100644 --- a/graphblas/__init__.py +++ b/graphblas/__init__.py @@ -61,7 +61,7 @@ def get_config(): def __getattr__(name): - """Auto-initialize if special attrs used without explicit init call by user""" + """Auto-initialize if special attrs used without explicit init call by user.""" if name in _SPECIAL_ATTRS: if _init_params is None: _init("suitesparse", None, automatic=True) diff --git a/graphblas/binary/numpy.py b/graphblas/binary/numpy.py index a5855718c..21ed568ea 100644 --- a/graphblas/binary/numpy.py +++ b/graphblas/binary/numpy.py @@ -1,4 +1,4 @@ -""" Create UDFs of numpy functions supported by numba. +"""Create UDFs of numpy functions supported by numba. See list of numpy ufuncs supported by numpy here: diff --git a/graphblas/core/agg.py b/graphblas/core/agg.py index 9e50c8622..3afcbc408 100644 --- a/graphblas/core/agg.py +++ b/graphblas/core/agg.py @@ -9,7 +9,7 @@ def _get_types(ops, initdtype): - """Determine the input and output types of an aggregator based on a list of ops""" + """Determine the input and output types of an aggregator based on a list of ops.""" if initdtype is None: prev = dict(ops[0].types) else: diff --git a/graphblas/core/automethods.py b/graphblas/core/automethods.py index f4aea3bdf..5a3ce0753 100644 --- a/graphblas/core/automethods.py +++ b/graphblas/core/automethods.py @@ -1,4 +1,4 @@ -""" Define functions to use as property methods on expressions. +"""Define functions to use as property methods on expressions. These will automatically compute the value and avoid the need for `.new()`. 
diff --git a/graphblas/core/base.py b/graphblas/core/base.py index a0be99f2d..a4e48b612 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -312,9 +312,7 @@ def __lshift__(self, expr, **opts): return self._update(expr, opts=opts) def update(self, expr, **opts): - """ - Convenience function when no output arguments (mask, accum, replace) are used - """ + """Convenience function when no output arguments (mask, accum, replace) are used.""" return self._update(expr, opts=opts) def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, *, opts): @@ -497,7 +495,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, * @property def _name_html(self): - """Treat characters after _ as subscript""" + """Treat characters after _ as subscript.""" split = self.name.split("_", 1) if len(split) == 1: return self.name diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index d6c4c89b1..9046795db 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -54,7 +54,7 @@ def _expr_name(self): return f"[{', '.join(map(str, idx[:3]))}, ...]" def _py_index(self): - """Convert resolved index back into a valid Python index""" + """Convert resolved index back into a valid Python index.""" if self.size is None: return self.index.value if self.index is _ALL_INDICES: @@ -149,6 +149,7 @@ def py_indices(self): def parse_indices(self, indices, shape): """ Returns + ------- [(rows, rowsize), (cols, colsize)] for Matrix [(idx, idx_size)] for Vector @@ -251,7 +252,7 @@ def parse_index(self, index, typ, size): return self.parse_index(np.array(index), np.ndarray, size) def get_index(self, dim): - """Return a new IndexerResolver with index for the selected dimension""" + """Return a new IndexerResolver with index for the selected dimension.""" rv = object.__new__(IndexerResolver) rv.obj = self.obj rv.indices = (self.indices[dim],) @@ -327,7 +328,7 @@ def new(self, dtype=None, *, mask=None, input_mask=None, name=None, 
**opts): return delayed_extractor.new(dtype, mask=mask, name=name, **opts) def _extract_delayed(self): - """Return an Expression object, treating this as an extract call""" + """Return an Expression object, treating this as an extract call.""" return self.parent._prep_for_extract(self.resolved_indexes) def _input_mask_to_mask(self, input_mask, **opts): diff --git a/graphblas/core/mask.py b/graphblas/core/mask.py index 4d42b12f0..9ad209095 100644 --- a/graphblas/core/mask.py +++ b/graphblas/core/mask.py @@ -208,22 +208,22 @@ def _name_html(self): # CS: complemented structural # CV: complemented value def _combine_S_S(m1, m2, dtype, name, opts): - """S-S""" + """S-S.""" return pair(m1.parent & m2.parent).new(dtype, name=name, **opts) def _combine_S_A(m1, m2, dtype, name, opts): - """S-S, S-V, S-CS, S-CV""" + """S-S, S-V, S-CS, S-CV.""" return one(m1.parent).new(dtype, mask=m2, name=name, **opts) def _combine_A_S(m1, m2, dtype, name, opts): - """S-S, V-S, CS-S, CV-S""" + """S-S, V-S, CS-S, CV-S.""" return one(m2.parent).new(dtype, mask=m1, name=name, **opts) def _combine_V_A(m1, m2, dtype, name, opts): - """V-S, V-V, V-CS, V-CV""" + """V-S, V-V, V-CS, V-CV.""" if isinstance(m2, ValueMask) and m1.parent._nvals > m2.parent._nvals: m1, m2 = m2, m1 val = valuene(m1.parent).new(dtype, mask=m2, name=name, **opts) @@ -233,7 +233,7 @@ def _combine_V_A(m1, m2, dtype, name, opts): def _combine_A_V(m1, m2, dtype, name, opts): - """S-V, V-V, CS-V, CV-V""" + """S-V, V-V, CS-V, CV-V.""" val = valuene(m2.parent).new(dtype, mask=m1, name=name, **opts) if dtype != BOOL or backend != "suitesparse" or not val.ss.is_iso: val(val.S, **opts) << True @@ -241,28 +241,28 @@ def _combine_A_V(m1, m2, dtype, name, opts): def _combine_CS_CS(m1, m2, dtype, name, opts): - """CS-CS""" + """CS-CS.""" val = pair(m1.parent | m2.parent).new(dtype, name=name, **opts) val(~val.S, replace=True, **opts) << True return val def _combine_CS_CV(m1, m2, dtype, name, opts): - """CS-CV""" + """CS-CV.""" val = 
pair(one(m1.parent).new(**opts) | m2.parent).new(dtype, name=name, **opts) val(~val.V, replace=True, **opts) << True return val def _combine_CV_CS(m1, m2, dtype, name, opts): - """CV-CS""" + """CV-CS.""" val = pair(m1.parent | one(m2.parent).new(**opts)).new(dtype, name=name, **opts) val(~val.V, replace=True, **opts) << True return val def _combine_CV_CV(m1, m2, dtype, name, opts): - """CV-CV""" + """CV-CV.""" val = lor(m1.parent | m2.parent).new(dtype, name=name, **opts) val(~val.V, replace=True, **opts) << True return val @@ -300,84 +300,84 @@ def _combine_CV_CV(m1, m2, dtype, name, opts): # Recipes to return the *complement* of combining two masks def _complement_S_S(m1, m2, dtype, name, opts): - """S-S""" + """S-S.""" val = pair(m1.parent & m2.parent).new(dtype, name=name, **opts) val(~val.S, replace=True, **opts) << True return val def _complement_S_A(m1, m2, dtype, name, opts): - """S-S, S-V, S-CS, S-CV""" + """S-S, S-V, S-CS, S-CV.""" val = one(m1.parent).new(dtype, mask=m2, name=name, **opts) val(~val.S, replace=True, **opts) << True return val def _complement_A_S(m1, m2, dtype, name, opts): - """S-S, V-S, CS-S, CV-S""" + """S-S, V-S, CS-S, CV-S.""" val = one(m2.parent).new(dtype, mask=m1, name=name, **opts) val(~val.S, replace=True, **opts) << True return val def _complement_V_V(m1, m2, dtype, name, opts): - """V-V""" + """V-V.""" val = land(m1.parent & m2.parent).new(dtype, name=name, **opts) val(~val.V, replace=True, **opts) << True return val def _complement_CS_CS(m1, m2, dtype, name, opts): - """CS-CS""" + """CS-CS.""" return pair(one(m1.parent).new(**opts) | one(m2.parent).new(**opts)).new( dtype, name=name, **opts ) def _complement_CS_A(m1, m2, dtype, name, opts): - """CS-S, CS-V, CS-CS, CS-CV""" + """CS-S, CS-V, CS-CS, CS-CV.""" val = one(m1.parent).new(dtype, name=name, **opts) val(~m2, **opts) << True return val def _complement_A_CS(m1, m2, dtype, name, opts): - """S-CS, V-CS, CS-CS, CV-CS""" + """S-CS, V-CS, CS-CS, CV-CS.""" val = 
one(m2.parent).new(dtype, name=name, **opts) val(~m1, **opts) << True return val def _complement_CS_CV(m1, m2, dtype, name, opts): - """CS-CV""" + """CS-CV.""" val = pair(one(m1.parent).new(**opts) | m2.parent).new(dtype, name=name, **opts) val(val.V, replace=True, **opts) << True return val def _complement_CV_CS(m1, m2, dtype, name, opts): - """CV-CS""" + """CV-CS.""" val = pair(m1.parent | one(m2.parent).new(**opts)).new(dtype, name=name, **opts) val(val.V, replace=True, **opts) << True return val def _complement_CV_CV(m1, m2, dtype, name, opts): - """CV-CV""" + """CV-CV.""" val = lor(m1.parent | m2.parent).new(dtype, name=name, **opts) val(val.V, replace=True, **opts) << True return val def _complement_CV_A(m1, m2, dtype, name, opts): - """CV-S, CV-V, CV-CS, CV-CV""" + """CV-S, CV-V, CV-CS, CV-CV.""" val = one(m1.parent).new(dtype, mask=~m1, name=name, **opts) val(~m2, **opts) << True return val def _complement_A_CV(m1, m2, dtype, name, opts): - """S-CV, V-CV, CS-CV, CV-CV""" + """S-CV, V-CV, CS-CV, CV-CV.""" val = one(m2.parent).new(dtype, mask=~m2, name=name, **opts) val(~m1, **opts) << True return val @@ -414,7 +414,7 @@ def _complement_A_CV(m1, m2, dtype, name, opts): def _combine_S_S_mask_or(m1, m2, opts): - """S-S""" + """S-S.""" val = monoid.any(one(m1.parent).new(bool, **opts) | one(m2.parent).new(bool, **opts)).new( **opts ) @@ -422,7 +422,7 @@ def _combine_S_S_mask_or(m1, m2, opts): def _combine_S_SV_mask_or(m1, m2, opts): - """S-V""" + """S-V.""" val = monoid.any( one(m1.parent).new(bool, **opts) | one(m2.parent).new(bool, mask=m2, **opts) ).new(**opts) @@ -430,7 +430,7 @@ def _combine_S_SV_mask_or(m1, m2, opts): def _combine_SV_S_mask_or(m1, m2, opts): - """V-S""" + """V-S.""" val = monoid.any( one(m1.parent).new(bool, mask=m1, **opts) | one(m2.parent).new(bool, **opts) ).new(**opts) @@ -438,31 +438,31 @@ def _combine_SV_S_mask_or(m1, m2, opts): def _complement_A_CS_mask_or(m1, m2, opts): - """~S-CS, ~V-CS, ~CV-CS""" + """~S-CS, ~V-CS, ~CV-CS.""" val 
= one(m2.parent).new(bool, mask=~m1, **opts) return ComplementedStructuralMask(val) def _complement_CS_A_mask_or(m1, m2, opts): - """~CS-S, ~CS-V, ~CS-CV""" + """~CS-S, ~CS-V, ~CS-CV.""" val = one(m1.parent).new(bool, mask=~m2, **opts) return ComplementedStructuralMask(val) def _complement_A_CV_mask_or(m1, m2, opts): - """~S-CV, ~V-CV""" + """~S-CV, ~V-CV.""" val = valuene(m2.parent).new(bool, mask=~m1, **opts) return ComplementedStructuralMask(val) def _complement_CV_A_mask_or(m1, m2, opts): - """~CV-S, ~CV-V""" + """~CV-S, ~CV-V.""" val = valuene(m1.parent).new(bool, mask=~m2, **opts) return ComplementedStructuralMask(val) def _combine_V_V_mask_or(m1, m2, opts): - """V-V""" + """V-V.""" val = monoid.any(valuene(m1.parent).new(**opts) | valuene(m2.parent).new(**opts)).new( bool, **opts ) @@ -470,13 +470,13 @@ def _combine_V_V_mask_or(m1, m2, opts): def _complement_CS_CS_mask_or(m1, m2, opts): - """~CS-CS""" + """~CS-CS.""" val = pair(m1.parent & m2.parent).new(bool, **opts) return ComplementedStructuralMask(val) def _complement_CV_CV_mask_or(m1, m2, opts): - """~CV-CV""" + """~CV-CV.""" val = valuene(land(m1.parent & m2.parent).new(bool, **opts)).new(**opts) return ComplementedStructuralMask(val) diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index c7483aaf4..26c7f23bf 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -223,7 +223,6 @@ def __delitem__(self, keys, **opts): Examples -------- - >>> del M[1, 5] """ del Updater(self, opts=opts)[keys] @@ -236,7 +235,6 @@ def __getitem__(self, keys): Examples -------- - .. code-block:: python subM = M[[1, 3, 5], :].new() @@ -258,7 +256,6 @@ def __setitem__(self, keys, expr, **opts): Examples -------- - .. code-block:: python M[0, 0:3] = 17 @@ -270,7 +267,6 @@ def __contains__(self, index): Examples -------- - .. 
code-block:: python (10, 15) in M @@ -410,7 +406,7 @@ def nvals(self): @property def _nvals(self): - """Like nvals, but doesn't record calls""" + """Like nvals, but doesn't record calls.""" n = ffi_new("GrB_Index*") check_status(lib.GrB_Matrix_nvals(n, self.gb_obj[0]), self) return n[0] @@ -682,7 +678,7 @@ def diag(self, k=0, dtype=None, *, name=None, **opts): return rv def wait(self, how="materialize"): - """Wait for a computation to complete or establish a "happens-before" relation + """Wait for a computation to complete or establish a "happens-before" relation. Parameters ---------- @@ -1210,7 +1206,7 @@ def from_dcsc( @classmethod def _from_dense(cls, values, dtype=None, *, name=None): - """Create a new Matrix from a dense numpy array""" + """Create a new Matrix from a dense numpy array.""" # TODO: GraphBLAS needs a way to import or assign dense # We could also handle F-contiguous data w/o a copy # TODO: handle `Matrix._from_dense(np.arange(3*4*5).reshape(3, 4, 5))` as 3x4 Matrix @@ -1238,7 +1234,7 @@ def _from_dense(cls, values, dtype=None, *, name=None): def from_dicts( cls, nested_dicts, dtype=None, *, order="rowwise", nrows=None, ncols=None, name=None ): - """Create a new Matrix from a dict of dicts or list of dicts + """Create a new Matrix from a dict of dicts or list of dicts. A dict of dicts is of the form ``{row: {col: val}}`` if order is "rowwise" and of the form ``{col: {row: val}}`` if order is "columnwise". @@ -1345,7 +1341,7 @@ def _to_csx(self, fmt, dtype=None): return Ap, Ai, Ax def to_csr(self, dtype=None): - """Returns three arrays of the standard CSR representation: indptr, col_indices, values + """Returns three arrays of the standard CSR representation: indptr, col_indices, values. In CSR, the column indices for row i are stored in ``col_indices[indptr[i]:indptr[i+1]]`` and the values are stored in ``values[indptr[i]:indptr[i+1]]``. 
@@ -1370,7 +1366,7 @@ def to_csr(self, dtype=None): return self._to_csx(_CSR_FORMAT, dtype) def to_csc(self, dtype=None): - """Returns three arrays of the standard CSC representation: indptr, row_indices, values + """Returns three arrays of the standard CSC representation: indptr, row_indices, values. In CSC, the row indices for column i are stored in ``row_indices[indptr[i]:indptr[i+1]]`` and the values are stored in ``values[indptr[i]:indptr[i+1]]``. @@ -1395,7 +1391,7 @@ def to_csc(self, dtype=None): return self._to_csx(_CSC_FORMAT, dtype) def to_dcsr(self, dtype=None): - """Returns four arrays of DCSR representation: compressed_rows, indptr, col_indices, values + """Returns four arrays of DCSR representation: compressed_rows, indptr, col_indices, values. In DCSR, we store the index of each non-empty row in ``compressed_rows``. The column indices for row ``compressed_rows[i]`` are stored in @@ -1441,7 +1437,7 @@ def to_dcsr(self, dtype=None): return compressed_rows, indptr, cols, values def to_dcsc(self, dtype=None): - """Returns four arrays of DCSC representation: compressed_cols, indptr, row_indices, values + """Returns four arrays of DCSC representation: compressed_cols, indptr, row_indices, values. In DCSC, we store the index of each non-empty column in ``compressed_cols``. The row indices for column ``compressed_cols[i]`` are stored in @@ -1491,7 +1487,7 @@ def to_dcsc(self, dtype=None): return compressed_cols, indptr, rows, values def to_dicts(self, order="rowwise"): - """Return Matrix as a dict of dicts in the form ``{row: {col: val}}`` + """Return Matrix as a dict of dicts in the form ``{row: {col: val}}``. Parameters ---------- @@ -1566,7 +1562,6 @@ def ewise_add(self, other, op=monoid.plus, *, require_monoid=None): Examples -------- - .. code-block:: python # Method syntax @@ -1653,7 +1648,6 @@ def ewise_mult(self, other, op=binary.times): Examples -------- - .. 
code-block:: python # Method syntax @@ -1722,7 +1716,6 @@ def ewise_union(self, other, op, left_default, right_default): Examples -------- - .. code-block:: python # Method syntax @@ -1836,7 +1829,6 @@ def mxv(self, other, op=semiring.plus_times): Examples -------- - .. code-block:: python # Method syntax @@ -1880,7 +1872,6 @@ def mxm(self, other, op=semiring.plus_times): Examples -------- - .. code-block:: python # Method syntax @@ -1928,7 +1919,6 @@ def kronecker(self, other, op=binary.times): Examples -------- - .. code-block:: python C << A.kronecker(B, op=binary.times) @@ -1981,7 +1971,6 @@ def apply(self, op, right=None, *, left=None): Examples -------- - .. code-block:: python # Method syntax @@ -2130,7 +2119,6 @@ def select(self, op, thunk=None): Examples -------- - .. code-block:: python # Method syntax @@ -2228,7 +2216,6 @@ def reduce_rowwise(self, op=monoid.plus): Examples -------- - .. code-block:: python w << A.reduce_rowwise(monoid.plus) @@ -2266,7 +2253,6 @@ def reduce_columnwise(self, op=monoid.plus): Examples -------- - .. code-block:: python w << A.reduce_columnwise(monoid.plus) @@ -2308,7 +2294,6 @@ def reduce_scalar(self, op=monoid.plus, *, allow_empty=True): Examples -------- - .. code-block:: python total << A.reduce_scalar(monoid.plus) @@ -2369,7 +2354,6 @@ def reposition(self, row_offset, column_offset, *, nrows=None, ncols=None): Examples -------- - .. 
code-block:: python C = A.reposition(1, 2).new() diff --git a/graphblas/core/operator.py b/graphblas/core/operator.py index 46e1028ec..c5e518c99 100644 --- a/graphblas/core/operator.py +++ b/graphblas/core/operator.py @@ -198,7 +198,7 @@ def _call_op(op, left, right=None, thunk=None, **kwargs): def _udt_mask(dtype): - """Create mask to determine which bytes of UDTs to use for equality check""" + """Create mask to determine which bytes of UDTs to use for equality check.""" if dtype in _udt_mask_cache: return _udt_mask_cache[dtype] if dtype.subdtype is not None: diff --git a/graphblas/core/recorder.py b/graphblas/core/recorder.py index 358d8f34b..b7cb567f2 100644 --- a/graphblas/core/recorder.py +++ b/graphblas/core/recorder.py @@ -11,7 +11,7 @@ def gbstr(arg): - """Convert arg to a string as an argument in a GraphBLAS call""" + """Convert arg to a string as an argument in a GraphBLAS call.""" if arg is None: return "NULL" if isinstance(arg, TypedOpBase): diff --git a/graphblas/core/scalar.py b/graphblas/core/scalar.py index 1e6420f16..cc34e27e2 100644 --- a/graphblas/core/scalar.py +++ b/graphblas/core/scalar.py @@ -105,13 +105,13 @@ def is_grbscalar(self): @property def _expr_name(self): - """The name used in the text for expressions""" + """The name used in the text for expressions.""" # Always using `repr(self.value)` may also be reasonable return self.name or repr(self.value) @property def _expr_name_html(self): - """The name used in the text for expressions in HTML formatting""" + """The name used in the text for expressions in HTML formatting.""" return self._name_html or repr(self.value) def __repr__(self, expr=None): @@ -293,7 +293,7 @@ def is_empty(self): @property def _is_empty(self): - """Like is_empty, but doesn't record calls""" + """Like is_empty, but doesn't record calls.""" if self._is_cscalar: return self._empty return self._nvals == 0 @@ -374,7 +374,7 @@ def nvals(self): @property def _nvals(self): - """Like nvals, but doesn't record calls""" + 
"""Like nvals, but doesn't record calls.""" if self._is_cscalar: return 0 if self._empty else 1 n = ffi_new("GrB_Index*") @@ -441,7 +441,7 @@ def dup(self, dtype=None, *, clear=False, is_cscalar=None, name=None): return new_scalar def wait(self, how="materialize"): - """Wait for a computation to complete or establish a "happens-before" relation + """Wait for a computation to complete or establish a "happens-before" relation. Parameters ---------- @@ -542,7 +542,7 @@ def _deserialize(value, dtype, is_cscalar, name): return Scalar.from_value(value, dtype, is_cscalar=is_cscalar, name=name) def _as_vector(self, *, name=None): - """Copy or cast this Scalar to a Vector + """Copy or cast this Scalar to a Vector. This casts to a Vector when using GrB_Scalar from SuiteSparse. """ @@ -562,7 +562,7 @@ def _as_vector(self, *, name=None): return rv def _as_matrix(self, *, name=None): - """Copy or cast this Scalar to a Matrix + """Copy or cast this Scalar to a Matrix. This casts to a Matrix when using GrB_Scalar from SuiteSparse. """ @@ -610,7 +610,6 @@ def ewise_add(self, other, op=monoid.plus): Examples -------- - .. code-block:: python # Method syntax @@ -665,7 +664,6 @@ def ewise_mult(self, other, op=binary.times): Examples -------- - .. code-block:: python # Method syntax @@ -724,7 +722,6 @@ def ewise_union(self, other, op, left_default, right_default): Examples -------- - .. code-block:: python # Method syntax @@ -810,7 +807,7 @@ def ewise_union(self, other, op, left_default, right_default): return expr def apply(self, op, right=None, *, left=None): - """Create a new Scalar by applying ``op`` + """Create a new Scalar by applying ``op``. See the `Apply <../user_guide/operations.html#apply>`__ section in the User Guide for more details. @@ -839,7 +836,6 @@ def apply(self, op, right=None, *, left=None): Examples -------- - .. 
code-block:: python # Method syntax @@ -1053,7 +1049,7 @@ def _as_scalar(scalar, dtype=None, *, is_cscalar): def _dict_to_record(np_type, d): - """Converts e.g. `{"x": 1, "y": 2.3}` to `(1, 2.3)`""" + """Converts e.g. `{"x": 1, "y": 2.3}` to `(1, 2.3)`.""" rv = [] for name, (dtype, _) in np_type.fields.items(): val = d[name] diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 316e17e46..dffc4dec1 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -109,7 +109,7 @@ def __init__(self): def get_descriptor(**opts): - """Create descriptor with SuiteSparse:GraphBLAS options + """Create descriptor with SuiteSparse:GraphBLAS options. See SuiteSparse:GraphBLAS documentation for more details. diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index f89c61d30..fcfdabf3b 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -55,7 +55,7 @@ def head(matrix, n=10, dtype=None, *, sort=False): def _concat_mn(tiles, *, is_matrix=None): - """Argument checking for `Matrix.ss.concat` and returns number of tiles in each dimension""" + """Argument checking for `Matrix.ss.concat` and returns number of tiles in each dimension.""" from ..matrix import Matrix, TransposedMatrix from ..vector import Vector @@ -243,7 +243,7 @@ def orientation(self): def build_diag(self, vector, k=0, **opts): """ - GxB_Matrix_diag + GxB_Matrix_diag. Construct a diagonal Matrix from the given vector. Existing entries in the Matrix are discarded. @@ -272,7 +272,7 @@ def build_diag(self, vector, k=0, **opts): def split(self, chunks, *, name=None, **opts): """ - GxB_Matrix_split + GxB_Matrix_split. Split a Matrix into a 2D array of sub-matrices according to `chunks`. @@ -354,7 +354,7 @@ def _concat(self, tiles, m, n, opts): def concat(self, tiles, **opts): """ - GxB_Matrix_concat + GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into the current Matrix. 
Any existing values in the current Matrix will be discarded. @@ -374,7 +374,7 @@ def concat(self, tiles, **opts): def build_scalar(self, rows, columns, value): """ - GxB_Matrix_build_Scalar + GxB_Matrix_build_Scalar. Like ``build``, but uses a scalar for all the values. @@ -529,7 +529,7 @@ def iteritems(self, seek=0): def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): """ - GxB_Matrix_export_xxx + GxB_Matrix_export_xxx. Parameters ---------- @@ -722,7 +722,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** def unpack(self, format=None, *, sort=False, raw=False, **opts): """ - GxB_Matrix_unpack_xxx + GxB_Matrix_unpack_xxx. `unpack` is like `export`, except that the Matrix remains valid but empty. `pack_*` methods are the opposite of `unpack`. @@ -1176,7 +1176,7 @@ def import_csr( **opts, ): """ - GxB_Matrix_import_CSR + GxB_Matrix_import_CSR. Create a new Matrix from standard CSR format. @@ -1253,7 +1253,7 @@ def pack_csr( **opts, ): """ - GxB_Matrix_pack_CSR + GxB_Matrix_pack_CSR. `pack_csr` is like `import_csr` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csr")`` @@ -1364,7 +1364,7 @@ def import_csc( **opts, ): """ - GxB_Matrix_import_CSC + GxB_Matrix_import_CSC. Create a new Matrix from standard CSC format. @@ -1441,7 +1441,7 @@ def pack_csc( **opts, ): """ - GxB_Matrix_pack_CSC + GxB_Matrix_pack_CSC. `pack_csc` is like `import_csc` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("csc")`` @@ -1554,7 +1554,7 @@ def import_hypercsr( **opts, ): """ - GxB_Matrix_import_HyperCSR + GxB_Matrix_import_HyperCSR. Create a new Matrix from standard HyperCSR format. @@ -1639,7 +1639,7 @@ def pack_hypercsr( **opts, ): """ - GxB_Matrix_pack_HyperCSR + GxB_Matrix_pack_HyperCSR. `pack_hypercsr` is like `import_hypercsr` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack("hypercsr")`` @@ -1776,7 +1776,7 @@ def import_hypercsc( **opts, ): """ - GxB_Matrix_import_HyperCSC + GxB_Matrix_import_HyperCSC. Create a new Matrix from standard HyperCSC format. @@ -1860,7 +1860,7 @@ def pack_hypercsc( **opts, ): """ - GxB_Matrix_pack_HyperCSC + GxB_Matrix_pack_HyperCSC. `pack_hypercsc` is like `import_hypercsc` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("hypercsc")`` @@ -1994,7 +1994,7 @@ def import_bitmapr( **opts, ): """ - GxB_Matrix_import_BitmapR + GxB_Matrix_import_BitmapR. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2075,7 +2075,7 @@ def pack_bitmapr( **opts, ): """ - GxB_Matrix_pack_BitmapR + GxB_Matrix_pack_BitmapR. `pack_bitmapr` is like `import_bitmapr` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapr")`` @@ -2185,7 +2185,7 @@ def import_bitmapc( **opts, ): """ - GxB_Matrix_import_BitmapC + GxB_Matrix_import_BitmapC. Create a new Matrix from values and bitmap (as mask) arrays. @@ -2266,7 +2266,7 @@ def pack_bitmapc( **opts, ): """ - GxB_Matrix_pack_BitmapC + GxB_Matrix_pack_BitmapC. `pack_bitmapc` is like `import_bitmapc` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("bitmapc")`` @@ -2374,7 +2374,7 @@ def import_fullr( **opts, ): """ - GxB_Matrix_import_FullR + GxB_Matrix_import_FullR. Create a new Matrix from values. @@ -2446,7 +2446,7 @@ def pack_fullr( **opts, ): """ - GxB_Matrix_pack_FullR + GxB_Matrix_pack_FullR. `pack_fullr` is like `import_fullr` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("fullr")`` @@ -2531,7 +2531,7 @@ def import_fullc( **opts, ): """ - GxB_Matrix_import_FullC + GxB_Matrix_import_FullC. Create a new Matrix from values. @@ -2603,7 +2603,7 @@ def pack_fullc( **opts, ): """ - GxB_Matrix_pack_FullC + GxB_Matrix_pack_FullC. `pack_fullc` is like `import_fullc` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack("fullc")`` @@ -2691,7 +2691,7 @@ def import_coo( **opts, ): """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar + GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. Create a new Matrix from indices and values in coordinate format. @@ -2764,7 +2764,7 @@ def pack_coo( **opts, ): """ - GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar + GrB_Matrix_build_XXX and GxB_Matrix_build_Scalar. `pack_coo` is like `import_coo` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coo")`` @@ -2877,7 +2877,7 @@ def import_coor( **opts, ): """ - GxB_Matrix_import_CSR + GxB_Matrix_import_CSR. Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -2960,7 +2960,7 @@ def pack_coor( **opts, ): """ - GxB_Matrix_pack_CSR + GxB_Matrix_pack_CSR. `pack_coor` is like `import_coor` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("coor")`` @@ -3046,7 +3046,7 @@ def import_cooc( **opts, ): """ - GxB_Matrix_import_CSC + GxB_Matrix_import_CSC. Create a new Matrix from indices and values in coordinate format. Rows must be sorted. @@ -3129,7 +3129,7 @@ def pack_cooc( **opts, ): """ - GxB_Matrix_pack_CSC + GxB_Matrix_pack_CSC. `pack_cooc` is like `import_cooc` except it "packs" data into an existing Matrix. This is the opposite of ``unpack("cooc")`` @@ -3231,7 +3231,7 @@ def import_any( **opts, ): """ - GxB_Matrix_import_xxx + GxB_Matrix_import_xxx. Dispatch to appropriate import method inferred from inputs. See the other import functions and `Matrix.ss.export`` for details. @@ -3329,7 +3329,7 @@ def pack_any( **opts, ): """ - GxB_Matrix_pack_xxx + GxB_Matrix_pack_xxx. `pack_any` is like `import_any` except it "packs" data into an existing Matrix. 
This is the opposite of ``unpack()`` @@ -4191,7 +4191,7 @@ def _compactify(self, how, reverse, asindex, nkey, nval, fmt, indices_name, name ) def sort(self, op=binary.lt, order="rowwise", *, values=True, permutation=True, **opts): - """GxB_Matrix_sort to sort values along the rows (default) or columns of the Matrix + """GxB_Matrix_sort to sort values along the rows (default) or columns of the Matrix. Sorting moves all the elements to the left (if rowwise) or top (if columnwise) just like `compactify`. The returned matrices will be the same shape as the input Matrix. diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index 06d16c48d..2395eb13f 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -146,7 +146,7 @@ def format(self): def build_diag(self, matrix, k=0, **opts): """ - GxB_Vector_diag + GxB_Vector_diag. Extract a diagonal from a Matrix or TransposedMatrix into a Vector. Existing entries in the Vector are discarded. @@ -184,7 +184,7 @@ def build_diag(self, matrix, k=0, **opts): def split(self, chunks, *, name=None, **opts): """ - GxB_Matrix_split + GxB_Matrix_split. Split a Vector into a 1D array of sub-vectors according to `chunks`. @@ -250,7 +250,7 @@ def _concat(self, tiles, m, opts): def concat(self, tiles, **opts): """ - GxB_Matrix_concat + GxB_Matrix_concat. Concatenate a 1D list of Vector objects into the current Vector. Any existing values in the current Vector will be discarded. @@ -268,7 +268,7 @@ def concat(self, tiles, **opts): def build_scalar(self, indices, value): """ - GxB_Vector_build_Scalar + GxB_Vector_build_Scalar. Like ``build``, but uses a scalar for all the values. @@ -411,7 +411,7 @@ def iteritems(self, seek=0): def export(self, format=None, *, sort=False, give_ownership=False, raw=False, **opts): """ - GxB_Vextor_export_xxx + GxB_Vector_export_xxx.
Parameters ---------- @@ -480,7 +480,7 @@ def export(self, format=None, *, sort=False, give_ownership=False, raw=False, ** def unpack(self, format=None, *, sort=False, raw=False, **opts): """ - GxB_Vector_unpack_xxx + GxB_Vector_unpack_xxx. `unpack` is like `export`, except that the Vector remains valid but empty. `pack_*` methods are the opposite of `unpack`. @@ -659,7 +659,7 @@ def import_any( **opts, ): """ - GxB_Vector_import_xxx + GxB_Vector_import_xxx. Dispatch to appropriate import method inferred from inputs. See the other import functions and `Vector.ss.export`` for details. @@ -726,7 +726,7 @@ def pack_any( **opts, ): """ - GxB_Vector_pack_xxx + GxB_Vector_pack_xxx. `pack_any` is like `import_any` except it "packs" data into an existing Vector. This is the opposite of ``unpack()`` @@ -848,7 +848,7 @@ def import_sparse( **opts, ): """ - GxB_Vector_import_CSC + GxB_Vector_import_CSC. Create a new Vector from sparse input. @@ -924,7 +924,7 @@ def pack_sparse( **opts, ): """ - GxB_Vector_pack_CSC + GxB_Vector_pack_CSC. `pack_sparse` is like `import_sparse` except it "packs" data into an existing Vector. This is the opposite of ``unpack("sparse")`` @@ -1031,7 +1031,7 @@ def import_bitmap( **opts, ): """ - GxB_Vector_import_Bitmap + GxB_Vector_import_Bitmap. Create a new Vector from values and bitmap (as mask) arrays. @@ -1105,7 +1105,7 @@ def pack_bitmap( **opts, ): """ - GxB_Vector_pack_Bitmap + GxB_Vector_pack_Bitmap. `pack_bitmap` is like `import_bitmap` except it "packs" data into an existing Vector. This is the opposite of ``unpack("bitmap")`` @@ -1214,7 +1214,7 @@ def import_full( **opts, ): """ - GxB_Vector_import_Full + GxB_Vector_import_Full. Create a new Vector from values. @@ -1280,7 +1280,7 @@ def pack_full( **opts, ): """ - GxB_Vector_pack_Full + GxB_Vector_pack_Full. `pack_full` is like `import_full` except it "packs" data into an existing Vector. 
This is the opposite of ``unpack("full")`` @@ -1557,7 +1557,7 @@ def compactify(self, how="first", size=None, *, reverse=False, asindex=False, na ) def sort(self, op=binary.lt, *, values=True, permutation=True, **opts): - """GxB_Vector_sort to sort values of the Vector + """GxB_Vector_sort to sort values of the Vector. Sorting moves all the elements to the left just like `compactify`. The returned vectors will be the same size as the input Vector. diff --git a/graphblas/core/utils.py b/graphblas/core/utils.py index e1641b03e..8d07eadea 100644 --- a/graphblas/core/utils.py +++ b/graphblas/core/utils.py @@ -7,7 +7,7 @@ def libget(name): - """Helper to get items from GraphBLAS which might be GrB or GxB""" + """Helper to get items from GraphBLAS which might be GrB or GxB.""" try: return getattr(lib, name) except AttributeError: @@ -22,7 +22,7 @@ def libget(name): def wrapdoc(func_with_doc): - """Decorator to copy `__doc__` from a function onto the wrapped function""" + """Decorator to copy `__doc__` from a function onto the wrapped function.""" def inner(func_wo_doc): func_wo_doc.__doc__ = func_with_doc.__doc__ @@ -326,7 +326,7 @@ def _autogenerate_code( begin="# Begin auto-generated code", end="# End auto-generated code", ): - """Super low-tech auto-code generation used by automethods.py and infixmethods.py""" + """Super low-tech auto-code generation used by automethods.py and infixmethods.py.""" with open(filename) as f: # pragma: no branch (flaky) orig_text = f.read() if specializer: diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 59c910900..1b1af308f 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -218,7 +218,6 @@ def __delitem__(self, keys, **opts): Examples -------- - >>> del v[1:-1] """ del Updater(self, opts=opts)[keys] @@ -231,7 +230,6 @@ def __getitem__(self, keys): Examples -------- - .. 
code-block:: python sub_v = v[[1, 3, 5]].new() @@ -250,7 +248,6 @@ def __setitem__(self, keys, expr, **opts): Examples -------- - .. code-block:: python # This makes a dense iso-value vector @@ -263,7 +260,6 @@ def __contains__(self, index): Examples -------- - .. code-block:: python # Check if v[15] is non-empty @@ -291,7 +287,7 @@ def __sizeof__(self): raise TypeError("Unable to get size of Vector with backend: {backend}") def isequal(self, other, *, check_dtype=False, **opts): - """Check for exact equality (same size, same structure) + """Check for exact equality (same size, same structure). Parameters ---------- @@ -389,7 +385,7 @@ def nvals(self): @property def _nvals(self): - """Like nvals, but doesn't record calls""" + """Like nvals, but doesn't record calls.""" n = ffi_new("GrB_Index*") check_status(lib.GrB_Vector_nvals(n, self.gb_obj[0]), self) return n[0] @@ -600,7 +596,7 @@ def diag(self, k=0, *, name=None): return rv def wait(self, how="materialize"): - """Wait for a computation to complete or establish a "happens-before" relation + """Wait for a computation to complete or establish a "happens-before" relation. Parameters ---------- @@ -777,7 +773,6 @@ def ewise_add(self, other, op=monoid.plus, *, require_monoid=None): Examples -------- - .. code-block:: python # Method syntax @@ -861,7 +856,6 @@ def ewise_mult(self, other, op=binary.times): Examples -------- - .. code-block:: python # Method syntax @@ -930,7 +924,6 @@ def ewise_union(self, other, op, left_default, right_default): Examples -------- - .. code-block:: python # Method syntax @@ -1045,7 +1038,6 @@ def vxm(self, other, op=semiring.plus_times): Examples -------- - .. code-block:: python # Method syntax @@ -1104,7 +1096,6 @@ def apply(self, op, right=None, *, left=None): Examples -------- - .. code-block:: python # Method syntax @@ -1250,7 +1241,6 @@ def select(self, op, thunk=None): Examples -------- - .. 
code-block:: python # Method syntax @@ -1348,7 +1338,6 @@ def reduce(self, op=monoid.plus, *, allow_empty=True): Examples -------- - .. code-block:: python total << v.reduce(monoid.plus) @@ -1393,7 +1382,6 @@ def inner(self, other, op=semiring.plus_times): Examples -------- - .. code-block:: python # Method syntax @@ -1438,7 +1426,6 @@ def outer(self, other, op=binary.times): Examples -------- - .. code-block:: python C << v.outer(w, op=binary.times) @@ -1489,7 +1476,6 @@ def reposition(self, offset, *, size=None): Examples -------- - .. code-block:: python w = v.reposition(20).new() @@ -1771,7 +1757,7 @@ def from_dict(cls, d, dtype=None, *, size=None, name=None): return cls.from_coo(indices, values, dtype, size=size, name=name) def to_dict(self): - """Return Vector as a dict in the form ``{index: val}`` + """Return Vector as a dict in the form ``{index: val}``. Returns ------- diff --git a/graphblas/dtypes.py b/graphblas/dtypes.py index d768c116d..e864a412f 100644 --- a/graphblas/dtypes.py +++ b/graphblas/dtypes.py @@ -42,9 +42,10 @@ def __lt__(self, other): try: t1 = self.np_type t2 = lookup_dtype(other).np_type - return (t1.kind, t1.itemsize, t1.name) < (t2.kind, t2.itemsize, t2.name) except ValueError: raise TypeError(f"Invalid or unknown datatype: {other}") from None + else: + return (t1.kind, t1.itemsize, t1.name) < (t2.kind, t2.itemsize, t2.name) def __reduce__(self): if self._is_udt: @@ -291,7 +292,7 @@ def lookup_dtype(key, value=None): def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): """ - Returns a type that can hold both type1 and type2 + Returns a type that can hold both type1 and type2. For example: unify(INT32, INT64) -> INT64 @@ -363,7 +364,11 @@ def _dtype_to_string(dtype): def _string_to_dtype(s): - """_string_to_dtype(_dtype_to_string(dtype)) == dtype""" + """Convert a string back to a dtype. 
+ + >>> _string_to_dtype(_dtype_to_string(dtype)) == dtype + True + """ try: return lookup_dtype(s) except Exception: diff --git a/graphblas/io.py b/graphblas/io.py index b29c483c5..59e4a3a0c 100644 --- a/graphblas/io.py +++ b/graphblas/io.py @@ -96,7 +96,7 @@ def from_numpy(m): def from_scipy_sparse_matrix(m, *, dup_op=None, name=None): - """dtype is inferred from m.dtype""" + """Matrix dtype is inferred from m.dtype.""" _warn( "`from_scipy_sparse_matrix` is deprecated; please use `from_scipy_sparse` instead.", DeprecationWarning, @@ -222,7 +222,7 @@ def from_awkward(A, *, name=None): # TODO: add parameters to allow different networkx classes and attribute names def to_networkx(m, edge_attribute="weight"): - """Create a networkx DiGraph from a square adjacency Matrix + """Create a networkx DiGraph from a square adjacency Matrix. Parameters ---------- @@ -276,7 +276,7 @@ def to_numpy(m): def to_scipy_sparse_matrix(m, format="csr"): # pragma: no cover (deprecated) - """format: str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}""" + """format: str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}.""" import scipy.sparse as ss _warn( @@ -302,7 +302,7 @@ def to_scipy_sparse_matrix(m, format="csr"): # pragma: no cover (deprecated) def to_scipy_sparse(A, format="csr"): - """Create a scipy.sparse array from a GraphBLAS Matrix or Vector + """Create a scipy.sparse array from a GraphBLAS Matrix or Vector. Parameters ---------- @@ -362,7 +362,7 @@ def to_scipy_sparse(A, format="csr"): def to_awkward(A, format=None): - """Create an Awkward Array from a GraphBLAS Matrix + """Create an Awkward Array from a GraphBLAS Matrix. Parameters ---------- diff --git a/graphblas/monoid/numpy.py b/graphblas/monoid/numpy.py index 788b984b9..702151551 100644 --- a/graphblas/monoid/numpy.py +++ b/graphblas/monoid/numpy.py @@ -1,4 +1,4 @@ -""" Create UDFs of numpy functions supported by numba. +"""Create UDFs of numpy functions supported by numba. 
See list of numpy ufuncs supported by numpy here: diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index 4f9376ee3..c7a1897f5 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -57,7 +57,7 @@ def _resolve_expr(expr, callname, opname): def _match_expr(parent, expr): - """Match expressions to rewrite `A.select(A < 5)` into select expression + """Match expressions to rewrite `A.select(A < 5)` into select expression. The argument must match the parent, so this _won't_ be rewritten: `A.select(B < 5)` """ diff --git a/graphblas/semiring/numpy.py b/graphblas/semiring/numpy.py index de25570d6..64169168a 100644 --- a/graphblas/semiring/numpy.py +++ b/graphblas/semiring/numpy.py @@ -1,4 +1,4 @@ -""" Create UDFs of numpy functions supported by numba. +"""Create UDFs of numpy functions supported by numba. See list of numpy ufuncs supported by numpy here: diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index a68222c87..441458a42 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -12,7 +12,7 @@ class _graphblas_ss: - """Used in `_expect_type`""" + """Used in `_expect_type`.""" _graphblas_ss.__name__ = "graphblas.ss" @@ -21,7 +21,7 @@ class _graphblas_ss: def diag(x, k=0, dtype=None, *, name=None, **opts): """ - GxB_Matrix_diag, GxB_Vector_diag + GxB_Matrix_diag, GxB_Vector_diag. Extract a diagonal Vector from a Matrix, or construct a diagonal Matrix from a Vector. Unlike ``Matrix.diag`` and ``Vector.diag``, this function @@ -67,7 +67,7 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): def concat(tiles, dtype=None, *, name=None, **opts): """ - GxB_Matrix_concat + GxB_Matrix_concat. Concatenate a 2D list of Matrix objects into a new Matrix, or a 1D list of Vector objects into a new Vector. 
To concatenate into existing objects, @@ -103,7 +103,7 @@ def concat(tiles, dtype=None, *, name=None, **opts): class GlobalConfig(BaseConfig): - """Get and set global configuration options for SuiteSparse:GraphBLAS + """Get and set global configuration options for SuiteSparse:GraphBLAS. See SuiteSparse:GraphBLAS documentation for more details. diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index 3a5b850a2..cd66efa6f 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -84,8 +84,8 @@ def pytest_runtest_setup(item): @pytest.fixture(autouse=True, scope="function") -def reset_name_counters(): - """Reset automatic names for each test for easier comparison of record.txt""" +def _reset_name_counters(): + """Reset automatic names for each test for easier comparison of record.txt.""" gb.Matrix._name_counter = itertools.count() gb.Vector._name_counter = itertools.count() gb.Scalar._name_counter = itertools.count() @@ -93,7 +93,7 @@ def reset_name_counters(): @pytest.fixture(scope="session", autouse=True) def ic(): # pragma: no cover (debug) - """Make `ic` available everywhere during testing for easier debugging""" + """Make `ic` available everywhere during testing for easier debugging.""" try: import icecream except ImportError: diff --git a/graphblas/tests/test_descriptor.py b/graphblas/tests/test_descriptor.py index bc2ced1c9..9209a8055 100644 --- a/graphblas/tests/test_descriptor.py +++ b/graphblas/tests/test_descriptor.py @@ -17,8 +17,6 @@ def test_caching(): def test_null_desc(): - """ - The default descriptor is not actually defined, but should be NULL - """ + """The default descriptor is not actually defined, but should be NULL.""" default = descriptor.lookup() assert default is None diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index a54d08819..59288a096 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -228,7 +228,7 @@ def test_dtype_to_from_string(): try: 
dtype2 = dtypes._string_to_dtype(s) except Exception: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unknown dtype"): lookup_dtype(dtype) else: assert dtype == dtype2 diff --git a/graphblas/tests/test_infix.py b/graphblas/tests/test_infix.py index 826ae03c4..3d6a674b2 100644 --- a/graphblas/tests/test_infix.py +++ b/graphblas/tests/test_infix.py @@ -1,4 +1,4 @@ -from pytest import fixture, raises +import pytest from graphblas import monoid, op from graphblas.exceptions import DimensionMismatch @@ -8,27 +8,27 @@ from graphblas import Matrix, Scalar, Vector # isort:skip (for dask-graphblas) -@fixture +@pytest.fixture def v1(): return Vector.from_coo([0, 2], [2.0, 5.0], name="v_1") -@fixture +@pytest.fixture def v2(): return Vector.from_coo([1, 2], [3.0, 7.0], name="v_2") -@fixture +@pytest.fixture def A1(): return Matrix.from_coo([0, 0], [0, 1], [0.0, 4.0], ncols=3, name="A_1") -@fixture +@pytest.fixture def A2(): return Matrix.from_coo([0, 2], [0, 0], [6.0, 8.0], name="A_2") -@fixture +@pytest.fixture def s1(): return Scalar.from_value(3, name="s_1") @@ -120,9 +120,9 @@ def test_bad_ewise(s1, v1, A1, A2): (A1, 1), (1, A1), ]: - with raises(TypeError, match="Bad type for argument"): + with pytest.raises(TypeError, match="Bad type for argument"): left | right - with raises(TypeError, match="Bad type for argument"): + with pytest.raises(TypeError, match="Bad type for argument"): left & right # These are okay now for left, right in [ @@ -139,55 +139,55 @@ def test_bad_ewise(s1, v1, A1, A2): (v1, A1), (A1.T, v1), ]: - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): left | right - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): left & right - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): left.ewise_add(right) - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): left.ewise_mult(right) - with raises(DimensionMismatch): + with 
pytest.raises(DimensionMismatch): left.ewise_union(right, op.plus, 0, 0) w = v1[: v1.size - 1].new() - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): v1 | w - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): v1 & w - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A2 | A1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A2 & A1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1.T | A1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1.T & A1 # These are okay now - # with raises(TypeError): + # with pytest.raises(TypeError): s1 | 1 - # with raises(TypeError): + # with pytest.raises(TypeError): 1 | s1 - # with raises(TypeError): + # with pytest.raises(TypeError): s1 & 1 - # with raises(TypeError): + # with pytest.raises(TypeError): 1 & s1 - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): v1 |= v1 - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): A1 |= A1 - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): v1 &= v1 - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): A1 &= A1 - # with raises(TypeError, match="require_monoid"): + # with pytest.raises(TypeError, match="require_monoid"): op.minus(v1 | v1) # ok now - with raises(TypeError): + with pytest.raises(TypeError): op.minus(v1 & v1, require_monoid=False) - with raises(TypeError, match="Bad dtype"): + with pytest.raises(TypeError, match="Bad dtype"): op.plus(v1 & v1, 1) @@ -204,39 +204,39 @@ def test_bad_matmul(s1, v1, A1, A2): (A1, 1), (1, A1), ]: - with raises(TypeError, match="Bad type for argument"): + with pytest.raises(TypeError, match="Bad type for argument"): left @ right - 
with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): v1 @ A1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1.T @ v1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A2 @ v1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): v1 @ A2.T - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1 @ A1 - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1.T @ A1.T - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): A1 @= A1 - with raises(TypeError): + with pytest.raises(TypeError): s1 @ 1 - with raises(TypeError): + with pytest.raises(TypeError): 1 @ s1 w = v1[:1].new() - with raises(DimensionMismatch): + with pytest.raises(DimensionMismatch): w @ v1 - with raises(TypeError): + with pytest.raises(TypeError): v1 @= v1 - with raises(TypeError, match="Bad type when calling semiring.plus_times"): + with pytest.raises(TypeError, match="Bad type when calling semiring.plus_times"): op.plus_times(A1) - with raises(TypeError, match="Bad types when calling semiring.plus_times."): + with pytest.raises(TypeError, match="Bad types when calling semiring.plus_times."): op.plus_times(A1, A2) - with raises(TypeError, match="Bad types when calling semiring.plus_times."): + with pytest.raises(TypeError, match="Bad types when calling semiring.plus_times."): op.plus_times(A1 @ A2, 1) @@ -251,15 +251,15 @@ def test_apply_unary(v1, A1): @autocompute def test_apply_unary_bad(s1, v1): - with raises(TypeError, match="__call__"): + with pytest.raises(TypeError, match="__call__"): op.exp(v1, 1) - with raises(TypeError, match="__call__"): + with pytest.raises(TypeError, match="__call__"): op.exp(1, v1) - # with raises(TypeError, match="Bad type when calling unary.exp"): + # with pytest.raises(TypeError, match="Bad type when calling unary.exp"): op.exp(s1) # Okay now - # with raises(TypeError, match="Bad type when calling unary.exp"): + 
# with pytest.raises(TypeError, match="Bad type when calling unary.exp"): op.exp(1) # Okay now - with raises(TypeError, match="Bad dtype"): + with pytest.raises(TypeError, match="Bad dtype"): op.exp(v1 | v1) @@ -283,20 +283,20 @@ def test_apply_binary(v1, A1): def test_apply_binary_bad(v1): - # with raises(TypeError, match="Bad types when calling binary.plus"): + # with pytest.raises(TypeError, match="Bad types when calling binary.plus"): op.plus(1, 1) # Okay now - with raises(TypeError, match="Bad type when calling binary.plus"): + with pytest.raises(TypeError, match="Bad type when calling binary.plus"): op.plus(v1) - with raises(TypeError, match="Bad type for keyword argument `right="): + with pytest.raises(TypeError, match="Bad type for keyword argument `right="): op.plus(v1, v1) - with raises(TypeError, match="may only be used when performing an ewise_add"): + with pytest.raises(TypeError, match="may only be used when performing an ewise_add"): op.plus(v1, 1, require_monoid=False) def test_infix_nonscalars(v1, v2): - # with raises(TypeError, match="refuse to guess"): + # with pytest.raises(TypeError, match="refuse to guess"): assert (v1 + v2).new().isequal(op.plus(v1 | v2).new()) - # with raises(TypeError, match="refuse to guess"): + # with pytest.raises(TypeError, match="refuse to guess"): assert (v1 - v2).new().isequal(v1.ewise_union(v2, "-", 0, 0).new()) @@ -308,9 +308,9 @@ def test_inplace_infix(s1, v1, v2, A1, A2): x @= A assert isinstance(x, Vector) assert x.isequal(v1 @ A) - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): v1 |= v2 - with raises(TypeError, match="not supported for FP64"): + with pytest.raises(TypeError, match="not supported for FP64"): A1 &= A2.T v1 = v1.dup(bool) @@ -335,11 +335,11 @@ def test_inplace_infix(s1, v1, v2, A1, A2): assert x.isequal(A1 & A2.T) expr = v1 | v2 - with raises(TypeError, match="not supported"): + with pytest.raises(TypeError, match="not 
supported"): expr |= v1 - with raises(TypeError, match="not supported"): + with pytest.raises(TypeError, match="not supported"): expr &= v1 - with raises(TypeError, match="not supported"): + with pytest.raises(TypeError, match="not supported"): expr @= A - with raises(TypeError, match="not supported"): + with pytest.raises(TypeError, match="not supported"): s1 @= v1 diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index b420fa5c5..d1edba027 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -372,13 +372,13 @@ def test_awkward_errors(): m = gb.Matrix.from_coo([0, 0, 3, 5], [1, 4, 0, 2], [1, 1, 1, 1], nrows=7, ncols=6) with pytest.raises(ValueError, match="Missing parameters"): gb.io.from_awkward(ak.Array([1, 2, 3])) + kv = gb.io.to_awkward(v) + kv = ak.with_parameter(kv, "format", "csr") with pytest.raises(ValueError, match="Invalid format for Vector"): - kv = gb.io.to_awkward(v) - kv = ak.with_parameter(kv, "format", "csr") gb.io.from_awkward(kv) + km = gb.io.to_awkward(m) + km = ak.with_parameter(km, "format", "dcsr") with pytest.raises(ValueError, match="Invalid format for Matrix"): - km = gb.io.to_awkward(m) - km = ak.with_parameter(km, "format", "dcsr") gb.io.from_awkward(km) with pytest.raises(ValueError, match="Invalid format for Vector"): gb.io.to_awkward(v, format="csr") diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index fc7f7ea08..9b6c9fc8d 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -1286,7 +1286,7 @@ def threex_minusthunk(x, row, col, thunk): # pragma: no cover (numba) indexunary.register_new("threex_minusthunk", threex_minusthunk) assert hasattr(indexunary, "threex_minusthunk") assert not hasattr(select, "threex_minusthunk") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="SelectOp must have BOOL return type"): select.register_anonymous(threex_minusthunk) expected = Matrix.from_coo( [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 
4, 1], @@ -1417,7 +1417,7 @@ def test_reduce_agg(A): assert B.reduce_scalar(agg.sum, allow_empty=True).new().is_empty assert B.reduce_scalar(agg.sum, allow_empty=False).new() == 0 assert B.reduce_scalar(agg.vars, allow_empty=True).new().is_empty - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="allow_empty=False not allowed when using Aggregators"): B.reduce_scalar(agg.vars, allow_empty=False) @@ -1863,7 +1863,7 @@ def test_transpose_exceptional(): def test_nested_matrix_operations(): - """Make sure temporaries aren't garbage-collected too soon""" + """Make sure temporaries aren't garbage-collected too soon.""" A = Matrix(int, 8, 8) A.ewise_mult(A.mxm(A.T).new()).new().reduce_scalar().new() A.ewise_mult(A.ewise_mult(A.ewise_mult(A.ewise_mult(A).new()).new()).new()) @@ -2575,7 +2575,7 @@ def test_wait(A): assert A2.isequal(A) A2.wait("materialize") A2.wait("complete") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be"): A2.wait("badmode") @@ -3044,7 +3044,7 @@ def test_ss_flatten(A): A.ss.flatten(order="bad") with pytest.raises(ValueError, match="cannot reshape"): v.ss.reshape(100, 100) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Shape tuple must be of length 2"): v.ss.reshape((*A.shape, 1)) @@ -3064,17 +3064,17 @@ def test_ss_reshape(A): assert rv.isequal(expected) assert rv.ss.reshape(8, 8, inplace=True) is None assert rv.isequal(A) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="cannot reshape array"): A.ss.reshape(5, 5) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="cannot reshape array"): A.ss.reshape(4) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="cannot reshape array"): A.ss.reshape((4,)) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="cannot reshape array"): A.ss.reshape((4, 5)) - with pytest.raises(ValueError): + with pytest.raises(ValueError, 
match="Shape tuple must be of length 2"): A.ss.reshape((4, 4, 4)) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Bad value for order"): A.ss.reshape(4, 16, order="bad_order") idx = r + 8 * c @@ -3243,11 +3243,11 @@ def test_ss_random(A): expected = Vector.from_coo(range(A.ncols), 1) assert counts.isequal(expected) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be one of:"): A.ss.selectk("bad", 1) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be one of:"): A.ss.selectk("bad", 1, order="col") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="negative k is not allowed"): A.ss.selectk("random", -1, order="columnwise") @@ -3478,7 +3478,7 @@ def compare(A, expected, isequal=True, **kwargs): [3, 0, 6, 6, 6, 4, 1], ) assert B.isequal(expected) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be one of:"): A.ss.compactify("bad_how") @@ -3842,7 +3842,7 @@ def test_get(A): assert A.T.get(1, 0) == 2 assert A.get(0, 1, "mittens") == 2 assert type(compute(A.get(0, 1))) is int - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Bad row, col"): # Not yet supported A.get(0, [0, 1]) @@ -3890,7 +3890,7 @@ def test_ss_config(A): assert A.ss.config == d for key, val in d.items(): if key in A.ss.config._read_only: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Config option .* is read-only"): A.ss.config[key] = val else: A.ss.config[key] = val @@ -4007,7 +4007,7 @@ def test_to_dcsr_from_dcsc(A): @autocompute def test_as_vector(A): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Matrix must have a single column"): A._as_vector() v = A[:, [1]]._as_vector() expected = A[:, 1].new() diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index 408c56bf7..16ac983cc 100644 --- a/graphblas/tests/test_op.py +++ 
b/graphblas/tests/test_op.py @@ -254,11 +254,11 @@ def inner(left, right): with pytest.raises(TypeError, match="UDF argument must be a function"): BinaryOp.register_new("bad", object()) assert not hasattr(binary, "bad") - with pytest.raises(UdfParseError, match="Unable to parse function using Numba"): - def bad(x, y): # pragma: no cover (numba) - return v + def bad(x, y): # pragma: no cover (numba) + return v + with pytest.raises(UdfParseError, match="Unable to parse function using Numba"): BinaryOp.register_new("bad", bad) def my_add(x, y): @@ -1082,7 +1082,7 @@ def test_lazy_op(): assert isinstance(unary.misc.lazy, UnaryOp) with pytest.raises(AttributeError): unary.misc.bad - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unknown unary string:"): unary.from_string("misc.lazy.badpath") assert op.from_string("lazy") is unary.lazy assert op.from_string("numpy.lazy") is unary.numpy.lazy @@ -1129,9 +1129,9 @@ def _udt_identity(val): assert udt in udt_identity assert int in udt_identity assert operator.get_typed_op(udt_identity, udt) is udt_identity[udt] - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unknown dtype:"): assert "badname" in binary.eq - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unknown dtype:"): assert "badname" in udt_identity def _udt_getx(val): diff --git a/graphblas/tests/test_scalar.py b/graphblas/tests/test_scalar.py index 91da4847c..04cc0e900 100644 --- a/graphblas/tests/test_scalar.py +++ b/graphblas/tests/test_scalar.py @@ -74,7 +74,8 @@ def test_dup(s): *uint_data, ]: s5 = s4.dup(dtype=dtype, name="s5") - assert s5.dtype == dtype and s5.value == val + assert s5.dtype == dtype + assert s5.value == val s6 = s_empty.dup(dtype=dtype, name="s6") assert s6.is_empty assert compute(s6.value) is None @@ -326,7 +327,7 @@ def test_wait(s): s.wait() s.wait("materialize") s.wait("complete") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument 
must be"): s.wait("badmode") diff --git a/graphblas/tests/test_ss_utils.py b/graphblas/tests/test_ss_utils.py index dd3b99208..d21f41f03 100644 --- a/graphblas/tests/test_ss_utils.py +++ b/graphblas/tests/test_ss_utils.py @@ -224,7 +224,7 @@ def test_global_config(): if k in config._defaults: config[k] = None else: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unable to set default value for"): config[k] = None with pytest.raises(ValueError, match="Wrong number"): config["memory_pool"] = [1, 2] diff --git a/graphblas/tests/test_vector.py b/graphblas/tests/test_vector.py index ad133f05c..7709c77a0 100644 --- a/graphblas/tests/test_vector.py +++ b/graphblas/tests/test_vector.py @@ -801,7 +801,7 @@ def twox_minusthunk(x, row, col, thunk): # pragma: no cover (numba) indexunary.register_new("twox_minusthunk", twox_minusthunk) assert hasattr(indexunary, "twox_minusthunk") assert not hasattr(select, "twox_minusthunk") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="SelectOp must have BOOL return type"): select.register_anonymous(twox_minusthunk) with pytest.raises(TypeError, match="must be a function"): select.register_anonymous(object()) @@ -868,7 +868,7 @@ def test_reduce_empty(): assert w.reduce(agg.sum, allow_empty=True).new().is_empty assert w.reduce(agg.sum, allow_empty=False).new() == 0 assert w.reduce(agg.mean, allow_empty=True).new().is_empty - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="allow_empty=False not allowed when using Aggregators"): w.reduce(agg.mean, allow_empty=False) @@ -1365,7 +1365,7 @@ def test_wait(v): assert v2.isequal(v) v2.wait("materialize") v2.wait("complete") - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be"): v2.wait("badmode") @@ -1729,7 +1729,7 @@ def test_ss_random(v): r = v.ss.selectk("random", k) assert r.nvals == k assert monoid.any(v & r).new().nvals == k - with pytest.raises(ValueError): + with 
pytest.raises(ValueError, match="`how` argument must be one of"): v.ss.selectk("bad", 1) @@ -1744,7 +1744,7 @@ def test_ss_firstk(v): x = w.ss.selectk("first", k) expected = Vector.from_coo(data[0][:k], data[1][:k], size=w.size) assert x.isequal(expected) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="negative k is not allowed"): v.ss.selectk("first", -1) @@ -1888,7 +1888,7 @@ def compare(v, expected, isequal=True, sort=False, **kwargs): compare(v, w, size=i, asindex=asindex, sort=sort) if not do_iso: compare(v, w, size=i, asindex=asindex, isequal=True, sort=sort) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="`how` argument must be one of"): v.ss.compactify("bad_how") @@ -2390,7 +2390,7 @@ def test_get(v): assert v.get(1) == 1 assert v.get(1, "mittens") == 1 assert type(compute(v.get(1))) is int - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Bad index in Vector.get"): # Not yet supported v.get([0, 1]) @@ -2430,7 +2430,7 @@ def test_ss_config(v): assert v.ss.config == d for key, val in d.items(): if key in v.ss.config._read_only: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Config option .* is read-only"): v.ss.config[key] = val else: v.ss.config[key] = val diff --git a/graphblas/unary/numpy.py b/graphblas/unary/numpy.py index b4cea6ae0..06086569d 100644 --- a/graphblas/unary/numpy.py +++ b/graphblas/unary/numpy.py @@ -1,4 +1,4 @@ -""" Create UDFs of numpy functions supported by numba. +"""Create UDFs of numpy functions supported by numba. 
See list of numpy ufuncs supported by numpy here: diff --git a/graphblas/viz.py b/graphblas/viz.py index c58a1b81b..72e18361a 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -67,7 +67,7 @@ def draw(m): # pragma: no cover def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, **kwargs): - """Plot the sparsity pattern of a Matrix using `matplotlib.spy` + """Plot the sparsity pattern of a Matrix using `matplotlib.spy`. See: - https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.spy.html @@ -104,7 +104,7 @@ def spy(M, *, centered=False, show=True, figure=None, axes=None, figsize=None, * def datashade(M, agg="count", *, width=None, height=None, opts_kwargs=None, **kwargs): - """Interactive plot of the sparsity pattern of a Matrix using hvplot and datashader + """Interactive plot of the sparsity pattern of a Matrix using hvplot and datashader. The `datashader` library rasterizes large data into a 2d grid of pixels. Each pixel may contain multiple data points, which are combined by an aggregator (`agg="count"`). 
diff --git a/pyproject.toml b/pyproject.toml index 206fa9534..07a99b129 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -167,7 +167,7 @@ select = [ "W", # pycodestyle Warning # "C90", # mccabe # "I", # isort - # "D", # pydocstyle + "D", # pydocstyle "UP", # pyupgrade # "N", # pep8-naming "YTT", # flake8-2020 @@ -183,7 +183,7 @@ select = [ "ISC", # flake8-implicit-str-concat # "ICN", # flake8-import-conventions "T20", # flake8-print - # "PT", # flake8-pytest-style + "PT", # flake8-pytest-style "Q", # flake8-quotes # "RET", # flake8-return "SIM", # flake8-simplify @@ -200,22 +200,44 @@ select = [ "PIE", # flake8-pie "COM", # flake8-commas # "INP", # flake8-no-pep420 + "EXE", # flake8-executable + "TYP", # flake8-type-checking + "TRY", # tryceratops "RUF", # ruff-specific rules ] external = [ # noqa codes that ruff doesn't know about: https://github.com/charliermarsh/ruff#external ] ignore = [ + # Would be nice to fix these + "D100", # Missing docstring in public module + "D101", # Missing docstring in public class + "D102", # Missing docstring in public method + "D103", # Missing docstring in public function + "D104", # Missing docstring in public package + "D105", # Missing docstring in magic method + "D107", # Missing docstring in `__init__` + "D205", # 1 blank line required between summary line and description + "D212", # Multi-line docstring summary should start at the first line + "D213", # Multi-line docstring summary should start at the second line + "D401", # First line of docstring should be in imperative mood: + "D417", # Missing argument description in the docstring: + + # Intentionally ignored "D203", # 1 blank line required before class docstring (Note: conflicts with D211, which is preferred) + "SIM102", # Use a single `if` statement instead of nested `if` statements (Note: often necessary) "SIM105", # Use contextlib.suppress(...) instead of try-except-pass (Note: try-except-pass is much faster) "SIM108", # Use ternary operator ... 
instead of if-else-block (Note: if-else better for coverage and sometimes clearer) # "SIM401", # Use dict.get ... instead of if-else-block (Note: if-else better for coverage and sometimes clearer) "PLR2004", # Magic number used in comparison, consider replacing magic with a constant variable "COM812", # Trailing comma missing + "PT001", # Use `@pytest.fixture()` over `@pytest.fixture` (Note: why?) + "PT003", # `scope='function'` is implied in `@pytest.fixture()` (Note: no harm in being explicit) + "PT023", # Use `@pytest.mark.slow()` over `@pytest.mark.slow` (Note: why?) ] [tool.ruff.per-file-ignores] "graphblas/core/operator.py" = ["S102"] -"graphblas/tests/*py" = ["S101", "T201"] +"graphblas/tests/*py" = ["S101", "T201", "D103", "D100"] "graphblas/tests/test_dtype.py" = ["UP003"] "graphblas/tests/test_formatting.py" = ["E501"] "graphblas/**/__init__.py" = ["F401"] diff --git a/scripts/autogenerate.py b/scripts/autogenerate.py index a8abde580..5f1116674 100755 --- a/scripts/autogenerate.py +++ b/scripts/autogenerate.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -"""This script is used to auto-generate code. +"""Run this script to auto-generate code after modifying automethods or infixmethods. This can also be done via: diff --git a/scripts/create_pickle.py b/scripts/create_pickle.py index 0e0c5ceae..2af759f71 100755 --- a/scripts/create_pickle.py +++ b/scripts/create_pickle.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" Script used to create the pickle files used in tests/test_pickle.py +"""Script used to create the pickle files used in tests/test_pickle.py. Note that the exact binary of the pickle files may differ depending on which Python version is used to create them.