diff --git a/include/spla.h b/include/spla.h
index fc6f3265e..f8219bd59 100644
--- a/include/spla.h
+++ b/include/spla.h
@@ -85,6 +85,27 @@ typedef enum spla_AcceleratorType {
     SPLA_ACCELERATOR_TYPE_OPENCL = 1
 } spla_AcceleratorType;
 
+typedef enum spla_FormatMatrix {
+    SPLA_FORMAT_MATRIX_CPU_LIL = 0,
+    SPLA_FORMAT_MATRIX_CPU_DOK = 1,
+    SPLA_FORMAT_MATRIX_CPU_COO = 2,
+    SPLA_FORMAT_MATRIX_CPU_CSR = 3,
+    SPLA_FORMAT_MATRIX_CPU_CSC = 4,
+    SPLA_FORMAT_MATRIX_ACC_COO = 5,
+    SPLA_FORMAT_MATRIX_ACC_CSR = 6,
+    SPLA_FORMAT_MATRIX_ACC_CSC = 7,
+    SPLA_FORMAT_MATRIX_COUNT = 8
+} spla_FormatMatrix;
+
+typedef enum spla_FormatVector {
+    SPLA_FORMAT_VECTOR_CPU_DOK = 0,
+    SPLA_FORMAT_VECTOR_CPU_DENSE = 1,
+    SPLA_FORMAT_VECTOR_CPU_COO = 2,
+    SPLA_FORMAT_VECTOR_ACC_DENSE = 3,
+    SPLA_FORMAT_VECTOR_ACC_COO = 4,
+    SPLA_FORMAT_VECTOR_COUNT = 5
+} spla_FormatVector;
+
 #define SPLA_NULL NULL
 
 typedef int32_t spla_bool;
@@ -257,6 +278,7 @@ SPLA_API spla_Status spla_Array_clear(spla_Array a);
 
 /* Vector container creation and manipulation */
 SPLA_API spla_Status spla_Vector_make(spla_Vector* v, spla_uint n_rows, spla_Type type);
+SPLA_API spla_Status spla_Vector_set_format(spla_Vector v, int format);
 SPLA_API spla_Status spla_Vector_set_fill_value(spla_Vector v, spla_Scalar value);
 SPLA_API spla_Status spla_Vector_set_reduce(spla_Vector v, spla_OpBinary reduce);
 SPLA_API spla_Status spla_Vector_set_int(spla_Vector v, spla_uint row_id, int value);
@@ -274,6 +296,7 @@ SPLA_API spla_Status spla_Vector_clear(spla_Vector v);
 
 /* Matrix container creation and manipulation */
 SPLA_API spla_Status spla_Matrix_make(spla_Matrix* M, spla_uint n_rows, spla_uint n_cols, spla_Type type);
+SPLA_API spla_Status spla_Matrix_set_format(spla_Matrix M, int format);
 SPLA_API spla_Status spla_Matrix_set_fill_value(spla_Matrix M, spla_Scalar value);
 SPLA_API spla_Status spla_Matrix_set_reduce(spla_Matrix M, spla_OpBinary reduce);
 SPLA_API spla_Status spla_Matrix_set_int(spla_Matrix M, spla_uint row_id, spla_uint col_id, int value);
diff --git a/include/spla/config.hpp b/include/spla/config.hpp
index 40b8ad960..dcca5980a 100644
--- a/include/spla/config.hpp
+++ b/include/spla/config.hpp
@@ -141,21 +141,6 @@
         Count = 5
     };
 
-    /**
-     * @class FormatArray
-     * @brief Named storage formats of array
-     *
-     * @warning Do not change order of values
-     */
-    enum class FormatArray : uint {
-        /** CPU side data allocation */
-        Cpu = 0,
-        /** Acc side allocation */
-        Acc = 1,
-        /** Total formats count */
-        Count = 2
-    };
-
     /**
      * @class MessageCallback
      * @brief Callback function called on library message event
diff --git a/python/example.py b/python/example.py
index c59696b4d..6b9b24c1a 100644
--- a/python/example.py
+++ b/python/example.py
@@ -23,10 +23,12 @@
 t.assign(m, pyspla.Scalar(pyspla.INT, 10), pyspla.INT.SECOND, pyspla.INT.GEZERO)
 print(t.to_list())
 
-M = pyspla.Matrix((10, 10), pyspla.INT)
 G = pyspla.Matrix.generate((10, 10), pyspla.INT, density=0.1, dist=[0, 10])
-
-print(M.to_list())
 print(G.to_lists())
-
 print(G.reduce(pyspla.INT.PLUS))
+
+M = pyspla.Matrix.from_lists([1, 2, 3], [1, 2, 3], [-1, 5, 10], (10, 10), pyspla.INT)
+M.set_format(pyspla.FormatMatrix.ACC_CSR)
+print(M.get(1, 0))
+print(M.get(1, 1))
+print(M.to_list())
diff --git a/python/pyspla/__init__.py b/python/pyspla/__init__.py
index 917f09b71..fbe808fad 100644
--- a/python/pyspla/__init__.py
+++ b/python/pyspla/__init__.py
@@ -191,6 +191,8 @@
     "INT",
     "UINT",
     "FLOAT",
+    "FormatMatrix",
+    "FormatVector",
     "Descriptor",
     "Op",
     "OpUnary",
diff --git a/python/pyspla/array.py b/python/pyspla/array.py
index dec7f8166..77fb83dfa 100644
--- a/python/pyspla/array.py
+++ b/python/pyspla/array.py
@@ -189,7 +189,6 @@ def clear(self):
         """
         Clears array removing all elements, so it has 0 values.
         """
-
         check(backend().spla_Array_clear(self._hnd))
 
     def to_list(self):
diff --git a/python/pyspla/bridge.py b/python/pyspla/bridge.py
index 74a5c5bca..5580a0cc1 100644
--- a/python/pyspla/bridge.py
+++ b/python/pyspla/bridge.py
@@ -29,7 +29,9 @@
 __all__ = [
     "backend",
     "check",
-    "is_docs"
+    "is_docs",
+    "FormatMatrix",
+    "FormatVector"
 ]
 
 import os
@@ -37,6 +39,7 @@
 import pathlib
 import platform
 import atexit
+import enum
 
 ARCH = {'AMD64': 'x64', 'x86_64': 'x64', 'arm64': 'arm64'}[platform.machine()]
 SYSTEM = {'Darwin': 'macos', 'Linux': 'linux', 'Windows': 'windows'}[platform.system()]
@@ -95,6 +98,35 @@ class SplaNotImplemented(SplaError):
     pass
 
 
+class FormatMatrix(enum.Enum):
+    """
+    Mapping for the spla-supported matrix storage formats enumeration.
+    """
+
+    CPU_LIL = 0
+    CPU_DOK = 1
+    CPU_COO = 2
+    CPU_CSR = 3
+    CPU_CSC = 4
+    ACC_COO = 5
+    ACC_CSR = 6
+    ACC_CSC = 7
+    COUNT = 8
+
+
+class FormatVector(enum.Enum):
+    """
+    Mapping for the spla-supported vector storage formats enumeration.
+    """
+
+    CPU_DOK = 0
+    CPU_DENSE = 1
+    CPU_COO = 2
+    ACC_DENSE = 3
+    ACC_COO = 4
+    COUNT = 5
+
+
 _status_mapping = {
     1: SplaError,
     2: SplaNoAcceleration,
@@ -350,6 +382,7 @@ def load_library(lib_path):
     _spla.spla_Array_clear.argtypes = [_object_t]
 
     _spla.spla_Vector_make.restype = _status_t
+    _spla.spla_Vector_set_format.restype = _status_t
     _spla.spla_Vector_set_fill_value.restype = _status_t
     _spla.spla_Vector_set_reduce.restype = _status_t
     _spla.spla_Vector_set_int.restype = _status_t
@@ -363,6 +396,7 @@ def load_library(lib_path):
     _spla.spla_Vector_clear.restype = _status_t
 
     _spla.spla_Vector_make.argtypes = [_p_object_t, _uint, _object_t]
+    _spla.spla_Vector_set_format.argtypes = [_object_t, ctypes.c_int]
     _spla.spla_Vector_set_fill_value.argtypes = [_object_t, _object_t]
     _spla.spla_Vector_set_reduce.argtypes = [_object_t, _object_t]
     _spla.spla_Vector_set_int.argtypes = [_object_t, _uint, _int]
@@ -376,6 +410,7 @@ def load_library(lib_path):
     _spla.spla_Vector_clear.argtypes = [_object_t]
 
     _spla.spla_Matrix_make.restype = _status_t
+    _spla.spla_Matrix_set_format.restype = _status_t
     _spla.spla_Matrix_set_fill_value.restype = _status_t
     _spla.spla_Matrix_set_reduce.restype = _status_t
     _spla.spla_Matrix_set_int.restype = _status_t
@@ -389,6 +424,7 @@ def load_library(lib_path):
     _spla.spla_Matrix_clear.restype = _status_t
 
     _spla.spla_Matrix_make.argtypes = [_p_object_t, _uint, _uint, _object_t]
+    _spla.spla_Matrix_set_format.argtypes = [_object_t, ctypes.c_int]
     _spla.spla_Matrix_set_fill_value.argtypes = [_object_t, _object_t]
     _spla.spla_Matrix_set_reduce.argtypes = [_object_t, _object_t]
     _spla.spla_Matrix_set_int.argtypes = [_object_t, _uint, _uint, _int]
diff --git a/python/pyspla/matrix.py b/python/pyspla/matrix.py
index 664649c2f..ad39511f3 100644
--- a/python/pyspla/matrix.py
+++ b/python/pyspla/matrix.py
@@ -137,6 +137,53 @@ def shape(self):
 
         return self._shape
 
+    def set_format(self, fmt):
+        """
+        Instruct the container to store its internal data in the desired storage format.
+        Multiple formats may be set at the same time; the data is then duplicated in each of them.
+        If the data is already in the selected format, nothing is done.
+
+        See `FormatMatrix` enumeration for all supported formats.
+
+        :param fmt: FormatMatrix.
+            One of the built-in storage formats to set.
+ """ + + check(backend().spla_Matrix_set_format(self.hnd, ctypes.c_int(fmt.value))) + + def set(self, i, j, v): + """ + Set value at specified index + + :param i: uint. + Row index to set. + + :param j: uint. + Column index to set. + + :param v: any. + Value to set. + """ + + check(self._dtype._matrix_set(self.hnd, ctypes.c_uint(i), ctypes.c_uint(j), self._dtype._c_type(v))) + + def get(self, i, j): + """ + Get value at specified index. + + :param i: uint. + Row index of value to get. + + :param j: uint. + Column index of value to get. + + :return: Value. + """ + + c_value = self._dtype._c_type(0) + check(self._dtype._matrix_get(self.hnd, ctypes.c_uint(i), ctypes.c_uint(j), ctypes.byref(c_value))) + return self._dtype.cast_value(c_value) + def build(self, view_I: MemView, view_J: MemView, view_V: MemView): """ Builds matrix content from a raw memory view resources. @@ -198,6 +245,13 @@ def to_lists(self): return list(buffer_I), list(buffer_J), list(buffer_V) + def clear(self): + """ + Clears matrix removing all elements, so it has 0 values. + """ + + check(backend().spla_Vector_clear(self.hnd)) + def to_list(self): """ Read matrix data as a python lists of tuples where key and value stored together. diff --git a/python/pyspla/vector.py b/python/pyspla/vector.py index 5e998249b..169317d67 100644 --- a/python/pyspla/vector.py +++ b/python/pyspla/vector.py @@ -127,6 +127,47 @@ def shape(self): return self._shape + def set_format(self, fmt): + """ + Instruct container to format internal data with desired storage format. + Multiple different formats may be set at same time, data will be duplicated in different formats. + If selected data already in a selected format, then nothing to do. + + See `FormatVector` enumeration for all supported formats. + + :param fmt: FormatVector. + One of built-in storage formats to set. + """ + + check(backend().spla_Vector_set_format(self.hnd, ctypes.c_int(fmt.value))) + + def set(self, i, v): + """ + Set value at specified index + + :param i: uint. + Row index to set. + + :param v: any. + Value to set. + """ + + check(self._dtype._vector_set(self.hnd, ctypes.c_uint(i), self._dtype._c_type(v))) + + def get(self, i): + """ + Get value at specified index. + + :param i: uint. + Row index of value to get. + + :return: Value. + """ + + c_value = self._dtype._c_type(0) + check(self._dtype._vector_get(self.hnd, ctypes.c_uint(i), ctypes.byref(c_value))) + return self._dtype.cast_value(c_value) + def build(self, view_I: MemView, view_V: MemView): """ Builds vector content from a raw memory view resources. @@ -155,6 +196,13 @@ def read(self): check(backend().spla_Vector_read(self.hnd, ctypes.byref(keys_view_hnd), ctypes.byref(values_view_hnd))) return MemView(hnd=keys_view_hnd, owner=self), MemView(hnd=values_view_hnd, owner=self) + def clear(self): + """ + Clears vector removing all elements, so it has 0 values. + """ + + check(backend().spla_Vector_clear(self.hnd)) + def to_lists(self): """ Read vector data as a python lists of keys and values. 
diff --git a/src/binding/c_matrix.cpp b/src/binding/c_matrix.cpp
index 8bf0e80e3..d15bdd709 100644
--- a/src/binding/c_matrix.cpp
+++ b/src/binding/c_matrix.cpp
@@ -32,6 +32,9 @@ spla_Status spla_Matrix_make(spla_Matrix* M, spla_uint n_rows, spla_uint n_cols,
     *M = as_ptr(matrix.release());
     return SPLA_STATUS_OK;
 }
+spla_Status spla_Matrix_set_format(spla_Matrix M, int format) {
+    return to_c_status(as_ptr(M)->set_format(static_cast<spla::FormatMatrix>(format)));
+}
 spla_Status spla_Matrix_set_fill_value(spla_Matrix M, spla_Scalar value) {
     return to_c_status(as_ptr(M)->set_fill_value(as_ref(value)));
 }
diff --git a/src/binding/c_vector.cpp b/src/binding/c_vector.cpp
index 9cf47bfec..25619d1bc 100644
--- a/src/binding/c_vector.cpp
+++ b/src/binding/c_vector.cpp
@@ -32,6 +32,9 @@ spla_Status spla_Vector_make(spla_Vector* v, spla_uint n_rows, spla_Type type) {
     *v = as_ptr(vector.release());
     return SPLA_STATUS_OK;
 }
+spla_Status spla_Vector_set_format(spla_Vector v, int format) {
+    return to_c_status(as_ptr(v)->set_format(static_cast<spla::FormatVector>(format)));
+}
 spla_Status spla_Vector_set_fill_value(spla_Vector v, spla_Scalar value) {
     return to_c_status(as_ptr(v)->set_fill_value(as_ref(value)));
 }
diff --git a/src/core/tmatrix.hpp b/src/core/tmatrix.hpp
index 8a8cb2f95..959ccc6df 100644
--- a/src/core/tmatrix.hpp
+++ b/src/core/tmatrix.hpp
@@ -156,19 +156,19 @@ namespace spla {
     template<typename T>
     Status TMatrix<T>::set_int(uint row_id, uint col_id, std::int32_t value) {
-        validate_rw(FormatMatrix::CpuLil);
+        validate_rwd(FormatMatrix::CpuLil);
         cpu_lil_add_element(row_id, col_id, static_cast<T>(value), *get<CpuLil<T>>());
         return Status::Ok;
     }
 
     template<typename T>
     Status TMatrix<T>::set_uint(uint row_id, uint col_id, std::uint32_t value) {
-        validate_rw(FormatMatrix::CpuLil);
+        validate_rwd(FormatMatrix::CpuLil);
         cpu_lil_add_element(row_id, col_id, static_cast<T>(value), *get<CpuLil<T>>());
         return Status::Ok;
     }
 
     template<typename T>
     Status TMatrix<T>::set_float(uint row_id, uint col_id, float value) {
-        validate_rw(FormatMatrix::CpuLil);
+        validate_rwd(FormatMatrix::CpuLil);
         cpu_lil_add_element(row_id, col_id, static_cast<T>(value), *get<CpuLil<T>>());
         return Status::Ok;
     }
diff --git a/src/core/tvector.hpp b/src/core/tvector.hpp
index 5d7ca4712..af3e5c71f 100644
--- a/src/core/tvector.hpp
+++ b/src/core/tvector.hpp
@@ -158,6 +158,7 @@
     template<typename T>
     Status TVector<T>::set_int(uint row_id, std::int32_t value) {
         if (is_valid(FormatVector::CpuDense)) {
+            validate_rwd(FormatVector::CpuDense);
             get<CpuDenseVec<T>>()->Ax[row_id] = static_cast<T>(value);
             return Status::Ok;
         }
@@ -169,6 +170,7 @@
     template<typename T>
     Status TVector<T>::set_uint(uint row_id, std::uint32_t value) {
         if (is_valid(FormatVector::CpuDense)) {
+            validate_rwd(FormatVector::CpuDense);
             get<CpuDenseVec<T>>()->Ax[row_id] = static_cast<T>(value);
             return Status::Ok;
         }
@@ -180,6 +182,7 @@
     template<typename T>
     Status TVector<T>::set_float(uint row_id, float value) {
         if (is_valid(FormatVector::CpuDense)) {
+            validate_rwd(FormatVector::CpuDense);
             get<CpuDenseVec<T>>()->Ax[row_id] = static_cast<T>(value);
             return Status::Ok;
         }
diff --git a/src/cpu/cpu_format_coo.hpp b/src/cpu/cpu_format_coo.hpp
index d4aea0129..31fd999c6 100644
--- a/src/cpu/cpu_format_coo.hpp
+++ b/src/cpu/cpu_format_coo.hpp
@@ -37,6 +37,15 @@ namespace spla {
      * @{
      */
 
+    template<typename T>
+    void cpu_coo_resize(const uint n_values,
+                        CpuCoo<T>& storage) {
+        storage.Ai.resize(n_values);
+        storage.Aj.resize(n_values);
+        storage.Ax.resize(n_values);
+        storage.values = n_values;
+    }
+
     template<typename T>
     void cpu_coo_clear(CpuCoo<T>& in) {
         in.Ai.clear();
@@ -45,6 +54,23 @@ namespace spla {
         in.Aj.clear();
         in.Ax.clear();
         in.values = 0;
     }
 
+    template<typename T>
+    void cpu_coo_to_dok(const CpuCoo<T>& in,
+                        CpuDok<T>&       out) {
+        auto& Rx = out.Ax;
+
+        auto& Ai = in.Ai;
+        auto& Aj = in.Aj;
+        auto& Ax = in.Ax;
+
+        assert(Rx.empty());
+
+        for (uint i = 0; i < in.values; i++) {
+            typename CpuDok<T>::Key key{Ai[i], Aj[i]};
+            Rx[key] = Ax[i];
+        }
+    }
+
     template<typename T>
     void cpu_coo_to_csr(uint n_rows,
                         const CpuCoo<T>& in,
diff --git a/src/storage/storage_manager.hpp b/src/storage/storage_manager.hpp
index 61c47131f..c41444628 100644
--- a/src/storage/storage_manager.hpp
+++ b/src/storage/storage_manager.hpp
@@ -154,6 +154,8 @@ namespace spla {
         std::queue<int> queue;
         reached.fill(infinity);
 
+        assert(storage.is_valid_any());
+
         for (int i = 0; i < capacity; ++i) {
             if (storage.is_valid_i(i)) {
                 reached[i] = source;
@@ -162,6 +164,12 @@ namespace spla {
         }
 
         while (reached[target] == infinity) {
+            if (queue.empty()) {
+                LOG_MSG(Status::NotImplemented, "no conversion path to target format " << target);
+                assert(false);
+                return;
+            }
+
             int u = queue.front();
             queue.pop();
 
diff --git a/src/storage/storage_manager_matrix.hpp b/src/storage/storage_manager_matrix.hpp
index fa5166218..1317666dd 100644
--- a/src/storage/storage_manager_matrix.hpp
+++ b/src/storage/storage_manager_matrix.hpp
@@ -84,6 +84,12 @@
             cpu_dok_clear(*dok);
             cpu_lil_to_dok(s.get_n_rows(), *lil, *dok);
         });
+        manager.register_converter(FormatMatrix::CpuLil, FormatMatrix::CpuCoo, [](Storage& s) {
+            auto* lil = s.template get<CpuLil<T>>();
+            auto* coo = s.template get<CpuCoo<T>>();
+            cpu_coo_resize(lil->values, *coo);
+            cpu_lil_to_coo(s.get_n_rows(), *lil, *coo);
+        });
         manager.register_converter(FormatMatrix::CpuLil, FormatMatrix::CpuCsr, [](Storage& s) {
             auto* lil = s.template get<CpuLil<T>>();
             auto* csr = s.template get<CpuCsr<T>>();
@@ -91,6 +97,12 @@
             cpu_lil_to_csr(s.get_n_rows(), *lil, *csr);
         });
 
+        manager.register_converter(FormatMatrix::CpuCoo, FormatMatrix::CpuDok, [](Storage& s) {
+            auto* coo = s.template get<CpuCoo<T>>();
+            auto* dok = s.template get<CpuDok<T>>();
+            cpu_dok_clear(*dok);
+            cpu_coo_to_dok(*coo, *dok);
+        });
         manager.register_converter(FormatMatrix::CpuCoo, FormatMatrix::CpuCsr, [](Storage& s) {
             auto* coo = s.template get<CpuCoo<T>>();
             auto* csr = s.template get<CpuCsr<T>>();
@@ -104,6 +116,12 @@
             cpu_dok_clear(*dok);
             cpu_csr_to_dok(s.get_n_rows(), *csr, *dok);
         });
+        manager.register_converter(FormatMatrix::CpuCsr, FormatMatrix::CpuCoo, [](Storage& s) {
+            auto* csr = s.template get<CpuCsr<T>>();
+            auto* coo = s.template get<CpuCoo<T>>();
+            cpu_coo_resize(csr->values, *coo);
+            cpu_csr_to_coo(s.get_n_rows(), *csr, *coo);
+        });
 
 #if defined(SPLA_BUILD_OPENCL)
         manager.register_constructor(FormatMatrix::AccCsr, [](Storage& s) {
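For orientation, the search that the new queue-empty guard in storage_manager.hpp protects is a breadth-first search from the formats that currently hold valid data towards the format requested by set_format, following the registered converter edges. The sketch below is a rough Python model of that logic, not spla code; the names find_conversion_path, is_valid, and has_converter are illustrative only.

```python
from collections import deque


def find_conversion_path(capacity, is_valid, has_converter, target):
    """Rough model of the storage manager's format-conversion search.

    capacity      -- number of storage formats (e.g. FormatMatrix.COUNT)
    is_valid      -- is_valid(i) -> True if format i currently holds valid data
    has_converter -- has_converter(src, dst) -> True if a converter is registered
    target        -- index of the format requested via set_format()
    """
    INFINITY = -1                    # "not reached yet" marker
    reached = [INFINITY] * capacity  # predecessor of each reached format
    queue = deque()

    # Seed the BFS with every format that already holds valid data.
    for i in range(capacity):
        if is_valid(i):
            reached[i] = i
            queue.append(i)

    # Expand until the target format becomes reachable.
    while reached[target] == INFINITY:
        if not queue:
            # Same situation as the added guard: no conversion path exists.
            raise RuntimeError(f"no conversion path to target format {target}")
        u = queue.popleft()
        for v in range(capacity):
            if reached[v] == INFINITY and has_converter(u, v):
                reached[v] = u
                queue.append(v)

    # Walk predecessors back from the target to obtain the conversion chain.
    path = [target]
    while reached[path[-1]] != path[-1]:
        path.append(reached[path[-1]])
    return list(reversed(path))
```

With the converters registered above, for example, a matrix holding only CpuLil data and asked for CpuCoo would resolve to the direct CpuLil -> CpuCoo chain; if the target index has no registered route at all, the model raises, just as the guard logs NotImplemented and bails out.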