From 55df7240e7a24fbf62070afad1ba2b7147f2585c Mon Sep 17 00:00:00 2001
From: schnellerhase <56360279+schnellerhase@users.noreply.github.com>
Date: Sat, 2 Nov 2024 20:46:03 +0100
Subject: [PATCH 01/33] Switch to gtype trait based mpi type dispatching

---
 cpp/dolfinx/common/MPI.h               | 61 ++++++++++++--------
 cpp/dolfinx/common/Scatterer.h         | 16 +++----
 cpp/dolfinx/common/utils.h             |  9 ++--
 cpp/dolfinx/fem/interpolate.h          |  4 +-
 cpp/dolfinx/geometry/BoundingBoxTree.h |  4 +-
 cpp/dolfinx/geometry/utils.h           |  8 ++--
 cpp/dolfinx/graph/partitioners.cpp     | 10 ++---
 cpp/dolfinx/io/xdmf_utils.cpp          |  8 ++--
 cpp/dolfinx/la/MatrixCSR.h             |  4 +-
 cpp/dolfinx/la/Vector.h                |  6 +--
 10 files changed, 61 insertions(+), 69 deletions(-)

diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h
index 0c32ba144a3..3e993c3f7a8 100644
--- a/cpp/dolfinx/common/MPI.h
+++ b/cpp/dolfinx/common/MPI.h
@@ -269,39 +269,32 @@ struct dependent_false : std::false_type
 };
 
 /// MPI Type
+template <typename T>
+struct mpi_type_mapping;
+
 template <typename T>
-constexpr MPI_Datatype mpi_type()
-{
-  if constexpr (std::is_same_v<T, float>)
-    return MPI_FLOAT;
-  else if constexpr (std::is_same_v<T, double>)
-    return MPI_DOUBLE;
-  else if constexpr (std::is_same_v<T, std::complex<double>>)
-    return MPI_C_DOUBLE_COMPLEX;
-  else if constexpr (std::is_same_v<T, std::complex<float>>)
-    return MPI_C_FLOAT_COMPLEX;
-  else if constexpr (std::is_same_v<T, short int>)
-    return MPI_SHORT;
-  else if constexpr (std::is_same_v<T, int>)
-    return MPI_INT;
-  else if constexpr (std::is_same_v<T, unsigned int>)
-    return MPI_UNSIGNED;
-  else if constexpr (std::is_same_v<T, long int>)
-    return MPI_LONG;
-  else if constexpr (std::is_same_v<T, unsigned long>)
-    return MPI_UNSIGNED_LONG;
-  else if constexpr (std::is_same_v<T, long long>)
-    return MPI_LONG_LONG;
-  else if constexpr (std::is_same_v<T, unsigned long long>)
-    return MPI_UNSIGNED_LONG_LONG;
-  else if constexpr (std::is_same_v<T, bool>)
-    return MPI_C_BOOL;
-  else if constexpr (std::is_same_v<T, std::int8_t>)
-    return MPI_INT8_T;
-  else
-    // Issue compile time error
-    static_assert(!std::is_same_v<T, T>);
-}
+MPI_Datatype mpi_t = mpi_type_mapping<T>::type;
+
+#define MAP_TO_MPI_TYPE(cpp_t, mpi_t)                                          \
+  template <>                                                                  \
+  struct mpi_type_mapping<cpp_t>                                               \
+  {                                                                            \
+    static inline MPI_Datatype type = mpi_t;                                   \
+  };
+
+MAP_TO_MPI_TYPE(float, MPI_FLOAT)
+MAP_TO_MPI_TYPE(double, MPI_DOUBLE)
+MAP_TO_MPI_TYPE(std::complex<float>, MPI_C_FLOAT_COMPLEX)
+MAP_TO_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX)
+MAP_TO_MPI_TYPE(short int, MPI_SHORT)
+MAP_TO_MPI_TYPE(int, MPI_INT)
+MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED)
+MAP_TO_MPI_TYPE(long int, MPI_LONG)
+MAP_TO_MPI_TYPE(unsigned long, MPI_UNSIGNED_LONG)
+MAP_TO_MPI_TYPE(long long, MPI_LONG_LONG)
+MAP_TO_MPI_TYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG)
+MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T)
+MAP_TO_MPI_TYPE(bool, MPI_C_BOOL)
 
 //---------------------------------------------------------------------------
 template <typename U>
@@ -432,7 +425,7 @@ distribute_to_postoffice(MPI_Comm comm, const U& x,
 
   // Send/receive data (x)
   MPI_Datatype compound_type;
-  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type);
+  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type);
   MPI_Type_commit(&compound_type);
   std::vector<T> recv_buffer_data(shape[1] * recv_disp.back());
   err = MPI_Neighbor_alltoallv(
@@ -614,7 +607,7 @@ distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
   dolfinx::MPI::check_error(comm, err);
 
   MPI_Datatype compound_type0;
-  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type0);
+  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type0);
   MPI_Type_commit(&compound_type0);
 
   std::vector<T> recv_buffer_data(shape[1] * send_disp.back());
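
The trait machinery above is small enough to exercise on its own. The following minimal, self-contained sketch reuses the patch's names (mpi_type_mapping, mpi_t, MAP_TO_MPI_TYPE) but registers only three types; the main() with its sum-of-ranks reduction is purely illustrative and not part of the patch:

// Minimal sketch of the trait-based MPI type dispatch.
// Build with an MPI compiler wrapper, e.g. `mpicxx -std=c++20 sketch.cpp`.
#include <complex>
#include <cstdint>
#include <iostream>
#include <mpi.h>

// Primary template is left undefined: using an unregistered type fails to
// compile, instead of reaching a static_assert at the end of an if-chain.
template <typename T>
struct mpi_type_mapping;

// Variable template that yields the MPI datatype handle for a C++ type T.
template <typename T>
MPI_Datatype mpi_t = mpi_type_mapping<T>::type;

#define MAP_TO_MPI_TYPE(cpp_t, mpi_t)                                          \
  template <>                                                                  \
  struct mpi_type_mapping<cpp_t>                                               \
  {                                                                            \
    static inline MPI_Datatype type = mpi_t;                                   \
  };

MAP_TO_MPI_TYPE(double, MPI_DOUBLE)
MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T)
MAP_TO_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX)

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // A call site now passes the variable mpi_t<T> where it previously called
  // the function mpi_type<T>(), e.g. summing one std::int64_t per rank:
  std::int64_t local = rank, total = 0;
  MPI_Allreduce(&local, &total, 1, mpi_t<std::int64_t>, MPI_SUM,
                MPI_COMM_WORLD);

  if (rank == 0)
    std::cout << "sum of ranks: " << total << "\n";

  MPI_Finalize();
  return 0;
}

Compared with the removed if constexpr chain, the mapping is open for extension: later commits in this series register further types (std::int64_t, std::int32_t) with a single MAP_TO_MPI_TYPE line each, while call sites switch from dolfinx::MPI::mpi_type<T>() to dolfinx::MPI::mpi_t<T>, as in the remaining hunks of this patch.

diff --git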
a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index f3d297388bc..b07ba03ec23 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -208,8 +208,8 @@ class Scatterer assert(requests.size() == std::size_t(1)); MPI_Ineighbor_alltoallv( send_buffer.data(), _sizes_local.data(), _displs_local.data(), - dolfinx::MPI::mpi_type(), recv_buffer.data(), _sizes_remote.data(), - _displs_remote.data(), dolfinx::MPI::mpi_type(), _comm0.comm(), + dolfinx::MPI::mpi_t(), recv_buffer.data(), _sizes_remote.data(), + _displs_remote.data(), dolfinx::MPI::mpi_t(), _comm0.comm(), requests.data()); break; } @@ -219,14 +219,14 @@ class Scatterer for (std::size_t i = 0; i < _src.size(); i++) { MPI_Irecv(recv_buffer.data() + _displs_remote[i], _sizes_remote[i], - dolfinx::MPI::mpi_type(), _src[i], MPI_ANY_TAG, + dolfinx::MPI::mpi_t(), _src[i], MPI_ANY_TAG, _comm0.comm(), &requests[i]); } for (std::size_t i = 0; i < _dest.size(); i++) { MPI_Isend(send_buffer.data() + _displs_local[i], _sizes_local[i], - dolfinx::MPI::mpi_type(), _dest[i], 0, _comm0.comm(), + dolfinx::MPI::mpi_t(), _dest[i], 0, _comm0.comm(), &requests[i + _src.size()]); } break; @@ -404,9 +404,9 @@ class Scatterer { assert(requests.size() == 1); MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_remote.data(), - _displs_remote.data(), MPI::mpi_type(), + _displs_remote.data(), MPI::mpi_t(), recv_buffer.data(), _sizes_local.data(), - _displs_local.data(), MPI::mpi_type(), + _displs_local.data(), MPI::mpi_t(), _comm1.comm(), &requests[0]); break; } @@ -417,7 +417,7 @@ class Scatterer for (std::size_t i = 0; i < _dest.size(); i++) { MPI_Irecv(recv_buffer.data() + _displs_local[i], _sizes_local[i], - dolfinx::MPI::mpi_type(), _dest[i], MPI_ANY_TAG, + dolfinx::MPI::mpi_t(), _dest[i], MPI_ANY_TAG, _comm0.comm(), &requests[i]); } @@ -426,7 +426,7 @@ class Scatterer for (std::size_t i = 0; i < _src.size(); i++) { MPI_Isend(send_buffer.data() + _displs_remote[i], _sizes_remote[i], - dolfinx::MPI::mpi_type(), _src[i], 0, _comm0.comm(), + dolfinx::MPI::mpi_t(), _src[i], 0, _comm0.comm(), &requests[i + _dest.size()]); } break; diff --git a/cpp/dolfinx/common/utils.h b/cpp/dolfinx/common/utils.h index 945e5ad44d7..bbe7ec08e6d 100644 --- a/cpp/dolfinx/common/utils.h +++ b/cpp/dolfinx/common/utils.h @@ -88,9 +88,9 @@ std::size_t hash_global(MPI_Comm comm, const T& x) // Gather hash keys on root process std::vector all_hashes(dolfinx::MPI::size(comm)); - int err = MPI_Gather(&local_hash, 1, dolfinx::MPI::mpi_type(), - all_hashes.data(), 1, - dolfinx::MPI::mpi_type(), 0, comm); + int err = MPI_Gather(&local_hash, 1, dolfinx::MPI::mpi_t, + all_hashes.data(), 1, dolfinx::MPI::mpi_t, + 0, comm); dolfinx::MPI::check_error(comm, err); // Hash the received hash keys @@ -98,8 +98,7 @@ std::size_t hash_global(MPI_Comm comm, const T& x) std::size_t global_hash = hash(all_hashes); // Broadcast hash key to all processes - err = MPI_Bcast(&global_hash, 1, dolfinx::MPI::mpi_type(), 0, - comm); + err = MPI_Bcast(&global_hash, 1, dolfinx::MPI::mpi_t, 0, comm); dolfinx::MPI::check_error(comm, err); return global_hash; diff --git a/cpp/dolfinx/fem/interpolate.h b/cpp/dolfinx/fem/interpolate.h index e25fdcbfa2b..4b6f05c9b49 100644 --- a/cpp/dolfinx/fem/interpolate.h +++ b/cpp/dolfinx/fem/interpolate.h @@ -273,9 +273,9 @@ void scatter_values(MPI_Comm comm, std::span src_ranks, std::vector values(recv_offsets.back()); values.reserve(1); MPI_Neighbor_alltoallv(send_values.data_handle(), send_sizes.data(), - send_offsets.data(), 
dolfinx::MPI::mpi_type(), + send_offsets.data(), dolfinx::MPI::mpi_t(), values.data(), recv_sizes.data(), recv_offsets.data(), - dolfinx::MPI::mpi_type(), reverse_comm); + dolfinx::MPI::mpi_t(), reverse_comm); MPI_Comm_free(&reverse_comm); // Insert values received from neighborhood communicator in output diff --git a/cpp/dolfinx/geometry/BoundingBoxTree.h b/cpp/dolfinx/geometry/BoundingBoxTree.h index 64ede45a057..2c9cc9cd266 100644 --- a/cpp/dolfinx/geometry/BoundingBoxTree.h +++ b/cpp/dolfinx/geometry/BoundingBoxTree.h @@ -335,8 +335,8 @@ class BoundingBoxTree if (num_bboxes() > 0) std::copy_n(std::prev(_bbox_coordinates.end(), 6), 6, send_bbox.begin()); std::vector recv_bbox(mpi_size * 6); - MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_type(), - recv_bbox.data(), 6, dolfinx::MPI::mpi_type(), comm); + MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_t(), + recv_bbox.data(), 6, dolfinx::MPI::mpi_t(), comm); std::vector, std::int32_t>> _recv_bbox(mpi_size); for (std::size_t i = 0; i < _recv_bbox.size(); ++i) diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h index 0f643e62643..fab8b64dd74 100644 --- a/cpp/dolfinx/geometry/utils.h +++ b/cpp/dolfinx/geometry/utils.h @@ -771,8 +771,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector received_points((std::size_t)recv_offsets.back()); MPI_Neighbor_alltoallv( send_data.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_type(), received_points.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_type(), forward_comm); + dolfinx::MPI::mpi_t(), received_points.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t(), forward_comm); // Get mesh geometry for closest entity const mesh::Geometry& geometry = mesh.geometry(); @@ -905,8 +905,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector recv_distances(recv_offsets.back()); MPI_Neighbor_alltoallv( squared_distances.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_type(), recv_distances.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_type(), reverse_comm); + dolfinx::MPI::mpi_t(), recv_distances.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t(), reverse_comm); // Update point ownership with extrapolation information std::vector closest_distance(point_owners.size(), diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index 68bc0a57847..1bcb4f692d7 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -444,7 +444,7 @@ graph::partition_fn graph::scotch::partitioner(graph::scotch::strategy strategy, // Exchange halo with node_partition data for ghosts common::Timer timer3("SCOTCH: call SCOTCH_dgraphHalo"); err = SCOTCH_dgraphHalo(&dgrafdat, node_partition.data(), - dolfinx::MPI::mpi_type()); + dolfinx::MPI::mpi_t); if (err != 0) throw std::runtime_error("Error during SCOTCH halo exchange"); timer3.stop(); @@ -554,8 +554,8 @@ graph::partition_fn graph::parmetis::partitioner(double imbalance, const int psize = dolfinx::MPI::size(pcomm); const idx_t num_local_nodes = graph.num_nodes(); node_disp = std::vector(psize + 1, 0); - MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_type(), - node_disp.data() + 1, 1, dolfinx::MPI::mpi_type(), + MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t(), + node_disp.data() + 1, 1, dolfinx::MPI::mpi_t(), pcomm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); 
std::vector array(graph.array().begin(), graph.array().end()); @@ -631,8 +631,8 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed, common::Timer timer1("KaHIP: build adjacency data"); std::vector node_disp(dolfinx::MPI::size(comm) + 1, 0); const T num_local_nodes = graph.num_nodes(); - MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_type(), - node_disp.data() + 1, 1, dolfinx::MPI::mpi_type(), comm); + MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t(), + node_disp.data() + 1, 1, dolfinx::MPI::mpi_t(), comm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); std::vector array(graph.array().begin(), graph.array().end()); std::vector offsets(graph.offsets().begin(), graph.offsets().end()); diff --git a/cpp/dolfinx/io/xdmf_utils.cpp b/cpp/dolfinx/io/xdmf_utils.cpp index 3cedcc06d8c..bb43ba4855a 100644 --- a/cpp/dolfinx/io/xdmf_utils.cpp +++ b/cpp/dolfinx/io/xdmf_utils.cpp @@ -378,8 +378,8 @@ xdmf_utils::distribute_entity_data( std::vector recv_values_buffer(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_values_buffer.data(), num_items_send.data(), send_disp.data(), - dolfinx::MPI::mpi_type(), recv_values_buffer.data(), - num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_type(), + dolfinx::MPI::mpi_t, recv_values_buffer.data(), + num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&comm0); @@ -551,8 +551,8 @@ xdmf_utils::distribute_entity_data( std::vector recv_values_buffer(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_values_buffer.data(), num_items_send.data(), send_disp.data(), - dolfinx::MPI::mpi_type(), recv_values_buffer.data(), - num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_type(), + dolfinx::MPI::mpi_t, recv_values_buffer.data(), + num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm, err); diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h index bd1181532f6..d641bf18cb8 100644 --- a/cpp/dolfinx/la/MatrixCSR.h +++ b/cpp/dolfinx/la/MatrixCSR.h @@ -684,9 +684,9 @@ void MatrixCSR::scatter_rev_begin() int status = MPI_Ineighbor_alltoallv( _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(), - dolfinx::MPI::mpi_type(), _ghost_value_data_in.data(), + dolfinx::MPI::mpi_t(), _ghost_value_data_in.data(), val_recv_count.data(), _val_recv_disp.data(), - dolfinx::MPI::mpi_type(), _comm.comm(), &_request); + dolfinx::MPI::mpi_t(), _comm.comm(), &_request); assert(status == MPI_SUCCESS); } //----------------------------------------------------------------------------- diff --git a/cpp/dolfinx/la/Vector.h b/cpp/dolfinx/la/Vector.h index 9b69e4670fb..1775653358f 100644 --- a/cpp/dolfinx/la/Vector.h +++ b/cpp/dolfinx/la/Vector.h @@ -245,7 +245,7 @@ auto inner_product(const V& a, const V& b) }); T result; - MPI_Allreduce(&local, &result, 1, dolfinx::MPI::mpi_type(), MPI_SUM, + MPI_Allreduce(&local, &result, 1, dolfinx::MPI::mpi_t, MPI_SUM, a.index_map()->comm()); return result; } @@ -279,7 +279,7 @@ auto norm(const V& x, Norm type = Norm::l2) = std::accumulate(data.begin(), data.end(), U(0), [](auto norm, auto x) { return norm + std::abs(x); }); U l1(0); - MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_type(), MPI_SUM, + MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_t(), MPI_SUM, x.index_map()->comm()); return l1; } @@ -293,7 +293,7 @@ auto norm(const V& x, Norm type = Norm::l2) data, [](T a, T b) { return std::norm(a) < std::norm(b); }); auto local_linf = 
std::abs(*max_pos); decltype(local_linf) linf = 0; - MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_type(), + MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_t(), MPI_MAX, x.index_map()->comm()); return linf; } From db6ba654681a8617b3037c724c5387d8ef622c57 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 20:50:20 +0100 Subject: [PATCH 02/33] Add mpi type mapping for std::int64_t --- cpp/dolfinx/common/IndexMap.cpp | 92 +++++++++++++----------- cpp/dolfinx/common/MPI.h | 13 ++-- cpp/dolfinx/common/Scatterer.h | 32 ++++----- cpp/dolfinx/fem/DirichletBC.cpp | 7 +- cpp/dolfinx/fem/dofmapbuilder.cpp | 9 +-- cpp/dolfinx/graph/partition.cpp | 28 +++++--- cpp/dolfinx/graph/partitioners.cpp | 11 +-- cpp/dolfinx/io/xdmf_function.cpp | 3 +- cpp/dolfinx/io/xdmf_mesh.cpp | 10 +-- cpp/dolfinx/io/xdmf_mesh.h | 4 +- cpp/dolfinx/io/xdmf_utils.cpp | 18 ++--- cpp/dolfinx/io/xdmf_utils.h | 3 +- cpp/dolfinx/la/MatrixCSR.h | 7 +- cpp/dolfinx/la/SparsityPattern.cpp | 5 +- cpp/dolfinx/mesh/Topology.cpp | 24 ++++--- cpp/dolfinx/mesh/graphbuild.cpp | 18 ++--- cpp/dolfinx/mesh/topologycomputation.cpp | 19 ++--- cpp/dolfinx/mesh/utils.h | 6 +- cpp/dolfinx/refinement/utils.cpp | 10 +-- cpp/dolfinx/refinement/utils.h | 8 ++- 20 files changed, 182 insertions(+), 145 deletions(-) diff --git a/cpp/dolfinx/common/IndexMap.cpp b/cpp/dolfinx/common/IndexMap.cpp index dd3ae3a7145..3603c69120f 100644 --- a/cpp/dolfinx/common/IndexMap.cpp +++ b/cpp/dolfinx/common/IndexMap.cpp @@ -136,10 +136,11 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span src, // Send ghost indices to owner, and receive indices recv_indices.resize(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_indices.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm0); + ierr = MPI_Neighbor_alltoallv( + send_indices.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_indices.data(), + recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, + comm0); dolfinx::MPI::check_error(comm, ierr); ierr = MPI_Comm_free(&comm0); @@ -511,10 +512,10 @@ compute_submap_ghost_indices(std::span submap_src, dolfinx::MPI::check_error(imap.comm(), ierr); // Send indices to ghosting ranks - ierr = MPI_Neighbor_alltoallv(send_gidx.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, - recv_gidx.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, comm1); + ierr = MPI_Neighbor_alltoallv( + send_gidx.data(), recv_sizes.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, recv_gidx.data(), send_sizes.data(), + send_disp.data(), dolfinx::MPI::mpi_t, comm1); dolfinx::MPI::check_error(imap.comm(), ierr); ierr = MPI_Comm_free(&comm1); @@ -607,10 +608,10 @@ common::compute_owned_indices(std::span indices, // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); std::vector& send_buffer = global_indices; - ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm); + ierr = MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, comm); dolfinx::MPI::check_error(comm, ierr); ierr = MPI_Comm_free(&comm); dolfinx::MPI::check_error(map.comm(), ierr); @@ -752,10 +753,11 @@ common::stack_index_maps( // Send ghost 
indices to owner, and receive indices std::vector recv_indices(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_indices.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm0); + ierr = MPI_Neighbor_alltoallv( + send_indices.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_indices.data(), + recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, + comm0); dolfinx::MPI::check_error(comm0, ierr); // For each received index (which I should own), compute its new @@ -773,10 +775,11 @@ common::stack_index_maps( // Send back/receive new indices std::vector ghosts_new_idx(send_disp.back()); - ierr = MPI_Neighbor_alltoallv(ghost_old_to_new.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, - ghosts_new_idx.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, comm1); + ierr = MPI_Neighbor_alltoallv( + ghost_old_to_new.data(), recv_sizes.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, ghosts_new_idx.data(), + send_sizes.data(), send_disp.data(), dolfinx::MPI::mpi_t, + comm1); dolfinx::MPI::check_error(comm1, ierr); // Unpack new indices and store owner @@ -825,8 +828,9 @@ common::create_sub_index_map(const IndexMap& imap, // Compute submap offset for this rank std::int64_t submap_local_size = submap_owned.size(); std::int64_t submap_offset = 0; - int ierr = MPI_Exscan(&submap_local_size, &submap_offset, 1, MPI_INT64_T, - MPI_SUM, imap.comm()); + int ierr + = MPI_Exscan(&submap_local_size, &submap_offset, 1, + dolfinx::MPI::mpi_t, MPI_SUM, imap.comm()); dolfinx::MPI::check_error(imap.comm(), ierr); // Compute the global indices (w.r.t. the submap) of the submap ghosts @@ -858,14 +862,16 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size) : _comm(comm, true) std::int64_t offset = 0; const std::int64_t local_size_tmp = local_size; MPI_Request request_scan; - int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM, + int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, + dolfinx::MPI::mpi_t, MPI_SUM, _comm.comm(), &request_scan); dolfinx::MPI::check_error(_comm.comm(), ierr); // Send local size to sum reduction to get global size MPI_Request request; - ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM, - comm, &request); + ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm, + &request); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Wait(&request_scan, MPI_STATUS_IGNORE); @@ -901,14 +907,16 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size, std::int64_t offset = 0; const std::int64_t local_size_tmp = local_size; MPI_Request request_scan; - int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM, - comm, &request_scan); + int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm, + &request_scan); dolfinx::MPI::check_error(_comm.comm(), ierr); // Send local size to sum reduction to get global size MPI_Request request; - ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM, - comm, &request); + ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm, + &request); dolfinx::MPI::check_error(_comm.comm(), ierr); // Wait for MPI_Iexscan to complete (get offset) @@ -1070,10 +1078,11 @@ graph::AdjacencyList IndexMap::index_to_dest_ranks() const // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); - ierr = 
MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm0); + ierr = MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), + recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, + comm0); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm0); dolfinx::MPI::check_error(_comm.comm(), ierr); @@ -1169,10 +1178,11 @@ graph::AdjacencyList IndexMap::index_to_dest_ranks() const std::next(recv_disp.begin())); std::vector recv_indices(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_indices.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm); + ierr = MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_indices.data(), + recv_sizes.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, comm); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm); dolfinx::MPI::check_error(_comm.comm(), ierr); @@ -1267,10 +1277,10 @@ std::vector IndexMap::shared_indices() const // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, - recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm); + ierr = MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, comm); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm); diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 3e993c3f7a8..f66f52319a8 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -294,6 +294,7 @@ MAP_TO_MPI_TYPE(unsigned long, MPI_UNSIGNED_LONG) MAP_TO_MPI_TYPE(long long, MPI_LONG_LONG) MAP_TO_MPI_TYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) //--------------------------------------------------------------------------- @@ -419,8 +420,8 @@ distribute_to_postoffice(MPI_Comm comm, const U& x, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(), - MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(), - recv_disp.data(), MPI_INT64_T, neigh_comm); + dolfinx::MPI::mpi_t, recv_buffer_index.data(), num_items_recv.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm); dolfinx::MPI::check_error(comm, err); // Send/receive data (x) @@ -549,8 +550,8 @@ distribute_from_postoffice(MPI_Comm comm, std::span indices, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_src.data(), send_disp.data(), - MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(), - recv_disp.data(), MPI_INT64_T, neigh_comm0); + dolfinx::MPI::mpi_t, recv_buffer_index.data(), num_items_recv.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&neigh_comm0); @@ -675,14 +676,14 @@ distribute_data(MPI_Comm comm0, std::span indices, int err; std::int64_t shape0 = 0; - err = MPI_Allreduce(&shape0_local, 
&shape0, 1, MPI_INT64_T, MPI_SUM, comm0); + err = MPI_Allreduce(&shape0_local, &shape0, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm0); dolfinx::MPI::check_error(comm0, err); std::int64_t rank_offset = -1; if (comm1 != MPI_COMM_NULL) { rank_offset = 0; - err = MPI_Exscan(&shape0_local, &rank_offset, 1, MPI_INT64_T, MPI_SUM, + err = MPI_Exscan(&shape0_local, &rank_offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm1); dolfinx::MPI::check_error(comm1, err); } diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index b07ba03ec23..20d05d0a225 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -131,10 +131,11 @@ class Scatterer // Send ghost global indices to owning rank, and receive owned // indices that are ghosts on other ranks std::vector recv_buffer(_displs_local.back(), 0); - MPI_Neighbor_alltoallv(ghosts_sorted.data(), _sizes_remote.data(), - _displs_remote.data(), MPI_INT64_T, - recv_buffer.data(), _sizes_local.data(), - _displs_local.data(), MPI_INT64_T, _comm1.comm()); + MPI_Neighbor_alltoallv( + ghosts_sorted.data(), _sizes_remote.data(), _displs_remote.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), + _sizes_local.data(), _displs_local.data(), + dolfinx::MPI::mpi_t, _comm1.comm()); const std::array range = map.local_range(); #ifndef NDEBUG @@ -206,11 +207,11 @@ class Scatterer case type::neighbor: { assert(requests.size() == std::size_t(1)); - MPI_Ineighbor_alltoallv( - send_buffer.data(), _sizes_local.data(), _displs_local.data(), - dolfinx::MPI::mpi_t(), recv_buffer.data(), _sizes_remote.data(), - _displs_remote.data(), dolfinx::MPI::mpi_t(), _comm0.comm(), - requests.data()); + MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_local.data(), + _displs_local.data(), dolfinx::MPI::mpi_t(), + recv_buffer.data(), _sizes_remote.data(), + _displs_remote.data(), dolfinx::MPI::mpi_t(), + _comm0.comm(), requests.data()); break; } case type::p2p: @@ -219,8 +220,8 @@ class Scatterer for (std::size_t i = 0; i < _src.size(); i++) { MPI_Irecv(recv_buffer.data() + _displs_remote[i], _sizes_remote[i], - dolfinx::MPI::mpi_t(), _src[i], MPI_ANY_TAG, - _comm0.comm(), &requests[i]); + dolfinx::MPI::mpi_t(), _src[i], MPI_ANY_TAG, _comm0.comm(), + &requests[i]); } for (std::size_t i = 0; i < _dest.size(); i++) @@ -403,11 +404,10 @@ class Scatterer case type::neighbor: { assert(requests.size() == 1); - MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_remote.data(), - _displs_remote.data(), MPI::mpi_t(), - recv_buffer.data(), _sizes_local.data(), - _displs_local.data(), MPI::mpi_t(), - _comm1.comm(), &requests[0]); + MPI_Ineighbor_alltoallv( + send_buffer.data(), _sizes_remote.data(), _displs_remote.data(), + MPI::mpi_t(), recv_buffer.data(), _sizes_local.data(), + _displs_local.data(), MPI::mpi_t(), _comm1.comm(), &requests[0]); break; } case type::p2p: diff --git a/cpp/dolfinx/fem/DirichletBC.cpp b/cpp/dolfinx/fem/DirichletBC.cpp index 97d9b0689d9..1017b0e0934 100644 --- a/cpp/dolfinx/fem/DirichletBC.cpp +++ b/cpp/dolfinx/fem/DirichletBC.cpp @@ -140,9 +140,10 @@ get_remote_dofs(MPI_Comm comm, const common::IndexMap& map, int bs_map, // MPI_Neighbor_alltoallv to send only to relevant processes. 
// Send/receive global index of dofs with bcs to all neighbors std::vector dofs_received(disp.back()); - MPI_Ineighbor_allgatherv(dofs_global.data(), dofs_global.size(), MPI_INT64_T, - dofs_received.data(), num_dofs_recv.data(), - disp.data(), MPI_INT64_T, comm, &request); + MPI_Ineighbor_allgatherv( + dofs_global.data(), dofs_global.size(), dolfinx::MPI::mpi_t, + dofs_received.data(), num_dofs_recv.data(), disp.data(), + dolfinx::MPI::mpi_t, comm, &request); // FIXME: check that dofs is sorted // Build vector of local dof indices that have been marked by another diff --git a/cpp/dolfinx/fem/dofmapbuilder.cpp b/cpp/dolfinx/fem/dofmapbuilder.cpp index f7cfcf25d7b..e8ed7de366a 100644 --- a/cpp/dolfinx/fem/dofmapbuilder.cpp +++ b/cpp/dolfinx/fem/dofmapbuilder.cpp @@ -539,10 +539,11 @@ std::pair, std::vector> get_global_indices( // TODO: use MPI_Ineighbor_alltoallv // Send global index of dofs to neighbors all_dofs_received[d].resize(disp_recv[d].back()); - MPI_Ineighbor_allgatherv(global[d].data(), global[d].size(), MPI_INT64_T, - all_dofs_received[d].data(), size_recv.data(), - disp_recv[d].data(), MPI_INT64_T, comm[d], - &requests[requests_dim.size()]); + MPI_Ineighbor_allgatherv( + global[d].data(), global[d].size(), dolfinx::MPI::mpi_t, + all_dofs_received[d].data(), size_recv.data(), disp_recv[d].data(), + dolfinx::MPI::mpi_t, comm[d], + &requests[requests_dim.size()]); requests_dim.push_back(d); } diff --git a/cpp/dolfinx/graph/partition.cpp b/cpp/dolfinx/graph/partition.cpp index d6080643003..10996ae67a5 100644 --- a/cpp/dolfinx/graph/partition.cpp +++ b/cpp/dolfinx/graph/partition.cpp @@ -49,7 +49,8 @@ graph::build::distribute(MPI_Comm comm, std::int64_t offset_global = 0; { const std::int64_t num_owned = list.num_nodes(); - MPI_Exscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset_global, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); } // TODO: Do this on the neighbourhood only @@ -151,7 +152,8 @@ graph::build::distribute(MPI_Comm comm, // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); + MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, + &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), @@ -242,7 +244,8 @@ graph::build::distribute(MPI_Comm comm, std::span list, // Get global offset for converting local index to global index for // nodes in 'list' std::int64_t offset_global = 0; - MPI_Exscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset_global, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); // Buffer size (max number of edges + 2 for owning rank, // and node global index) @@ -334,7 +337,8 @@ graph::build::distribute(MPI_Comm comm, std::span list, // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); + MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, + &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), @@ -402,8 +406,8 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::int64_t offset_local = 0; MPI_Request request_offset_scan; const std::int64_t num_local = owned_indices.size(); - MPI_Iexscan(&num_local, &offset_local, 1, MPI_INT64_T, MPI_SUM, comm, - &request_offset_scan); + 
MPI_Iexscan(&num_local, &offset_local, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm, &request_offset_scan); // Find out how many ghosts are on each neighboring process std::vector ghost_index_count; @@ -472,8 +476,9 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::vector recv_data(recv_offsets.back()); MPI_Neighbor_alltoallv(send_data.data(), ghost_index_count.data(), - send_offsets.data(), MPI_INT64_T, recv_data.data(), - recv_sizes.data(), recv_offsets.data(), MPI_INT64_T, + send_offsets.data(), dolfinx::MPI::mpi_t, + recv_data.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t, neighbor_comm_fwd); // Complete global_offset scan @@ -502,9 +507,10 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::vector new_recv(send_data.size()); MPI_Neighbor_alltoallv(recv_data.data(), recv_sizes.data(), - recv_offsets.data(), MPI_INT64_T, new_recv.data(), - ghost_index_count.data(), send_offsets.data(), - MPI_INT64_T, neighbor_comm_rev); + recv_offsets.data(), dolfinx::MPI::mpi_t, + new_recv.data(), ghost_index_count.data(), + send_offsets.data(), dolfinx::MPI::mpi_t, + neighbor_comm_rev); MPI_Comm_free(&neighbor_comm_fwd); MPI_Comm_free(&neighbor_comm_rev); diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index 1bcb4f692d7..ae73bbcc44b 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -143,10 +143,10 @@ graph::AdjacencyList compute_destination_ranks( // Send/receive data std::vector recv_buffer(recv_disp.back()); - MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, recv_buffer.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, - neigh_comm); + MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm); MPI_Comm_free(&neigh_comm); // Prepare (local node index, destination rank) array. 
Add local data, @@ -321,7 +321,8 @@ graph::partition_fn graph::scotch::partitioner(graph::scotch::strategy strategy, std::int64_t offset_global = 0; const std::int64_t num_owned = graph.num_nodes(); MPI_Request request_offset_scan; - MPI_Iexscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm, + MPI_Iexscan(&num_owned, &offset_global, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm, &request_offset_scan); // C-style array indexing diff --git a/cpp/dolfinx/io/xdmf_function.cpp b/cpp/dolfinx/io/xdmf_function.cpp index 47ce57ddd6e..94bdd2f1d6c 100644 --- a/cpp/dolfinx/io/xdmf_function.cpp +++ b/cpp/dolfinx/io/xdmf_function.cpp @@ -160,7 +160,8 @@ void xdmf_function::add_function(MPI_Comm comm, const fem::Function& u, const std::int64_t num_local = data_values.size() / num_components; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, + comm); const bool use_mpi_io = dolfinx::MPI::size(comm) > 1; diff --git a/cpp/dolfinx/io/xdmf_mesh.cpp b/cpp/dolfinx/io/xdmf_mesh.cpp index d6a228ce167..43c23e497cb 100644 --- a/cpp/dolfinx/io/xdmf_mesh.cpp +++ b/cpp/dolfinx/io/xdmf_mesh.cpp @@ -135,8 +135,8 @@ void xdmf_mesh::add_topology_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_entities_local = topology_data.size() / num_nodes_per_entity; std::int64_t num_entities_global = 0; - MPI_Allreduce(&num_entities_local, &num_entities_global, 1, MPI_INT64_T, - MPI_SUM, comm); + MPI_Allreduce(&num_entities_local, &num_entities_global, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm); topology_node.append_attribute("NumberOfElements") = std::to_string(num_entities_global).c_str(); topology_node.append_attribute("NodesPerElement") = num_nodes_per_entity; @@ -149,7 +149,8 @@ void xdmf_mesh::add_topology_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_local = num_entities_local; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, + comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item(topology_node, h5_id, h5_path, std::span(topology_data), @@ -203,7 +204,8 @@ void xdmf_mesh::add_geometry_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_local = num_points_local; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, + comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item(geometry_node, h5_id, h5_path, std::span(x), offset, shape, "", diff --git a/cpp/dolfinx/io/xdmf_mesh.h b/cpp/dolfinx/io/xdmf_mesh.h index 804f0ee5a35..c8e2ddf629d 100644 --- a/cpp/dolfinx/io/xdmf_mesh.h +++ b/cpp/dolfinx/io/xdmf_mesh.h @@ -129,11 +129,11 @@ void add_meshtags(MPI_Comm comm, const mesh::MeshTags& meshtags, std::int64_t global_num_values = 0; const std::int64_t local_num_values = num_active_entities; - MPI_Allreduce(&local_num_values, &global_num_values, 1, MPI_INT64_T, MPI_SUM, + MPI_Allreduce(&local_num_values, &global_num_values, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm); const std::int64_t num_local = num_active_entities; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item( attribute_node, h5_id, path_prefix 
+ std::string("/Values"), diff --git a/cpp/dolfinx/io/xdmf_utils.cpp b/cpp/dolfinx/io/xdmf_utils.cpp index bb43ba4855a..38f11963cf4 100644 --- a/cpp/dolfinx/io/xdmf_utils.cpp +++ b/cpp/dolfinx/io/xdmf_utils.cpp @@ -284,7 +284,8 @@ xdmf_utils::distribute_entity_data( MPI_Comm comm = topology.comm(); MPI_Datatype compound_type; - MPI_Type_contiguous(entities_v.extent(1), MPI_INT64_T, &compound_type); + MPI_Type_contiguous(entities_v.extent(1), dolfinx::MPI::mpi_t, + &compound_type); MPI_Type_commit(&compound_type); // -- B. Send entities and entity data to postmaster @@ -379,8 +380,7 @@ xdmf_utils::distribute_entity_data( err = MPI_Neighbor_alltoallv( send_values_buffer.data(), num_items_send.data(), send_disp.data(), dolfinx::MPI::mpi_t, recv_values_buffer.data(), - num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, - comm0); + num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&comm0); dolfinx::MPI::check_error(comm, err); @@ -463,10 +463,11 @@ xdmf_utils::distribute_entity_data( [](auto x) { return x.second; }); std::vector recv_buffer(recv_disp.back()); - err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(), - send_disp.data(), MPI_INT64_T, - recv_buffer.data(), num_items_recv.data(), - recv_disp.data(), MPI_INT64_T, comm0); + err = MPI_Neighbor_alltoallv( + send_buffer.data(), num_items_send.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), + num_items_recv.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&comm0); dolfinx::MPI::check_error(comm, err); @@ -552,8 +553,7 @@ xdmf_utils::distribute_entity_data( err = MPI_Neighbor_alltoallv( send_values_buffer.data(), num_items_send.data(), send_disp.data(), dolfinx::MPI::mpi_t, recv_values_buffer.data(), - num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, - comm0); + num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm, err); diff --git a/cpp/dolfinx/io/xdmf_utils.h b/cpp/dolfinx/io/xdmf_utils.h index c18eed98e80..6f26a86ccdd 100644 --- a/cpp/dolfinx/io/xdmf_utils.h +++ b/cpp/dolfinx/io/xdmf_utils.h @@ -318,7 +318,8 @@ std::vector get_dataset(MPI_Comm comm, const pugi::xml_node& dataset_node, std::int64_t size_global = 0; const std::int64_t size_local = data_vector.size(); - MPI_Allreduce(&size_local, &size_global, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Allreduce(&size_local, &size_global, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm); if (size != size_global) { throw std::runtime_error( diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h index d641bf18cb8..3be42d0840a 100644 --- a/cpp/dolfinx/la/MatrixCSR.h +++ b/cpp/dolfinx/la/MatrixCSR.h @@ -573,9 +573,10 @@ MatrixCSR::MatrixCSR(const SparsityPattern& p, BlockMode mode) ghost_index_array.resize(recv_disp.back()); MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, + send_disp.data(), dolfinx::MPI::mpi_t, ghost_index_array.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, _comm.comm()); + recv_disp.data(), dolfinx::MPI::mpi_t, + _comm.comm()); } // Store receive displacements for future use, when transferring @@ -637,7 +638,7 @@ MatrixCSR::to_dense() const for (int i0 = 0; i0 < _bs[0]; ++i0) for (int i1 = 0; i1 < _bs[1]; ++i1) { - std::array local_col {_cols[j]}; + std::array local_col{_cols[j]}; std::array global_col{0}; _index_maps[1]->local_to_global(local_col, 
global_col); A[(r * _bs[1] + i0) * ncols * _bs[0] + global_col[0] * _bs[1] + i1] diff --git a/cpp/dolfinx/la/SparsityPattern.cpp b/cpp/dolfinx/la/SparsityPattern.cpp index ac7ee1b6efc..4ca4233890a 100644 --- a/cpp/dolfinx/la/SparsityPattern.cpp +++ b/cpp/dolfinx/la/SparsityPattern.cpp @@ -335,8 +335,9 @@ void SparsityPattern::finalize() ghost_data_in.resize(recv_disp.back()); MPI_Neighbor_alltoallv(ghost_data.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, ghost_data_in.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + send_disp.data(), dolfinx::MPI::mpi_t, + ghost_data_in.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, comm); MPI_Comm_free(&comm); } diff --git a/cpp/dolfinx/mesh/Topology.cpp b/cpp/dolfinx/mesh/Topology.cpp index c1bed136587..3b89faf84d3 100644 --- a/cpp/dolfinx/mesh/Topology.cpp +++ b/cpp/dolfinx/mesh/Topology.cpp @@ -50,7 +50,8 @@ determine_sharing_ranks(MPI_Comm comm, std::span indices) { std::int64_t max_index = indices.empty() ? 0 : *std::ranges::max_element(indices); - MPI_Allreduce(&max_index, &global_range, 1, MPI_INT64_T, MPI_MAX, comm); + MPI_Allreduce(&max_index, &global_range, 1, + dolfinx::MPI::mpi_t, MPI_MAX, comm); global_range += 1; } @@ -468,8 +469,9 @@ exchange_indexing(MPI_Comm comm, std::span indices, std::next(recv_disp.begin())); recv_data = std::vector(recv_disp.back()); MPI_Neighbor_alltoallv(sbuffer.data(), send_sizes.data(), send_disp.data(), - MPI_INT64_T, recv_data.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm0); + dolfinx::MPI::mpi_t, recv_data.data(), + recv_sizes.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, comm0); MPI_Comm_free(&comm0); } @@ -583,8 +585,9 @@ std::vector> exchange_ghost_indexing( // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, recv_buffer.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + send_disp.data(), dolfinx::MPI::mpi_t, + recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, comm1); MPI_Comm_free(&comm1); @@ -665,10 +668,10 @@ std::vector> exchange_ghost_indexing( std::partial_sum(recv_sizes.begin(), recv_sizes.end(), std::next(recv_disp.begin())); std::vector recv_buffer(recv_disp.back()); - MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, recv_buffer.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, - comm); + MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, comm); std::vector> data; data.reserve(recv_buffer.size() / 3); @@ -1125,7 +1128,8 @@ Topology mesh::create_topology( std::int64_t global_offset_v = 0; { const std::int64_t nlocal = owned_vertices.size(); - MPI_Exscan(&nlocal, &global_offset_v, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&nlocal, &global_offset_v, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); } // Get global indices of ghost cells diff --git a/cpp/dolfinx/mesh/graphbuild.cpp b/cpp/dolfinx/mesh/graphbuild.cpp index 9229c686eb5..fc6a906b73a 100644 --- a/cpp/dolfinx/mesh/graphbuild.cpp +++ b/cpp/dolfinx/mesh/graphbuild.cpp @@ -81,8 +81,8 @@ graph::AdjacencyList compute_nonlocal_dual_graph( MPI_Request request_cell_offset; { const std::int64_t num_local = local_graph.num_nodes(); - MPI_Iexscan(&num_local, &cell_offset, 1, MPI_INT64_T, MPI_SUM, comm, - 
&request_cell_offset); + MPI_Iexscan(&num_local, &cell_offset, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm, &request_cell_offset); } // Find (max_vert_per_facet, min_vertex_index, max_vertex_index) @@ -100,8 +100,8 @@ graph::AdjacencyList compute_nonlocal_dual_graph( // Compute reductions std::array recv_buffer_r; - MPI_Allreduce(send_buffer_r.data(), recv_buffer_r.data(), 3, MPI_INT64_T, - MPI_MAX, comm); + MPI_Allreduce(send_buffer_r.data(), recv_buffer_r.data(), 3, + dolfinx::MPI::mpi_t, MPI_MAX, comm); assert(recv_buffer_r[1] != std::numeric_limits::min()); assert(recv_buffer_r[2] != -1); fshape1 = recv_buffer_r[0]; @@ -217,7 +217,8 @@ graph::AdjacencyList compute_nonlocal_dual_graph( // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); + MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, + &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), @@ -292,9 +293,10 @@ graph::AdjacencyList compute_nonlocal_dual_graph( // Send back data std::vector recv_buffer1(send_disp.back()); MPI_Neighbor_alltoallv(send_buffer1.data(), num_items_recv.data(), - recv_disp.data(), MPI_INT64_T, recv_buffer1.data(), - num_items_per_dest.data(), send_disp.data(), - MPI_INT64_T, neigh_comm1); + recv_disp.data(), dolfinx::MPI::mpi_t, + recv_buffer1.data(), num_items_per_dest.data(), + send_disp.data(), dolfinx::MPI::mpi_t, + neigh_comm1); MPI_Comm_free(&neigh_comm1); // --- Build new graph diff --git a/cpp/dolfinx/mesh/topologycomputation.cpp b/cpp/dolfinx/mesh/topologycomputation.cpp index edefdbd1e26..d5b86324d04 100644 --- a/cpp/dolfinx/mesh/topologycomputation.cpp +++ b/cpp/dolfinx/mesh/topologycomputation.cpp @@ -256,10 +256,10 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, std::back_inserter(recv_disp)); recv_data.resize(recv_disp.back()); - MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, recv_data.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, - neighbor_comm); + MPI_Neighbor_alltoallv( + send_buffer.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_data.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, neighbor_comm); } // List of (local index, sorted global vertices) pairs received from @@ -376,7 +376,8 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, { const std::int64_t _num_local = num_local; std::int64_t local_offset = 0; - MPI_Exscan(&_num_local, &local_offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&_num_local, &local_offset, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); // Send global indices for same entities that we sent before. 
This // uses the same pattern as before, so we can match up the received @@ -400,10 +401,10 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, { return a / num_vertices_per_e; }); recv_data.resize(recv_disp.back()); - MPI_Neighbor_alltoallv(send_global_index_data.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, recv_data.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, - neighbor_comm); + MPI_Neighbor_alltoallv( + send_global_index_data.data(), send_sizes.data(), send_disp.data(), + dolfinx::MPI::mpi_t, recv_data.data(), recv_sizes.data(), + recv_disp.data(), dolfinx::MPI::mpi_t, neighbor_comm); MPI_Comm_free(&neighbor_comm); // Map back received indices diff --git a/cpp/dolfinx/mesh/utils.h b/cpp/dolfinx/mesh/utils.h index 1fa5724e39d..a746b1df395 100644 --- a/cpp/dolfinx/mesh/utils.h +++ b/cpp/dolfinx/mesh/utils.h @@ -831,7 +831,8 @@ Mesh> create_mesh( assert(cells1.size() % num_cell_nodes == 0); std::int64_t offset = 0; std::int64_t num_owned = cells1.size() / num_cell_nodes; - MPI_Exscan(&num_owned, &offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); original_idx1.resize(num_owned); std::iota(original_idx1.begin(), original_idx1.end(), offset); } @@ -1039,7 +1040,8 @@ Mesh> create_mesh( } // Add on global offset std::int64_t global_offset = 0; - MPI_Exscan(&num_owned, &global_offset, 1, MPI_INT64_T, MPI_SUM, comm); + MPI_Exscan(&num_owned, &global_offset, 1, dolfinx::MPI::mpi_t, + MPI_SUM, comm); for (std::int32_t i = 0; i < num_cell_types; ++i) { std::iota(original_idx1[i].begin(), original_idx1[i].end(), diff --git a/cpp/dolfinx/refinement/utils.cpp b/cpp/dolfinx/refinement/utils.cpp index 904df7dd3f7..b5fbdbbb199 100644 --- a/cpp/dolfinx/refinement/utils.cpp +++ b/cpp/dolfinx/refinement/utils.cpp @@ -84,8 +84,8 @@ void refinement::update_logical_edgefunction( data_to_recv.resize(recv_disp.back()); MPI_Neighbor_alltoallv(data_to_send.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, data_to_recv.data(), - recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + send_disp.data(), dolfinx::MPI::mpi_t, data_to_recv.data(), + recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, comm); } @@ -107,7 +107,7 @@ refinement::adjust_indices(const common::IndexMap& map, std::int32_t n) // Get offset for 'n' for this process const std::int64_t num_local = n; std::int64_t global_offset = 0; - MPI_Exscan(&num_local, &global_offset, 1, MPI_INT64_T, MPI_SUM, map.comm()); + MPI_Exscan(&num_local, &global_offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, map.comm()); std::span owners = map.owners(); std::span src = map.src(); @@ -121,8 +121,8 @@ refinement::adjust_indices(const common::IndexMap& map, std::int32_t n) // Communicate offset to neighbors std::vector offsets(src.size(), 0); offsets.reserve(1); - MPI_Neighbor_allgather(&global_offset, 1, MPI_INT64_T, offsets.data(), 1, - MPI_INT64_T, comm); + MPI_Neighbor_allgather(&global_offset, 1, dolfinx::MPI::mpi_t, offsets.data(), 1, + dolfinx::MPI::mpi_t, comm); MPI_Comm_free(&comm); diff --git a/cpp/dolfinx/refinement/utils.h b/cpp/dolfinx/refinement/utils.h index ec550cb3575..3cee9cb8420 100644 --- a/cpp/dolfinx/refinement/utils.h +++ b/cpp/dolfinx/refinement/utils.h @@ -172,7 +172,8 @@ create_new_vertices(MPI_Comm comm, const std::int64_t num_local = n; std::int64_t global_offset = 0; - MPI_Exscan(&num_local, &global_offset, 1, MPI_INT64_T, MPI_SUM, mesh.comm()); + MPI_Exscan(&num_local, &global_offset, 1, dolfinx::MPI::mpi_t, + MPI_SUM, 
mesh.comm()); global_offset += mesh.topology()->index_map(0)->local_range()[1]; std::for_each(local_edge_to_new_vertex.begin(), local_edge_to_new_vertex.end(), @@ -239,9 +240,10 @@ create_new_vertices(MPI_Comm comm, received_values.resize(recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), MPI_INT64_T, + send_disp.data(), dolfinx::MPI::mpi_t, received_values.data(), recv_sizes.data(), - recv_disp.data(), MPI_INT64_T, comm); + recv_disp.data(), dolfinx::MPI::mpi_t, + comm); } // Add received remote global vertex indices to map From b926fbf3a575cd2b13b8bd9ab412d46780714df4 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 20:52:24 +0100 Subject: [PATCH 03/33] Add mpi type mapping for std::int32_t --- cpp/dolfinx/common/IndexMap.cpp | 19 +++++++++++-------- cpp/dolfinx/common/MPI.h | 3 ++- cpp/dolfinx/common/Scatterer.h | 6 ++++-- cpp/dolfinx/geometry/utils.h | 16 ++++++++-------- cpp/dolfinx/refinement/plaza.cpp | 4 ++-- 5 files changed, 27 insertions(+), 21 deletions(-) diff --git a/cpp/dolfinx/common/IndexMap.cpp b/cpp/dolfinx/common/IndexMap.cpp index 3603c69120f..a931e6367b8 100644 --- a/cpp/dolfinx/common/IndexMap.cpp +++ b/cpp/dolfinx/common/IndexMap.cpp @@ -116,8 +116,10 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span src, send_sizes.reserve(1); recv_sizes.reserve(1); MPI_Request sizes_request; - MPI_Ineighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T, recv_sizes.data(), - 1, MPI_INT32_T, comm0, &sizes_request); + MPI_Ineighbor_alltoall( + send_sizes.data(), 1, dolfinx::MPI::mpi_t _T, + recv_sizes.data(), 1, dolfinx::MPI::mpi_t _T, comm0, + &sizes_request); // Build send buffer and ghost position to send buffer position for (auto& d : send_data) @@ -739,8 +741,9 @@ common::stack_index_maps( std::vector recv_sizes(dest.size(), 0); send_sizes.reserve(1); recv_sizes.reserve(1); - ierr = MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T, - recv_sizes.data(), 1, MPI_INT32_T, comm0); + ierr = MPI_Neighbor_alltoall( + send_sizes.data(), 1, dolfinx::MPI::mpi_t _T, + recv_sizes.data(), 1, dolfinx::MPI::mpi_t _T, comm0); dolfinx::MPI::check_error(comm0, ierr); // Prepare displacement vectors @@ -1318,12 +1321,12 @@ std::array IndexMap::imbalance() const // Find the maximum number of owned indices and the maximum number of ghost // indices across all processes. - MPI_Allreduce(local_sizes.data(), max_count.data(), 2, MPI_INT32_T, MPI_MAX, - _comm.comm()); + MPI_Allreduce(local_sizes.data(), max_count.data(), 2, + dolfinx::MPI::mpi_t _T, MPI_MAX, _comm.comm()); std::int32_t total_num_ghosts = 0; - MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, MPI_INT32_T, MPI_SUM, - _comm.comm()); + MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, + dolfinx::MPI::mpi_t _T, MPI_SUM, _comm.comm()); // Compute the average number of owned and ghost indices per process. int comm_size = dolfinx::MPI::size(_comm.comm()); diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index f66f52319a8..a2e9024b6f2 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -1,4 +1,4 @@ -// Copyright (C) 2007-2023 Magnus Vikstrøm and Garth N. Wells +// Copyright (C) 2007-2023 Magnus Vikstrøm, Garth N. Wells and Paul T. 
Kühner // // This file is part of DOLFINx (https://www.fenicsproject.org) // @@ -294,6 +294,7 @@ MAP_TO_MPI_TYPE(unsigned long, MPI_UNSIGNED_LONG) MAP_TO_MPI_TYPE(long long, MPI_LONG_LONG) MAP_TO_MPI_TYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index 20d05d0a225..1cf4687781d 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -120,8 +120,10 @@ class Scatterer _displs_local.resize(_sizes_local.size() + 1); _sizes_remote.reserve(1); _sizes_local.reserve(1); - MPI_Neighbor_alltoall(_sizes_remote.data(), 1, MPI_INT32_T, - _sizes_local.data(), 1, MPI_INT32_T, _comm1.comm()); + MPI_Neighbor_alltoall(_sizes_remote.data(), 1, + dolfinx::MPI::mpi_t _T, + _sizes_local.data(), 1, + dolfinx::MPI::mpi_t _T, _comm1.comm()); std::partial_sum(_sizes_local.begin(), _sizes_local.end(), std::next(_displs_local.begin())); diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h index fab8b64dd74..1a98275ec69 100644 --- a/cpp/dolfinx/geometry/utils.h +++ b/cpp/dolfinx/geometry/utils.h @@ -827,10 +827,10 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, } std::vector recv_ranks(recv_offsets.back()); - MPI_Neighbor_alltoallv(cell_indicator.data(), send_sizes.data(), - send_offsets.data(), MPI_INT32_T, recv_ranks.data(), - recv_sizes.data(), recv_offsets.data(), MPI_INT32_T, - reverse_comm); + MPI_Neighbor_alltoallv( + cell_indicator.data(), send_sizes.data(), send_offsets.data(), + dolfinx::MPI::mpi_t _T, recv_ranks.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t _T, reverse_comm); std::vector point_owners(points.size() / 3, -1); for (std::size_t i = 0; i < unpack_map.size(); i++) @@ -946,10 +946,10 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, // Send ownership info std::vector dest_ranks(recv_offsets.back()); - MPI_Neighbor_alltoallv(send_owners.data(), send_sizes.data(), - send_offsets.data(), MPI_INT32_T, dest_ranks.data(), - recv_sizes.data(), recv_offsets.data(), MPI_INT32_T, - forward_comm); + MPI_Neighbor_alltoallv( + send_owners.data(), send_sizes.data(), send_offsets.data(), + dolfinx::MPI::mpi_t _T, dest_ranks.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t _T, forward_comm); // Unpack dest ranks if point owner is this rank std::vector owned_recv_ranks; diff --git a/cpp/dolfinx/refinement/plaza.cpp b/cpp/dolfinx/refinement/plaza.cpp index 8dcd037e19e..c632abe39a2 100644 --- a/cpp/dolfinx/refinement/plaza.cpp +++ b/cpp/dolfinx/refinement/plaza.cpp @@ -243,8 +243,8 @@ void plaza::impl::enforce_rules(MPI_Comm comm, } const std::int32_t update_count_old = update_count; - MPI_Allreduce(&update_count_old, &update_count, 1, MPI_INT32_T, MPI_SUM, - comm); + MPI_Allreduce(&update_count_old, &update_count, 1, + dolfinx::MPI::mpi_t _T, MPI_SUM, comm); } } //----------------------------------------------------------------------------- From 2ba98d8d6034769a6adf11c8396cb2cfee93fdda Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 20:53:33 +0100 Subject: [PATCH 04/33] Missing double cases --- cpp/dolfinx/common/Table.cpp | 5 +++-- cpp/dolfinx/la/MatrixCSR.h | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cpp/dolfinx/common/Table.cpp 
b/cpp/dolfinx/common/Table.cpp index 814cc43d14e..241cebd8532 100644 --- a/cpp/dolfinx/common/Table.cpp +++ b/cpp/dolfinx/common/Table.cpp @@ -144,8 +144,9 @@ Table Table::reduce(MPI_Comm comm, Table::Reduction reduction) const std::partial_sum(pcounts.begin(), pcounts.end(), offsets.begin() + 1); std::vector values_all(offsets.back()); - err = MPI_Gatherv(values.data(), values.size(), MPI_DOUBLE, values_all.data(), - pcounts.data(), offsets.data(), MPI_DOUBLE, 0, comm); + err = MPI_Gatherv(values.data(), values.size(), dolfinx::MPI::mpi_t, + values_all.data(), pcounts.data(), offsets.data(), + dolfinx::MPI::mpi_t, 0, comm); dolfinx::MPI::check_error(comm, err); // Return empty table on rank > 0 diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h index 3be42d0840a..b655aa0a649 100644 --- a/cpp/dolfinx/la/MatrixCSR.h +++ b/cpp/dolfinx/la/MatrixCSR.h @@ -726,7 +726,8 @@ double MatrixCSR::squared_norm() const _data.cbegin(), std::next(_data.cbegin(), _row_ptr[num_owned_rows] * bs2), double(0), [](auto norm, value_type y) { return norm + std::norm(y); }); double norm_sq; - MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM, _comm.comm()); + MPI_Allreduce(&norm_sq_local, &norm_sq, 1, dolfinx::MPI::mpi_t, + MPI_SUM, _comm.comm()); return norm_sq; } //----------------------------------------------------------------------------- From 8d64f01f95c88c40c7017fe533680b234c73e231 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 20:56:07 +0100 Subject: [PATCH 05/33] Add documentation --- cpp/dolfinx/common/MPI.h | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index a2e9024b6f2..05875e5a73c 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -272,6 +272,8 @@ struct dependent_false : std::false_type template struct mpi_type_mapping; +/// @brief Retrieves the MPI data type associated to the provided type. 
+/// @tparam T cpp type to map template MPI_Datatype mpi_t = mpi_type_mapping::type; @@ -421,8 +423,9 @@ distribute_to_postoffice(MPI_Comm comm, const U& x, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer_index.data(), num_items_recv.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm); + dolfinx::MPI::mpi_t, recv_buffer_index.data(), + num_items_recv.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, neigh_comm); dolfinx::MPI::check_error(comm, err); // Send/receive data (x) @@ -551,8 +554,9 @@ distribute_from_postoffice(MPI_Comm comm, std::span indices, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_src.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer_index.data(), num_items_recv.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm0); + dolfinx::MPI::mpi_t, recv_buffer_index.data(), + num_items_recv.data(), recv_disp.data(), + dolfinx::MPI::mpi_t, neigh_comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&neigh_comm0); @@ -677,15 +681,16 @@ distribute_data(MPI_Comm comm0, std::span indices, int err; std::int64_t shape0 = 0; - err = MPI_Allreduce(&shape0_local, &shape0, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm0); + err = MPI_Allreduce(&shape0_local, &shape0, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm0); dolfinx::MPI::check_error(comm0, err); std::int64_t rank_offset = -1; if (comm1 != MPI_COMM_NULL) { rank_offset = 0; - err = MPI_Exscan(&shape0_local, &rank_offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, - comm1); + err = MPI_Exscan(&shape0_local, &rank_offset, 1, + dolfinx::MPI::mpi_t, MPI_SUM, comm1); dolfinx::MPI::check_error(comm1, err); } else From d492e11bf899a380fb72ec28a825eb0162903209 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:02:17 +0100 Subject: [PATCH 06/33] Tidy and fix --- cpp/dolfinx/common/IndexMap.cpp | 16 ++++++++-------- cpp/dolfinx/common/MPI.h | 12 +++++------- cpp/dolfinx/common/Scatterer.h | 4 ++-- cpp/dolfinx/geometry/utils.h | 8 ++++---- cpp/dolfinx/refinement/plaza.cpp | 2 +- 5 files changed, 20 insertions(+), 22 deletions(-) diff --git a/cpp/dolfinx/common/IndexMap.cpp b/cpp/dolfinx/common/IndexMap.cpp index a931e6367b8..1531c17fe25 100644 --- a/cpp/dolfinx/common/IndexMap.cpp +++ b/cpp/dolfinx/common/IndexMap.cpp @@ -116,10 +116,10 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span src, send_sizes.reserve(1); recv_sizes.reserve(1); MPI_Request sizes_request; - MPI_Ineighbor_alltoall( - send_sizes.data(), 1, dolfinx::MPI::mpi_t _T, - recv_sizes.data(), 1, dolfinx::MPI::mpi_t _T, comm0, - &sizes_request); + MPI_Ineighbor_alltoall(send_sizes.data(), 1, + dolfinx::MPI::mpi_t, recv_sizes.data(), + 1, dolfinx::MPI::mpi_t, comm0, + &sizes_request); // Build send buffer and ghost position to send buffer position for (auto& d : send_data) @@ -742,8 +742,8 @@ common::stack_index_maps( send_sizes.reserve(1); recv_sizes.reserve(1); ierr = MPI_Neighbor_alltoall( - send_sizes.data(), 1, dolfinx::MPI::mpi_t _T, - recv_sizes.data(), 1, dolfinx::MPI::mpi_t _T, comm0); + send_sizes.data(), 1, dolfinx::MPI::mpi_t, + recv_sizes.data(), 1, dolfinx::MPI::mpi_t, comm0); dolfinx::MPI::check_error(comm0, ierr); // Prepare displacement vectors @@ -1322,11 +1322,11 @@ std::array IndexMap::imbalance() const // Find the maximum number of owned indices and the maximum number of 
ghost // indices across all processes. MPI_Allreduce(local_sizes.data(), max_count.data(), 2, - dolfinx::MPI::mpi_t _T, MPI_MAX, _comm.comm()); + dolfinx::MPI::mpi_t, MPI_MAX, _comm.comm()); std::int32_t total_num_ghosts = 0; MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, - dolfinx::MPI::mpi_t _T, MPI_SUM, _comm.comm()); + dolfinx::MPI::mpi_t, MPI_SUM, _comm.comm()); // Compute the average number of owned and ghost indices per process. int comm_size = dolfinx::MPI::size(_comm.comm()); diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 05875e5a73c..ff45b94b558 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -288,16 +288,14 @@ MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) -MAP_TO_MPI_TYPE(short int, MPI_SHORT) -MAP_TO_MPI_TYPE(int, MPI_INT) -MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) -MAP_TO_MPI_TYPE(long int, MPI_LONG) -MAP_TO_MPI_TYPE(unsigned long, MPI_UNSIGNED_LONG) -MAP_TO_MPI_TYPE(long long, MPI_LONG_LONG) -MAP_TO_MPI_TYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) +MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) +MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) +MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) +MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) //--------------------------------------------------------------------------- diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index 1cf4687781d..5d582666e1d 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -121,9 +121,9 @@ class Scatterer _sizes_remote.reserve(1); _sizes_local.reserve(1); MPI_Neighbor_alltoall(_sizes_remote.data(), 1, - dolfinx::MPI::mpi_t _T, + dolfinx::MPI::mpi_t, _sizes_local.data(), 1, - dolfinx::MPI::mpi_t _T, _comm1.comm()); + dolfinx::MPI::mpi_t, _comm1.comm()); std::partial_sum(_sizes_local.begin(), _sizes_local.end(), std::next(_displs_local.begin())); diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h index 1a98275ec69..2bf4c798bc5 100644 --- a/cpp/dolfinx/geometry/utils.h +++ b/cpp/dolfinx/geometry/utils.h @@ -829,8 +829,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector recv_ranks(recv_offsets.back()); MPI_Neighbor_alltoallv( cell_indicator.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t _T, recv_ranks.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t _T, reverse_comm); + dolfinx::MPI::mpi_t, recv_ranks.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t, reverse_comm); std::vector point_owners(points.size() / 3, -1); for (std::size_t i = 0; i < unpack_map.size(); i++) @@ -948,8 +948,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector dest_ranks(recv_offsets.back()); MPI_Neighbor_alltoallv( send_owners.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t _T, dest_ranks.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t _T, forward_comm); + dolfinx::MPI::mpi_t, dest_ranks.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t, forward_comm); // Unpack dest ranks if point owner is this rank std::vector owned_recv_ranks; diff --git a/cpp/dolfinx/refinement/plaza.cpp 
b/cpp/dolfinx/refinement/plaza.cpp index c632abe39a2..6e129ef3982 100644 --- a/cpp/dolfinx/refinement/plaza.cpp +++ b/cpp/dolfinx/refinement/plaza.cpp @@ -244,7 +244,7 @@ void plaza::impl::enforce_rules(MPI_Comm comm, const std::int32_t update_count_old = update_count; MPI_Allreduce(&update_count_old, &update_count, 1, - dolfinx::MPI::mpi_t _T, MPI_SUM, comm); + dolfinx::MPI::mpi_t, MPI_SUM, comm); } } //----------------------------------------------------------------------------- From 4183a15fade98ac61c5ac5068c2c9b1b824f35e8 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:11:36 +0100 Subject: [PATCH 07/33] Some more --- cpp/dolfinx/graph/partitioners.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index ae73bbcc44b..89a1269ed74 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -555,9 +555,8 @@ graph::partition_fn graph::parmetis::partitioner(double imbalance, const int psize = dolfinx::MPI::size(pcomm); const idx_t num_local_nodes = graph.num_nodes(); node_disp = std::vector(psize + 1, 0); - MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t(), - node_disp.data() + 1, 1, dolfinx::MPI::mpi_t(), - pcomm); + MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t, + node_disp.data() + 1, 1, dolfinx::MPI::mpi_t, pcomm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); std::vector array(graph.array().begin(), graph.array().end()); std::vector offsets(graph.offsets().begin(), @@ -632,8 +631,8 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed, common::Timer timer1("KaHIP: build adjacency data"); std::vector node_disp(dolfinx::MPI::size(comm) + 1, 0); const T num_local_nodes = graph.num_nodes(); - MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t(), - node_disp.data() + 1, 1, dolfinx::MPI::mpi_t(), comm); + MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t, + node_disp.data() + 1, 1, dolfinx::MPI::mpi_t, comm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); std::vector array(graph.array().begin(), graph.array().end()); std::vector offsets(graph.offsets().begin(), graph.offsets().end()); From ec943ae7e51cbbef934b037234df47c4c39f5d17 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:30:23 +0100 Subject: [PATCH 08/33] More fixes --- cpp/dolfinx/common/Scatterer.h | 18 +++++++++--------- cpp/dolfinx/fem/interpolate.h | 4 ++-- cpp/dolfinx/geometry/BoundingBoxTree.h | 4 ++-- cpp/dolfinx/geometry/utils.h | 8 ++++---- cpp/dolfinx/la/MatrixCSR.h | 4 ++-- cpp/dolfinx/la/Vector.h | 4 ++-- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index 5d582666e1d..cf4e8ea05e3 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -210,9 +210,9 @@ class Scatterer { assert(requests.size() == std::size_t(1)); MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_local.data(), - _displs_local.data(), dolfinx::MPI::mpi_t(), + _displs_local.data(), dolfinx::MPI::mpi_t, recv_buffer.data(), _sizes_remote.data(), - _displs_remote.data(), dolfinx::MPI::mpi_t(), + _displs_remote.data(), dolfinx::MPI::mpi_t, _comm0.comm(), requests.data()); break; } @@ -222,14 +222,14 @@ class Scatterer for (std::size_t i = 0; i < _src.size(); i++) { MPI_Irecv(recv_buffer.data() + 
_displs_remote[i], _sizes_remote[i], - dolfinx::MPI::mpi_t(), _src[i], MPI_ANY_TAG, _comm0.comm(), + dolfinx::MPI::mpi_t, _src[i], MPI_ANY_TAG, _comm0.comm(), &requests[i]); } for (std::size_t i = 0; i < _dest.size(); i++) { MPI_Isend(send_buffer.data() + _displs_local[i], _sizes_local[i], - dolfinx::MPI::mpi_t(), _dest[i], 0, _comm0.comm(), + dolfinx::MPI::mpi_t, _dest[i], 0, _comm0.comm(), &requests[i + _src.size()]); } break; @@ -408,8 +408,8 @@ class Scatterer assert(requests.size() == 1); MPI_Ineighbor_alltoallv( send_buffer.data(), _sizes_remote.data(), _displs_remote.data(), - MPI::mpi_t(), recv_buffer.data(), _sizes_local.data(), - _displs_local.data(), MPI::mpi_t(), _comm1.comm(), &requests[0]); + MPI::mpi_t, recv_buffer.data(), _sizes_local.data(), + _displs_local.data(), MPI::mpi_t, _comm1.comm(), &requests[0]); break; } case type::p2p: @@ -419,8 +419,8 @@ class Scatterer for (std::size_t i = 0; i < _dest.size(); i++) { MPI_Irecv(recv_buffer.data() + _displs_local[i], _sizes_local[i], - dolfinx::MPI::mpi_t(), _dest[i], MPI_ANY_TAG, - _comm0.comm(), &requests[i]); + dolfinx::MPI::mpi_t, _dest[i], MPI_ANY_TAG, _comm0.comm(), + &requests[i]); } // Start non-blocking receive from neighbor process for which an owned @@ -428,7 +428,7 @@ class Scatterer for (std::size_t i = 0; i < _src.size(); i++) { MPI_Isend(send_buffer.data() + _displs_remote[i], _sizes_remote[i], - dolfinx::MPI::mpi_t(), _src[i], 0, _comm0.comm(), + dolfinx::MPI::mpi_t, _src[i], 0, _comm0.comm(), &requests[i + _dest.size()]); } break; diff --git a/cpp/dolfinx/fem/interpolate.h b/cpp/dolfinx/fem/interpolate.h index 4b6f05c9b49..3141bc53c93 100644 --- a/cpp/dolfinx/fem/interpolate.h +++ b/cpp/dolfinx/fem/interpolate.h @@ -273,9 +273,9 @@ void scatter_values(MPI_Comm comm, std::span src_ranks, std::vector values(recv_offsets.back()); values.reserve(1); MPI_Neighbor_alltoallv(send_values.data_handle(), send_sizes.data(), - send_offsets.data(), dolfinx::MPI::mpi_t(), + send_offsets.data(), dolfinx::MPI::mpi_t, values.data(), recv_sizes.data(), recv_offsets.data(), - dolfinx::MPI::mpi_t(), reverse_comm); + dolfinx::MPI::mpi_t, reverse_comm); MPI_Comm_free(&reverse_comm); // Insert values received from neighborhood communicator in output diff --git a/cpp/dolfinx/geometry/BoundingBoxTree.h b/cpp/dolfinx/geometry/BoundingBoxTree.h index 2c9cc9cd266..d9b91c4573b 100644 --- a/cpp/dolfinx/geometry/BoundingBoxTree.h +++ b/cpp/dolfinx/geometry/BoundingBoxTree.h @@ -335,8 +335,8 @@ class BoundingBoxTree if (num_bboxes() > 0) std::copy_n(std::prev(_bbox_coordinates.end(), 6), 6, send_bbox.begin()); std::vector recv_bbox(mpi_size * 6); - MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_t(), - recv_bbox.data(), 6, dolfinx::MPI::mpi_t(), comm); + MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_t, recv_bbox.data(), + 6, dolfinx::MPI::mpi_t, comm); std::vector, std::int32_t>> _recv_bbox(mpi_size); for (std::size_t i = 0; i < _recv_bbox.size(); ++i) diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h index 2bf4c798bc5..b121cadf8fb 100644 --- a/cpp/dolfinx/geometry/utils.h +++ b/cpp/dolfinx/geometry/utils.h @@ -771,8 +771,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector received_points((std::size_t)recv_offsets.back()); MPI_Neighbor_alltoallv( send_data.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t(), received_points.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t(), forward_comm); + dolfinx::MPI::mpi_t, 
received_points.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t, forward_comm); // Get mesh geometry for closest entity const mesh::Geometry& geometry = mesh.geometry(); @@ -905,8 +905,8 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, std::vector recv_distances(recv_offsets.back()); MPI_Neighbor_alltoallv( squared_distances.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t(), recv_distances.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t(), reverse_comm); + dolfinx::MPI::mpi_t, recv_distances.data(), recv_sizes.data(), + recv_offsets.data(), dolfinx::MPI::mpi_t, reverse_comm); // Update point ownership with extrapolation information std::vector closest_distance(point_owners.size(), diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h index b655aa0a649..adfdecfae8c 100644 --- a/cpp/dolfinx/la/MatrixCSR.h +++ b/cpp/dolfinx/la/MatrixCSR.h @@ -685,9 +685,9 @@ void MatrixCSR::scatter_rev_begin() int status = MPI_Ineighbor_alltoallv( _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(), - dolfinx::MPI::mpi_t(), _ghost_value_data_in.data(), + dolfinx::MPI::mpi_t, _ghost_value_data_in.data(), val_recv_count.data(), _val_recv_disp.data(), - dolfinx::MPI::mpi_t(), _comm.comm(), &_request); + dolfinx::MPI::mpi_t, _comm.comm(), &_request); assert(status == MPI_SUCCESS); } //----------------------------------------------------------------------------- diff --git a/cpp/dolfinx/la/Vector.h b/cpp/dolfinx/la/Vector.h index 1775653358f..306205338b9 100644 --- a/cpp/dolfinx/la/Vector.h +++ b/cpp/dolfinx/la/Vector.h @@ -279,7 +279,7 @@ auto norm(const V& x, Norm type = Norm::l2) = std::accumulate(data.begin(), data.end(), U(0), [](auto norm, auto x) { return norm + std::abs(x); }); U l1(0); - MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_t(), MPI_SUM, + MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_t, MPI_SUM, x.index_map()->comm()); return l1; } @@ -293,7 +293,7 @@ auto norm(const V& x, Norm type = Norm::l2) data, [](T a, T b) { return std::norm(a) < std::norm(b); }); auto local_linf = std::abs(*max_pos); decltype(local_linf) linf = 0; - MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_t(), + MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_t, MPI_MAX, x.index_map()->comm()); return linf; } From 0cbc97241214d7fe642d7181d783648163fd8367 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:44:37 +0100 Subject: [PATCH 09/33] Fix order --- cpp/dolfinx/common/MPI.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index ff45b94b558..9484406d7da 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -272,11 +272,6 @@ struct dependent_false : std::false_type template struct mpi_type_mapping; -/// @brief Retrieves the MPI data type associated to the provided type. -/// @tparam T cpp type to map -template -MPI_Datatype mpi_t = mpi_type_mapping::type; - #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ struct mpi_type_mapping \ @@ -298,6 +293,11 @@ MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) +/// @brief Retrieves the MPI data type associated to the provided type. 
+/// @tparam T cpp type to map +template +MPI_Datatype mpi_t = mpi_type_mapping::type; + //--------------------------------------------------------------------------- template std::pair, From 7094daab5179d96c1bc0fae31500c5e65a11e115 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:55:06 +0100 Subject: [PATCH 10/33] Simplify --- cpp/dolfinx/common/MPI.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 9484406d7da..31d127c68f8 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -269,15 +269,20 @@ struct dependent_false : std::false_type }; /// MPI Type -template -struct mpi_type_mapping; +template +struct mpi_type_mapping +{ + static inline MPI_Datatype type = MPI_T; +}; + +/// @brief Retrieves the MPI data type associated to the provided type. +/// @tparam T cpp type to map +template +MPI_Datatype mpi_t = mpi_type_mapping::type; #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ - struct mpi_type_mapping \ - { \ - static inline MPI_Datatype type = mpi_t; \ - }; + struct mpi_type_mapping; MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) @@ -293,11 +298,6 @@ MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) -/// @brief Retrieves the MPI data type associated to the provided type. -/// @tparam T cpp type to map -template -MPI_Datatype mpi_t = mpi_type_mapping::type; - //--------------------------------------------------------------------------- template std::pair, From 061caeb1f9d59b0058a6cad01393909ec170ed9d Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 2 Nov 2024 22:03:02 +0100 Subject: [PATCH 11/33] Revert --- cpp/dolfinx/common/MPI.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 31d127c68f8..ff45b94b558 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -269,11 +269,8 @@ struct dependent_false : std::false_type }; /// MPI Type -template -struct mpi_type_mapping -{ - static inline MPI_Datatype type = MPI_T; -}; +template +struct mpi_type_mapping; /// @brief Retrieves the MPI data type associated to the provided type. /// @tparam T cpp type to map @@ -282,7 +279,10 @@ MPI_Datatype mpi_t = mpi_type_mapping::type; #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ - struct mpi_type_mapping; + struct mpi_type_mapping \ + { \ + static inline MPI_Datatype type = mpi_t; \ + }; MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) From a36535eece481362ef059aff2e2429f604b5676c Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 10:29:30 +0100 Subject: [PATCH 12/33] Add default NULL type --- cpp/dolfinx/common/MPI.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index ff45b94b558..697858069e1 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -270,7 +270,10 @@ struct dependent_false : std::false_type /// MPI Type template -struct mpi_type_mapping; +struct mpi_type_mapping +{ + static inline MPI_Datatype type = MPI_DATATYPE_NULL; +}; /// @brief Retrieves the MPI data type associated to the provided type. 
/// @tparam T cpp type to map From d56f991fcfb19e1fb33506098d5d25647aba1f28 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 10:39:58 +0100 Subject: [PATCH 13/33] Sanity check static assert --- cpp/dolfinx/common/MPI.h | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 697858069e1..d199b40c7b4 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -273,6 +273,7 @@ template struct mpi_type_mapping { static inline MPI_Datatype type = MPI_DATATYPE_NULL; + static_assert(false, "Undefined MPI type mapping."); }; /// @brief Retrieves the MPI data type associated to the provided type. From 6a3e2d708356f2f652a30260f1aa5dd4173bd598 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:12:17 +0100 Subject: [PATCH 14/33] Fancy void_t fix? --- cpp/dolfinx/common/MPI.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index d199b40c7b4..6a46b2fdaa2 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -269,17 +269,15 @@ struct dependent_false : std::false_type }; /// MPI Type -template -struct mpi_type_mapping -{ - static inline MPI_Datatype type = MPI_DATATYPE_NULL; - static_assert(false, "Undefined MPI type mapping."); -}; +template +struct mpi_type_mapping; /// @brief Retrieves the MPI data type associated to the provided type. /// @tparam T cpp type to map template -MPI_Datatype mpi_t = mpi_type_mapping::type; +MPI_Datatype mpi_t + = mpi_type_mapping::type)>>::type; #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ From b58e41ad91b859b57888be138f7262adb4f54b47 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:24:01 +0100 Subject: [PATCH 15/33] Wrong position --- cpp/dolfinx/common/MPI.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 6a46b2fdaa2..81ed1d51f17 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -274,10 +274,8 @@ struct mpi_type_mapping; /// @brief Retrieves the MPI data type associated to the provided type. 
/// @tparam T cpp type to map -template -MPI_Datatype mpi_t - = mpi_type_mapping::type)>>::type; +template ::type)>> +MPI_Datatype mpi_t = mpi_type_mapping::type; #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ From 831a3dc1ebc10d8e8aa8ca60ed7c09d71b49e3af Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:37:23 +0100 Subject: [PATCH 16/33] Switch to non width types --- cpp/dolfinx/common/MPI.h | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 81ed1d51f17..f7ab3160fbf 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -288,14 +288,22 @@ MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) -MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) -MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) -MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) -MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) -MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) -MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) -MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) -MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) +// MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +// MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) +// MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) +// MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) +// MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) +// MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) +// MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) +// MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) +MAP_TO_MPI_TYPE(short int, MPI_SHORT) +MAP_TO_MPI_TYPE(int, MPI_INT) +MAP_TO_MPI_TYPE(long int, MPI_LONG) +MAP_TO_MPI_TYPE(long long int, MPI_LONG_LONG) +MAP_TO_MPI_TYPE(unsigned short int, MPI_UNSIGNED_SHORT) +MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) +MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) +MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) //--------------------------------------------------------------------------- From 07488b8cd7033ded0012b6260c2ae627632bde34 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:43:52 +0100 Subject: [PATCH 17/33] Revert type trait tickery and document odd behavior --- cpp/dolfinx/common/MPI.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index f7ab3160fbf..b62acffc7ee 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -269,12 +269,12 @@ struct dependent_false : std::false_type }; /// MPI Type -template +template struct mpi_type_mapping; /// @brief Retrieves the MPI data type associated to the provided type. 
/// @tparam T cpp type to map -template ::type)>> +template MPI_Datatype mpi_t = mpi_type_mapping::type; #define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ @@ -288,14 +288,6 @@ MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) -// MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) -// MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) -// MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) -// MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) -// MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) -// MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) -// MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) -// MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) MAP_TO_MPI_TYPE(short int, MPI_SHORT) MAP_TO_MPI_TYPE(int, MPI_INT) MAP_TO_MPI_TYPE(long int, MPI_LONG) @@ -306,6 +298,16 @@ MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) +// NOTE: this mapping fails on some platforms +// MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +// MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) +// MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) +// MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) +// MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) +// MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) +// MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) +// MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) + //--------------------------------------------------------------------------- template std::pair, From da9837082be88df8388fd97e4a498b57f6e82617 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 11:52:04 +0100 Subject: [PATCH 18/33] Add char types --- cpp/dolfinx/common/MPI.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index b62acffc7ee..03d04d9b399 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -288,10 +288,13 @@ MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) +MAP_TO_MPI_TYPE(char, MPI_CHAR) +MAP_TO_MPI_TYPE(signed char, MPI_SIGNED_CHAR) MAP_TO_MPI_TYPE(short int, MPI_SHORT) MAP_TO_MPI_TYPE(int, MPI_INT) MAP_TO_MPI_TYPE(long int, MPI_LONG) MAP_TO_MPI_TYPE(long long int, MPI_LONG_LONG) +MAP_TO_MPI_TYPE(unsigned char, MPI_UNSIGNED_CHAR) MAP_TO_MPI_TYPE(unsigned short int, MPI_UNSIGNED_SHORT) MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) From 1cf84d3b7548b9d7ba5ff42a8537151607029e4e Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 12:08:56 +0100 Subject: [PATCH 19/33] Doc --- cpp/dolfinx/common/MPI.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 03d04d9b399..a04c74c8cd0 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -269,6 +269,8 @@ struct dependent_false : std::false_type }; /// MPI Type + +/// @brief Type trait for MPI type conversions. template struct mpi_type_mapping; @@ -277,6 +279,8 @@ struct mpi_type_mapping; template MPI_Datatype mpi_t = mpi_type_mapping::type; +/// @brief Registers for cpp_t the correpsonding mpi_t which can then be +/// retrieved with mpi_t form here on. 
#define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ struct mpi_type_mapping \ From e838edda31842d62cdd84f1424026ed2001a182d Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 13:15:30 +0100 Subject: [PATCH 20/33] Doc for macros --- cpp/dolfinx/common/MPI.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index a04c74c8cd0..75b2677ee70 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -288,6 +288,9 @@ MPI_Datatype mpi_t = mpi_type_mapping::type; static inline MPI_Datatype type = mpi_t; \ }; +/// @defgroup MPI type mappings +/// @{ +/// @cond MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) @@ -304,6 +307,8 @@ MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) +/// @endcond +/// @} // NOTE: this mapping fails on some platforms // MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) From d0888abd1cbb57a51827c3cb1d1e44861184d177 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 3 Nov 2024 13:45:46 +0100 Subject: [PATCH 21/33] Enabel preprocessing for doxygen --- cpp/doc/Doxyfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/doc/Doxyfile b/cpp/doc/Doxyfile index 7b83728d717..63fe413c304 100644 --- a/cpp/doc/Doxyfile +++ b/cpp/doc/Doxyfile @@ -2272,7 +2272,7 @@ PERLMOD_MAKEVAR_PREFIX = # C-preprocessor directives found in the sources and include files. # The default value is: YES. -ENABLE_PREPROCESSING = NO +ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names # in the source code. If set to NO, only conditional compilation will be @@ -2289,7 +2289,7 @@ MACRO_EXPANSION = YES # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_ONLY_PREDEF = YES +EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. 
From 1452cf75c825c2b6921f192b0e197c4e5386eb6b Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:07:38 +0100 Subject: [PATCH 22/33] Reactivate fixed with types --- cpp/dolfinx/common/MPI.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 75b2677ee70..51b33312a2d 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -297,28 +297,28 @@ MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) MAP_TO_MPI_TYPE(char, MPI_CHAR) MAP_TO_MPI_TYPE(signed char, MPI_SIGNED_CHAR) -MAP_TO_MPI_TYPE(short int, MPI_SHORT) -MAP_TO_MPI_TYPE(int, MPI_INT) -MAP_TO_MPI_TYPE(long int, MPI_LONG) -MAP_TO_MPI_TYPE(long long int, MPI_LONG_LONG) -MAP_TO_MPI_TYPE(unsigned char, MPI_UNSIGNED_CHAR) -MAP_TO_MPI_TYPE(unsigned short int, MPI_UNSIGNED_SHORT) -MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) -MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) -MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) +// MAP_TO_MPI_TYPE(short int, MPI_SHORT) +// MAP_TO_MPI_TYPE(int, MPI_INT) +// MAP_TO_MPI_TYPE(long int, MPI_LONG) +// MAP_TO_MPI_TYPE(long long int, MPI_LONG_LONG) +// MAP_TO_MPI_TYPE(unsigned char, MPI_UNSIGNED_CHAR) +// MAP_TO_MPI_TYPE(unsigned short int, MPI_UNSIGNED_SHORT) +// MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) +// MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) +// MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) /// @endcond /// @} // NOTE: this mapping fails on some platforms -// MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) -// MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) -// MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) -// MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) -// MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) -// MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) -// MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) -// MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) +MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) +MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) +MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) +MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T) +MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) +MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) +MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) +MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) //--------------------------------------------------------------------------- template From d3260e423d9d09835822434264edf9b99ab2a56e Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:16:55 +0100 Subject: [PATCH 23/33] one more --- cpp/dolfinx/common/MPI.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 51b33312a2d..fc83c920719 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -296,7 +296,7 @@ MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) MAP_TO_MPI_TYPE(char, MPI_CHAR) -MAP_TO_MPI_TYPE(signed char, MPI_SIGNED_CHAR) +// MAP_TO_MPI_TYPE(signed char, MPI_SIGNED_CHAR) // MAP_TO_MPI_TYPE(short int, MPI_SHORT) // MAP_TO_MPI_TYPE(int, MPI_INT) // MAP_TO_MPI_TYPE(long int, MPI_LONG) From 5057409c7f3b3e88b2624cda9ed51290dfda7da8 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:22:05 
+0100 Subject: [PATCH 24/33] Type size dependent overloading --- cpp/dolfinx/common/MPI.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index fc83c920719..a61d95d808e 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -319,7 +319,9 @@ MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) - +# if __LONG_LONG_WIDTH__ > 64 +MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) +#endif //--------------------------------------------------------------------------- template std::pair, From 858f934e63422ae4bf728440b11ad9d430f18849 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:40:50 +0100 Subject: [PATCH 25/33] Another --- cpp/dolfinx/common/MPI.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index a61d95d808e..7802b953a70 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -319,7 +319,7 @@ MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) -# if __LONG_LONG_WIDTH__ > 64 +# if ULLONG_WIDTH > 64 MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) #endif //--------------------------------------------------------------------------- From 59b6de08d0a28f508c11c607fe13eec2acaeb1b6 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 10 Nov 2024 15:45:54 +0100 Subject: [PATCH 26/33] Try wordsize check --- cpp/dolfinx/common/MPI.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 7802b953a70..ce06f4e1438 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -319,7 +319,7 @@ MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) -# if ULLONG_WIDTH > 64 +# if __WORDSIZE != 64 MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) #endif //--------------------------------------------------------------------------- From 609e0ce01e4155e03d3adc59e96f3e9c3c8f7b8d Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 10 Nov 2024 15:58:57 +0100 Subject: [PATCH 27/33] combine checks --- cpp/dolfinx/common/MPI.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index ce06f4e1438..7a6539916a1 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -319,7 +319,7 @@ MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) -# if __WORDSIZE != 64 +# if (__WORDSIZE != 64) || (ULLONG_WIDTH > 64) MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) #endif //--------------------------------------------------------------------------- From 1c1f6ebf220d49881476b5178faba16da8923c94 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 10 Nov 2024 16:06:36 +0100 Subject: [PATCH 28/33] Give up, make mpi type explicit for 
Kahip and remove general support of long long unsigned --- cpp/dolfinx/common/MPI.h | 24 +++++------------------- cpp/dolfinx/graph/partitioners.cpp | 4 ++-- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 7a6539916a1..254aad96e93 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -291,26 +291,12 @@ MPI_Datatype mpi_t = mpi_type_mapping::type; /// @defgroup MPI type mappings /// @{ /// @cond +MAP_TO_MPI_TYPE(char, MPI_CHAR) +MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) MAP_TO_MPI_TYPE(std::complex, MPI_C_DOUBLE_COMPLEX) -MAP_TO_MPI_TYPE(char, MPI_CHAR) -// MAP_TO_MPI_TYPE(signed char, MPI_SIGNED_CHAR) -// MAP_TO_MPI_TYPE(short int, MPI_SHORT) -// MAP_TO_MPI_TYPE(int, MPI_INT) -// MAP_TO_MPI_TYPE(long int, MPI_LONG) -// MAP_TO_MPI_TYPE(long long int, MPI_LONG_LONG) -// MAP_TO_MPI_TYPE(unsigned char, MPI_UNSIGNED_CHAR) -// MAP_TO_MPI_TYPE(unsigned short int, MPI_UNSIGNED_SHORT) -// MAP_TO_MPI_TYPE(unsigned int, MPI_UNSIGNED) -// MAP_TO_MPI_TYPE(unsigned long int, MPI_UNSIGNED_LONG) -// MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) -MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) -/// @endcond -/// @} - -// NOTE: this mapping fails on some platforms MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T) MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T) MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) @@ -319,9 +305,9 @@ MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T) MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T) MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T) MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T) -# if (__WORDSIZE != 64) || (ULLONG_WIDTH > 64) -MAP_TO_MPI_TYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG) -#endif +/// @endcond +/// @} + //--------------------------------------------------------------------------- template std::pair, diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index 89a1269ed74..b90d5f12a8a 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -631,8 +631,8 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed, common::Timer timer1("KaHIP: build adjacency data"); std::vector node_disp(dolfinx::MPI::size(comm) + 1, 0); const T num_local_nodes = graph.num_nodes(); - MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t, - node_disp.data() + 1, 1, dolfinx::MPI::mpi_t, comm); + MPI_Allgather(&num_local_nodes, 1, MPI_UNSGINED_LONG_LONG, + node_disp.data() + 1, 1, MPI_UNSGINED_LONG_LONG, comm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); std::vector array(graph.array().begin(), graph.array().end()); std::vector offsets(graph.offsets().begin(), graph.offsets().end()); From aad8099d579ccbe2b4cceb9640bc62e22e0ebecc Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sun, 10 Nov 2024 16:10:27 +0100 Subject: [PATCH 29/33] typo --- cpp/dolfinx/graph/partitioners.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index b90d5f12a8a..c062558987d 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -631,8 +631,8 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed, common::Timer timer1("KaHIP: build adjacency data"); std::vector node_disp(dolfinx::MPI::size(comm) + 1, 0); const T num_local_nodes = 
graph.num_nodes(); - MPI_Allgather(&num_local_nodes, 1, MPI_UNSGINED_LONG_LONG, - node_disp.data() + 1, 1, MPI_UNSGINED_LONG_LONG, comm); + MPI_Allgather(&num_local_nodes, 1, MPI_UNSIGNED_LONG_LONG, + node_disp.data() + 1, 1, MPI_UNSIGNED_LONG_LONG, comm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); std::vector array(graph.array().begin(), graph.array().end()); std::vector offsets(graph.offsets().begin(), graph.offsets().end()); From 15c61f456798600bcfcc95388416528beb8a9b3d Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:22:58 +0100 Subject: [PATCH 30/33] Add KaHIP type comment --- cpp/dolfinx/graph/partitioners.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index c062558987d..7f49edfaf87 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -631,6 +631,11 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed, common::Timer timer1("KaHIP: build adjacency data"); std::vector node_disp(dolfinx::MPI::size(comm) + 1, 0); const T num_local_nodes = graph.num_nodes(); + + // KaHIP internally relies on an unsigned long long int type, which is not + // easily convertible to a general mpi type due to platform specific + // differences. So we can not rely on the general mpi_t<> mapping and do it + // by hand in this sole occurence. MPI_Allgather(&num_local_nodes, 1, MPI_UNSIGNED_LONG_LONG, node_disp.data() + 1, 1, MPI_UNSIGNED_LONG_LONG, comm); std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin()); From f3390d4b3e17a440ee6a470c43cf92590c852c80 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:23:51 +0100 Subject: [PATCH 31/33] typos --- cpp/dolfinx/common/MPI.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 254aad96e93..28be12d26de 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -279,8 +279,8 @@ struct mpi_type_mapping; template MPI_Datatype mpi_t = mpi_type_mapping::type; -/// @brief Registers for cpp_t the correpsonding mpi_t which can then be -/// retrieved with mpi_t form here on. +/// @brief Registers for cpp_t the corresponding mpi_t which can then be +/// retrieved with mpi_t from here on. 
#define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \ template <> \ struct mpi_type_mapping \ From 133620c37598fba02ba30b34cffd310b5de88ee2 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:27:26 +0100 Subject: [PATCH 32/33] Remove maps for char and bool --- cpp/dolfinx/common/MPI.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 28be12d26de..c6f1dba0433 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -291,8 +291,6 @@ MPI_Datatype mpi_t = mpi_type_mapping::type; /// @defgroup MPI type mappings /// @{ /// @cond -MAP_TO_MPI_TYPE(char, MPI_CHAR) -MAP_TO_MPI_TYPE(bool, MPI_C_BOOL) MAP_TO_MPI_TYPE(float, MPI_FLOAT) MAP_TO_MPI_TYPE(double, MPI_DOUBLE) MAP_TO_MPI_TYPE(std::complex, MPI_C_FLOAT_COMPLEX) From 4263bb4a9ed7d994c87da1c6ddaaeda051655416 Mon Sep 17 00:00:00 2001 From: schnellerhase <56360279+schnellerhase@users.noreply.github.com> Date: Sat, 23 Nov 2024 15:02:38 +0100 Subject: [PATCH 33/33] Revert to non type trait usage, when not templated type --- cpp/dolfinx/common/IndexMap.cpp | 111 ++++++++++------------- cpp/dolfinx/common/MPI.h | 13 +-- cpp/dolfinx/common/Scatterer.h | 18 ++-- cpp/dolfinx/fem/DirichletBC.cpp | 7 +- cpp/dolfinx/fem/dofmapbuilder.cpp | 4 +- cpp/dolfinx/geometry/utils.h | 16 ++-- cpp/dolfinx/graph/partition.cpp | 28 +++--- cpp/dolfinx/graph/partitioners.cpp | 11 +-- cpp/dolfinx/io/xdmf_function.cpp | 3 +- cpp/dolfinx/io/xdmf_mesh.cpp | 10 +- cpp/dolfinx/io/xdmf_mesh.h | 4 +- cpp/dolfinx/io/xdmf_utils.cpp | 15 ++- cpp/dolfinx/io/xdmf_utils.h | 3 +- cpp/dolfinx/la/MatrixCSR.h | 8 +- cpp/dolfinx/la/SparsityPattern.cpp | 5 +- cpp/dolfinx/mesh/Topology.cpp | 24 ++--- cpp/dolfinx/mesh/graphbuild.cpp | 11 +-- cpp/dolfinx/mesh/topologycomputation.cpp | 26 +++--- cpp/dolfinx/mesh/utils.h | 6 +- cpp/dolfinx/refinement/plaza.cpp | 4 +- cpp/dolfinx/refinement/utils.cpp | 10 +- cpp/dolfinx/refinement/utils.h | 8 +- 22 files changed, 149 insertions(+), 196 deletions(-) diff --git a/cpp/dolfinx/common/IndexMap.cpp b/cpp/dolfinx/common/IndexMap.cpp index af1243ce5d8..1c4d4e8b174 100644 --- a/cpp/dolfinx/common/IndexMap.cpp +++ b/cpp/dolfinx/common/IndexMap.cpp @@ -117,10 +117,8 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span src, send_sizes.reserve(1); recv_sizes.reserve(1); MPI_Request sizes_request; - MPI_Ineighbor_alltoall(send_sizes.data(), 1, - dolfinx::MPI::mpi_t, recv_sizes.data(), - 1, dolfinx::MPI::mpi_t, comm0, - &sizes_request); + MPI_Ineighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T, recv_sizes.data(), + 1, MPI_INT32_T, comm0, &sizes_request); // Build send buffer and ghost position to send buffer position for (auto& d : send_data) @@ -139,11 +137,10 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span src, // Send ghost indices to owner, and receive indices recv_indices.resize(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv( - send_indices.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_indices.data(), - recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, - comm0); + ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + recv_indices.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm0); dolfinx::MPI::check_error(comm, ierr); ierr = MPI_Comm_free(&comm0); @@ -515,10 +512,10 @@ compute_submap_ghost_indices(std::span submap_src, dolfinx::MPI::check_error(imap.comm(), ierr); // Send indices to ghosting ranks - ierr = 
MPI_Neighbor_alltoallv( - send_gidx.data(), recv_sizes.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, recv_gidx.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t, comm1); + ierr = MPI_Neighbor_alltoallv(send_gidx.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, + recv_gidx.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, comm1); dolfinx::MPI::check_error(imap.comm(), ierr); ierr = MPI_Comm_free(&comm1); @@ -611,10 +608,10 @@ common::compute_owned_indices(std::span indices, // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); std::vector& send_buffer = global_indices; - ierr = MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, comm); + ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm); dolfinx::MPI::check_error(comm, ierr); ierr = MPI_Comm_free(&comm); dolfinx::MPI::check_error(map.comm(), ierr); @@ -742,9 +739,8 @@ common::stack_index_maps( std::vector recv_sizes(dest.size(), 0); send_sizes.reserve(1); recv_sizes.reserve(1); - ierr = MPI_Neighbor_alltoall( - send_sizes.data(), 1, dolfinx::MPI::mpi_t, - recv_sizes.data(), 1, dolfinx::MPI::mpi_t, comm0); + ierr = MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T, + recv_sizes.data(), 1, MPI_INT32_T, comm0); dolfinx::MPI::check_error(comm0, ierr); // Prepare displacement vectors @@ -757,11 +753,10 @@ common::stack_index_maps( // Send ghost indices to owner, and receive indices std::vector recv_indices(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv( - send_indices.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_indices.data(), - recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, - comm0); + ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + recv_indices.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm0); dolfinx::MPI::check_error(comm0, ierr); // For each received index (which I should own), compute its new @@ -779,11 +774,10 @@ common::stack_index_maps( // Send back/receive new indices std::vector ghosts_new_idx(send_disp.back()); - ierr = MPI_Neighbor_alltoallv( - ghost_old_to_new.data(), recv_sizes.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, ghosts_new_idx.data(), - send_sizes.data(), send_disp.data(), dolfinx::MPI::mpi_t, - comm1); + ierr = MPI_Neighbor_alltoallv(ghost_old_to_new.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, + ghosts_new_idx.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, comm1); dolfinx::MPI::check_error(comm1, ierr); // Unpack new indices and store owner @@ -832,9 +826,8 @@ common::create_sub_index_map(const IndexMap& imap, // Compute submap offset for this rank std::int64_t submap_local_size = submap_owned.size(); std::int64_t submap_offset = 0; - int ierr - = MPI_Exscan(&submap_local_size, &submap_offset, 1, - dolfinx::MPI::mpi_t, MPI_SUM, imap.comm()); + int ierr = MPI_Exscan(&submap_local_size, &submap_offset, 1, MPI_INT64_T, + MPI_SUM, imap.comm()); dolfinx::MPI::check_error(imap.comm(), ierr); // Compute the global indices (w.r.t. 
the submap) of the submap ghosts @@ -866,16 +859,14 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size) : _comm(comm, true) std::int64_t offset = 0; const std::int64_t local_size_tmp = local_size; MPI_Request request_scan; - int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, - dolfinx::MPI::mpi_t, MPI_SUM, + int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM, _comm.comm(), &request_scan); dolfinx::MPI::check_error(_comm.comm(), ierr); // Send local size to sum reduction to get global size MPI_Request request; - ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm, - &request); + ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM, + comm, &request); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Wait(&request_scan, MPI_STATUS_IGNORE); @@ -912,16 +903,14 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size, std::int64_t offset = 0; const std::int64_t local_size_tmp = local_size; MPI_Request request_scan; - int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm, - &request_scan); + int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM, + comm, &request_scan); dolfinx::MPI::check_error(_comm.comm(), ierr); // Send local size to sum reduction to get global size MPI_Request request; - ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm, - &request); + ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM, + comm, &request); dolfinx::MPI::check_error(_comm.comm(), ierr); // Wait for MPI_Iexscan to complete (get offset) @@ -1084,11 +1073,10 @@ graph::AdjacencyList IndexMap::index_to_dest_ranks(int tag) const // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), - recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, - comm0); + ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm0); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm0); dolfinx::MPI::check_error(_comm.comm(), ierr); @@ -1184,11 +1172,10 @@ graph::AdjacencyList IndexMap::index_to_dest_ranks(int tag) const std::next(recv_disp.begin())); std::vector recv_indices(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_indices.data(), - recv_sizes.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, comm); + ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + recv_indices.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm); dolfinx::MPI::check_error(_comm.comm(), ierr); @@ -1283,10 +1270,10 @@ std::vector IndexMap::shared_indices() const // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); - ierr = MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, comm); + ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, + 
recv_buffer.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm); dolfinx::MPI::check_error(_comm.comm(), ierr); ierr = MPI_Comm_free(&comm); @@ -1324,12 +1311,12 @@ std::array IndexMap::imbalance() const // Find the maximum number of owned indices and the maximum number of ghost // indices across all processes. - MPI_Allreduce(local_sizes.data(), max_count.data(), 2, - dolfinx::MPI::mpi_t, MPI_MAX, _comm.comm()); + MPI_Allreduce(local_sizes.data(), max_count.data(), 2, MPI_INT32_T, MPI_MAX, + _comm.comm()); std::int32_t total_num_ghosts = 0; - MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, - dolfinx::MPI::mpi_t, MPI_SUM, _comm.comm()); + MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, MPI_INT32_T, MPI_SUM, + _comm.comm()); // Compute the average number of owned and ghost indices per process. int comm_size = dolfinx::MPI::size(_comm.comm()); diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h index 450b6c020d1..7cfc77e8866 100644 --- a/cpp/dolfinx/common/MPI.h +++ b/cpp/dolfinx/common/MPI.h @@ -431,9 +431,8 @@ distribute_to_postoffice(MPI_Comm comm, const U& x, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer_index.data(), - num_items_recv.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, neigh_comm); + MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(), + recv_disp.data(), MPI_INT64_T, neigh_comm); dolfinx::MPI::check_error(comm, err); // Send/receive data (x) @@ -562,9 +561,8 @@ distribute_from_postoffice(MPI_Comm comm, std::span indices, std::vector recv_buffer_index(recv_disp.back()); err = MPI_Neighbor_alltoallv( send_buffer_index.data(), num_items_per_src.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer_index.data(), - num_items_recv.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, neigh_comm0); + MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(), + recv_disp.data(), MPI_INT64_T, neigh_comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&neigh_comm0); @@ -689,8 +687,7 @@ distribute_data(MPI_Comm comm0, std::span indices, int err; std::int64_t shape0 = 0; - err = MPI_Allreduce(&shape0_local, &shape0, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm0); + err = MPI_Allreduce(&shape0_local, &shape0, 1, MPI_INT64_T, MPI_SUM, comm0); dolfinx::MPI::check_error(comm0, err); std::int64_t rank_offset = -1; diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h index d5aec396a25..72afe81ad11 100644 --- a/cpp/dolfinx/common/Scatterer.h +++ b/cpp/dolfinx/common/Scatterer.h @@ -120,10 +120,8 @@ class Scatterer _displs_local.resize(_sizes_local.size() + 1); _sizes_remote.reserve(1); _sizes_local.reserve(1); - MPI_Neighbor_alltoall(_sizes_remote.data(), 1, - dolfinx::MPI::mpi_t, - _sizes_local.data(), 1, - dolfinx::MPI::mpi_t, _comm1.comm()); + MPI_Neighbor_alltoall(_sizes_remote.data(), 1, MPI_INT32_T, + _sizes_local.data(), 1, MPI_INT32_T, _comm1.comm()); std::partial_sum(_sizes_local.begin(), _sizes_local.end(), std::next(_displs_local.begin())); @@ -133,11 +131,10 @@ class Scatterer // Send ghost global indices to owning rank, and receive owned // indices that are ghosts on other ranks std::vector recv_buffer(_displs_local.back(), 0); - MPI_Neighbor_alltoallv( - ghosts_sorted.data(), _sizes_remote.data(), _displs_remote.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), - _sizes_local.data(), _displs_local.data(), - dolfinx::MPI::mpi_t, _comm1.comm()); + 
MPI_Neighbor_alltoallv(ghosts_sorted.data(), _sizes_remote.data(), + _displs_remote.data(), MPI_INT64_T, + recv_buffer.data(), _sizes_local.data(), + _displs_local.data(), MPI_INT64_T, _comm1.comm()); const std::array range = map.local_range(); #ifndef NDEBUG @@ -148,8 +145,7 @@ class Scatterer // Scale sizes and displacements by block size { - auto rescale = [](auto& x, int bs) - { + auto rescale = [](auto& x, int bs) { std::ranges::transform(x, x.begin(), [bs](auto e) { return e *= bs; }); }; rescale(_sizes_local, bs); diff --git a/cpp/dolfinx/fem/DirichletBC.cpp b/cpp/dolfinx/fem/DirichletBC.cpp index 1017b0e0934..97d9b0689d9 100644 --- a/cpp/dolfinx/fem/DirichletBC.cpp +++ b/cpp/dolfinx/fem/DirichletBC.cpp @@ -140,10 +140,9 @@ get_remote_dofs(MPI_Comm comm, const common::IndexMap& map, int bs_map, // MPI_Neighbor_alltoallv to send only to relevant processes. // Send/receive global index of dofs with bcs to all neighbors std::vector dofs_received(disp.back()); - MPI_Ineighbor_allgatherv( - dofs_global.data(), dofs_global.size(), dolfinx::MPI::mpi_t, - dofs_received.data(), num_dofs_recv.data(), disp.data(), - dolfinx::MPI::mpi_t, comm, &request); + MPI_Ineighbor_allgatherv(dofs_global.data(), dofs_global.size(), MPI_INT64_T, + dofs_received.data(), num_dofs_recv.data(), + disp.data(), MPI_INT64_T, comm, &request); // FIXME: check that dofs is sorted // Build vector of local dof indices that have been marked by another diff --git a/cpp/dolfinx/fem/dofmapbuilder.cpp b/cpp/dolfinx/fem/dofmapbuilder.cpp index 29b648ded5c..676bb48eeba 100644 --- a/cpp/dolfinx/fem/dofmapbuilder.cpp +++ b/cpp/dolfinx/fem/dofmapbuilder.cpp @@ -540,9 +540,9 @@ std::pair, std::vector> get_global_indices( // TODO: use MPI_Ineighbor_alltoallv // Send global index of dofs to neighbors all_dofs_received[d].resize(disp_recv[d].back()); - MPI_Ineighbor_allgatherv(global[d].data(), global[d].size(), dolfinx::MPI::mpi_t, + MPI_Ineighbor_allgatherv(global[d].data(), global[d].size(), MPI_INT64_T, all_dofs_received[d].data(), size_recv[d].data(), - disp_recv[d].data(), dolfinx::MPI::mpi_t, comm[d], + disp_recv[d].data(), MPI_INT64_T, comm[d], &requests[requests_dim.size()]); requests_dim.push_back(d); } diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h index b121cadf8fb..4e287fcb4cb 100644 --- a/cpp/dolfinx/geometry/utils.h +++ b/cpp/dolfinx/geometry/utils.h @@ -827,10 +827,10 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, } std::vector recv_ranks(recv_offsets.back()); - MPI_Neighbor_alltoallv( - cell_indicator.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t, recv_ranks.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t, reverse_comm); + MPI_Neighbor_alltoallv(cell_indicator.data(), send_sizes.data(), + send_offsets.data(), MPI_INT32_T, recv_ranks.data(), + recv_sizes.data(), recv_offsets.data(), MPI_INT32_T, + reverse_comm); std::vector point_owners(points.size() / 3, -1); for (std::size_t i = 0; i < unpack_map.size(); i++) @@ -946,10 +946,10 @@ PointOwnershipData determine_point_ownership(const mesh::Mesh& mesh, // Send ownership info std::vector dest_ranks(recv_offsets.back()); - MPI_Neighbor_alltoallv( - send_owners.data(), send_sizes.data(), send_offsets.data(), - dolfinx::MPI::mpi_t, dest_ranks.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t, forward_comm); + MPI_Neighbor_alltoallv(send_owners.data(), send_sizes.data(), + send_offsets.data(), MPI_INT32_T, dest_ranks.data(), + recv_sizes.data(), 
recv_offsets.data(), MPI_INT32_T, + forward_comm); // Unpack dest ranks if point owner is this rank std::vector owned_recv_ranks; diff --git a/cpp/dolfinx/graph/partition.cpp b/cpp/dolfinx/graph/partition.cpp index 10996ae67a5..d6080643003 100644 --- a/cpp/dolfinx/graph/partition.cpp +++ b/cpp/dolfinx/graph/partition.cpp @@ -49,8 +49,7 @@ graph::build::distribute(MPI_Comm comm, std::int64_t offset_global = 0; { const std::int64_t num_owned = list.num_nodes(); - MPI_Exscan(&num_owned, &offset_global, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm); } // TODO: Do this on the neighbourhood only @@ -152,8 +151,7 @@ graph::build::distribute(MPI_Comm comm, // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, - &compound_type); + MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), @@ -244,8 +242,7 @@ graph::build::distribute(MPI_Comm comm, std::span list, // Get global offset for converting local index to global index for // nodes in 'list' std::int64_t offset_global = 0; - MPI_Exscan(&num_owned, &offset_global, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm); // Buffer size (max number of edges + 2 for owning rank, // and node global index) @@ -337,8 +334,7 @@ graph::build::distribute(MPI_Comm comm, std::span list, // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, - &compound_type); + MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), @@ -406,8 +402,8 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::int64_t offset_local = 0; MPI_Request request_offset_scan; const std::int64_t num_local = owned_indices.size(); - MPI_Iexscan(&num_local, &offset_local, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm, &request_offset_scan); + MPI_Iexscan(&num_local, &offset_local, 1, MPI_INT64_T, MPI_SUM, comm, + &request_offset_scan); // Find out how many ghosts are on each neighboring process std::vector ghost_index_count; @@ -476,9 +472,8 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::vector recv_data(recv_offsets.back()); MPI_Neighbor_alltoallv(send_data.data(), ghost_index_count.data(), - send_offsets.data(), dolfinx::MPI::mpi_t, - recv_data.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t, + send_offsets.data(), MPI_INT64_T, recv_data.data(), + recv_sizes.data(), recv_offsets.data(), MPI_INT64_T, neighbor_comm_fwd); // Complete global_offset scan @@ -507,10 +502,9 @@ graph::build::compute_ghost_indices(MPI_Comm comm, std::vector new_recv(send_data.size()); MPI_Neighbor_alltoallv(recv_data.data(), recv_sizes.data(), - recv_offsets.data(), dolfinx::MPI::mpi_t, - new_recv.data(), ghost_index_count.data(), - send_offsets.data(), dolfinx::MPI::mpi_t, - neighbor_comm_rev); + recv_offsets.data(), MPI_INT64_T, new_recv.data(), + ghost_index_count.data(), send_offsets.data(), + MPI_INT64_T, neighbor_comm_rev); MPI_Comm_free(&neighbor_comm_fwd); MPI_Comm_free(&neighbor_comm_rev); diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp index 
7f49edfaf87..6efd9d1a80a 100644 --- a/cpp/dolfinx/graph/partitioners.cpp +++ b/cpp/dolfinx/graph/partitioners.cpp @@ -143,10 +143,10 @@ graph::AdjacencyList compute_destination_ranks( // Send/receive data std::vector recv_buffer(recv_disp.back()); - MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, neigh_comm); + MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, recv_buffer.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + neigh_comm); MPI_Comm_free(&neigh_comm); // Prepare (local node index, destination rank) array. Add local data, @@ -321,8 +321,7 @@ graph::partition_fn graph::scotch::partitioner(graph::scotch::strategy strategy, std::int64_t offset_global = 0; const std::int64_t num_owned = graph.num_nodes(); MPI_Request request_offset_scan; - MPI_Iexscan(&num_owned, &offset_global, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm, + MPI_Iexscan(&num_owned, &offset_global, 1, MPI_INT64_T, MPI_SUM, comm, &request_offset_scan); // C-style array indexing diff --git a/cpp/dolfinx/io/xdmf_function.cpp b/cpp/dolfinx/io/xdmf_function.cpp index 328915cef5f..c67ede85af7 100644 --- a/cpp/dolfinx/io/xdmf_function.cpp +++ b/cpp/dolfinx/io/xdmf_function.cpp @@ -161,8 +161,7 @@ void xdmf_function::add_function(MPI_Comm comm, const fem::Function& u, const std::int64_t num_local = data_values.size() / num_components; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, - comm); + MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); const bool use_mpi_io = dolfinx::MPI::size(comm) > 1; diff --git a/cpp/dolfinx/io/xdmf_mesh.cpp b/cpp/dolfinx/io/xdmf_mesh.cpp index 43c23e497cb..d6a228ce167 100644 --- a/cpp/dolfinx/io/xdmf_mesh.cpp +++ b/cpp/dolfinx/io/xdmf_mesh.cpp @@ -135,8 +135,8 @@ void xdmf_mesh::add_topology_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_entities_local = topology_data.size() / num_nodes_per_entity; std::int64_t num_entities_global = 0; - MPI_Allreduce(&num_entities_local, &num_entities_global, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm); + MPI_Allreduce(&num_entities_local, &num_entities_global, 1, MPI_INT64_T, + MPI_SUM, comm); topology_node.append_attribute("NumberOfElements") = std::to_string(num_entities_global).c_str(); topology_node.append_attribute("NodesPerElement") = num_nodes_per_entity; @@ -149,8 +149,7 @@ void xdmf_mesh::add_topology_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_local = num_entities_local; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, - comm); + MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item(topology_node, h5_id, h5_path, std::span(topology_data), @@ -204,8 +203,7 @@ void xdmf_mesh::add_geometry_data(MPI_Comm comm, pugi::xml_node& xml_node, const std::int64_t num_local = num_points_local; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, - comm); + MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item(geometry_node, h5_id, h5_path, std::span(x), offset, shape, "", diff --git a/cpp/dolfinx/io/xdmf_mesh.h b/cpp/dolfinx/io/xdmf_mesh.h index c8e2ddf629d..804f0ee5a35 100644 --- a/cpp/dolfinx/io/xdmf_mesh.h +++ 
b/cpp/dolfinx/io/xdmf_mesh.h @@ -129,11 +129,11 @@ void add_meshtags(MPI_Comm comm, const mesh::MeshTags& meshtags, std::int64_t global_num_values = 0; const std::int64_t local_num_values = num_active_entities; - MPI_Allreduce(&local_num_values, &global_num_values, 1, dolfinx::MPI::mpi_t, MPI_SUM, + MPI_Allreduce(&local_num_values, &global_num_values, 1, MPI_INT64_T, MPI_SUM, comm); const std::int64_t num_local = num_active_entities; std::int64_t offset = 0; - MPI_Exscan(&num_local, &offset, 1, dolfinx::MPI::mpi_t, MPI_SUM, comm); + MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm); const bool use_mpi_io = (dolfinx::MPI::size(comm) > 1); xdmf_utils::add_data_item( attribute_node, h5_id, path_prefix + std::string("/Values"), diff --git a/cpp/dolfinx/io/xdmf_utils.cpp b/cpp/dolfinx/io/xdmf_utils.cpp index 985500ba599..444d3f71b3a 100644 --- a/cpp/dolfinx/io/xdmf_utils.cpp +++ b/cpp/dolfinx/io/xdmf_utils.cpp @@ -284,8 +284,7 @@ xdmf_utils::distribute_entity_data( MPI_Comm comm = topology.comm(); MPI_Datatype compound_type; - MPI_Type_contiguous(entities_v.extent(1), dolfinx::MPI::mpi_t, - &compound_type); + MPI_Type_contiguous(entities_v.extent(1), MPI_INT64_T, &compound_type); MPI_Type_commit(&compound_type); // -- B. Send entities and entity data to postmaster @@ -403,8 +402,7 @@ xdmf_utils::distribute_entity_data( std::vector> dest_to_index; std::ranges::transform( indices, std::back_inserter(dest_to_index), - [size, num_nodes](auto n) - { + [size, num_nodes](auto n) { return std::pair(dolfinx::MPI::index_owner(size, n, num_nodes), n); }); std::ranges::sort(dest_to_index); @@ -464,11 +462,10 @@ xdmf_utils::distribute_entity_data( [](auto x) { return x.second; }); std::vector recv_buffer(recv_disp.back()); - err = MPI_Neighbor_alltoallv( - send_buffer.data(), num_items_send.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), - num_items_recv.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, comm0); + err = MPI_Neighbor_alltoallv(send_buffer.data(), num_items_send.data(), + send_disp.data(), MPI_INT64_T, + recv_buffer.data(), num_items_recv.data(), + recv_disp.data(), MPI_INT64_T, comm0); dolfinx::MPI::check_error(comm, err); err = MPI_Comm_free(&comm0); dolfinx::MPI::check_error(comm, err); diff --git a/cpp/dolfinx/io/xdmf_utils.h b/cpp/dolfinx/io/xdmf_utils.h index ad70a96da5c..b7573321dbf 100644 --- a/cpp/dolfinx/io/xdmf_utils.h +++ b/cpp/dolfinx/io/xdmf_utils.h @@ -317,8 +317,7 @@ std::vector get_dataset(MPI_Comm comm, const pugi::xml_node& dataset_node, std::int64_t size_global = 0; const std::int64_t size_local = data_vector.size(); - MPI_Allreduce(&size_local, &size_global, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm); + MPI_Allreduce(&size_local, &size_global, 1, MPI_INT64_T, MPI_SUM, comm); if (size != size_global) { throw std::runtime_error( diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h index 06e69084a8b..1a6738d676a 100644 --- a/cpp/dolfinx/la/MatrixCSR.h +++ b/cpp/dolfinx/la/MatrixCSR.h @@ -577,10 +577,9 @@ MatrixCSR::MatrixCSR(const SparsityPattern& p, BlockMode mode) ghost_index_array.resize(recv_disp.back()); MPI_Neighbor_alltoallv(ghost_index_data.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t, + send_disp.data(), MPI_INT64_T, ghost_index_array.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, - _comm.comm()); + recv_disp.data(), MPI_INT64_T, _comm.comm()); } // Store receive displacements for future use, when transferring @@ -730,8 +729,7 @@ double MatrixCSR::squared_norm() const 
_data.cbegin(), std::next(_data.cbegin(), _row_ptr[num_owned_rows] * bs2), double(0), [](auto norm, value_type y) { return norm + std::norm(y); }); double norm_sq; - MPI_Allreduce(&norm_sq_local, &norm_sq, 1, dolfinx::MPI::mpi_t, - MPI_SUM, _comm.comm()); + MPI_Allreduce(&norm_sq_local, &norm_sq, 1, MPI_DOUBLE, MPI_SUM, _comm.comm()); return norm_sq; } //----------------------------------------------------------------------------- diff --git a/cpp/dolfinx/la/SparsityPattern.cpp b/cpp/dolfinx/la/SparsityPattern.cpp index 4ca4233890a..ac7ee1b6efc 100644 --- a/cpp/dolfinx/la/SparsityPattern.cpp +++ b/cpp/dolfinx/la/SparsityPattern.cpp @@ -335,9 +335,8 @@ void SparsityPattern::finalize() ghost_data_in.resize(recv_disp.back()); MPI_Neighbor_alltoallv(ghost_data.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t, - ghost_data_in.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, + send_disp.data(), MPI_INT64_T, ghost_data_in.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, comm); MPI_Comm_free(&comm); } diff --git a/cpp/dolfinx/mesh/Topology.cpp b/cpp/dolfinx/mesh/Topology.cpp index be629e5da76..0c688e24753 100644 --- a/cpp/dolfinx/mesh/Topology.cpp +++ b/cpp/dolfinx/mesh/Topology.cpp @@ -50,8 +50,7 @@ determine_sharing_ranks(MPI_Comm comm, std::span indices) { std::int64_t max_index = indices.empty() ? 0 : *std::ranges::max_element(indices); - MPI_Allreduce(&max_index, &global_range, 1, - dolfinx::MPI::mpi_t, MPI_MAX, comm); + MPI_Allreduce(&max_index, &global_range, 1, MPI_INT64_T, MPI_MAX, comm); global_range += 1; } @@ -468,9 +467,8 @@ exchange_indexing(MPI_Comm comm, std::span indices, std::next(recv_disp.begin())); recv_data = std::vector(recv_disp.back()); MPI_Neighbor_alltoallv(sbuffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_data.data(), - recv_sizes.data(), recv_disp.data(), - dolfinx::MPI::mpi_t, comm0); + MPI_INT64_T, recv_data.data(), recv_sizes.data(), + recv_disp.data(), MPI_INT64_T, comm0); MPI_Comm_free(&comm0); } @@ -584,9 +582,8 @@ std::vector> exchange_ghost_indexing( // Send ghost indices to owner, and receive owned indices std::vector recv_buffer(recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t, - recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, + send_disp.data(), MPI_INT64_T, recv_buffer.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, comm1); MPI_Comm_free(&comm1); @@ -667,10 +664,10 @@ std::vector> exchange_ghost_indexing( std::partial_sum(recv_sizes.begin(), recv_sizes.end(), std::next(recv_disp.begin())); std::vector recv_buffer(recv_disp.back()); - MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_buffer.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, comm); + MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, recv_buffer.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + comm); std::vector> data; data.reserve(recv_buffer.size() / 3); @@ -1133,8 +1130,7 @@ Topology mesh::create_topology( std::int64_t global_offset_v = 0; { const std::int64_t nlocal = owned_vertices.size(); - MPI_Exscan(&nlocal, &global_offset_v, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&nlocal, &global_offset_v, 1, MPI_INT64_T, MPI_SUM, comm); } // Get global indices of ghost cells diff --git a/cpp/dolfinx/mesh/graphbuild.cpp b/cpp/dolfinx/mesh/graphbuild.cpp index 
fc6a906b73a..a78e93f4821 100644 --- a/cpp/dolfinx/mesh/graphbuild.cpp +++ b/cpp/dolfinx/mesh/graphbuild.cpp @@ -81,8 +81,8 @@ graph::AdjacencyList compute_nonlocal_dual_graph( MPI_Request request_cell_offset; { const std::int64_t num_local = local_graph.num_nodes(); - MPI_Iexscan(&num_local, &cell_offset, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm, &request_cell_offset); + MPI_Iexscan(&num_local, &cell_offset, 1, MPI_INT64_T, MPI_SUM, comm, + &request_cell_offset); } // Find (max_vert_per_facet, min_vertex_index, max_vertex_index) @@ -100,8 +100,8 @@ graph::AdjacencyList compute_nonlocal_dual_graph( // Compute reductions std::array recv_buffer_r; - MPI_Allreduce(send_buffer_r.data(), recv_buffer_r.data(), 3, - dolfinx::MPI::mpi_t, MPI_MAX, comm); + MPI_Allreduce(send_buffer_r.data(), recv_buffer_r.data(), 3, MPI_INT64_T, + MPI_MAX, comm); assert(recv_buffer_r[1] != std::numeric_limits::min()); assert(recv_buffer_r[2] != -1); fshape1 = recv_buffer_r[0]; @@ -217,8 +217,7 @@ graph::AdjacencyList compute_nonlocal_dual_graph( // Send/receive data facet MPI_Datatype compound_type; - MPI_Type_contiguous(buffer_shape1, dolfinx::MPI::mpi_t, - &compound_type); + MPI_Type_contiguous(buffer_shape1, MPI_INT64_T, &compound_type); MPI_Type_commit(&compound_type); std::vector recv_buffer(buffer_shape1 * recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), num_items_per_dest.data(), diff --git a/cpp/dolfinx/mesh/topologycomputation.cpp b/cpp/dolfinx/mesh/topologycomputation.cpp index 657c9d93b92..edefdbd1e26 100644 --- a/cpp/dolfinx/mesh/topologycomputation.cpp +++ b/cpp/dolfinx/mesh/topologycomputation.cpp @@ -256,10 +256,10 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, std::back_inserter(recv_disp)); recv_data.resize(recv_disp.back()); - MPI_Neighbor_alltoallv( - send_buffer.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_data.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, neighbor_comm); + MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, recv_data.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + neighbor_comm); } // List of (local index, sorted global vertices) pairs received from @@ -376,8 +376,7 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, { const std::int64_t _num_local = num_local; std::int64_t local_offset = 0; - MPI_Exscan(&_num_local, &local_offset, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&_num_local, &local_offset, 1, MPI_INT64_T, MPI_SUM, comm); // Send global indices for same entities that we sent before. 
This // uses the same pattern as before, so we can match up the received @@ -401,10 +400,10 @@ get_local_indexing(MPI_Comm comm, const common::IndexMap& vertex_map, { return a / num_vertices_per_e; }); recv_data.resize(recv_disp.back()); - MPI_Neighbor_alltoallv( - send_global_index_data.data(), send_sizes.data(), send_disp.data(), - dolfinx::MPI::mpi_t, recv_data.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t, neighbor_comm); + MPI_Neighbor_alltoallv(send_global_index_data.data(), send_sizes.data(), + send_disp.data(), MPI_INT64_T, recv_data.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, + neighbor_comm); MPI_Comm_free(&neighbor_comm); // Map back received indices @@ -539,9 +538,10 @@ compute_entities_by_key_matching( std::vector perm(global_vertices.size()); std::iota(perm.begin(), perm.end(), 0); - std::ranges::sort( - perm, [&global_vertices](std::size_t i0, std::size_t i1) - { return global_vertices[i0] < global_vertices[i1]; }); + std::ranges::sort(perm, + [&global_vertices](std::size_t i0, std::size_t i1) { + return global_vertices[i0] < global_vertices[i1]; + }); // For quadrilaterals, the vertex opposite the lowest vertex should // be last if (entity_type == mesh::CellType::quadrilateral) diff --git a/cpp/dolfinx/mesh/utils.h b/cpp/dolfinx/mesh/utils.h index 106ade1ede9..e386381f562 100644 --- a/cpp/dolfinx/mesh/utils.h +++ b/cpp/dolfinx/mesh/utils.h @@ -830,8 +830,7 @@ Mesh> create_mesh( assert(cells1.size() % num_cell_nodes == 0); std::int64_t offset = 0; std::int64_t num_owned = cells1.size() / num_cell_nodes; - MPI_Exscan(&num_owned, &offset, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&num_owned, &offset, 1, MPI_INT64_T, MPI_SUM, comm); original_idx1.resize(num_owned); std::iota(original_idx1.begin(), original_idx1.end(), offset); } @@ -1039,8 +1038,7 @@ Mesh> create_mesh( } // Add on global offset std::int64_t global_offset = 0; - MPI_Exscan(&num_owned, &global_offset, 1, dolfinx::MPI::mpi_t, - MPI_SUM, comm); + MPI_Exscan(&num_owned, &global_offset, 1, MPI_INT64_T, MPI_SUM, comm); for (std::int32_t i = 0; i < num_cell_types; ++i) { std::iota(original_idx1[i].begin(), original_idx1[i].end(), diff --git a/cpp/dolfinx/refinement/plaza.cpp b/cpp/dolfinx/refinement/plaza.cpp index 6e129ef3982..8dcd037e19e 100644 --- a/cpp/dolfinx/refinement/plaza.cpp +++ b/cpp/dolfinx/refinement/plaza.cpp @@ -243,8 +243,8 @@ void plaza::impl::enforce_rules(MPI_Comm comm, } const std::int32_t update_count_old = update_count; - MPI_Allreduce(&update_count_old, &update_count, 1, - dolfinx::MPI::mpi_t, MPI_SUM, comm); + MPI_Allreduce(&update_count_old, &update_count, 1, MPI_INT32_T, MPI_SUM, + comm); } } //----------------------------------------------------------------------------- diff --git a/cpp/dolfinx/refinement/utils.cpp b/cpp/dolfinx/refinement/utils.cpp index b5fbdbbb199..904df7dd3f7 100644 --- a/cpp/dolfinx/refinement/utils.cpp +++ b/cpp/dolfinx/refinement/utils.cpp @@ -84,8 +84,8 @@ void refinement::update_logical_edgefunction( data_to_recv.resize(recv_disp.back()); MPI_Neighbor_alltoallv(data_to_send.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t, data_to_recv.data(), - recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t, + send_disp.data(), MPI_INT64_T, data_to_recv.data(), + recv_sizes.data(), recv_disp.data(), MPI_INT64_T, comm); } @@ -107,7 +107,7 @@ refinement::adjust_indices(const common::IndexMap& map, std::int32_t n) // Get offset for 'n' for this process const std::int64_t num_local = n; std::int64_t 
global_offset = 0; - MPI_Exscan(&num_local, &global_offset, 1, dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, map.comm()); + MPI_Exscan(&num_local, &global_offset, 1, MPI_INT64_T, MPI_SUM, map.comm()); std::span owners = map.owners(); std::span src = map.src(); @@ -121,8 +121,8 @@ refinement::adjust_indices(const common::IndexMap& map, std::int32_t n) // Communicate offset to neighbors std::vector<std::int64_t> offsets(src.size(), 0); offsets.reserve(1); - MPI_Neighbor_allgather(&global_offset, 1, dolfinx::MPI::mpi_t<std::int64_t>, offsets.data(), 1, - dolfinx::MPI::mpi_t<std::int64_t>, comm); + MPI_Neighbor_allgather(&global_offset, 1, MPI_INT64_T, offsets.data(), 1, + MPI_INT64_T, comm); MPI_Comm_free(&comm); diff --git a/cpp/dolfinx/refinement/utils.h b/cpp/dolfinx/refinement/utils.h index 3cee9cb8420..ec550cb3575 100644 --- a/cpp/dolfinx/refinement/utils.h +++ b/cpp/dolfinx/refinement/utils.h @@ -172,8 +172,7 @@ create_new_vertices(MPI_Comm comm, const std::int64_t num_local = n; std::int64_t global_offset = 0; - MPI_Exscan(&num_local, &global_offset, 1, dolfinx::MPI::mpi_t<std::int64_t>, - MPI_SUM, mesh.comm()); + MPI_Exscan(&num_local, &global_offset, 1, MPI_INT64_T, MPI_SUM, mesh.comm()); global_offset += mesh.topology()->index_map(0)->local_range()[1]; std::for_each(local_edge_to_new_vertex.begin(), local_edge_to_new_vertex.end(), @@ -240,10 +239,9 @@ create_new_vertices(MPI_Comm comm, received_values.resize(recv_disp.back()); MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(), - send_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>, + send_disp.data(), MPI_INT64_T, received_values.data(), recv_sizes.data(), - recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>, - comm); + recv_disp.data(), MPI_INT64_T, comm); } // Add received remote global vertex indices to map
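
The hunks above all apply one recurring idiom: compute a process-local offset for owned items with an exclusive scan over std::int64_t values, then exchange variable-sized blocks of global indices over a neighbourhood communicator, passing the MPI datatype constant (MPI_INT64_T, MPI_INT32_T, MPI_DOUBLE) explicitly instead of going through a C++-to-MPI type mapping. The standalone sketch below is not taken from DOLFINx; the all-to-all neighbourhood, the buffer contents and the counts are purely illustrative. It only shows the call pattern that the patch writes at each site.

// Sketch: offset via MPI_Exscan + neighbourhood exchange with MPI_INT64_T.
// Build with e.g.  mpicxx -std=c++20 sketch.cpp  and run under mpirun.
#include <cstdint>
#include <mpi.h>
#include <numeric>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // 1. Global offset for locally owned items (exclusive prefix sum).
  //    'num_local' is a hypothetical per-rank count.
  const std::int64_t num_local = 10 + rank;
  std::int64_t offset = 0;
  MPI_Exscan(&num_local, &offset, 1, MPI_INT64_T, MPI_SUM, comm);

  // 2. Neighbourhood communicator. For illustration only, every other
  //    rank is treated as a neighbour.
  std::vector<int> neighbours;
  for (int r = 0; r < size; ++r)
    if (r != rank)
      neighbours.push_back(r);
  MPI_Comm neigh_comm;
  MPI_Dist_graph_create_adjacent(
      comm, static_cast<int>(neighbours.size()), neighbours.data(),
      MPI_UNWEIGHTED, static_cast<int>(neighbours.size()), neighbours.data(),
      MPI_UNWEIGHTED, MPI_INFO_NULL, false, &neigh_comm);

  // 3. Variable-sized exchange of global indices. Displacements are the
  //    partial sums of the per-neighbour counts (one index per neighbour
  //    here, just to keep the example small).
  std::vector<int> send_sizes(neighbours.size(), 1);
  std::vector<int> recv_sizes(neighbours.size(), 1);
  std::vector<int> send_disp(neighbours.size() + 1, 0);
  std::vector<int> recv_disp(neighbours.size() + 1, 0);
  std::partial_sum(send_sizes.begin(), send_sizes.end(),
                   std::next(send_disp.begin()));
  std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                   std::next(recv_disp.begin()));

  std::vector<std::int64_t> send_buffer(send_disp.back(), offset);
  std::vector<std::int64_t> recv_buffer(recv_disp.back());
  MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(),
                         send_disp.data(), MPI_INT64_T, recv_buffer.data(),
                         recv_sizes.data(), recv_disp.data(), MPI_INT64_T,
                         neigh_comm);

  MPI_Comm_free(&neigh_comm);
  MPI_Finalize();
  return 0;
}

Passing the built-in constants keeps each call site independent of any C++ type-to-datatype mapping; the trade-off is that the element type of the buffer (std::int64_t, std::int32_t, double) and the datatype argument must be kept in sync by hand at every call.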