Improve Datatype handling (#471)
* Improve Datatype handling

- Uses generated functions to avoid creating new `Datatype` instances on every communication operation (#462). These are lazily initialized to allow for use in precompiled modules.
- Attach the corresponding Julia type to an MPI Datatype as an attribute
- Nicer printing of Datatypes
- Allow specifying a bits type as the return type of MPI.Scatter (see the sketch below)
- Minor optimisation when constructing buffers of a single NTuple

* Duplicate Datatypes if they are the same size as an existing type
simonbyrne authored May 14, 2021
1 parent a584b85 commit eeed089
Showing 10 changed files with 205 additions and 41 deletions.
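Below is a minimal sketch of the first and fourth bullets above. The `===` comparison assumes the generated functions hand back the same cached `Datatype` object on repeated calls, and the `UBuffer`/`Scatter` call mirrors the pattern used in the updated `docs/examples/06-scatterv.jl` further down; it is illustrative only and requires an MPI.jl build containing this change.

```julia
using MPI
MPI.Init()

comm   = MPI.COMM_WORLD
rank   = MPI.Comm_rank(comm)
nprocs = MPI.Comm_size(comm)
root   = 0

# Datatype lookup is now a generated, lazily initialized function, so repeated
# calls are expected to return the same object instead of a fresh MPI datatype.
@show MPI.Datatype(Float64) === MPI.Datatype(Float64)

# New form of Scatter: request the result as a bits type instead of filling a
# preallocated receive buffer with Scatter!.
sendbuf = rank == root ? UBuffer(collect(1:nprocs), 1) : UBuffer(nothing)
x = MPI.Scatter(sendbuf, Int, root, comm)
println("rank $rank received $x")
```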
1 change: 1 addition & 0 deletions deps/consts_microsoftmpi.jl
@@ -83,6 +83,7 @@ const MPI_LOCK_EXCLUSIVE = Cint(234)
const MPI_LOCK_SHARED = Cint(235)
const MPI_MAX_INFO_KEY = Cint(255)
const MPI_MAX_INFO_VAL = Cint(1024)
const MPI_MAX_OBJECT_NAME = Cint(128)
const MPI_TAG_UB = reinterpret(Cint, 0x64400001)
const MPI_COMM_TYPE_SHARED = Cint(1)
const MPI_ORDER_C = Cint(56)
1 change: 1 addition & 0 deletions deps/consts_mpich.jl
@@ -88,6 +88,7 @@ const MPI_LOCK_EXCLUSIVE = Cint(234)
const MPI_LOCK_SHARED = Cint(235)
const MPI_MAX_INFO_KEY = Cint(255)
const MPI_MAX_INFO_VAL = Cint(1024)
const MPI_MAX_OBJECT_NAME = Cint(128)
const MPI_TAG_UB = Cint(1681915905)
const MPI_COMM_TYPE_SHARED = Cint(1)
const MPI_ORDER_C = Cint(56)
1 change: 1 addition & 0 deletions deps/consts_openmpi.jl
@@ -95,6 +95,7 @@ const MPI_LOCK_EXCLUSIVE = Cint(1)
const MPI_LOCK_SHARED = Cint(2)
const MPI_MAX_INFO_KEY = Cint(36)
const MPI_MAX_INFO_VAL = Cint(256)
const MPI_MAX_OBJECT_NAME = Cint(64)
const MPI_TAG_UB = Cint(0)
const MPI_COMM_TYPE_SHARED = Cint(0)
const MPI_ORDER_C = Cint(0)
1 change: 1 addition & 0 deletions deps/gen_consts.jl
@@ -126,6 +126,7 @@ MPI_Cints = [
:MPI_LOCK_SHARED,
:MPI_MAX_INFO_KEY,
:MPI_MAX_INFO_VAL,
:MPI_MAX_OBJECT_NAME,
:MPI_TAG_UB,
:MPI_COMM_TYPE_SHARED,
:MPI_ORDER_C,
7 changes: 4 additions & 3 deletions docs/examples/06-scatterv.jl
@@ -36,6 +36,7 @@ if rank == root

# store sizes in 2 * comm_size Array
sizes = vcat(M_counts', N_counts')
size_ubuf = UBuffer(sizes, 2)

# store number of values to send to each rank in comm_size length Vector
counts = vec(prod(sizes, dims=1))
@@ -44,7 +45,7 @@ if rank == root
output_vbuf = VBuffer(output, counts) # VBuffer for gather
else
# these variables can be set to `nothing` on non-root processes
sizes = nothing
size_ubuf = UBuffer(nothing)
output_vbuf = test_vbuf = VBuffer(nothing)
end

@@ -58,8 +59,8 @@ if rank == root
end
MPI.Barrier(comm)

local_M, local_N = MPI.Scatter!(sizes, zeros(Int, 2), root, comm)
local_test = MPI.Scatterv!(test_vbuf, zeros(Float64, local_M, local_N), root, comm)
local_size = MPI.Scatter(size_ubuf, NTuple{2,Int}, root, comm)
local_test = MPI.Scatterv!(test_vbuf, zeros(Float64, local_size), root, comm)

for i = 0:comm_size-1
if rank == i
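For context, here is a condensed, self-contained version of the updated example's pattern. The matrix sizes and data are illustrative placeholders rather than the values used in the full `06-scatterv.jl`.

```julia
using MPI
MPI.Init()

comm      = MPI.COMM_WORLD
rank      = MPI.Comm_rank(comm)
comm_size = MPI.Comm_size(comm)
root      = 0

if rank == root
    # one (M, N) pair per rank, stored as the columns of a 2 x comm_size matrix
    M_counts = fill(2, comm_size)
    N_counts = fill(3, comm_size)
    sizes = vcat(M_counts', N_counts')
    size_ubuf = UBuffer(sizes, 2)

    # flatten every rank's block into a single send buffer for Scatterv!
    counts = vec(prod(sizes, dims=1))
    test_vbuf = VBuffer(rand(Float64, sum(counts)), counts)
else
    # non-root processes pass empty buffer wrappers
    size_ubuf = UBuffer(nothing)
    test_vbuf = VBuffer(nothing)
end

# each rank receives its (M, N) pair directly as a bits type ...
local_size = MPI.Scatter(size_ubuf, NTuple{2,Int}, root, comm)

# ... and then a block of that size from the variable-length scatter
local_test = MPI.Scatterv!(test_vbuf, zeros(Float64, local_size), root, comm)
println("rank $rank received a block of size $(local_size)")
```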
1 change: 1 addition & 0 deletions docs/src/advanced.md
@@ -17,6 +17,7 @@ MPI.Types.create_subarray
MPI.Types.create_struct
MPI.Types.create_resized
MPI.Types.commit!
MPI.Types.duplicate
```

## Operator objects
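A hedged sketch of the newly documented `MPI.Types.duplicate`: the single-argument signature shown here is an assumption, based on it wrapping `MPI_Type_dup` as suggested by the commit message.

```julia
using MPI
MPI.Init()

# Assumed signature: MPI.Types.duplicate(::MPI.Datatype) -> MPI.Datatype.
# Duplication yields a distinct handle with the same layout, which is how
# Julia types of identical size can be told apart on the MPI side.
dt = MPI.Types.duplicate(MPI.Datatype(Int))
@show dt
```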
11 changes: 9 additions & 2 deletions src/buffers.jl
@@ -147,22 +147,29 @@ function Buffer(sub::Base.FastContiguousSubArray)
end
function Buffer(sub::Base.FastSubArray)
datatype = Types.create_vector(length(sub), 1, sub.stride1,
Datatype(eltype(sub); commit=false))
Datatype(eltype(sub)))
Types.commit!(datatype)
Buffer(sub, Cint(1), datatype)
end
function Buffer(sub::SubArray{T,N,P,I,false}) where {T,N,P,I<:Tuple{Vararg{Union{Base.ScalarIndex, Base.Slice, AbstractUnitRange}}}}
datatype = Types.create_subarray(size(parent(sub)),
map(length, sub.indices),
map(i -> first(i)-1, sub.indices),
Datatype(eltype(sub), commit=false))
Datatype(eltype(sub)))
Types.commit!(datatype)
Buffer(parent(sub), Cint(1), datatype)
end

# NTuple: avoid creating a new datatype if possible
function Buffer(data::Ref{NTuple{N,T}}) where {N,T}
Buffer(data, Cint(N), Datatype(T))
end


Buffer(::InPlace) = Buffer(IN_PLACE, 0, DATATYPE_NULL)
Buffer(::Nothing) = Buffer(nothing, 0, DATATYPE_NULL)


"""
Buffer_send(data)
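A short sketch of the new `NTuple` path in `Buffer`: wrapping a `Ref` of a homogeneous tuple is expected to reuse the element's existing `Datatype` with a count of `N` rather than building a derived datatype. The `count` and `datatype` field names are assumed from the `Buffer(data, Cint(N), Datatype(T))` call above.

```julia
using MPI
MPI.Init()

buf = MPI.Buffer(Ref((1.0, 2.0, 3.0)))
@show buf.count     # expected: 3, one count per tuple element
@show buf.datatype  # expected: the existing Float64 datatype, not a new derived one
```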
18 changes: 16 additions & 2 deletions src/collective.jl
@@ -113,11 +113,25 @@ Scatter!(sendbuf::Nothing, recvbuf, root::Integer, comm::Comm) =
Scatter!(UBuffer(nothing), recvbuf, root, comm)

# determine UBuffer count from recvbuf
Scatter!(sendbuf::AbstractArray, recvbuf::Union{Ref,AbstractArray}, root::Integer, comm::Comm) =
Scatter!(sendbuf::AbstractArray{T}, recvbuf::Union{Ref{T},AbstractArray{T}}, root::Integer, comm::Comm) where {T} =
Scatter!(UBuffer(sendbuf,length(recvbuf)), recvbuf, root, comm)

"""
Scatterv!(sendbuf::Union{VBuffer,Nothing}, recvbuf, root, comm)
Scatter(sendbuf, T, root::Integer, comm::Comm)
Splits the buffer `sendbuf` in the `root` process into `Comm_size(comm)` chunks,
sending the `j`-th chunk to the process of rank `j-1` as an object of type `T`.
# See also
- [`Scatter!`](@ref)
"""
function Scatter(sendbuf, ::Type{T}, root::Integer, comm::Comm) where {T}
Scatter!(sendbuf, Ref{T}(), root, comm)[]
end


"""
Scatterv!(sendbuf, T, root, comm)
Splits the buffer `sendbuf` in the `root` process into `Comm_size(comm)` chunks and sends
the `j`th chunk to the process of rank `j-1` into the `recvbuf` buffer.
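The tightened `Scatter!` signature above still infers the per-rank chunk length from the receive buffer; a minimal usage sketch, with `nothing` as the send buffer on non-root ranks:

```julia
using MPI
MPI.Init()

comm   = MPI.COMM_WORLD
rank   = MPI.Comm_rank(comm)
nprocs = MPI.Comm_size(comm)
root   = 0

# each rank receives length(recv) == 2 elements of the root's array
send = rank == root ? collect(1:2nprocs) : nothing
recv = zeros(Int, 2)
MPI.Scatter!(send, recv, root, comm)
println("rank $rank received $recv")
```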
