Ginkgo 1.9.0
A numerical linear algebra library targeting many-core architectures.

gko::experimental::mpi::communicator Class Reference

A thin wrapper of MPI_Comm that supports most MPI calls.
#include <ginkgo/core/base/mpi.hpp>
Public Member Functions

communicator(const MPI_Comm& comm, bool force_host_buffer = false)
    Non-owning constructor for an existing communicator of type MPI_Comm.

communicator(const MPI_Comm& comm, int color, int key)
    Create a communicator object from an existing MPI_Comm object using color and key.

communicator(const communicator& comm, int color, int key)
    Create a communicator object from an existing communicator object using color and key.

const MPI_Comm& get() const
    Return the underlying MPI_Comm object.

bool force_host_buffer() const

int size() const
    Return the size of the communicator (number of ranks).

int rank() const
    Return the rank of the calling process in the communicator.

int node_local_rank() const
    Return the node-local rank of the calling process in the communicator.

bool operator==(const communicator& rhs) const
    Compare two communicator objects for equality.

bool operator!=(const communicator& rhs) const
    Compare two communicator objects for inequality.

void synchronize() const
    Synchronize the ranks in the communicator (MPI_Barrier).

template<typename SendType>
void send(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, const int destination_rank, const int send_tag) const
    Send (blocking) data from the calling process to the destination rank.

template<typename SendType>
request i_send(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, const int destination_rank, const int send_tag) const
    Send (non-blocking, immediate return) data from the calling process to the destination rank.

template<typename RecvType>
status recv(std::shared_ptr<const Executor> exec, RecvType* recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
    Receive data from source rank.

template<typename RecvType>
request i_recv(std::shared_ptr<const Executor> exec, RecvType* recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
    Receive (non-blocking, immediate return) data from source rank.

template<typename BroadcastType>
void broadcast(std::shared_ptr<const Executor> exec, BroadcastType* buffer, int count, int root_rank) const
    Broadcast data from the calling process to all ranks in the communicator.

template<typename BroadcastType>
request i_broadcast(std::shared_ptr<const Executor> exec, BroadcastType* buffer, int count, int root_rank) const
    (Non-blocking) Broadcast data from the calling process to all ranks in the communicator.

template<typename ReduceType>
void reduce(std::shared_ptr<const Executor> exec, const ReduceType* send_buffer, ReduceType* recv_buffer, int count, MPI_Op operation, int root_rank) const
    Reduce data into root from all calling processes on the same communicator.

template<typename ReduceType>
request i_reduce(std::shared_ptr<const Executor> exec, const ReduceType* send_buffer, ReduceType* recv_buffer, int count, MPI_Op operation, int root_rank) const
    (Non-blocking) Reduce data into root from all calling processes on the same communicator.

template<typename ReduceType>
void all_reduce(std::shared_ptr<const Executor> exec, ReduceType* recv_buffer, int count, MPI_Op operation) const
    (In-place) Reduce data from all calling processes on the same communicator.

template<typename ReduceType>
request i_all_reduce(std::shared_ptr<const Executor> exec, ReduceType* recv_buffer, int count, MPI_Op operation) const
    (In-place, non-blocking) Reduce data from all calling processes on the same communicator.

template<typename ReduceType>
void all_reduce(std::shared_ptr<const Executor> exec, const ReduceType* send_buffer, ReduceType* recv_buffer, int count, MPI_Op operation) const
    Reduce data from all calling processes on the same communicator.

template<typename ReduceType>
request i_all_reduce(std::shared_ptr<const Executor> exec, const ReduceType* send_buffer, ReduceType* recv_buffer, int count, MPI_Op operation) const
    (Non-blocking) Reduce data from all calling processes on the same communicator.

template<typename SendType, typename RecvType>
void gather(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count, int root_rank) const
    Gather data onto the root rank from all ranks in the communicator.

template<typename SendType, typename RecvType>
request i_gather(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count, int root_rank) const
    (Non-blocking) Gather data onto the root rank from all ranks in the communicator.

template<typename SendType, typename RecvType>
void gather_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int* recv_counts, const int* displacements, int root_rank) const
    Gather data onto the root rank from all ranks in the communicator with offsets.

template<typename SendType, typename RecvType>
request i_gather_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int* recv_counts, const int* displacements, int root_rank) const
    (Non-blocking) Gather data onto the root rank from all ranks in the communicator with offsets.

template<typename SendType, typename RecvType>
void all_gather(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count) const
    Gather data onto all ranks from all ranks in the communicator.

template<typename SendType, typename RecvType>
request i_all_gather(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count) const
    (Non-blocking) Gather data onto all ranks from all ranks in the communicator.

template<typename SendType, typename RecvType>
void scatter(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count, int root_rank) const
    Scatter data from root rank to all ranks in the communicator.

template<typename SendType, typename RecvType>
request i_scatter(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count, int root_rank) const
    (Non-blocking) Scatter data from root rank to all ranks in the communicator.

template<typename SendType, typename RecvType>
void scatter_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int* send_counts, const int* displacements, RecvType* recv_buffer, const int recv_count, int root_rank) const
    Scatter data from root rank to all ranks in the communicator with offsets.

template<typename SendType, typename RecvType>
request i_scatter_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int* send_counts, const int* displacements, RecvType* recv_buffer, const int recv_count, int root_rank) const
    (Non-blocking) Scatter data from root rank to all ranks in the communicator with offsets.

template<typename RecvType>
void all_to_all(std::shared_ptr<const Executor> exec, RecvType* recv_buffer, const int recv_count) const
    (In-place) Communicate data from all ranks to all other ranks (MPI_Alltoall).

template<typename RecvType>
request i_all_to_all(std::shared_ptr<const Executor> exec, RecvType* recv_buffer, const int recv_count) const
    (In-place, non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).

template<typename SendType, typename RecvType>
void all_to_all(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count) const
    Communicate data from all ranks to all other ranks (MPI_Alltoall).

template<typename SendType, typename RecvType>
request i_all_to_all(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int send_count, RecvType* recv_buffer, const int recv_count) const
    (Non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).

template<typename SendType, typename RecvType>
void all_to_all_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int* send_counts, const int* send_offsets, RecvType* recv_buffer, const int* recv_counts, const int* recv_offsets) const
    Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).

void all_to_all_v(std::shared_ptr<const Executor> exec, const void* send_buffer, const int* send_counts, const int* send_offsets, MPI_Datatype send_type, void* recv_buffer, const int* recv_counts, const int* recv_offsets, MPI_Datatype recv_type) const
    Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).

request i_all_to_all_v(std::shared_ptr<const Executor> exec, const void* send_buffer, const int* send_counts, const int* send_offsets, MPI_Datatype send_type, void* recv_buffer, const int* recv_counts, const int* recv_offsets, MPI_Datatype recv_type) const
    (Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).

template<typename SendType, typename RecvType>
request i_all_to_all_v(std::shared_ptr<const Executor> exec, const SendType* send_buffer, const int* send_counts, const int* send_offsets, RecvType* recv_buffer, const int* recv_counts, const int* recv_offsets) const
    (Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).

template<typename ScanType>
void scan(std::shared_ptr<const Executor> exec, const ScanType* send_buffer, ScanType* recv_buffer, int count, MPI_Op operation) const
    Performs a scan operation with the given operator (MPI_Scan).

template<typename ScanType>
request i_scan(std::shared_ptr<const Executor> exec, const ScanType* send_buffer, ScanType* recv_buffer, int count, MPI_Op operation) const
    Performs a non-blocking scan operation with the given operator (MPI_Iscan).
Detailed Description

A thin wrapper of MPI_Comm that supports most MPI calls.
A wrapper class around a given MPI communicator. If a bare MPI_Comm is provided, the wrapper takes no ownership of it, so the MPI_Comm must remain valid throughout the lifetime of the communicator object. If the communicator was created through splitting, the wrapper takes ownership, and the underlying MPI_Comm is freed when the communicator object goes out of scope.
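For illustration, a minimal sketch of the two ownership modes; the surrounding MPI_Init/MPI_Finalize calls stand in for whatever initialization the application already performs:

    #include <ginkgo/core/base/mpi.hpp>

    int main(int argc, char* argv[])
    {
        MPI_Init(&argc, &argv);
        {
            // Non-owning: wraps MPI_COMM_WORLD, which outlives the wrapper.
            gko::experimental::mpi::communicator world(MPI_COMM_WORLD);
            // Owning: created through splitting, so the wrapper frees the
            // underlying MPI_Comm when it goes out of scope.
            gko::experimental::mpi::communicator sub(world, world.rank() % 2,
                                                     world.rank());
        }  // sub's MPI_Comm is freed here; MPI_COMM_WORLD is untouched.
        MPI_Finalize();
    }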
communicator() [non-owning constructor]
Non-owning constructor for an existing communicator of type MPI_Comm.
The MPI_Comm object is not freed when this communicator object is destroyed; an explicit MPI_Comm_free must be called on the original MPI_Comm object.
comm | The input MPI_Comm object. |
force_host_buffer | If set to true, always communicates through host memory. |
communicator() [split from MPI_Comm]
Create a communicator object from an existing MPI_Comm object using color and key.
comm | The input MPI_Comm object. |
color | The color to split the original comm object |
key | The key to split the comm object |
communicator() [split from communicator]
Create a communicator object from an existing communicator object using color and key.
comm | The input communicator object. |
color | The color to split the original comm object |
key | The key to split the comm object |
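For example, a sketch of splitting the ranks of an existing communicator into even and odd groups (the helper name is illustrative; MPI is assumed to be initialized):

    #include <ginkgo/core/base/mpi.hpp>

    namespace mpi = gko::experimental::mpi;

    mpi::communicator split_even_odd(const mpi::communicator& world)
    {
        // Ranks with the same color land in the same sub-communicator;
        // the key determines their ordering within it.
        return mpi::communicator(world, world.rank() % 2, world.rank());
    }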
all_gather()
Gather data onto all ranks from all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_count | the number of elements to receive |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
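A sketch of a typical call, using a host-side gko::ReferenceExecutor for the buffers (the helper name is illustrative and MPI is assumed to be initialized):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Every rank contributes one value; afterwards each rank holds all of them.
    void all_gather_ranks(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        int my_value = comm.rank();
        std::vector<int> all_values(comm.size());
        comm.all_gather(exec, &my_value, 1, all_values.data(), 1);
        // all_values[r] == r for every rank r.
    }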
all_reduce() [separate send buffer]
Reduce data from all calling processes on the same communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the data to reduce |
recv_buffer | the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
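A minimal sketch of summing one value across all ranks (helper name illustrative; host buffers via gko::ReferenceExecutor):

    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Global reduction: every rank receives the sum of all local values.
    double sum_over_ranks(const mpi::communicator& comm, double local)
    {
        auto exec = gko::ReferenceExecutor::create();
        double global = 0.0;
        comm.all_reduce(exec, &local, &global, 1, MPI_SUM);
        return global;
    }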
all_reduce() [in-place]
(In-place) Reduce data from all calling processes on the same communicator.
exec | The executor, on which the message buffer is located. |
recv_buffer | the data to reduce and the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
all_to_all() [separate buffers]
Communicate data from all ranks to all other ranks (MPI_Alltoall).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_count | the number of elements to send |
recv_buffer | the buffer to receive |
recv_count | the number of elements to receive |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
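A sketch of the simplest exchange pattern (helper name illustrative; host buffers assumed):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Each rank sends one element to every rank and receives one from each.
    void exchange_one_each(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        std::vector<int> send(comm.size(), comm.rank());
        std::vector<int> recv(comm.size());
        comm.all_to_all(exec, send.data(), 1, recv.data(), 1);
        // recv[r] holds the element received from rank r (here: r itself).
    }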
all_to_all() [in-place]
(In-place) Communicate data from all ranks to all other ranks (MPI_Alltoall).
See MPI documentation for more details.
exec | The executor, on which the message buffer is located. |
buffer | the buffer to send and receive |
recv_count | the number of elements to receive |
RecvType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
all_to_all_v() [typed]
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_counts | the numbers of elements to send to each rank |
send_offsets | the offsets for the send buffer |
recv_buffer | the buffer to receive into |
recv_counts | the numbers of elements to receive from each rank |
recv_offsets | the offsets for the recv buffer |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
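A sketch of a ragged exchange, where the per-rank counts and offsets follow the usual MPI_Alltoallv conventions (helper name illustrative; host buffers assumed):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Rank r sends r + 1 copies of its rank id to every rank.
    void ragged_exchange(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        const int n = comm.size();
        const int mine = comm.rank() + 1;
        std::vector<int> send_counts(n, mine), send_offsets(n);
        std::vector<int> recv_counts(n), recv_offsets(n);
        for (int r = 0; r < n; ++r) {
            send_offsets[r] = r * mine;
            recv_counts[r] = r + 1;             // rank r contributes r + 1 elements
            recv_offsets[r] = r * (r + 1) / 2;  // prefix sum of recv_counts
        }
        std::vector<int> send(n * mine, comm.rank());
        std::vector<int> recv(n * (n + 1) / 2);
        comm.all_to_all_v(exec, send.data(), send_counts.data(),
                          send_offsets.data(), recv.data(), recv_counts.data(),
                          recv_offsets.data());
    }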
all_to_all_v() [MPI_Datatype]
Communicate data from all ranks to all other ranks with offsets (MPI_Alltoallv).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_counts | the numbers of elements to send to each rank |
send_offsets | the offsets for the send buffer |
send_type | the MPI_Datatype for the send buffer |
recv_buffer | the buffer to receive into |
recv_counts | the numbers of elements to receive from each rank |
recv_offsets | the offsets for the recv buffer |
recv_type | the MPI_Datatype for the recv buffer |
broadcast()
Broadcast data from calling process to all ranks in the communicator.
exec | The executor, on which the message buffer is located. |
buffer | the buffer to broadcast |
count | the number of elements to broadcast |
root_rank | the rank to broadcast from |
BroadcastType | the type of the data to broadcast. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
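A minimal sketch (helper name illustrative; host buffers via gko::ReferenceExecutor):

    #include <array>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Rank 0 fills the buffer; afterwards all ranks hold the same values.
    void broadcast_config(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        std::array<double, 3> data{};
        if (comm.rank() == 0) {
            data = {1.0, 2.0, 3.0};
        }
        comm.broadcast(exec, data.data(), 3, 0);
    }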
gather()
Gather data onto the root rank from all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_count | the number of elements to receive |
root_rank | the rank to gather into |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
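A sketch of collecting one value per rank onto the root (helper name illustrative; the receive buffer is only read on the root, so other ranks may pass an empty one):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    void gather_to_root(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        int my_value = comm.rank();
        std::vector<int> gathered(comm.rank() == 0 ? comm.size() : 0);
        comm.gather(exec, &my_value, 1, gathered.data(), 1, 0);
        // On rank 0: gathered[r] == r.
    }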
gather_v()
Gather data onto the root rank from all ranks in the communicator with offsets.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_counts | the numbers of elements to receive from each rank |
displacements | the offsets for the recv buffer |
root_rank | the rank to gather into |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
get()
Return the underlying MPI_Comm object.
i_all_gather()
(Non-blocking) Gather data onto all ranks from all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_count | the number of elements to receive |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
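The non-blocking variants return a request object. A sketch of overlapping the collective with local work, assuming the request wrapper's wait() member as provided by Ginkgo's request class:

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    void overlapped_all_gather(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        int my_value = comm.rank();
        std::vector<int> all_values(comm.size());
        auto req = comm.i_all_gather(exec, &my_value, 1, all_values.data(), 1);
        // ... independent local work can overlap with the communication ...
        req.wait();  // all_values is valid only after the wait returns
    }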
i_all_reduce() [separate send buffer]
(Non-blocking) Reduce data from all calling processes on the same communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the data to reduce |
recv_buffer | the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to reduce. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_all_reduce() [in-place]
(In-place, non-blocking) Reduce data from all calling processes on the same communicator.
exec | The executor, on which the message buffer is located. |
recv_buffer | the data to reduce and the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to reduce. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_all_to_all() [separate buffers]
(Non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_count | the number of elements to send |
recv_buffer | the buffer to receive |
recv_count | the number of elements to receive |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_all_to_all() [in-place]
(In-place, non-blocking) Communicate data from all ranks to all other ranks (MPI_Ialltoall).
See MPI documentation for more details.
exec | The executor, on which the message buffer is located. |
buffer | the buffer to send and receive |
recv_count | the number of elements to receive |
RecvType | the type of the data to receive. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_all_to_all_v() [typed]
(Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_counts | the numbers of elements to send to each rank |
send_offsets | the offsets for the send buffer |
recv_buffer | the buffer to receive into |
recv_counts | the numbers of elements to receive from each rank |
recv_offsets | the offsets for the recv buffer |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_all_to_all_v() [MPI_Datatype]
(Non-blocking) Communicate data from all ranks to all other ranks with offsets (MPI_Ialltoallv).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to send |
send_counts | the numbers of elements to send to each rank |
send_offsets | the offsets for the send buffer |
send_type | the MPI_Datatype for the send buffer |
recv_buffer | the buffer to receive into |
recv_counts | the numbers of elements to receive from each rank |
recv_offsets | the offsets for the recv buffer |
recv_type | the MPI_Datatype for the recv buffer |
i_broadcast()
(Non-blocking) Broadcast data from the calling process to all ranks in the communicator.
exec | The executor, on which the message buffer is located. |
buffer | the buffer to broadcast |
count | the number of elements to broadcast |
root_rank | the rank to broadcast from |
BroadcastType | the type of the data to broadcast. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_gather()
(Non-blocking) Gather data onto the root rank from all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_count | the number of elements to receive |
root_rank | the rank to gather into |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_gather_v()
(Non-blocking) Gather data onto the root rank from all ranks in the communicator with offsets.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to gather from |
send_count | the number of elements to send |
recv_buffer | the buffer to gather into |
recv_counts | the numbers of elements to receive from each rank |
displacements | the offsets for the recv buffer |
root_rank | the rank to gather into |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_recv()
Receive (Non-blocking, Immediate return) data from source rank.
exec | The executor, on which the message buffer is located. |
recv_buffer | the buffer to receive into |
recv_count | the number of elements to receive |
source_rank | the rank to receive the data from |
recv_tag | the tag for the recv call |
RecvType | the type of the data to receive. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_reduce()
(Non-blocking) Reduce data into root from all calling processes on the same communicator.
exec | The executor, on which the message buffer is located. |
send_buffer | the buffer to reduce |
recv_buffer | the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to reduce. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_scan()
Performs a non-blocking scan operation with the given operator (MPI_Iscan).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scan from |
recv_buffer | the result buffer |
count | the number of elements to scan |
operation | the operation to use for the scan (MPI_Op) |
ScanType | the type of the data to scan. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
i_scatter()
(Non-blocking) Scatter data from root rank to all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scatter from |
send_count | the number of elements to send |
recv_buffer | the buffer to scatter into |
recv_count | the number of elements to receive |
root_rank | the rank to scatter from |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_scatter_v()
(Non-blocking) Scatter data from root rank to all ranks in the communicator with offsets.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scatter from |
send_counts | the numbers of elements to send to each rank |
displacements | the offsets for the send buffer |
recv_buffer | the buffer to scatter into |
recv_count | the number of elements to receive |
root_rank | the rank to scatter from |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
i_send()
Send (Non-blocking, Immediate return) data from calling process to destination rank.
exec | The executor, on which the message buffer is located. |
send_buffer | the buffer to send |
send_count | the number of elements to send |
destination_rank | the rank to send the data to |
send_tag | the tag for the send call |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
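A sketch of a non-blocking point-to-point message between two ranks, assuming the request wrapper's wait() member (helper name illustrative):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Non-blocking message from rank 0 to rank 1.
    void nonblocking_message(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        const int tag = 0;
        std::vector<double> buf(16, static_cast<double>(comm.rank()));
        if (comm.rank() == 0) {
            auto req = comm.i_send(exec, buf.data(), 16, 1, tag);
            req.wait();  // buf must stay alive until the send completes
        } else if (comm.rank() == 1) {
            auto req = comm.i_recv(exec, buf.data(), 16, 0, tag);
            req.wait();
        }
    }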
node_local_rank()
Return the node-local rank of the calling process in the communicator.
operator!=()
Compare two communicator objects for inequality.
operator==()
Compare two communicator objects for equality.
rank()
Return the rank of the calling process in the communicator.
recv()
Receive data from source rank.
exec | The executor, on which the message buffer is located. |
recv_buffer | the buffer to receive |
recv_count | the number of elements to receive |
source_rank | the rank to receive the data from |
recv_tag | the tag for the recv call |
RecvType | the type of the data to receive. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
reduce()
Reduce data into root from all calling processes on the same communicator.
exec | The executor, on which the message buffer is located. |
send_buffer | the buffer to reduce |
recv_buffer | the reduced result |
count | the number of elements to reduce |
operation | the reduce operation (MPI_Op) |
ReduceType | the type of the data to reduce. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
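A minimal sketch of reducing one value per rank onto the root (helper name illustrative; host buffers assumed):

    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Sum one value per rank into rank 0; the result is only valid there.
    double sum_to_root(const mpi::communicator& comm, double local)
    {
        auto exec = gko::ReferenceExecutor::create();
        double total = 0.0;
        comm.reduce(exec, &local, &total, 1, MPI_SUM, 0);
        return total;  // meaningful on rank 0 only
    }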
scan()
Performs a scan operation with the given operator (MPI_Scan).
See MPI documentation for more details.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scan from |
recv_buffer | the result buffer |
count | the number of elements to scan |
operation | the operation to use for the scan (MPI_Op) |
ScanType | the type of the data to scan. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
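A sketch of an inclusive prefix sum over the ranks, following MPI_Scan semantics (helper name illustrative):

    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Rank r receives the sum of the values contributed by ranks 0..r.
    int prefix_sum(const mpi::communicator& comm, int local)
    {
        auto exec = gko::ReferenceExecutor::create();
        int result = 0;
        comm.scan(exec, &local, &result, 1, MPI_SUM);
        return result;  // with local == 1: comm.rank() + 1
    }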
scatter()
Scatter data from root rank to all ranks in the communicator.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scatter from |
send_count | the number of elements to send |
recv_buffer | the buffer to scatter into |
recv_count | the number of elements to receive |
root_rank | the rank to scatter from |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
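A sketch of distributing one value to every rank (helper name illustrative; the send buffer is only read on the root rank):

    #include <vector>
    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    int receive_share(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        std::vector<int> send;
        if (comm.rank() == 0) {
            send.resize(comm.size());
            for (int r = 0; r < comm.size(); ++r) {
                send[r] = 10 * r;  // value destined for rank r
            }
        }
        int received = 0;
        comm.scatter(exec, send.data(), 1, &received, 1, 0);
        return received;  // 10 * comm.rank() on every rank
    }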
scatter_v()
Scatter data from root rank to all ranks in the communicator with offsets.
exec | The executor, on which the message buffers are located. |
send_buffer | the buffer to scatter from |
send_counts | the numbers of elements to send to each rank |
displacements | the offsets for the send buffer |
recv_buffer | the buffer to scatter into |
recv_count | the number of elements to receive |
root_rank | the rank to scatter from |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
RecvType | the type of the data to receive. The same restrictions as for SendType apply. |
send()
Send (Blocking) data from calling process to destination rank.
exec | The executor, on which the message buffer is located. |
send_buffer | the buffer to send |
send_count | the number of elements to send |
destination_rank | the rank to send the data to |
send_tag | the tag for the send call |
SendType | the type of the data to send. Has to be a type which has a specialization of type_impl that defines its MPI_Datatype. |
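A minimal sketch of a blocking message between two ranks, paired with the matching recv() call (helper name illustrative; host buffers assumed):

    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    // Blocking message from rank 0 to rank 1.
    void blocking_message(const mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        const int tag = 0;
        double value = 3.14;
        if (comm.rank() == 0) {
            comm.send(exec, &value, 1, 1, tag);
        } else if (comm.rank() == 1) {
            comm.recv(exec, &value, 1, 0, tag);
        }
    }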
size()
Return the size of the communicator (number of ranks).
synchronize()
Synchronizes all ranks in the communicator by calling MPI_Barrier on the underlying MPI_Comm.
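A common use is bracketing a phase with barriers so that a wall-clock measurement reflects the slowest rank (a sketch; the function name is illustrative):

    #include <ginkgo/ginkgo.hpp>

    namespace mpi = gko::experimental::mpi;

    double timed_phase(const mpi::communicator& comm)
    {
        comm.synchronize();  // all ranks enter the phase together
        const double start = MPI_Wtime();
        // ... work to be timed ...
        comm.synchronize();  // wait for the slowest rank
        return MPI_Wtime() - start;
    }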