Ginkgo 1.8.0: a numerical linear algebra library targeting many-core architectures
gko::experimental::mpi::communicator Member List

This is the complete list of members for gko::experimental::mpi::communicator, including all inherited members. Every member below is defined in gko::experimental::mpi::communicator and is declared inline. Usage sketches for a few representative members follow the list.

all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
communicator(const MPI_Comm &comm, bool force_host_buffer=false)
communicator(const MPI_Comm &comm, int color, int key)
communicator(const communicator &comm, int color, int key)
force_host_buffer() const (defined in gko::experimental::mpi::communicator)
gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
get() const
i_all_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
i_all_reduce(std::shared_ptr< const Executor > exec, ReduceType *recv_buffer, int count, MPI_Op operation) const
i_all_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation) const
i_all_to_all(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count) const
i_all_to_all(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count) const
i_all_to_all_v(std::shared_ptr< const Executor > exec, const void *send_buffer, const int *send_counts, const int *send_offsets, MPI_Datatype send_type, void *recv_buffer, const int *recv_counts, const int *recv_offsets, MPI_Datatype recv_type) const
i_all_to_all_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *send_offsets, RecvType *recv_buffer, const int *recv_counts, const int *recv_offsets) const
i_broadcast(std::shared_ptr< const Executor > exec, BroadcastType *buffer, int count, int root_rank) const
i_gather(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
i_gather_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int *recv_counts, const int *displacements, int root_rank) const
i_recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
i_reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
i_scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
i_scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
i_scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
i_send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
node_local_rank() const
operator!=(const communicator &rhs) const
operator==(const communicator &rhs) const
rank() const
recv(std::shared_ptr< const Executor > exec, RecvType *recv_buffer, const int recv_count, const int source_rank, const int recv_tag) const
reduce(std::shared_ptr< const Executor > exec, const ReduceType *send_buffer, ReduceType *recv_buffer, int count, MPI_Op operation, int root_rank) const
scan(std::shared_ptr< const Executor > exec, const ScanType *send_buffer, ScanType *recv_buffer, int count, MPI_Op operation) const
scatter(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, RecvType *recv_buffer, const int recv_count, int root_rank) const
scatter_v(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int *send_counts, const int *displacements, RecvType *recv_buffer, const int recv_count, int root_rank) const
send(std::shared_ptr< const Executor > exec, const SendType *send_buffer, const int send_count, const int destination_rank, const int send_tag) const
size() const
synchronize() const
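
The sketch below combines the MPI_Comm constructor with the rank, size, node_local_rank and synchronize queries from the list above. It is a minimal example, assuming Ginkgo was built with MPI enabled; the gko::experimental::mpi::environment RAII helper used here for MPI_Init/MPI_Finalize is an assumption taken from the surrounding namespace, not a member of this class.

    #include <iostream>

    #include <mpi.h>

    #include <ginkgo/ginkgo.hpp>

    int main(int argc, char* argv[])
    {
        // Assumed RAII helper: initializes MPI on construction, finalizes on destruction.
        gko::experimental::mpi::environment env(argc, argv);

        // Wrap an existing MPI communicator.
        gko::experimental::mpi::communicator comm(MPI_COMM_WORLD);

        std::cout << "rank " << comm.rank() << " of " << comm.size()
                  << ", node-local rank " << comm.node_local_rank() << '\n';

        comm.synchronize();  // barrier over all ranks of this communicator
    }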
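
A second sketch illustrates the blocking collectives all_reduce (in-place overload) and broadcast with host buffers and a ReferenceExecutor. The executor argument tells the communicator where the buffers live; the values, counts and root rank below are illustrative only.

    #include <vector>

    #include <mpi.h>

    #include <ginkgo/ginkgo.hpp>

    void collectives_demo(const gko::experimental::mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();

        // In-place all_reduce: every rank contributes its value, all ranks receive the sum.
        double local_sum = static_cast<double>(comm.rank());
        comm.all_reduce(exec, &local_sum, 1, MPI_SUM);

        // Broadcast three integers from rank 0 to all other ranks.
        std::vector<int> data(3, comm.rank() == 0 ? 42 : 0);
        comm.broadcast(exec, data.data(), static_cast<int>(data.size()), 0);
    }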
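
Blocking point-to-point communication uses send and recv with an explicit destination or source rank and a message tag. The two-rank pattern and tag value in this sketch are illustrative only.

    #include <array>

    #include <mpi.h>

    #include <ginkgo/ginkgo.hpp>

    void ping(const gko::experimental::mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();
        constexpr int tag = 0;

        if (comm.rank() == 0 && comm.size() > 1) {
            // Rank 0 sends four doubles to rank 1.
            std::array<double, 4> payload{1.0, 2.0, 3.0, 4.0};
            comm.send(exec, payload.data(), static_cast<int>(payload.size()), 1, tag);
        } else if (comm.rank() == 1) {
            // Rank 1 receives them from rank 0.
            std::array<double, 4> payload{};
            comm.recv(exec, payload.data(), static_cast<int>(payload.size()), 0, tag);
        }
    }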
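
The i_*-prefixed members are the non-blocking counterparts of the collectives and point-to-point calls above. The member list does not show their return types; the sketch below assumes they return a request-like object (gko::experimental::mpi::request) whose wait() completes the operation.

    #include <mpi.h>

    #include <ginkgo/ginkgo.hpp>

    void nonblocking_demo(const gko::experimental::mpi::communicator& comm)
    {
        auto exec = gko::ReferenceExecutor::create();

        double local_max = static_cast<double>(comm.rank());
        // Assumption: i_all_reduce returns a request object that tracks completion.
        auto req = comm.i_all_reduce(exec, &local_max, 1, MPI_MAX);

        // ... overlap independent work here ...

        req.wait();  // complete the reduction before reading local_max
    }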