OpenLB 1.7
Public Member Functions | Friends | List of all members
olb::singleton::MpiManager Class Reference

Wrapper functions that simplify the use of MPI. More...

#include <mpiManager.h>


Public Member Functions

void init (int *argc, char ***argv, bool verbose=true)
 Initializes the MPI manager.
 
int getSize () const
 Returns the number of processes.
 
int getRank () const
 Returns the process ID.
 
int bossId () const
 Returns process ID of main processor.
 
bool isMainProcessor () const
 Tells whether current processor is main processor.
 
double getTime () const
 Returns universal MPI-time in seconds.
 
void barrier (MPI_Comm comm=MPI_COMM_WORLD)
 Synchronizes the processes.
 
void synchronizeIO (unsigned tDelay=100, MPI_Comm comm=MPI_COMM_WORLD)
 Synchronizes the processes and waits to ensure correct cout order.
 
template<typename T >
void send (T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, blocking.
 
template<typename T , unsigned DIM>
void send (util::ADf< T, DIM > *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename... args>
void send (std::vector< args... > &vec, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<class T , std::size_t N>
void send (std::array< T, N > &array, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendInit (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Initialize persistent non-blocking send.
 
template<typename T , unsigned DIM>
void sendInit (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void iSend (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, non-blocking.
 
template<typename T , unsigned DIM>
void iSend (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void ibSend (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, non-blocking and buffered.
 
template<typename T , unsigned DIM>
void ibSend (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
std::size_t probeReceiveSize (int source, MPI_Datatype type, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Probe size of incoming message.
 
template<typename TYPE >
std::size_t probeReceiveSize (int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Probe size of incoming message with TYPE.
 
template<typename T >
void receive (T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Receives data at *buf, blocking.
 
template<typename T , unsigned DIM>
void receive (util::ADf< T, DIM > *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename... args>
void receive (std::vector< args... > &vec, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<class T , std::size_t N>
void receive (std::array< T, N > &array, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void recvInit (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Initialize persistent non-blocking receive.
 
template<typename T , unsigned DIM>
void recvInit (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void iRecv (T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Receives data at *buf, non-blocking.
 
template<typename T , unsigned DIM>
void iRecv (util::ADf< T, DIM > *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendRecv (T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Send and receive data between two partners.
 
template<typename T , unsigned DIM>
void sendRecv (util::ADf< T, DIM > *sendBuf, util::ADf< T, DIM > *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendToMaster (T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data to master processor.
 
template<typename T >
void scatterv (T *sendBuf, int *sendCounts, int *displs, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Scatter data from one processor over multiple processors.
 
template<typename T >
void gather (T *sendBuf, int sendCount, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gather data from multiple processors to one processor.
 
template<typename T >
void gatherv (T *sendBuf, int sendCount, T *recvBuf, int *recvCounts, int *displs, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gather data from multiple processors to one processor.
 
template<typename T >
void bCast (T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcast data from one processor to multiple processors.
 
template<typename T , unsigned DIM>
void bCast (util::ADf< T, DIM > *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T , unsigned DIM>
void bCast (BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void bCast (T &sendVal, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void bCastThroughMaster (T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcast data when root is unknown to other processors.
 
template<typename T , unsigned DIM>
void bCastThroughMaster (util::ADf< T, DIM > *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 
void bCast (std::string &message, int root=0)
 Special case for broadcasting strings. Memory handling is automatic.
 
void bCast (BlockData< 2, double, double > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Special case for broadcasting BlockData2D.
 
void bCast (BlockData< 2, float, float > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Special case for broadcasting BlockData2D.
 
template<typename T >
void reduce (T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
 Reduction operation toward one processor.
 
template<typename T , unsigned DIM>
void reduce (util::ADf< T, DIM > &sendVal, util::ADf< T, DIM > &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
 
template<typename T , unsigned DIM>
void reduce (BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &sendVal, BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void reduceVect (std::vector< T > &sendVal, std::vector< T > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Element-per-element reduction of a vector of data.
 
template<typename T >
void reduceAndBcast (T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduction operation, followed by a broadcast.
 
template<typename T , unsigned DIM>
void reduceAndBcast (util::ADf< T, DIM > &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
void wait (MPI_Request *request, MPI_Status *status)
 Complete a non-blocking MPI operation.
 
void waitAll (MpiNonBlockingHelper &mpiNbHelper)
 Complete a series of non-blocking MPI operations.
 
template<>
void send (bool *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (char *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (int *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (float *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (double *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void sendInit (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendInit (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendInit (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (char *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (float *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (long double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (char *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (float *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void receive (bool *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (char *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (int *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (float *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (double *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (long double *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void sendToMaster (bool *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (char *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (int *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (float *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (double *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void recvInit (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void recvInit (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (bool *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (char *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (int *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (float *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (double *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendRecv (bool *sendBuf, bool *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (char *sendBuf, char *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (int *sendBuf, int *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (float *sendBuf, float *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (long *sendBuf, long *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (double *sendBuf, double *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (long double *sendBuf, long double *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void scatterv (bool *sendBuf, int *sendCounts, int *displs, bool *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (char *sendBuf, int *sendCounts, int *displs, char *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (int *sendBuf, int *sendCounts, int *displs, int *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (float *sendBuf, int *sendCounts, int *displs, float *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (double *sendBuf, int *sendCounts, int *displs, double *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void gather (int *sendBuf, int sendCount, int *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void gatherv (bool *sendBuf, int sendCount, bool *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (char *sendBuf, int sendCount, char *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (int *sendBuf, int sendCount, int *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (float *sendBuf, int sendCount, float *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (double *sendBuf, int sendCount, double *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void bCast (bool *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (char *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned char *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (int *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned long *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (float *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (double *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (bool &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (char &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned char &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (int &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned long &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (float &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (double &sendVal, int root, MPI_Comm comm)
 
template<>
void bCastThroughMaster (bool *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (char *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (int *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (float *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (double *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void reduce (bool &sendVal, bool &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (char &sendVal, char &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (int &sendVal, int &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (float &sendVal, float &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (double &sendVal, double &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< char > &sendVal, std::vector< char > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< int > &sendVal, std::vector< int > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< float > &sendVal, std::vector< float > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< double > &sendVal, std::vector< double > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (bool &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (char &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (int &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (float &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (double &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (long double &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (long &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (unsigned long &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, double, int > &sendVal, BlockData< 2, double, int > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, double, double > &sendVal, BlockData< 2, double, double > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, float, float > &sendVal, BlockData< 2, float, float > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 

Friends

MpiManager & mpi ()
 

Detailed Description

Wrapper functions that simplify the use of MPI.

Definition at line 90 of file mpiManager.h.
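
A minimal usage sketch (not part of the OpenLB distribution): the global manager is obtained through the olb::singleton::mpi() friend accessor listed above. It assumes the OpenLB headers are on the include path (the header name follows the #include shown at the top of this page) and that the library was built with MPI support; finalization is assumed to be handled by the singleton at program exit.

#include "mpiManager.h"
#include <iostream>

int main(int argc, char* argv[])
{
  olb::singleton::MpiManager& mpi = olb::singleton::mpi();
  mpi.init(&argc, &argv);            // wraps MPI_Init and installs the error handler

  const int rank = mpi.getRank();    // process ID of this process
  const int size = mpi.getSize();    // total number of processes

  if (mpi.isMainProcessor()) {
    std::cout << "running on " << size << " processes" << std::endl;
  }

  mpi.barrier();                     // wraps MPI_Barrier on MPI_COMM_WORLD
  std::cout << "rank " << rank << " passed the barrier" << std::endl;
  return 0;                          // no explicit MPI_Finalize here (assumed to be
                                     // handled by the MpiManager singleton on destruction)
}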

Member Function Documentation

◆ barrier()

void olb::singleton::MpiManager::barrier ( MPI_Comm comm = MPI_COMM_WORLD)

Synchronizes the processes.

Definition at line 93 of file mpiManager.cpp.

94{
95 if (!ok) {
96 return;
97 }
98 MPI_Barrier(comm);
99}

◆ bCast() [1/21]

void olb::singleton::MpiManager::bCast ( BlockData< 2, double, double > & sendData,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Special case for broadcasting BlockData2D.

Definition at line 152 of file blockData.hh.

153{
154 if (!ok) {
155 return;
156 }
157 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
158 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
159 sendData.getNcells(), MPI_DOUBLE, root, comm);
160 }
161}

References olb::cpu::sisd::Column< T >::data(), olb::BlockData< D, T, U >::getColumn(), olb::BlockStructureD< D >::getNcells(), and olb::BlockData< D, T, U >::getSize().


◆ bCast() [2/21]

void olb::singleton::MpiManager::bCast ( BlockData< 2, float, float > & sendData,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Special case for broadcasting BlockData2D.

Definition at line 163 of file blockData.hh.

164{
165 if (!ok) {
166 return;
167 }
168 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
169 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
170 sendData.getNcells(), MPI_FLOAT, root, comm);
171 }
172}

References olb::cpu::sisd::Column< T >::data(), olb::BlockData< D, T, U >::getColumn(), olb::BlockStructureD< D >::getNcells(), and olb::BlockData< D, T, U >::getSize().


◆ bCast() [3/21]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCast ( BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > & sendData,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 134 of file mpiManagerAD.hh.

137{
138 if (!ok) {
139 return;
140 }
141// MPI_Bcast(static_cast<void*>(sendData.getRawData()),
142// sendData.getDataSize(), MPI_DOUBLE, root, comm);
143 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
144 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
145 sendData.getNcells(), MPI_DOUBLE, root, comm);
146 }
147}

◆ bCast() [4/21]

template<>
void olb::singleton::MpiManager::bCast ( bool & sendVal,
int root,
MPI_Comm comm )

Definition at line 947 of file mpiManager.cpp.

948{
949 if (!ok) {
950 return;
951 }
952 MPI_Bcast(&sendVal, 1, MPI_BYTE, root, comm);
953}

◆ bCast() [5/21]

template<>
void olb::singleton::MpiManager::bCast ( bool * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 857 of file mpiManager.cpp.

858{
859 if (!ok) {
860 return;
861 }
862 MPI_Bcast(static_cast<void*>(sendBuf),
863 sendCount, MPI_BYTE, root, comm);
864}

◆ bCast() [6/21]

template<>
void olb::singleton::MpiManager::bCast ( char & sendVal,
int root,
MPI_Comm comm )

Definition at line 955 of file mpiManager.cpp.

956{
957 if (!ok) {
958 return;
959 }
960 MPI_Bcast(&sendVal, 1, MPI_CHAR, root, comm);
961}

◆ bCast() [7/21]

template<>
void olb::singleton::MpiManager::bCast ( char * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 867 of file mpiManager.cpp.

868{
869 if (!ok) {
870 return;
871 }
872 MPI_Bcast(static_cast<void*>(sendBuf),
873 sendCount, MPI_CHAR, root, comm);
874}

◆ bCast() [8/21]

template<>
void olb::singleton::MpiManager::bCast ( double & sendVal,
int root,
MPI_Comm comm )

Definition at line 1000 of file mpiManager.cpp.

1001{
1002 if (!ok) {
1003 return;
1004 }
1005 MPI_Bcast(&sendVal, 1, MPI_DOUBLE, root, comm);
1006}

◆ bCast() [9/21]

template<>
void olb::singleton::MpiManager::bCast ( double * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 917 of file mpiManager.cpp.

918{
919 if (!ok) {
920 return;
921 }
922 MPI_Bcast(static_cast<void*>(sendBuf),
923 sendCount, MPI_DOUBLE, root, comm);
924}

◆ bCast() [10/21]

template<>
void olb::singleton::MpiManager::bCast ( float & sendVal,
int root,
MPI_Comm comm )

Definition at line 991 of file mpiManager.cpp.

992{
993 if (!ok) {
994 return;
995 }
996 MPI_Bcast(&sendVal, 1, MPI_FLOAT, root, comm);
997}

◆ bCast() [11/21]

template<>
void olb::singleton::MpiManager::bCast ( float * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 907 of file mpiManager.cpp.

908{
909 if (!ok) {
910 return;
911 }
912 MPI_Bcast(static_cast<void*>(sendBuf),
913 sendCount, MPI_FLOAT, root, comm);
914}

◆ bCast() [12/21]

template<>
void olb::singleton::MpiManager::bCast ( int & sendVal,
int root,
MPI_Comm comm )

Definition at line 973 of file mpiManager.cpp.

974{
975 if (!ok) {
976 return;
977 }
978 MPI_Bcast(&sendVal, 1, MPI_INT, root, comm);
979}

◆ bCast() [13/21]

template<>
void olb::singleton::MpiManager::bCast ( int * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 887 of file mpiManager.cpp.

888{
889 if (!ok) {
890 return;
891 }
892 MPI_Bcast(static_cast<void*>(sendBuf),
893 sendCount, MPI_INT, root, comm);
894}

◆ bCast() [14/21]

void olb::singleton::MpiManager::bCast ( std::string & message,
int root = 0 )

Special case for broadcasting strings. Memory handling is automatic.

◆ bCast() [15/21]

template<typename T >
void olb::singleton::MpiManager::bCast ( T & sendVal,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

◆ bCast() [16/21]

template<typename T >
void olb::singleton::MpiManager::bCast ( T * sendBuf,
int sendCount,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Broadcast data from one processor to multiple processors.
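
A short sketch of the pointer-based bCast() overload (illustrative, not from the library): rank 0 fills a small buffer and broadcasts it, so afterwards every rank holds the values set on the root. The parameter names and buffer size are made up for the example.

#include "mpiManager.h"

void broadcastParameters()
{
  auto& mpi = olb::singleton::mpi();

  double params[3] = {0., 0., 0.};
  if (mpi.getRank() == 0) {
    params[0] = 1.0;    // e.g. a relaxation time
    params[1] = 0.01;   // e.g. a characteristic velocity
    params[2] = 100.;   // e.g. a resolution
  }
  mpi.bCast(params, 3);   // root = 0 and comm = MPI_COMM_WORLD by default
}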


◆ bCast() [17/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned char & sendVal,
int root,
MPI_Comm comm )

Definition at line 964 of file mpiManager.cpp.

965{
966 if (!ok) {
967 return;
968 }
969 MPI_Bcast(&sendVal, 1, MPI_UNSIGNED_CHAR, root, comm);
970}

◆ bCast() [18/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned char * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 877 of file mpiManager.cpp.

878{
879 if (!ok) {
880 return;
881 }
882 MPI_Bcast(static_cast<void*>(sendBuf),
883 sendCount, MPI_UNSIGNED_CHAR, root, comm);
884}

◆ bCast() [19/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned long & sendVal,
int root,
MPI_Comm comm )

Definition at line 982 of file mpiManager.cpp.

983{
984 if (!ok) {
985 return;
986 }
987 MPI_Bcast(&sendVal, 1, MPI_UNSIGNED_LONG, root, comm);
988}

◆ bCast() [20/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned long * sendBuf,
int sendCount,
int root,
MPI_Comm comm )

Definition at line 897 of file mpiManager.cpp.

898{
899 if (!ok) {
900 return;
901 }
902 MPI_Bcast(static_cast<void*>(sendBuf),
903 sendCount, MPI_UNSIGNED_LONG, root, comm);
904}

◆ bCast() [21/21]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCast ( util::ADf< T, DIM > * sendBuf,
int sendCount,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 124 of file mpiManagerAD.hh.

125{
126 if (!ok) {
127 return;
128 }
129 MPI_Bcast(static_cast<void*>(sendBuf),
130 (sizeof(util::ADf<T,DIM>)/8)*sendCount, MPI_DOUBLE, root, comm);
131}

◆ bCastThroughMaster() [1/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( bool * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 1009 of file mpiManager.cpp.

1010{
1011 if (!ok) {
1012 return;
1013 }
1014 if (iAmRoot && !isMainProcessor()) {
1015 send(sendBuf, sendCount, 0);
1016 }
1017 if (isMainProcessor() && !iAmRoot) {
1018 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1019 }
1020 bCast(sendBuf, sendCount, 0);
1021}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [2/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( char * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 1024 of file mpiManager.cpp.

1025{
1026 if (!ok) {
1027 return;
1028 }
1029 if (iAmRoot && !isMainProcessor()) {
1030 send(sendBuf, sendCount, 0);
1031 }
1032 if (isMainProcessor() && !iAmRoot) {
1033 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1034 }
1035 bCast(sendBuf, sendCount, 0);
1036}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [3/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( double * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 1069 of file mpiManager.cpp.

1070{
1071 if (!ok) {
1072 return;
1073 }
1074 if (iAmRoot && !isMainProcessor()) {
1075 send(sendBuf, sendCount, 0);
1076 }
1077 if (isMainProcessor() && !iAmRoot) {
1078 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1079 }
1080 bCast(sendBuf, sendCount, 0);
1081}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [4/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( float * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 1054 of file mpiManager.cpp.

1055{
1056 if (!ok) {
1057 return;
1058 }
1059 if (iAmRoot && !isMainProcessor()) {
1060 send(sendBuf, sendCount, 0);
1061 }
1062 if (isMainProcessor() && !iAmRoot) {
1063 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1064 }
1065 bCast(sendBuf, sendCount, 0);
1066}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [5/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( int * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 1039 of file mpiManager.cpp.

1040{
1041 if (!ok) {
1042 return;
1043 }
1044 if (iAmRoot && !isMainProcessor()) {
1045 send(sendBuf, sendCount, 0);
1046 }
1047 if (isMainProcessor() && !iAmRoot) {
1048 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1049 }
1050 bCast(sendBuf, sendCount, 0);
1051}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [6/7]

template<typename T >
void olb::singleton::MpiManager::bCastThroughMaster ( T * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm = MPI_COMM_WORLD )

Broadcast data when root is unknown to other processors.
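
A hedged sketch of the intended use: exactly one rank owns the data (decided by a local predicate) and the other ranks do not know which rank that is. bCastThroughMaster() first routes the buffer to the main processor and then broadcasts it from there. The function and variable names below are illustrative.

#include "mpiManager.h"

// iOwnTheData must be true on exactly one rank; result must point to a buffer
// of length count on every rank.
void shareLocalResult(bool iOwnTheData, double* result, int count)
{
  auto& mpi = olb::singleton::mpi();
  mpi.bCastThroughMaster(result, count, iOwnTheData);
  // result now holds the owner's values on all ranks
}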

◆ bCastThroughMaster() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCastThroughMaster ( util::ADf< T, DIM > * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 188 of file mpiManagerAD.hh.

189{
190 if (!ok) {
191 return;
192 }
193 if (iAmRoot && !isMainProcessor()) {
194 send(sendBuf, sendCount, 0);
195 }
196 if (isMainProcessor() && !iAmRoot) {
197 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
198 }
199 bCast(sendBuf, sendCount, 0);
200}

References bCast(), isMainProcessor(), receive(), and send().


◆ bossId()

int olb::singleton::MpiManager::bossId ( ) const

Returns process ID of main processor.

Definition at line 75 of file mpiManager.cpp.

76{
77 return 0;
78}

◆ gather() [1/2]

template<>
void olb::singleton::MpiManager::gather ( int * sendBuf,
int sendCount,
int * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 766 of file mpiManager.cpp.

769{
770 if (!ok) {
771 return;
772 }
773 MPI_Gather(static_cast<void*>(sendBuf), sendCount, MPI_INT,
774 static_cast<void*>(recvBuf), recvCount, MPI_INT,
775 root, comm);
776}

◆ gather() [2/2]

template<typename T >
void olb::singleton::MpiManager::gather ( T * sendBuf,
int sendCount,
T * recvBuf,
int recvCount,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Gather data from multiple processors to one processor.


◆ gatherv() [1/6]

template<>
void olb::singleton::MpiManager::gatherv ( bool * sendBuf,
int sendCount,
bool * recvBuf,
int * recvCounts,
int * displs,
int root,
MPI_Comm comm )

Definition at line 779 of file mpiManager.cpp.

782{
783 if (!ok) {
784 return;
785 }
786 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_BYTE,
787 static_cast<void*>(recvBuf), recvCounts, displs, MPI_BYTE,
788 root, comm);
789}

◆ gatherv() [2/6]

template<>
void olb::singleton::MpiManager::gatherv ( char * sendBuf,
int sendCount,
char * recvBuf,
int * recvCounts,
int * displs,
int root,
MPI_Comm comm )

Definition at line 792 of file mpiManager.cpp.

795{
796 if (!ok) {
797 return;
798 }
799 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_CHAR,
800 static_cast<void*>(recvBuf), recvCounts, displs, MPI_CHAR,
801 root, comm);
802}

◆ gatherv() [3/6]

template<>
void olb::singleton::MpiManager::gatherv ( double * sendBuf,
int sendCount,
double * recvBuf,
int * recvCounts,
int * displs,
int root,
MPI_Comm comm )

Definition at line 831 of file mpiManager.cpp.

834{
835 if (!ok) {
836 return;
837 }
838 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_DOUBLE,
839 static_cast<void*>(recvBuf), recvCounts, displs, MPI_DOUBLE,
840 root, comm);
841}

◆ gatherv() [4/6]

template<>
void olb::singleton::MpiManager::gatherv ( float * sendBuf,
int sendCount,
float * recvBuf,
int * recvCounts,
int * displs,
int root,
MPI_Comm comm )

Definition at line 818 of file mpiManager.cpp.

821{
822 if (!ok) {
823 return;
824 }
825 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_FLOAT,
826 static_cast<void*>(recvBuf), recvCounts, displs, MPI_FLOAT,
827 root, comm);
828}

◆ gatherv() [5/6]

template<>
void olb::singleton::MpiManager::gatherv ( int * sendBuf,
int sendCount,
int * recvBuf,
int * recvCounts,
int * displs,
int root,
MPI_Comm comm )

Definition at line 805 of file mpiManager.cpp.

808{
809 if (!ok) {
810 return;
811 }
812 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_INT,
813 static_cast<void*>(recvBuf), recvCounts, displs, MPI_INT,
814 root, comm);
815}

◆ gatherv() [6/6]

template<typename T >
void olb::singleton::MpiManager::gatherv ( T * sendBuf,
int sendCount,
T * recvBuf,
int * recvCounts,
int * displs,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Gather data from multiple processors to one processor.
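
A sketch of gatherv() with variable-sized contributions (illustrative, not from the library): rank r sends r+1 integers and the root assembles them using explicit receive counts and displacements, exactly as with plain MPI_Gatherv.

#include "mpiManager.h"
#include <numeric>
#include <vector>

void gatherVariableSizes()
{
  auto& mpi = olb::singleton::mpi();
  const int rank = mpi.getRank();
  const int size = mpi.getSize();

  std::vector<int> local(rank + 1, rank);       // rank r contributes r+1 copies of r

  std::vector<int> recvCounts(size), displs(size), all;
  if (rank == 0) {                              // receive layout is needed on the root only
    for (int r = 0; r < size; ++r) {
      recvCounts[r] = r + 1;
    }
    std::partial_sum(recvCounts.begin(), recvCounts.end() - 1, displs.begin() + 1);
    all.resize(std::accumulate(recvCounts.begin(), recvCounts.end(), 0));
  }
  mpi.gatherv(local.data(), static_cast<int>(local.size()),
              all.data(), recvCounts.data(), displs.data());   // root = 0 by default
}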


◆ getRank()

int olb::singleton::MpiManager::getRank ( ) const

Returns the process ID.

Definition at line 70 of file mpiManager.cpp.

71{
72 return taskId;
73}

◆ getSize()

int olb::singleton::MpiManager::getSize ( ) const

Returns the number of processes.

Definition at line 65 of file mpiManager.cpp.

66{
67 return numTasks;
68}

◆ getTime()

double olb::singleton::MpiManager::getTime ( ) const

Returns universal MPI-time in seconds.

Definition at line 85 of file mpiManager.cpp.

86{
87 if (!ok) {
88 return 0.;
89 }
90 return MPI_Wtime();
91}
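
A small timing sketch (illustrative): getTime() wraps MPI_Wtime(), and the reduceAndBcast() call documented further below turns the local measurement into the global maximum on every rank, so the slowest rank defines the reported duration.

#include "mpiManager.h"
#include <mpi.h>        // MPI_MAX
#include <iostream>

void timeSomeWork()
{
  auto& mpi = olb::singleton::mpi();

  const double t0 = mpi.getTime();
  // ... work to be timed ...
  double elapsed = mpi.getTime() - t0;

  mpi.reduceAndBcast(elapsed, MPI_MAX);
  if (mpi.isMainProcessor()) {
    std::cout << "elapsed: " << elapsed << " s" << std::endl;
  }
}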

◆ ibSend() [1/7]

template<>
void olb::singleton::MpiManager::ibSend ( bool * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 292 of file mpiManager.cpp.

294{
295 if (ok) {
296 MPI_Ibsend(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
297 }
298}

◆ ibSend() [2/7]

template<>
void olb::singleton::MpiManager::ibSend ( char * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 301 of file mpiManager.cpp.

303{
304 if (ok) {
305 MPI_Ibsend(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm, request);
306 }
307}

◆ ibSend() [3/7]

template<>
void olb::singleton::MpiManager::ibSend ( double * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 328 of file mpiManager.cpp.

330{
331 if (ok) {
332 MPI_Ibsend(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm, request);
333 }
334}

◆ ibSend() [4/7]

template<>
void olb::singleton::MpiManager::ibSend ( float * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 319 of file mpiManager.cpp.

321{
322 if (ok) {
323 MPI_Ibsend(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm, request);
324 }
325}

◆ ibSend() [5/7]

template<>
void olb::singleton::MpiManager::ibSend ( int * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 310 of file mpiManager.cpp.

312{
313 if (ok) {
314 MPI_Ibsend(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm, request);
315 }
316}

◆ ibSend() [6/7]

template<typename T >
void olb::singleton::MpiManager::ibSend ( T * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Sends data at *buf, non-blocking and buffered.

◆ ibSend() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::ibSend ( util::ADf< T, DIM > * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

◆ init()

void olb::singleton::MpiManager::init ( int * argc,
char *** argv,
bool verbose = true )

Initializes the MPI manager.

Definition at line 48 of file mpiManager.cpp.

49{
50 int ok0{};
51 MPI_Initialized(&ok0);
52 if (ok0) {
53 return;
54 }
55 int ok1 = MPI_Init(argc, argv);
56 int ok2 = MPI_Comm_rank(MPI_COMM_WORLD, &taskId);
57 int ok3 = MPI_Comm_size(MPI_COMM_WORLD, &numTasks);
58 int ok4 = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
59 ok = (ok1 == MPI_SUCCESS && ok2 == MPI_SUCCESS && ok3 == MPI_SUCCESS && ok4 == MPI_SUCCESS);
60 if (verbose) {
61 clout << "Sucessfully initialized, numThreads=" << getSize() << std::endl;
62 }
63}

References getSize().


◆ iRecv() [1/7]

template<>
void olb::singleton::MpiManager::iRecv ( bool * buf,
int count,
int source,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 549 of file mpiManager.cpp.

550{
551 if (ok) {
552 MPI_Irecv(static_cast<void*>(buf), count, MPI_BYTE, source, tag, comm, request);
553 }
554}

◆ iRecv() [2/7]

template<>
void olb::singleton::MpiManager::iRecv ( char * buf,
int count,
int source,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 557 of file mpiManager.cpp.

558{
559 if (ok) {
560 MPI_Irecv(static_cast<void*>(buf), count, MPI_CHAR, source, tag, comm, request);
561 }
562}

◆ iRecv() [3/7]

template<>
void olb::singleton::MpiManager::iRecv ( double * buf,
int count,
int source,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 581 of file mpiManager.cpp.

582{
583 if (ok) {
584 MPI_Irecv(static_cast<void*>(buf), count, MPI_DOUBLE, source, tag, comm, request);
585 }
586}

◆ iRecv() [4/7]

template<>
void olb::singleton::MpiManager::iRecv ( float * buf,
int count,
int source,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 573 of file mpiManager.cpp.

574{
575 if (ok) {
576 MPI_Irecv(static_cast<void*>(buf), count, MPI_FLOAT, source, tag, comm, request);
577 }
578}

◆ iRecv() [5/7]

template<>
void olb::singleton::MpiManager::iRecv ( int * buf,
int count,
int source,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 565 of file mpiManager.cpp.

566{
567 if (ok) {
568 MPI_Irecv(static_cast<void*>(buf), count, MPI_INT, source, tag, comm, request);
569 }
570}

◆ iRecv() [6/7]

template<typename T >
void olb::singleton::MpiManager::iRecv ( T * buf,
int count,
int source,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Receives data at *buf, non-blocking.

◆ iRecv() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::iRecv ( util::ADf< T, DIM > * buf,
int count,
int source,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 98 of file mpiManagerAD.hh.

99{
100 if (ok) {
101 MPI_Irecv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, request);
102 }
103}

◆ iSend() [1/8]

template<>
void olb::singleton::MpiManager::iSend ( bool * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 210 of file mpiManager.cpp.

212{
213 if (ok) {
214 MPI_Isend(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
215 }
216}

◆ iSend() [2/8]

template<>
void olb::singleton::MpiManager::iSend ( char * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 219 of file mpiManager.cpp.

221{
222 if (ok) {
223 MPI_Isend(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm, request);
224 }
225}

◆ iSend() [3/8]

template<>
void olb::singleton::MpiManager::iSend ( double * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 273 of file mpiManager.cpp.

275{
276 if (ok) {
277 MPI_Isend(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm, request);
278 }
279}

◆ iSend() [4/8]

template<>
void olb::singleton::MpiManager::iSend ( float * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 264 of file mpiManager.cpp.

266{
267 if (ok) {
268 MPI_Isend(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm, request);
269 }
270}

◆ iSend() [5/8]

template<>
void olb::singleton::MpiManager::iSend ( int * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 237 of file mpiManager.cpp.

239{
240 if (ok) {
241 MPI_Isend(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm, request);
242 }
243}

◆ iSend() [6/8]

template<>
void olb::singleton::MpiManager::iSend ( long double * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 282 of file mpiManager.cpp.

284{
285 if (ok) {
286 MPI_Isend(static_cast<void*>(buf), count, MPI_LONG_DOUBLE, dest, tag, comm, request);
287 }
288}

◆ iSend() [7/8]

template<typename T >
void olb::singleton::MpiManager::iSend ( T * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Sends data at *buf, non-blocking.
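
A hedged sketch of a non-blocking exchange between two ranks using iSend(), iRecv() and wait(); the neighbour rank is illustrative and would normally come from the block decomposition.

#include "mpiManager.h"
#include <mpi.h>        // MPI_Request, MPI_Status

void exchangeWithNeighbour(int neighbourRank)
{
  auto& mpi = olb::singleton::mpi();

  double sendBuf[8] = {};   // local boundary values (illustrative)
  double recvBuf[8] = {};

  MPI_Request sendRequest, recvRequest;
  MPI_Status  status;

  mpi.iRecv(recvBuf, 8, neighbourRank, &recvRequest);   // post the receive first
  mpi.iSend(sendBuf, 8, neighbourRank, &sendRequest);

  // ... overlap communication with local work here ...

  mpi.wait(&sendRequest, &status);   // complete both operations
  mpi.wait(&recvRequest, &status);
}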


◆ iSend() [8/8]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::iSend ( util::ADf< T, DIM > * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 85 of file mpiManagerAD.hh.

87{
88 if (ok) {
89 MPI_Isend(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
90 }
91}

◆ isMainProcessor()

bool olb::singleton::MpiManager::isMainProcessor ( ) const

Tells whether current processor is main processor.

Definition at line 80 of file mpiManager.cpp.

81{
82 return bossId() == getRank();
83}

References bossId(), and getRank().


◆ probeReceiveSize() [1/2]

template<typename TYPE >
std::size_t olb::singleton::MpiManager::probeReceiveSize ( int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Probe size of incoming message with TYPE.

◆ probeReceiveSize() [2/2]

std::size_t olb::singleton::MpiManager::probeReceiveSize ( int source,
MPI_Datatype type,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Probe size of incoming message.

Definition at line 336 of file mpiManager.cpp.

337{
338 MPI_Status status;
339 if (MPI_Probe(source, tag, comm, &status) == MPI_SUCCESS) {
340 int requestSize;
341 MPI_Get_count(&status, type, &requestSize);
342 if (requestSize == MPI_UNDEFINED) {
343 throw std::runtime_error("MPI_UNDEFINED in probeReceiveSize(" + std::to_string(source) + "," + std::to_string(tag) + ")" + " ranks " + std::to_string(source) + " -> " + std::to_string(singleton::mpi().getRank()));
344 }
345 return requestSize;
346 } else {
347 throw std::runtime_error("MPI_Probe failed in probeReceiveSize");
348 }
349}

References getRank(), and olb::singleton::mpi().
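
A sketch of the typical pattern (illustrative): when the receiver does not know the message length in advance, it probes the size first and then posts a matching blocking receive.

#include "mpiManager.h"
#include <vector>

std::vector<double> receiveUnknownLength(int source, int tag)
{
  auto& mpi = olb::singleton::mpi();

  const std::size_t n = mpi.probeReceiveSize<double>(source, tag);
  std::vector<double> buffer(n);
  mpi.receive(buffer.data(), static_cast<int>(n), source, tag);
  return buffer;
}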


◆ receive() [1/10]

template<>
void olb::singleton::MpiManager::receive ( bool * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 364 of file mpiManager.cpp.

365{
366 if (!ok) {
367 return;
368 }
369 MPI_Status status;
370 MPI_Recv(static_cast<void*>(buf), count, MPI_BYTE, source, tag, comm, &status);
371}

◆ receive() [2/10]

template<>
void olb::singleton::MpiManager::receive ( char * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 375 of file mpiManager.cpp.

376{
377 if (!ok) {
378 return;
379 }
380 MPI_Status status;
381 MPI_Recv(static_cast<void*>(buf), count, MPI_CHAR, source, tag, comm, &status);
382}

◆ receive() [3/10]

template<>
void olb::singleton::MpiManager::receive ( double * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 435 of file mpiManager.cpp.

436{
437 if (!ok) {
438 return;
439 }
440 MPI_Status status;
441 MPI_Recv(static_cast<void*>(buf), count, MPI_DOUBLE, source, tag, comm, &status);
442}

◆ receive() [4/10]

template<>
void olb::singleton::MpiManager::receive ( float * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 425 of file mpiManager.cpp.

426{
427 if (!ok) {
428 return;
429 }
430 MPI_Status status;
431 MPI_Recv(static_cast<void*>(buf), count, MPI_FLOAT, source, tag, comm, &status);
432}

◆ receive() [5/10]

template<>
void olb::singleton::MpiManager::receive ( int * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 395 of file mpiManager.cpp.

396{
397 if (!ok) {
398 return;
399 }
400 MPI_Status status;
401 MPI_Recv(static_cast<void*>(buf), count, MPI_INT, source, tag, comm, &status);
402}

◆ receive() [6/10]

template<>
void olb::singleton::MpiManager::receive ( long double * buf,
int count,
int source,
int tag,
MPI_Comm comm )

Definition at line 445 of file mpiManager.cpp.

446{
447 if (!ok) {
448 return;
449 }
450 MPI_Status status;
451 MPI_Recv(static_cast<void*>(buf), count, MPI_LONG_DOUBLE, source, tag, comm, &status);
452}

◆ receive() [7/10]

template<class T , std::size_t N>
void olb::singleton::MpiManager::receive ( std::array< T, N > & array,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )
inline

Definition at line 159 of file mpiManager.h.

159 {
160 receive( array.data(), array.size(), source, tag, comm );
161 }

References receive().


◆ receive() [8/10]

template<typename... args>
void olb::singleton::MpiManager::receive ( std::vector< args... > & vec,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )
inline

Definition at line 155 of file mpiManager.h.

155 {
156 receive( vec.data(), vec.size(), source, tag, comm );
157 }

References receive().


◆ receive() [9/10]

template<typename T >
void olb::singleton::MpiManager::receive ( T * buf,
int count,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Receives data at *buf, blocking.
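
A minimal sketch of the blocking send()/receive() pair (message size and tag are illustrative): rank 0 sends a block of doubles to rank 1.

#include "mpiManager.h"
#include <vector>

void pointToPoint()
{
  auto& mpi = olb::singleton::mpi();
  std::vector<double> data(64, 1.0);

  if (mpi.getRank() == 0 && mpi.getSize() > 1) {
    mpi.send(data.data(), static_cast<int>(data.size()), /*dest=*/1, /*tag=*/7);
  }
  else if (mpi.getRank() == 1) {
    mpi.receive(data.data(), static_cast<int>(data.size()), /*source=*/0, /*tag=*/7);
  }
}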


◆ receive() [10/10]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::receive ( util::ADf< T, DIM > * buf,
int count,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 66 of file mpiManagerAD.hh.

67{
68 if (!ok) {
69 return;
70 }
71 MPI_Status status;
72 MPI_Recv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, &status);
73}

◆ recvInit() [1/4]

template<>
void olb::singleton::MpiManager::recvInit ( double * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 525 of file mpiManager.cpp.

526{
527 if (ok) {
528 MPI_Recv_init(buf, count, MPI_DOUBLE, dest, tag, comm, request);
529 }
530}

◆ recvInit() [2/4]

template<>
void olb::singleton::MpiManager::recvInit ( int * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 533 of file mpiManager.cpp.

534{
535 if (ok) {
536 MPI_Recv_init(buf, count, MPI_INT, dest, tag, comm, request);
537 }
538}

◆ recvInit() [3/4]

template<typename T >
void olb::singleton::MpiManager::recvInit ( T * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Initialize persistent non-blocking receive.


◆ recvInit() [4/4]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::recvInit ( util::ADf< T, DIM > * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 76 of file mpiManagerAD.hh.

77{
78 if (ok) {
79 MPI_Recv_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
80 }
81}

◆ reduce() [1/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, double, double > & sendVal,
BlockData< 2, double, double > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 188 of file blockData.hh.

189{
190 if (!ok) {
191 return;
192 }
193 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
194 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
195 static_cast<void*>(recvVal.getColumn(iD).data()),
196 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
197 }
198}

◆ reduce() [2/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, double, int > & sendVal,
BlockData< 2, double, int > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 175 of file blockData.hh.

176{
177 if (!ok) {
178 return;
179 }
180 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
181 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
182 static_cast<void*>(recvVal.getColumn(iD).data()),
183 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
184 }
185}

◆ reduce() [3/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, float, float > & sendVal,
BlockData< 2, float, float > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 201 of file blockData.hh.

202{
203 if (!ok) {
204 return;
205 }
206 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
207 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
208 static_cast<void*>(recvVal.getColumn(iD).data()),
209 sendVal.getNcells(), MPI_FLOAT, op, root, comm);
210 }
211}

◆ reduce() [4/11]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduce ( BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > & sendVal,
BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > & recvVal,
MPI_Op op,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 168 of file mpiManagerAD.hh.

172{
173 if (!ok) {
174 return;
175 }
176// MPI_Reduce(static_cast<void*>(sendVal.getRawData()),
177// static_cast<void*>(recvVal.getRawData()),
178// sendVal.getDataSize(), MPI_DOUBLE, op, root, comm);
179 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
180 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
181 static_cast<void*>(recvVal.getColumn(iD).data()),
182 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
183 }
184}

◆ reduce() [5/11]

template<>
void olb::singleton::MpiManager::reduce ( bool & sendVal,
bool & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1084 of file mpiManager.cpp.

1085{
1086 if (!ok) {
1087 return;
1088 }
1089 MPI_Reduce(static_cast<void*>(&sendVal),
1090 static_cast<void*>(&recvVal), 1, MPI_BYTE, op, root, comm);
1091}

◆ reduce() [6/11]

template<>
void olb::singleton::MpiManager::reduce ( char & sendVal,
char & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1094 of file mpiManager.cpp.

1095{
1096 if (!ok) {
1097 return;
1098 }
1099 MPI_Reduce(static_cast<void*>(&sendVal),
1100 static_cast<void*>(&recvVal), 1, MPI_CHAR, op, root, comm);
1101}

◆ reduce() [7/11]

template<>
void olb::singleton::MpiManager::reduce ( double & sendVal,
double & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1124 of file mpiManager.cpp.

1125{
1126 if (!ok) {
1127 return;
1128 }
1129 MPI_Reduce(static_cast<void*>(&sendVal),
1130 static_cast<void*>(&recvVal), 1, MPI_DOUBLE, op, root, comm);
1131}

◆ reduce() [8/11]

template<>
void olb::singleton::MpiManager::reduce ( float & sendVal,
float & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1114 of file mpiManager.cpp.

1115{
1116 if (!ok) {
1117 return;
1118 }
1119 MPI_Reduce(static_cast<void*>(&sendVal),
1120 static_cast<void*>(&recvVal), 1, MPI_FLOAT, op, root, comm);
1121}

◆ reduce() [9/11]

template<>
void olb::singleton::MpiManager::reduce ( int & sendVal,
int & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1104 of file mpiManager.cpp.

1105{
1106 if (!ok) {
1107 return;
1108 }
1109 MPI_Reduce(static_cast<void*>(&sendVal),
1110 static_cast<void*>(&recvVal), 1, MPI_INT, op, root, comm);
1111}

◆ reduce() [10/11]

template<typename T >
void olb::singleton::MpiManager::reduce ( T & sendVal,
T & recvVal,
MPI_Op op,
int root = 0,
MPI_Comm = MPI_COMM_WORLD )

Reduction operation toward one processor.
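
A sketch of the scalar reduce() overload (illustrative): each rank contributes a local partial sum; only the root holds the global result afterwards.

#include "mpiManager.h"
#include <mpi.h>        // MPI_SUM
#include <iostream>

void reportGlobalSum(double localSum)
{
  auto& mpi = olb::singleton::mpi();

  double globalSum = 0.;
  mpi.reduce(localSum, globalSum, MPI_SUM);   // root = 0 by default
  if (mpi.isMainProcessor()) {
    std::cout << "global sum: " << globalSum << std::endl;
  }
}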


◆ reduce() [11/11]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduce ( util::ADf< T, DIM > & sendVal,
util::ADf< T, DIM > & recvVal,
MPI_Op op,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 150 of file mpiManagerAD.hh.

151{
152 if (!ok) {
153 return;
154 }
155
156 int sizeADouble = sizeof(util::ADf<T,DIM>)/8-1;
157
158 MPI_Reduce(static_cast<void*>(&sendVal.v()),
159 static_cast<void*>(&recvVal.v()), 1, MPI_DOUBLE, op, root, comm);
160
161 for (int i=0; i<sizeADouble; i++) {
162 MPI_Reduce(static_cast<void*>(&sendVal.d(i)),
163 static_cast<void*>(&recvVal.d(i)), 1, MPI_DOUBLE, op, root, comm);
164 }
165}

References olb::util::ADf< T, DIM >::d(), and olb::util::ADf< T, DIM >::v().


◆ reduceAndBcast() [1/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( bool & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1192 of file mpiManager.cpp.

1193{
1194 if (!ok) {
1195 return;
1196 }
1197 char recvVal;
1198 MPI_Reduce(static_cast<void*>(&reductVal), static_cast<void*>(&recvVal), 1, MPI_BYTE, op, root, comm);
1199 reductVal = recvVal;
1200 MPI_Bcast(&reductVal, 1, MPI_BYTE, root, comm);
1201
1202}

◆ reduceAndBcast() [2/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( char & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1205 of file mpiManager.cpp.

1206{
1207 if (!ok) {
1208 return;
1209 }
1210 char recvVal;
1211 MPI_Reduce(&reductVal, &recvVal, 1, MPI_CHAR, op, root, comm);
1212 reductVal = recvVal;
1213 MPI_Bcast(&reductVal, 1, MPI_CHAR, root, comm);
1214
1215}

◆ reduceAndBcast() [3/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( double & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1244 of file mpiManager.cpp.

1245{
1246 if (!ok) {
1247 return;
1248 }
1249 double recvVal;
1250 MPI_Reduce(&reductVal, &recvVal, 1, MPI_DOUBLE, op, root, comm);
1251 reductVal = recvVal;
1252 MPI_Bcast(&reductVal, 1, MPI_DOUBLE, root, comm);
1253
1254}

◆ reduceAndBcast() [4/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( float & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1231 of file mpiManager.cpp.

1232{
1233 if (!ok) {
1234 return;
1235 }
1236 float recvVal;
1237 MPI_Reduce(&reductVal, &recvVal, 1, MPI_FLOAT, op, root, comm);
1238 reductVal = recvVal;
1239 MPI_Bcast(&reductVal, 1, MPI_FLOAT, root, comm);
1240
1241}

◆ reduceAndBcast() [5/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( int & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1218 of file mpiManager.cpp.

1219{
1220 if (!ok) {
1221 return;
1222 }
1223 int recvVal;
1224 MPI_Reduce(&reductVal, &recvVal, 1, MPI_INT, op, root, comm);
1225 reductVal = recvVal;
1226 MPI_Bcast(&reductVal, 1, MPI_INT, root, comm);
1227
1228}

◆ reduceAndBcast() [6/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( long & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1270 of file mpiManager.cpp.

1271{
1272 if (!ok) {
1273 return;
1274 }
1275 long recvVal;
1276 MPI_Reduce(&reductVal, &recvVal, 1, MPI_LONG, op, root, comm);
1277 reductVal = recvVal;
1278 MPI_Bcast(&reductVal, 1, MPI_LONG, root, comm);
1279
1280}

◆ reduceAndBcast() [7/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( long double & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1257 of file mpiManager.cpp.

1258{
1259 if (!ok) {
1260 return;
1261 }
1262 long double recvVal;
1263 MPI_Reduce(&reductVal, &recvVal, 1, MPI_LONG_DOUBLE, op, root, comm);
1264 reductVal = recvVal;
1265 MPI_Bcast(&reductVal, 1, MPI_LONG_DOUBLE, root, comm);
1266
1267}

◆ reduceAndBcast() [8/10]

template<typename T >
void olb::singleton::MpiManager::reduceAndBcast ( T & reductVal,
MPI_Op op,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Reduction operation, followed by a broadcast.
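
A sketch of a typical use (illustrative): because the reduced value is available on every rank afterwards, all ranks can take the same control-flow decision, e.g. terminating a simulation loop.

#include "mpiManager.h"
#include <mpi.h>        // MPI_MAX

bool converged(double localResidual, double tolerance)
{
  auto& mpi = olb::singleton::mpi();

  double residual = localResidual;
  mpi.reduceAndBcast(residual, MPI_MAX);   // in place: residual is now global on all ranks
  return residual < tolerance;
}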


◆ reduceAndBcast() [9/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( unsigned long & reductVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1283 of file mpiManager.cpp.

1284{
1285 if (!ok) {
1286 return;
1287 }
1288 unsigned long recvVal;
1289 MPI_Reduce(&reductVal, &recvVal, 1, MPI_UNSIGNED_LONG, op, root, comm);
1290 reductVal = recvVal;
1291 MPI_Bcast(&reductVal, 1, MPI_UNSIGNED_LONG, root, comm);
1292
1293}

◆ reduceAndBcast() [10/10]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduceAndBcast ( util::ADf< T, DIM > & reductVal,
MPI_Op op,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 203 of file mpiManagerAD.hh.

204{
205 if (!ok) {
206 return;
207 }
208 util::ADf<T,DIM> recvVal;
209 reduce(reductVal, recvVal, op, root, comm);
210
211 //MPI_Reduce(&reductVal, &recvVal, 1, MPI_DOUBLE, op, root, comm);
212 reductVal = recvVal;
213 bCast(&reductVal, 1, root, comm);
214
215 //MPI_Bcast(&reductVal, 1, MPI_DOUBLE, root, comm);
216
217}

References bCast(), and reduce().


◆ reduceVect() [1/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< char > & sendVal,
std::vector< char > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1144 of file mpiManager.cpp.

1146{
1147 if (!ok) {
1148 return;
1149 }
1150 MPI_Reduce(static_cast<void*>(&(sendVal[0])),
1151 static_cast<void*>(&(recvVal[0])),
1152 sendVal.size(), MPI_CHAR, op, root, comm);
1153}

◆ reduceVect() [2/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< double > & sendVal,
std::vector< double > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1180 of file mpiManager.cpp.

1182{
1183 if (!ok) {
1184 return;
1185 }
1186 MPI_Reduce(static_cast<void*>(&(sendVal[0])),
1187 static_cast<void*>(&(recvVal[0])),
1188 sendVal.size(), MPI_DOUBLE, op, root, comm);
1189}

◆ reduceVect() [3/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< float > & sendVal,
std::vector< float > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1168 of file mpiManager.cpp.

1170{
1171 if (!ok) {
1172 return;
1173 }
1174 MPI_Reduce(static_cast<void*>(&(sendVal[0])),
1175 static_cast<void*>(&(recvVal[0])),
1176 sendVal.size(), MPI_FLOAT, op, root, comm);
1177}

◆ reduceVect() [4/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< int > & sendVal,
std::vector< int > & recvVal,
MPI_Op op,
int root,
MPI_Comm comm )

Definition at line 1156 of file mpiManager.cpp.

1158{
1159 if (!ok) {
1160 return;
1161 }
1162 MPI_Reduce(static_cast<void*>(&(sendVal[0])),
1163 static_cast<void*>(&(recvVal[0])),
1164 sendVal.size(), MPI_INT, op, root, comm);
1165}

◆ reduceVect() [5/5]

template<typename T >
void olb::singleton::MpiManager::reduceVect ( std::vector< T > & sendVal,
std::vector< T > & recvVal,
MPI_Op op,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Element-wise reduction of a vector of data; the reduced values are available on the root processor only.
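
A hypothetical sketch (illustrative include path, MPI-enabled build assumed); the receive vector must already have the size of the send vector, and only the root rank receives meaningful values.

#include "mpiManager.h"   // illustrative include path
#include <vector>

// Element-wise sum across all ranks; the result is meaningful on the main
// processor only.
std::vector<double> sumOnRoot(std::vector<double>& local)
{
  std::vector<double> global(local.size(), 0.0);
  olb::singleton::mpi().reduceVect(local, global, MPI_SUM);   // root = 0 by default
  return global;
}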

◆ scatterv() [1/6]

template<>
void olb::singleton::MpiManager::scatterv ( bool * sendBuf,
int * sendCounts,
int * displs,
bool * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 701 of file mpiManager.cpp.

703{
704 if (!ok) {
705 return;
706 }
707 MPI_Scatterv(static_cast<void*>(sendBuf),
708 sendCounts, displs, MPI_BYTE,
709 static_cast<void*>(recvBuf),
710 recvCount, MPI_BYTE, root, comm);
711}

◆ scatterv() [2/6]

template<>
void olb::singleton::MpiManager::scatterv ( char * sendBuf,
int * sendCounts,
int * displs,
char * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 714 of file mpiManager.cpp.

716{
717 if (!ok) {
718 return;
719 }
720 MPI_Scatterv(static_cast<void*>(sendBuf),
721 sendCounts, displs, MPI_CHAR,
722 static_cast<void*>(recvBuf),
723 recvCount, MPI_CHAR, root, comm);
724}

◆ scatterv() [3/6]

template<>
void olb::singleton::MpiManager::scatterv ( double * sendBuf,
int * sendCounts,
int * displs,
double * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 753 of file mpiManager.cpp.

755{
756 if (!ok) {
757 return;
758 }
759 MPI_Scatterv(static_cast<void*>(sendBuf),
760 sendCounts, displs, MPI_DOUBLE,
761 static_cast<void*>(recvBuf),
762 recvCount, MPI_DOUBLE, root, comm);
763}

◆ scatterv() [4/6]

template<>
void olb::singleton::MpiManager::scatterv ( float * sendBuf,
int * sendCounts,
int * displs,
float * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 740 of file mpiManager.cpp.

742{
743 if (!ok) {
744 return;
745 }
746 MPI_Scatterv(static_cast<void*>(sendBuf),
747 sendCounts, displs, MPI_FLOAT,
748 static_cast<void*>(recvBuf),
749 recvCount, MPI_FLOAT, root, comm);
750}

◆ scatterv() [5/6]

template<>
void olb::singleton::MpiManager::scatterv ( int * sendBuf,
int * sendCounts,
int * displs,
int * recvBuf,
int recvCount,
int root,
MPI_Comm comm )

Definition at line 727 of file mpiManager.cpp.

729{
730 if (!ok) {
731 return;
732 }
733 MPI_Scatterv(static_cast<void*>(sendBuf),
734 sendCounts, displs, MPI_INT,
735 static_cast<void*>(recvBuf),
736 recvCount, MPI_INT, root, comm);
737}

◆ scatterv() [6/6]

template<typename T >
void olb::singleton::MpiManager::scatterv ( T * sendBuf,
int * sendCounts,
int * displs,
T * recvBuf,
int recvCount,
int root = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Scatters variable-length portions of data from the root processor to all processors, using per-processor counts and displacements.
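
A hypothetical sketch (illustrative include path; for simplicity the per-rank counts are replicated on every rank, although MPI only requires them on the root): the root scatters variable-sized chunks of an int buffer.

#include "mpiManager.h"   // illustrative include path
#include <vector>

// Scatter chunkSizes[r] ints to each rank r from the send buffer on root 0.
void distributeChunks(std::vector<int>& sendBuf,      // significant on the root only
                      std::vector<int>& chunkSizes,   // one entry per rank, known everywhere
                      std::vector<int>& recvBuf)
{
  auto& mgr = olb::singleton::mpi();
  std::vector<int> displs(chunkSizes.size(), 0);
  for (std::size_t i = 1; i < chunkSizes.size(); ++i) {
    displs[i] = displs[i-1] + chunkSizes[i-1];
  }
  recvBuf.resize(chunkSizes[mgr.getRank()]);
  mgr.scatterv(sendBuf.data(), chunkSizes.data(), displs.data(),
               recvBuf.data(), static_cast<int>(recvBuf.size()));   // root = 0 by default
}
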

◆ send() [1/9]

template<>
void olb::singleton::MpiManager::send ( bool * buf,
int count,
int dest,
int tag,
MPI_Comm comm )

Definition at line 108 of file mpiManager.cpp.

109{
110 if (!ok) {
111 return;
112 }
113 MPI_Send(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm);
114}

◆ send() [2/9]

template<>
void olb::singleton::MpiManager::send ( char * buf,
int count,
int dest,
int tag,
MPI_Comm comm )

Definition at line 117 of file mpiManager.cpp.

118{
119 if (!ok) {
120 return;
121 }
122 MPI_Send(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm);
123}

◆ send() [3/9]

template<>
void olb::singleton::MpiManager::send ( double * buf,
int count,
int dest,
int tag,
MPI_Comm comm )

Definition at line 153 of file mpiManager.cpp.

154{
155 if (!ok) {
156 return;
157 }
158 MPI_Send(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm);
159}

◆ send() [4/9]

template<>
void olb::singleton::MpiManager::send ( float * buf,
int count,
int dest,
int tag,
MPI_Comm comm )

Definition at line 144 of file mpiManager.cpp.

145{
146 if (!ok) {
147 return;
148 }
149 MPI_Send(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm);
150}

◆ send() [5/9]

template<>
void olb::singleton::MpiManager::send ( int * buf,
int count,
int dest,
int tag,
MPI_Comm comm )

Definition at line 135 of file mpiManager.cpp.

136{
137 if (!ok) {
138 return;
139 }
140 MPI_Send(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm);
141}

◆ send() [6/9]

template<class T , std::size_t N>
void olb::singleton::MpiManager::send ( std::array< T, N > & array,
int dest,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )
inline

Definition at line 121 of file mpiManager.h.

121 {
122 send( array.data(), array.size(), dest, tag, comm );
123 }

References send().


◆ send() [7/9]

template<typename... args>
void olb::singleton::MpiManager::send ( std::vector< args... > & vec,
int dest,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )
inline

Definition at line 117 of file mpiManager.h.

117 {
118 send( vec.data(), vec.size(), dest, tag, comm );
119 }

References send().


◆ send() [8/9]

template<typename T >
void olb::singleton::MpiManager::send ( T * buf,
int count,
int dest,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Sends data at *buf, blocking.

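A hypothetical point-to-point sketch (illustrative include path, MPI-enabled build assumed): rank 0 sends four doubles to rank 1, which posts the matching blocking receive().

#include "mpiManager.h"   // illustrative include path

void exchangeExample()
{
  auto& mgr = olb::singleton::mpi();
  double payload[4] = {1., 2., 3., 4.};
  if (mgr.getRank() == 0 && mgr.getSize() > 1) {
    mgr.send(payload, 4, /*dest=*/1, /*tag=*/7);       // blocks until the buffer may be reused
  }
  else if (mgr.getRank() == 1) {
    mgr.receive(payload, 4, /*source=*/0, /*tag=*/7);  // blocks until the data has arrived
  }
}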

◆ send() [9/9]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::send ( util::ADf< T, DIM > * buf,
int count,
int dest,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 49 of file mpiManagerAD.hh.

50{
51 if (!ok) {
52 return;
53 }
54 MPI_Send(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm);
55}

◆ sendInit() [1/5]

template<>
void olb::singleton::MpiManager::sendInit ( bool * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 202 of file mpiManager.cpp.

203{
204 if (ok) {
205 MPI_Send_init(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
206 }
207}

◆ sendInit() [2/5]

template<>
void olb::singleton::MpiManager::sendInit ( double * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 162 of file mpiManager.cpp.

163{
164 if (ok) {
165 MPI_Send_init(buf, count, MPI_DOUBLE, dest, tag, comm, request);
166 }
167}

◆ sendInit() [3/5]

template<>
void olb::singleton::MpiManager::sendInit ( int * buf,
int count,
int dest,
MPI_Request * request,
int tag,
MPI_Comm comm )

Definition at line 194 of file mpiManager.cpp.

195{
196 if (ok) {
197 MPI_Send_init(buf, count, MPI_INT, dest, tag, comm, request);
198 }
199}

◆ sendInit() [4/5]

template<typename T >
void olb::singleton::MpiManager::sendInit ( T * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Initialize persistent non-blocking send.

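A hypothetical sketch of a persistent send (illustrative include path, MPI-enabled build assumed; the matching recvInit()/receive() on the destination rank is omitted): the request is created once and started for every transfer.

#include "mpiManager.h"   // illustrative include path

void repeatedSend(double* buffer, int count, int dest, int iterations)
{
  MPI_Request request;
  MPI_Status  status;
  olb::singleton::mpi().sendInit(buffer, count, dest, &request);
  for (int i = 0; i < iterations; ++i) {
    // ... refill buffer ...
    MPI_Start(&request);                              // trigger one transfer
    olb::singleton::mpi().wait(&request, &status);    // complete it before reusing the buffer
  }
  MPI_Request_free(&request);
}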

◆ sendInit() [5/5]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::sendInit ( util::ADf< T, DIM > * buf,
int count,
int dest,
MPI_Request * request,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 58 of file mpiManagerAD.hh.

59{
60 if (ok) {
61 MPI_Send_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
62 }
63}

◆ sendRecv() [1/9]

template<>
void olb::singleton::MpiManager::sendRecv ( bool * sendBuf,
bool * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 589 of file mpiManager.cpp.

591{
592 if (!ok) {
593 return;
594 }
595 MPI_Status status;
596 MPI_Sendrecv(static_cast<void*>(sendBuf),
597 count,
598 MPI_BYTE, dest, tag,
599 static_cast<void*>(recvBuf),
600 count,
601 MPI_BYTE, source, tag, comm, &status);
602}

◆ sendRecv() [2/9]

template<>
void olb::singleton::MpiManager::sendRecv ( char * sendBuf,
char * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 605 of file mpiManager.cpp.

607{
608 if (!ok) {
609 return;
610 }
611 MPI_Status status;
612 MPI_Sendrecv(static_cast<void*>(sendBuf),
613 count,
614 MPI_CHAR, dest, tag,
615 static_cast<void*>(recvBuf),
616 count,
617 MPI_CHAR, source, tag, comm, &status);
618}

◆ sendRecv() [3/9]

template<>
void olb::singleton::MpiManager::sendRecv ( double * sendBuf,
double * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 669 of file mpiManager.cpp.

671{
672 if (!ok) {
673 return;
674 }
675 MPI_Status status;
676 MPI_Sendrecv(static_cast<void*>(sendBuf),
677 count,
678 MPI_DOUBLE, dest, tag,
679 static_cast<void*>(recvBuf),
680 count,
681 MPI_DOUBLE, source, tag, comm, &status);
682}

◆ sendRecv() [4/9]

template<>
void olb::singleton::MpiManager::sendRecv ( float * sendBuf,
float * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 637 of file mpiManager.cpp.

639{
640 if (!ok) {
641 return;
642 }
643 MPI_Status status;
644 MPI_Sendrecv(static_cast<void*>(sendBuf),
645 count,
646 MPI_FLOAT, dest, tag,
647 static_cast<void*>(recvBuf),
648 count,
649 MPI_FLOAT, source, tag, comm, &status);
650}

◆ sendRecv() [5/9]

template<>
void olb::singleton::MpiManager::sendRecv ( int * sendBuf,
int * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 621 of file mpiManager.cpp.

623{
624 if (!ok) {
625 return;
626 }
627 MPI_Status status;
628 MPI_Sendrecv(static_cast<void*>(sendBuf),
629 count,
630 MPI_INT, dest, tag,
631 static_cast<void*>(recvBuf),
632 count,
633 MPI_INT, source, tag, comm, &status);
634}

◆ sendRecv() [6/9]

template<>
void olb::singleton::MpiManager::sendRecv ( long * sendBuf,
long * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 653 of file mpiManager.cpp.

655{
656 if (!ok) {
657 return;
658 }
659 MPI_Status status;
660 MPI_Sendrecv(static_cast<void*>(sendBuf),
661 count,
662 MPI_LONG, dest, tag,
663 static_cast<void*>(recvBuf),
664 count,
665 MPI_LONG, source, tag, comm, &status);
666}

◆ sendRecv() [7/9]

template<>
void olb::singleton::MpiManager::sendRecv ( long double * sendBuf,
long double * recvBuf,
int count,
int dest,
int source,
int tag,
MPI_Comm comm )

Definition at line 685 of file mpiManager.cpp.

687{
688 if (!ok) {
689 return;
690 }
691 MPI_Status status;
692 MPI_Sendrecv(static_cast<void*>(sendBuf),
693 count,
694 MPI_LONG_DOUBLE, dest, tag,
695 static_cast<void*>(recvBuf),
696 count,
697 MPI_LONG_DOUBLE, source, tag, comm, &status);
698}

◆ sendRecv() [8/9]

template<typename T >
void olb::singleton::MpiManager::sendRecv ( T * sendBuf,
T * recvBuf,
int count,
int dest,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Sends data to one partner and receives data from another in a single combined operation.

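A hypothetical ring-exchange sketch (illustrative include path): every rank sends its own rank number to the right neighbour and receives the left neighbour's in one combined call.

#include "mpiManager.h"   // illustrative include path

void ringExchange()
{
  auto& mgr = olb::singleton::mpi();
  int right = (mgr.getRank() + 1) % mgr.getSize();
  int left  = (mgr.getRank() + mgr.getSize() - 1) % mgr.getSize();
  int sendVal = mgr.getRank();
  int recvVal = -1;
  mgr.sendRecv(&sendVal, &recvVal, 1, /*dest=*/right, /*source=*/left);
  // recvVal now holds the rank of the left neighbour.
}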

◆ sendRecv() [9/9]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::sendRecv ( util::ADf< T, DIM > * sendBuf,
util::ADf< T, DIM > * recvBuf,
int count,
int dest,
int source,
int tag = 0,
MPI_Comm comm = MPI_COMM_WORLD )

Definition at line 106 of file mpiManagerAD.hh.

107{
108 if (!ok) {
109 return;
110 }
111 MPI_Status status;
112 MPI_Sendrecv(static_cast<void*>(sendBuf),
113 (sizeof(util::ADf<T,DIM>)/8)*count,
114 MPI_DOUBLE, dest, tag,
115 static_cast<void*>(recvBuf),
116 (sizeof(util::ADf<T,DIM>)/8)*count,
117 MPI_DOUBLE, source, tag, comm, &status);
118}

◆ sendToMaster() [1/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( bool * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 455 of file mpiManager.cpp.

456{
457 if (!ok) {
458 return;
459 }
460 if (iAmRoot && !isMainProcessor()) {
461 send(sendBuf, sendCount, 0);
462 }
463 if (isMainProcessor() && !iAmRoot) {
464 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
465 }
466}

References isMainProcessor(), receive(), and send().


◆ sendToMaster() [2/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( char * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 469 of file mpiManager.cpp.

470{
471 if (!ok) {
472 return;
473 }
474 if (iAmRoot && !isMainProcessor()) {
475 send(sendBuf, sendCount, 0);
476 }
477 if (isMainProcessor() && !iAmRoot) {
478 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
479 }
480}

References isMainProcessor(), receive(), and send().


◆ sendToMaster() [3/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( double * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 511 of file mpiManager.cpp.

512{
513 if (!ok) {
514 return;
515 }
516 if (iAmRoot && !isMainProcessor()) {
517 send(sendBuf, sendCount, 0);
518 }
519 if (isMainProcessor() && !iAmRoot) {
520 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
521 }
522}

References isMainProcessor(), receive(), and send().


◆ sendToMaster() [4/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( float * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 497 of file mpiManager.cpp.

498{
499 if (!ok) {
500 return;
501 }
502 if (iAmRoot && !isMainProcessor()) {
503 send(sendBuf, sendCount, 0);
504 }
505 if (isMainProcessor() && !iAmRoot) {
506 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
507 }
508}

References isMainProcessor(), receive(), and send().


◆ sendToMaster() [5/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( int * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm )

Definition at line 483 of file mpiManager.cpp.

484{
485 if (!ok) {
486 return;
487 }
488 if (iAmRoot && !isMainProcessor()) {
489 send(sendBuf, sendCount, 0);
490 }
491 if (isMainProcessor() && !iAmRoot) {
492 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
493 }
494}

References isMainProcessor(), receive(), and send().


◆ sendToMaster() [6/6]

template<typename T >
void olb::singleton::MpiManager::sendToMaster ( T * sendBuf,
int sendCount,
bool iAmRoot,
MPI_Comm comm = MPI_COMM_WORLD )

Sends data from the owning processor to the main processor (rank 0).

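A hypothetical sketch (illustrative include path): exactly one rank owns the result and flags iAmRoot = true; sendToMaster() forwards it to the main processor, and no message is exchanged if the owner already is the main processor.

#include "mpiManager.h"   // illustrative include path

void reportToMain(int resultOwnerRank)
{
  auto& mgr = olb::singleton::mpi();
  int result[1] = { 10 * mgr.getRank() };
  bool iAmRoot = (mgr.getRank() == resultOwnerRank);   // exactly one rank owns the data
  mgr.sendToMaster(result, 1, iAmRoot);
  // On the main processor, result[0] now equals 10 * resultOwnerRank.
}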

◆ synchronizeIO()

void olb::singleton::MpiManager::synchronizeIO ( unsigned tDelay = 100,
MPI_Comm comm = MPI_COMM_WORLD )

Synchronizes the processes and waits briefly to ensure correct cout ordering.

Definition at line 101 of file mpiManager.cpp.

102{
103 usleep(tDelay);
104 barrier(comm);
105}

References barrier().

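A hypothetical sketch of rank-ordered console output (illustrative include path); note that the delay is passed to usleep() and is therefore interpreted in microseconds.

#include "mpiManager.h"   // illustrative include path
#include <iostream>

void printInRankOrder()
{
  auto& mgr = olb::singleton::mpi();
  for (int r = 0; r < mgr.getSize(); ++r) {
    if (r == mgr.getRank()) {
      std::cout << "rank " << r << " of " << mgr.getSize() << std::endl;
    }
    mgr.synchronizeIO();   // default delay of 100 microseconds, then barrier()
  }
}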

◆ wait()

void olb::singleton::MpiManager::wait ( MPI_Request * request,
MPI_Status * status )

Completes a non-blocking MPI operation.

Definition at line 1295 of file mpiManager.cpp.

1296{
1297 if (!ok) {
1298 return;
1299 }
1300 MPI_Wait(request, status);
1301}
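
A hypothetical sketch (illustrative include path): a non-blocking iSend() is completed with wait() before the send buffer is touched again.

#include "mpiManager.h"   // illustrative include path

void nonBlockingSend(double* buf, int count, int dest)
{
  MPI_Request request;
  MPI_Status  status;
  olb::singleton::mpi().iSend(buf, count, dest, &request);
  // ... overlap communication with work that does not modify buf ...
  olb::singleton::mpi().wait(&request, &status);
  // buf may now be reused.
}
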

◆ waitAll()

void olb::singleton::MpiManager::waitAll ( MpiNonBlockingHelper & mpiNbHelper)

Completes a series of non-blocking MPI operations.

Definition at line 1303 of file mpiManager.cpp.

1304{
1305 if (!ok || mpiNbHelper.get_size() == 0) {
1306 return;
1307 }
1308 MPI_Waitall(mpiNbHelper.get_size(), mpiNbHelper.get_mpiRequest(), mpiNbHelper.get_mpiStatus());
1309}

References olb::singleton::MpiNonBlockingHelper::get_mpiRequest(), olb::singleton::MpiNonBlockingHelper::get_mpiStatus(), and olb::singleton::MpiNonBlockingHelper::get_size().

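A hypothetical sketch (illustrative include path; filling and starting the helper's requests is not shown): a whole batch of pending non-blocking operations is completed at once.

#include "mpiManager.h"   // illustrative include path

void completeAllTransfers(olb::singleton::MpiNonBlockingHelper& helper)
{
  // Requests in helper.get_mpiRequest() are assumed to have been started already.
  olb::singleton::mpi().waitAll(helper);   // no-op if helper.get_size() == 0
}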

Friends And Related Symbol Documentation

◆ mpi

MpiManager & mpi ( )
friend

Definition at line 29 of file mpiManager.cpp.

30{
31 static MpiManager instance;
32 return instance;
33}
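
A hypothetical minimal program (illustrative include path): mpi() hands out the single MpiManager instance, so the wrapper can be used anywhere without passing a manager object around.

#include "mpiManager.h"   // illustrative include path
#include <iostream>

int main(int argc, char* argv[])
{
  olb::singleton::mpi().init(&argc, &argv);   // initializes MPI when built with MPI support
  auto& mgr = olb::singleton::mpi();
  if (mgr.isMainProcessor()) {
    std::cout << "running on " << mgr.getSize() << " processes" << std::endl;
  }
  return 0;
}
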

The documentation for this class was generated from the following files:
mpiManager.h
mpiManagerAD.hh
mpiManager.cpp