OpenLB 1.6
olb::singleton::MpiManager Class Reference

Wrapper functions that simplify the use of MPI. More...

#include <mpiManager.h>


Public Member Functions

void init (int *argc, char ***argv, bool verbose=true)
 Initializes the MPI manager. More...
 
int getSize () const
 Returns the number of processes. More...
 
int getRank () const
 Returns the process ID. More...
 
int bossId () const
 Returns the process ID of the main processor. More...
 
bool isMainProcessor () const
 Tells whether the current processor is the main processor. More...
 
double getTime () const
 Returns universal MPI-time in seconds. More...
 
void barrier (MPI_Comm comm=MPI_COMM_WORLD)
 Synchronizes the processes. More...
 
void synchronizeIO (unsigned tDelay=100, MPI_Comm comm=MPI_COMM_WORLD)
 Synchronizes the processes and waits to ensure correct cout ordering. More...
 
template<typename T >
void send (T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, blocking. More...
 
template<typename T , unsigned DIM>
void send (util::ADf< T, DIM > *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename... args>
void send (std::vector< args... > &vec, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<class T , std::size_t N>
void send (std::array< T, N > &array, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendInit (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Initialize persistent non-blocking send. More...
 
template<typename T , unsigned DIM>
void sendInit (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void iSend (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, non-blocking. More...
 
template<typename T , unsigned DIM>
void iSend (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void ibSend (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data at *buf, non-blocking and buffered. More...
 
template<typename T , unsigned DIM>
void ibSend (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
std::size_t probeReceiveSize (int source, MPI_Datatype type, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Probe size of incoming message. More...
 
template<typename TYPE >
std::size_t probeReceiveSize (int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Probe size of incoming message with TYPE. More...
 
template<typename T >
void receive (T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Receives data at *buf, blocking. More...
 
template<typename T , unsigned DIM>
void receive (util::ADf< T, DIM > *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename... args>
void receive (std::vector< args... > &vec, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<class T , std::size_t N>
void receive (std::array< T, N > &array, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void recvInit (T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Initialize persistent non-blocking receive. More...
 
template<typename T , unsigned DIM>
void recvInit (util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void iRecv (T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Receives data at *buf, non-blocking. More...
 
template<typename T , unsigned DIM>
void iRecv (util::ADf< T, DIM > *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendRecv (T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 Send and receive data between two partners. More...
 
template<typename T , unsigned DIM>
void sendRecv (util::ADf< T, DIM > *sendBuf, util::ADf< T, DIM > *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void sendToMaster (T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 Sends data to the master processor. More...
 
template<typename T >
void scatterv (T *sendBuf, int *sendCounts, int *displs, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Scatter data from one processor over multiple processors. More...
 
template<typename T >
void gatherv (T *sendBuf, int sendCount, T *recvBuf, int *recvCounts, int *displs, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Gather data from multiple processors to one processor. More...
 
template<typename T >
void bCast (T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcast data from one processor to multiple processors. More...
 
template<typename T , unsigned DIM>
void bCast (util::ADf< T, DIM > *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T , unsigned DIM>
void bCast (BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void bCast (T &sendVal, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void bCastThroughMaster (T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 Broadcast data when root is unknown to other processors. More...
 
template<typename T , unsigned DIM>
void bCastThroughMaster (util::ADf< T, DIM > *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
 
void bCast (std::string &message, int root=0)
 Special case for broadcasting strings. Memory handling is automatic. More...
 
void bCast (BlockData< 2, double, double > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Special case for broadcasting BlockData2D. More...
 
void bCast (BlockData< 2, float, float > &sendData, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Special case for broadcasting BlockData2D. More...
 
template<typename T >
void reduce (T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
 Reduction operation toward one processor. More...
 
template<typename T , unsigned DIM>
void reduce (util::ADf< T, DIM > &sendVal, util::ADf< T, DIM > &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
 
template<typename T , unsigned DIM>
void reduce (BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &sendVal, BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
template<typename T >
void reduceVect (std::vector< T > &sendVal, std::vector< T > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Element-per-element reduction of a vector of data. More...
 
template<typename T >
void reduceAndBcast (T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 Reduction operation, followed by a broadcast. More...
 
template<typename T , unsigned DIM>
void reduceAndBcast (util::ADf< T, DIM > &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
 
void wait (MPI_Request *request, MPI_Status *status)
 Complete a non-blocking MPI operation. More...
 
void waitAll (MpiNonBlockingHelper &mpiNbHelper)
 Complete a series of non-blocking MPI operations. More...
 
template<>
void send (bool *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (char *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (int *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (float *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void send (double *buf, int count, int dest, int tag, MPI_Comm comm)
 
template<>
void sendInit (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendInit (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendInit (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (char *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (float *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iSend (long double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (bool *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (char *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (float *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void ibSend (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void receive (bool *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (char *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (int *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (float *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (double *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void receive (long double *buf, int count, int source, int tag, MPI_Comm comm)
 
template<>
void sendToMaster (bool *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (char *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (int *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (float *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void sendToMaster (double *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void recvInit (double *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void recvInit (int *buf, int count, int dest, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (bool *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (char *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (int *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (float *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void iRecv (double *buf, int count, int source, MPI_Request *request, int tag, MPI_Comm comm)
 
template<>
void sendRecv (bool *sendBuf, bool *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (char *sendBuf, char *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (int *sendBuf, int *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (float *sendBuf, float *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (long *sendBuf, long *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (double *sendBuf, double *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void sendRecv (long double *sendBuf, long double *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
 
template<>
void scatterv (bool *sendBuf, int *sendCounts, int *displs, bool *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (char *sendBuf, int *sendCounts, int *displs, char *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (int *sendBuf, int *sendCounts, int *displs, int *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (float *sendBuf, int *sendCounts, int *displs, float *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void scatterv (double *sendBuf, int *sendCounts, int *displs, double *recvBuf, int recvCount, int root, MPI_Comm comm)
 
template<>
void gatherv (bool *sendBuf, int sendCount, bool *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (char *sendBuf, int sendCount, char *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (int *sendBuf, int sendCount, int *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (float *sendBuf, int sendCount, float *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void gatherv (double *sendBuf, int sendCount, double *recvBuf, int *recvCounts, int *displs, int root, MPI_Comm comm)
 
template<>
void bCast (bool *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (char *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned char *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (int *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned long *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (float *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (double *sendBuf, int sendCount, int root, MPI_Comm comm)
 
template<>
void bCast (bool &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (char &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned char &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (int &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (unsigned long &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (float &sendVal, int root, MPI_Comm comm)
 
template<>
void bCast (double &sendVal, int root, MPI_Comm comm)
 
template<>
void bCastThroughMaster (bool *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (char *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (int *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (float *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void bCastThroughMaster (double *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
 
template<>
void reduce (bool &sendVal, bool &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (char &sendVal, char &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (int &sendVal, int &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (float &sendVal, float &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (double &sendVal, double &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< char > &sendVal, std::vector< char > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< int > &sendVal, std::vector< int > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< float > &sendVal, std::vector< float > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceVect (std::vector< double > &sendVal, std::vector< double > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (bool &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (char &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (int &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (float &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (double &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (long double &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (long &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduceAndBcast (unsigned long &reductVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, double, int > &sendVal, BlockData< 2, double, int > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, double, double > &sendVal, BlockData< 2, double, double > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 
template<>
void reduce (BlockData< 2, float, float > &sendVal, BlockData< 2, float, float > &recvVal, MPI_Op op, int root, MPI_Comm comm)
 

Friends

MpiManager & mpi ()
 

Detailed Description

Wrapper functions that simplify the use of MPI.

Definition at line 90 of file mpiManager.h.
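
A minimal usage sketch follows (not part of the generated reference). It assumes an MPI-enabled build and reaches the manager through the friend function mpi() listed above; the include path may differ in your project setup.

#include <mpiManager.h>

int main(int argc, char* argv[])
{
  auto& mpi = olb::singleton::mpi();   // singleton accessor, declared as friend above
  mpi.init(&argc, &argv);              // returns early if MPI was already initialized

  int value = 0;
  if (mpi.isMainProcessor()) {
    value = 42;                        // only the boss rank knows the value initially
  }
  mpi.bCast(&value, 1, mpi.bossId());  // now every rank holds value == 42
  mpi.barrier();                       // synchronize all processes before continuing
  return 0;
}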

Member Function Documentation

◆ barrier()

void olb::singleton::MpiManager::barrier ( MPI_Comm  comm = MPI_COMM_WORLD)

Synchronizes the processes.

Definition at line 93 of file mpiManager.cpp.

94{
95 if (!ok) {
96 return;
97 }
98 MPI_Barrier(comm);
99}

◆ bCast() [1/21]

void olb::singleton::MpiManager::bCast ( BlockData< 2, double, double > &  sendData,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Special case for broadcasting BlockData2D.

Definition at line 152 of file blockData.hh.

153{
154 if (!ok) {
155 return;
156 }
157 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
158 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
159 sendData.getNcells(), MPI_DOUBLE, root, comm);
160 }
161}

References olb::cpu::sisd::Column< T >::data(), olb::BlockData< D, T, U >::getColumn(), olb::BlockStructureD< D >::getNcells(), and olb::BlockData< D, T, U >::getSize().


◆ bCast() [2/21]

void olb::singleton::MpiManager::bCast ( BlockData< 2, float, float > &  sendData,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Special case for broadcasting BlockData2D.

Definition at line 163 of file blockData.hh.

164{
165 if (!ok) {
166 return;
167 }
168 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
169 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
170 sendData.getNcells(), MPI_FLOAT, root, comm);
171 }
172}

References olb::cpu::sisd::Column< T >::data(), olb::BlockData< D, T, U >::getColumn(), olb::BlockStructureD< D >::getNcells(), and olb::BlockData< D, T, U >::getSize().


◆ bCast() [3/21]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCast ( BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &  sendData,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 134 of file mpiManagerAD.hh.

137{
138 if (!ok) {
139 return;
140 }
141// MPI_Bcast(static_cast<void*>(sendData.getRawData()),
142// sendData.getDataSize(), MPI_DOUBLE, root, comm);
143 for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
144 MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
145 sendData.getNcells(), MPI_DOUBLE, root, comm);
146 }
147}

◆ bCast() [4/21]

template<>
void olb::singleton::MpiManager::bCast ( bool &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 934 of file mpiManager.cpp.

935{
936 if (!ok) {
937 return;
938 }
939 MPI_Bcast(&sendVal, 1, MPI_BYTE, root, comm);
940}

◆ bCast() [5/21]

template<>
void olb::singleton::MpiManager::bCast ( bool *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 844 of file mpiManager.cpp.

845{
846 if (!ok) {
847 return;
848 }
849 MPI_Bcast(static_cast<void*>(sendBuf),
850 sendCount, MPI_BYTE, root, comm);
851}

◆ bCast() [6/21]

template<>
void olb::singleton::MpiManager::bCast ( char &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 942 of file mpiManager.cpp.

943{
944 if (!ok) {
945 return;
946 }
947 MPI_Bcast(&sendVal, 1, MPI_CHAR, root, comm);
948}

◆ bCast() [7/21]

template<>
void olb::singleton::MpiManager::bCast ( char *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 854 of file mpiManager.cpp.

855{
856 if (!ok) {
857 return;
858 }
859 MPI_Bcast(static_cast<void*>(sendBuf),
860 sendCount, MPI_CHAR, root, comm);
861}

◆ bCast() [8/21]

template<>
void olb::singleton::MpiManager::bCast ( double &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 987 of file mpiManager.cpp.

988{
989 if (!ok) {
990 return;
991 }
992 MPI_Bcast(&sendVal, 1, MPI_DOUBLE, root, comm);
993}

◆ bCast() [9/21]

template<>
void olb::singleton::MpiManager::bCast ( double *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 904 of file mpiManager.cpp.

905{
906 if (!ok) {
907 return;
908 }
909 MPI_Bcast(static_cast<void*>(sendBuf),
910 sendCount, MPI_DOUBLE, root, comm);
911}

◆ bCast() [10/21]

template<>
void olb::singleton::MpiManager::bCast ( float &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 978 of file mpiManager.cpp.

979{
980 if (!ok) {
981 return;
982 }
983 MPI_Bcast(&sendVal, 1, MPI_FLOAT, root, comm);
984}

◆ bCast() [11/21]

template<>
void olb::singleton::MpiManager::bCast ( float *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 894 of file mpiManager.cpp.

895{
896 if (!ok) {
897 return;
898 }
899 MPI_Bcast(static_cast<void*>(sendBuf),
900 sendCount, MPI_FLOAT, root, comm);
901}

◆ bCast() [12/21]

template<>
void olb::singleton::MpiManager::bCast ( int &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 960 of file mpiManager.cpp.

961{
962 if (!ok) {
963 return;
964 }
965 MPI_Bcast(&sendVal, 1, MPI_INT, root, comm);
966}

◆ bCast() [13/21]

template<>
void olb::singleton::MpiManager::bCast ( int *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 874 of file mpiManager.cpp.

875{
876 if (!ok) {
877 return;
878 }
879 MPI_Bcast(static_cast<void*>(sendBuf),
880 sendCount, MPI_INT, root, comm);
881}

◆ bCast() [14/21]

void olb::singleton::MpiManager::bCast ( std::string &  message,
int  root = 0 
)

Special case for broadcasting strings. Memory handling is automatic.

◆ bCast() [15/21]

template<typename T >
void olb::singleton::MpiManager::bCast ( T &  sendVal,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

◆ bCast() [16/21]

template<typename T >
void olb::singleton::MpiManager::bCast ( T *  sendBuf,
int  sendCount,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Broadcast data from one processor to multiple processors.

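A brief, hypothetical call pattern (buffer contents and size are placeholders):

auto& mpi = olb::singleton::mpi();
std::vector<double> data(100);
if (mpi.getRank() == 0) {
  // ... fill data on the root rank only ...
}
mpi.bCast(data.data(), static_cast<int>(data.size()), /*root=*/0);
// all ranks now hold identical contents in data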

◆ bCast() [17/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned char &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 951 of file mpiManager.cpp.

952{
953 if (!ok) {
954 return;
955 }
956 MPI_Bcast(&sendVal, 1, MPI_UNSIGNED_CHAR, root, comm);
957}

◆ bCast() [18/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned char *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 864 of file mpiManager.cpp.

865{
866 if (!ok) {
867 return;
868 }
869 MPI_Bcast(static_cast<void*>(sendBuf),
870 sendCount, MPI_UNSIGNED_CHAR, root, comm);
871}

◆ bCast() [19/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned long &  sendVal,
int  root,
MPI_Comm  comm 
)

Definition at line 969 of file mpiManager.cpp.

970{
971 if (!ok) {
972 return;
973 }
974 MPI_Bcast(&sendVal, 1, MPI_UNSIGNED_LONG, root, comm);
975}

◆ bCast() [20/21]

template<>
void olb::singleton::MpiManager::bCast ( unsigned long *  sendBuf,
int  sendCount,
int  root,
MPI_Comm  comm 
)

Definition at line 884 of file mpiManager.cpp.

885{
886 if (!ok) {
887 return;
888 }
889 MPI_Bcast(static_cast<void*>(sendBuf),
890 sendCount, MPI_UNSIGNED_LONG, root, comm);
891}

◆ bCast() [21/21]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCast ( util::ADf< T, DIM > *  sendBuf,
int  sendCount,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 124 of file mpiManagerAD.hh.

125{
126 if (!ok) {
127 return;
128 }
129 MPI_Bcast(static_cast<void*>(sendBuf),
130 (sizeof(util::ADf<T,DIM>)/8)*sendCount, MPI_DOUBLE, root, comm);
131}

◆ bCastThroughMaster() [1/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( bool *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 996 of file mpiManager.cpp.

997{
998 if (!ok) {
999 return;
1000 }
1001 if (iAmRoot && !isMainProcessor()) {
1002 send(sendBuf, sendCount, 0);
1003 }
1004 if (isMainProcessor() && !iAmRoot) {
1005 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1006 }
1007 bCast(sendBuf, sendCount, 0);
1008}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [2/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( char *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 1011 of file mpiManager.cpp.

1012{
1013 if (!ok) {
1014 return;
1015 }
1016 if (iAmRoot && !isMainProcessor()) {
1017 send(sendBuf, sendCount, 0);
1018 }
1019 if (isMainProcessor() && !iAmRoot) {
1020 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1021 }
1022 bCast(sendBuf, sendCount, 0);
1023}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [3/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( double *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 1056 of file mpiManager.cpp.

1057{
1058 if (!ok) {
1059 return;
1060 }
1061 if (iAmRoot && !isMainProcessor()) {
1062 send(sendBuf, sendCount, 0);
1063 }
1064 if (isMainProcessor() && !iAmRoot) {
1065 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1066 }
1067 bCast(sendBuf, sendCount, 0);
1068}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [4/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( float *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 1041 of file mpiManager.cpp.

1042{
1043 if (!ok) {
1044 return;
1045 }
1046 if (iAmRoot && !isMainProcessor()) {
1047 send(sendBuf, sendCount, 0);
1048 }
1049 if (isMainProcessor() && !iAmRoot) {
1050 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1051 }
1052 bCast(sendBuf, sendCount, 0);
1053}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [5/7]

template<>
void olb::singleton::MpiManager::bCastThroughMaster ( int *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 1026 of file mpiManager.cpp.

1027{
1028 if (!ok) {
1029 return;
1030 }
1031 if (iAmRoot && !isMainProcessor()) {
1032 send(sendBuf, sendCount, 0);
1033 }
1034 if (isMainProcessor() && !iAmRoot) {
1035 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
1036 }
1037 bCast(sendBuf, sendCount, 0);
1038}

References bCast(), isMainProcessor(), receive(), and send().


◆ bCastThroughMaster() [6/7]

template<typename T >
void olb::singleton::MpiManager::bCastThroughMaster ( T *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Broadcast data when root is unknown to other processors.
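
A sketch of the intended call pattern, assuming exactly one rank sets iAmRoot to true (the initializers marked as comments are placeholders):

bool   iAmRoot    = /* true only on the rank that computed the value */ false;
double localValue = /* the locally computed value */ 0.0;

double result = 0.0;
if (iAmRoot) {
  result = localValue;
}
olb::singleton::mpi().bCastThroughMaster(&result, 1, iAmRoot);
// every rank now holds the same result, without knowing which rank supplied it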

◆ bCastThroughMaster() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::bCastThroughMaster ( util::ADf< T, DIM > *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 188 of file mpiManagerAD.hh.

189{
190 if (!ok) {
191 return;
192 }
193 if (iAmRoot && !isMainProcessor()) {
194 send(sendBuf, sendCount, 0);
195 }
196 if (isMainProcessor() && !iAmRoot) {
197 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
198 }
199 bCast(sendBuf, sendCount, 0);
200}

References bCast(), isMainProcessor(), receive(), and send().


◆ bossId()

int olb::singleton::MpiManager::bossId ( ) const

Returns the process ID of the main processor.

Definition at line 75 of file mpiManager.cpp.

76{
77 return 0;
78}

◆ gatherv() [1/6]

template<>
void olb::singleton::MpiManager::gatherv ( bool *  sendBuf,
int  sendCount,
bool *  recvBuf,
int *  recvCounts,
int *  displs,
int  root,
MPI_Comm  comm 
)

Definition at line 766 of file mpiManager.cpp.

769{
770 if (!ok) {
771 return;
772 }
773 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_BYTE,
774 static_cast<void*>(recvBuf), recvCounts, displs, MPI_BYTE,
775 root, comm);
776}

◆ gatherv() [2/6]

template<>
void olb::singleton::MpiManager::gatherv ( char *  sendBuf,
int  sendCount,
char *  recvBuf,
int *  recvCounts,
int *  displs,
int  root,
MPI_Comm  comm 
)

Definition at line 779 of file mpiManager.cpp.

782{
783 if (!ok) {
784 return;
785 }
786 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_CHAR,
787 static_cast<void*>(recvBuf), recvCounts, displs, MPI_CHAR,
788 root, comm);
789}

◆ gatherv() [3/6]

template<>
void olb::singleton::MpiManager::gatherv ( double *  sendBuf,
int  sendCount,
double *  recvBuf,
int *  recvCounts,
int *  displs,
int  root,
MPI_Comm  comm 
)

Definition at line 818 of file mpiManager.cpp.

821{
822 if (!ok) {
823 return;
824 }
825 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_DOUBLE,
826 static_cast<void*>(recvBuf), recvCounts, displs, MPI_DOUBLE,
827 root, comm);
828}

◆ gatherv() [4/6]

template<>
void olb::singleton::MpiManager::gatherv ( float *  sendBuf,
int  sendCount,
float *  recvBuf,
int *  recvCounts,
int *  displs,
int  root,
MPI_Comm  comm 
)

Definition at line 805 of file mpiManager.cpp.

808{
809 if (!ok) {
810 return;
811 }
812 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_FLOAT,
813 static_cast<void*>(recvBuf), recvCounts, displs, MPI_FLOAT,
814 root, comm);
815}

◆ gatherv() [5/6]

template<>
void olb::singleton::MpiManager::gatherv ( int *  sendBuf,
int  sendCount,
int *  recvBuf,
int *  recvCounts,
int *  displs,
int  root,
MPI_Comm  comm 
)

Definition at line 792 of file mpiManager.cpp.

795{
796 if (!ok) {
797 return;
798 }
799 MPI_Gatherv(static_cast<void*>(sendBuf), sendCount, MPI_INT,
800 static_cast<void*>(recvBuf), recvCounts, displs, MPI_INT,
801 root, comm);
802}

◆ gatherv() [6/6]

template<typename T >
void olb::singleton::MpiManager::gatherv ( T *  sendBuf,
int  sendCount,
T *  recvBuf,
int *  recvCounts,
int *  displs,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Gather data from multiple processors to one processor.
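
A hypothetical gatherv sketch in which rank r contributes r+1 values, so the root can precompute the receive counts and displacements without extra communication:

auto& mpi   = olb::singleton::mpi();
int rank    = mpi.getRank();
int size    = mpi.getSize();
int sendCnt = rank + 1;
std::vector<double> local(sendCnt, static_cast<double>(rank));

std::vector<int> recvCounts(size), displs(size);
std::vector<double> global;
if (mpi.isMainProcessor()) {
  for (int r = 0; r < size; ++r) {
    recvCounts[r] = r + 1;
    displs[r]     = (r * (r + 1)) / 2;   // prefix sum of recvCounts
  }
  global.resize((size * (size + 1)) / 2);
}
mpi.gatherv(local.data(), sendCnt, global.data(),
            recvCounts.data(), displs.data(), /*root=*/0);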


◆ getRank()

int olb::singleton::MpiManager::getRank ( ) const

Returns the process ID.

Definition at line 70 of file mpiManager.cpp.

71{
72 return taskId;
73}

◆ getSize()

int olb::singleton::MpiManager::getSize ( ) const

Returns the number of processes.

Definition at line 65 of file mpiManager.cpp.

66{
67 return numTasks;
68}

◆ getTime()

double olb::singleton::MpiManager::getTime ( ) const

Returns universal MPI-time in seconds.

Definition at line 85 of file mpiManager.cpp.

86{
87 if (!ok) {
88 return 0.;
89 }
90 return MPI_Wtime();
91}

◆ ibSend() [1/7]

template<>
void olb::singleton::MpiManager::ibSend ( bool *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 292 of file mpiManager.cpp.

294{
295 if (ok) {
296 MPI_Ibsend(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
297 }
298}

◆ ibSend() [2/7]

template<>
void olb::singleton::MpiManager::ibSend ( char *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 301 of file mpiManager.cpp.

303{
304 if (ok) {
305 MPI_Ibsend(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm, request);
306 }
307}

◆ ibSend() [3/7]

template<>
void olb::singleton::MpiManager::ibSend ( double *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 328 of file mpiManager.cpp.

330{
331 if (ok) {
332 MPI_Ibsend(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm, request);
333 }
334}

◆ ibSend() [4/7]

template<>
void olb::singleton::MpiManager::ibSend ( float *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 319 of file mpiManager.cpp.

321{
322 if (ok) {
323 MPI_Ibsend(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm, request);
324 }
325}

◆ ibSend() [5/7]

template<>
void olb::singleton::MpiManager::ibSend ( int *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 310 of file mpiManager.cpp.

312{
313 if (ok) {
314 MPI_Ibsend(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm, request);
315 }
316}

◆ ibSend() [6/7]

template<typename T >
void olb::singleton::MpiManager::ibSend ( T *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Sends data at *buf, non-blocking and buffered.

◆ ibSend() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::ibSend ( util::ADf< T, DIM > *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

◆ init()

void olb::singleton::MpiManager::init ( int *  argc,
char ***  argv,
bool  verbose = true 
)

Initializes the MPI manager.

Definition at line 48 of file mpiManager.cpp.

49{
50 int ok0{};
51 MPI_Initialized(&ok0);
52 if (ok0) {
53 return;
54 }
55 int ok1 = MPI_Init(argc, argv);
56 int ok2 = MPI_Comm_rank(MPI_COMM_WORLD, &taskId);
57 int ok3 = MPI_Comm_size(MPI_COMM_WORLD, &numTasks);
58 int ok4 = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
59 ok = (ok1 == MPI_SUCCESS && ok2 == MPI_SUCCESS && ok3 == MPI_SUCCESS && ok4 == MPI_SUCCESS);
60 if (verbose) {
61 clout << "Sucessfully initialized, numThreads=" << getSize() << std::endl;
62 }
63}

References getSize().


◆ iRecv() [1/7]

template<>
void olb::singleton::MpiManager::iRecv ( bool *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 549 of file mpiManager.cpp.

550{
551 if (ok) {
552 MPI_Irecv(static_cast<void*>(buf), count, MPI_BYTE, source, tag, comm, request);
553 }
554}

◆ iRecv() [2/7]

template<>
void olb::singleton::MpiManager::iRecv ( char *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 557 of file mpiManager.cpp.

558{
559 if (ok) {
560 MPI_Irecv(static_cast<void*>(buf), count, MPI_CHAR, source, tag, comm, request);
561 }
562}

◆ iRecv() [3/7]

template<>
void olb::singleton::MpiManager::iRecv ( double *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 581 of file mpiManager.cpp.

582{
583 if (ok) {
584 MPI_Irecv(static_cast<void*>(buf), count, MPI_DOUBLE, source, tag, comm, request);
585 }
586}

◆ iRecv() [4/7]

template<>
void olb::singleton::MpiManager::iRecv ( float *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 573 of file mpiManager.cpp.

574{
575 if (ok) {
576 MPI_Irecv(static_cast<void*>(buf), count, MPI_FLOAT, source, tag, comm, request);
577 }
578}

◆ iRecv() [5/7]

template<>
void olb::singleton::MpiManager::iRecv ( int *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 565 of file mpiManager.cpp.

566{
567 if (ok) {
568 MPI_Irecv(static_cast<void*>(buf), count, MPI_INT, source, tag, comm, request);
569 }
570}

◆ iRecv() [6/7]

template<typename T >
void olb::singleton::MpiManager::iRecv ( T *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Receives data at *buf, non-blocking.

◆ iRecv() [7/7]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::iRecv ( util::ADf< T, DIM > *  buf,
int  count,
int  source,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 98 of file mpiManagerAD.hh.

99{
100 if (ok) {
101 MPI_Irecv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, request);
102 }
103}

◆ iSend() [1/8]

template<>
void olb::singleton::MpiManager::iSend ( bool *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 210 of file mpiManager.cpp.

212{
213 if (ok) {
214 MPI_Isend(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
215 }
216}

◆ iSend() [2/8]

template<>
void olb::singleton::MpiManager::iSend ( char *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 219 of file mpiManager.cpp.

221{
222 if (ok) {
223 MPI_Isend(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm, request);
224 }
225}

◆ iSend() [3/8]

template<>
void olb::singleton::MpiManager::iSend ( double *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 273 of file mpiManager.cpp.

275{
276 if (ok) {
277 MPI_Isend(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm, request);
278 }
279}

◆ iSend() [4/8]

template<>
void olb::singleton::MpiManager::iSend ( float *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 264 of file mpiManager.cpp.

266{
267 if (ok) {
268 MPI_Isend(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm, request);
269 }
270}

◆ iSend() [5/8]

template<>
void olb::singleton::MpiManager::iSend ( int *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 237 of file mpiManager.cpp.

239{
240 if (ok) {
241 MPI_Isend(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm, request);
242 }
243}

◆ iSend() [6/8]

template<>
void olb::singleton::MpiManager::iSend ( long double *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 282 of file mpiManager.cpp.

284{
285 if (ok) {
286 MPI_Isend(static_cast<void*>(buf), count, MPI_LONG_DOUBLE, dest, tag, comm, request);
287 }
288}

◆ iSend() [7/8]

template<typename T >
void olb::singleton::MpiManager::iSend ( T *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Sends data at *buf, non-blocking.
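
A sketch pairing the non-blocking send with the wait() member documented in this class (destination rank and buffer size are placeholders):

double buffer[8] = {};
MPI_Request request;
MPI_Status  status;
auto& mpi = olb::singleton::mpi();
mpi.iSend(buffer, 8, /*dest=*/1, &request);
// ... overlap computation here; buffer must not be modified yet ...
mpi.wait(&request, &status);           // complete the send before reusing buffer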


◆ iSend() [8/8]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::iSend ( util::ADf< T, DIM > *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 85 of file mpiManagerAD.hh.

87{
88 if (ok) {
89 MPI_Isend(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
90 }
91}

◆ isMainProcessor()

bool olb::singleton::MpiManager::isMainProcessor ( ) const

Tells whether the current processor is the main processor.

Definition at line 80 of file mpiManager.cpp.

81{
82 return bossId() == getRank();
83}

References bossId(), and getRank().


◆ probeReceiveSize() [1/2]

template<typename TYPE >
std::size_t olb::singleton::MpiManager::probeReceiveSize ( int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Probe size of incoming message with TYPE.

◆ probeReceiveSize() [2/2]

std::size_t olb::singleton::MpiManager::probeReceiveSize ( int  source,
MPI_Datatype  type,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Probe size of incoming message.

Definition at line 336 of file mpiManager.cpp.

337{
338 MPI_Status status;
339 if (MPI_Probe(source, tag, comm, &status) == MPI_SUCCESS) {
340 int requestSize;
341 MPI_Get_count(&status, type, &requestSize);
342 if (requestSize == MPI_UNDEFINED) {
343 throw std::runtime_error("MPI_UNDEFINED in probeReceiveSize(" + std::to_string(source) + "," + std::to_string(tag) + ")" + " ranks " + std::to_string(source) + " -> " + std::to_string(singleton::mpi().getRank()));
344 }
345 return requestSize;
346 } else {
347 throw std::runtime_error("MPI_Probe failed in probeReceiveSize");
348 }
349}
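
A sketch combining the probe with a blocking receive (source rank and tag are placeholders):

auto& mpi = olb::singleton::mpi();
std::size_t n = mpi.probeReceiveSize(/*source=*/0, MPI_DOUBLE, /*tag=*/7);
std::vector<double> buffer(n);
mpi.receive(buffer.data(), static_cast<int>(n), /*source=*/0, /*tag=*/7);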

References getRank(), and olb::singleton::mpi().


◆ receive() [1/10]

template<>
void olb::singleton::MpiManager::receive ( bool *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 364 of file mpiManager.cpp.

365{
366 if (!ok) {
367 return;
368 }
369 MPI_Status status;
370 MPI_Recv(static_cast<void*>(buf), count, MPI_BYTE, source, tag, comm, &status);
371}

◆ receive() [2/10]

template<>
void olb::singleton::MpiManager::receive ( char *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 375 of file mpiManager.cpp.

376{
377 if (!ok) {
378 return;
379 }
380 MPI_Status status;
381 MPI_Recv(static_cast<void*>(buf), count, MPI_CHAR, source, tag, comm, &status);
382}

◆ receive() [3/10]

template<>
void olb::singleton::MpiManager::receive ( double *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 435 of file mpiManager.cpp.

436{
437 if (!ok) {
438 return;
439 }
440 MPI_Status status;
441 MPI_Recv(static_cast<void*>(buf), count, MPI_DOUBLE, source, tag, comm, &status);
442}

◆ receive() [4/10]

template<>
void olb::singleton::MpiManager::receive ( float *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 425 of file mpiManager.cpp.

426{
427 if (!ok) {
428 return;
429 }
430 MPI_Status status;
431 MPI_Recv(static_cast<void*>(buf), count, MPI_FLOAT, source, tag, comm, &status);
432}

◆ receive() [5/10]

template<>
void olb::singleton::MpiManager::receive ( int *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 395 of file mpiManager.cpp.

396{
397 if (!ok) {
398 return;
399 }
400 MPI_Status status;
401 MPI_Recv(static_cast<void*>(buf), count, MPI_INT, source, tag, comm, &status);
402}

◆ receive() [6/10]

template<>
void olb::singleton::MpiManager::receive ( long double *  buf,
int  count,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 445 of file mpiManager.cpp.

446{
447 if (!ok) {
448 return;
449 }
450 MPI_Status status;
451 MPI_Recv(static_cast<void*>(buf), count, MPI_LONG_DOUBLE, source, tag, comm, &status);
452}

◆ receive() [7/10]

template<class T , std::size_t N>
void olb::singleton::MpiManager::receive ( std::array< T, N > &  array,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Definition at line 159 of file mpiManager.h.

159 {
160 receive( array.data(), array.size(), source, tag, comm );
161 }

References receive().


◆ receive() [8/10]

template<typename... args>
void olb::singleton::MpiManager::receive ( std::vector< args... > &  vec,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Definition at line 155 of file mpiManager.h.

155 {
156 receive( vec.data(), vec.size(), source, tag, comm );
157 }

References receive().


◆ receive() [9/10]

template<typename T >
void olb::singleton::MpiManager::receive ( T *  buf,
int  count,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Receives data at *buf, blocking.
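
A sketch of a matching blocking send/receive pair between rank 0 and rank 1 (the tag is chosen arbitrarily):

auto& mpi = olb::singleton::mpi();
double payload[4] = {1., 2., 3., 4.};
if (mpi.getRank() == 0) {
  mpi.send(payload, 4, /*dest=*/1, /*tag=*/3);
}
else if (mpi.getRank() == 1) {
  mpi.receive(payload, 4, /*source=*/0, /*tag=*/3);
}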


◆ receive() [10/10]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::receive ( util::ADf< T, DIM > *  buf,
int  count,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 66 of file mpiManagerAD.hh.

67{
68 if (!ok) {
69 return;
70 }
71 MPI_Status status;
72 MPI_Recv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, &status);
73}

◆ recvInit() [1/4]

template<>
void olb::singleton::MpiManager::recvInit ( double *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 525 of file mpiManager.cpp.

526{
527 if (ok) {
528 MPI_Recv_init(buf, count, MPI_DOUBLE, dest, tag, comm, request);
529 }
530}

◆ recvInit() [2/4]

template<>
void olb::singleton::MpiManager::recvInit ( int *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 533 of file mpiManager.cpp.

534{
535 if (ok) {
536 MPI_Recv_init(buf, count, MPI_INT, dest, tag, comm, request);
537 }
538}

◆ recvInit() [3/4]

template<typename T >
void olb::singleton::MpiManager::recvInit ( T *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Initialize persistent non-blocking receive.


◆ recvInit() [4/4]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::recvInit ( util::ADf< T, DIM > *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 76 of file mpiManagerAD.hh.

77{
78 if (ok) {
79 MPI_Recv_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
80 }
81}

◆ reduce() [1/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, double, double > &  sendVal,
BlockData< 2, double, double > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 188 of file blockData.hh.

189{
190 if (!ok) {
191 return;
192 }
193 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
194 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
195 static_cast<void*>(recvVal.getColumn(iD).data()),
196 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
197 }
198}

◆ reduce() [2/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, double, int > &  sendVal,
BlockData< 2, double, int > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 175 of file blockData.hh.

176{
177 if (!ok) {
178 return;
179 }
180 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
181 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
182 static_cast<void*>(recvVal.getColumn(iD).data()),
183 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
184 }
185}

◆ reduce() [3/11]

template<>
void olb::singleton::MpiManager::reduce ( BlockData< 2, float, float > &  sendVal,
BlockData< 2, float, float > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 201 of file blockData.hh.

202{
203 if (!ok) {
204 return;
205 }
206 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
207 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
208 static_cast<void*>(recvVal.getColumn(iD).data()),
209 sendVal.getNcells(), MPI_FLOAT, op, root, comm);
210 }
211}

◆ reduce() [4/11]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduce ( BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &  sendVal,
BlockData< 2, util::ADf< T, DIM >, util::ADf< T, DIM > > &  recvVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 168 of file mpiManagerAD.hh.

172{
173 if (!ok) {
174 return;
175 }
176// MPI_Reduce(static_cast<void*>(sendVal.getRawData()),
177// static_cast<void*>(recvVal.getRawData()),
178// sendVal.getDataSize(), MPI_DOUBLE, op, root, comm);
179 for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
180 MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
181 static_cast<void*>(recvVal.getColumn(iD).data()),
182 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
183 }
184}

◆ reduce() [5/11]

template<>
void olb::singleton::MpiManager::reduce ( bool &  sendVal,
bool &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1071 of file mpiManager.cpp.

1072{
1073 if (!ok) {
1074 return;
1075 }
1076 MPI_Reduce(static_cast<void*>(&sendVal),
1077 static_cast<void*>(&recvVal), 1, MPI_BYTE, op, root, comm);
1078}

◆ reduce() [6/11]

template<>
void olb::singleton::MpiManager::reduce ( char &  sendVal,
char &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1081 of file mpiManager.cpp.

1082{
1083 if (!ok) {
1084 return;
1085 }
1086 MPI_Reduce(static_cast<void*>(&sendVal),
1087 static_cast<void*>(&recvVal), 1, MPI_CHAR, op, root, comm);
1088}

◆ reduce() [7/11]

template<>
void olb::singleton::MpiManager::reduce ( double &  sendVal,
double &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1111 of file mpiManager.cpp.

1112{
1113 if (!ok) {
1114 return;
1115 }
1116 MPI_Reduce(static_cast<void*>(&sendVal),
1117 static_cast<void*>(&recvVal), 1, MPI_DOUBLE, op, root, comm);
1118}

◆ reduce() [8/11]

template<>
void olb::singleton::MpiManager::reduce ( float &  sendVal,
float &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1101 of file mpiManager.cpp.

1102{
1103 if (!ok) {
1104 return;
1105 }
1106 MPI_Reduce(static_cast<void*>(&sendVal),
1107 static_cast<void*>(&recvVal), 1, MPI_FLOAT, op, root, comm);
1108}

◆ reduce() [9/11]

template<>
void olb::singleton::MpiManager::reduce ( int &  sendVal,
int &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1091 of file mpiManager.cpp.

1092{
1093 if (!ok) {
1094 return;
1095 }
1096 MPI_Reduce(static_cast<void*>(&sendVal),
1097 static_cast<void*>(&recvVal), 1, MPI_INT, op, root, comm);
1098}

◆ reduce() [10/11]

template<typename T >
void olb::singleton::MpiManager::reduce ( T &  sendVal,
T &  recvVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  = MPI_COMM_WORLD 
)

Reduction operation toward one processor.


◆ reduce() [11/11]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduce ( util::ADf< T, DIM > &  sendVal,
util::ADf< T, DIM > &  recvVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 150 of file mpiManagerAD.hh.

151{
152 if (!ok) {
153 return;
154 }
155
156 int sizeADouble = sizeof(util::ADf<T,DIM>)/8-1;
157
158 MPI_Reduce(static_cast<void*>(&sendVal.v()),
159 static_cast<void*>(&recvVal.v()), 1, MPI_DOUBLE, op, root, comm);
160
161 for (int i=0; i<sizeADouble; i++) {
162 MPI_Reduce(static_cast<void*>(&sendVal.d(i)),
163 static_cast<void*>(&recvVal.d(i)), 1, MPI_DOUBLE, op, root, comm);
164 }
165}

References olb::util::ADf< T, DIM >::d(), and olb::util::ADf< T, DIM >::v().


◆ reduceAndBcast() [1/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( bool &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1179 of file mpiManager.cpp.

1180{
1181 if (!ok) {
1182 return;
1183 }
1184 char recvVal;
1185 MPI_Reduce(static_cast<void*>(&reductVal), static_cast<void*>(&recvVal), 1, MPI_BYTE, op, root, comm);
1186 reductVal = recvVal;
1187 MPI_Bcast(&reductVal, 1, MPI_BYTE, root, comm);
1188
1189}

◆ reduceAndBcast() [2/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( char &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1192 of file mpiManager.cpp.

1193{
1194 if (!ok) {
1195 return;
1196 }
1197 char recvVal;
1198 MPI_Reduce(&reductVal, &recvVal, 1, MPI_CHAR, op, root, comm);
1199 reductVal = recvVal;
1200 MPI_Bcast(&reductVal, 1, MPI_CHAR, root, comm);
1201
1202}

◆ reduceAndBcast() [3/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( double &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1231 of file mpiManager.cpp.

1232{
1233 if (!ok) {
1234 return;
1235 }
1236 double recvVal;
1237 MPI_Reduce(&reductVal, &recvVal, 1, MPI_DOUBLE, op, root, comm);
1238 reductVal = recvVal;
1239 MPI_Bcast(&reductVal, 1, MPI_DOUBLE, root, comm);
1240
1241}

◆ reduceAndBcast() [4/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( float &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1218 of file mpiManager.cpp.

1219{
1220 if (!ok) {
1221 return;
1222 }
1223 float recvVal;
1224 MPI_Reduce(&reductVal, &recvVal, 1, MPI_FLOAT, op, root, comm);
1225 reductVal = recvVal;
1226 MPI_Bcast(&reductVal, 1, MPI_FLOAT, root, comm);
1227
1228}

◆ reduceAndBcast() [5/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( int &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1205 of file mpiManager.cpp.

1206{
1207 if (!ok) {
1208 return;
1209 }
1210 int recvVal;
1211 MPI_Reduce(&reductVal, &recvVal, 1, MPI_INT, op, root, comm);
1212 reductVal = recvVal;
1213 MPI_Bcast(&reductVal, 1, MPI_INT, root, comm);
1214
1215}

◆ reduceAndBcast() [6/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( long &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1257 of file mpiManager.cpp.

1258{
1259 if (!ok) {
1260 return;
1261 }
1262 long recvVal;
1263 MPI_Reduce(&reductVal, &recvVal, 1, MPI_LONG, op, root, comm);
1264 reductVal = recvVal;
1265 MPI_Bcast(&reductVal, 1, MPI_LONG, root, comm);
1266
1267}

◆ reduceAndBcast() [7/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( long double &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1244 of file mpiManager.cpp.

1245{
1246 if (!ok) {
1247 return;
1248 }
1249 long double recvVal;
1250 MPI_Reduce(&reductVal, &recvVal, 1, MPI_LONG_DOUBLE, op, root, comm);
1251 reductVal = recvVal;
1252 MPI_Bcast(&reductVal, 1, MPI_LONG_DOUBLE, root, comm);
1253
1254}

◆ reduceAndBcast() [8/10]

template<typename T >
void olb::singleton::MpiManager::reduceAndBcast ( T &  reductVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Reduction operation, followed by a broadcast.
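
A sketch making a global maximum available on every rank in one call (localMax stands for a locally computed value):

double localMax = 0.0;                 // placeholder: maximum of the local data
olb::singleton::mpi().reduceAndBcast(localMax, MPI_MAX);
// localMax now holds the global maximum on all ranks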


◆ reduceAndBcast() [9/10]

template<>
void olb::singleton::MpiManager::reduceAndBcast ( unsigned long &  reductVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1270 of file mpiManager.cpp.

1271{
1272 if (!ok) {
1273 return;
1274 }
1275 unsigned long recvVal;
1276 MPI_Reduce(&reductVal, &recvVal, 1, MPI_UNSIGNED_LONG, op, root, comm);
1277 reductVal = recvVal;
1278 MPI_Bcast(&reductVal, 1, MPI_UNSIGNED_LONG, root, comm);
1279
1280}

◆ reduceAndBcast() [10/10]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::reduceAndBcast ( util::ADf< T, DIM > &  reductVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 203 of file mpiManagerAD.hh.

204{
205 if (!ok) {
206 return;
207 }
208 util::ADf<T,DIM> recvVal;
209 reduce(reductVal, recvVal, op, root, comm);
210
211 //MPI_Reduce(&reductVal, &recvVal, 1, MPI_DOUBLE, op, root, comm);
212 reductVal = recvVal;
213 bCast(&reductVal, 1, root, comm);
214
215 //MPI_Bcast(&reductVal, 1, MPI_DOUBLE, root, comm);
216
217}

References bCast(), and reduce().


◆ reduceVect() [1/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< char > &  sendVal,
std::vector< char > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1131 of file mpiManager.cpp.

1133{
1134 if (!ok) {
1135 return;
1136 }
1137 MPI_Reduce(static_cast<void*>(&(sendVal[0])),
1138 static_cast<void*>(&(recvVal[0])),
1139 sendVal.size(), MPI_CHAR, op, root, comm);
1140}

◆ reduceVect() [2/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< double > &  sendVal,
std::vector< double > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1167 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Reduce(static_cast<void*>(&(sendVal[0])),
             static_cast<void*>(&(recvVal[0])),
             sendVal.size(), MPI_DOUBLE, op, root, comm);
}

◆ reduceVect() [3/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< float > &  sendVal,
std::vector< float > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1155 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Reduce(static_cast<void*>(&(sendVal[0])),
             static_cast<void*>(&(recvVal[0])),
             sendVal.size(), MPI_FLOAT, op, root, comm);
}

◆ reduceVect() [4/5]

template<>
void olb::singleton::MpiManager::reduceVect ( std::vector< int > &  sendVal,
std::vector< int > &  recvVal,
MPI_Op  op,
int  root,
MPI_Comm  comm 
)

Definition at line 1143 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Reduce(static_cast<void*>(&(sendVal[0])),
             static_cast<void*>(&(recvVal[0])),
             sendVal.size(), MPI_INT, op, root, comm);
}

◆ reduceVect() [5/5]

template<typename T >
void olb::singleton::MpiManager::reduceVect ( std::vector< T > &  sendVal,
std::vector< T > &  recvVal,
MPI_Op  op,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Element-per-element reduction of a vector of data.
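
Usage sketch (assumed setup, not taken from the library sources): recvVal must be pre-allocated to the same length as sendVal on every rank, and the reduced result is only significant on the root.

#include <vector>

const std::size_t n = 128;           // example length, identical on all ranks
std::vector<double> local(n, 1.0);
std::vector<double> global(n, 0.0);  // receive buffer, must be allocated beforehand
olb::singleton::mpi().reduceVect(local, global, MPI_SUM);
// global holds the element-wise sum on the root rank (default 0) only.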

◆ scatterv() [1/6]

template<>
void olb::singleton::MpiManager::scatterv ( bool *  sendBuf,
int *  sendCounts,
int *  displs,
bool *  recvBuf,
int  recvCount,
int  root,
MPI_Comm  comm 
)

Definition at line 701 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Scatterv(static_cast<void*>(sendBuf),
               sendCounts, displs, MPI_BYTE,
               static_cast<void*>(recvBuf),
               recvCount, MPI_BYTE, root, comm);
}

◆ scatterv() [2/6]

template<>
void olb::singleton::MpiManager::scatterv ( char *  sendBuf,
int *  sendCounts,
int *  displs,
char *  recvBuf,
int  recvCount,
int  root,
MPI_Comm  comm 
)

Definition at line 714 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Scatterv(static_cast<void*>(sendBuf),
               sendCounts, displs, MPI_CHAR,
               static_cast<void*>(recvBuf),
               recvCount, MPI_CHAR, root, comm);
}

◆ scatterv() [3/6]

template<>
void olb::singleton::MpiManager::scatterv ( double *  sendBuf,
int *  sendCounts,
int *  displs,
double *  recvBuf,
int  recvCount,
int  root,
MPI_Comm  comm 
)

Definition at line 753 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Scatterv(static_cast<void*>(sendBuf),
               sendCounts, displs, MPI_DOUBLE,
               static_cast<void*>(recvBuf),
               recvCount, MPI_DOUBLE, root, comm);
}

◆ scatterv() [4/6]

template<>
void olb::singleton::MpiManager::scatterv ( float *  sendBuf,
int *  sendCounts,
int *  displs,
float *  recvBuf,
int  recvCount,
int  root,
MPI_Comm  comm 
)

Definition at line 740 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Scatterv(static_cast<void*>(sendBuf),
               sendCounts, displs, MPI_FLOAT,
               static_cast<void*>(recvBuf),
               recvCount, MPI_FLOAT, root, comm);
}

◆ scatterv() [5/6]

template<>
void olb::singleton::MpiManager::scatterv ( int *  sendBuf,
int *  sendCounts,
int *  displs,
int *  recvBuf,
int  recvCount,
int  root,
MPI_Comm  comm 
)

Definition at line 727 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Scatterv(static_cast<void*>(sendBuf),
               sendCounts, displs, MPI_INT,
               static_cast<void*>(recvBuf),
               recvCount, MPI_INT, root, comm);
}

◆ scatterv() [6/6]

template<typename T >
void olb::singleton::MpiManager::scatterv ( T *  sendBuf,
int *  sendCounts,
int *  displs,
T *  recvBuf,
int  recvCount,
int  root = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Scatter data from one processor over multiple processors.
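
Usage sketch (assumed setup): the root rank scatters a variable number of doubles to each process. Counts and displacements follow the usual MPI_Scatterv conventions and are computed identically on every rank here, so that each process knows its own receive count; filling the send buffer is elided.

#include <vector>

auto& mpi = olb::singleton::mpi();
const int size = mpi.getSize();
std::vector<int> counts(size, 0), displs(size, 0);
// ... fill counts[i] / displs[i] identically on every rank ...
std::vector<double> sendBuf;                          // significant on the root only
std::vector<double> recvBuf(counts[mpi.getRank()]);
mpi.scatterv(sendBuf.data(), counts.data(), displs.data(),
             recvBuf.data(), counts[mpi.getRank()]);  // root defaults to 0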

◆ send() [1/9]

template<>
void olb::singleton::MpiManager::send ( bool *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm 
)

Definition at line 108 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm);
}

◆ send() [2/9]

template<>
void olb::singleton::MpiManager::send ( char *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm 
)

Definition at line 117 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm);
}

◆ send() [3/9]

template<>
void olb::singleton::MpiManager::send ( double *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm 
)

Definition at line 153 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), count, MPI_DOUBLE, dest, tag, comm);
}

◆ send() [4/9]

template<>
void olb::singleton::MpiManager::send ( float *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm 
)

Definition at line 144 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm);
}

◆ send() [5/9]

template<>
void olb::singleton::MpiManager::send ( int *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm  comm 
)

Definition at line 135 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm);
}

◆ send() [6/9]

template<class T , std::size_t N>
void olb::singleton::MpiManager::send ( std::array< T, N > &  array,
int  dest,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Definition at line 121 of file mpiManager.h.

{
  send( array.data(), array.size(), dest, tag, comm );
}

References send().

+ Here is the call graph for this function:

◆ send() [7/9]

template<typename... args>
void olb::singleton::MpiManager::send ( std::vector< args... > &  vec,
int  dest,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)
inline

Definition at line 117 of file mpiManager.h.

{
  send( vec.data(), vec.size(), dest, tag, comm );
}

References send().

+ Here is the call graph for this function:

◆ send() [8/9]

template<typename T >
void olb::singleton::MpiManager::send ( T *  buf,
int  count,
int  dest,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Sends data at *buf, blocking.

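Usage sketch (assumed setup): a blocking send on one rank paired with a matching blocking receive on another, using the std::vector convenience overloads documented above.

#include <vector>

auto& mpi = olb::singleton::mpi();
std::vector<double> data(100, 0.0);
if (mpi.getRank() == 0) {
  mpi.send(data, /*dest=*/1);       // blocks until the message is handed to MPI
}
else if (mpi.getRank() == 1) {
  mpi.receive(data, /*source=*/0);  // blocks until the message has arrived
}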

◆ send() [9/9]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::send ( util::ADf< T, DIM > *  buf,
int  count,
int  dest,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 49 of file mpiManagerAD.hh.

{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm);
}

◆ sendInit() [1/5]

template<>
void olb::singleton::MpiManager::sendInit ( bool *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 202 of file mpiManager.cpp.

{
  if (ok) {
    MPI_Send_init(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm, request);
  }
}

◆ sendInit() [2/5]

template<>
void olb::singleton::MpiManager::sendInit ( double *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 162 of file mpiManager.cpp.

{
  if (ok) {
    MPI_Send_init(buf, count, MPI_DOUBLE, dest, tag, comm, request);
  }
}

◆ sendInit() [3/5]

template<>
void olb::singleton::MpiManager::sendInit ( int *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag,
MPI_Comm  comm 
)

Definition at line 194 of file mpiManager.cpp.

{
  if (ok) {
    MPI_Send_init(buf, count, MPI_INT, dest, tag, comm, request);
  }
}

◆ sendInit() [4/5]

template<typename T >
void olb::singleton::MpiManager::sendInit ( T *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Initialize persistent non-blocking send.

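Usage sketch (assumed setup): the persistent request is created once and reused every iteration. The request is started with plain MPI_Start and completed with MpiManager::wait(); buffer size, destination rank and iteration count are placeholders, and the receiving rank would post a matching receive (or recvInit) that is not shown here.

const int   count = 64;
double      buffer[count];
MPI_Request request;
MPI_Status  status;

// On the sending rank only:
olb::singleton::mpi().sendInit(buffer, count, /*dest=*/1, &request);
for (int iT = 0; iT < 100; ++iT) {
  // ... refill buffer for this iteration ...
  MPI_Start(&request);                           // launch the pre-initialized send
  olb::singleton::mpi().wait(&request, &status); // complete it before reusing buffer
}
MPI_Request_free(&request);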

◆ sendInit() [5/5]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::sendInit ( util::ADf< T, DIM > *  buf,
int  count,
int  dest,
MPI_Request *  request,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 58 of file mpiManagerAD.hh.

{
  if (ok) {
    MPI_Send_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
  }
}

◆ sendRecv() [1/9]

template<>
void olb::singleton::MpiManager::sendRecv ( bool *  sendBuf,
bool *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 589 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_BYTE, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_BYTE, source, tag, comm, &status);
}

◆ sendRecv() [2/9]

template<>
void olb::singleton::MpiManager::sendRecv ( char *  sendBuf,
char *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 605 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_CHAR, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_CHAR, source, tag, comm, &status);
}

◆ sendRecv() [3/9]

template<>
void olb::singleton::MpiManager::sendRecv ( double *  sendBuf,
double *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 669 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_DOUBLE, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_DOUBLE, source, tag, comm, &status);
}

◆ sendRecv() [4/9]

template<>
void olb::singleton::MpiManager::sendRecv ( float *  sendBuf,
float *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 637 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_FLOAT, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_FLOAT, source, tag, comm, &status);
}

◆ sendRecv() [5/9]

template<>
void olb::singleton::MpiManager::sendRecv ( int *  sendBuf,
int *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 621 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_INT, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_INT, source, tag, comm, &status);
}

◆ sendRecv() [6/9]

template<>
void olb::singleton::MpiManager::sendRecv ( long *  sendBuf,
long *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 653 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_LONG, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_LONG, source, tag, comm, &status);
}

◆ sendRecv() [7/9]

template<>
void olb::singleton::MpiManager::sendRecv ( long double *  sendBuf,
long double *  recvBuf,
int  count,
int  dest,
int  source,
int  tag,
MPI_Comm  comm 
)

Definition at line 685 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               count,
               MPI_LONG_DOUBLE, dest, tag,
               static_cast<void*>(recvBuf),
               count,
               MPI_LONG_DOUBLE, source, tag, comm, &status);
}

◆ sendRecv() [8/9]

template<typename T >
void olb::singleton::MpiManager::sendRecv ( T *  sendBuf,
T *  recvBuf,
int  count,
int  dest,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Send and receive data between two partners.

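Usage sketch (assumed setup): a ring shift in which every rank sends to its right neighbour and receives from its left neighbour in a single, deadlock-free call.

auto& mpi = olb::singleton::mpi();
const int rank  = mpi.getRank();
const int size  = mpi.getSize();
const int right = (rank + 1) % size;
const int left  = (rank - 1 + size) % size;

double sendVal = static_cast<double>(rank);
double recvVal = 0.0;
mpi.sendRecv(&sendVal, &recvVal, 1, /*dest=*/right, /*source=*/left);
// recvVal now holds the rank number of the left neighbour.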

◆ sendRecv() [9/9]

template<typename T , unsigned DIM>
void olb::singleton::MpiManager::sendRecv ( util::ADf< T, DIM > *  sendBuf,
util::ADf< T, DIM > *  recvBuf,
int  count,
int  dest,
int  source,
int  tag = 0,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Definition at line 106 of file mpiManagerAD.hh.

{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               (sizeof(util::ADf<T,DIM>)/8)*count,
               MPI_DOUBLE, dest, tag,
               static_cast<void*>(recvBuf),
               (sizeof(util::ADf<T,DIM>)/8)*count,
               MPI_DOUBLE, source, tag, comm, &status);
}

◆ sendToMaster() [1/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( bool *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 455 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
}

References isMainProcessor(), receive(), and send().

+ Here is the call graph for this function:

◆ sendToMaster() [2/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( char *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 469 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
}

References isMainProcessor(), receive(), and send().

+ Here is the call graph for this function:

◆ sendToMaster() [3/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( double *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 511 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
}

References isMainProcessor(), receive(), and send().

+ Here is the call graph for this function:

◆ sendToMaster() [4/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( float *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 497 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
}

References isMainProcessor(), receive(), and send().

+ Here is the call graph for this function:

◆ sendToMaster() [5/6]

template<>
void olb::singleton::MpiManager::sendToMaster ( int *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm 
)

Definition at line 483 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
}

References isMainProcessor(), receive(), and send().

+ Here is the call graph for this function:

◆ sendToMaster() [6/6]

template<typename T >
void olb::singleton::MpiManager::sendToMaster ( T *  sendBuf,
int  sendCount,
bool  iAmRoot,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Sends data to master processor.

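Usage sketch (assumed setup): the call is made on every rank. The single rank for which iAmRoot is true sends its buffer to rank 0, and the main processor receives it from MPI_ANY_SOURCE, so iAmRoot should be true on exactly one rank per call.

double result   = 0.0;
bool   iAmOwner = false;   // set to true on exactly one rank
// ... determine ownership and, on the owning rank, fill result ...
olb::singleton::mpi().sendToMaster(&result, 1, iAmOwner);
// On the main processor, result now holds the forwarded value
// (unless the owner is the main processor itself, which keeps its own value).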

◆ synchronizeIO()

void olb::singleton::MpiManager::synchronizeIO ( unsigned  tDelay = 100,
MPI_Comm  comm = MPI_COMM_WORLD 
)

Synchronizes the processes and waits to ensure correct cout order.

Definition at line 101 of file mpiManager.cpp.

{
  usleep(tDelay);
  barrier(comm);
}

References barrier().

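Usage sketch: serializing console output so that lines from different ranks do not interleave. The delay is passed to usleep, i.e. it is interpreted in microseconds.

#include <iostream>

auto& mpi = olb::singleton::mpi();
for (int r = 0; r < mpi.getSize(); ++r) {
  if (mpi.getRank() == r) {
    std::cout << "rank " << r << " reporting" << std::endl;
  }
  mpi.synchronizeIO();   // short delay + barrier between successive writers
}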

◆ wait()

void olb::singleton::MpiManager::wait ( MPI_Request *  request,
MPI_Status *  status 
)

Complete a non-blocking MPI operation.

Definition at line 1282 of file mpiManager.cpp.

{
  if (!ok) {
    return;
  }
  MPI_Wait(request, status);
}

◆ waitAll()

void olb::singleton::MpiManager::waitAll ( MpiNonBlockingHelper &  mpiNbHelper)

Complete a series of non-blocking MPI operations.

Definition at line 1290 of file mpiManager.cpp.

{
  if (!ok || mpiNbHelper.get_size() == 0) {
    return;
  }
  MPI_Waitall(mpiNbHelper.get_size(), mpiNbHelper.get_mpiRequest(), mpiNbHelper.get_mpiStatus());
}

References olb::singleton::MpiNonBlockingHelper::get_mpiRequest(), olb::singleton::MpiNonBlockingHelper::get_mpiStatus(), and olb::singleton::MpiNonBlockingHelper::get_size().

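Usage sketch (assumed setup): the non-blocking pattern that wait() completes, posting an iSend, overlapping it with other work, and waiting before the buffer is touched again; waitAll() does the same for a whole batch of requests collected in an MpiNonBlockingHelper. Buffer size and partner ranks are placeholders.

const int   count = 64;
int         payload[count] = {0};
MPI_Request request;
MPI_Status  status;

auto& mpi = olb::singleton::mpi();
if (mpi.getRank() == 0) {
  mpi.iSend(payload, count, /*dest=*/1, &request);
  // ... overlap the communication with unrelated work ...
  mpi.wait(&request, &status);      // payload may be reused afterwards
}
else if (mpi.getRank() == 1) {
  mpi.receive(payload, count, /*source=*/0);
}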

Friends And Related Function Documentation

◆ mpi

MpiManager & mpi ( )
friend

Definition at line 29 of file mpiManager.cpp.

{
  static MpiManager instance;
  return instance;
}
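
The friend function is the usual access point: client code does not construct an MpiManager itself but obtains the shared instance through it, e.g.:

const int rank = olb::singleton::mpi().getRank();
const int size = olb::singleton::mpi().getSize();
if (olb::singleton::mpi().isMainProcessor()) {
  // typically rank 0: a natural place for output and other I/O
}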

The documentation for this class was generated from the following files: