30#ifdef PARALLEL_MODE_MPI
43template <
unsigned D,
typename T,
typename U>
48#ifdef PARALLEL_MODE_MPI
56 std::unique_ptr<MPI_Request[]> _mpiRequest;
58 std::unique_ptr<MPI_Status[]> _mpiStatus;
93 void init(
int *argc,
char ***argv,
bool verbose=
true);
106 void barrier(MPI_Comm comm = MPI_COMM_WORLD);
109 void synchronizeIO(
unsigned tDelay = 100, MPI_Comm comm = MPI_COMM_WORLD);
112 template <
typename T>
113 void send(T *buf,
int count,
int dest,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
114 template <
typename T,
unsigned DIM>
115 void send(
util::ADf<T,DIM> *buf,
int count,
int dest,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
116 template<
typename... args>
117 void send(std::vector<args...>& vec,
int dest,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
118 send( vec.data(), vec.size(), dest, tag, comm );
120 template<
class T, std::
size_t N>
121 void send(std::array<T,N>& array,
int dest,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
122 send( array.data(), array.size(), dest, tag, comm );
126 template <
typename T>
127 void sendInit(T *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
128 template <
typename T,
unsigned DIM>
129 void sendInit(
util::ADf<T,DIM> *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
132 template <
typename T>
133 void iSend(T *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
134 template <
typename T,
unsigned DIM>
135 void iSend(
util::ADf<T,DIM> *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
138 template <
typename T>
139 void ibSend(T *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
140 template <
typename T,
unsigned DIM>
141 void ibSend(
util::ADf<T,DIM> *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
144 std::size_t
probeReceiveSize(
int source, MPI_Datatype type,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
146 template <
typename TYPE>
150 template <
typename T>
151 void receive(T *buf,
int count,
int source,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
152 template <
typename T,
unsigned DIM>
154 template<
typename... args>
155 void receive(std::vector<args...>& vec,
int source,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
156 receive( vec.data(), vec.size(), source, tag, comm );
158 template<
class T, std::
size_t N>
159 void receive(std::array<T,N>& array,
int source,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
160 receive( array.data(), array.size(), source, tag, comm );
164 template <
typename T>
165 void recvInit(T *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
166 template <
typename T,
unsigned DIM>
167 void recvInit(
util::ADf<T,DIM> *buf,
int count,
int dest, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
170 template <
typename T>
171 void iRecv(T *buf,
int count,
int source, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
172 template <
typename T,
unsigned DIM>
173 void iRecv(
util::ADf<T,DIM> *buf,
int count,
int source, MPI_Request* request,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
176 template <
typename T>
177 void sendRecv(T *sendBuf, T *recvBuf,
int count,
int dest,
int source,
int tag = 0,
178 MPI_Comm comm = MPI_COMM_WORLD);
179 template <
typename T,
unsigned DIM>
181 int dest,
int source,
int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
184 template <
typename T>
185 void sendToMaster(T* sendBuf,
int sendCount,
bool iAmRoot, MPI_Comm comm = MPI_COMM_WORLD);
188 template <
typename T>
189 void scatterv(T *sendBuf,
int* sendCounts,
int* displs,
190 T* recvBuf,
int recvCount,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
193 template <
typename T>
194 void gather(T* sendBuf,
int sendCount, T* recvBuf,
int recvCount,
195 int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
198 template <
typename T>
199 void gatherv(T* sendBuf,
int sendCount, T* recvBuf,
int* recvCounts,
int* displs,
200 int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
203 template <
typename T>
204 void bCast(T* sendBuf,
int sendCount,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
205 template <
typename T,
unsigned DIM>
207 template <
typename T,
unsigned DIM>
209 template <
typename T>
210 void bCast(T& sendVal,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
213 template <
typename T>
215 template <
typename T,
unsigned DIM>
219 void bCast(std::string& message,
int root = 0);
226 template <
typename T>
227 void reduce(T& sendVal, T& recvVal, MPI_Op op,
int root = 0, MPI_Comm = MPI_COMM_WORLD);
228 template <
typename T,
unsigned DIM>
230 MPI_Op op,
int root = 0, MPI_Comm = MPI_COMM_WORLD);
231 template <
typename T,
unsigned DIM>
234 MPI_Op op,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
237 template <
typename T>
238 void reduceVect(std::vector<T>& sendVal, std::vector<T>& recvVal,
239 MPI_Op op,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
242 template <
typename T>
243 void reduceAndBcast(T& reductVal, MPI_Op op,
int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
244 template <
typename T,
unsigned DIM>
248 void wait(MPI_Request* request, MPI_Status* status);
257 int numTasks, taskId;
269 void init(
int *argc,
char ***argv,
bool verbose=
false) { }
294 friend MpiManager&
mpi();
The description of an algorithmic differentiation data type using the forward method – header file.
Class for marking output with some text.
Wrapper functions that simplify the use of MPI.
void gather(T *sendBuf, int sendCount, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gather data from multiple processors to one processor.
void bCast(T &sendVal, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
void wait(MPI_Request *request, MPI_Status *status)
Complete a non-blocking MPI operation.
void ibSend(util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
void send(T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, blocking.
void bCast(T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data from one processor to multiple processors.
int getSize() const
Returns the number of processes.
void iSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non blocking.
void reduce(T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
Reduction operation toward one processor.
void bCast(std::string &message, int root=0)
Special case for broadcasting strings. Memory handling is automatic.
double getTime() const
Returns universal MPI-time in seconds.
bool isMainProcessor() const
Tells whether current processor is main processor.
void gatherv(T *sendBuf, int sendCount, T *recvBuf, int *recvCounts, int *displs, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gather data from multiple processors to one processor.
void synchronizeIO(unsigned tDelay=100, MPI_Comm comm=MPI_COMM_WORLD)
Synchronizes the processes and wait to ensure correct cout order.
void reduceVect(std::vector< T > &sendVal, std::vector< T > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Element-per-element reduction of a vector of data.
void ibSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non blocking and buffered.
void reduceAndBcast(T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Reduction operation, followed by a broadcast.
int getRank() const
Returns the process ID.
std::size_t probeReceiveSize(int source, MPI_Datatype type, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Probe size of incoming message.
void send(std::array< T, N > &array, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
std::size_t probeReceiveSize(int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Probe size of incoming message with TYPE.
void send(std::vector< args... > &vec, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
void waitAll(MpiNonBlockingHelper &mpiNbHelper)
Complete a series of non-blocking MPI operations.
void barrier(MPI_Comm comm=MPI_COMM_WORLD)
Synchronizes the processes.
void bCastThroughMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data when root is unknown to other processors.
void iRecv(T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, non blocking.
void init(int *argc, char ***argv, bool verbose=true)
Initializes the mpi manager.
void recvInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking receive.
void scatterv(T *sendBuf, int *sendCounts, int *displs, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Scatter data from one processor over multiple processors.
void sendInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking send.
int bossId() const
Returns process ID of main processor.
friend MpiManager & mpi()
void sendToMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Sends data to master processor.
void sendRecv(T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send and receive data between two partners.
void receive(std::vector< args... > &vec, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
void receive(T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, blocking.
void receive(std::array< T, N > &array, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Helper class for non blocking MPI communication.
void allocate(unsigned i)
Allocates memory.
MpiNonBlockingHelper(const MpiNonBlockingHelper &)=delete
MpiNonBlockingHelper(MpiNonBlockingHelper &&rhs)=default
MpiNonBlockingHelper & operator=(const MpiNonBlockingHelper &)=delete
MPI_Status * get_mpiStatus(int i=0) const
Get the specified status object.
~MpiNonBlockingHelper()=default
MPI_Request * get_mpiRequest(int i=0) const
Get the specified request object.
void swap(MpiNonBlockingHelper &rhs)
Swap method.
unsigned get_size() const
Returns the size of the vector _mpiRequest/_mpiStatus.
Definition of a description of an algorithmic differentiation data type using the forward method.
Top level namespace for all of OpenLB.