OpenLB 1.7
Loading...
Searching...
No Matches
mpiManager.h
Go to the documentation of this file.
1/* This file is part of the OpenLB library
2 *
3 * Copyright (C) 2007 The OpenLB project
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public
16 * License along with this program; if not, write to the Free
17 * Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
19*/
20
25#ifndef MPI_MANAGER_H
26#define MPI_MANAGER_H
27
29
30#ifdef PARALLEL_MODE_MPI
31#include "mpi.h"
32
33#include <vector>
34#include <memory>
35#endif
36
37#include <string>
38#include "io/ostreamManager.h"
39#include "utilities/aDiff.h"
40
41namespace olb {
42
43template <unsigned D, typename T, typename U>
44class BlockData;
45
46namespace singleton {
47
48#ifdef PARALLEL_MODE_MPI
49
// NOTE(review): Doxygen-extraction residue — each line below carries the
// original header's line number fused to its content, and the opening
// "class MpiNonBlockingHelper {" line was dropped by the extractor, as were
// the special-member declarations at header lines 62/66 (per the page's
// tooltip index: deleted copy ctor/assignment, defaulted move ctor).
// TODO: restore these lines from the original mpiManager.h.
// Helper that owns paired arrays of MPI request/status handles for a batch
// of non-blocking MPI operations (see tooltip "Helper class for non blocking
// MPI communication").
52private:
// Number of request/status slots currently allocated.
54 unsigned _size;
// Owning arrays of MPI handles; unique_ptr gives RAII cleanup.
56 std::unique_ptr<MPI_Request[]> _mpiRequest;
58 std::unique_ptr<MPI_Status[]> _mpiStatus;
59public:
62
66
// Allocates memory for i request/status pairs.
68 void allocate(unsigned i);
// Frees the allocated request/status storage.
70 void free();
71
// Returns the size of the _mpiRequest/_mpiStatus arrays.
73 unsigned get_size() const;
74
// Access the i-th request / status object (defaults to the first).
76 MPI_Request* get_mpiRequest(int i=0) const;
78 MPI_Status* get_mpiStatus(int i=0) const;
79
// Start / wait on / poll the i-th request — presumably persistent
// requests driven via MPI_Start/MPI_Wait/MPI_Test; confirm in the .cpp.
80 void start(int i);
81 void wait(int i);
82 bool isDone(int i);
83
// Swap contents with rhs.
85 void swap(MpiNonBlockingHelper& rhs);
86};
87
89
// NOTE(review): Doxygen-extraction residue — the opening "class MpiManager {"
// line (header line 90) was dropped by the extractor, and every remaining
// line carries the original header's line number fused to its content.
// Wrapper class that simplifies the use of MPI (per the page's tooltip
// index): process queries, blocking/non-blocking point-to-point transfers,
// and collective operations, each with util::ADf<T,DIM> overloads for the
// forward-mode algorithmic-differentiation scalar type.
91public:
// Initializes the mpi manager.
93 void init(int *argc, char ***argv, bool verbose=true);
// Returns the number of processes.
95 int getSize() const;
// Returns the process ID.
97 int getRank() const;
// Returns process ID of main processor.
99 int bossId() const;
// Tells whether current processor is main processor.
101 bool isMainProcessor() const;
// Returns universal MPI-time in seconds.
103 double getTime() const;
104
// Synchronizes the processes.
106 void barrier(MPI_Comm comm = MPI_COMM_WORLD);
107
// Synchronizes the processes and waits to ensure correct cout order.
109 void synchronizeIO(unsigned tDelay = 100, MPI_Comm comm = MPI_COMM_WORLD);
110
// Sends data at *buf, blocking. Container overloads forward data()/size()
// to the pointer overload.
112 template <typename T>
113 void send(T *buf, int count, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
114 template <typename T,unsigned DIM>
115 void send(util::ADf<T,DIM> *buf, int count, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
116 template<typename... args>
117 void send(std::vector<args...>& vec, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
118 send( vec.data(), vec.size(), dest, tag, comm );
119 }
120 template<class T, std::size_t N>
121 void send(std::array<T,N>& array, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
122 send( array.data(), array.size(), dest, tag, comm );
123 }
124
// Initialize persistent non-blocking send.
126 template <typename T>
127 void sendInit(T *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
128 template <typename T,unsigned DIM>
129 void sendInit(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
130
// Sends data at *buf, non blocking.
132 template <typename T>
133 void iSend(T *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
134 template <typename T,unsigned DIM>
135 void iSend(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
136
// Sends data at *buf, non blocking and buffered.
138 template <typename T>
139 void ibSend(T *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
140 template <typename T,unsigned DIM>
141 void ibSend(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
142
// Probe size of incoming message (by MPI datatype, or by C++ TYPE).
144 std::size_t probeReceiveSize(int source, MPI_Datatype type, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
146 template <typename TYPE>
147 std::size_t probeReceiveSize(int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
148
// Receives data at *buf, blocking. Container overloads forward
// data()/size() to the pointer overload.
150 template <typename T>
151 void receive(T *buf, int count, int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
152 template <typename T,unsigned DIM>
153 void receive(util::ADf<T,DIM> *buf, int count, int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
154 template<typename... args>
155 void receive(std::vector<args...>& vec, int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
156 receive( vec.data(), vec.size(), source, tag, comm );
157 }
158 template<class T, std::size_t N>
159 void receive(std::array<T,N>& array, int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD){
160 receive( array.data(), array.size(), source, tag, comm );
161 }
162
// Initialize persistent non-blocking receive.
// NOTE(review): the second parameter is named `dest` although this is a
// receive — presumably the peer rank; confirm against the .cpp.
164 template <typename T>
165 void recvInit(T *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
166 template <typename T,unsigned DIM>
167 void recvInit(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
168
// Receives data at *buf, non blocking.
170 template <typename T>
171 void iRecv(T *buf, int count, int source, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
172 template <typename T,unsigned DIM>
173 void iRecv(util::ADf<T,DIM> *buf, int count, int source, MPI_Request* request, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
174
// Send and receive data between two partners.
176 template <typename T>
177 void sendRecv(T *sendBuf, T *recvBuf, int count, int dest, int source, int tag = 0,
178 MPI_Comm comm = MPI_COMM_WORLD);
179 template <typename T,unsigned DIM>
180 void sendRecv(util::ADf<T,DIM> *sendBuf, util::ADf<T,DIM> *recvBuf, int count,
181 int dest, int source, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD);
182
// Sends data to master processor.
184 template <typename T>
185 void sendToMaster(T* sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm = MPI_COMM_WORLD);
186
// Scatter data from one processor over multiple processors.
188 template <typename T>
189 void scatterv(T *sendBuf, int* sendCounts, int* displs,
190 T* recvBuf, int recvCount, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
191
// Gather data from multiple processors to one processor.
193 template <typename T>
194 void gather(T* sendBuf, int sendCount, T* recvBuf, int recvCount,
195 int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
196
// Gather data from multiple processors to one processor (variable counts).
198 template <typename T>
199 void gatherv(T* sendBuf, int sendCount, T* recvBuf, int* recvCounts, int* displs,
200 int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
201
// Broadcast data from one processor to multiple processors.
203 template <typename T>
204 void bCast(T* sendBuf, int sendCount, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
205 template <typename T,unsigned DIM>
206 void bCast(util::ADf<T,DIM>* sendBuf, int sendCount, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
207 template <typename T,unsigned DIM>
208 void bCast(BlockData<2,util::ADf<T,DIM>,util::ADf<T,DIM>>& sendData, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
209 template <typename T>
210 void bCast(T& sendVal, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
211
// Broadcast data when root is unknown to other processors.
213 template <typename T>
214 void bCastThroughMaster(T* sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm = MPI_COMM_WORLD);
215 template <typename T,unsigned DIM>
216 void bCastThroughMaster(util::ADf<T,DIM>* sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm = MPI_COMM_WORLD);
217
// Special case for broadcasting strings (memory handling is automatic)
// and BlockData fields.
219 void bCast(std::string& message, int root = 0);
221 void bCast(BlockData<2,double,double>& sendData, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
223 void bCast(BlockData<2,float,float>& sendData, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
224
// Reduction operation toward one processor.
226 template <typename T>
227 void reduce(T& sendVal, T& recvVal, MPI_Op op, int root = 0, MPI_Comm = MPI_COMM_WORLD);
228 template <typename T,unsigned DIM>
229 void reduce(util::ADf<T,DIM>& sendVal, util::ADf<T,DIM>& recvVal,
230 MPI_Op op, int root = 0, MPI_Comm = MPI_COMM_WORLD);
// NOTE(review): Doxygen extraction dropped header lines 232-233 here — the
// leading parameters of this reduce overload (presumably BlockData
// send/recv arguments, matching the surrounding overload set). TODO:
// restore from the original mpiManager.h.
231 template <typename T,unsigned DIM>
234 MPI_Op op, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
235
// Element-per-element reduction of a vector of data.
237 template <typename T>
238 void reduceVect(std::vector<T>& sendVal, std::vector<T>& recvVal,
239 MPI_Op op, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
240
// Reduction operation, followed by a broadcast.
242 template <typename T>
243 void reduceAndBcast(T& reductVal, MPI_Op op, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
244 template <typename T,unsigned DIM>
245 void reduceAndBcast(util::ADf<T,DIM>& reductVal, MPI_Op op, int root = 0, MPI_Comm comm = MPI_COMM_WORLD);
246
// Complete a non-blocking MPI operation.
248 void wait(MPI_Request* request, MPI_Status* status);
249
// Complete a series of non-blocking MPI operations.
251 void waitAll(MpiNonBlockingHelper& mpiNbHelper);
252
// Private ctor/dtor: instances are only reachable through the friend
// accessor mpi() declared below (singleton access pattern).
253private:
254 MpiManager();
255 ~MpiManager();
256private:
// Cached process count and own rank.
257 int numTasks, taskId;
// Whether MPI was successfully initialized — presumably set by init();
// confirm in the .cpp.
258 bool ok;
// Output channel for status/diagnostic messages.
259 mutable OstreamManager clout;
260
261 friend MpiManager& mpi();
262};
263
264#else
265
/// Dummy single-process stand-in for MpiManager, used when OpenLB is built
/// without PARALLEL_MODE_MPI.
///
/// Mirrors the query interface of the MPI-enabled class so call sites
/// compile unchanged: exactly one "process" exists, its rank is 0, and it is
/// always the main processor. All synchronization calls are no-ops.
/// (This block also repairs Doxygen-extraction residue: fused line numbers
/// and a stray `;` after barrier's empty body.)
class MpiManager {
public:
  /// Initializes the mpi manager — nothing to do without MPI.
  void init(int *argc, char ***argv, bool verbose=false) { }
  /// Returns the number of processes: always 1 without MPI.
  int getSize() const
  {
    return 1;
  }
  /// Returns the process ID: always 0 without MPI.
  int getRank() const
  {
    return 0;
  }
  /// Returns process ID of main processor: always 0 without MPI.
  int bossId() const
  {
    return 0;
  }
  /// Tells whether current processor is main processor: always true.
  bool isMainProcessor() const
  {
    return true;
  }

  /// Synchronizes the processes — no-op without MPI.
  void barrier() const {}

  friend MpiManager& mpi();
};
296
297#endif
298
299MpiManager& mpi();
300
301} // namespace singleton
302
303} // namespace olb
304
305
306#endif // MPI_MANAGER_H
The description of an algorithmic differentiation data type using the forward method – header file.
class for marking output with some text
Wrapper functions that simplify the use of MPI.
Definition mpiManager.h:90
void gather(T *sendBuf, int sendCount, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gather data from multiple processors to one processor.
void bCast(T &sendVal, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
void wait(MPI_Request *request, MPI_Status *status)
Complete a non-blocking MPI operation.
void ibSend(util::ADf< T, DIM > *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
void send(T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, blocking.
void bCast(T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data from one processor to multiple processors.
int getSize() const
Returns the number of processes.
void iSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non blocking.
void reduce(T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
Reduction operation toward one processor.
void bCast(std::string &message, int root=0)
Special case for broadcasting strings. Memory handling is automatic.
double getTime() const
Returns universal MPI-time in seconds.
bool isMainProcessor() const
Tells whether current processor is main processor.
void gatherv(T *sendBuf, int sendCount, T *recvBuf, int *recvCounts, int *displs, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Gather data from multiple processors to one processor.
void synchronizeIO(unsigned tDelay=100, MPI_Comm comm=MPI_COMM_WORLD)
Synchronizes the processes and wait to ensure correct cout order.
void reduceVect(std::vector< T > &sendVal, std::vector< T > &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Element-per-element reduction of a vector of data.
void ibSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non blocking and buffered.
void reduceAndBcast(T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Reduction operation, followed by a broadcast.
int getRank() const
Returns the process ID.
std::size_t probeReceiveSize(int source, MPI_Datatype type, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Probe size of incoming message.
void send(std::array< T, N > &array, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Definition mpiManager.h:121
std::size_t probeReceiveSize(int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Probe size of incoming message with TYPE.
void send(std::vector< args... > &vec, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Definition mpiManager.h:117
void waitAll(MpiNonBlockingHelper &mpiNbHelper)
Complete a series of non-blocking MPI operations.
void barrier(MPI_Comm comm=MPI_COMM_WORLD)
Synchronizes the processes.
void bCastThroughMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data when root is unknown to other processors.
void iRecv(T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, non blocking.
void init(int *argc, char ***argv, bool verbose=true)
Initializes the mpi manager.
void recvInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking receive.
void scatterv(T *sendBuf, int *sendCounts, int *displs, T *recvBuf, int recvCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Scatter data from one processor over multiple processors.
void sendInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking send.
int bossId() const
Returns process ID of main processor.
friend MpiManager & mpi()
void sendToMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Sends data to master processor.
void sendRecv(T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send and receive data between two partners.
void receive(std::vector< args... > &vec, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Definition mpiManager.h:155
void receive(T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, blocking.
void receive(std::array< T, N > &array, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Definition mpiManager.h:159
Helper class for non blocking MPI communication.
Definition mpiManager.h:51
void allocate(unsigned i)
Allocates memory.
MpiNonBlockingHelper(const MpiNonBlockingHelper &)=delete
MpiNonBlockingHelper(MpiNonBlockingHelper &&rhs)=default
MpiNonBlockingHelper & operator=(const MpiNonBlockingHelper &)=delete
MPI_Status * get_mpiStatus(int i=0) const
Get the specified status object.
MPI_Request * get_mpiRequest(int i=0) const
Get the specified request object.
void swap(MpiNonBlockingHelper &rhs)
Swap method.
unsigned get_size() const
Returns the size of the vector _mpiRequest/_mpiStatus.
Definition of a description of an algorithmic differentiation data type using the forward method.
Definition aDiff.h:64
MpiManager & mpi()
Top level namespace for all of OpenLB.