OpenLB 1.7
mpiManagerAD.hh
/* This file is part of the OpenLB library
 *
 * Copyright (C) 2012 Mathias J. Krause
 * E-mail contact: info@openlb.net
 * The most recent release of OpenLB can be downloaded at
 * <http://www.openlb.net/>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
*/

#ifndef MPI_MANAGER_AD_HH
#define MPI_MANAGER_AD_HH

#ifdef PARALLEL_MODE_MPI


//#include <adolc/adouble.h>
#include "utilities/aDiff.h"

//using namespace adtl;


namespace olb {

namespace singleton {

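// The overloads below mirror MpiManager's generic communication methods for the
// forward-mode AD type util::ADf<T,DIM>. Each ADf element is transferred as a run
// of MPI_DOUBLE entries (its primal value followed by its DIM derivative
// components), which is why the element counts passed to MPI are scaled by
// sizeof(util::ADf<T,DIM>)/8, i.e. the number of 8-byte entries per ADf value.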
template <typename T, unsigned DIM>
void MpiManager::send(util::ADf<T,DIM> *buf, int count, int dest, int tag, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  MPI_Send(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm);
}

template <typename T, unsigned DIM>
void MpiManager::sendInit(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Send_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
  }
}

template <typename T, unsigned DIM>
void MpiManager::receive(util::ADf<T,DIM> *buf, int count, int source, int tag, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Recv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, &status);
}

template <typename T, unsigned DIM>
void MpiManager::recvInit(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Recv_init(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
  }
}

template <typename T, unsigned DIM>
void MpiManager::iSend(util::ADf<T,DIM> *buf, int count, int dest, MPI_Request* request, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Isend(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
  }
}



template <typename T, unsigned DIM>
void MpiManager::iRecv(util::ADf<T,DIM> *buf, int count, int source, MPI_Request* request, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Irecv(static_cast<void*>(buf), (sizeof(util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, request);
  }
}

template <typename T, unsigned DIM>
void MpiManager::sendRecv(util::ADf<T,DIM> *sendBuf, util::ADf<T,DIM> *recvBuf, int count, int dest, int source, int tag, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  MPI_Status status;
  MPI_Sendrecv(static_cast<void*>(sendBuf),
               (sizeof(util::ADf<T,DIM>)/8)*count,
               MPI_DOUBLE, dest, tag,
               static_cast<void*>(recvBuf),
               (sizeof(util::ADf<T,DIM>)/8)*count,
               MPI_DOUBLE, source, tag, comm, &status);
}


template <typename T, unsigned DIM>
void MpiManager::bCast(util::ADf<T,DIM>* sendBuf, int sendCount, int root, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  MPI_Bcast(static_cast<void*>(sendBuf),
            (sizeof(util::ADf<T,DIM>)/8)*sendCount, MPI_DOUBLE, root, comm);
}

// NOTE: the BlockData parameter type in this signature is an assumption inferred
// from the function body (getSize/getColumn/getNcells); verify against the
// original header.
template <typename T, unsigned DIM>
void MpiManager::bCast(BlockData<2,util::ADf<T,DIM>,util::ADf<T,DIM>>& sendData,
                       int root, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
//  MPI_Bcast(static_cast<void*>(sendData.getRawData()),
//            sendData.getDataSize(), MPI_DOUBLE, root, comm);
  for (unsigned iD=0; iD < sendData.getSize(); ++iD) {
    MPI_Bcast(static_cast<void*>(sendData.getColumn(iD).data()),
              sendData.getNcells(), MPI_DOUBLE, root, comm);
  }
}

// Reduce a single ADf value: the primal part v() and each derivative component
// d(i) are reduced separately as plain doubles.
template <typename T, unsigned DIM>
void MpiManager::reduce(util::ADf<T,DIM>& sendVal, util::ADf<T,DIM>& recvVal, MPI_Op op, int root, MPI_Comm comm)
{
  if (!ok) {
    return;
  }

  int sizeADouble = sizeof(util::ADf<T,DIM>)/8-1;  // number of derivative components

  MPI_Reduce(static_cast<void*>(&sendVal.v()),
             static_cast<void*>(&recvVal.v()), 1, MPI_DOUBLE, op, root, comm);

  for (int i=0; i<sizeADouble; i++) {
    MPI_Reduce(static_cast<void*>(&sendVal.d(i)),
               static_cast<void*>(&recvVal.d(i)), 1, MPI_DOUBLE, op, root, comm);
  }
}

// NOTE: as in the bCast overload above, the BlockData parameter types in this
// signature are inferred from the function body; verify against the original header.
template <typename T, unsigned DIM>
void MpiManager::reduce(BlockData<2,util::ADf<T,DIM>,util::ADf<T,DIM>>& sendVal,
                        BlockData<2,util::ADf<T,DIM>,util::ADf<T,DIM>>& recvVal,
                        MPI_Op op, int root, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
//  MPI_Reduce(static_cast<void*>(sendVal.getRawData()),
//             static_cast<void*>(recvVal.getRawData()),
//             sendVal.getDataSize(), MPI_DOUBLE, op, root, comm);
  for (unsigned iD=0; iD < sendVal.getSize(); ++iD) {
    MPI_Reduce(static_cast<void*>(sendVal.getColumn(iD).data()),
               static_cast<void*>(recvVal.getColumn(iD).data()),
               sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
  }
}


template <typename T, unsigned DIM>
void MpiManager::bCastThroughMaster(util::ADf<T,DIM>* sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  if (iAmRoot && !isMainProcessor()) {
    send(sendBuf, sendCount, 0);
  }
  if (isMainProcessor() && !iAmRoot) {
    receive(sendBuf, sendCount, MPI_ANY_SOURCE);
  }
  bCast(sendBuf, sendCount, 0);
}

template <typename T, unsigned DIM>
void MpiManager::reduceAndBcast(util::ADf<T,DIM>& reductVal, MPI_Op op, int root, MPI_Comm comm)
{
  if (!ok) {
    return;
  }
  util::ADf<T,DIM> recvVal;
  reduce(reductVal, recvVal, op, root, comm);

  //MPI_Reduce(&reductVal, &recvVal, 1, MPI_DOUBLE, op, root, comm);
  reductVal = recvVal;
  bCast(&reductVal, 1, root, comm);

  //MPI_Bcast(&reductVal, 1, MPI_DOUBLE, root, comm);

}


/*

template <typename T>
void MpiManager::wait(MPI_Request* request, MPI_Status* status, void* ptr, T* writeBack, int writeBackSize)
{
  if (!ok) return;
  MPI_Wait(request, status);

  if (ptr!=NULL) {
    delete [] ptr;
    ptr = NULL;
  }
}
*/



} // namespace singleton


} // namespace olb


#endif // PARALLEL_MODE_MPI

#endif // MPI_MANAGER_AD_HH
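
As a usage sketch (not part of this header; the include, the function name, and the
sample values below are illustrative assumptions), an application built with
PARALLEL_MODE_MPI can combine an ADf quantity across all ranks through the overloads
above, so that the primal value and every derivative component are reduced together:

  // Hypothetical example: summing a per-rank objective value and its derivatives.
  #include "olb3D.h"

  void accumulateObjective()
  {
    using AD = olb::util::ADf<double,2>;   // primal value + 2 derivative components

    AD localObjective(1.0);                // per-rank contribution (illustrative)
    localObjective.d(0) = 0.5;             // derivative w.r.t. the first design variable
    localObjective.d(1) = 0.25;            // derivative w.r.t. the second design variable

    // Uses the ADf overload of reduceAndBcast() defined in this file: v() and each
    // d(i) are reduced as MPI_DOUBLE and the result is broadcast back to all ranks.
    olb::singleton::mpi().reduceAndBcast(localObjective, MPI_SUM);
  }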
Referenced declarations (from mpiManager.h and utilities/aDiff.h):

Description of an algorithmic differentiation data type using the forward method – header file.
void send(T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, blocking.
void bCast(T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data from one processor to multiple processors.
void iSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non-blocking.
void reduce(T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm=MPI_COMM_WORLD)
Reduction operation toward one processor.
bool isMainProcessor() const
Tells whether the current processor is the main processor.
void reduceAndBcast(T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Reduction operation, followed by a broadcast.
void bCastThroughMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data when the root is unknown to the other processors.
void iRecv(T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, non-blocking.
void recvInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking receive.
void sendInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking send.
void sendRecv(T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send and receive data between two partners.
void receive(T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, blocking.
Definition of an algorithmic differentiation data type using the forward method (util::ADf).
Definition aDiff.h:64
constexpr T & v()
Definition aDiff.h:246
constexpr T & d(unsigned i)
Definition aDiff.h:252
Wrapper functions that simplify the use of MPI.
Top level namespace for all of OpenLB.
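
The blocking point-to-point calls documented above can be used with ADf buffers in
the same way as with plain scalars; a brief sketch (rank numbers, buffer size, and
the helper function name are illustrative assumptions):

  #include "olb3D.h"
  #include <vector>

  void exchangeADfBuffer()
  {
    using AD = olb::util::ADf<double,1>;
    olb::singleton::MpiManager& mpi = olb::singleton::mpi();

    std::vector<AD> buffer(16);            // 16 ADf values = 32 MPI_DOUBLE entries

    if (mpi.getRank() == 0) {
      // blocking send to rank 1 (default tag 0, default MPI_COMM_WORLD)
      mpi.send(buffer.data(), static_cast<int>(buffer.size()), 1);
    }
    else if (mpi.getRank() == 1) {
      // matching blocking receive from rank 0
      mpi.receive(buffer.data(), static_cast<int>(buffer.size()), 0);
    }
  }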