28#ifndef MPI_MANAGER_AD_HH
29#define MPI_MANAGER_AD_HH
31#ifdef PARALLEL_MODE_MPI
48template <
typename T,
unsigned DIM>
54 MPI_Send(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm);
57template <
typename T,
unsigned DIM>
61 MPI_Send_init(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
65template <
typename T,
unsigned DIM>
72 MPI_Recv(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, &status);
75template <
typename T,
unsigned DIM>
79 MPI_Recv_init(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
84template <
typename T,
unsigned DIM>
86(
util::ADf<T,DIM> *buf,
int count,
int dest, MPI_Request* request,
int tag, MPI_Comm comm)
89 MPI_Isend(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, dest, tag, comm, request);
97template <
typename T,
unsigned DIM>
101 MPI_Irecv(
static_cast<void*
>(buf), (
sizeof(
util::ADf<T,DIM>)/8)*count, MPI_DOUBLE, source, tag, comm, request);
105template <
typename T,
unsigned DIM>
112 MPI_Sendrecv(
static_cast<void*
>(sendBuf),
114 MPI_DOUBLE, dest, tag,
115 static_cast<void*
>(recvBuf),
117 MPI_DOUBLE, source, tag, comm, &status);
123template <
typename T,
unsigned DIM>
129 MPI_Bcast(
static_cast<void*
>(sendBuf),
133template <
typename T,
unsigned DIM>
136 int root, MPI_Comm comm)
143 for (
unsigned iD=0; iD < sendData.getSize(); ++iD) {
144 MPI_Bcast(
static_cast<void*
>(sendData.getColumn(iD).data()),
145 sendData.getNcells(), MPI_DOUBLE, root, comm);
149template <
typename T,
unsigned DIM>
158 MPI_Reduce(
static_cast<void*
>(&sendVal.
v()),
159 static_cast<void*
>(&recvVal.
v()), 1, MPI_DOUBLE, op, root, comm);
161 for (
int i=0; i<sizeADouble; i++) {
162 MPI_Reduce(
static_cast<void*
>(&sendVal.
d(i)),
163 static_cast<void*
>(&recvVal.
d(i)), 1, MPI_DOUBLE, op, root, comm);
167template <
typename T,
unsigned DIM>
171 MPI_Op op,
int root, MPI_Comm comm)
179 for (
unsigned iD=0; iD < sendVal.getSize(); ++iD) {
180 MPI_Reduce(
static_cast<void*
>(sendVal.getColumn(iD).data()),
181 static_cast<void*
>(recvVal.getColumn(iD).data()),
182 sendVal.getNcells(), MPI_DOUBLE, op, root, comm);
187template <
typename T,
unsigned DIM>
194 send(sendBuf, sendCount, 0);
197 receive(sendBuf, sendCount, MPI_ANY_SOURCE);
199 bCast(sendBuf, sendCount, 0);
202template <
typename T,
unsigned DIM>
209 reduce(reductVal, recvVal, op, root, comm);
213 bCast(&reductVal, 1, root, comm);
The description of an algorithmic differentiation data type using the forward method – header file.
void send(T *buf, int count, int dest, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, blocking.
void bCast(T *sendBuf, int sendCount, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data from one processor to multiple processors.
void iSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends data at *buf, non blocking.
void reduce(T &sendVal, T &recvVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Reduction operation toward one processor.
bool isMainProcessor() const
Tells whether current processor is main processor.
void reduceAndBcast(T &reductVal, MPI_Op op, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Reduction operation, followed by a broadcast.
void bCastThroughMaster(T *sendBuf, int sendCount, bool iAmRoot, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast data when root is unknown to other processors.
void iRecv(T *buf, int count, int source, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, non blocking.
void recvInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking receive.
void sendInit(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Initialize persistent non-blocking send.
void sendRecv(T *sendBuf, T *recvBuf, int count, int dest, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Send and receive data between two partners.
void receive(T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data at *buf, blocking.
Definition of a description of an algorithmic differentiation data type using the forward method.
constexpr T & d(unsigned i)
Wrapper functions that simplify the use of MPI.
Top level namespace for all of OpenLB.