OpenLB 1.7
blockCommunicator.hh
/* This file is part of the OpenLB library
 *
 * Copyright (C) 2021 Adrian Kummerlaender
 * E-mail contact: info@openlb.net
 * The most recent release of OpenLB can be downloaded at
 * <http://www.openlb.net/>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
*/

#ifndef BLOCK_COMMUNICATOR_HH
#define BLOCK_COMMUNICATOR_HH

#include "blockCommunicator.h"

#include "mpiRequest.h"
#include "communicatable.h"

#ifdef PLATFORM_GPU_CUDA
#endif

namespace olb {

#ifdef PARALLEL_MODE_MPI

/// Wrapper for a non-blocking block propagation send request
template <typename BLOCK>
class ConcreteBlockCommunicator<BLOCK>::SendTask {
private:
  const std::vector<CellID>& _cells;

  MultiConcreteCommunicatable<BLOCK> _source;

  std::unique_ptr<std::uint8_t[]> _buffer;
  MpiSendRequest _request;

public:
  SendTask(MPI_Comm comm, int tag, int rank,
           const std::vector<std::type_index>& fields,
           const std::vector<CellID>& cells,
           BLOCK& block):
    _cells(cells),
    _source(block, fields),
    _buffer(new std::uint8_t[_source.size(_cells)] { }),
    _request(_buffer.get(), _source.size(_cells),
             rank, tag, comm)
  { }

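  /// Serialize the overlap cells into the send buffer and start the non-blocking MPI send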
  void send()
  {
    _source.serialize(_cells, _buffer.get());
    _request.start();
  }

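  /// Block until the non-blocking send request has completed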
  void wait()
  {
    _request.wait();
  }
};

/// Wrapper for a non-blocking block propagation receive request
template <typename BLOCK>
class ConcreteBlockCommunicator<BLOCK>::RecvTask {
private:
  const int _tag;
  const int _rank;
  const std::vector<CellID>& _cells;

  MultiConcreteCommunicatable<BLOCK> _target;

  std::unique_ptr<std::uint8_t[]> _buffer;
  MpiRecvRequest _request;

public:
  /// Manual replacement for std::reference_wrapper<RecvTask>
  class ref {
  private:
    RecvTask& _task;
  public:
    ref(RecvTask& task): _task(task) { };

    RecvTask* operator->() const
    {
      return &_task;
    }

    bool operator <(const ref& rhs) const
    {
      return _task < rhs._task;
    }
  };

  RecvTask(MPI_Comm comm, int tag, int rank,
           const std::vector<std::type_index>& fields,
           const std::vector<CellID>& cells,
           BLOCK& block):
    _tag(tag),
    _rank(rank),
    _cells(cells),
    _target(block, fields),
    _buffer(new std::uint8_t[_target.size(_cells)] { }),
    _request(_buffer.get(), _target.size(_cells),
             _rank, _tag, comm)
  { }

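  /// Order receive tasks by source rank, then by tag (enables storage in std::set)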
  bool operator<(const RecvTask& rhs) const
  {
    return _rank < rhs._rank
        || (_rank == rhs._rank && _tag < rhs._tag);
  }

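  /// Start the non-blocking MPI receive into the internal buffer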
  void receive()
  {
    _request.start();
  };

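  /// Check whether the pending receive request has completed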
  bool isDone()
  {
    return _request.isDone();
  }

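  /// Deserialize the received buffer into the target block's overlap cells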
  void unpack()
  {
    _target.deserialize(_cells, _buffer.get());
  }
};

#else // not using PARALLEL_MODE_MPI

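/// Copies overlap data directly between two local blocks when MPI is disabled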
template <typename BLOCK>
class ConcreteBlockCommunicator<BLOCK>::CopyTask {
private:
  const std::vector<CellID>& _targetCells;
  const std::vector<CellID>& _sourceCells;

  MultiConcreteCommunicatable<BLOCK> _target;
  MultiConcreteCommunicatable<BLOCK> _source;

  std::unique_ptr<std::uint8_t[]> _buffer;

public:
  CopyTask(
    const std::vector<std::type_index>& fields,
    const std::vector<CellID>& targetCells, BLOCK& target,
    const std::vector<CellID>& sourceCells, BLOCK& source):
    _targetCells(targetCells),
    _sourceCells(sourceCells),
    _target(target, fields),
    _source(source, fields),
    _buffer(new std::uint8_t[_source.size(_sourceCells)] { })
  {
    OLB_ASSERT(_sourceCells.size() == _targetCells.size(),
               "Source cell count must match target cell count");
  }

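  /// Serialize the source cells into the buffer and deserialize them into the target cells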
  void copy()
  {
    _source.serialize(_sourceCells, _buffer.get());
    _target.deserialize(_targetCells, _buffer.get());
  };
};

#endif

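/// Set up the communication tasks of block iC according to the given neighborhood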
template <typename BLOCK>
template <typename T, typename SUPER>
ConcreteBlockCommunicator<BLOCK>::ConcreteBlockCommunicator(
  SUPER& super,
  LoadBalancer<T>& loadBalancer,
#ifdef PARALLEL_MODE_MPI
  SuperCommunicationTagCoordinator<T>& tagCoordinator,
  MPI_Comm comm,
#endif
  int iC,
  const BlockCommunicationNeighborhood<T,SUPER::d>& neighborhood):
  _iC(iC)
#ifdef PARALLEL_MODE_MPI
, _mpiCommunicator(comm)
#endif
{
#ifdef PARALLEL_MODE_MPI
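  // For each neighboring cuboid, register send tasks for outbound overlap cells and
  // receive tasks for inbound overlap cells, using tags negotiated by the tagCoordinator.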
  neighborhood.forNeighbors([&](int remoteC) {
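    // If the neighbor block is local to this process and resides on a GPU, send tasks are
    // only set up for SuperGeometry data; receive tasks are registered in either case.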
    if (loadBalancer.isLocal(remoteC) && loadBalancer.platform(loadBalancer.loc(remoteC)) == Platform::GPU_CUDA) {
      if constexpr (std::is_same_v<SUPER, SuperGeometry<T,SUPER::d>>) {
        if (!neighborhood.getCellsOutboundTo(remoteC).empty()) {
          _sendTasks.emplace_back(_mpiCommunicator, tagCoordinator.get(loadBalancer.glob(_iC), remoteC),
                                  loadBalancer.rank(remoteC),
                                  neighborhood.getFieldsCommonWith(remoteC),
                                  neighborhood.getCellsOutboundTo(remoteC),
                                  super.template getBlock<BLOCK>(_iC));
        }
      }
      if (!neighborhood.getCellsInboundFrom(remoteC).empty()) {
        _recvTasks.emplace_back(_mpiCommunicator, tagCoordinator.get(remoteC, loadBalancer.glob(_iC)),
                                loadBalancer.rank(remoteC),
                                neighborhood.getFieldsCommonWith(remoteC),
                                neighborhood.getCellsInboundFrom(remoteC),
                                super.template getBlock<BLOCK>(_iC));
      }
    } else {
      if (!neighborhood.getCellsOutboundTo(remoteC).empty()) {
        _sendTasks.emplace_back(_mpiCommunicator, tagCoordinator.get(loadBalancer.glob(_iC), remoteC),
                                loadBalancer.rank(remoteC),
                                neighborhood.getFieldsCommonWith(remoteC),
                                neighborhood.getCellsOutboundTo(remoteC),
                                super.template getBlock<BLOCK>(_iC));
      }
      if (!neighborhood.getCellsInboundFrom(remoteC).empty()) {
        _recvTasks.emplace_back(_mpiCommunicator, tagCoordinator.get(remoteC, loadBalancer.glob(_iC)),
                                loadBalancer.rank(remoteC),
                                neighborhood.getFieldsCommonWith(remoteC),
                                neighborhood.getCellsInboundFrom(remoteC),
                                super.template getBlock<BLOCK>(_iC));
      }
    }
  });

#else // not using PARALLEL_MODE_MPI
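  // Without MPI every neighbor block is local, so a direct copy task per neighbor suffices.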
  neighborhood.forNeighbors([&](int localC) {
    if (!neighborhood.getCellsInboundFrom(localC).empty()) {
      _copyTasks.emplace_back(neighborhood.getFieldsCommonWith(localC),
                              neighborhood.getCellsInboundFrom(localC), super.template getBlock<BLOCK>(_iC),
                              neighborhood.getCellsRequestedFrom(localC), super.template getBlock<BLOCK>(loadBalancer.loc(localC)));
    }
  });
#endif
}

#ifdef PARALLEL_MODE_MPI

template <typename BLOCK>
void ConcreteBlockCommunicator<BLOCK>::receive()
{
  for (auto& task : _recvTasks) {
    task.receive();
  }
}

template <typename BLOCK>
void ConcreteBlockCommunicator<BLOCK>::send()
{
  for (auto& task : _sendTasks) {
    task.send();
  }
}

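/// Repeatedly poll the pending receive requests, unpacking each one as soon as it completes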
template <typename BLOCK>
void ConcreteBlockCommunicator<BLOCK>::unpack()
{
  std::set<typename RecvTask::ref> pending(_recvTasks.begin(), _recvTasks.end());
  while (!pending.empty()) {
    auto task_iterator = pending.begin();
    while (task_iterator != pending.end()) {
      auto& task = *task_iterator;
      if (task->isDone()) {
        task->unpack();
        task_iterator = pending.erase(task_iterator);
      }
      else {
        ++task_iterator;
      }
    }
  }
}

template <typename BLOCK>
void ConcreteBlockCommunicator<BLOCK>::wait()
{
  for (auto& task : _sendTasks) {
    task.wait();
  }
}

#else // not using PARALLEL_MODE_MPI

template <typename BLOCK>
void ConcreteBlockCommunicator<BLOCK>::copy()
{
  for (auto& task : _copyTasks) {
    task.copy();
  }
}

#endif

}

#endif