80 _ratioFullEmpty = ratioFullEmpty;
82 _cGeometry3d = &cGeometry3d;
85 int nC = _cGeometry3d->
getNc();
86#ifdef PARALLEL_MODE_MPI
94 std::vector<int> cuboidToThread(nC);
95 std::vector<int> partitionResult(nC);
96 std::vector<int> vwgt(nC);
97 std::vector<int> taken(nC, 0);
98 std::vector<int> currentLoad(size, 0);
101 for (
int i = 0; i < nC; ++i) {
102 this->_glob.push_back(i);
111 for (
int iC = 0; iC < nC; iC++) {
113 int fullCells = _cGeometry3d->get(iC).getWeight();
114 vwgt[iC] = int(weightEmpty*(_cGeometry3d->get(iC).getLatticeVolume() - fullCells)) + int(ratioFullEmpty * fullCells);
122 for (
int iC = 0 ; iC < nC; iC++) {
123 if (taken[iC] == 0 && vwgt[iC] > maxLoad) {
130 double minLoad = currentLoad[0];
132 for (
int j = 1; j < size; j++) {
133 if (currentLoad[j] < minLoad) {
134 minLoad = currentLoad[j];
139 currentLoad[minJ] += maxLoad;
140 partitionResult[maxIC] = minJ;
143 while (maxLoad != -1);
145 std::cout <<
"vwgt" << std::endl;
146 for (
int i = 0; i < nC; i++) {
147 std::cout <<
"[" << i <<
"]="<< vwgt[i] << std::endl;
150 for (
int i = 0; i < size; i++) {
151 std::cout <<
"load[" << i <<
"]=" << currentLoad[i] << std::endl;
154 std::cout <<
"vwgt" << std::endl;
155 for (
int i = 0; i < nC; i++) {
156 std::cout << vwgt[i] << std::endl;
158 std::cout <<
"xadj" << std::endl;
159 for (
int i = 0; i < nC+1; i++) {
160 std::cout << xadj[i] << std::endl;
162 std::cout <<
"adjncy" << std::endl;
163 for (
int i = 0; i <adjncy.size(); i++) {
164 std::cout << adjncy[i] << std::endl;
166 std::cout <<
"adjcwgt" << std::endl;
167 for (
int i = 0; i < adjcwgt.size(); i++) {
168 std::cout << adjcwgt[i] << std::endl;
171 std::cout <<
"nC" << nC <<
" size " << size <<
" inbalance " <<
172 inbalance << std::endl;
175 for (
int i = 0; i < nC; ++i) {
176 if (partitionResult[i] == 0) {
177 this->_glob.push_back(i);
178 this->_loc[i] = count;
181 this->_rank[i] = partitionResult[i];
182 cuboidToThread[i] = partitionResult[i];
188#ifdef PARALLEL_MODE_MPI
192 _mpiNbHelper.allocate(size-1);
193 for (
int i = 1; i < size; i++) {
195 nC, i, &_mpiNbHelper.get_mpiRequest()[i-1], 0);
200 int *tmpCuboids =
new int[nC];
203 for (
int i = 0; i < nC; ++i) {
204 if (tmpCuboids[i] == rank) {
205 this->_glob.push_back(i);
206 this->_loc[i] = count;
209 this->_rank[i] = tmpCuboids[i];
223 _ratioFullEmpty = ratioFullEmpty;
225 _cGeometry2d = &cGeometry2d;
228 int nC = _cGeometry2d->
getNc();
229#ifdef PARALLEL_MODE_MPI
237 std::vector<int> cuboidToThread(nC);
238 std::vector<int> partitionResult(nC);
239 std::vector<int> vwgt(nC);
240 std::vector<int> taken(nC, 0);
241 std::vector<int> currentLoad(size, 0);
244 for (
int i = 0; i < nC; ++i) {
245 this->_glob.push_back(i);
254 for (
int iC = 0; iC < nC; iC++) {
256 int fullCells = _cGeometry2d->get(iC).getWeight();
257 vwgt[iC] = int(weightEmpty*(_cGeometry2d->get(iC).getLatticeVolume() - fullCells)) + int(ratioFullEmpty * fullCells);
266 for (
int iC = 0 ; iC < nC; iC++) {
267 if (taken[iC] == 0 && vwgt[iC] > maxLoad) {
274 double minLoad = currentLoad[0];
276 for (
int j = 1; j < size; j++) {
277 if (currentLoad[j] < minLoad) {
278 minLoad = currentLoad[j];
283 currentLoad[minJ] += maxLoad;
284 partitionResult[maxIC] = minJ;
287 while (maxLoad != -1);
289 std::cout <<
"vwgt" << std::endl;
290 for (
int i = 0; i < nC; i++) {
291 std::cout <<
"[" << i <<
"]="<< vwgt[i] << std::endl;
294 for (
int i = 0; i < size; i++) {
295 std::cout <<
"load[" << i <<
"]=" << currentLoad[i] << std::endl;
298 std::cout <<
"vwgt" << std::endl;
299 for (
int i = 0; i < nC; i++) {
300 std::cout << vwgt[i] << std::endl;
302 std::cout <<
"xadj" << std::endl;
303 for (
int i = 0; i < nC+1; i++) {
304 std::cout << xadj[i] << std::endl;
306 std::cout <<
"adjncy" << std::endl;
307 for (
int i = 0; i <adjncy.size(); i++) {
308 std::cout << adjncy[i] << std::endl;
310 std::cout <<
"adjcwgt" << std::endl;
311 for (
int i = 0; i < adjcwgt.size(); i++) {
312 std::cout << adjcwgt[i] << std::endl;
315 std::cout <<
"nC" << nC <<
" size " << size <<
" inbalance " <<
316 inbalance << std::endl;
319 for (
int i = 0; i < nC; ++i) {
320 if (partitionResult[i] == 0) {
321 this->_glob.push_back(i);
322 this->_loc[i] = count;
325 this->_rank[i] = partitionResult[i];
326 cuboidToThread[i] = partitionResult[i];
332#ifdef PARALLEL_MODE_MPI
336 _mpiNbHelper.allocate(size-1);
337 for (
int i = 1; i < size; i++) {
339 nC, i, &_mpiNbHelper.get_mpiRequest()[i-1], 0);
344 int *tmpCuboids =
new int[nC];
347 for (
int i = 0; i < nC; ++i) {
348 if (tmpCuboids[i] == rank) {
349 this->_glob.push_back(i);
350 this->_loc[i] = count;
353 this->_rank[i] = tmpCuboids[i];
void iSend(T *buf, int count, int dest, MPI_Request *request, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Sends the data at *buf; non-blocking.
void receive(T *buf, int count, int source, int tag=0, MPI_Comm comm=MPI_COMM_WORLD)
Receives data into *buf; blocking.