Dune Core Modules (2.7.0)

torus.hh
Go to the documentation of this file.
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_GRID_YASPGRID_TORUS_HH
4#define DUNE_GRID_YASPGRID_TORUS_HH
5
6#include <array>
7#include <bitset>
8#include <cmath>
9#include <deque>
10#include <iostream>
11#include <vector>
12
13#if HAVE_MPI
14#include <mpi.h>
15#endif
16
19#include <dune/grid/common/exceptions.hh>
20
21#include "partitioning.hh"
22
27namespace Dune
28{
29
// NOTE(review): this is a Doxygen-rendered code listing of torus.hh; the number
// at the start of each line is the original source line number, and several
// declaration lines were lost in the extraction (flagged with NOTE(review)
// below, reconstructed from the documentation index further down the page).
//
// Torus: a d-dimensional processor torus built on top of a
// CollectiveCommunication object. Ranks are mapped to torus coordinates by
// lexicographic ordering (via _increment); the class builds full send/receive
// neighbor lists (proclists) and performs buffered message exchange using
// nonblocking MPI (MPI_Isend/MPI_Irecv/MPI_Waitall) plus memcpy for
// self-sends.
43 template<class CollectiveCommunication, int d>
44 class Torus {
45 public:
// type used to pass coordinate/size tuples in and out
47 typedef std::array<int, d> iTupel;
48
49
50 private:
// one entry of the send/receive neighbor lists built by proclists()
51 struct CommPartner {
52 int rank;
53 iTupel delta;
54 int index;
55 };
56
// one pending buffered send/receive queued by send()/recv()
57 struct CommTask {
58 int rank; // process to send to / receive from
59 int size; // size of buffer
60 void *buffer; // buffer to send / receive
61 };
62
63 public:
// NOTE(review): the default-constructor signature ("Torus()", listing line 65
// per the documentation index) was lost in extraction; only the empty body remains.
66 {}
67
// NOTE(review): signature lost in extraction; per the documentation index this is
// Torus(CollectiveCommunication comm, int tag, iTupel size, const YLoadBalance<d>* lb)
// (listing line 69): make the partitioner from a communicator and coarse mesh size.
70 : _comm(comm), _tag(tag)
71 {
72 // determine dimensions
73 lb->loadbalance(size, _comm.size(), _dims);
74
75 // compute increments for lexicographic ordering
76 int inc = 1;
77 for (int i=0; i<d; i++)
78 {
79 _increment[i] = inc;
80 inc *= _dims[i];
81 }
82
83 // check whether the load balancing matches the size of the communicator
84 if (inc != _comm.size())
85 DUNE_THROW(Dune::Exception, "Communicator size and result of the given load balancer do not match!");
86
87 // make full schedule
88 proclists();
89 }
90
// return own rank in the communicator (0 <= rank < procs())
92 int rank () const
93 {
94 return _comm.rank();
95 }
96
// return own coordinate in the torus
98 iTupel coord () const
99 {
100 return rank_to_coord(_comm.rank());
101 }
102
// return total number of processes
104 int procs () const
105 {
106 return _comm.size();
107 }
108
// return extents of the torus in all d directions
110 const iTupel & dims () const
111 {
112 return _dims;
113 }
114
// return extent of the torus in direction i
116 int dims (int i) const
117 {
118 return _dims[i];
119 }
120
// NOTE(review): signature lost in extraction; per the documentation index this is
// CollectiveCommunication comm() const (listing line 122): return the communicator.
123 {
124 return _comm;
125 }
126
// return the message tag used for all torus communication
128 int tag () const
129 {
130 return _tag;
131 }
132
// true iff 0 <= c[i] < _dims[i] in every direction
134 bool inside (iTupel c) const
135 {
136 for (int i=d-1; i>=0; i--)
137 if (c[i]<0 || c[i]>=_dims[i]) return false;
138 return true;
139 }
140
// NOTE(review): signature lost in extraction; per the documentation index this is
// iTupel rank_to_coord(int rank) const (listing line 142). The declaration of the
// local 'coord' (presumably "iTupel coord;" at listing line 144) is missing as well.
// Maps a rank to its torus coordinate via the lexicographic increments.
143 {
145 rank = rank%_comm.size();
146 for (int i=d-1; i>=0; i--)
147 {
148 coord[i] = rank/_increment[i];
149 rank = rank%_increment[i];
150 }
151 return coord;
152 }
153
// NOTE(review): signature lost in extraction; per the documentation index this is
// int coord_to_rank(iTupel coord) const (listing line 155). Inverse of
// rank_to_coord; coordinates are first wrapped modulo the torus extents.
156 {
157 for (int i=0; i<d; i++) coord[i] = coord[i]%_dims[i];
158 int rank = 0;
159 for (int i=0; i<d; i++) rank += coord[i]*_increment[i];
160 return rank;
161 }
162
// rank of the process whose coordinate differs by cnt in direction dir;
// the +_dims[dir] ... %_dims[dir] arithmetic handles the periodic wrap-around
164 int rank_relative (int rank, int dir, int cnt) const
165 {
// NOTE(review): the declaration/initialization of the local 'coord' (presumably
// "iTupel coord = rank_to_coord(rank);", listing line 166) is missing here.
167 coord[dir] = (coord[dir]+_dims[dir]+cnt)%_dims[dir];
168 return coord_to_rank(coord);
169 }
170
// assign a color to a coordinate: one bit per direction for odd/even position,
// plus one extra bit per direction for processes on the upper boundary
172 int color (const iTupel & coord) const
173 {
174 int c = 0;
175 int power = 1;
176
177 // interior coloring
178 for (int i=0; i<d; i++)
179 {
180 if (coord[i]%2==1) c += power;
181 power *= 2;
182 }
183
184 // extra colors for boundary processes
185 for (int i=0; i<d; i++)
186 {
187 if (_dims[i]>1 && coord[i]==_dims[i]-1) c += power;
188 power *= 2;
189 }
190
191 return c;
192 }
193
// assign a color to a rank (convenience overload)
195 int color (int rank) const
196 {
197 return color(rank_to_coord(rank));
198 }
199
// number of potential torus neighbors: 3^d - 1
201 int neighbors () const
202 {
203 int n=1;
204 for (int i=0; i<d; ++i)
205 n *= 3;
206 return n-1;
207 }
208
// true iff the process at offset 'delta' exists under the given periodicity,
// i.e. no step leaves a non-periodic boundary
210 bool is_neighbor (iTupel delta, std::bitset<d> periodic) const
211 {
212 iTupel coord = rank_to_coord(_comm.rank()); // my own coordinate with 0 <= c_i < dims_i
213
214 for (int i=0; i<d; i++)
215 {
216 if (delta[i]<0)
217 {
218 // if I am on the boundary and domain is not periodic => no neighbor
219 if (coord[i]==0 && periodic[i]==false) return false;
220 }
221 if (delta[i]>0)
222 {
223 // if I am on the boundary and domain is not periodic => no neighbor
224 if (coord[i]==_dims[i]-1 && periodic[i]==false) return false;
225 }
226 }
227 return true;
228 }
229
// tensor-product partition of a structured grid of size 'size_in' (origin
// 'origin_in') onto the torus; writes the piece owned by 'rank' into
// origin_out/size_out and returns the ratio of this piece's size to the mean
// piece size (a load-imbalance measure).
237 double partition (int rank, iTupel origin_in, iTupel size_in, iTupel& origin_out, iTupel& size_out) const
238 {
// NOTE(review): the declaration of the local 'coord' (presumably
// "iTupel coord = rank_to_coord(rank);", listing line 239) is missing here.
240 double maxsize = 1;
241 double sz = 1;
242
243 // make a tensor product partition
244 for (int i=0; i<d; i++)
245 {
246 // determine
247 int m = size_in[i]/_dims[i];
248 int r = size_in[i]%_dims[i];
249
250 sz *= size_in[i];
251
// the first _dims[i]-r processes in this direction get m cells, the remaining r get m+1
252 if (coord[i]<_dims[i]-r)
253 {
254 origin_out[i] = origin_in[i] + coord[i]*m;
255 size_out[i] = m;
256 maxsize *= m;
257 }
258 else
259 {
260 origin_out[i] = origin_in[i] + (_dims[i]-r)*m + (coord[i]-(_dims[i]-r))*(m+1);
261 size_out[i] = m+1;
262 maxsize *= m+1;
263 }
264 }
265 return maxsize/(sz/_comm.size());
266 }
267
// NOTE(review): the class head of the nested iterator (presumably
// "class ProcListIterator {", listing line 274) is missing in this extraction.
// ProcListIterator iterates over a std::deque<CommPartner> neighbor list.
275 public:
// make an iterator from a deque iterator into a CommPartner list
277 ProcListIterator (typename std::deque<CommPartner>::const_iterator iter)
278 {
279 i = iter;
280 }
281
// rank of the neighboring process this entry refers to
283 int rank () const
284 {
285 return i->rank;
286 }
287
// coordinate offset (distance vector) of that neighbor
289 iTupel delta () const
290 {
291 return i->delta;
292 }
293
// position of this entry in the process list
295 int index () const
296 {
297 return i->index;
298 }
299
// 1-norm of the distance vector
301 int distance () const
302 {
303 int dist = 0;
304 iTupel delta=i->delta;
305 for (int j=0; j<d; ++j)
306 dist += std::abs(delta[j]);
307 return dist;
308 }
309
// NOTE(review): signature lost in extraction; per the documentation index this is
// bool operator==(const ProcListIterator& iter) (listing line 311).
312 {
313 return i == iter.i;
314 }
315
316
// NOTE(review): signature lost in extraction; per the documentation index this is
// bool operator!=(const ProcListIterator& iter) (listing line 318).
319 {
320 return i != iter.i;
321 }
322
// NOTE(review): signature lost in extraction; per the documentation index this is
// ProcListIterator& operator++() (listing line 324): advance to the next entry.
325 {
326 ++i;
327 return *this;
328 }
329
330 private:
331 typename std::deque<CommPartner>::const_iterator i;
332 };
333
// NOTE(review): signature lost in extraction; per the documentation index this is
// ProcListIterator sendbegin() const (listing line 335): first process in send list.
336 {
337 return ProcListIterator(_sendlist.begin());
338 }
339
// NOTE(review): signature lost in extraction; per the documentation index this is
// ProcListIterator sendend() const (listing line 341): end of send list.
342 {
343 return ProcListIterator(_sendlist.end());
344 }
345
// NOTE(review): signature lost in extraction; per the documentation index this is
// ProcListIterator recvbegin() const (listing line 347): first process in receive list.
348 {
349 return ProcListIterator(_recvlist.begin());
350 }
351
// NOTE(review): signature lost in extraction; per the documentation index this is
// ProcListIterator recvend() const (listing line 353): end of receive list.
354 {
355 return ProcListIterator(_recvlist.end());
356 }
357
// queue a send request; sends to self are kept in a separate list and served
// by memcpy in exchange(). The buffer must stay valid until exchange() runs.
359 void send (int rank, void* buffer, int size) const
360 {
361 CommTask task;
362 task.rank = rank;
363 task.buffer = buffer;
364 task.size = size;
365 if (rank!=_comm.rank())
366 _sendrequests.push_back(task);
367 else
368 _localsendrequests.push_back(task);
369 }
370
// queue a receive request; symmetric to send()
372 void recv (int rank, void* buffer, int size) const
373 {
374 CommTask task;
375 task.rank = rank;
376 task.buffer = buffer;
377 task.size = size;
378 if (rank!=_comm.rank())
379 _recvrequests.push_back(task);
380 else
381 _localrecvrequests.push_back(task);
382 }
383
// execute all queued requests: local pairs via memcpy (in queue order),
// remote ones via nonblocking MPI; all request buffers are cleared afterwards
385 void exchange () const
386 {
387 // handle local requests first
388 if (_localsendrequests.size()!=_localrecvrequests.size())
389 {
390 std::cout << "[" << rank() << "]: ERROR: local sends/receives do not match in exchange!" << std::endl;
391 return;
392 }
393 for (unsigned int i=0; i<_localsendrequests.size(); i++)
394 {
395 if (_localsendrequests[i].size!=_localrecvrequests[i].size)
396 {
397 std::cout << "[" << rank() << "]: ERROR: size in local sends/receive does not match in exchange!" << std::endl;
398 return;
399 }
400 memcpy(_localrecvrequests[i].buffer,_localsendrequests[i].buffer,_localsendrequests[i].size);
401 }
402 _localsendrequests.clear();
403 _localrecvrequests.clear();
404
405#if HAVE_MPI
406 // handle foreign requests
407
408 std::vector<MPI_Request> requests(_sendrequests.size() + _recvrequests.size());
409 MPI_Request* req = requests.data();
410
411 // issue sends to foreign processes
412 for (unsigned int i=0; i<_sendrequests.size(); i++)
413 if (_sendrequests[i].rank!=rank())
414 {
415 // std::cout << "[" << rank() << "]" << " send " << _sendrequests[i].size << " bytes "
416 // << "to " << _sendrequests[i].rank << " p=" << _sendrequests[i].buffer << std::endl;
417 MPI_Isend(_sendrequests[i].buffer, _sendrequests[i].size, MPI_BYTE,
418 _sendrequests[i].rank, _tag, _comm, req++);
419 }
420
421 // issue receives from foreign processes
422 for (unsigned int i=0; i<_recvrequests.size(); i++)
423 if (_recvrequests[i].rank!=rank())
424 {
425 // std::cout << "[" << rank() << "]" << " recv " << _recvrequests[i].size << " bytes "
426 // << "fm " << _recvrequests[i].rank << " p=" << _recvrequests[i].buffer << std::endl;
427 MPI_Irecv(_recvrequests[i].buffer, _recvrequests[i].size, MPI_BYTE,
428 _recvrequests[i].rank, _tag, _comm, req++);
429 }
430
431 // Wait for communication to complete
432 MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
433
434 // clear request buffers
435 _sendrequests.clear();
436 _recvrequests.clear();
437#endif
438 }
439
// global maximum of x over all processes via allreduce with Dune::Max
441 double global_max (double x) const
442 {
443 double res = 0.0;
444 _comm.template allreduce<Dune::Max<double>,double>(&x, &res, 1);
445 return res;
446 }
447
// print the torus layout and both neighbor lists to the given stream
449 void print (std::ostream& s) const
450 {
451 s << "[" << rank() << "]: Torus " << procs() << " processor(s) arranged as " << dims() << std::endl;
452 for (ProcListIterator i=sendbegin(); i!=sendend(); ++i)
453 {
454 s << "[" << rank() << "]: send to "
455 << "rank=" << i.rank()
456 << " index=" << i.index()
457 << " delta=" << i.delta() << " dist=" << i.distance() << std::endl;
458 }
459 for (ProcListIterator i=recvbegin(); i!=recvend(); ++i)
460 {
461 s << "[" << rank() << "]: recv from "
462 << "rank=" << i.rank()
463 << " index=" << i.index()
464 << " delta=" << i.delta() << " dist=" << i.distance() << std::endl;
465 }
466 }
467
468 private:
469
// enumerate all delta tuples in {-1,0,1}^d, skipping the all-zero tuple;
// for each neighbor, append to _recvlist with the enumeration index and
// prepend to _sendlist with index last-index (i.e. the send list is built
// in the reverse enumeration order).
470 void proclists ()
471 {
472 // compile the full neighbor list
473 CommPartner cp;
474 iTupel delta;
475
476 std::fill(delta.begin(), delta.end(), -1);
477 bool ready = false;
478 iTupel me, nb;
479 me = rank_to_coord(_comm.rank());
480 int index = 0;
481 int last = neighbors()-1;
482 while (!ready)
483 {
484 // find neighbors coordinates
485 for (int i=0; i<d; i++)
486 nb[i] = ( me[i]+_dims[i]+delta[i] ) % _dims[i];
487
488 // find neighbors rank
489 int nbrank = coord_to_rank(nb);
490
491 // check if delta is not zero
492 for (int i=0; i<d; i++)
493 if (delta[i]!=0)
494 {
495 cp.rank = nbrank;
496 cp.delta = delta;
497 cp.index = index;
498 _recvlist.push_back(cp);
499 cp.index = last-index;
500 _sendlist.push_front(cp);
501 index++;
502 break;
503 }
504
505 // next neighbor
// odometer-style increment of 'delta' over {-1,0,1}^d; 'ready' stays true
// only after the last tuple has been visited
506 ready = true;
507 for (int i=0; i<d; i++)
508 if (delta[i]<1)
509 {
510 (delta[i])++;
511 ready=false;
512 break;
513 }
514 else
515 {
516 delta[i] = -1;
517 }
518 }
519
520 }
521
// data members
522 CollectiveCommunication _comm;
523
524 iTupel _dims; // number of processes per direction
525 iTupel _increment; // increment per direction for lexicographic ordering
526 int _tag; // message tag used for all torus communication
527 std::deque<CommPartner> _sendlist;
528 std::deque<CommPartner> _recvlist;
529
// mutable so the const send()/recv()/exchange() methods can queue and clear requests
530 mutable std::vector<CommTask> _sendrequests;
531 mutable std::vector<CommTask> _recvrequests;
532 mutable std::vector<CommTask> _localsendrequests;
533 mutable std::vector<CommTask> _localrecvrequests;
534
535 };
536
// stream output for a Torus: delegates to Torus::print and returns the stream
538 template <class CollectiveCommunication, int d>
539 inline std::ostream& operator<< (std::ostream& s, const Torus<CollectiveCommunication, d> & t)
540 {
541 t.print(s);
542 return s;
543 }
544}
545
546#endif
helper classes to provide unique types for standard functions
int rank() const
Return rank, is between 0 and size()-1.
Definition: communication.hh:95
int size() const
Number of processes in set, is greater than 0.
Definition: communication.hh:101
Base class for Dune-Exceptions.
Definition: exceptions.hh:94
Definition: torus.hh:274
ProcListIterator(typename std::deque< CommPartner >::const_iterator iter)
make an iterator
Definition: torus.hh:277
int distance() const
return 1-norm of distance vector
Definition: torus.hh:301
int index() const
return index in proclist
Definition: torus.hh:295
bool operator!=(const ProcListIterator &iter)
Return true when two iterators do not point to same member.
Definition: torus.hh:318
int rank() const
return rank of neighboring process
Definition: torus.hh:283
bool operator==(const ProcListIterator &iter)
Return true when two iterators point to same member.
Definition: torus.hh:311
iTupel delta() const
return distance vector
Definition: torus.hh:289
ProcListIterator & operator++()
Increment iterator to next cell.
Definition: torus.hh:324
Definition: torus.hh:44
Torus()
constructor making uninitialized object
Definition: torus.hh:65
int neighbors() const
return the number of neighbors, which is 3^d - 1
Definition: torus.hh:201
iTupel rank_to_coord(int rank) const
map rank to coordinate in torus using lexicographic ordering
Definition: torus.hh:142
ProcListIterator sendbegin() const
first process in send list
Definition: torus.hh:335
int color(const iTupel &coord) const
assign color to given coordinate
Definition: torus.hh:172
int rank() const
return own rank
Definition: torus.hh:92
ProcListIterator recvbegin() const
first process in receive list
Definition: torus.hh:347
int rank_relative(int rank, int dir, int cnt) const
return rank of process where its coordinate in direction dir has offset cnt (handles periodic case)
Definition: torus.hh:164
void recv(int rank, void *buffer, int size) const
store a receive request; buffers are received in order; handles also local requests with memcpy
Definition: torus.hh:372
int coord_to_rank(iTupel coord) const
map coordinate in torus to rank using lexicographic ordering
Definition: torus.hh:155
int dims(int i) const
return dimensions of torus in direction i
Definition: torus.hh:116
int procs() const
return number of processes
Definition: torus.hh:104
Torus(CollectiveCommunication comm, int tag, iTupel size, const YLoadBalance< d > *lb)
make partitioner from communicator and coarse mesh size
Definition: torus.hh:69
ProcListIterator recvend() const
last process in receive list
Definition: torus.hh:353
double global_max(double x) const
global max
Definition: torus.hh:441
bool is_neighbor(iTupel delta, std::bitset< d > periodic) const
return true if neighbor with given delta is a neighbor under the given periodicity
Definition: torus.hh:210
int color(int rank) const
assign color to given rank
Definition: torus.hh:195
CollectiveCommunication comm() const
return communicator
Definition: torus.hh:122
void send(int rank, void *buffer, int size) const
store a send request; buffers are sent in order; handles also local requests with memcpy
Definition: torus.hh:359
void print(std::ostream &s) const
print contents of torus object
Definition: torus.hh:449
double partition(int rank, iTupel origin_in, iTupel size_in, iTupel &origin_out, iTupel &size_out) const
partition the given grid onto the torus and return the piece of the process with given rank; returns ...
Definition: torus.hh:237
iTupel coord() const
return own coordinates
Definition: torus.hh:98
bool inside(iTupel c) const
return true if coordinate is inside torus
Definition: torus.hh:134
int tag() const
return tag used by torus
Definition: torus.hh:128
void exchange() const
exchange messages stored in request buffers; clear request buffers afterwards
Definition: torus.hh:385
std::array< int, d > iTupel
type used to pass tupels in and out
Definition: torus.hh:47
ProcListIterator sendend() const
end of send list
Definition: torus.hh:341
const iTupel & dims() const
return dimensions of torus
Definition: torus.hh:110
a base class for the yaspgrid partitioning strategy The name might be irritating. It will probably ch...
Definition: partitioning.hh:24
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
Dune namespace.
Definition: alignedallocator.hh:14
constexpr Mantissa power(Mantissa m, Exponent p)
Power method for integer exponents.
Definition: math.hh:73
This file provides tools to partition YaspGrids. If you want to write your own partitioner,...
Implementation of stream operators for std::array and std::tuple.
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Jul 15, 22:36, 2024)