Dune Core Modules (2.6.0)

torus.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_GRID_YASPGRID_TORUS_HH
#define DUNE_GRID_YASPGRID_TORUS_HH

#include <algorithm>  // for std::fill used in proclists()
#include <array>
#include <bitset>
#include <cmath>
#include <cstring>    // for memcpy used in exchange()
#include <deque>
#include <iostream>
#include <vector>

#if HAVE_MPI
#include <mpi.h>
#endif

#include <dune/common/binaryfunctions.hh>
#include <dune/common/streamoperators.hh>
#include <dune/grid/common/exceptions.hh>

#include "partitioning.hh"

namespace Dune
{

  /*! \brief Torus maps a set of processes onto a d-dimensional torus
      and provides the communication schedules for nearest-neighbor
      exchange as well as arbitrary point-to-point communication.
   */
  template<class CollectiveCommunication, int d>
  class Torus {
  public:
    //! type used to pass tupels in and out
    typedef std::array<int, d> iTupel;


  private:
    struct CommPartner {
      int rank;
      iTupel delta;
      int index;
    };

    struct CommTask {
      int rank;       // process to send to / receive from
      void *buffer;   // buffer to send / receive
      int size;       // size of buffer
#if HAVE_MPI
      MPI_Request request;  // used by MPI to handle request
#else
      int request;
#endif
      int flag;       // used by MPI
    };

  public:
    //! constructor making uninitialized object
    Torus ()
    {}

    //! make partitioner from communicator and coarse mesh size
    Torus (CollectiveCommunication comm, int tag, iTupel size, const YLoadBalance<d>* lb)
      : _comm(comm), _tag(tag)
    {
      // determine dimensions
      lb->loadbalance(size, _comm.size(), _dims);

      // compute increments for lexicographic ordering
      int inc = 1;
      for (int i=0; i<d; i++)
      {
        _increment[i] = inc;
        inc *= _dims[i];
      }

      // check whether the load balancing matches the size of the communicator
      if (inc != _comm.size())
        DUNE_THROW(Dune::Exception, "Communicator size and result of the given load balancer do not match!");

      // make full schedule
      proclists();
    }

    //! return own rank
    int rank () const
    {
      return _comm.rank();
    }

    //! return own coordinates
    iTupel coord () const
    {
      return rank_to_coord(_comm.rank());
    }

    //! return number of processes
    int procs () const
    {
      return _comm.size();
    }

    //! return dimensions of torus
    const iTupel & dims () const
    {
      return _dims;
    }

    //! return dimensions of torus in direction i
    int dims (int i) const
    {
      return _dims[i];
    }

    //! return communicator
    CollectiveCommunication comm () const
    {
      return _comm;
    }

    //! return tag used by torus
    int tag () const
    {
      return _tag;
    }

    //! return true if coordinate is inside torus
    bool inside (iTupel c) const
    {
      for (int i=d-1; i>=0; i--)
        if (c[i]<0 || c[i]>=_dims[i]) return false;
      return true;
    }

    //! map rank to coordinate in torus using lexicographic ordering
    iTupel rank_to_coord (int rank) const
    {
      iTupel coord;
      rank = rank%_comm.size();
      for (int i=d-1; i>=0; i--)
      {
        coord[i] = rank/_increment[i];
        rank = rank%_increment[i];
      }
      return coord;
    }

    //! map coordinate in torus to rank using lexicographic ordering
    int coord_to_rank (iTupel coord) const
    {
      for (int i=0; i<d; i++) coord[i] = coord[i]%_dims[i];
      int rank = 0;
      for (int i=0; i<d; i++) rank += coord[i]*_increment[i];
      return rank;
    }
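
    /* Editor's note (worked example, not part of the original header),
       assuming d=2 and _dims = {2,3}: the constructor computes
       _increment = {1,2}, and the two maps above give

         rank 0 <-> (0,0)   rank 1 <-> (1,0)   rank 2 <-> (0,1)
         rank 3 <-> (1,1)   rank 4 <-> (0,2)   rank 5 <-> (1,2)

       so coord_to_rank(rank_to_coord(r)) == r for every rank r. */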

    //! return rank of process where its coordinate in direction dir has offset cnt (handles periodic case)
    int rank_relative (int rank, int dir, int cnt) const
    {
      iTupel coord = rank_to_coord(rank);
      coord[dir] = (coord[dir]+_dims[dir]+cnt)%_dims[dir];
      return coord_to_rank(coord);
    }
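
    /* Editor's note (sketch, continuing the 2x3 example above): the offset
       wraps around periodically; rank_relative(0, 1, -1) moves rank 0 at
       (0,0) one step backwards in direction 1, landing at (0,2), i.e. rank 4. */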

    //! assign color to given coordinate
    int color (const iTupel & coord) const
    {
      int c = 0;
      int power = 1;

      // interior coloring: one bit per direction, set if the coordinate is odd
      for (int i=0; i<d; i++)
      {
        if (coord[i]%2==1) c += power;
        power *= 2;
      }

      // extra colors for boundary processes: one bit per direction, set on the upper boundary
      for (int i=0; i<d; i++)
      {
        if (_dims[i]>1 && coord[i]==_dims[i]-1) c += power;
        power *= 2;
      }

      return c;
    }
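
    /* Editor's note (worked example, assuming d=2 and _dims = {3,3}):
       coordinate (1,0) has an odd first component, so the interior bits give
       c = 1; it touches no upper boundary, hence color = 1. Coordinate (2,2)
       has even components (interior bits 0) but lies on the upper boundary in
       both directions, so the boundary bits give color = 4 + 8 = 12. */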

    //! assign color to given rank
    int color (int rank) const
    {
      return color(rank_to_coord(rank));
    }

    //! return the number of neighbors, which is 3^d-1
    int neighbors () const
    {
      int n=1;
      for (int i=0; i<d; ++i)
        n *= 3;
      return n-1;
    }

    //! return true if neighbor with given delta is a neighbor under the given periodicity
    bool is_neighbor (iTupel delta, std::bitset<d> periodic) const
    {
      iTupel coord = rank_to_coord(_comm.rank()); // my own coordinate with 0 <= c_i < dims_i

      for (int i=0; i<d; i++)
      {
        if (delta[i]<0)
        {
          // if I am on the boundary and domain is not periodic => no neighbor
          if (coord[i]==0 && periodic[i]==false) return false;
        }
        if (delta[i]>0)
        {
          // if I am on the boundary and domain is not periodic => no neighbor
          if (coord[i]==_dims[i]-1 && periodic[i]==false) return false;
        }
      }
      return true;
    }
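
    /* Editor's note (sketch): on a non-periodic 1d torus of 4 processes,
       rank 0 sits at coordinate (0), so is_neighbor({-1}, periodic) is false
       when periodic[0] is unset; with periodic[0] set it is true and the
       neighbor wraps around to rank 3. */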

    /*! partition the given grid onto the torus and return the piece of the process with given rank;
        returns the ratio of the size of this piece to the average piece size
     */
    double partition (int rank, iTupel origin_in, iTupel size_in, iTupel& origin_out, iTupel& size_out) const
    {
      iTupel coord = rank_to_coord(rank);
      double maxsize = 1;
      double sz = 1;

      // make a tensor product partition
      for (int i=0; i<d; i++)
      {
        // determine base size and remainder in direction i
        int m = size_in[i]/_dims[i];
        int r = size_in[i]%_dims[i];

        sz *= size_in[i];

        if (coord[i]<_dims[i]-r)
        {
          origin_out[i] = origin_in[i] + coord[i]*m;
          size_out[i] = m;
          maxsize *= m;
        }
        else
        {
          origin_out[i] = origin_in[i] + (_dims[i]-r)*m + (coord[i]-(_dims[i]-r))*(m+1);
          size_out[i] = m+1;
          maxsize *= m+1;
        }
      }
      return maxsize/(sz/_comm.size());
    }
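
    /* Editor's note (worked example, assuming d=1, _dims = {3} and 10 cells):
       m = 10/3 = 3 and r = 1, so ranks 0 and 1 (coord < _dims-r = 2) receive
       pieces of size 3 at origins 0 and 3, while rank 2 receives the remaining
       piece of size 4 at origin 6. For rank 2 the returned imbalance is
       4/(10/3) = 1.2. */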

    //! iterator over the communication partners in a send or receive list
    class ProcListIterator {
    public:
      //! make an iterator
      ProcListIterator (typename std::deque<CommPartner>::const_iterator iter)
      {
        i = iter;
      }

      //! return rank of neighboring process
      int rank () const
      {
        return i->rank;
      }

      //! return distance vector
      iTupel delta () const
      {
        return i->delta;
      }

      //! return index in proclist
      int index () const
      {
        return i->index;
      }

      //! return 1-norm of distance vector
      int distance () const
      {
        int dist = 0;
        iTupel delta=i->delta;
        for (int j=0; j<d; ++j)
          dist += std::abs(delta[j]);
        return dist;
      }

      //! return true when two iterators point to same member
      bool operator== (const ProcListIterator& iter)
      {
        return i == iter.i;
      }

      //! return true when two iterators do not point to same member
      bool operator!= (const ProcListIterator& iter)
      {
        return i != iter.i;
      }

      //! increment iterator to next neighbor
      ProcListIterator& operator++ ()
      {
        ++i;
        return *this;
      }

    private:
      typename std::deque<CommPartner>::const_iterator i;
    };

    //! first process in send list
    ProcListIterator sendbegin () const
    {
      return ProcListIterator(_sendlist.begin());
    }

    //! end of send list
    ProcListIterator sendend () const
    {
      return ProcListIterator(_sendlist.end());
    }

    //! first process in receive list
    ProcListIterator recvbegin () const
    {
      return ProcListIterator(_recvlist.begin());
    }

    //! end of receive list
    ProcListIterator recvend () const
    {
      return ProcListIterator(_recvlist.end());
    }

    //! store a send request; buffers are sent in order; handles also local requests with memcpy
    void send (int rank, void* buffer, int size) const
    {
      CommTask task;
      task.rank = rank;
      task.buffer = buffer;
      task.size = size;
      if (rank!=_comm.rank())
        _sendrequests.push_back(task);
      else
        _localsendrequests.push_back(task);
    }

    //! store a receive request; buffers are received in order; handles also local requests with memcpy
    void recv (int rank, void* buffer, int size) const
    {
      CommTask task;
      task.rank = rank;
      task.buffer = buffer;
      task.size = size;
      if (rank!=_comm.rank())
        _recvrequests.push_back(task);
      else
        _localrecvrequests.push_back(task);
    }

    //! exchange messages stored in request buffers; clear request buffers afterwards
    void exchange () const
    {
      // handle local requests first
      if (_localsendrequests.size()!=_localrecvrequests.size())
      {
        std::cout << "[" << rank() << "]: ERROR: local sends/receives do not match in exchange!" << std::endl;
        return;
      }
      for (unsigned int i=0; i<_localsendrequests.size(); i++)
      {
        if (_localsendrequests[i].size!=_localrecvrequests[i].size)
        {
          std::cout << "[" << rank() << "]: ERROR: size in local sends/receive does not match in exchange!" << std::endl;
          return;
        }
        memcpy(_localrecvrequests[i].buffer,_localsendrequests[i].buffer,_localsendrequests[i].size);
      }
      _localsendrequests.clear();
      _localrecvrequests.clear();

#if HAVE_MPI
      // handle foreign requests
      int sends=0;
      int recvs=0;

      // issue sends to foreign processes
      for (unsigned int i=0; i<_sendrequests.size(); i++)
        if (_sendrequests[i].rank!=rank())
        {
          // std::cout << "[" << rank() << "]" << " send " << _sendrequests[i].size << " bytes "
          //           << "to " << _sendrequests[i].rank << " p=" << _sendrequests[i].buffer << std::endl;
          MPI_Isend(_sendrequests[i].buffer, _sendrequests[i].size, MPI_BYTE,
                    _sendrequests[i].rank, _tag, _comm, &(_sendrequests[i].request));
          _sendrequests[i].flag = false;
          sends++;
        }

      // issue receives from foreign processes
      for (unsigned int i=0; i<_recvrequests.size(); i++)
        if (_recvrequests[i].rank!=rank())
        {
          // std::cout << "[" << rank() << "]" << " recv " << _recvrequests[i].size << " bytes "
          //           << "fm " << _recvrequests[i].rank << " p=" << _recvrequests[i].buffer << std::endl;
          MPI_Irecv(_recvrequests[i].buffer, _recvrequests[i].size, MPI_BYTE,
                    _recvrequests[i].rank, _tag, _comm, &(_recvrequests[i].request));
          _recvrequests[i].flag = false;
          recvs++;
        }

      // poll sends
      while (sends>0)
      {
        for (unsigned int i=0; i<_sendrequests.size(); i++)
          if (!_sendrequests[i].flag)
          {
            MPI_Status status;
            MPI_Test( &(_sendrequests[i].request), &(_sendrequests[i].flag), &status);
            if (_sendrequests[i].flag)
            {
              sends--;
              // std::cout << "[" << rank() << "]" << " send to " << _sendrequests[i].rank << " OK" << std::endl;
            }
          }
      }

      // poll receives
      while (recvs>0)
      {
        for (unsigned int i=0; i<_recvrequests.size(); i++)
          if (!_recvrequests[i].flag)
          {
            MPI_Status status;
            MPI_Test( &(_recvrequests[i].request), &(_recvrequests[i].flag), &status);
            if (_recvrequests[i].flag)
            {
              recvs--;
              // std::cout << "[" << rank() << "]" << " recv fm " << _recvrequests[i].rank << " OK" << std::endl;
            }
          }
      }

      // clear request buffers
      _sendrequests.clear();
      _recvrequests.clear();
#endif
    }
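
    /* Editor's note (usage sketch, names assumed): a typical nearest-neighbor
       exchange posts one send and one receive per communication partner and
       then calls exchange() once:

         std::vector<double> out(n), in(n);
         for (auto it = torus.sendbegin(); it != torus.sendend(); ++it)
           torus.send(it.rank(), out.data(), n*sizeof(double));
         for (auto it = torus.recvbegin(); it != torus.recvend(); ++it)
           torus.recv(it.rank(), in.data(), n*sizeof(double));
         torus.exchange();

       In real code each partner needs its own buffer; a single in/out pair is
       used here only to keep the sketch short. */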

    //! global max over all processes
    double global_max (double x) const
    {
      double res = 0.0;
      _comm.template allreduce<Dune::Max<double>,double>(&x, &res, 1);
      return res;
    }

    //! print contents of torus object
    void print (std::ostream& s) const
    {
      s << "[" << rank() << "]: Torus " << procs() << " processor(s) arranged as " << dims() << std::endl;
      for (ProcListIterator i=sendbegin(); i!=sendend(); ++i)
      {
        s << "[" << rank() << "]: send to "
          << "rank=" << i.rank()
          << " index=" << i.index()
          << " delta=" << i.delta() << " dist=" << i.distance() << std::endl;
      }
      for (ProcListIterator i=recvbegin(); i!=recvend(); ++i)
      {
        s << "[" << rank() << "]: recv from "
          << "rank=" << i.rank()
          << " index=" << i.index()
          << " delta=" << i.delta() << " dist=" << i.distance() << std::endl;
      }
    }

  private:

    void proclists ()
    {
      // compile the full neighbor list
      CommPartner cp;
      iTupel delta;

      std::fill(delta.begin(), delta.end(), -1);
      bool ready = false;
      iTupel me, nb;
      me = rank_to_coord(_comm.rank());
      int index = 0;
      int last = neighbors()-1;
      while (!ready)
      {
        // find neighbor's coordinates
        for (int i=0; i<d; i++)
          nb[i] = ( me[i]+_dims[i]+delta[i] ) % _dims[i];

        // find neighbor's rank
        int nbrank = coord_to_rank(nb);

        // register the neighbor once if delta is not the zero tuple
        for (int i=0; i<d; i++)
          if (delta[i]!=0)
          {
            cp.rank = nbrank;
            cp.delta = delta;
            cp.index = index;
            _recvlist.push_back(cp);
            cp.index = last-index;
            _sendlist.push_front(cp);
            index++;
            break;
          }

        // advance delta to the next neighbor (odometer-style enumeration)
        ready = true;
        for (int i=0; i<d; i++)
          if (delta[i]<1)
          {
            (delta[i])++;
            ready=false;
            break;
          }
          else
          {
            delta[i] = -1;
          }
      }

    }
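
    /* Editor's note (sketch): the odometer loop above enumerates all 3^d
       delta tuples with entries in {-1,0,1}. For d=2 the order is
       (-1,-1), (0,-1), (1,-1), (-1,0), (0,0), (1,0), (-1,1), (0,1), (1,1),
       where the zero tuple (0,0) is skipped by the nonzero check, leaving
       the 3^d-1 = 8 neighbors. */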

    CollectiveCommunication _comm;

    iTupel _dims;
    iTupel _increment;
    int _tag;
    std::deque<CommPartner> _sendlist;
    std::deque<CommPartner> _recvlist;

    mutable std::vector<CommTask> _sendrequests;
    mutable std::vector<CommTask> _recvrequests;
    mutable std::vector<CommTask> _localsendrequests;
    mutable std::vector<CommTask> _localrecvrequests;

  };

  //! print a Torus to a stream using its print method
  template <class CollectiveCommunication, int d>
  inline std::ostream& operator<< (std::ostream& s, const Torus<CollectiveCommunication, d>& t)
  {
    t.print(s);
    return s;
  }
}

#endif
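
A minimal usage sketch (editor's addition, not part of the file): the mesh size,
the message tag, and the Dune::YLoadBalanceDefault partitioner below are assumed
for illustration; any YLoadBalance<d> implementation from partitioning.hh works
the same way.

  #include <dune/common/parallel/mpihelper.hh>
  #include <dune/grid/yaspgrid/torus.hh>

  int main (int argc, char** argv)
  {
    Dune::MPIHelper& mpi = Dune::MPIHelper::instance(argc, argv);
    const int d = 2;
    std::array<int, d> size = {{8, 8}};   // coarse mesh to distribute
    Dune::YLoadBalanceDefault<d> lb;      // default partitioning strategy
    Dune::Torus<Dune::CollectiveCommunication<Dune::MPIHelper::MPICommunicator>, d>
      torus(mpi.getCollectiveCommunication(), 93 /* arbitrary tag */, size, &lb);
    if (torus.rank() == 0)
      std::cout << torus;                 // print the communication schedule
    return 0;
  }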