yaspgrid.hh
1// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_GRID_YASPGRID_HH
6#define DUNE_GRID_YASPGRID_HH
7
8#include <cstdint>
9#include <iostream>
10#include <iterator>
11#include <vector>
12#include <algorithm>
13#include <stack>
14#include <type_traits>
15
16#include <dune/grid/common/backuprestore.hh>
17#include <dune/grid/common/grid.hh> // the grid base classes
18#include <dune/grid/common/capabilities.hh> // the capabilities
19#include <dune/common/hybridutilities.hh>
21#include <dune/common/math.hh>
27#include <dune/geometry/type.hh>
30
31
32#if HAVE_MPI
34#endif
35
43namespace Dune {
44
45 /* some sizes for building global ids
46 */
47 const int yaspgrid_dim_bits = 24; // bits for encoding each dimension
48 const int yaspgrid_level_bits = 5; // bits for encoding level number
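// Worked example (annotation, not part of the original source): these constants
// size the persistent entity id as dim*yaspgrid_dim_bits + yaspgrid_level_bits + dim
// bits, e.g. for dim = 3 that is 3*24 + 5 + 3 = 80 bits -- 24 bits per coordinate
// direction, 5 bits for the refinement level, and one shift bit per dimension.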
49
50
51 //************************************************************************
52 // forward declaration of templates
53
54 template<int dim, class Coordinates> class YaspGrid;
55 template<int mydim, int cdim, class GridImp> class YaspGeometry;
56 template<int codim, int dim, class GridImp> class YaspEntity;
57 template<int codim, class GridImp> class YaspEntitySeed;
58 template<int codim, PartitionIteratorType pitype, class GridImp> class YaspLevelIterator;
59 template<class GridImp> class YaspIntersectionIterator;
60 template<class GridImp> class YaspIntersection;
61 template<class GridImp> class YaspHierarchicIterator;
62 template<class GridImp, bool isLeafIndexSet> class YaspIndexSet;
63 template<class GridImp> class YaspGlobalIdSet;
64 template<class GridImp> class YaspPersistentContainerIndex;
65
66} // namespace Dune
67
79#include <dune/grid/yaspgrid/yaspgrididset.hh>
81
82namespace Dune {
83
84#if HAVE_MPI
85 using YaspCommunication = Communication<MPI_Comm>;
86#else
87 using YaspCommunication = Communication<No_Comm>;
88#endif
89
90 template<int dim, class Coordinates>
91 struct YaspGridFamily
92 {
93 typedef YaspCommunication CCType;
94
95 typedef GridTraits<dim, // dimension of the grid
96 dim, // dimension of the world space
98 YaspGeometry,YaspEntity,
99 YaspLevelIterator, // type used for the level iterator
100 YaspIntersection, // leaf intersection
101 YaspIntersection, // level intersection
102 YaspIntersectionIterator, // leaf intersection iter
103 YaspIntersectionIterator, // level intersection iter
104 YaspHierarchicIterator,
105 YaspLevelIterator, // type used for the leaf(!) iterator
106 YaspIndexSet< const YaspGrid< dim, Coordinates >, false >, // level index set
107 YaspIndexSet< const YaspGrid< dim, Coordinates >, true >, // leaf index set
108 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
109 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
110 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
111 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
112 CCType,
113 DefaultLevelGridViewTraits, DefaultLeafGridViewTraits,
114 YaspEntitySeed,
115 YaspGeometry,
116 unsigned int,
117 std::array<GeometryType,1>>
118 Traits;
119 };
120
121#ifndef DOXYGEN
122 template<int dim, int codim>
123 struct YaspCommunicateMeta {
124 template<class G, class DataHandle>
125 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
126 {
127 if (data.contains(dim,codim))
128 {
129 g.template communicateCodim<DataHandle,codim>(data,iftype,dir,level);
130 }
131 YaspCommunicateMeta<dim,codim-1>::comm(g,data,iftype,dir,level);
132 }
133 };
134
135 template<int dim>
136 struct YaspCommunicateMeta<dim,0> {
137 template<class G, class DataHandle>
138 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
139 {
140 if (data.contains(dim,0))
141 g.template communicateCodim<DataHandle,0>(data,iftype,dir,level);
142 }
143 };
144#endif
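// Annotation (not part of the original source): YaspCommunicateMeta unrolls the
// codimension loop at compile time because communicateCodim needs the codimension
// as a template parameter. A conceptually equivalent sketch (up to the order in
// which codimensions are visited), assuming the Hybrid::forEach utility from
// dune/common/hybridutilities.hh and C++17:
//
//   Dune::Hybrid::forEach(std::make_index_sequence<dim+1>{}, [&](auto codim) {
//     if (data.contains(dim, codim))
//       g.template communicateCodim<DataHandle, decltype(codim)::value>(data, iftype, dir, level);
//   });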
145
146 //************************************************************************
163 template<int dim, class Coordinates = EquidistantCoordinates<double, dim> >
164 class YaspGrid
165 : public GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >
166 {
167
168 template<int, PartitionIteratorType, typename>
169 friend class YaspLevelIterator;
170
171 template<typename>
172 friend class YaspHierarchicIterator;
173
174 using Base = GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >;
175 protected:
176
177 public:
179 typedef typename Coordinates::ctype ctype;
180
181 using Communication = typename Base::Communication;
182
183#ifndef DOXYGEN
184 typedef typename Dune::YGrid<Coordinates> YGrid;
185 typedef typename Dune::YGridList<Coordinates>::Intersection Intersection;
186
189 struct YGridLevel {
190
192 int level() const
193 {
194 return level_;
195 }
196
197 Coordinates coords;
198
199 std::array<YGrid, dim+1> overlapfront;
200 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlapfront_data;
201 std::array<YGrid, dim+1> overlap;
202 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlap_data;
203 std::array<YGrid, dim+1> interiorborder;
204 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interiorborder_data;
205 std::array<YGrid, dim+1> interior;
206 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interior_data;
207
208 std::array<YGridList<Coordinates>,dim+1> send_overlapfront_overlapfront;
209 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlapfront_overlapfront_data;
210 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlapfront;
211 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlapfront_data;
212
213 std::array<YGridList<Coordinates>,dim+1> send_overlap_overlapfront;
214 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlap_overlapfront_data;
215 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlap;
216 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlap_data;
217
218 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_interiorborder;
219 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_interiorborder_data;
220 std::array<YGridList<Coordinates>,dim+1> recv_interiorborder_interiorborder;
221 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_interiorborder_interiorborder_data;
222
223 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_overlapfront;
224 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_overlapfront_data;
225 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_interiorborder;
226 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_interiorborder_data;
227
228 // general
229 YaspGrid<dim,Coordinates>* mg; // each grid level knows its multigrid
230 int overlapSize; // in mesh cells on this level
231 bool keepOverlap;
232
234 int level_;
235 };
236
238 typedef std::array<int, dim> iTupel;
239 typedef FieldVector<ctype, dim> fTupel;
240
241 // communication tag used by multigrid
242 enum { tag = 17 };
243#endif
244
246 const Torus<Communication,dim>& torus () const
247 {
248 return _torus;
249 }
250
252 int globalSize(int i) const
253 {
254 return levelSize(maxLevel(),i);
255 }
256
258 iTupel globalSize() const
259 {
260 return levelSize(maxLevel());
261 }
262
264 int levelSize(int l, int i) const
265 {
266 return _coarseSize[i] * (1 << l);
267 }
268
270 iTupel levelSize(int l) const
271 {
272 iTupel s;
273 for (int i=0; i<dim; ++i)
274 s[i] = levelSize(l,i);
275 return s;
276 }
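// Worked example (annotation): each global refinement doubles the number of cells
// per direction, so with _coarseSize[i] = 4 the grid has
// levelSize(2,i) = 4 * (1<<2) = 16 cells in direction i on level 2.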
277
279 bool isPeriodic(int i) const
280 {
281 return _periodic[i];
282 }
283
284 bool getRefineOption() const
285 {
286 return keep_ovlp;
287 }
288
291 typedef typename ReservedVector<YGridLevel,32>::const_iterator YGridLevelIterator;
292
293 YGridLevelIterator begin () const
294 {
295 return _levels.begin();
296 }
297
299 YGridLevelIterator begin (int i) const
300 {
301 if (i<0 || i>maxLevel())
302 DUNE_THROW(GridError, "level not existing");
303 return std::next(_levels.begin(), i);
304 }
305
307 YGridLevelIterator end () const
308 {
309 return _levels.end();
310 }
311
312 // static method to create the default partitioning strategy
313 static const Yasp::Partitioning<dim>* defaultPartitioner()
314 {
315 static Yasp::DefaultPartitioning<dim> lb;
316 return & lb;
317 }
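// Usage sketch (annotation; assumes Yasp::FixedSizePartitioning from
// dune/grid/yaspgrid/partitioning.hh): instead of the default partitioner a
// user-chosen processor layout can be passed to any YaspGrid constructor, e.g.
// a fixed 2x2 decomposition of a 2d grid when running on 4 ranks:
//
//   Dune::Yasp::FixedSizePartitioning<2> lb({2, 2});
//   Dune::YaspGrid<2> grid({1.0, 1.0}, {64, 64},
//                          std::bitset<2>(0ULL), /* overlap */ 1,
//                          Dune::MPIHelper::getCommunication(), &lb);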
318
319 protected:
327 void makelevel (const Coordinates& coords, std::bitset<dim> periodic, iTupel o_interior, int overlap)
328 {
329 YGridLevel& g = _levels.back();
330 g.overlapSize = overlap;
331 g.mg = this;
332 g.level_ = maxLevel();
333 g.coords = coords;
334 g.keepOverlap = keep_ovlp;
335
336 using Dune::power;
337
338 // set the inserting positions in the corresponding arrays of YGridLevelStructure
339 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlapfront_it = g.overlapfront_data.begin();
340 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlap_it = g.overlap_data.begin();
341 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interiorborder_it = g.interiorborder_data.begin();
342 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interior_it = g.interior_data.begin();
343
344 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
345 send_overlapfront_overlapfront_it = g.send_overlapfront_overlapfront_data.begin();
346 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
347 recv_overlapfront_overlapfront_it = g.recv_overlapfront_overlapfront_data.begin();
348
349 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
350 send_overlap_overlapfront_it = g.send_overlap_overlapfront_data.begin();
351 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
352 recv_overlapfront_overlap_it = g.recv_overlapfront_overlap_data.begin();
353
354 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
355 send_interiorborder_interiorborder_it = g.send_interiorborder_interiorborder_data.begin();
356 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
357 recv_interiorborder_interiorborder_it = g.recv_interiorborder_interiorborder_data.begin();
358
359 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
360 send_interiorborder_overlapfront_it = g.send_interiorborder_overlapfront_data.begin();
361 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
362 recv_overlapfront_interiorborder_it = g.recv_overlapfront_interiorborder_data.begin();
363
364 // have a null array for constructor calls around
365 std::array<int,dim> n;
366 std::fill(n.begin(), n.end(), 0);
367
368 // determine origin of the grid with overlap and store whether an overlap area exists in direction i.
369 std::bitset<dim> ovlp_low(0ULL);
370 std::bitset<dim> ovlp_up(0ULL);
371
372 iTupel o_overlap;
373 iTupel s_overlap;
374
375 // determine where we have overlap and how big the overlap partition is
376 for (int i=0; i<dim; i++)
377 {
378 // the coordinate container has been constructed to hold the entire grid on
379 // this processor, including overlap. this is the element size.
380 s_overlap[i] = g.coords.size(i);
381
382 //in the periodic case there is always overlap
383 if (periodic[i])
384 {
385 o_overlap[i] = o_interior[i]-overlap;
386 ovlp_low[i] = true;
387 ovlp_up[i] = true;
388 }
389 else
390 {
391 //check lower boundary
392 if (o_interior[i] - overlap < 0)
393 o_overlap[i] = 0;
394 else
395 {
396 o_overlap[i] = o_interior[i] - overlap;
397 ovlp_low[i] = true;
398 }
399
400 //check upper boundary
401 if (o_overlap[i] + g.coords.size(i) < globalSize(i))
402 ovlp_up[i] = true;
403 }
404 }
405
406 for (unsigned int codim = 0; codim < dim + 1; codim++)
407 {
408 // set the begin iterator for the corresponding ygrids
409 g.overlapfront[codim].setBegin(overlapfront_it);
410 g.overlap[codim].setBegin(overlap_it);
411 g.interiorborder[codim].setBegin(interiorborder_it);
412 g.interior[codim].setBegin(interior_it);
413 g.send_overlapfront_overlapfront[codim].setBegin(send_overlapfront_overlapfront_it);
414 g.recv_overlapfront_overlapfront[codim].setBegin(recv_overlapfront_overlapfront_it);
415 g.send_overlap_overlapfront[codim].setBegin(send_overlap_overlapfront_it);
416 g.recv_overlapfront_overlap[codim].setBegin(recv_overlapfront_overlap_it);
417 g.send_interiorborder_interiorborder[codim].setBegin(send_interiorborder_interiorborder_it);
418 g.recv_interiorborder_interiorborder[codim].setBegin(recv_interiorborder_interiorborder_it);
419 g.send_interiorborder_overlapfront[codim].setBegin(send_interiorborder_overlapfront_it);
420 g.recv_overlapfront_interiorborder[codim].setBegin(recv_overlapfront_interiorborder_it);
421
422 // find all combinations of unit vectors that span entities of the given codimension
423 for (unsigned int index = 0; index < (1<<dim); index++)
424 {
425 // check whether the given shift is of our codimension
426 std::bitset<dim> r(index);
427 if (r.count() != dim-codim)
428 continue;
429
430 // get an origin and a size array for subsequent modification
431 std::array<int,dim> origin(o_overlap);
432 std::array<int,dim> size(s_overlap);
433
434 // build overlapfront
435 // we have to extend the element size by one in all directions without shift.
436 for (int i=0; i<dim; i++)
437 if (!r[i])
438 size[i]++;
439 *overlapfront_it = YGridComponent<Coordinates>(origin, r, &g.coords, size, n, size);
440
441 // build overlap
442 for (int i=0; i<dim; i++)
443 {
444 if (!r[i])
445 {
446 if (ovlp_low[i])
447 {
448 origin[i]++;
449 size[i]--;
450 }
451 if (ovlp_up[i])
452 size[i]--;
453 }
454 }
455 *overlap_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
456
457 // build interiorborder
458 for (int i=0; i<dim; i++)
459 {
460 if (ovlp_low[i])
461 {
462 origin[i] += overlap;
463 size[i] -= overlap;
464 if (!r[i])
465 {
466 origin[i]--;
467 size[i]++;
468 }
469 }
470 if (ovlp_up[i])
471 {
472 size[i] -= overlap;
473 if (!r[i])
474 size[i]++;
475 }
476 }
477 *interiorborder_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
478
479 // build interior
480 for (int i=0; i<dim; i++)
481 {
482 if (!r[i])
483 {
484 if (ovlp_low[i])
485 {
486 origin[i]++;
487 size[i]--;
488 }
489 if (ovlp_up[i])
490 size[i]--;
491 }
492 }
493 *interior_it = YGridComponent<Coordinates>(origin, size, *overlapfront_it);
494
495 intersections(*overlapfront_it,*overlapfront_it,*send_overlapfront_overlapfront_it, *recv_overlapfront_overlapfront_it);
496 intersections(*overlap_it,*overlapfront_it,*send_overlap_overlapfront_it, *recv_overlapfront_overlap_it);
497 intersections(*interiorborder_it,*interiorborder_it,*send_interiorborder_interiorborder_it,*recv_interiorborder_interiorborder_it);
498 intersections(*interiorborder_it,*overlapfront_it,*send_interiorborder_overlapfront_it,*recv_overlapfront_interiorborder_it);
499
500 // advance all iterators pointing to the next insertion point
501 ++overlapfront_it;
502 ++overlap_it;
503 ++interiorborder_it;
504 ++interior_it;
505 ++send_overlapfront_overlapfront_it;
506 ++recv_overlapfront_overlapfront_it;
507 ++send_overlap_overlapfront_it;
508 ++recv_overlapfront_overlap_it;
509 ++send_interiorborder_interiorborder_it;
510 ++recv_interiorborder_interiorborder_it;
511 ++send_interiorborder_overlapfront_it;
512 ++recv_overlapfront_interiorborder_it;
513 }
514
515 // set end iterators in the corresponding ygrids
516 g.overlapfront[codim].finalize(overlapfront_it);
517 g.overlap[codim].finalize(overlap_it);
518 g.interiorborder[codim].finalize(interiorborder_it);
519 g.interior[codim].finalize(interior_it);
520 g.send_overlapfront_overlapfront[codim].finalize(send_overlapfront_overlapfront_it,g.overlapfront[codim]);
521 g.recv_overlapfront_overlapfront[codim].finalize(recv_overlapfront_overlapfront_it,g.overlapfront[codim]);
522 g.send_overlap_overlapfront[codim].finalize(send_overlap_overlapfront_it,g.overlapfront[codim]);
523 g.recv_overlapfront_overlap[codim].finalize(recv_overlapfront_overlap_it,g.overlapfront[codim]);
524 g.send_interiorborder_interiorborder[codim].finalize(send_interiorborder_interiorborder_it,g.overlapfront[codim]);
525 g.recv_interiorborder_interiorborder[codim].finalize(recv_interiorborder_interiorborder_it,g.overlapfront[codim]);
526 g.send_interiorborder_overlapfront[codim].finalize(send_interiorborder_overlapfront_it,g.overlapfront[codim]);
527 g.recv_overlapfront_interiorborder[codim].finalize(recv_overlapfront_interiorborder_it,g.overlapfront[codim]);
528 }
529 }
530
531#ifndef DOXYGEN
540 struct mpifriendly_ygrid {
541 mpifriendly_ygrid ()
542 {
543 std::fill(origin.begin(), origin.end(), 0);
544 std::fill(size.begin(), size.end(), 0);
545 }
546 mpifriendly_ygrid (const YGridComponent<Coordinates>& grid)
547 : origin(grid.origin()), size(grid.size())
548 {}
549 iTupel origin;
550 iTupel size;
551 };
552#endif
553
562 void intersections (const YGridComponent<Coordinates>& sendgrid, const YGridComponent<Coordinates>& recvgrid,
563 std::deque<Intersection>& sendlist, std::deque<Intersection>& recvlist)
564 {
565 iTupel size = globalSize();
566
567 // the exchange buffers
568 std::vector<YGridComponent<Coordinates> > send_recvgrid(_torus.neighbors());
569 std::vector<YGridComponent<Coordinates> > recv_recvgrid(_torus.neighbors());
570 std::vector<YGridComponent<Coordinates> > send_sendgrid(_torus.neighbors());
571 std::vector<YGridComponent<Coordinates> > recv_sendgrid(_torus.neighbors());
572
573 // new exchange buffers to send simple struct without virtual functions
574 std::vector<mpifriendly_ygrid> mpifriendly_send_recvgrid(_torus.neighbors());
575 std::vector<mpifriendly_ygrid> mpifriendly_recv_recvgrid(_torus.neighbors());
576 std::vector<mpifriendly_ygrid> mpifriendly_send_sendgrid(_torus.neighbors());
577 std::vector<mpifriendly_ygrid> mpifriendly_recv_sendgrid(_torus.neighbors());
578
579 // fill send buffers; iterate over all neighboring processes
580 // non-periodic case is handled automatically because intersection will be zero
581 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
582 {
583 // determine if we communicate with this neighbor (and what)
584 bool skip = false;
585 iTupel coord = _torus.coord(); // my coordinates
586 iTupel delta = i.delta(); // delta to neighbor
587 iTupel nb = coord; // the neighbor
588 for (int k=0; k<dim; k++) nb[k] += delta[k];
589 iTupel v; // grid movement
590 std::fill(v.begin(), v.end(), 0);
591
592 for (int k=0; k<dim; k++)
593 {
594 if (nb[k]<0)
595 {
596 if (_periodic[k])
597 v[k] += size[k];
598 else
599 skip = true;
600 }
601 if (nb[k]>=_torus.dims(k))
602 {
603 if (_periodic[k])
604 v[k] -= size[k];
605 else
606 skip = true;
607 }
608 // if neither case applies, v stays 0
609 }
610
611 // store moved grids in send buffers
612 if (!skip)
613 {
614 send_sendgrid[i.index()] = sendgrid.move(v);
615 send_recvgrid[i.index()] = recvgrid.move(v);
616 }
617 else
618 {
619 send_sendgrid[i.index()] = YGridComponent<Coordinates>();
620 send_recvgrid[i.index()] = YGridComponent<Coordinates>();
621 }
622 }
623
624 // issue send requests for sendgrid being sent to all neighbors
625 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
626 {
627 mpifriendly_send_sendgrid[i.index()] = mpifriendly_ygrid(send_sendgrid[i.index()]);
628 _torus.send(i.rank(), &mpifriendly_send_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
629 }
630
631 // issue recv requests for sendgrids of neighbors
632 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
633 _torus.recv(i.rank(), &mpifriendly_recv_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
634
635 // exchange the sendgrids
636 _torus.exchange();
637
638 // issue send requests for recvgrid being sent to all neighbors
639 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
640 {
641 mpifriendly_send_recvgrid[i.index()] = mpifriendly_ygrid(send_recvgrid[i.index()]);
642 _torus.send(i.rank(), &mpifriendly_send_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
643 }
644
645 // issue recv requests for recvgrid of neighbors
646 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
647 _torus.recv(i.rank(), &mpifriendly_recv_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
648
649 // exchange the recvgrid
650 _torus.exchange();
651
652 // process receive buffers and compute intersections
653 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
654 {
655 // what must be sent to this neighbor
656 Intersection send_intersection{};
657 mpifriendly_ygrid yg = mpifriendly_recv_recvgrid[i.index()];
658 recv_recvgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
659 send_intersection.grid = sendgrid.intersection(recv_recvgrid[i.index()]);
660 send_intersection.rank = i.rank();
661 send_intersection.distance = i.distance();
662 if (!send_intersection.grid.empty()) sendlist.push_front(send_intersection);
663
664 Intersection recv_intersection{};
665 yg = mpifriendly_recv_sendgrid[i.index()];
666 recv_sendgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
667 recv_intersection.grid = recvgrid.intersection(recv_sendgrid[i.index()]);
668 recv_intersection.rank = i.rank();
669 recv_intersection.distance = i.distance();
670 if(!recv_intersection.grid.empty()) recvlist.push_back(recv_intersection);
671 }
672 }
673
674 protected:
675
676 typedef const YaspGrid<dim,Coordinates> GridImp;
677
678 void init()
679 {
680 indexsets.push_back( std::make_shared< YaspIndexSet<const YaspGrid<dim, Coordinates>, false > >(*this,0) );
681 boundarysegmentssize();
682 }
683
684 void boundarysegmentssize()
685 {
686 // sizes of local macro grid
687 std::array<int, dim> sides;
688 {
689 for (int i=0; i<dim; i++)
690 {
691 sides[i] =
692 ((begin()->overlap[0].dataBegin()->origin(i) == 0)+
693 (begin()->overlap[0].dataBegin()->origin(i) + begin()->overlap[0].dataBegin()->size(i)
694 == levelSize(0,i)));
695 }
696 }
697 nBSegments = 0;
698 for (int k=0; k<dim; k++)
699 {
700 int offset = 1;
701 for (int l=0; l<dim; l++)
702 {
703 if (l==k) continue;
704 offset *= begin()->overlap[0].dataBegin()->size(l);
705 }
706 nBSegments += sides[k]*offset;
707 }
708 }
709
710 public:
711
712 // define the persistent index type
713 typedef bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim> PersistentIndexType;
714
716 typedef YaspGridFamily<dim, Coordinates> GridFamily;
717 // the Traits
718 typedef typename YaspGridFamily<dim, Coordinates>::Traits Traits;
719
720 // need for friend declarations in entity
721 typedef YaspIndexSet<YaspGrid<dim, Coordinates>, false > LevelIndexSetType;
722 typedef YaspIndexSet<YaspGrid<dim, Coordinates>, true > LeafIndexSetType;
723 typedef YaspGlobalIdSet<YaspGrid<dim, Coordinates> > GlobalIdSetType;
724
732 YaspGrid (const Coordinates& coordinates,
733 std::bitset<dim> periodic = std::bitset<dim>(0ULL),
734 int overlap = 1,
735 Communication comm = Communication(),
736 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
737 : ccobj(comm)
738 , leafIndexSet_(*this)
739 , _periodic(periodic)
740 , _overlap(overlap)
741 , keep_ovlp(true)
742 , adaptRefCount(0)
743 , adaptActive(false)
744 {
745 _levels.resize(1);
746
747 // Number of elements per coordinate direction on the coarsest level
748 for (std::size_t i=0; i<dim; i++)
749 _coarseSize[i] = coordinates.size(i);
750
751 // Construct the communication torus
752 _torus = decltype(_torus)(comm,tag,_coarseSize,overlap,partitioner);
753
754 iTupel o;
755 std::fill(o.begin(), o.end(), 0);
756 iTupel o_interior(o);
757 iTupel s_interior(_coarseSize);
758
759 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
760
761 // Set domain size
762 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
763 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
764 {
765 for (std::size_t i=0; i<dim; i++)
766 _L[i] = coordinates.size(i) * coordinates.meshsize(i,0);
767 }
768 if (std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value)
769 {
770 //determine sizes of vector to correctly construct torus structure and store for later size requests
771 for (int i=0; i<dim; i++)
772 _L[i] = coordinates.coordinate(i,_coarseSize[i]) - coordinates.coordinate(i,0);
773 }
774
775#if HAVE_MPI
776 // TODO: Settle on a single value for all coordinate types
777 int mysteryFactor = (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value) ? 1 : 2;
778
779 // check whether the grid is large enough to be overlapping
780 for (int i=0; i<dim; i++)
781 {
782 // find out whether the interior grid is too small to carry the requested overlap
783 int toosmall = (s_interior[i] <= mysteryFactor * overlap) && // interior is very small
784 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
785 // communicate the result so that every process errors out if any one of them failed
786 int global = 0;
787 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
788 if (global)
789 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
790 " Note that this also holds for DOFs on subdomain boundaries."
791 " Increase grid elements or decrease overlap accordingly.");
792 }
793#endif // #if HAVE_MPI
794
795 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
796 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
797 {
798 iTupel s_overlap(s_interior);
799 for (int i=0; i<dim; i++)
800 {
801 if ((o_interior[i] - overlap > 0) || (periodic[i]))
802 s_overlap[i] += overlap;
803 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
804 s_overlap[i] += overlap;
805 }
806
807 FieldVector<ctype,dim> upperRightWithOverlap;
808 for (int i=0; i<dim; i++)
809 upperRightWithOverlap[i] = coordinates.coordinate(i,0) + coordinates.meshsize(i,0) * s_overlap[i];
810
811 if constexpr (std::is_same_v<Coordinates,EquidistantCoordinates<ctype,dim>>)
812 {
813 // New coordinate object that additionally contains the overlap elements
814 EquidistantCoordinates<ctype,dim> coordinatesWithOverlap(upperRightWithOverlap,s_overlap);
815
816 // add level (the this-> is needed to make g++-6 happy)
817 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
818 }
819
820 if constexpr (std::is_same_v<Coordinates,EquidistantOffsetCoordinates<ctype,dim>>)
821 {
822 FieldVector<ctype,dim> lowerleft;
823 for (int i=0; i<dim; i++)
824 lowerleft[i] = coordinates.origin(i);
825
826 // New coordinate object that additionally contains the overlap elements
827 EquidistantOffsetCoordinates<ctype,dim> coordinatesWithOverlap(lowerleft,upperRightWithOverlap,s_overlap);
828
829 // add level (the this-> is needed to make g++-6 happy)
830 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
831 }
832 }
833
834 if constexpr (std::is_same_v<Coordinates,TensorProductCoordinates<ctype,dim>>)
835 {
836 std::array<std::vector<ctype>,dim> newCoords;
837 std::array<int, dim> offset(o_interior);
838
839 // find the relevant part of the coords vector for this processor and copy it to newCoords
840 for (int i=0; i<dim; ++i)
841 {
842 //define the coordinate range to be used
843 std::size_t begin = o_interior[i];
844 std::size_t end = begin + s_interior[i] + 1;
845
846 // check whether we are not at the physical boundary. In that case overlap is a simple
847 // extension of the coordinate range to be used
848 if (o_interior[i] - overlap > 0)
849 {
850 begin = begin - overlap;
851 offset[i] -= overlap;
852 }
853 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
854 end = end + overlap;
855
856 //copy the selected part in the new coord vector
857 newCoords[i].resize(end-begin);
858 auto newCoordsIt = newCoords[i].begin();
859 for (std::size_t j=begin; j<end; j++)
860 {
861 *newCoordsIt = coordinates.coordinate(i, j);
862 newCoordsIt++;
863 }
864
865 // Check whether we are at the physical boundary and have a periodic grid.
866 // In this case the coordinate vector has to be tweaked manually.
867 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
868 {
869 // we need to add the first <overlap> cells to the end of newcoords
870 for (int j=0; j<overlap; ++j)
871 newCoords[i].push_back(newCoords[i].back() - coordinates.coordinate(i,j) + coordinates.coordinate(i,j+1));
872 }
873
874 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
875 {
876 offset[i] -= overlap;
877
878 // we need to add the last <overlap> cells to the begin of newcoords
879 std::size_t reverseCounter = coordinates.size(i);
880 for (int j=0; j<overlap; ++j, --reverseCounter)
881 newCoords[i].insert(newCoords[i].begin(), newCoords[i].front()
882 - coordinates.coordinate(i,reverseCounter) + coordinates.coordinate(i,reverseCounter-1));
883 }
884 }
885
886 TensorProductCoordinates<ctype,dim> coordinatesWithOverlap(newCoords, offset);
887
888 // add level (the this-> is needed to make g++-6 happy)
889 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
890 }
891
892 init();
893 }
894
903 template<class C = Coordinates,
904 typename std::enable_if_t< std::is_same_v<C, EquidistantCoordinates<ctype,dim> >, int> = 0>
905 YaspGrid (FieldVector<ctype, dim> L,
906 std::array<int, std::size_t{dim}> s,
907 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>{0ULL},
908 int overlap = 1,
909 Communication comm = Communication(),
910 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
911 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
912 _L(L), _periodic(periodic), _coarseSize(s), _overlap(overlap),
913 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
914 {
915 _levels.resize(1);
916
917 iTupel o;
918 std::fill(o.begin(), o.end(), 0);
919 iTupel o_interior(o);
920 iTupel s_interior(s);
921
922 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
923
924#if HAVE_MPI
925 // check whether the grid is large enough to be overlapping
926 for (int i=0; i<dim; i++)
927 {
928 // find out whether the interior grid is too small to carry the requested overlap
929 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
930 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
931 // communicate the result so that every process errors out if any one of them failed
932 int global = 0;
933 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
934 if (global)
935 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
936 " Note that this also holds for DOFs on subdomain boundaries."
937 " Increase grid elements or decrease overlap accordingly.");
938 }
939#endif // #if HAVE_MPI
940
941 iTupel s_overlap(s_interior);
942 for (int i=0; i<dim; i++)
943 {
944 if ((o_interior[i] - overlap > 0) || (periodic[i]))
945 s_overlap[i] += overlap;
946 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
947 s_overlap[i] += overlap;
948 }
949
950 FieldVector<ctype,dim> upperRightWithOverlap;
951
952 for (int i=0; i<dim; i++)
953 upperRightWithOverlap[i] = (L[i] / s[i]) * s_overlap[i];
954
955 // New coordinate object that additionally contains the overlap elements
956 EquidistantCoordinates<ctype,dim> cc(upperRightWithOverlap,s_overlap);
957
958 // add level
959 makelevel(cc,periodic,o_interior,overlap);
960
961 init();
962 }
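// Usage sketch (annotation, not part of the original header): an 8x8 cell grid
// on the unit square, non-periodic, with the default overlap of 1:
//
//   Dune::YaspGrid<2> grid(/* upper right */ {1.0, 1.0}, /* cells */ {8, 8});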
963
973 template<class C = Coordinates,
974 typename std::enable_if_t< std::is_same_v<C, EquidistantOffsetCoordinates<ctype,dim> >, int> = 0>
975 YaspGrid (FieldVector<ctype, dim> lowerleft,
976 FieldVector<ctype, dim> upperright,
977 std::array<int, std::size_t{dim}> s,
978 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
979 int overlap = 1,
980 Communication comm = Communication(),
981 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
982 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
983 _L(upperright - lowerleft),
984 _periodic(periodic), _coarseSize(s), _overlap(overlap),
985 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
986 {
987 _levels.resize(1);
988
989 iTupel o;
990 std::fill(o.begin(), o.end(), 0);
991 iTupel o_interior(o);
992 iTupel s_interior(s);
993
994 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
995
996#if HAVE_MPI
997 // check whether the grid is large enough to be overlapping
998 for (int i=0; i<dim; i++)
999 {
1000 // find out whether the interior grid is too small to carry the requested overlap
1001 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1002 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
1003 // communicate the result so that every process errors out if any one of them failed
1004 int global = 0;
1005 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1006 if (global)
1007 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1008 " Note that this also holds for DOFs on subdomain boundaries."
1009 " Increase grid elements or decrease overlap accordingly.");
1010 }
1011#endif // #if HAVE_MPI
1012
1013 iTupel s_overlap(s_interior);
1014 for (int i=0; i<dim; i++)
1015 {
1016 if ((o_interior[i] - overlap > 0) || (periodic[i]))
1017 s_overlap[i] += overlap;
1018 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
1019 s_overlap[i] += overlap;
1020 }
1021
1022 FieldVector<ctype,dim> upperRightWithOverlap;
1023 for (int i=0; i<dim; i++)
1024 upperRightWithOverlap[i] = lowerleft[i]
1025 + s_overlap[i] * (upperright[i]-lowerleft[i]) / s[i];
1026
1027 EquidistantOffsetCoordinates<ctype,dim> cc(lowerleft,upperRightWithOverlap,s_overlap);
1028
1029 // add level
1030 makelevel(cc,periodic,o_interior,overlap);
1031
1032 init();
1033 }
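// Usage sketch (annotation): a 32x16 cell grid on the shifted domain
// [-1,1] x [0,1], using the offset coordinate container:
//
//   Dune::YaspGrid<2, Dune::EquidistantOffsetCoordinates<double, 2>>
//       grid(/* lower left */ {-1.0, 0.0}, /* upper right */ {1.0, 1.0}, {32, 16});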
1034
1042 template<class C = Coordinates,
1043 typename std::enable_if_t< std::is_same_v<C, TensorProductCoordinates<ctype,dim> >, int> = 0>
1044 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1045 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
1046 int overlap = 1,
1047 Communication comm = Communication(),
1048 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1049 : ccobj(comm), _torus(comm,tag,Dune::Yasp::sizeArray<dim>(coords),overlap,partitioner),
1050 leafIndexSet_(*this), _periodic(periodic), _overlap(overlap),
1051 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1052 {
1053 if (!Dune::Yasp::checkIfMonotonous(coords))
1054 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1055
1056 _levels.resize(1);
1057
1058 //determine sizes of vector to correctly construct torus structure and store for later size requests
1059 for (int i=0; i<dim; i++) {
1060 _coarseSize[i] = coords[i].size() - 1;
1061 _L[i] = coords[i][_coarseSize[i]] - coords[i][0];
1062 }
1063
1064 iTupel o;
1065 std::fill(o.begin(), o.end(), 0);
1066 iTupel o_interior(o);
1067 iTupel s_interior(_coarseSize);
1068
1069 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
1070
1071#if HAVE_MPI
1072 // check whether the grid is large enough to be overlapping
1073 for (int i=0; i<dim; i++)
1074 {
1075 // find out whether the interior grid is too small to carry the requested overlap
1076 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1077 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
1078 // communicate the result so that every process errors out if any one of them failed
1079 int global = 0;
1080 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1081 if (global)
1082 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1083 " Note that this also holds for DOFs on subdomain boundaries."
1084 " Increase grid elements or decrease overlap accordingly.");
1085 }
1086#endif // #if HAVE_MPI
1087
1088
1089 std::array<std::vector<ctype>,dim> newcoords;
1090 std::array<int, dim> offset(o_interior);
1091
1092 // find the relevant part of the coords vector for this processor and copy it to newcoords
1093 for (int i=0; i<dim; ++i)
1094 {
1095 //define iterators on coords that specify the coordinate range to be used
1096 typename std::vector<ctype>::iterator begin = coords[i].begin() + o_interior[i];
1097 typename std::vector<ctype>::iterator end = begin + s_interior[i] + 1;
1098
1099 // check whether we are not at the physical boundary. In that case overlap is a simple
1100 // extension of the coordinate range to be used
1101 if (o_interior[i] - overlap > 0)
1102 {
1103 begin = begin - overlap;
1104 offset[i] -= overlap;
1105 }
1106 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
1107 end = end + overlap;
1108
1109 //copy the selected part in the new coord vector
1110 newcoords[i].resize(end-begin);
1111 std::copy(begin, end, newcoords[i].begin());
1112
1113 // check whether we are at the physical boundary and have a periodic grid.
1114 // In this case the coordinate vector has to be tweaked manually.
1115 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
1116 {
1117 // we need to add the first <overlap> cells to the end of newcoords
1118 typename std::vector<ctype>::iterator it = coords[i].begin();
1119 for (int j=0; j<overlap; ++j)
1120 newcoords[i].push_back(newcoords[i].back() - *it + *(++it));
1121 }
1122
1123 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
1124 {
1125 offset[i] -= overlap;
1126
1127 // we need to add the last <overlap> cells to the begin of newcoords
1128 typename std::vector<ctype>::iterator it = coords[i].end() - 1;
1129 for (int j=0; j<overlap; ++j)
1130 newcoords[i].insert(newcoords[i].begin(), newcoords[i].front() - *it + *(--it));
1131 }
1132 }
1133
1134 TensorProductCoordinates<ctype,dim> cc(newcoords, offset);
1135
1136 // add level
1137 makelevel(cc,periodic,o_interior,overlap);
1138 init();
1139 }
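// Usage sketch (annotation): a 1d grid graded towards the left boundary; the
// coordinate sequence must be strictly monotonous:
//
//   std::array<std::vector<double>, 1> coords{{ {0.0, 0.1, 0.3, 0.6, 1.0} }};
//   Dune::YaspGrid<1, Dune::TensorProductCoordinates<double, 1>> grid(coords);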
1140
1141 private:
1142
1157 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1158 std::bitset<std::size_t{dim}> periodic,
1159 int overlap,
1160 Communication comm,
1161 std::array<int,dim> coarseSize,
1162 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1163 : ccobj(comm), _torus(comm,tag,coarseSize,overlap,partitioner), leafIndexSet_(*this),
1164 _periodic(periodic), _coarseSize(coarseSize), _overlap(overlap),
1165 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1166 {
1167 // check whether YaspGrid has been given the correct template parameter
1168 static_assert(std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value,
1169 "YaspGrid coordinate container template parameter and given constructor values do not match!");
1170
1171 if (!Dune::Yasp::checkIfMonotonous(coords))
1172 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1173
1174 for (int i=0; i<dim; i++)
1175 _L[i] = coords[i][coords[i].size() - 1] - coords[i][0];
1176
1177 _levels.resize(1);
1178
1179 std::array<int,dim> o;
1180 std::fill(o.begin(), o.end(), 0);
1181 std::array<int,dim> o_interior(o);
1182 std::array<int,dim> s_interior(coarseSize);
1183
1184 _torus.partition(_torus.rank(),o,coarseSize,o_interior,s_interior);
1185
1186 // get offset by modifying o_interior according to overlap
1187 std::array<int,dim> offset(o_interior);
1188 for (int i=0; i<dim; i++)
1189 if ((periodic[i]) || (o_interior[i] > 0))
1190 offset[i] -= overlap;
1191
1192 TensorProductCoordinates<ctype,dim> cc(coords, offset);
1193
1194 // add level
1195 makelevel(cc,periodic,o_interior,overlap);
1196
1197 init();
1198 }
1199
1200 // the backup restore facility needs to be able to use above constructor
1201 friend struct BackupRestoreFacility<YaspGrid<dim,Coordinates> >;
1202
1203 // do not copy this class
1204 YaspGrid(const YaspGrid&);
1205
1206 public:
1207
1211 int maxLevel() const
1212 {
1213 return _levels.size()-1;
1214 }
1215
1217 void globalRefine (int refCount)
1218 {
1219 if (refCount < -maxLevel())
1220 DUNE_THROW(GridError, "Only " << maxLevel() << " levels left. " <<
1221 "Coarsening " << -refCount << " levels requested!");
1222
1223 // If refCount is negative then coarsen the grid
1224 for (int k=refCount; k<0; k++)
1225 {
1226 // create an empty grid level
1227 YGridLevel empty;
1228 _levels.back() = empty;
1229 // reduce maxlevel
1230 _levels.pop_back();
1231
1232 indexsets.pop_back();
1233 }
1234
1235 // If refCount is positive refine the grid
1236 for (int k=0; k<refCount; k++)
1237 {
1238 // access to coarser grid level
1239 YGridLevel& cg = _levels[maxLevel()];
1240
1241 std::bitset<dim> ovlp_low(0ULL), ovlp_up(0ULL);
1242 for (int i=0; i<dim; i++)
1243 {
1244 if (cg.overlap[0].dataBegin()->origin(i) > 0 || _periodic[i])
1245 ovlp_low[i] = true;
1246 if (cg.overlap[0].dataBegin()->max(i) + 1 < globalSize(i) || _periodic[i])
1247 ovlp_up[i] = true;
1248 }
1249
1250 Coordinates newcont(cg.coords.refine(ovlp_low, ovlp_up, cg.overlapSize, keep_ovlp));
1251
1252 int overlap = (keep_ovlp) ? 2*cg.overlapSize : cg.overlapSize;
1253
1254 //determine new origin
1255 iTupel o_interior;
1256 for (int i=0; i<dim; i++)
1257 o_interior[i] = 2*cg.interior[0].dataBegin()->origin(i);
1258
1259 // add level
1260 _levels.resize(_levels.size() + 1);
1261 makelevel(newcont,_periodic,o_interior,overlap);
1262
1263 indexsets.push_back( std::make_shared<YaspIndexSet<const YaspGrid<dim,Coordinates>, false > >(*this,maxLevel()) );
1264 }
1265 }
1266
1271 void refineOptions (bool keepPhysicalOverlap)
1272 {
1273 keep_ovlp = keepPhysicalOverlap;
1274 }
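// Usage sketch (annotation): refinement is always uniform; the overlap handling
// is chosen before refining:
//
//   grid.refineOptions(false);  // do not keep the physical overlap size
//   grid.globalRefine(2);       // two levels of uniform refinement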
1275
1287 bool mark( int refCount, const typename Traits::template Codim<0>::Entity & e )
1288 {
1289 assert(adaptActive == false);
1290 if (e.level() != maxLevel()) return false;
1291 adaptRefCount = std::max(adaptRefCount, refCount);
1292 return true;
1293 }
1294
1301 int getMark ( const typename Traits::template Codim<0>::Entity &e ) const
1302 {
1303 return ( e.level() == maxLevel() ) ? adaptRefCount : 0;
1304 }
1305
1307 bool adapt ()
1308 {
1309 globalRefine(adaptRefCount);
1310 return (adaptRefCount > 0);
1311 }
1312
1314 bool preAdapt ()
1315 {
1316 adaptActive = true;
1317 adaptRefCount = comm().max(adaptRefCount);
1318 return (adaptRefCount < 0);
1319 }
1320
1322 void postAdapt()
1323 {
1324 adaptActive = false;
1325 adaptRefCount = 0;
1326 }
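// Usage sketch (annotation): YaspGrid maps the adaptation interface onto global
// refinement, so a mark/adapt cycle looks like this:
//
//   for (const auto& element : elements(grid.leafGridView()))
//     grid.mark(1, element);       // only leaf elements can be marked
//   grid.preAdapt();
//   grid.adapt();                  // amounts to one global refinement here
//   grid.postAdapt();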
1327
1329 template<int cd, PartitionIteratorType pitype>
1330 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lbegin (int level) const
1331 {
1332 return levelbegin<cd,pitype>(level);
1333 }
1334
1336 template<int cd, PartitionIteratorType pitype>
1337 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lend (int level) const
1338 {
1339 return levelend<cd,pitype>(level);
1340 }
1341
1343 template<int cd>
1344 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lbegin (int level) const
1345 {
1346 return levelbegin<cd,All_Partition>(level);
1347 }
1348
1350 template<int cd>
1351 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lend (int level) const
1352 {
1353 return levelend<cd,All_Partition>(level);
1354 }
1355
1357 template<int cd, PartitionIteratorType pitype>
1358 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafbegin () const
1359 {
1360 return levelbegin<cd,pitype>(maxLevel());
1361 }
1362
1364 template<int cd, PartitionIteratorType pitype>
1365 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafend () const
1366 {
1367 return levelend<cd,pitype>(maxLevel());
1368 }
1369
1371 template<int cd>
1372 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafbegin () const
1373 {
1374 return levelbegin<cd,All_Partition>(maxLevel());
1375 }
1376
1378 template<int cd>
1379 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafend () const
1380 {
1381 return levelend<cd,All_Partition>(maxLevel());
1382 }
1383
1384 /** \brief obtain Entity from EntitySeed. */
1385 template <typename Seed>
1386 typename Traits::template Codim<Seed::codimension>::Entity
1387 entity(const Seed& seed) const
1388 {
1389 const int codim = Seed::codimension;
1390 YGridLevelIterator g = begin(seed.impl().level());
1391
1392 typedef typename Traits::template Codim<Seed::codimension>::Entity Entity;
1393 typedef YaspEntity<codim,dim,const YaspGrid> EntityImp;
1394 typedef typename YGrid::Iterator YIterator;
1395
1396 return Entity(EntityImp(g,YIterator(g->overlapfront[codim],seed.impl().coord(),seed.impl().offset())));
1397 }
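// Usage sketch (annotation): entity seeds are cheap, persistent handles that can
// be turned back into full entities later:
//
//   const auto seed = element.seed();
//   // ... store the seed, possibly release the entity ...
//   const auto restored = grid.entity(seed);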
1398
1400 int overlapSize (int level, [[maybe_unused]] int codim) const
1401 {
1402 YGridLevelIterator g = begin(level);
1403 return g->overlapSize;
1404 }
1405
1407 int overlapSize ([[maybe_unused]] int codim) const
1408 {
1409 YGridLevelIterator g = begin(maxLevel());
1410 return g->overlapSize;
1411 }
1412
1414 int ghostSize ([[maybe_unused]] int level, [[maybe_unused]] int codim) const
1415 {
1416 return 0;
1417 }
1418
1420 int ghostSize ([[maybe_unused]] int codim) const
1421 {
1422 return 0;
1423 }
1424
1426 int size (int level, int codim) const
1427 {
1428 YGridLevelIterator g = begin(level);
1429
1430 // sum over all components of the codimension
1431 int count = 0;
1432 typedef typename std::array<YGridComponent<Coordinates>, Dune::power(2,dim)>::iterator DAI;
1433 for (DAI it = g->overlapfront[codim].dataBegin(); it != g->overlapfront[codim].dataEnd(); ++it)
1434 count += it->totalsize();
1435
1436 return count;
1437 }
1438
1440 int size (int codim) const
1441 {
1442 return size(maxLevel(),codim);
1443 }
1444
1446 int size (int level, GeometryType type) const
1447 {
1448 return (type.isCube()) ? size(level,dim-type.dim()) : 0;
1449 }
1450
1452 int size (GeometryType type) const
1453 {
1454 return size(maxLevel(),type);
1455 }
1456
1458 size_t numBoundarySegments () const
1459 {
1460 return nBSegments;
1461 }
1462
1465 return _L;
1466 }
1467
1472 template<class DataHandleImp, class DataType>
1474 {
1475 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,level);
1476 }
1477
1482 template<class DataHandleImp, class DataType>
1484 {
1485 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,this->maxLevel());
1486 }
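// Usage sketch (annotation): communicate() expects a CommDataHandleIF
// implementation. A minimal handle exchanging one double per codim-0 entity,
// indexed by a grid view's index set (all names are illustrative only):
//
//   template<class GridView>
//   struct CellDataHandle
//     : Dune::CommDataHandleIF<CellDataHandle<GridView>, double>
//   {
//     CellDataHandle(const GridView& gv, std::vector<double>& data)
//       : gv_(gv), data_(data) {}
//
//     bool contains (int dim, int codim) const { return codim == 0; }
//     bool fixedSize (int dim, int codim) const { return true; }
//
//     template<class Entity>
//     std::size_t size (const Entity&) const { return 1; }
//
//     template<class Buffer, class Entity>
//     void gather (Buffer& buf, const Entity& e) const
//     { buf.write(data_[gv_.indexSet().index(e)]); }
//
//     template<class Buffer, class Entity>
//     void scatter (Buffer& buf, const Entity& e, std::size_t)
//     { buf.read(data_[gv_.indexSet().index(e)]); }
//
//     const GridView& gv_;
//     std::vector<double>& data_;
//   };
//
//   // copy interior/border values into the overlap copies on neighboring ranks
//   CellDataHandle<decltype(gridView)> handle(gridView, data);
//   grid.communicate(handle, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);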
1487
1492 template<class DataHandle, int codim>
1493 void communicateCodim (DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level) const
1494 {
1495 // check input
1496 if (!data.contains(dim,codim)) return; // should have been checked outside
1497
1498 // data types
1499 typedef typename DataHandle::DataType DataType;
1500
1501 // access to grid level
1502 YGridLevelIterator g = begin(level);
1503
1504 // find send/recv lists or throw error
1505 const YGridList<Coordinates>* sendlist = 0;
1506 const YGridList<Coordinates>* recvlist = 0;
1507
1508 if (iftype==InteriorBorder_InteriorBorder_Interface)
1509 {
1510 sendlist = &g->send_interiorborder_interiorborder[codim];
1511 recvlist = &g->recv_interiorborder_interiorborder[codim];
1512 }
1513 if (iftype==InteriorBorder_All_Interface)
1514 {
1515 sendlist = &g->send_interiorborder_overlapfront[codim];
1516 recvlist = &g->recv_overlapfront_interiorborder[codim];
1517 }
1518 if (iftype==Overlap_OverlapFront_Interface || iftype==Overlap_All_Interface)
1519 {
1520 sendlist = &g->send_overlap_overlapfront[codim];
1521 recvlist = &g->recv_overlapfront_overlap[codim];
1522 }
1523 if (iftype==All_All_Interface)
1524 {
1525 sendlist = &g->send_overlapfront_overlapfront[codim];
1526 recvlist = &g->recv_overlapfront_overlapfront[codim];
1527 }
1528
1529 // change communication direction?
1530 if (dir==BackwardCommunication)
1531 std::swap(sendlist,recvlist);
1532
1533 int cnt;
1534
1535 // Size computation (requires communication if variable size)
1536 std::vector<int> send_size(sendlist->size(),-1); // map rank to total number of objects (of type DataType) to be sent
1537 std::vector<int> recv_size(recvlist->size(),-1); // map rank to total number of objects (of type DataType) to be recvd
1538 std::vector<size_t*> send_sizes(sendlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be sent
1539 std::vector<size_t*> recv_sizes(recvlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be recvd
1540
1541 // define type to iterate over send and recv lists
1542 typedef typename YGridList<Coordinates>::Iterator ListIt;
1543
1544 if (data.fixedSize(dim,codim))
1545 {
1546 // fixed size: just take a dummy entity, size can be computed without communication
1547 cnt=0;
1548 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1549 {
1550 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1551 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1552 send_size[cnt] = is->grid.totalsize() * data.size(*it);
1553 cnt++;
1554 }
1555 cnt=0;
1556 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1557 {
1558 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1559 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1560 recv_size[cnt] = is->grid.totalsize() * data.size(*it);
1561 cnt++;
1562 }
1563 }
1564 else
1565 {
1566 // variable size case: sender side determines the size
1567 cnt=0;
1568 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1569 {
1570 // allocate send buffer for sizes per entity
1571 size_t *buf = new size_t[is->grid.totalsize()];
1572 send_sizes[cnt] = buf;
1573
1574 // loop over entities and ask for size
1575 int i=0; size_t n=0;
1576 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1577 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1578 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1579 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1580 for ( ; it!=itend; ++it)
1581 {
1582 buf[i] = data.size(*it);
1583 n += buf[i];
1584 i++;
1585 }
1586
1587 // now we know the size for this rank
1588 send_size[cnt] = n;
1589
1590 // hand over send request to torus class
1591 torus().send(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1592 cnt++;
1593 }
1594
1595 // allocate recv buffers for sizes and store receive request
1596 cnt=0;
1597 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1598 {
1599 // allocate recv buffer
1600 size_t *buf = new size_t[is->grid.totalsize()];
1601 recv_sizes[cnt] = buf;
1602
1603 // hand over recv request to torus class
1604 torus().recv(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1605 cnt++;
1606 }
1607
1608 // exchange all size buffers now
1609 torus().exchange();
1610
1611 // release send size buffers
1612 cnt=0;
1613 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1614 {
1615 delete[] send_sizes[cnt];
1616 send_sizes[cnt] = 0;
1617 cnt++;
1618 }
1619
1620 // process receive size buffers
1621 cnt=0;
1622 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1623 {
1624 // get recv buffer
1625 size_t *buf = recv_sizes[cnt];
1626
1627 // compute total size
1628 size_t n=0;
1629 for (int i=0; i<is->grid.totalsize(); ++i)
1630 n += buf[i];
1631
1632 // ... and store it
1633 recv_size[cnt] = n;
1634 ++cnt;
1635 }
1636 }
1637
1638
1639 // allocate & fill the send buffers & store send request
1640 std::vector<DataType*> sends(sendlist->size(), static_cast<DataType*>(0)); // store pointers to send buffers
1641 cnt=0;
1642 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1643 {
1644 // allocate send buffer
1645 DataType *buf = new DataType[send_size[cnt]];
1646
1647 // remember send buffer
1648 sends[cnt] = buf;
1649
1650 // make a message buffer
1651 MessageBuffer<DataType> mb(buf);
1652
1653 // fill send buffer; iterate over cells in intersection
1654 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1655 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1656 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1657 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1658 for ( ; it!=itend; ++it)
1659 data.gather(mb,*it);
1660
1661 // hand over send request to torus class
1662 torus().send(is->rank,buf,send_size[cnt]*sizeof(DataType));
1663 cnt++;
1664 }
1665
1666 // allocate recv buffers and store receive request
1667 std::vector<DataType*> recvs(recvlist->size(),static_cast<DataType*>(0)); // store pointers to recv buffers
1668 cnt=0;
1669 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1670 {
1671 // allocate recv buffer
1672 DataType *buf = new DataType[recv_size[cnt]];
1673
1674 // remember recv buffer
1675 recvs[cnt] = buf;
1676
1677 // hand over recv request to torus class
1678 torus().recv(is->rank,buf,recv_size[cnt]*sizeof(DataType));
1679 cnt++;
1680 }
1681
1682 // exchange all buffers now
1683 torus().exchange();
1684
1685 // release send buffers
1686 cnt=0;
1687 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1688 {
1689 delete[] sends[cnt];
1690 sends[cnt] = 0;
1691 cnt++;
1692 }
1693
1694 // process receive buffers and delete them
1695 cnt=0;
1696 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1697 {
1698 // get recv buffer
1699 DataType *buf = recvs[cnt];
1700
1701 // make a message buffer
1702 MessageBuffer<DataType> mb(buf);
1703
1704 // copy data from receive buffer; iterate over cells in intersection
1705 if (data.fixedSize(dim,codim))
1706 {
1707 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1708 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1709 size_t n=data.size(*it);
1710 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1711 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1712 for ( ; it!=itend; ++it)
1713 data.scatter(mb,*it,n);
1714 }
1715 else
1716 {
1717 int i=0;
1718 size_t *sbuf = recv_sizes[cnt];
1719 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1720 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1721 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1722 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1723 for ( ; it!=itend; ++it)
1724 data.scatter(mb,*it,sbuf[i++]);
1725 delete[] sbuf;
1726 }
1727
1728 // delete buffer
1729 delete[] buf; // this is where it crashes!
1730 cnt++;
1731 }
1732 }
1733
1734 // The new index sets from DDM 11.07.2005
1735 const typename Traits::GlobalIdSet& globalIdSet() const
1736 {
1737 return theglobalidset;
1738 }
1739
1740 const typename Traits::LocalIdSet& localIdSet() const
1741 {
1742 return theglobalidset;
1743 }
1744
1745 const typename Traits::LevelIndexSet& levelIndexSet(int level) const
1746 {
1747 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1748 return *(indexsets[level]);
1749 }
1750
1751 const typename Traits::LeafIndexSet& leafIndexSet() const
1752 {
1753 return leafIndexSet_;
1754 }
1755
1758 const Communication& comm () const
1759 {
1760 return ccobj;
1761 }
1762
1763 private:
1764
1765 // number of boundary segments of the level 0 grid
1766 int nBSegments;
1767
1768 // Index classes need access to the real entity
1769 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, true >;
1770 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, false >;
1771 friend class Dune::YaspGlobalIdSet<const Dune::YaspGrid<dim, Coordinates> >;
1772 friend class Dune::YaspPersistentContainerIndex<const Dune::YaspGrid<dim, Coordinates> >;
1773
1774 friend class Dune::YaspIntersectionIterator<const Dune::YaspGrid<dim, Coordinates> >;
1775 friend class Dune::YaspIntersection<const Dune::YaspGrid<dim, Coordinates> >;
1776 friend class Dune::YaspEntity<0, dim, const Dune::YaspGrid<dim, Coordinates> >;
1777
1778 template<int codim_, int dim_, class GridImp_, template<int,int,class> class EntityImp_>
1779 friend class Entity;
1780
1781 template<class DT>
1782 class MessageBuffer {
1783 public:
1784 // Constructor
1785 MessageBuffer (DT *p)
1786 {
1787 a=p;
1788 i=0;
1789 j=0;
1790 }
1791
1792 // write data to message buffer, acts like a stream !
1793 template<class Y>
1794 void write (const Y& data)
1795 {
1796 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1797 a[i++] = data;
1798 }
1799
1800 // read data from message buffer, acts like a stream !
1801 template<class Y>
1802 void read (Y& data) const
1803 {
1804 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1805 data = a[j++];
1806 }
1807
1808 private:
1809 DT *a;
1810 int i;
1811 mutable int j;
1812 };
1813
1815 template<int cd, PartitionIteratorType pitype>
1816 YaspLevelIterator<cd,pitype,GridImp> levelbegin (int level) const
1817 {
1818 YGridLevelIterator g = begin(level);
1819 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1820
1821 if (pitype==Interior_Partition)
1822 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].begin());
1823 if (pitype==InteriorBorder_Partition)
1824 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].begin());
1825 if (pitype==Overlap_Partition)
1826 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].begin());
1827 if (pitype<=All_Partition)
1828 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].begin());
1829 if (pitype==Ghost_Partition)
1830 return levelend <cd, pitype> (level);
1831
1832 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1833 }
1834
1836 template<int cd, PartitionIteratorType pitype>
1837 YaspLevelIterator<cd,pitype,GridImp> levelend (int level) const
1838 {
1839 YGridLevelIterator g = begin(level);
1840 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1841
1842 if (pitype==Interior_Partition)
1843 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].end());
1844 if (pitype==InteriorBorder_Partition)
1845 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].end());
1846 if (pitype==Overlap_Partition)
1847 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].end());
1848 if (pitype<=All_Partition || pitype == Ghost_Partition)
1849 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].end());
1850
1851 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1852 }
1853
1854 Communication ccobj;
1855
1856 Torus<Communication,dim> _torus;
1857
1858 std::vector< std::shared_ptr< YaspIndexSet<const YaspGrid<dim,Coordinates>, false > > > indexsets;
1859 YaspIndexSet<const YaspGrid<dim,Coordinates>, true> leafIndexSet_;
1860 YaspGlobalIdSet<const YaspGrid<dim,Coordinates> > theglobalidset;
1861
1863 iTupel _s;
1864 std::bitset<dim> _periodic;
1865 iTupel _coarseSize;
1866 ReservedVector<YGridLevel,32> _levels;
1867 int _overlap;
1868 bool keep_ovlp;
1869 int adaptRefCount;
1870 bool adaptActive;
1871 };
1872
1873 // Class template deduction guides
1874 template<typename ctype, int dim>
1875 YaspGrid(FieldVector<ctype, dim>,
1876 std::array<int, std::size_t{dim}>,
1877 std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1878 int = 1,
1879 YaspCommunication = YaspCommunication(),
1880 const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantCoordinates<ctype, dim>>::defaultPartitioner())
1881 -> YaspGrid< dim, EquidistantCoordinates<ctype, dim> >;
1882
1883 template<typename ctype, int dim>
1884 YaspGrid(FieldVector<ctype, dim>,
1885 FieldVector<ctype, dim>,
1886 std::array<int, std::size_t{dim}>,
1887 std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1888 int = 1,
1889 YaspCommunication = YaspCommunication(),
1890 const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantOffsetCoordinates<ctype, dim>>::defaultPartitioner())
1891 -> YaspGrid< dim, EquidistantOffsetCoordinates<ctype, dim> >;
1892
1893 template<typename ctype, std::size_t dim>
1894 YaspGrid(std::array<std::vector<ctype>, dim>,
1895 std::bitset<dim> = std::bitset<dim>{0ULL},
1896 int = 1,
1897 YaspCommunication = YaspCommunication(),
1898 const Yasp::Partitioning<dim>* = YaspGrid<int{dim}, TensorProductCoordinates<ctype, int{dim}>>::defaultPartitioner())
1899 -> YaspGrid< int{dim}, TensorProductCoordinates<ctype, int{dim}> >;
1900
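// Usage sketch for the deduction guides (assuming a 2d grid with double coordinates):
//
//   Dune::FieldVector<double, 2> domainExtent{1.0, 1.0};   // domain is [0,1] x [0,1]
//   std::array<int, 2> cells{8, 8};                        // cells per direction
//   Dune::YaspGrid grid(domainExtent, cells);
//   // deduces Dune::YaspGrid<2, Dune::EquidistantCoordinates<double, 2>>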
1902 template <int d, class CC>
1903 std::ostream& operator<< (std::ostream& s, const YaspGrid<d,CC>& grid)
1904 {
1905 int rank = grid.torus().rank();
1906
1907 s << "[" << rank << "]:" << " YaspGrid maxlevel=" << grid.maxLevel() << std::endl;
1908
1909 s << "Printing the torus: " <<std::endl;
1910 s << grid.torus() << std::endl;
1911
1912 for (typename YaspGrid<d,CC>::YGridLevelIterator g=grid.begin(); g!=grid.end(); ++g)
1913 {
1914 s << "[" << rank << "]: " << std::endl;
1915 s << "[" << rank << "]: " << "==========================================" << std::endl;
1916 s << "[" << rank << "]: " << "level=" << g->level() << std::endl;
1917
1918 for (int codim = 0; codim < d + 1; ++codim)
1919 {
1920 s << "[" << rank << "]: " << "overlapfront[" << codim << "]: " << g->overlapfront[codim] << std::endl;
1921 s << "[" << rank << "]: " << "overlap[" << codim << "]: " << g->overlap[codim] << std::endl;
1922 s << "[" << rank << "]: " << "interiorborder[" << codim << "]: " << g->interiorborder[codim] << std::endl;
1923 s << "[" << rank << "]: " << "interior[" << codim << "]: " << g->interior[codim] << std::endl;
1924
1925 typedef typename YGridList<CC>::Iterator I;
1926 for (I i=g->send_overlapfront_overlapfront[codim].begin();
1927 i!=g->send_overlapfront_overlapfront[codim].end(); ++i)
1928 s << "[" << rank << "]: " << " s_of_of[" << codim << "] to rank "
1929 << i->rank << " " << i->grid << std::endl;
1930
1931 for (I i=g->recv_overlapfront_overlapfront[codim].begin();
1932 i!=g->recv_overlapfront_overlapfront[codim].end(); ++i)
1933 s << "[" << rank << "]: " << " r_of_of[" << codim << "] to rank "
1934 << i->rank << " " << i->grid << std::endl;
1935
1936 for (I i=g->send_overlap_overlapfront[codim].begin();
1937 i!=g->send_overlap_overlapfront[codim].end(); ++i)
1938 s << "[" << rank << "]: " << " s_o_of[" << codim << "] to rank "
1939 << i->rank << " " << i->grid << std::endl;
1940
1941 for (I i=g->recv_overlapfront_overlap[codim].begin();
1942 i!=g->recv_overlapfront_overlap[codim].end(); ++i)
1943 s << "[" << rank << "]: " << " r_of_o[" << codim << "] to rank "
1944 << i->rank << " " << i->grid << std::endl;
1945
1946 for (I i=g->send_interiorborder_interiorborder[codim].begin();
1947 i!=g->send_interiorborder_interiorborder[codim].end(); ++i)
1948 s << "[" << rank << "]: " << " s_ib_ib[" << codim << "] to rank "
1949 << i->rank << " " << i->grid << std::endl;
1950
1951 for (I i=g->recv_interiorborder_interiorborder[codim].begin();
1952 i!=g->recv_interiorborder_interiorborder[codim].end(); ++i)
1953 s << "[" << rank << "]: " << " r_ib_ib[" << codim << "] to rank "
1954 << i->rank << " " << i->grid << std::endl;
1955
1956 for (I i=g->send_interiorborder_overlapfront[codim].begin();
1957 i!=g->send_interiorborder_overlapfront[codim].end(); ++i)
1958 s << "[" << rank << "]: " << " s_ib_of[" << codim << "] to rank "
1959 << i->rank << " " << i->grid << std::endl;
1960
1961 for (I i=g->recv_overlapfront_interiorborder[codim].begin();
1962 i!=g->recv_overlapfront_interiorborder[codim].end(); ++i)
1963 s << "[" << rank << "]: " << " r_of_ib[" << codim << "] to rank "
1964 << i->rank << " " << i->grid << std::endl;
1965 }
1966 }
1967
1968 s << std::endl;
1969
1970 return s;
1971 }
1972
1973 namespace Capabilities
1974 {
1975
1983 template<int dim, class Coordinates>
1984 struct hasBackupRestoreFacilities< YaspGrid<dim, Coordinates> >
1985 {
1986 static const bool v = true;
1987 };
1988
1992 template<int dim, class Coordinates>
1993 struct hasSingleGeometryType< YaspGrid<dim, Coordinates> >
1994 {
1995 static const bool v = true;
1996 static const unsigned int topologyId = GeometryTypes::cube(dim).id();
1997 };
1998
2002 template<int dim, class Coordinates>
2003 struct isCartesian< YaspGrid<dim, Coordinates> >
2004 {
2005 static const bool v = true;
2006 };
2007
2011 template<int dim, class Coordinates, int codim>
2012 struct hasEntity< YaspGrid<dim, Coordinates>, codim>
2013 {
2014 static const bool v = true;
2015 };
2016
2021 template<int dim, class Coordinates, int codim>
2022 struct hasEntityIterator<YaspGrid<dim, Coordinates>, codim>
2023 {
2024 static const bool v = true;
2025 };
2026
2030 template<int dim, int codim, class Coordinates>
2031 struct canCommunicate< YaspGrid< dim, Coordinates>, codim >
2032 {
2033 static const bool v = true;
2034 };
2035
2039 template<int dim, class Coordinates>
2040 struct isLevelwiseConforming< YaspGrid<dim, Coordinates> >
2041 {
2042 static const bool v = true;
2043 };
2044
2048 template<int dim, class Coordinates>
2049 struct isLeafwiseConforming< YaspGrid<dim, Coordinates> >
2050 {
2051 static const bool v = true;
2052 };
2053
2057 template<int dim, class Coordinates>
2058 struct viewThreadSafe< YaspGrid<dim, Coordinates> >
2059 {
2060 static const bool v = true;
2061 };
2062
2063 }
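  // Usage sketch: the capability traits above are compile-time constants, e.g.
  //
  //   using Grid = Dune::YaspGrid<2>;
  //   static_assert(Dune::Capabilities::isCartesian<Grid>::v, "YaspGrid is Cartesian");
  //   static_assert(Dune::Capabilities::hasEntity<Grid, Grid::dimension>::v, "vertices are available");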
2064
2065} // end namespace
2066
2067// Include the specialization of the StructuredGridFactory class for YaspGrid
2068#include <dune/grid/yaspgrid/structuredyaspgridfactory.hh>
2069// Include the specialization of the BackupRestoreFacility class for YaspGrid
2070#include <dune/grid/yaspgrid/backuprestore.hh>
2071
2072#endif
A geometry implementation for axis-aligned hypercubes.
Portable very large unsigned integers.
CommDataHandleIF describes the features of a data handle for communication in parallel runs using the...
Definition: datahandleif.hh:78
Wrapper class for entities.
Definition: entity.hh:66
Container for equidistant coordinates in a YaspGrid.
Definition: coordinates.hh:29
Container for equidistant coordinates in a YaspGrid with non-trivial origin.
Definition: coordinates.hh:131
static constexpr size_type size() noexcept
Obtain the number of elements stored in the vector.
Definition: fvector.hh:193
Unique label for each type of entities that can occur in DUNE grids.
Definition: type.hh:114
constexpr unsigned int dim() const
Return dimension of the type.
Definition: type.hh:360
constexpr bool isCube() const
Return true if entity is a cube of any dimension.
Definition: type.hh:324
constexpr unsigned int id() const
Return the topology id of the type.
Definition: type.hh:365
Definition: grid.hh:848
Base class for exceptions in Dune grid modules.
Definition: exceptions.hh:20
typename GridFamily::Traits::Communication Communication
A type that is a model of Dune::Communication. It provides a portable way for communication on the se...
Definition: grid.hh:515
Intersection of a mesh entity of codimension 0 ("element") with a "neighboring" element or with the d...
Definition: intersection.hh:164
A Vector class with statically reserved memory.
Definition: reservedvector.hh:47
constexpr iterator end() noexcept
Returns an iterator pointing to the end of the vector.
Definition: reservedvector.hh:273
constexpr size_type size() const noexcept
Returns number of elements in the vector.
Definition: reservedvector.hh:387
constexpr iterator begin() noexcept
Returns a iterator pointing to the beginning of the vector.
Definition: reservedvector.hh:237
constexpr void pop_back() noexcept
Erases the last element of the vector, O(1) time.
Definition: reservedvector.hh:227
constexpr reference back() noexcept
Returns reference to last element of vector.
Definition: reservedvector.hh:357
constexpr void resize(size_type s) noexcept
Specifies a new size for the vector.
Definition: reservedvector.hh:187
Coordinate container for a tensor product YaspGrid.
Definition: coordinates.hh:245
double partition(int rank, iTupel origin_in, iTupel size_in, iTupel &origin_out, iTupel &size_out) const
partition the given grid onto the torus and return the piece of the process with given rank; returns ...
Definition: torus.hh:239
iTupel coord() const
return own coordinates
Definition: torus.hh:100
int rank() const
return own rank
Definition: torus.hh:94
const iTupel & dims() const
return dimensions of torus
Definition: torus.hh:112
void recv(int rank, void *buffer, int size) const
store a receive request; buffers are received in order; handles also local requests with memcpy
Definition: torus.hh:374
void send(int rank, void *buffer, int size) const
store a send request; buffers are sent in order; handles also local requests with memcpy
Definition: torus.hh:361
int neighbors() const
return the number of neighbors, which is
Definition: torus.hh:203
ProcListIterator recvend() const
last process in receive list
Definition: torus.hh:355
ProcListIterator sendend() const
end of send list
Definition: torus.hh:343
ProcListIterator sendbegin() const
first process in send list
Definition: torus.hh:337
void exchange() const
exchange messages stored in request buffers; clear request buffers afterwards
Definition: torus.hh:387
ProcListIterator recvbegin() const
first process in receive list
Definition: torus.hh:349
Definition: ygrid.hh:75
YGridComponent< Coordinates > move(iTupel v) const
return grid moved by the vector v
Definition: ygrid.hh:263
YGridComponent< Coordinates > intersection(const YGridComponent< Coordinates > &r) const
Return YGridComponent of supergrid of self which is the intersection of self and another YGridCompone...
Definition: ygrid.hh:271
implements a collection of multiple std::deque<Intersection> Intersections with neighboring processor...
Definition: ygrid.hh:823
int size() const
return the size of the container, this is the sum of the sizes of all deques
Definition: ygrid.hh:953
Iterator end() const
return iterator pointing to the end of the container
Definition: ygrid.hh:929
Iterator begin() const
return iterator pointing to the begin of the container
Definition: ygrid.hh:923
Iterator over a collection of YGrids. A YGrid::Iterator is the heart of an entity in YaspGrid.
Definition: ygrid.hh:594
implements a collection of YGridComponents which form a codimension Entities of given codimension c n...
Definition: ygrid.hh:551
persistent, globally unique Ids
Definition: yaspgrididset.hh:25
[ provides Dune::Grid ]
Definition: yaspgrid.hh:166
YaspGridFamily< dim, Coordinates > GridFamily
the GridFamily of this grid
Definition: yaspgrid.hh:716
int ghostSize(int level, int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1414
const Torus< Communication, dim > & torus() const
return reference to torus
Definition: yaspgrid.hh:246
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lbegin(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1344
int size(int level, GeometryType type) const
number of entities per level and geometry type in this process
Definition: yaspgrid.hh:1446
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1372
void globalRefine(int refCount)
refine the grid refCount times.
Definition: yaspgrid.hh:1217
int getMark(const typename Traits::template Codim< 0 >::Entity &e) const
returns adaptation mark for given entity
Definition: yaspgrid.hh:1301
YaspGrid(std::array< std::vector< ctype >, std::size_t{dim}> coords, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Standard constructor for a tensorproduct YaspGrid.
Definition: yaspgrid.hh:1044
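A usage sketch for this constructor, assuming a 2d grid with double coordinates (the per-direction coordinate vectors define the grid lines and must be strictly increasing):
  std::array<std::vector<double>, 2> coords;
  coords[0] = {0.0, 0.1, 0.3, 0.6, 1.0};   // grid lines in x-direction
  coords[1] = {0.0, 0.5, 1.0};             // grid lines in y-direction
  Dune::YaspGrid<2, Dune::TensorProductCoordinates<double, 2>> grid(coords);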
int ghostSize(int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1420
int globalSize(int i) const
return number of cells on finest level in given direction on all processors
Definition: yaspgrid.hh:252
YaspGrid(Dune::FieldVector< ctype, dim > lowerleft, Dune::FieldVector< ctype, dim > upperright, std::array< int, std::size_t{dim}> s, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:975
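A usage sketch for this two-corner variant, which places the equidistant domain away from the origin (assuming 2d, double coordinates):
  Dune::FieldVector<double, 2> lowerLeft{-1.0, -1.0}, upperRight{1.0, 1.0};
  std::array<int, 2> cells{16, 16};
  Dune::YaspGrid<2, Dune::EquidistantOffsetCoordinates<double, 2>> grid(lowerLeft, upperRight, cells);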
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1365
void postAdapt()
clean up some markers
Definition: yaspgrid.hh:1322
const Dune::FieldVector< ctype, dim > & domainSize() const
returns the size of the physical domain
Definition: yaspgrid.hh:1464
YGridLevelIterator end() const
return iterator pointing to one past the finest level
Definition: yaspgrid.hh:307
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: yaspgrid.hh:1452
int maxLevel() const
Definition: yaspgrid.hh:1211
YaspGrid(Dune::FieldVector< ctype, dim > L, std::array< int, std::size_t{dim}> s, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>{0ULL}, int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:905
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1379
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1473
void intersections(const YGridComponent< Coordinates > &sendgrid, const YGridComponent< Coordinates > &recvgrid, std::deque< Intersection > &sendlist, std::deque< Intersection > &recvlist)
Construct list of intersections with neighboring processors.
Definition: yaspgrid.hh:562
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1358
bool preAdapt()
returns true, if the grid will be coarsened
Definition: yaspgrid.hh:1314
iTupel levelSize(int l) const
return size vector of the grid (in cells) on level l
Definition: yaspgrid.hh:270
bool mark(int refCount, const typename Traits::template Codim< 0 >::Entity &e)
Marks an entity to be refined/coarsened in a subsequent adapt.
Definition: yaspgrid.hh:1287
int overlapSize(int level, int codim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1400
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir) const
Definition: yaspgrid.hh:1483
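A sketch of a matching data handle for this call, given a YaspGrid<2> object grid (the names Handle and values are illustrative, not part of YaspGrid): it ships one double per codim-0 entity and accumulates received values.
  struct Handle : Dune::CommDataHandleIF<Handle, double>
  {
    using GridView = Dune::YaspGrid<2>::LeafGridView;
    GridView gv;                   // grid views are cheap to copy, so store by value
    std::vector<double>& values;   // one value per leaf element, indexed by the leaf index set

    Handle(GridView gv_, std::vector<double>& v) : gv(gv_), values(v) {}

    bool contains (int /*dim*/, int codim) const { return codim == 0; }
    bool fixedSize (int /*dim*/, int /*codim*/) const { return true; }

    template<class Entity>
    std::size_t size (const Entity&) const { return 1; }

    template<class Buffer, class Entity>
    void gather (Buffer& buf, const Entity& e) const
    { buf.write(values[gv.indexSet().index(e)]); }

    template<class Buffer, class Entity>
    void scatter (Buffer& buf, const Entity& e, std::size_t /*n*/)
    { double v; buf.read(v); values[gv.indexSet().index(e)] += v; }
  };
  // Handle handle(grid.leafGridView(), values);
  // grid.communicate(handle, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);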
int size(int codim) const
number of leaf entities per codim in this process
Definition: yaspgrid.hh:1440
bool isPeriodic(int i) const
return whether the grid is periodic in direction i
Definition: yaspgrid.hh:279
void refineOptions(bool keepPhysicalOverlap)
set options for refinement
Definition: yaspgrid.hh:1271
int size(int level, int codim) const
number of entities per level and codim in this process
Definition: yaspgrid.hh:1426
YaspGrid(const Coordinates &coordinates, std::bitset< dim > periodic=std::bitset< dim >(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:732
bool adapt()
map adapt to global refine
Definition: yaspgrid.hh:1307
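Since adapt() maps to global refinement here, the standard adaptation cycle degenerates roughly to the following sketch (given a YaspGrid object grid):
  for (const auto& element : elements(grid.leafGridView()))
    grid.mark(1, element);    // request refinement
  grid.preAdapt();
  grid.adapt();               // refines the whole YaspGrid by one level
  grid.postAdapt();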
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lend(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1351
ReservedVector< YGridLevel, 32 >::const_iterator YGridLevelIterator
Iterator over the grid levels.
Definition: yaspgrid.hh:290
size_t numBoundarySegments() const
returns the number of boundary segments within the macro grid
Definition: yaspgrid.hh:1458
void communicateCodim(DataHandle &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1493
void makelevel(const Coordinates &coords, std::bitset< dim > periodic, iTupel o_interior, int overlap)
Make a new YGridLevel structure.
Definition: yaspgrid.hh:327
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lend(int level) const
Iterator to one past the last entity of given codim on level for partition type.
Definition: yaspgrid.hh:1337
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lbegin(int level) const
Iterator to the first entity of given codim on level for partition type.
Definition: yaspgrid.hh:1330
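A sketch of a level traversal with these iterators, given a YaspGrid object grid (level 0, codimension 0, interior entities only):
  for (auto it = grid.lbegin<0, Dune::Interior_Partition>(0);
       it != grid.lend<0, Dune::Interior_Partition>(0); ++it)
  {
    const auto& element = *it;   // a codim-0 entity on level 0
  }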
iTupel globalSize() const
return number of cells on finest level on all processors
Definition: yaspgrid.hh:258
const Communication & comm() const
return a communication object
Definition: yaspgrid.hh:1758
int overlapSize(int odim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1407
YGridLevelIterator begin(int i) const
return iterator pointing to given level
Definition: yaspgrid.hh:299
Coordinates::ctype ctype
Type used for coordinates.
Definition: yaspgrid.hh:179
int levelSize(int l, int i) const
return size of the grid (in cells) on level l in direction i
Definition: yaspgrid.hh:264
YGridLevelIterator begin() const
return iterator pointing to coarsest level
Definition: yaspgrid.hh:293
YaspHierarchicIterator enables iteration over son entities of codim 0.
Definition: yaspgridhierarchiciterator.hh:20
Implementation of Level- and LeafIndexSets for YaspGrid.
Definition: yaspgridindexsets.hh:25
YaspIntersectionIterator enables iteration over intersections with neighboring codim 0 entities.
Definition: yaspgridintersectioniterator.hh:22
YaspIntersection provides data about intersection with neighboring codim 0 entities.
Definition: yaspgridintersection.hh:22
Iterates over entities of one grid level.
Definition: yaspgridleveliterator.hh:19
implement a consecutive index for all entities of given codim of a YaspGrid
Definition: yaspgridpersistentcontainer.hh:35
a base class for the yaspgrid partitioning strategy
Definition: partitioning.hh:38
A set of traits classes to store static information about grid implementation.
Different resources needed by all grid implementations.
Provides base classes for index and id sets.
Implements an utility class that provides collective communication methods for sequential programs.
This provides container classes for the coordinates to be used in YaspGrid Upon implementation of the...
Describes the parallel communication interface class for MessageBuffers and DataHandles.
Traits for type conversions and type information.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
auto periodic(RawPreBasisIndicator &&rawPreBasisIndicator, PIS &&periodicIndexSet)
Create a pre-basis factory that can create a periodic pre-basis.
Definition: periodicbasis.hh:203
CommunicationDirection
Define a type for communication direction parameter.
Definition: gridenums.hh:170
InterfaceType
Parameter to be used for the communication functions.
Definition: gridenums.hh:86
@ All_Partition
all entities
Definition: gridenums.hh:141
@ Interior_Partition
only interior entities
Definition: gridenums.hh:137
@ InteriorBorder_Partition
interior and border entities
Definition: gridenums.hh:138
@ Overlap_Partition
interior, border, and overlap entities
Definition: gridenums.hh:139
@ Ghost_Partition
only ghost entities
Definition: gridenums.hh:142
@ BackwardCommunication
reverse communication direction
Definition: gridenums.hh:172
@ InteriorBorder_All_Interface
send interior and border, receive all entities
Definition: gridenums.hh:88
@ All_All_Interface
send all and receive all entities
Definition: gridenums.hh:91
@ Overlap_All_Interface
send overlap, receive all entities
Definition: gridenums.hh:90
@ Overlap_OverlapFront_Interface
send overlap, receive overlap and front entities
Definition: gridenums.hh:89
@ InteriorBorder_InteriorBorder_Interface
send/receive interior and border entities
Definition: gridenums.hh:87
constexpr GeometryType cube(unsigned int dim)
Returns a GeometryType representing a hypercube of dimension dim.
Definition: type.hh:462
constexpr auto max
Function object that returns the greater of the given values.
Definition: hybridutilities.hh:484
Some useful basic math stuff.
Implements an utility class that provides MPI's collective communication methods.
Helpers for dealing with MPI.
Dune namespace.
Definition: alignedallocator.hh:13
constexpr std::integer_sequence< T, II..., T(IN)> push_back(std::integer_sequence< T, II... >, std::integral_constant< T, IN >={})
Append an index IN to the back of the sequence.
Definition: integersequence.hh:69
constexpr Base power(Base m, Exponent p)
Power method for integer exponents.
Definition: math.hh:75
constexpr std::bool_constant<(sizeof...(II)==0)> empty(std::integer_sequence< T, II... >)
Checks whether the sequence is empty.
Definition: integersequence.hh:80
constexpr std::integral_constant< T, I0 > front(std::integer_sequence< T, I0, II... >)
Return the first entry of the sequence.
Definition: integersequence.hh:39
constexpr auto back(std::integer_sequence< T, II... > seq)
Return the last entry of the sequence.
Definition: integersequence.hh:44
An stl-compliant random-access container which stores everything on the stack.
specialize with 'true' for all codims that a grid can communicate data on (default=false)
Definition: capabilities.hh:97
specialize with 'true' for all codims that a grid provides an iterator for (default=hasEntity<codim>:...
Definition: capabilities.hh:74
Specialize with 'true' for all codims that a grid implements entities for. (default=false)
Definition: capabilities.hh:58
Specialize with 'true' if the codimension 0 entity of the grid has only one possible geometry typ...
Definition: capabilities.hh:27
Specialize with 'true' if the grid is a Cartesian grid. Cartesian grids satisfy the following propert...
Definition: capabilities.hh:48
Specialize with 'true' if implementation guarantees a conforming leaf grid. (default=false)
Definition: capabilities.hh:115
Specialize with 'true' if implementation guarantees conforming level grids. (default=false)
Definition: capabilities.hh:106
Specialize with 'true' if the grid implementation is thread safe, while it is not modified....
Definition: capabilities.hh:169
Static tag representing a codimension.
Definition: dimension.hh:24
A traits struct that collects all associated types of one grid model.
Definition: grid.hh:1013
IndexSet< const GridImp, LeafIndexSetImp, LeafIndexType, LeafGeometryTypes > LeafIndexSet
The type of the leaf index set.
Definition: grid.hh:1082
IndexSet< const GridImp, LevelIndexSetImp, LevelIndexType, LevelGeometryTypes > LevelIndexSet
The type of the level index set.
Definition: grid.hh:1080
IdSet< const GridImp, LocalIdSetImp, LIDType > LocalIdSet
The type of the local id set.
Definition: grid.hh:1086
IdSet< const GridImp, GlobalIdSetImp, GIDType > GlobalIdSet
The type of the global id set.
Definition: grid.hh:1084
type describing an intersection with a neighboring processor
Definition: ygrid.hh:829
Specialization of the StructuredGridFactory class for YaspGrid.
This file provides the infrastructure for toroidal communication in YaspGrid.
A unique label for each type of element that can occur in a grid.
the YaspEntity class and its specializations
The YaspEntitySeed class.
The YaspGeometry class and its specializations.
level-wise, non-persistent, consecutive indices for YaspGrid
The YaspIntersection class.
The YaspIntersectionIterator class.
The YaspLevelIterator class.
Specialization of the PersistentContainer for YaspGrid.
This provides a YGrid, the elemental component of the yaspgrid implementation.