DUNE-FEM (unstable)

yaspgrid.hh
Go to the documentation of this file.
1// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_GRID_YASPGRID_HH
6#define DUNE_GRID_YASPGRID_HH
7
8#include <cstdint>
9#include <iostream>
10#include <iterator>
11#include <vector>
12#include <algorithm>
13#include <stack>
14#include <type_traits>
15
16#include <dune/grid/common/backuprestore.hh>
17#include <dune/grid/common/grid.hh> // the grid base classes
18#include <dune/grid/common/capabilities.hh> // the capabilities
19#include <dune/common/hybridutilities.hh>
21#include <dune/common/math.hh>
27#include <dune/geometry/type.hh>
30
31
32#if HAVE_MPI
34#endif
35
43namespace Dune {
44
45 /* some sizes for building global ids
46 */
47 const int yaspgrid_dim_bits = 24; // bits for encoding each dimension
48 const int yaspgrid_level_bits = 5; // bits for encoding level number
49
50
51 //************************************************************************
52 // forward declaration of templates
53
54 template<int dim, class Coordinates> class YaspGrid;
55 template<int mydim, int cdim, class GridImp> class YaspGeometry;
56 template<int codim, int dim, class GridImp> class YaspEntity;
57 template<int codim, class GridImp> class YaspEntitySeed;
58 template<int codim, PartitionIteratorType pitype, class GridImp> class YaspLevelIterator;
59 template<class GridImp> class YaspIntersectionIterator;
60 template<class GridImp> class YaspIntersection;
61 template<class GridImp> class YaspHierarchicIterator;
62 template<class GridImp, bool isLeafIndexSet> class YaspIndexSet;
63 template<class GridImp> class YaspGlobalIdSet;
64 template<class GridImp> class YaspPersistentContainerIndex;
65
66} // namespace Dune
67
79#include <dune/grid/yaspgrid/yaspgrididset.hh>
81
82namespace Dune {
83
84#if HAVE_MPI
85 using YaspCommunication = Communication<MPI_Comm>;
86#else
87 using YaspCommunication = Communication<No_Comm>;
88#endif
89
90 template<int dim, class Coordinates>
91 struct YaspGridFamily
92 {
93 typedef YaspCommunication CCType;
94
95 typedef GridTraits<dim, // dimension of the grid
96 dim, // dimension of the world space
98 YaspGeometry,YaspEntity,
99 YaspLevelIterator, // type used for the level iterator
100 YaspIntersection, // leaf intersection
101 YaspIntersection, // level intersection
102 YaspIntersectionIterator, // leaf intersection iter
103 YaspIntersectionIterator, // level intersection iter
104 YaspHierarchicIterator,
105 YaspLevelIterator, // type used for the leaf(!) iterator
106 YaspIndexSet< const YaspGrid< dim, Coordinates >, false >, // level index set
107 YaspIndexSet< const YaspGrid< dim, Coordinates >, true >, // leaf index set
108 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
109 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
110 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
111 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
112 CCType,
113 DefaultLevelGridViewTraits, DefaultLeafGridViewTraits,
114 YaspEntitySeed,
115 YaspGeometry,
116 unsigned int,
117 std::array<GeometryType,1>>
118 Traits;
119 };
120
121#ifndef DOXYGEN
122 template<int dim, int codim>
123 struct YaspCommunicateMeta {
124 template<class G, class DataHandle>
125 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
126 {
127 if (data.contains(dim,codim))
128 {
129 g.template communicateCodim<DataHandle,codim>(data,iftype,dir,level);
130 }
131 YaspCommunicateMeta<dim,codim-1>::comm(g,data,iftype,dir,level);
132 }
133 };
134
135 template<int dim>
136 struct YaspCommunicateMeta<dim,0> {
137 template<class G, class DataHandle>
138 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
139 {
140 if (data.contains(dim,0))
141 g.template communicateCodim<DataHandle,0>(data,iftype,dir,level);
142 }
143 };
144#endif
145
146 //************************************************************************
163 template<int dim, class Coordinates = EquidistantCoordinates<double, dim> >
165 : public GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >
166 {
167
168 template<int, PartitionIteratorType, typename>
169 friend class YaspLevelIterator;
170
171 template<typename>
172 friend class YaspHierarchicIterator;
173
175 protected:
176
177 public:
179 typedef typename Coordinates::ctype ctype;
180
181 using Communication = typename Base::Communication;
182
183#ifndef DOXYGEN
184 typedef typename Dune::YGrid<Coordinates> YGrid;
186
189 struct YGridLevel {
190
192 int level() const
193 {
194 return level_;
195 }
196
197 Coordinates coords;
198
199 std::array<YGrid, dim+1> overlapfront;
200 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlapfront_data;
201 std::array<YGrid, dim+1> overlap;
202 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlap_data;
203 std::array<YGrid, dim+1> interiorborder;
204 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interiorborder_data;
205 std::array<YGrid, dim+1> interior;
206 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interior_data;
207
208 std::array<YGridList<Coordinates>,dim+1> send_overlapfront_overlapfront;
209 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlapfront_overlapfront_data;
210 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlapfront;
211 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlapfront_data;
212
213 std::array<YGridList<Coordinates>,dim+1> send_overlap_overlapfront;
214 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlap_overlapfront_data;
215 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlap;
216 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlap_data;
217
218 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_interiorborder;
219 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_interiorborder_data;
220 std::array<YGridList<Coordinates>,dim+1> recv_interiorborder_interiorborder;
221 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_interiorborder_interiorborder_data;
222
223 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_overlapfront;
224 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_overlapfront_data;
225 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_interiorborder;
226 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_interiorborder_data;
227
228 // general
229 YaspGrid<dim,Coordinates>* mg; // each grid level knows its multigrid
230 int overlapSize; // in mesh cells on this level
231 bool keepOverlap;
232
234 int level_;
235 };
236
238 typedef std::array<int, dim> iTupel;
239 typedef FieldVector<ctype, dim> fTupel;
240
241 // communication tag used by multigrid
242 enum { tag = 17 };
243#endif
244
247 {
248 return _torus;
249 }
250
252 int globalSize(int i) const
253 {
254 return levelSize(maxLevel(),i);
255 }
256
258 iTupel globalSize() const
259 {
260 return levelSize(maxLevel());
261 }
262
264 int levelSize(int l, int i) const
265 {
266 return _coarseSize[i] * (1 << l);
267 }
268
270 iTupel levelSize(int l) const
271 {
272 iTupel s;
273 for (int i=0; i<dim; ++i)
274 s[i] = levelSize(l,i);
275 return s;
276 }
277
279 bool isPeriodic(int i) const
280 {
281 return _periodic[i];
282 }
283
284 bool getRefineOption() const
285 {
286 return keep_ovlp;
287 }
288
291
294 {
295 return _levels.begin();
296 }
297
300 {
301 if (i<0 || i>maxLevel())
302 DUNE_THROW(GridError, "level not existing");
303 return std::next(_levels.begin(), i);
304 }
305
308 {
309 return _levels.end();
310 }
311
312 // static method to create the default partitioning strategy
313 static const Yasp::Partitioning<dim>* defaultPartitioner()
314 {
315 static Yasp::DefaultPartitioning<dim> lb;
316 return & lb;
317 }
318
319 protected:
327 void makelevel (const Coordinates& coords, std::bitset<dim> periodic, iTupel o_interior, int overlap)
328 {
329 YGridLevel& g = _levels.back();
330 g.overlapSize = overlap;
331 g.mg = this;
332 g.level_ = maxLevel();
333 g.coords = coords;
334 g.keepOverlap = keep_ovlp;
335
336 using Dune::power;
337
338 // set the inserting positions in the corresponding arrays of YGridLevelStructure
339 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlapfront_it = g.overlapfront_data.begin();
340 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlap_it = g.overlap_data.begin();
341 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interiorborder_it = g.interiorborder_data.begin();
342 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interior_it = g.interior_data.begin();
343
344 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
345 send_overlapfront_overlapfront_it = g.send_overlapfront_overlapfront_data.begin();
346 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
347 recv_overlapfront_overlapfront_it = g.recv_overlapfront_overlapfront_data.begin();
348
349 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
350 send_overlap_overlapfront_it = g.send_overlap_overlapfront_data.begin();
351 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
352 recv_overlapfront_overlap_it = g.recv_overlapfront_overlap_data.begin();
353
354 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
355 send_interiorborder_interiorborder_it = g.send_interiorborder_interiorborder_data.begin();
356 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
357 recv_interiorborder_interiorborder_it = g.recv_interiorborder_interiorborder_data.begin();
358
359 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
360 send_interiorborder_overlapfront_it = g.send_interiorborder_overlapfront_data.begin();
361 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
362 recv_overlapfront_interiorborder_it = g.recv_overlapfront_interiorborder_data.begin();
363
364 // have a null array for constructor calls around
365 std::array<int,dim> n;
366 std::fill(n.begin(), n.end(), 0);
367
368 // determine origin of the grid with overlap and store whether an overlap area exists in direction i.
369 std::bitset<dim> ovlp_low(0ULL);
370 std::bitset<dim> ovlp_up(0ULL);
371
372 iTupel o_overlap;
373 iTupel s_overlap;
374
375 // determine at where we have overlap and how big the size of the overlap partition is
376 for (int i=0; i<dim; i++)
377 {
378 // the coordinate container has been constructed to hold the entire grid on
379 // this processor, including overlap. this is the element size.
380 s_overlap[i] = g.coords.size(i);
381
382 //in the periodic case there is always overlap
383 if (periodic[i])
384 {
385 o_overlap[i] = o_interior[i]-overlap;
386 ovlp_low[i] = true;
387 ovlp_up[i] = true;
388 }
389 else
390 {
391 //check lower boundary
392 if (o_interior[i] - overlap < 0)
393 o_overlap[i] = 0;
394 else
395 {
396 o_overlap[i] = o_interior[i] - overlap;
397 ovlp_low[i] = true;
398 }
399
400 //check upper boundary
401 if (o_overlap[i] + g.coords.size(i) < globalSize(i))
402 ovlp_up[i] = true;
403 }
404 }
405
406 for (unsigned int codim = 0; codim < dim + 1; codim++)
407 {
408 // set the begin iterator for the corresponding ygrids
409 g.overlapfront[codim].setBegin(&*overlapfront_it);
410 g.overlap[codim].setBegin(&*overlap_it);
411 g.interiorborder[codim].setBegin(&*interiorborder_it);
412 g.interior[codim].setBegin(&*interior_it);
413 g.send_overlapfront_overlapfront[codim].setBegin(send_overlapfront_overlapfront_it);
414 g.recv_overlapfront_overlapfront[codim].setBegin(recv_overlapfront_overlapfront_it);
415 g.send_overlap_overlapfront[codim].setBegin(send_overlap_overlapfront_it);
416 g.recv_overlapfront_overlap[codim].setBegin(recv_overlapfront_overlap_it);
417 g.send_interiorborder_interiorborder[codim].setBegin(send_interiorborder_interiorborder_it);
418 g.recv_interiorborder_interiorborder[codim].setBegin(recv_interiorborder_interiorborder_it);
419 g.send_interiorborder_overlapfront[codim].setBegin(send_interiorborder_overlapfront_it);
420 g.recv_overlapfront_interiorborder[codim].setBegin(recv_overlapfront_interiorborder_it);
421
422 // find all combinations of unit vectors that span entities of the given codimension
423 for (unsigned int index = 0; index < (1<<dim); index++)
424 {
425 // check whether the given shift is of our codimension
426 std::bitset<dim> r(index);
427 if (r.count() != dim-codim)
428 continue;
429
430 // get an origin and a size array for subsequent modification
431 std::array<int,dim> origin(o_overlap);
432 std::array<int,dim> size(s_overlap);
433
434 // build overlapfront
435 // we have to extend the element size by one in all directions without shift.
436 for (int i=0; i<dim; i++)
437 if (!r[i])
438 size[i]++;
439 *overlapfront_it = YGridComponent<Coordinates>(origin, r, &g.coords, size, n, size);
440
441 // build overlap
442 for (int i=0; i<dim; i++)
443 {
444 if (!r[i])
445 {
446 if (ovlp_low[i])
447 {
448 origin[i]++;
449 size[i]--;
450 }
451 if (ovlp_up[i])
452 size[i]--;
453 }
454 }
455 *overlap_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
456
457 // build interiorborder
458 for (int i=0; i<dim; i++)
459 {
460 if (ovlp_low[i])
461 {
462 origin[i] += overlap;
463 size[i] -= overlap;
464 if (!r[i])
465 {
466 origin[i]--;
467 size[i]++;
468 }
469 }
470 if (ovlp_up[i])
471 {
472 size[i] -= overlap;
473 if (!r[i])
474 size[i]++;
475 }
476 }
477 *interiorborder_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
478
479 // build interior
480 for (int i=0; i<dim; i++)
481 {
482 if (!r[i])
483 {
484 if (ovlp_low[i])
485 {
486 origin[i]++;
487 size[i]--;
488 }
489 if (ovlp_up[i])
490 size[i]--;
491 }
492 }
493 *interior_it = YGridComponent<Coordinates>(origin, size, *overlapfront_it);
494
495 intersections(*overlapfront_it,*overlapfront_it,*send_overlapfront_overlapfront_it, *recv_overlapfront_overlapfront_it);
496 intersections(*overlap_it,*overlapfront_it,*send_overlap_overlapfront_it, *recv_overlapfront_overlap_it);
497 intersections(*interiorborder_it,*interiorborder_it,*send_interiorborder_interiorborder_it,*recv_interiorborder_interiorborder_it);
498 intersections(*interiorborder_it,*overlapfront_it,*send_interiorborder_overlapfront_it,*recv_overlapfront_interiorborder_it);
499
500 // advance all iterators pointing to the next insertion point
501 ++overlapfront_it;
502 ++overlap_it;
503 ++interiorborder_it;
504 ++interior_it;
505 ++send_overlapfront_overlapfront_it;
506 ++recv_overlapfront_overlapfront_it;
507 ++send_overlap_overlapfront_it;
508 ++recv_overlapfront_overlap_it;
509 ++send_interiorborder_interiorborder_it;
510 ++recv_interiorborder_interiorborder_it;
511 ++send_interiorborder_overlapfront_it;
512 ++recv_overlapfront_interiorborder_it;
513 }
514
515 // set end iterators in the corresponding ygrids
516 g.overlapfront[codim].finalize(&*overlapfront_it);
517 g.overlap[codim].finalize(&*overlap_it);
518 g.interiorborder[codim].finalize(&*interiorborder_it);
519 g.interior[codim].finalize(&*interior_it);
520 g.send_overlapfront_overlapfront[codim].finalize(send_overlapfront_overlapfront_it,g.overlapfront[codim]);
521 g.recv_overlapfront_overlapfront[codim].finalize(recv_overlapfront_overlapfront_it,g.overlapfront[codim]);
522 g.send_overlap_overlapfront[codim].finalize(send_overlap_overlapfront_it,g.overlapfront[codim]);
523 g.recv_overlapfront_overlap[codim].finalize(recv_overlapfront_overlap_it,g.overlapfront[codim]);
524 g.send_interiorborder_interiorborder[codim].finalize(send_interiorborder_interiorborder_it,g.overlapfront[codim]);
525 g.recv_interiorborder_interiorborder[codim].finalize(recv_interiorborder_interiorborder_it,g.overlapfront[codim]);
526 g.send_interiorborder_overlapfront[codim].finalize(send_interiorborder_overlapfront_it,g.overlapfront[codim]);
527 g.recv_overlapfront_interiorborder[codim].finalize(recv_overlapfront_interiorborder_it,g.overlapfront[codim]);
528 }
529 }
530
531#ifndef DOXYGEN
540 struct mpifriendly_ygrid {
541 mpifriendly_ygrid ()
542 {
543 std::fill(origin.begin(), origin.end(), 0);
544 std::fill(size.begin(), size.end(), 0);
545 }
546 mpifriendly_ygrid (const YGridComponent<Coordinates>& grid)
547 : origin(grid.origin()), size(grid.size())
548 {}
549 iTupel origin;
550 iTupel size;
551 };
552#endif
553
563 std::deque<Intersection>& sendlist, std::deque<Intersection>& recvlist)
564 {
565 iTupel size = globalSize();
566
567 // the exchange buffers
568 std::vector<YGridComponent<Coordinates> > send_recvgrid(_torus.neighbors());
569 std::vector<YGridComponent<Coordinates> > recv_recvgrid(_torus.neighbors());
570 std::vector<YGridComponent<Coordinates> > send_sendgrid(_torus.neighbors());
571 std::vector<YGridComponent<Coordinates> > recv_sendgrid(_torus.neighbors());
572
573 // new exchange buffers to send simple struct without virtual functions
574 std::vector<mpifriendly_ygrid> mpifriendly_send_recvgrid(_torus.neighbors());
575 std::vector<mpifriendly_ygrid> mpifriendly_recv_recvgrid(_torus.neighbors());
576 std::vector<mpifriendly_ygrid> mpifriendly_send_sendgrid(_torus.neighbors());
577 std::vector<mpifriendly_ygrid> mpifriendly_recv_sendgrid(_torus.neighbors());
578
579 // fill send buffers; iterate over all neighboring processes
580 // non-periodic case is handled automatically because intersection will be zero
581 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
582 {
583 // determine if we communicate with this neighbor (and what)
584 bool skip = false;
585 iTupel coord = _torus.coord(); // my coordinates
586 iTupel delta = i.delta(); // delta to neighbor
587 iTupel nb = coord; // the neighbor
588 for (int k=0; k<dim; k++) nb[k] += delta[k];
589 iTupel v; // grid movement
590 std::fill(v.begin(), v.end(), 0);
591
592 for (int k=0; k<dim; k++)
593 {
594 if (nb[k]<0)
595 {
596 if (_periodic[k])
597 v[k] += size[k];
598 else
599 skip = true;
600 }
601 if (nb[k]>=_torus.dims(k))
602 {
603 if (_periodic[k])
604 v[k] -= size[k];
605 else
606 skip = true;
607 }
608 // neither might be true, then v=0
609 }
610
611 // store moved grids in send buffers
612 if (!skip)
613 {
614 send_sendgrid[i.index()] = sendgrid.move(v);
615 send_recvgrid[i.index()] = recvgrid.move(v);
616 }
617 else
618 {
619 send_sendgrid[i.index()] = YGridComponent<Coordinates>();
620 send_recvgrid[i.index()] = YGridComponent<Coordinates>();
621 }
622 }
623
624 // issue send requests for sendgrid being sent to all neighbors
625 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
626 {
627 mpifriendly_send_sendgrid[i.index()] = mpifriendly_ygrid(send_sendgrid[i.index()]);
628 _torus.send(i.rank(), &mpifriendly_send_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
629 }
630
631 // issue recv requests for sendgrids of neighbors
632 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
633 _torus.recv(i.rank(), &mpifriendly_recv_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
634
635 // exchange the sendgrids
636 _torus.exchange();
637
638 // issue send requests for recvgrid being sent to all neighbors
639 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
640 {
641 mpifriendly_send_recvgrid[i.index()] = mpifriendly_ygrid(send_recvgrid[i.index()]);
642 _torus.send(i.rank(), &mpifriendly_send_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
643 }
644
645 // issue recv requests for recvgrid of neighbors
646 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
647 _torus.recv(i.rank(), &mpifriendly_recv_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
648
649 // exchange the recvgrid
650 _torus.exchange();
651
652 // process receive buffers and compute intersections
653 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
654 {
655 // what must be sent to this neighbor
656 Intersection send_intersection{};
657 mpifriendly_ygrid yg = mpifriendly_recv_recvgrid[i.index()];
658 recv_recvgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
659 send_intersection.grid = sendgrid.intersection(recv_recvgrid[i.index()]);
660 send_intersection.rank = i.rank();
661 send_intersection.distance = i.distance();
662 if (!send_intersection.grid.empty()) sendlist.push_front(send_intersection);
663
664 Intersection recv_intersection{};
665 yg = mpifriendly_recv_sendgrid[i.index()];
666 recv_sendgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
667 recv_intersection.grid = recvgrid.intersection(recv_sendgrid[i.index()]);
668 recv_intersection.rank = i.rank();
669 recv_intersection.distance = i.distance();
670 if(!recv_intersection.grid.empty()) recvlist.push_back(recv_intersection);
671 }
672 }
673
674 protected:
675
676 typedef const YaspGrid<dim,Coordinates> GridImp;
677
678 void init()
679 {
680 indexsets.push_back( std::make_shared< YaspIndexSet<const YaspGrid<dim, Coordinates>, false > >(*this,0) );
681 boundarysegmentssize();
682 }
683
684 void boundarysegmentssize()
685 {
686 // sizes of local macro grid
687 std::array<int, dim> sides;
688 {
689 for (int i=0; i<dim; i++)
690 {
691 sides[i] =
692 ((begin()->overlap[0].dataBegin()->origin(i) == 0)+
693 (begin()->overlap[0].dataBegin()->origin(i) + begin()->overlap[0].dataBegin()->size(i)
694 == levelSize(0,i)));
695 }
696 }
697 nBSegments = 0;
698 for (int k=0; k<dim; k++)
699 {
700 int offset = 1;
701 for (int l=0; l<dim; l++)
702 {
703 if (l==k) continue;
704 offset *= begin()->overlap[0].dataBegin()->size(l);
705 }
706 nBSegments += sides[k]*offset;
707 }
708 }
709
710 public:
711
712 // define the persistent index type
713 typedef bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim> PersistentIndexType;
714
716 typedef YaspGridFamily<dim, Coordinates> GridFamily;
717 // the Traits
719
720 // need for friend declarations in entity
724
732 YaspGrid (const Coordinates& coordinates,
733 std::bitset<dim> periodic = std::bitset<dim>(0ULL),
734 int overlap = 1,
735 Communication comm = Communication(),
736 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
737 : ccobj(comm)
738 , leafIndexSet_(*this)
739 , _periodic(periodic)
740 , _overlap(overlap)
741 , keep_ovlp(true)
742 , adaptRefCount(0)
743 , adaptActive(false)
744 {
745 _levels.resize(1);
746
747 // Number of elements per coordinate direction on the coarsest level
748 for (std::size_t i=0; i<dim; i++)
749 _coarseSize[i] = coordinates.size(i);
750
751 // Construct the communication torus
752 _torus = decltype(_torus)(comm,tag,_coarseSize,overlap,partitioner);
753
754 iTupel o;
755 std::fill(o.begin(), o.end(), 0);
756 iTupel o_interior(o);
757 iTupel s_interior(_coarseSize);
758
759 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
760
761 // Set domain size
762 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
763 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
764 {
765 for (std::size_t i=0; i<dim; i++)
766 _L[i] = coordinates.size(i) * coordinates.meshsize(i,0);
767 }
768 if (std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value)
769 {
770 //determine sizes of vector to correctly construct torus structure and store for later size requests
771 for (int i=0; i<dim; i++)
772 _L[i] = coordinates.coordinate(i,_coarseSize[i]) - coordinates.coordinate(i,0);
773 }
774
775#if HAVE_MPI
776 // TODO: Settle on a single value for all coordinate types
777 int mysteryFactor = (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value) ? 1 : 2;
778
779 // check whether the grid is large enough to be overlapping
780 for (int i=0; i<dim; i++)
781 {
782 // find out whether the grid is too small to
783 int toosmall = (s_interior[i] <= mysteryFactor * overlap) && // interior is very small
784 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
785 // communicate the result to all those processes to have all processors error out if one process failed.
786 int global = 0;
787 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
788 if (global)
789 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
790 " Note that this also holds for DOFs on subdomain boundaries."
791 " Increase grid elements or decrease overlap accordingly.");
792 }
793#endif // #if HAVE_MPI
794
795 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
796 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
797 {
798 iTupel s_overlap(s_interior);
799 for (int i=0; i<dim; i++)
800 {
801 if ((o_interior[i] - overlap > 0) || (periodic[i]))
802 s_overlap[i] += overlap;
803 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
804 s_overlap[i] += overlap;
805 }
806
807 FieldVector<ctype,dim> upperRightWithOverlap;
808 for (int i=0; i<dim; i++)
809 upperRightWithOverlap[i] = coordinates.coordinate(i,0) + coordinates.meshsize(i,0) * s_overlap[i];
810
811 if constexpr (std::is_same_v<Coordinates,EquidistantCoordinates<ctype,dim>>)
812 {
813 // New coordinate object that additionally contains the overlap elements
814 EquidistantCoordinates<ctype,dim> coordinatesWithOverlap(upperRightWithOverlap,s_overlap);
815
816 // add level (the this-> is needed to make g++-6 happy)
817 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
818 }
819
820 if constexpr (std::is_same_v<Coordinates,EquidistantOffsetCoordinates<ctype,dim>>)
821 {
823 for (int i=0; i<dim; i++)
824 lowerleft[i] = coordinates.origin(i);
825
826 // New coordinate object that additionally contains the overlap elements
827 EquidistantOffsetCoordinates<ctype,dim> coordinatesWithOverlap(lowerleft,upperRightWithOverlap,s_overlap);
828
829 // add level (the this-> is needed to make g++-6 happy)
830 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
831 }
832 }
833
834 if constexpr (std::is_same_v<Coordinates,TensorProductCoordinates<ctype,dim>>)
835 {
836 std::array<std::vector<ctype>,dim> newCoords;
837 std::array<int, dim> offset(o_interior);
838
839 // find the relevant part of the coords vector for this processor and copy it to newCoords
840 for (int i=0; i<dim; ++i)
841 {
842 //define the coordinate range to be used
843 std::size_t begin = o_interior[i];
844 std::size_t end = begin + s_interior[i] + 1;
845
846 // check whether we are not at the physical boundary. In that case overlap is a simple
847 // extension of the coordinate range to be used
848 if (o_interior[i] - overlap > 0)
849 {
850 begin = begin - overlap;
851 offset[i] -= overlap;
852 }
853 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
854 end = end + overlap;
855
856 //copy the selected part in the new coord vector
857 newCoords[i].resize(end-begin);
858 auto newCoordsIt = newCoords[i].begin();
859 for (std::size_t j=begin; j<end; j++)
860 {
861 *newCoordsIt = coordinates.coordinate(i, j);
862 newCoordsIt++;
863 }
864
865 // Check whether we are at the physical boundary and have a periodic grid.
866 // In this case the coordinate vector has to be tweaked manually.
867 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
868 {
869 // we need to add the first <overlap> cells to the end of newcoords
870 for (int j=0; j<overlap; ++j)
871 newCoords[i].push_back(newCoords[i].back() - coordinates.coordinate(i,j) + coordinates.coordinate(i,j+1));
872 }
873
874 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
875 {
876 offset[i] -= overlap;
877
878 // we need to add the last <overlap> cells to the begin of newcoords
879 std::size_t reverseCounter = coordinates.size(i);
880 for (int j=0; j<overlap; ++j, --reverseCounter)
881 newCoords[i].insert(newCoords[i].begin(), newCoords[i].front()
882 - coordinates.coordinate(i,reverseCounter) + coordinates.coordinate(i,reverseCounter-1));
883 }
884 }
885
886 TensorProductCoordinates<ctype,dim> coordinatesWithOverlap(newCoords, offset);
887
888 // add level (the this-> is needed to make g++-6 happy)
889 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
890 }
891
892 init();
893 }
894
903 template<class C = Coordinates,
904 typename std::enable_if_t< std::is_same_v<C, EquidistantCoordinates<ctype,dim> >, int> = 0>
906 std::array<int, std::size_t{dim}> s,
907 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>{0ULL},
908 int overlap = 1,
909 Communication comm = Communication(),
910 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
911 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
912 _L(L), _periodic(periodic), _coarseSize(s), _overlap(overlap),
913 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
914 {
915 _levels.resize(1);
916
917 iTupel o;
918 std::fill(o.begin(), o.end(), 0);
919 iTupel o_interior(o);
920 iTupel s_interior(s);
921
922 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
923
924#if HAVE_MPI
925 // check whether the grid is large enough to be overlapping
926 for (int i=0; i<dim; i++)
927 {
928 // find out whether the grid is too small to
929 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
930 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
931 // communicate the result to all those processes to have all processors error out if one process failed.
932 int global = 0;
933 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
934 if (global)
935 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
936 " Note that this also holds for DOFs on subdomain boundaries."
937 " Increase grid elements or decrease overlap accordingly.");
938 }
939#endif // #if HAVE_MPI
940
941 iTupel s_overlap(s_interior);
942 for (int i=0; i<dim; i++)
943 {
944 if ((o_interior[i] - overlap > 0) || (periodic[i]))
945 s_overlap[i] += overlap;
946 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
947 s_overlap[i] += overlap;
948 }
949
950 FieldVector<ctype,dim> upperRightWithOverlap;
951
952 for (int i=0; i<dim; i++)
953 upperRightWithOverlap[i] = (L[i] / s[i]) * s_overlap[i];
954
955 // New coordinate object that additionally contains the overlap elements
956 EquidistantCoordinates<ctype,dim> cc(upperRightWithOverlap,s_overlap);
957
958 // add level
959 makelevel(cc,periodic,o_interior,overlap);
960
961 init();
962 }
963
973 template<class C = Coordinates,
974 typename std::enable_if_t< std::is_same_v<C, EquidistantOffsetCoordinates<ctype,dim> >, int> = 0>
977 std::array<int, std::size_t{dim}> s,
978 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
979 int overlap = 1,
980 Communication comm = Communication(),
981 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
982 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
983 _L(upperright - lowerleft),
984 _periodic(periodic), _coarseSize(s), _overlap(overlap),
985 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
986 {
987 _levels.resize(1);
988
989 iTupel o;
990 std::fill(o.begin(), o.end(), 0);
991 iTupel o_interior(o);
992 iTupel s_interior(s);
993
994 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
995
996#if HAVE_MPI
997 // check whether the grid is large enough to be overlapping
998 for (int i=0; i<dim; i++)
999 {
1000 // find out whether the grid is too small to
1001 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1002 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
1003 // communicate the result to all those processes to have all processors error out if one process failed.
1004 int global = 0;
1005 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1006 if (global)
1007 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1008 " Note that this also holds for DOFs on subdomain boundaries."
1009 " Increase grid elements or decrease overlap accordingly.");
1010 }
1011#endif // #if HAVE_MPI
1012
1013 iTupel s_overlap(s_interior);
1014 for (int i=0; i<dim; i++)
1015 {
1016 if ((o_interior[i] - overlap > 0) || (periodic[i]))
1017 s_overlap[i] += overlap;
1018 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
1019 s_overlap[i] += overlap;
1020 }
1021
1022 FieldVector<ctype,dim> upperRightWithOverlap;
1023 for (int i=0; i<dim; i++)
1024 upperRightWithOverlap[i] = lowerleft[i]
1025 + s_overlap[i] * (upperright[i]-lowerleft[i]) / s[i];
1026
1027 EquidistantOffsetCoordinates<ctype,dim> cc(lowerleft,upperRightWithOverlap,s_overlap);
1028
1029 // add level
1030 makelevel(cc,periodic,o_interior,overlap);
1031
1032 init();
1033 }
1034
1042 template<class C = Coordinates,
1043 typename std::enable_if_t< std::is_same_v<C, TensorProductCoordinates<ctype,dim> >, int> = 0>
1044 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1045 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
1046 int overlap = 1,
1047 Communication comm = Communication(),
1048 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1049 : ccobj(comm), _torus(comm,tag,Dune::Yasp::sizeArray<dim>(coords),overlap,partitioner),
1050 leafIndexSet_(*this), _periodic(periodic), _overlap(overlap),
1051 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1052 {
1053 if (!Dune::Yasp::checkIfMonotonous(coords))
1054 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1055
1056 _levels.resize(1);
1057
1058 //determine sizes of vector to correctly construct torus structure and store for later size requests
1059 for (int i=0; i<dim; i++) {
1060 _coarseSize[i] = coords[i].size() - 1;
1061 _L[i] = coords[i][_coarseSize[i]] - coords[i][0];
1062 }
1063
1064 iTupel o;
1065 std::fill(o.begin(), o.end(), 0);
1066 iTupel o_interior(o);
1067 iTupel s_interior(_coarseSize);
1068
1069 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
1070
1071#if HAVE_MPI
1072 // check whether the grid is large enough to be overlapping
1073 for (int i=0; i<dim; i++)
1074 {
1075 // find out whether the grid is too small to
1076 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1077 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
1078 // communicate the result to all those processes to have all processors error out if one process failed.
1079 int global = 0;
1080 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1081 if (global)
1082 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1083 " Note that this also holds for DOFs on subdomain boundaries."
1084 " Increase grid elements or decrease overlap accordingly.");
1085 }
1086#endif // #if HAVE_MPI
1087
1088
1089 std::array<std::vector<ctype>,dim> newcoords;
1090 std::array<int, dim> offset(o_interior);
1091
1092 // find the relevant part of the coords vector for this processor and copy it to newcoords
1093 for (int i=0; i<dim; ++i)
1094 {
1095 //define iterators on coords that specify the coordinate range to be used
1096 typename std::vector<ctype>::iterator begin = coords[i].begin() + o_interior[i];
1097 typename std::vector<ctype>::iterator end = begin + s_interior[i] + 1;
1098
1099 // check whether we are not at the physical boundary. In that case overlap is a simple
1100 // extension of the coordinate range to be used
1101 if (o_interior[i] - overlap > 0)
1102 {
1103 begin = begin - overlap;
1104 offset[i] -= overlap;
1105 }
1106 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
1107 end = end + overlap;
1108
1109 //copy the selected part in the new coord vector
1110 newcoords[i].resize(end-begin);
1111 std::copy(begin, end, newcoords[i].begin());
1112
1113 // check whether we are at the physical boundary and a have a periodic grid.
1114 // In this case the coordinate vector has to be tweaked manually.
1115 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
1116 {
1117 // we need to add the first <overlap> cells to the end of newcoords
1118 typename std::vector<ctype>::iterator it = coords[i].begin();
1119 for (int j=0; j<overlap; ++j)
1120 newcoords[i].push_back(newcoords[i].back() - *it + *(++it));
1121 }
1122
1123 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
1124 {
1125 offset[i] -= overlap;
1126
1127 // we need to add the last <overlap> cells to the begin of newcoords
1128 typename std::vector<ctype>::iterator it = coords[i].end() - 1;
1129 for (int j=0; j<overlap; ++j)
1130 newcoords[i].insert(newcoords[i].begin(), newcoords[i].front() - *it + *(--it));
1131 }
1132 }
1133
1134 TensorProductCoordinates<ctype,dim> cc(newcoords, offset);
1135
1136 // add level
1137 makelevel(cc,periodic,o_interior,overlap);
1138 init();
1139 }
1140
1141 private:
1142
1157 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1158 std::bitset<std::size_t{dim}> periodic,
1159 int overlap,
1160 Communication comm,
1161 std::array<int,dim> coarseSize,
1162 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1163 : ccobj(comm), _torus(comm,tag,coarseSize,overlap,partitioner), leafIndexSet_(*this),
1164 _periodic(periodic), _coarseSize(coarseSize), _overlap(overlap),
1165 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1166 {
1167 // check whether YaspGrid has been given the correct template parameter
1168 static_assert(std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value,
1169 "YaspGrid coordinate container template parameter and given constructor values do not match!");
1170
1171 if (!Dune::Yasp::checkIfMonotonous(coords))
1172 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1173
1174 for (int i=0; i<dim; i++)
1175 _L[i] = coords[i][coords[i].size() - 1] - coords[i][0];
1176
1177 _levels.resize(1);
1178
1179 std::array<int,dim> o;
1180 std::fill(o.begin(), o.end(), 0);
1181 std::array<int,dim> o_interior(o);
1182 std::array<int,dim> s_interior(coarseSize);
1183
1184 _torus.partition(_torus.rank(),o,coarseSize,o_interior,s_interior);
1185
1186 // get offset by modifying o_interior according to overlap
1187 std::array<int,dim> offset(o_interior);
1188 for (int i=0; i<dim; i++)
1189 if ((periodic[i]) || (o_interior[i] > 0))
1190 offset[i] -= overlap;
1191
1192 TensorProductCoordinates<ctype,dim> cc(coords, offset);
1193
1194 // add level
1195 makelevel(cc,periodic,o_interior,overlap);
1196
1197 init();
1198 }
1199
1200 // the backup restore facility needs to be able to use above constructor
1201 friend struct BackupRestoreFacility<YaspGrid<dim,Coordinates> >;
1202
1203 // do not copy this class
1204 YaspGrid(const YaspGrid&);
1205
1206 public:
1207
    /** \brief Return the maximum level index (levels run from 0 to maxLevel()).
     *  _levels always holds at least the macro level, so this is >= 0. */
    int maxLevel() const
    {
      return _levels.size()-1;
    }
1215
    /** \brief Globally refine (refCount > 0) or coarsen (refCount < 0) the grid.
     *
     *  \param refCount number of levels to add; a negative value removes that
     *                  many of the finest levels
     *  \throws GridError if more levels would be removed than exist
     */
    void globalRefine (int refCount)
    {
      if (refCount < -maxLevel())
        DUNE_THROW(GridError, "Only " << maxLevel() << " levels left. " <<
                   "Coarsening " << -refCount << " levels requested!");

      // If refCount is negative then coarsen the grid
      for (int k=refCount; k<0; k++)
      {
        // create an empty grid level
        YGridLevel empty;
        _levels.back() = empty;
        // reduce maxlevel
        _levels.pop_back();

        // keep the per-level index sets in sync with the level storage
        indexsets.pop_back();
      }

      // If refCount is positive refine the grid
      for (int k=0; k<refCount; k++)
      {
        // access to coarser grid level
        YGridLevel& cg = _levels[maxLevel()];

        // flag per direction whether the coarse overlap region extends beyond
        // the lower/upper physical boundary (always true for periodic dirs)
        std::bitset<dim> ovlp_low(0ULL), ovlp_up(0ULL);
        for (int i=0; i<dim; i++)
        {
          if (cg.overlap[0].dataBegin()->origin(i) > 0 || _periodic[i])
            ovlp_low[i] = true;
          if (cg.overlap[0].dataBegin()->max(i) + 1 < globalSize(i) || _periodic[i])
            ovlp_up[i] = true;
        }

        Coordinates newcont(cg.coords.refine(ovlp_low, ovlp_up, cg.overlapSize, keep_ovlp));

        // keeping the physical overlap size doubles the overlap in cell counts
        int overlap = (keep_ovlp) ? 2*cg.overlapSize : cg.overlapSize;

        // determine new origin: refinement doubles the cell indices
        iTupel o_interior;
        for (int i=0; i<dim; i++)
          o_interior[i] = 2*cg.interior[0].dataBegin()->origin(i);

        // add level (grow the storage first; makelevel fills the new slot)
        _levels.resize(_levels.size() + 1);
        makelevel(newcont,_periodic,o_interior,overlap);

        indexsets.push_back( std::make_shared<YaspIndexSet<const YaspGrid<dim,Coordinates>, false > >(*this,maxLevel()) );
      }
    }
1266
    /** \brief Choose the refinement behavior of the overlap region.
     *  \param keepPhysicalOverlap if true, refinement keeps the physical size of
     *         the overlap (doubling the overlap cell count); if false it keeps
     *         the overlap cell count. Consumed by globalRefine via keep_ovlp. */
    void refineOptions (bool keepPhysicalOverlap)
    {
      keep_ovlp = keepPhysicalOverlap;
    }
1275
1287 bool mark( int refCount, const typename Traits::template Codim<0>::Entity & e )
1288 {
1289 assert(adaptActive == false);
1290 if (e.level() != maxLevel()) return false;
1291 adaptRefCount = std::max(adaptRefCount, refCount);
1292 return true;
1293 }
1294
1301 int getMark ( const typename Traits::template Codim<0>::Entity &e ) const
1302 {
1303 return ( e.level() == maxLevel() ) ? adaptRefCount : 0;
1304 }
1305
1307 bool adapt ()
1308 {
1309 globalRefine(adaptRefCount);
1310 return (adaptRefCount > 0);
1311 }
1312
1314 bool preAdapt ()
1315 {
1316 adaptActive = true;
1317 adaptRefCount = comm().max(adaptRefCount);
1318 return (adaptRefCount < 0);
1319 }
1320
1323 {
1324 adaptActive = false;
1325 adaptRefCount = 0;
1326 }
1327
1329 template<int cd, PartitionIteratorType pitype>
1330 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lbegin (int level) const
1331 {
1332 return levelbegin<cd,pitype>(level);
1333 }
1334
1336 template<int cd, PartitionIteratorType pitype>
1337 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lend (int level) const
1338 {
1339 return levelend<cd,pitype>(level);
1340 }
1341
1343 template<int cd>
1344 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lbegin (int level) const
1345 {
1346 return levelbegin<cd,All_Partition>(level);
1347 }
1348
1350 template<int cd>
1351 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lend (int level) const
1352 {
1353 return levelend<cd,All_Partition>(level);
1354 }
1355
1357 template<int cd, PartitionIteratorType pitype>
1358 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafbegin () const
1359 {
1360 return levelbegin<cd,pitype>(maxLevel());
1361 }
1362
1364 template<int cd, PartitionIteratorType pitype>
1365 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafend () const
1366 {
1367 return levelend<cd,pitype>(maxLevel());
1368 }
1369
1371 template<int cd>
1372 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafbegin () const
1373 {
1374 return levelbegin<cd,All_Partition>(maxLevel());
1375 }
1376
1378 template<int cd>
1379 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafend () const
1380 {
1381 return levelend<cd,All_Partition>(maxLevel());
1382 }
1383
1384 // \brief obtain Entity from EntitySeed. */
1385 template <typename Seed>
1386 typename Traits::template Codim<Seed::codimension>::Entity
1387 entity(const Seed& seed) const
1388 {
1389 const int codim = Seed::codimension;
1390 YGridLevelIterator g = begin(seed.impl().level());
1391
1392 typedef typename Traits::template Codim<Seed::codimension>::Entity Entity;
1393 typedef YaspEntity<codim,dim,const YaspGrid> EntityImp;
1394 typedef typename YGrid::Iterator YIterator;
1395
1396 return Entity(EntityImp(g,YIterator(g->overlapfront[codim],seed.impl().coord(),seed.impl().offset())));
1397 }
1398
1400 int overlapSize (int level, [[maybe_unused]] int codim) const
1401 {
1402 YGridLevelIterator g = begin(level);
1403 return g->overlapSize;
1404 }
1405
1407 int overlapSize ([[maybe_unused]] int odim) const
1408 {
1410 return g->overlapSize;
1411 }
1412
    /** \brief Ghost size on the given level — always 0, YaspGrid has no ghost cells. */
    int ghostSize ([[maybe_unused]] int level, [[maybe_unused]] int codim) const
    {
      return 0;
    }
1418
    /** \brief Ghost size on the leaf level — always 0, YaspGrid has no ghost cells. */
    int ghostSize ([[maybe_unused]] int codim) const
    {
      return 0;
    }
1424
1426 int size (int level, int codim) const
1427 {
1428 YGridLevelIterator g = begin(level);
1429
1430 // sum over all components of the codimension
1431 int count = 0;
1432 for (auto it = g->overlapfront[codim].dataBegin(); it != g->overlapfront[codim].dataEnd(); ++it)
1433 count += it->totalsize();
1434
1435 return count;
1436 }
1437
    /** \brief Number of leaf entities of given codim (delegates to the finest level). */
    int size (int codim) const
    {
      return size(maxLevel(),codim);
    }
1443
1445 int size (int level, GeometryType type) const
1446 {
1447 return (type.isCube()) ? size(level,dim-type.dim()) : 0;
1448 }
1449
    /** \brief Number of leaf entities of a geometry type (delegates to the finest level). */
    int size (GeometryType type) const
    {
      return size(maxLevel(),type);
    }
1455
    /** \brief Number of boundary segments of the level-0 (macro) grid. */
    size_t numBoundarySegments () const
    {
      return nBSegments;
    }
1461
1464 return _L;
1465 }
1466
1471 template<class DataHandleImp, class DataType>
1473 {
1474 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,level);
1475 }
1476
1481 template<class DataHandleImp, class DataType>
1483 {
1484 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,this->maxLevel());
1485 }
1486
1491 template<class DataHandle, int codim>
1492 void communicateCodim (DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level) const
1493 {
1494 // check input
1495 if (!data.contains(dim,codim)) return; // should have been checked outside
1496
1497 // data types
1498 typedef typename DataHandle::DataType DataType;
1499
1500 // access to grid level
1501 YGridLevelIterator g = begin(level);
1502
1503 // find send/recv lists or throw error
1504 const YGridList<Coordinates>* sendlist = 0;
1505 const YGridList<Coordinates>* recvlist = 0;
1506
1508 {
1509 sendlist = &g->send_interiorborder_interiorborder[codim];
1510 recvlist = &g->recv_interiorborder_interiorborder[codim];
1511 }
1512 if (iftype==InteriorBorder_All_Interface)
1513 {
1514 sendlist = &g->send_interiorborder_overlapfront[codim];
1515 recvlist = &g->recv_overlapfront_interiorborder[codim];
1516 }
1518 {
1519 sendlist = &g->send_overlap_overlapfront[codim];
1520 recvlist = &g->recv_overlapfront_overlap[codim];
1521 }
1522 if (iftype==All_All_Interface)
1523 {
1524 sendlist = &g->send_overlapfront_overlapfront[codim];
1525 recvlist = &g->recv_overlapfront_overlapfront[codim];
1526 }
1527
1528 // change communication direction?
1529 if (dir==BackwardCommunication)
1530 std::swap(sendlist,recvlist);
1531
1532 int cnt;
1533
1534 // Size computation (requires communication if variable size)
1535 std::vector<int> send_size(sendlist->size(),-1); // map rank to total number of objects (of type DataType) to be sent
1536 std::vector<int> recv_size(recvlist->size(),-1); // map rank to total number of objects (of type DataType) to be recvd
1537 std::vector<size_t*> send_sizes(sendlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be sent
1538 std::vector<size_t*> recv_sizes(recvlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be recvd
1539
1540 // define type to iterate over send and recv lists
1541 typedef typename YGridList<Coordinates>::Iterator ListIt;
1542
1543 if (data.fixedSize(dim,codim))
1544 {
1545 // fixed size: just take a dummy entity, size can be computed without communication
1546 cnt=0;
1547 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1548 {
1549 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1551 send_size[cnt] = is->grid.totalsize() * data.size(*it);
1552 cnt++;
1553 }
1554 cnt=0;
1555 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1556 {
1557 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1559 recv_size[cnt] = is->grid.totalsize() * data.size(*it);
1560 cnt++;
1561 }
1562 }
1563 else
1564 {
1565 // variable size case: sender side determines the size
1566 cnt=0;
1567 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1568 {
1569 // allocate send buffer for sizes per entity
1570 size_t *buf = new size_t[is->grid.totalsize()];
1571 send_sizes[cnt] = buf;
1572
1573 // loop over entities and ask for size
1574 int i=0; size_t n=0;
1575 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1577 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1578 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1579 for ( ; it!=itend; ++it)
1580 {
1581 buf[i] = data.size(*it);
1582 n += buf[i];
1583 i++;
1584 }
1585
1586 // now we know the size for this rank
1587 send_size[cnt] = n;
1588
1589 // hand over send request to torus class
1590 torus().send(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1591 cnt++;
1592 }
1593
1594 // allocate recv buffers for sizes and store receive request
1595 cnt=0;
1596 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1597 {
1598 // allocate recv buffer
1599 size_t *buf = new size_t[is->grid.totalsize()];
1600 recv_sizes[cnt] = buf;
1601
1602 // hand over recv request to torus class
1603 torus().recv(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1604 cnt++;
1605 }
1606
1607 // exchange all size buffers now
1608 torus().exchange();
1609
1610 // release send size buffers
1611 cnt=0;
1612 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1613 {
1614 delete[] send_sizes[cnt];
1615 send_sizes[cnt] = 0;
1616 cnt++;
1617 }
1618
1619 // process receive size buffers
1620 cnt=0;
1621 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1622 {
1623 // get recv buffer
1624 size_t *buf = recv_sizes[cnt];
1625
1626 // compute total size
1627 size_t n=0;
1628 for (int i=0; i<is->grid.totalsize(); ++i)
1629 n += buf[i];
1630
1631 // ... and store it
1632 recv_size[cnt] = n;
1633 ++cnt;
1634 }
1635 }
1636
1637
1638 // allocate & fill the send buffers & store send request
1639 std::vector<DataType*> sends(sendlist->size(), static_cast<DataType*>(0)); // store pointers to send buffers
1640 cnt=0;
1641 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1642 {
1643 // allocate send buffer
1644 DataType *buf = new DataType[send_size[cnt]];
1645
1646 // remember send buffer
1647 sends[cnt] = buf;
1648
1649 // make a message buffer
1650 MessageBuffer<DataType> mb(buf);
1651
1652 // fill send buffer; iterate over cells in intersection
1653 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1655 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1656 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1657 for ( ; it!=itend; ++it)
1658 data.gather(mb,*it);
1659
1660 // hand over send request to torus class
1661 torus().send(is->rank,buf,send_size[cnt]*sizeof(DataType));
1662 cnt++;
1663 }
1664
1665 // allocate recv buffers and store receive request
1666 std::vector<DataType*> recvs(recvlist->size(),static_cast<DataType*>(0)); // store pointers to send buffers
1667 cnt=0;
1668 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1669 {
1670 // allocate recv buffer
1671 DataType *buf = new DataType[recv_size[cnt]];
1672
1673 // remember recv buffer
1674 recvs[cnt] = buf;
1675
1676 // hand over recv request to torus class
1677 torus().recv(is->rank,buf,recv_size[cnt]*sizeof(DataType));
1678 cnt++;
1679 }
1680
1681 // exchange all buffers now
1682 torus().exchange();
1683
1684 // release send buffers
1685 cnt=0;
1686 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1687 {
1688 delete[] sends[cnt];
1689 sends[cnt] = 0;
1690 cnt++;
1691 }
1692
1693 // process receive buffers and delete them
1694 cnt=0;
1695 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1696 {
1697 // get recv buffer
1698 DataType *buf = recvs[cnt];
1699
1700 // make a message buffer
1701 MessageBuffer<DataType> mb(buf);
1702
1703 // copy data from receive buffer; iterate over cells in intersection
1704 if (data.fixedSize(dim,codim))
1705 {
1706 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1708 size_t n=data.size(*it);
1709 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1710 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1711 for ( ; it!=itend; ++it)
1712 data.scatter(mb,*it,n);
1713 }
1714 else
1715 {
1716 int i=0;
1717 size_t *sbuf = recv_sizes[cnt];
1718 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1720 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1721 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1722 for ( ; it!=itend; ++it)
1723 data.scatter(mb,*it,sbuf[i++]);
1724 delete[] sbuf;
1725 }
1726
1727 // delete buffer
1728 delete[] buf; // hier krachts !
1729 cnt++;
1730 }
1731 }
1732
1733 // The new index sets from DDM 11.07.2005
    /** \brief Access the grid-wide id set. */
    const typename Traits::GlobalIdSet& globalIdSet() const
    {
      return theglobalidset;
    }
1738
    /** \brief Access the local id set; YaspGrid reuses the global id set for it. */
    const typename Traits::LocalIdSet& localIdSet() const
    {
      return theglobalidset;
    }
1743
    /** \brief Access the index set of the given level.
     *  \throws RangeError if level is outside [0, maxLevel()] */
    const typename Traits::LevelIndexSet& levelIndexSet(int level) const
    {
      if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
      return *(indexsets[level]);
    }
1749
    /** \brief Access the leaf index set. */
    const typename Traits::LeafIndexSet& leafIndexSet() const
    {
      return leafIndexSet_;
    }
1754
    /** \brief Access the communication object used by this grid. */
    const Communication& comm () const
    {
      return ccobj;
    }
1761
1762 private:
1763
1764 // number of boundary segments of the level 0 grid
1765 int nBSegments;
1766
1767 // Index classes need access to the real entity
1768 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, true >;
1769 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, false >;
1770 friend class Dune::YaspGlobalIdSet<const Dune::YaspGrid<dim, Coordinates> >;
1771 friend class Dune::YaspPersistentContainerIndex<const Dune::YaspGrid<dim, Coordinates> >;
1772
1773 friend class Dune::YaspIntersectionIterator<const Dune::YaspGrid<dim, Coordinates> >;
1774 friend class Dune::YaspIntersection<const Dune::YaspGrid<dim, Coordinates> >;
1775 friend class Dune::YaspEntity<0, dim, const Dune::YaspGrid<dim, Coordinates> >;
1776
1777 template<int codim_, int dim_, class GridImp_, template<int,int,class> class EntityImp_>
1778 friend class Entity;
1779
1780 template<class DT>
1781 class MessageBuffer {
1782 public:
1783 // Constructor
1784 MessageBuffer (DT *p)
1785 {
1786 a=p;
1787 i=0;
1788 j=0;
1789 }
1790
1791 // write data to message buffer, acts like a stream !
1792 template<class Y>
1793 void write (const Y& data)
1794 {
1795 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1796 a[i++] = data;
1797 }
1798
1799 // read data from message buffer, acts like a stream !
1800 template<class Y>
1801 void read (Y& data) const
1802 {
1803 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1804 data = a[j++];
1805 }
1806
1807 private:
1808 DT *a;
1809 int i;
1810 mutable int j;
1811 };
1812
1814 template<int cd, PartitionIteratorType pitype>
1815 YaspLevelIterator<cd,pitype,GridImp> levelbegin (int level) const
1816 {
1817 YGridLevelIterator g = begin(level);
1818 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1819
1820 if (pitype==Interior_Partition)
1821 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].begin());
1822 if (pitype==InteriorBorder_Partition)
1823 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].begin());
1824 if (pitype==Overlap_Partition)
1825 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].begin());
1826 if (pitype<=All_Partition)
1827 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].begin());
1828 if (pitype==Ghost_Partition)
1829 return levelend <cd, pitype> (level);
1830
1831 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1832 }
1833
1835 template<int cd, PartitionIteratorType pitype>
1836 YaspLevelIterator<cd,pitype,GridImp> levelend (int level) const
1837 {
1838 YGridLevelIterator g = begin(level);
1839 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1840
1841 if (pitype==Interior_Partition)
1842 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].end());
1843 if (pitype==InteriorBorder_Partition)
1844 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].end());
1845 if (pitype==Overlap_Partition)
1846 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].end());
1847 if (pitype<=All_Partition || pitype == Ghost_Partition)
1848 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].end());
1849
1850 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1851 }
1852
1853 Communication ccobj;
1854
1855 Torus<Communication,dim> _torus;
1856
1857 std::vector< std::shared_ptr< YaspIndexSet<const YaspGrid<dim,Coordinates>, false > > > indexsets;
1858 YaspIndexSet<const YaspGrid<dim,Coordinates>, true> leafIndexSet_;
1859 YaspGlobalIdSet<const YaspGrid<dim,Coordinates> > theglobalidset;
1860
1862 iTupel _s;
1863 std::bitset<dim> _periodic;
1864 iTupel _coarseSize;
1865 ReservedVector<YGridLevel,32> _levels;
1866 int _overlap;
1867 bool keep_ovlp;
1868 int adaptRefCount;
1869 bool adaptActive;
1870 };
1871
  // Class template deduction guides

  // extent + cell counts -> equidistant grid anchored at the origin
  template<typename ctype, int dim>
  YaspGrid(FieldVector<ctype, dim>,
           std::array<int, std::size_t{dim}>,
           std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
           int = 1,
           YaspCommunication = YaspCommunication(),
           const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantCoordinates<ctype, dim>>::defaultPartitioner())
    -> YaspGrid< dim, EquidistantCoordinates<ctype, dim> >;

  // lower-left + upper-right corners + cell counts -> equidistant grid with offset
  template<typename ctype, int dim>
  YaspGrid(FieldVector<ctype, dim>,
           FieldVector<ctype, dim>,
           std::array<int, std::size_t{dim}>,
           std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
           int = 1,
           YaspCommunication = YaspCommunication(),
           const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantOffsetCoordinates<ctype, dim>>::defaultPartitioner())
    -> YaspGrid< dim, EquidistantOffsetCoordinates<ctype, dim> >;

  // per-direction coordinate vectors -> tensorproduct grid
  template<typename ctype, std::size_t dim>
  YaspGrid(std::array<std::vector<ctype>, dim>,
           std::bitset<dim> = std::bitset<dim>{0ULL},
           int = 1,
           YaspCommunication = YaspCommunication(),
           const Yasp::Partitioning<dim>* = YaspGrid<int{dim}, TensorProductCoordinates<ctype, int{dim}>>::defaultPartitioner())
    -> YaspGrid< int{dim}, TensorProductCoordinates<ctype, int{dim}> >;
1899
1901 template <int d, class CC>
1902 std::ostream& operator<< (std::ostream& s, const YaspGrid<d,CC>& grid)
1903 {
1904 int rank = grid.torus().rank();
1905
1906 s << "[" << rank << "]:" << " YaspGrid maxlevel=" << grid.maxLevel() << std::endl;
1907
1908 s << "Printing the torus: " <<std::endl;
1909 s << grid.torus() << std::endl;
1910
1911 for (typename YaspGrid<d,CC>::YGridLevelIterator g=grid.begin(); g!=grid.end(); ++g)
1912 {
1913 s << "[" << rank << "]: " << std::endl;
1914 s << "[" << rank << "]: " << "==========================================" << std::endl;
1915 s << "[" << rank << "]: " << "level=" << g->level() << std::endl;
1916
1917 for (int codim = 0; codim < d + 1; ++codim)
1918 {
1919 s << "[" << rank << "]: " << "overlapfront[" << codim << "]: " << g->overlapfront[codim] << std::endl;
1920 s << "[" << rank << "]: " << "overlap[" << codim << "]: " << g->overlap[codim] << std::endl;
1921 s << "[" << rank << "]: " << "interiorborder[" << codim << "]: " << g->interiorborder[codim] << std::endl;
1922 s << "[" << rank << "]: " << "interior[" << codim << "]: " << g->interior[codim] << std::endl;
1923
1924 typedef typename YGridList<CC>::Iterator I;
1925 for (I i=g->send_overlapfront_overlapfront[codim].begin();
1926 i!=g->send_overlapfront_overlapfront[codim].end(); ++i)
1927 s << "[" << rank << "]: " << " s_of_of[" << codim << "] to rank "
1928 << i->rank << " " << i->grid << std::endl;
1929
1930 for (I i=g->recv_overlapfront_overlapfront[codim].begin();
1931 i!=g->recv_overlapfront_overlapfront[codim].end(); ++i)
1932 s << "[" << rank << "]: " << " r_of_of[" << codim << "] to rank "
1933 << i->rank << " " << i->grid << std::endl;
1934
1935 for (I i=g->send_overlap_overlapfront[codim].begin();
1936 i!=g->send_overlap_overlapfront[codim].end(); ++i)
1937 s << "[" << rank << "]: " << " s_o_of[" << codim << "] to rank "
1938 << i->rank << " " << i->grid << std::endl;
1939
1940 for (I i=g->recv_overlapfront_overlap[codim].begin();
1941 i!=g->recv_overlapfront_overlap[codim].end(); ++i)
1942 s << "[" << rank << "]: " << " r_of_o[" << codim << "] to rank "
1943 << i->rank << " " << i->grid << std::endl;
1944
1945 for (I i=g->send_interiorborder_interiorborder[codim].begin();
1946 i!=g->send_interiorborder_interiorborder[codim].end(); ++i)
1947 s << "[" << rank << "]: " << " s_ib_ib[" << codim << "] to rank "
1948 << i->rank << " " << i->grid << std::endl;
1949
1950 for (I i=g->recv_interiorborder_interiorborder[codim].begin();
1951 i!=g->recv_interiorborder_interiorborder[codim].end(); ++i)
1952 s << "[" << rank << "]: " << " r_ib_ib[" << codim << "] to rank "
1953 << i->rank << " " << i->grid << std::endl;
1954
1955 for (I i=g->send_interiorborder_overlapfront[codim].begin();
1956 i!=g->send_interiorborder_overlapfront[codim].end(); ++i)
1957 s << "[" << rank << "]: " << " s_ib_of[" << codim << "] to rank "
1958 << i->rank << " " << i->grid << std::endl;
1959
1960 for (I i=g->recv_overlapfront_interiorborder[codim].begin();
1961 i!=g->recv_overlapfront_interiorborder[codim].end(); ++i)
1962 s << "[" << rank << "]: " << " r_of_ib[" << codim << "] to rank "
1963 << i->rank << " " << i->grid << std::endl;
1964 }
1965 }
1966
1967 s << std::endl;
1968
1969 return s;
1970 }
1971
1972 namespace Capabilities
1973 {
1974
    /** \brief YaspGrid provides backup and restore facilities (see BackupRestoreFacility) */
    template<int dim, class Coordinates>
    struct hasBackupRestoreFacilities< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
    };
1987
    /** \brief YaspGrid has only one geometry type: the cube of dimension dim */
    template<int dim, class Coordinates>
    struct hasSingleGeometryType< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
      static const unsigned int topologyId = GeometryTypes::cube(dim).id();
    };
1997
    /** \brief YaspGrid is a Cartesian (axis-aligned, structured) grid */
    template<int dim, class Coordinates>
    struct isCartesian< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
    };
2006
    /** \brief YaspGrid implements entities of every codimension */
    template<int dim, class Coordinates, int codim>
    struct hasEntity< YaspGrid<dim, Coordinates>, codim>
    {
      static const bool v = true;
    };
2015
    /** \brief YaspGrid can iterate over entities of every codimension */
    template<int dim, class Coordinates, int codim>
    struct hasEntityIterator<YaspGrid<dim, Coordinates>, codim>
    {
      static const bool v = true;
    };
2025
    /** \brief YaspGrid can communicate on entities of every codimension */
    template<int dim, int codim, class Coordinates>
    struct canCommunicate< YaspGrid< dim, Coordinates>, codim >
    {
      static const bool v = true;
    };
2034
    /** \brief every YaspGrid level is conforming */
    template<int dim, class Coordinates>
    struct isLevelwiseConforming< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
    };
2043
    /** \brief the YaspGrid leaf view is conforming */
    template<int dim, class Coordinates>
    struct isLeafwiseConforming< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
    };
2052
    /** \brief YaspGrid views are safe for concurrent read access from multiple threads */
    template<int dim, class Coordinates>
    struct viewThreadSafe< YaspGrid<dim, Coordinates> >
    {
      static const bool v = true;
    };
2061
2062 }
2063
2064} // end namespace
2065
2066// Include the specialization of the StructuredGridFactory class for YaspGrid
2068// Include the specialization of the BackupRestoreFacility class for YaspGrid
2069#include <dune/grid/yaspgrid/backuprestore.hh>
2070
2071#endif
A geometry implementation for axis-aligned hypercubes.
Portable very large unsigned integers.
CommDataHandleIF describes the features of a data handle for communication in parallel runs using the...
Definition: datahandleif.hh:78
Wrapper class for entities.
Definition: entity.hh:66
Container for equidistant coordinates in a YaspGrid.
Definition: coordinates.hh:29
Container for equidistant coordinates in a YaspGrid with non-trivial origin.
Definition: coordinates.hh:131
static constexpr size_type size() noexcept
Obtain the number of elements stored in the vector.
Definition: fvector.hh:193
Unique label for each type of entities that can occur in DUNE grids.
Definition: type.hh:114
constexpr unsigned int dim() const
Return dimension of the type.
Definition: type.hh:360
constexpr bool isCube() const
Return true if entity is a cube of any dimension.
Definition: type.hh:324
constexpr unsigned int id() const
Return the topology id of the type.
Definition: type.hh:365
Definition: grid.hh:848
Base class for exceptions in Dune grid modules.
Definition: exceptions.hh:20
typename GridFamily::Traits::Communication Communication
A type that is a model of Dune::Communication. It provides a portable way for communication on the se...
Definition: grid.hh:515
Intersection of a mesh entity of codimension 0 ("element") with a "neighboring" element or with the d...
Definition: intersection.hh:164
A Vector class with statically reserved memory.
Definition: reservedvector.hh:47
constexpr iterator end() noexcept
Returns an iterator pointing to the end of the vector.
Definition: reservedvector.hh:273
constexpr size_type size() const noexcept
Returns number of elements in the vector.
Definition: reservedvector.hh:387
constexpr iterator begin() noexcept
Returns a iterator pointing to the beginning of the vector.
Definition: reservedvector.hh:237
constexpr void pop_back() noexcept
Erases the last element of the vector, O(1) time.
Definition: reservedvector.hh:227
constexpr reference back() noexcept
Returns reference to last element of vector.
Definition: reservedvector.hh:357
constexpr void resize(size_type s) noexcept
Specifies a new size for the vector.
Definition: reservedvector.hh:187
Coordinate container for a tensor product YaspGrid.
Definition: coordinates.hh:245
double partition(int rank, iTupel origin_in, iTupel size_in, iTupel &origin_out, iTupel &size_out) const
partition the given grid onto the torus and return the piece of the process with given rank; returns ...
Definition: torus.hh:239
iTupel coord() const
return own coordinates
Definition: torus.hh:100
int rank() const
return own rank
Definition: torus.hh:94
const iTupel & dims() const
return dimensions of torus
Definition: torus.hh:112
void recv(int rank, void *buffer, int size) const
store a receive request; buffers are received in order; handles also local requests with memcpy
Definition: torus.hh:374
void send(int rank, void *buffer, int size) const
store a send request; buffers are sent in order; handles also local requests with memcpy
Definition: torus.hh:361
int neighbors() const
return the number of neighbors, which is
Definition: torus.hh:203
ProcListIterator recvend() const
last process in receive list
Definition: torus.hh:355
ProcListIterator sendend() const
end of send list
Definition: torus.hh:343
ProcListIterator sendbegin() const
first process in send list
Definition: torus.hh:337
void exchange() const
exchange messages stored in request buffers; clear request buffers afterwards
Definition: torus.hh:387
ProcListIterator recvbegin() const
first process in receive list
Definition: torus.hh:349
Definition: ygrid.hh:75
YGridComponent< Coordinates > move(iTupel v) const
return grid moved by the vector v
Definition: ygrid.hh:263
YGridComponent< Coordinates > intersection(const YGridComponent< Coordinates > &r) const
Return YGridComponent of supergrid of self which is the intersection of self and another YGridCompone...
Definition: ygrid.hh:271
implements a collection of multiple std::deque<Intersection> Intersections with neighboring processor...
Definition: ygrid.hh:823
int size() const
return the size of the container, this is the sum of the sizes of all deques
Definition: ygrid.hh:953
Iterator end() const
return iterator pointing to the end of the container
Definition: ygrid.hh:929
Iterator begin() const
return iterator pointing to the begin of the container
Definition: ygrid.hh:923
Iterator over a collection of YGrids. A YGrid::Iterator is the heart of an entity in YaspGrid.
Definition: ygrid.hh:594
implements a collection of YGridComponents which form a codimension Entities of given codimension c n...
Definition: ygrid.hh:551
persistent, globally unique Ids
Definition: yaspgrididset.hh:25
[ provides Dune::Grid ]
Definition: yaspgrid.hh:166
YaspGridFamily< dim, Coordinates > GridFamily
the GridFamily of this grid
Definition: yaspgrid.hh:716
int ghostSize(int level, int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1414
const Torus< Communication, dim > & torus() const
return reference to torus
Definition: yaspgrid.hh:246
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lbegin(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1344
int size(int level, GeometryType type) const
number of entities per level and geometry type in this process
Definition: yaspgrid.hh:1445
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1372
void globalRefine(int refCount)
refine the grid refCount times.
Definition: yaspgrid.hh:1217
int getMark(const typename Traits::template Codim< 0 >::Entity &e) const
returns adaptation mark for given entity
Definition: yaspgrid.hh:1301
YaspGrid(std::array< std::vector< ctype >, std::size_t{dim}> coords, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Standard constructor for a tensorproduct YaspGrid.
Definition: yaspgrid.hh:1044
int ghostSize(int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1420
int globalSize(int i) const
return number of cells on finest level in given direction on all processors
Definition: yaspgrid.hh:252
YaspGrid(Dune::FieldVector< ctype, dim > lowerleft, Dune::FieldVector< ctype, dim > upperright, std::array< int, std::size_t{dim}> s, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:975
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1365
void postAdapt()
clean up some markers
Definition: yaspgrid.hh:1322
const Dune::FieldVector< ctype, dim > & domainSize() const
returns the size of the physical domain
Definition: yaspgrid.hh:1463
YGridLevelIterator end() const
return iterator pointing to one past the finest level
Definition: yaspgrid.hh:307
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: yaspgrid.hh:1451
int maxLevel() const
Definition: yaspgrid.hh:1211
YaspGrid(Dune::FieldVector< ctype, dim > L, std::array< int, std::size_t{dim}> s, std::bitset< std::size_t{dim}> periodic=std::bitset< std::size_t{dim}>{0ULL}, int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:905
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1379
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1472
void intersections(const YGridComponent< Coordinates > &sendgrid, const YGridComponent< Coordinates > &recvgrid, std::deque< Intersection > &sendlist, std::deque< Intersection > &recvlist)
Construct list of intersections with neighboring processors.
Definition: yaspgrid.hh:562
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1358
bool preAdapt()
returns true, if the grid will be coarsened
Definition: yaspgrid.hh:1314
iTupel levelSize(int l) const
return size vector of the grid (in cells) on level l
Definition: yaspgrid.hh:270
bool mark(int refCount, const typename Traits::template Codim< 0 >::Entity &e)
Marks an entity to be refined/coarsened in a subsequent adapt.
Definition: yaspgrid.hh:1287
int overlapSize(int level, int codim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1400
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir) const
Definition: yaspgrid.hh:1482
int size(int codim) const
number of leaf entities per codim in this process
Definition: yaspgrid.hh:1439
bool isPeriodic(int i) const
return whether the grid is periodic in direction i
Definition: yaspgrid.hh:279
void refineOptions(bool keepPhysicalOverlap)
set options for refinement
Definition: yaspgrid.hh:1271
int size(int level, int codim) const
number of entities per level and codim in this process
Definition: yaspgrid.hh:1426
YaspGrid(const Coordinates &coordinates, std::bitset< dim > periodic=std::bitset< dim >(0ULL), int overlap=1, Communication comm=Communication(), const Yasp::Partitioning< dim > *partitioner=defaultPartitioner())
Definition: yaspgrid.hh:732
bool adapt()
map adapt to global refine
Definition: yaspgrid.hh:1307
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lend(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1351
ReservedVector< YGridLevel, 32 >::const_iterator YGridLevelIterator
Iterator over the grid levels.
Definition: yaspgrid.hh:290
size_t numBoundarySegments() const
returns the number of boundary segments within the macro grid
Definition: yaspgrid.hh:1457
void communicateCodim(DataHandle &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1492
void makelevel(const Coordinates &coords, std::bitset< dim > periodic, iTupel o_interior, int overlap)
Make a new YGridLevel structure.
Definition: yaspgrid.hh:327
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lend(int level) const
Iterator to one past the last entity of given codim on level for partition type.
Definition: yaspgrid.hh:1337
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lbegin(int level) const
Iterator to first entity of given codim on level for partition type.
Definition: yaspgrid.hh:1330
iTupel globalSize() const
return number of cells on finest level on all processors
Definition: yaspgrid.hh:258
const Communication & comm() const
return a communication object
Definition: yaspgrid.hh:1757
int overlapSize(int odim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1407
YGridLevelIterator begin(int i) const
return iterator pointing to given level
Definition: yaspgrid.hh:299
Coordinates::ctype ctype
Type used for coordinates.
Definition: yaspgrid.hh:179
int levelSize(int l, int i) const
return size of the grid (in cells) on level l in direction i
Definition: yaspgrid.hh:264
YGridLevelIterator begin() const
return iterator pointing to coarsest level
Definition: yaspgrid.hh:293
YaspHierarchicIterator enables iteration over son entities of codim 0.
Definition: yaspgridhierarchiciterator.hh:20
Implementation of Level- and LeafIndexSets for YaspGrid.
Definition: yaspgridindexsets.hh:25
YaspIntersectionIterator enables iteration over intersections with neighboring codim 0 entities.
Definition: yaspgridintersectioniterator.hh:22
YaspIntersection provides data about intersection with neighboring codim 0 entities.
Definition: yaspgridintersection.hh:22
Iterates over entities of one grid level.
Definition: yaspgridleveliterator.hh:19
implement a consecutive index for all entities of given codim of a YaspGrid
Definition: yaspgridpersistentcontainer.hh:35
a base class for the yaspgrid partitioning strategy
Definition: partitioning.hh:38
Different resources needed by all grid implementations.
Provides base classes for index and id sets.
Implements an utility class that provides collective communication methods for sequential programs.
This provides container classes for the coordinates to be used in YaspGrid Upon implementation of the...
Describes the parallel communication interface class for MessageBuffers and DataHandles.
A set of traits classes to store static information about grid implementation.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
CommunicationDirection
Define a type for communication direction parameter.
Definition: gridenums.hh:170
InterfaceType
Parameter to be used for the communication functions.
Definition: gridenums.hh:86
@ All_Partition
all entities
Definition: gridenums.hh:141
@ Interior_Partition
only interior entities
Definition: gridenums.hh:137
@ InteriorBorder_Partition
interior and border entities
Definition: gridenums.hh:138
@ Overlap_Partition
interior, border, and overlap entities
Definition: gridenums.hh:139
@ Ghost_Partition
only ghost entities
Definition: gridenums.hh:142
@ BackwardCommunication
reverse communication direction
Definition: gridenums.hh:172
@ InteriorBorder_All_Interface
send interior and border, receive all entities
Definition: gridenums.hh:88
@ All_All_Interface
send all and receive all entities
Definition: gridenums.hh:91
@ Overlap_All_Interface
send overlap, receive all entities
Definition: gridenums.hh:90
@ Overlap_OverlapFront_Interface
send overlap, receive overlap and front entities
Definition: gridenums.hh:89
@ InteriorBorder_InteriorBorder_Interface
send/receive interior and border entities
Definition: gridenums.hh:87
constexpr GeometryType cube(unsigned int dim)
Returns a GeometryType representing a hypercube of dimension dim.
Definition: type.hh:462
Some useful basic math stuff.
Implements an utility class that provides MPI's collective communication methods.
Helpers for dealing with MPI.
Dune namespace.
Definition: alignedallocator.hh:13
constexpr std::integer_sequence< T, II..., T(IN)> push_back(std::integer_sequence< T, II... >, std::integral_constant< T, IN >={})
Append an index IN to the back of the sequence.
Definition: integersequence.hh:69
constexpr Base power(Base m, Exponent p)
Power method for integer exponents.
Definition: math.hh:75
constexpr std::bool_constant<(sizeof...(II)==0)> empty(std::integer_sequence< T, II... >)
Checks whether the sequence is empty.
Definition: integersequence.hh:80
constexpr std::integral_constant< T, I0 > front(std::integer_sequence< T, I0, II... >)
Return the first entry of the sequence.
Definition: integersequence.hh:39
constexpr auto back(std::integer_sequence< T, II... > seq)
Return the last entry of the sequence.
Definition: integersequence.hh:44
An stl-compliant random-access container which stores everything on the stack.
specialize with 'true' for all codims that a grid can communicate data on (default=false)
Definition: capabilities.hh:97
specialize with 'true' for all codims that a grid provides an iterator for (default=hasEntity<codim>:...
Definition: capabilities.hh:74
Specialize with 'true' for all codims that a grid implements entities for. (default=false)
Definition: capabilities.hh:58
Specialize with 'true' for if the codimension 0 entity of the grid has only one possible geometry typ...
Definition: capabilities.hh:27
Specialize with 'true' if the grid is a Cartesian grid. Cartesian grids satisfy the following propert...
Definition: capabilities.hh:48
Specialize with 'true' if implementation guarantees a conforming leaf grid. (default=false)
Definition: capabilities.hh:115
Specialize with 'true' if implementation guarantees conforming level grids. (default=false)
Definition: capabilities.hh:106
Specialize with 'true' if the grid implementation is thread safe, while it is not modified....
Definition: capabilities.hh:169
Static tag representing a codimension.
Definition: dimension.hh:24
A traits struct that collects all associated types of one grid model.
Definition: grid.hh:1013
IndexSet< const GridImp, LeafIndexSetImp, LeafIndexType, LeafGeometryTypes > LeafIndexSet
The type of the leaf index set.
Definition: grid.hh:1082
IndexSet< const GridImp, LevelIndexSetImp, LevelIndexType, LevelGeometryTypes > LevelIndexSet
The type of the level index set.
Definition: grid.hh:1080
IdSet< const GridImp, LocalIdSetImp, LIDType > LocalIdSet
The type of the local id set.
Definition: grid.hh:1086
IdSet< const GridImp, GlobalIdSetImp, GIDType > GlobalIdSet
The type of the global id set.
Definition: grid.hh:1084
type describing an intersection with a neighboring processor
Definition: ygrid.hh:829
Specialization of the StructuredGridFactory class for YaspGrid.
This file provides the infrastructure for toroidal communication in YaspGrid.
A unique label for each type of element that can occur in a grid.
Traits for type conversions and type information.
the YaspEntity class and its specializations
The YaspEntitySeed class.
The YaspGeometry class and its specializations.
level-wise, non-persistent, consecutive indices for YaspGrid
The YaspIntersection class.
The YaspIntersectionIterator class.
The YaspLevelIterator class.
Specialization of the PersistentContainer for YaspGrid.
This provides a YGrid, the elemental component of the yaspgrid implementation.
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Nov 20, 23:30, 2024)