Dune Core Modules (2.3.1)

yaspgrid.hh
Go to the documentation of this file.
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_GRID_YASPGRID_HH
4#define DUNE_GRID_YASPGRID_HH
5
6#include <iostream>
7#include <vector>
8#include <algorithm>
9#include <stack>
10
11// either include stdint.h or provide fallback for uint8_t
12#if HAVE_STDINT_H
13#include <stdint.h>
14#else
15typedef unsigned char uint8_t;
16#endif
17
18#include <dune/grid/common/grid.hh> // the grid base classes
19#include <dune/grid/yaspgrid/grids.hh> // the yaspgrid base classes
20#include <dune/grid/common/capabilities.hh> // the capabilities
27#include <dune/geometry/genericgeometry/topologytypes.hh>
31
32
33#if HAVE_MPI
35#endif
36
44namespace Dune {
45
46 //************************************************************************
50 typedef double yaspgrid_ctype; // coordinate type used throughout YaspGrid
51
52 /* some sizes for building global ids
53 */
// bit budget per component of a persistent (global) id; the id type below is
// bigunsignedint<dim*24 + 6 + 4>, see PersistentIndexType
54 const int yaspgrid_dim_bits = 24; // bits for encoding each dimension
55 const int yaspgrid_level_bits = 6; // bits for encoding level number
56 const int yaspgrid_codim_bits = 4; // bits for encoding codimension
57
58
59 //************************************************************************
60 // forward declaration of templates
61
62 template<int dim> class YaspGrid;
63 template<int mydim, int cdim, class GridImp> class YaspGeometry;
64 template<int codim, int dim, class GridImp> class YaspEntity;
65 template<int codim, class GridImp> class YaspEntityPointer;
66 template<int codim, class GridImp> class YaspEntitySeed;
67 template<int codim, PartitionIteratorType pitype, class GridImp> class YaspLevelIterator;
68 template<class GridImp> class YaspIntersectionIterator;
69 template<class GridImp> class YaspIntersection;
70 template<class GridImp> class YaspHierarchicIterator;
71 template<class GridImp, bool isLeafIndexSet> class YaspIndexSet;
72 template<class GridImp> class YaspGlobalIdSet;
73
74 namespace FacadeOptions
75 {
76
// v == false: the Geometry facade stores the YaspGeometry implementation by
// value instead of by reference (geometries are created on the fly).
77 template<int dim, int mydim, int cdim>
78 struct StoreGeometryReference<mydim, cdim, YaspGrid<dim>, YaspGeometry>
79 {
80 static const bool v = false;
81 };
82
// same specialization for the const grid type
83 template<int dim, int mydim, int cdim>
84 struct StoreGeometryReference<mydim, cdim, const YaspGrid<dim>, YaspGeometry>
85 {
86 static const bool v = false;
87 };
88
89 }
90
91} // namespace Dune
92
93#include <dune/grid/yaspgrid/yaspgridgeometry.hh>
94#include <dune/grid/yaspgrid/yaspgridentity.hh>
95#include <dune/grid/yaspgrid/yaspgridintersection.hh>
96#include <dune/grid/yaspgrid/yaspgridintersectioniterator.hh>
97#include <dune/grid/yaspgrid/yaspgridhierarchiciterator.hh>
98#include <dune/grid/yaspgrid/yaspgridentityseed.hh>
99#include <dune/grid/yaspgrid/yaspgridentitypointer.hh>
100#include <dune/grid/yaspgrid/yaspgridleveliterator.hh>
101#include <dune/grid/yaspgrid/yaspgridindexsets.hh>
102#include <dune/grid/yaspgrid/yaspgrididset.hh>
103
104namespace Dune {
105
106 template<int dim>
107 struct YaspGridFamily
108 {
// collective communication wraps MPI_Comm when MPI is available, otherwise
// the sequential dummy implementation parameterized with the grid type
109#if HAVE_MPI
110 typedef CollectiveCommunication<MPI_Comm> CCType;
111#else
112 typedef CollectiveCommunication<Dune::YaspGrid<dim> > CCType;
113#endif
114
// assemble the grid traits; grid dimension and world dimension coincide
// NOTE(review): one GridTraits argument line (presumably the grid
// implementation type) is not visible in this extract
115 typedef GridTraits<dim, // dimension of the grid
116 dim, // dimension of the world space
118 YaspGeometry,YaspEntity,
119 YaspEntityPointer,
120 YaspLevelIterator, // type used for the level iterator
121 YaspIntersection, // leaf intersection
122 YaspIntersection, // level intersection
123 YaspIntersectionIterator, // leaf intersection iter
124 YaspIntersectionIterator, // level intersection iter
125 YaspHierarchicIterator,
126 YaspLevelIterator, // type used for the leaf(!) iterator
127 YaspIndexSet< const YaspGrid< dim >, false >, // level index set
128 YaspIndexSet< const YaspGrid< dim >, true >, // leaf index set
129 YaspGlobalIdSet<const YaspGrid<dim> >, // global id set
130 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+yaspgrid_codim_bits>, // global id type
131 YaspGlobalIdSet<const YaspGrid<dim> >, // local id set (same as global)
132 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+yaspgrid_codim_bits>, // local id type
133 CCType,
134 DefaultLevelGridViewTraits, DefaultLeafGridViewTraits,
135 YaspEntitySeed>
136 Traits;
137 };
138
// Compile-time recursion over the codimension for communicate(): the primary
// template handles intermediate codimensions 0 < codim < dim, for which
// YaspGrid implements no communication — requesting them is an error.
139 template<int dim, int codim>
140 struct YaspCommunicateMeta {
141 template<class G, class DataHandle>
142 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
143 {
144 if (data.contains(dim,codim))
145 {
146 DUNE_THROW(GridError, "interface communication not implemented");
147 }
// recurse to the next lower codimension
148 YaspCommunicateMeta<dim,codim-1>::comm(g,data,iftype,dir,level);
149 }
150 };
151
// Specialization for codim == dim (vertices): communication is supported,
// delegate to the grid and continue the recursion downwards.
152 template<int dim>
153 struct YaspCommunicateMeta<dim,dim> {
154 template<class G, class DataHandle>
155 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
156 {
157 if (data.contains(dim,dim))
158 g.template communicateCodim<DataHandle,dim>(data,iftype,dir,level);
159 YaspCommunicateMeta<dim,dim-1>::comm(g,data,iftype,dir,level);
160 }
161 };
162
// Specialization for codim == 0 (cells): supported, and terminates the
// compile-time recursion.
163 template<int dim>
164 struct YaspCommunicateMeta<dim,0> {
165 template<class G, class DataHandle>
166 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
167 {
168 if (data.contains(dim,0))
169 g.template communicateCodim<DataHandle,0>(data,iftype,dir,level);
170 }
171 };
172
173 //************************************************************************
190 template<int dim>
192 : public GridDefaultImplementation<dim,dim,yaspgrid_ctype,YaspGridFamily<dim> >
193 {
194 public:
197
// One entry of a send or receive list: the piece of a grid shared with one
// neighboring process.
198 struct Intersection {
// subgrid to be communicated
200 SubYGrid<dim,ctype> grid;
// rank of the neighboring process
203 int rank;
// distance of the neighbor in the torus (filled from ProcListIterator::distance)
206 int distance;
207 };
208
// All per-level data of the structured multigrid: cell and vertex grids,
// their interior/overlap/front subgrids, and the communication lists
// (intersection deques) for each supported interface.
211 struct YGridLevel {
212
// level number of this grid level within the hierarchy
214 int level() const
215 {
216 return level_;
217 }
218
219 // cell (codim 0) data
220 YGrid<dim,ctype> cell_global; // the whole cell grid on that level
221 SubYGrid<dim,ctype> cell_overlap; // we have no ghost cells, so our part is overlap completely
222 SubYGrid<dim,ctype> cell_interior; // interior cells are a subgrid of all cells
223
224 std::deque<Intersection> send_cell_overlap_overlap; // each intersection is a subgrid of overlap
225 std::deque<Intersection> recv_cell_overlap_overlap; // each intersection is a subgrid of overlap
226
227 std::deque<Intersection> send_cell_interior_overlap; // each intersection is a subgrid of overlap
228 std::deque<Intersection> recv_cell_overlap_interior; // each intersection is a subgrid of overlap
229
230 // vertex (codim dim) data
231 YGrid<dim,ctype> vertex_global; // the whole vertex grid on that level
232 SubYGrid<dim,ctype> vertex_overlapfront; // all our vertices are overlap and front
233 SubYGrid<dim,ctype> vertex_overlap; // subgrid containing only overlap
234 SubYGrid<dim,ctype> vertex_interiorborder; // subgrid containing only interior and border
235 SubYGrid<dim,ctype> vertex_interior; // subgrid containing only interior
236
237 std::deque<Intersection> send_vertex_overlapfront_overlapfront; // each intersection is a subgrid of overlapfront
238 std::deque<Intersection> recv_vertex_overlapfront_overlapfront; // each intersection is a subgrid of overlapfront
239
240 std::deque<Intersection> send_vertex_overlap_overlapfront; // each intersection is a subgrid of overlapfront
241 std::deque<Intersection> recv_vertex_overlapfront_overlap; // each intersection is a subgrid of overlapfront
242
243 std::deque<Intersection> send_vertex_interiorborder_interiorborder; // each intersection is a subgrid of overlapfront
244 std::deque<Intersection> recv_vertex_interiorborder_interiorborder; // each intersection is a subgrid of overlapfront
245
246 std::deque<Intersection> send_vertex_interiorborder_overlapfront; // each intersection is a subgrid of overlapfront
247 std::deque<Intersection> recv_vertex_overlapfront_interiorborder; // each intersection is a subgrid of overlapfront
248
249 // general
250 YaspGrid<dim>* mg; // each grid level knows its multigrid
251 int overlap; // in mesh cells on this level
// NOTE(review): the declaration of the level_ member (read by level() above)
// is not visible in this extract
254 };
255
259
260 // communication tag used by multigrid
261 enum { tag = 17 };
262
// read-only access to the processor torus used for neighbor communication
264 const Torus<dim>& torus () const
265 {
266 return _torus;
267 }
268
271
274 {
275 return YGridLevelIterator(_levels,0);
276 }
277
280 {
281 if (i<0 || i>maxLevel())
282 DUNE_THROW(GridError, "level not existing");
283 return YGridLevelIterator(_levels,i);
284 }
285
288 {
289 return YGridLevelIterator(_levels,maxLevel()+1);
290 }
291
292 // static method to create the default load balance strategy
293 static const YLoadBalance<dim>* defaultLoadbalancer()
294 {
295 static YLoadBalance<dim> lb;
296 return & lb;
297 }
298
299 protected:
// Build one level of the multigrid hierarchy: the global cell grid, the
// overlap/interior cell subgrids, the derived vertex subgrids
// (overlapfront/overlap/interiorborder/interior), and the communication
// intersection lists with all neighboring processes.
// @param level      level number to store in the new YGridLevel
// @param L          physical domain extension per direction
// @param s          number of global cells per direction
// @param periodic   per-direction periodicity flags
// @param o_interior origin of this process' interior cell partition
// @param s_interior size of this process' interior cell partition
// @param overlap    overlap width in mesh cells on this level
309 YGridLevel makelevel (int level, fTupel L, iTupel s, std::bitset<dim> periodic, iTupel o_interior, iTupel s_interior, int overlap)
310 {
311 // first, lets allocate a new structure
312 YGridLevel g;
313 g.overlap = overlap;
314 g.mg = this;
315 g.level_ = level;
316
317 // the global cell grid
318 iTupel o = iTupel(0); // logical origin is always 0, that is not a restriction
319 fTupel h;
320 fTupel r;
321 for (int i=0; i<dim; i++) h[i] = L[i]/s[i]; // the mesh size in each direction
322 for (int i=0; i<dim; i++) r[i] = 0.5*h[i]; // the shift for cell centers
323 g.cell_global = YGrid<dim,ctype>(o,s,h,r); // this is the global cell grid
324
325 // extend the cell interior grid by overlap considering periodicity
326 iTupel o_overlap;
327 iTupel s_overlap;
328 for (int i=0; i<dim; i++)
329 {
330 if (periodic[i])
331 {
332 // easy case, extend by 2 overlaps in total
333 o_overlap[i] = o_interior[i]-overlap; // Note: origin might be negative now
334 s_overlap[i] = s_interior[i]+2*overlap; // Note: might be larger than global size
335 }
336 else
337 {
338 // nonperiodic case, intersect with global size
339 int min = std::max(0,o_interior[i]-overlap);
340 int max = std::min(s[i]-1,o_interior[i]+s_interior[i]-1+overlap);
341 o_overlap[i] = min;
342 s_overlap[i] = max-min+1;
343 }
344 }
345 g.cell_overlap = SubYGrid<dim,ctype>(YGrid<dim,ctype>(o_overlap,s_overlap,h,r));
346
347 // now make the interior grid a subgrid of the overlapping grid
348 iTupel offset;
349 for (int i=0; i<dim; i++) offset[i] = o_interior[i]-o_overlap[i];
350 g.cell_interior = SubYGrid<dim,ctype>(o_interior,s_interior,offset,s_overlap,h,r);
351
352 // compute cell intersections
353 intersections(g.cell_overlap,g.cell_overlap,g.cell_global.size(),g.send_cell_overlap_overlap,g.recv_cell_overlap_overlap);
354 intersections(g.cell_interior,g.cell_overlap,g.cell_global.size(),g.send_cell_interior_overlap,g.recv_cell_overlap_interior);
355
356 // now we can do the vertex grids. They are derived completely from the cell grids
357 iTupel o_vertex_global, s_vertex_global;
358 for (int i=0; i<dim; i++) r[i] = 0.0; // the shift for vertices is zero, and the mesh size is same as for cells
359
360 // first let's make the global grid
361 for (int i=0; i<dim; i++) o_vertex_global[i] = g.cell_global.origin(i);
362 for (int i=0; i<dim; i++) s_vertex_global[i] = g.cell_global.size(i)+1; // one more vertices than cells ...
363 g.vertex_global = YGrid<dim,ctype>(o_vertex_global,s_vertex_global,h,r);
364
365 // now the local grid stored in this processor. All other grids are subgrids of this
366 iTupel o_vertex_overlapfront;
367 iTupel s_vertex_overlapfront;
368 for (int i=0; i<dim; i++) o_vertex_overlapfront[i] = g.cell_overlap.origin(i);
369 for (int i=0; i<dim; i++) s_vertex_overlapfront[i] = g.cell_overlap.size(i)+1; // one more vertices than cells ...
370 g.vertex_overlapfront = SubYGrid<dim,ctype>(YGrid<dim,ctype>(o_vertex_overlapfront,s_vertex_overlapfront,h,r));
371
372 // now overlap only (i.e. without front), is subgrid of overlapfront
// drop the outermost vertex layer where the overlap region touches the
// global boundary in a nonperiodic direction (those vertices are "front")
373 iTupel o_vertex_overlap;
374 iTupel s_vertex_overlap;
375 for (int i=0; i<dim; i++)
376 {
377 o_vertex_overlap[i] = g.cell_overlap.origin(i);
378 s_vertex_overlap[i] = g.cell_overlap.size(i)+1;
379
380 if (!periodic[i] && g.cell_overlap.origin(i)>g.cell_global.origin(i))
381 {
382 // not at the lower boundary
383 o_vertex_overlap[i] += 1;
384 s_vertex_overlap[i] -= 1;
385 }
386
387 if (!periodic[i] && g.cell_overlap.origin(i)+g.cell_overlap.size(i)<g.cell_global.origin(i)+g.cell_global.size(i))
388 {
389 // not at the upper boundary
390 s_vertex_overlap[i] -= 1;
391 }
392
393
394 offset[i] = o_vertex_overlap[i]-o_vertex_overlapfront[i];
395 }
396 g.vertex_overlap = SubYGrid<dim,ctype>(o_vertex_overlap,s_vertex_overlap,offset,s_vertex_overlapfront,h,r);
397
398 // now interior with border
399 iTupel o_vertex_interiorborder;
400 iTupel s_vertex_interiorborder;
401 for (int i=0; i<dim; i++) o_vertex_interiorborder[i] = g.cell_interior.origin(i);
402 for (int i=0; i<dim; i++) s_vertex_interiorborder[i] = g.cell_interior.size(i)+1;
403 for (int i=0; i<dim; i++) offset[i] = o_vertex_interiorborder[i]-o_vertex_overlapfront[i];
404 g.vertex_interiorborder = SubYGrid<dim,ctype>(o_vertex_interiorborder,s_vertex_interiorborder,offset,s_vertex_overlapfront,h,r);
405
406 // now only interior
// same shrinking logic as for the overlap vertices, applied to the
// interior cell grid: strip border vertices away from the global boundary
407 iTupel o_vertex_interior;
408 iTupel s_vertex_interior;
409 for (int i=0; i<dim; i++)
410 {
411 o_vertex_interior[i] = g.cell_interior.origin(i);
412 s_vertex_interior[i] = g.cell_interior.size(i)+1;
413
414 if (!periodic[i] && g.cell_interior.origin(i)>g.cell_global.origin(i))
415 {
416 // not at the lower boundary
417 o_vertex_interior[i] += 1;
418 s_vertex_interior[i] -= 1;
419 }
420
421 if (!periodic[i] && g.cell_interior.origin(i)+g.cell_interior.size(i)<g.cell_global.origin(i)+g.cell_global.size(i))
422 {
423 // not at the upper boundary
424 s_vertex_interior[i] -= 1;
425 }
426
427 offset[i] = o_vertex_interior[i]-o_vertex_overlapfront[i];
428 }
429 g.vertex_interior = SubYGrid<dim,ctype>(o_vertex_interior,s_vertex_interior,offset,s_vertex_overlapfront,h,r);
430
431 // compute vertex intersections
432 intersections(g.vertex_overlapfront,g.vertex_overlapfront,g.cell_global.size(),
433 g.send_vertex_overlapfront_overlapfront,g.recv_vertex_overlapfront_overlapfront);
434 intersections(g.vertex_overlap,g.vertex_overlapfront,g.cell_global.size(),
435 g.send_vertex_overlap_overlapfront,g.recv_vertex_overlapfront_overlap);
436 intersections(g.vertex_interiorborder,g.vertex_interiorborder,g.cell_global.size(),
437 g.send_vertex_interiorborder_interiorborder,g.recv_vertex_interiorborder_interiorborder);
438 intersections(g.vertex_interiorborder,g.vertex_overlapfront,g.cell_global.size(),
439 g.send_vertex_interiorborder_overlapfront,g.recv_vertex_overlapfront_interiorborder);
440
441 // return the whole thing
442 return g;
443 }
444
445
// Plain aggregate mirror of YGrid's defining data (origin, size, mesh size,
// shift) without virtual functions, so it can be shipped through the torus
// send/recv calls as raw bytes (sizeof-based messaging in intersections()).
446 struct mpifriendly_ygrid {
// zero-initialize all components
447 mpifriendly_ygrid ()
448 : origin(0), size(0), h(0.0), r(0.0)
449 {}
// extract the defining data from a YGrid
450 mpifriendly_ygrid (const YGrid<dim,ctype>& grid)
451 : origin(grid.origin()), size(grid.size()), h(grid.meshsize()), r(grid.shift())
452 {}
453 iTupel origin;
454 iTupel size;
455 fTupel h;
456 fTupel r;
457 };
458
// Compute the communication lists for one (sendgrid, recvgrid) pair:
// every process exchanges its own send/recv grids with all torus neighbors
// (shifted appropriately for periodic directions) and intersects them,
// so that each list entry describes exactly the entities shared with one
// neighbor rank.
// @param sendgrid  the subgrid this process would send from
// @param recvgrid  the subgrid this process would receive into
// @param size      global grid size per direction (for periodic shifts)
// @param sendlist  output: intersections to send, one per neighbor
// @param recvlist  output: intersections to receive, one per neighbor
467 void intersections (const SubYGrid<dim,ctype>& sendgrid, const SubYGrid<dim,ctype>& recvgrid, const iTupel& size,
468 std::deque<Intersection>& sendlist, std::deque<Intersection>& recvlist)
469 {
470 // the exchange buffers
471 std::vector<YGrid<dim,ctype> > send_recvgrid(_torus.neighbors());
472 std::vector<YGrid<dim,ctype> > recv_recvgrid(_torus.neighbors());
473 std::vector<YGrid<dim,ctype> > send_sendgrid(_torus.neighbors());
474 std::vector<YGrid<dim,ctype> > recv_sendgrid(_torus.neighbors());
475
476 // new exchange buffers to send simple struct without virtual functions
477 std::vector<mpifriendly_ygrid> mpifriendly_send_recvgrid(_torus.neighbors());
478 std::vector<mpifriendly_ygrid> mpifriendly_recv_recvgrid(_torus.neighbors());
479 std::vector<mpifriendly_ygrid> mpifriendly_send_sendgrid(_torus.neighbors());
480 std::vector<mpifriendly_ygrid> mpifriendly_recv_sendgrid(_torus.neighbors());
481
482 // fill send buffers; iterate over all neighboring processes
483 // non-periodic case is handled automatically because intersection will be zero
484 for (typename Torus<dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
485 {
486 // determine if we communicate with this neighbor (and what)
487 bool skip = false;
488 iTupel coord = _torus.coord(); // my coordinates
489 iTupel delta = i.delta(); // delta to neighbor
490 iTupel nb = coord; // the neighbor
491 for (int k=0; k<dim; k++) nb[k] += delta[k];
492 iTupel v = iTupel(0); // grid movement
493
// a neighbor beyond the torus edge exists only in periodic directions;
// there the grids must be shifted by the global size to overlap logically
494 for (int k=0; k<dim; k++)
495 {
496 if (nb[k]<0)
497 {
498 if (_periodic[k])
499 v[k] += size[k];
500 else
501 skip = true;
502 }
503 if (nb[k]>=_torus.dims(k))
504 {
505 if (_periodic[k])
506 v[k] -= size[k];
507 else
508 skip = true;
509 }
510 // neither might be true, then v=0
511 }
512
513 // store moved grids in send buffers
514 if (!skip)
515 {
516 send_sendgrid[i.index()] = sendgrid.move(v);
517 send_recvgrid[i.index()] = recvgrid.move(v);
518 }
519 else
520 {
// empty placeholder grid: the later intersection will be empty too
521 send_sendgrid[i.index()] = YGrid<dim,ctype>(iTupel(0),iTupel(0),fTupel(0.0),fTupel(0.0));
522 send_recvgrid[i.index()] = YGrid<dim,ctype>(iTupel(0),iTupel(0),fTupel(0.0),fTupel(0.0));
523 }
524 }
525
526 // issue send requests for sendgrid being sent to all neighbors
527 for (typename Torus<dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
528 {
529 mpifriendly_send_sendgrid[i.index()] = mpifriendly_ygrid(send_sendgrid[i.index()]);
530 _torus.send(i.rank(), &mpifriendly_send_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
531 }
532
533 // issue recv requests for sendgrids of neighbors
534 for (typename Torus<dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
535 _torus.recv(i.rank(), &mpifriendly_recv_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
536
537 // exchange the sendgrids
538 _torus.exchange();
539
540 // issue send requests for recvgrid being sent to all neighbors
541 for (typename Torus<dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
542 {
543 mpifriendly_send_recvgrid[i.index()] = mpifriendly_ygrid(send_recvgrid[i.index()]);
544 _torus.send(i.rank(), &mpifriendly_send_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
545 }
546
547 // issue recv requests for recvgrid of neighbors
548 for (typename Torus<dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
549 _torus.recv(i.rank(), &mpifriendly_recv_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
550
551 // exchange the recvgrid
552 _torus.exchange();
553
553 // process receive buffers and compute intersections
555 for (typename Torus<dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
556 {
557 // what must be sent to this neighbor
558 Intersection send_intersection;
559 mpifriendly_ygrid yg = mpifriendly_recv_recvgrid[i.index()];
560 recv_recvgrid[i.index()] = YGrid<dim,ctype>(yg.origin,yg.size,yg.h,yg.r);
561 send_intersection.grid = sendgrid.intersection(recv_recvgrid[i.index()]);
562 send_intersection.rank = i.rank();
563 send_intersection.distance = i.distance();
// note: sendlist is built with push_front, recvlist with push_back —
// the resulting relative orders are relied upon by the communication code
564 if (!send_intersection.grid.empty()) sendlist.push_front(send_intersection);
565
566 Intersection recv_intersection;
567 yg = mpifriendly_recv_sendgrid[i.index()];
568 recv_sendgrid[i.index()] = YGrid<dim,ctype>(yg.origin,yg.size,yg.h,yg.r);
569 recv_intersection.grid = recvgrid.intersection(recv_sendgrid[i.index()]);
570 recv_intersection.rank = i.rank();
571 recv_intersection.distance = i.distance();
572 if(!recv_intersection.grid.empty()) recvlist.push_back(recv_intersection);
573 }
574 }
575
576 protected:
577
578 typedef const YaspGrid<dim> GridImp;
579
// Shared tail of all constructors: compute entity counts, create the level-0
// index set and count boundary segments.
580 void init()
581 {
// compute per-level/per-codim entity counts (presumably fills `sizes`,
// which size() reads — setsizes() is defined elsewhere in this class)
582 setsizes();
// level index set for the coarsest level
583 indexsets.push_back( make_shared< YaspIndexSet<const YaspGrid<dim>, false > >(*this,0) );
// precompute nBSegments for numBoundarySegments()
584 boundarysegmentssize();
585 }
586
// Count the boundary segments of the local macro grid (level 0) and store
// the result in nBSegments: per direction k, sides[k] holds how many of the
// two faces of the local overlap grid lie on the global boundary (0, 1 or
// 2); each such face contributes the number of cells in that face.
587 void boundarysegmentssize()
588 {
589 // sizes of local macro grid
590 const FieldVector<int, dim> & size = begin()->cell_overlap.size();
// NOTE(review): the declaration of `sides` (presumably FieldVector<int,dim>)
// is not visible in this extract
592 {
593 for (int i=0; i<dim; i++)
594 {
// bools convert to 0/1, so this sums to 0, 1 or 2 global-boundary faces
595 sides[i] =
596 ((begin()->cell_overlap.origin(i) == begin()->cell_global.origin(i))+
597 (begin()->cell_overlap.origin(i) + begin()->cell_overlap.size(i)
598 == begin()->cell_global.origin(i) + begin()->cell_global.size(i)));
599 }
600 }
601 nBSegments = 0;
602 for (int k=0; k<dim; k++)
603 {
// offset = number of cells in a face orthogonal to direction k
604 int offset = 1;
605 for (int l=0; l<dim; l++)
606 {
607 if (l==k) continue;
608 offset *= size[l];
609 }
610 nBSegments += sides[k]*offset;
611 }
612 }
613
614 public:
615
616 // define the persistent index type
617 typedef bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+yaspgrid_codim_bits> PersistentIndexType;
618
620 typedef YaspGridFamily<dim> GridFamily;
621 // the Traits
622 typedef typename YaspGridFamily<dim>::Traits Traits;
623
624 // need for friend declarations in entity
625 typedef YaspIndexSet<YaspGrid<dim>, false > LevelIndexSetType;
626 typedef YaspIndexSet<YaspGrid<dim>, true > LeafIndexSetType;
627 typedef YaspGlobalIdSet<YaspGrid<dim> > GlobalIdSetType;
628
630 typedef typename SubYGrid<dim,ctype>::TransformingSubIterator TSI;
631 typedef typename std::deque<Intersection>::const_iterator ISIT;
632
635 fTupel L, iTupel s, std::bitset<dim> periodic, int overlap, const YLoadBalance<dim>* lb = defaultLoadbalancer())
636 {
637 _LL = L;
638 _s = s;
639 _periodic = periodic;
640 _levels.resize(1);
641 _overlap = overlap;
642
643 // coarse cell interior grid obtained through partitioning of global grid
644#if HAVE_MPI
645 iTupel o_interior;
646 iTupel s_interior;
647 iTupel o = iTupel(0);
648 array<int,dim> sArray;
649 std::copy(s.begin(), s.end(), sArray.begin());
650 double imbal = _torus.partition(_torus.rank(),o,sArray,o_interior,s_interior);
651 imbal = _torus.global_max(imbal);
652#else
653 iTupel o = iTupel(0);
654 iTupel o_interior(o);
655 iTupel s_interior(s);
656#endif
657 // add level
658 _levels[0] = makelevel(0,L,s,periodic,o_interior,s_interior,overlap);
659 }
660
663 fTupel L,
665 std::bitset<dim> periodic,
666 int overlap,
667 const YLoadBalance<dim>* lb = defaultLoadbalancer())
668 {
669 _LL = L;
670 _periodic = periodic;
671 _levels.resize(1);
672 _overlap = overlap;
673
674 std::copy(s.begin(), s.end(), this->_s.begin());
675
676 // coarse cell interior grid obtained through partitioning of global grid
677 iTupel o = iTupel(0);
678 iTupel o_interior(o);
679 iTupel s_interior;
680 std::copy(s.begin(), s.end(), s_interior.begin());
681#if HAVE_MPI
682 double imbal = _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
683 imbal = _torus.global_max(imbal);
684#endif
685
686 // add level
687 _levels[0] = makelevel(0,L,_s,periodic,o_interior,s_interior,overlap);
688 }
689
704 Dune::FieldVector<bool, dim> periodic, int overlap,
705 const YLoadBalance<dim>* lb = defaultLoadbalancer())
706 DUNE_DEPRECATED_MSG("Use the corresponding constructor taking array<int> and std::bitset")
707#if HAVE_MPI
708 : ccobj(comm),
709 _torus(comm,tag,s,lb),
710#else
711 : _torus(tag,s,lb),
712#endif
713 leafIndexSet_(*this),
714 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
715 {
716 MultiYGridSetup(L,s,std::bitset<dim>(),overlap,lb);
717
718 // hack: copy input bitfield (in FieldVector<bool>) into std::bitset
719 for (size_t i=0; i<dim; i++)
720 this->_periodic[i] = periodic[i];
721 init();
722 }
723
724
742 Dune::FieldVector<bool, dim> periodic, int overlap,
743 const YLoadBalance<dim>* lb = defaultLoadbalancer())
744 DUNE_DEPRECATED_MSG("Use the corresponding constructor taking array<int> and std::bitset")
745#if HAVE_MPI
746 : ccobj(MPI_COMM_SELF),
747 _torus(MPI_COMM_SELF,tag,s,lb),
748#else
749 : _torus(tag,s,lb),
750#endif
751 leafIndexSet_(*this),
752 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
753 {
754 MultiYGridSetup(L,s,std::bitset<dim>(),overlap,lb);
755
756 // hack: copy input bitfield (in FieldVector<bool>) into std::bitset
757 for (size_t i=0; i<dim; i++)
758 this->_periodic[i] = periodic[i];
759 init();
760 }
761
773 std::bitset<dim> periodic,
774 int overlap,
775 const YLoadBalance<dim>* lb = defaultLoadbalancer())
776#if HAVE_MPI
777 : ccobj(comm),
778 _torus(comm,tag,s,lb),
779#else
780 : _torus(tag,s,lb),
781#endif
782 leafIndexSet_(*this),
783 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
784 {
785 MultiYGridSetup(L,s,periodic,overlap,lb);
786
787 init();
788 }
789
790
805 std::bitset<dim> periodic,
806 int overlap,
807 const YLoadBalance<dim>* lb = defaultLoadbalancer())
808#if HAVE_MPI
809 : ccobj(MPI_COMM_SELF),
810 _torus(MPI_COMM_SELF,tag,s,lb),
811#else
812 : _torus(tag,s,lb),
813#endif
814 leafIndexSet_(*this),
815 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
816 {
817 MultiYGridSetup(L,s,periodic,overlap,lb);
818
819 init();
820 }
821
832 Dune::array<int, dim> elements)
833#if HAVE_MPI
834 : ccobj(MPI_COMM_SELF),
835 _torus(MPI_COMM_SELF,tag,elements,defaultLoadbalancer()),
836#else
837 : _torus(tag,elements,defaultLoadbalancer()),
838#endif
839 leafIndexSet_(*this),
840 _LL(L),
841 _overlap(0),
842 keep_ovlp(true),
843 adaptRefCount(0), adaptActive(false)
844 {
845 _levels.resize(1);
846
847 std::copy(elements.begin(), elements.end(), _s.begin());
848
849 // coarse cell interior grid obtained through partitioning of global grid
850 iTupel o = iTupel(0);
851 iTupel o_interior(o);
852 iTupel s_interior;
853 std::copy(elements.begin(), elements.end(), s_interior.begin());
854#if HAVE_MPI
855 double imbal = _torus.partition(_torus.rank(),o,elements,o_interior,s_interior);
856 imbal = _torus.global_max(imbal);
857#endif
858
859 // add level
860 _levels[0] = makelevel(0,L,_s,_periodic,o_interior,s_interior,0);
861
862 init();
863 }
864
865 private:
866 // do not copy this class
867 YaspGrid(const YaspGrid&);
868
869 public:
870
// index of the finest level; levels are stored consecutively in _levels
874 int maxLevel() const
875 {
876 return _levels.size()-1;
877 }
878
// Refine (refCount > 0) or coarsen (refCount < 0) the whole grid by
// |refCount| levels. Refinement bisects every cell in every direction;
// coarsening pops the finest levels. Throws if more levels would be removed
// than exist.
880 void globalRefine (int refCount)
881 {
882 if (refCount < -maxLevel())
883 DUNE_THROW(GridError, "Only " << maxLevel() << " levels left. " <<
884 "Coarsening " << -refCount << " levels requested!");
885
886 // If refCount is negative then coarsen the grid
887 for (int k=refCount; k<0; k++)
888 {
// overwrite with a default-constructed level first, releasing its
// resources, then shrink the hierarchy
889 // create an empty grid level
890 YGridLevel empty;
891 _levels.back() = empty;
892 // reduce maxlevel
893 _levels.pop_back();
894
895 setsizes();
896 indexsets.pop_back();
897 }
898
899 // If refCount is positive refine the grid
900 for (int k=0; k<refCount; k++)
901 {
902 // access to coarser grid level
903 YGridLevel& cg = _levels[maxLevel()];
904
905 // compute size of new global grid
906 iTupel s;
907 for (int i=0; i<dim; i++)
908 s[i] = 2*cg.cell_global.size(i);
909
// keep_ovlp: doubling the overlap cell count keeps the physical overlap
// width constant under refinement (see refineOptions)
910 // compute overlap
911 int overlap = (keep_ovlp) ? 2*cg.overlap : cg.overlap;
912
913 // the cell interior grid obtained from coarse cell interior grid
914 iTupel o_interior;
915 iTupel s_interior;
916 for (int i=0; i<dim; i++)
917 o_interior[i] = 2*cg.cell_interior.origin(i);
918 for (int i=0; i<dim; i++)
919 s_interior[i] = 2*cg.cell_interior.size(i);
920
921 // add level
922 _levels.push_back( makelevel(_levels.size(),_LL,s,_periodic,o_interior,s_interior,overlap) );
923
924 setsizes();
925 indexsets.push_back( make_shared<YaspIndexSet<const YaspGrid<dim>, false > >(*this,maxLevel()) );
926 }
927 }
928
// If keepPhysicalOverlap is true, the overlap width in cells is doubled on
// each refinement so the physical overlap region keeps its size; otherwise
// the cell count stays fixed and the physical overlap shrinks. Used by
// globalRefine().
933 void refineOptions (bool keepPhysicalOverlap)
934 {
935 keep_ovlp = keepPhysicalOverlap;
936 }
937
949 bool mark( int refCount, const typename Traits::template Codim<0>::Entity & e )
950 {
951 assert(adaptActive == false);
952 if (e.level() != maxLevel()) return false;
953 adaptRefCount = std::max(adaptRefCount, refCount);
954 return true;
955 }
956
963 int getMark ( const typename Traits::template Codim<0>::Entity &e ) const
964 {
965 return ( e.level() == maxLevel() ) ? adaptRefCount : 0;
966 }
967
969 bool adapt ()
970 {
971 globalRefine(adaptRefCount);
972 return (adaptRefCount > 0);
973 }
974
// Prepare adaptation: make the refinement marker globally consistent by
// taking the maximum over all processes. Returns true iff the grid is going
// to be coarsened (negative global marker).
976 bool preAdapt ()
977 {
978 adaptActive = true;
979 adaptRefCount = comm().max(adaptRefCount);
980 return (adaptRefCount < 0);
981 }
982
985 {
986 adaptActive = false;
987 adaptRefCount = 0;
988 }
989
// iterator to the first entity of codim cd and partition pitype on a level
991 template<int cd, PartitionIteratorType pitype>
992 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lbegin (int level) const
993 {
994 return levelbegin<cd,pitype>(level);
995 }
996
// one-past-the-end iterator of codim cd and partition pitype on a level
998 template<int cd, PartitionIteratorType pitype>
999 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lend (int level) const
1000 {
1001 return levelend<cd,pitype>(level);
1002 }
1003
// convenience overload: level iterator over the All_Partition
1005 template<int cd>
1006 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lbegin (int level) const
1007 {
1008 return levelbegin<cd,All_Partition>(level);
1009 }
1010
// convenience overload: level end iterator over the All_Partition
1012 template<int cd>
1013 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lend (int level) const
1014 {
1015 return levelend<cd,All_Partition>(level);
1016 }
1017
// leaf iterator == level iterator on the finest level (structured grid)
1019 template<int cd, PartitionIteratorType pitype>
1020 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafbegin () const
1021 {
1022 return levelbegin<cd,pitype>(maxLevel());
1023 }
1024
// leaf end iterator == level end iterator on the finest level
1026 template<int cd, PartitionIteratorType pitype>
1027 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafend () const
1028 {
1029 return levelend<cd,pitype>(maxLevel());
1030 }
1031
// convenience overload: leaf iterator over the All_Partition
1033 template<int cd>
1034 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafbegin () const
1035 {
1036 return levelbegin<cd,All_Partition>(maxLevel());
1037 }
1038
// convenience overload: leaf end iterator over the All_Partition
1040 template<int cd>
1041 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafend () const
1042 {
1043 return levelend<cd,All_Partition>(maxLevel());
1044 }
1045
1046 // \brief obtain EntityPointer from EntitySeed. */
1047 template <typename Seed>
1048 typename Traits::template Codim<Seed::codimension>::EntityPointer
1049 entityPointer(const Seed& seed) const
1050 {
1051 const int codim = Seed::codimension;
// grid level the seed refers to
1052 YGridLevelIterator g = begin(this->getRealImplementation(seed).level());
// only cells (codim 0) and vertices (codim dim) are supported
1053 switch (codim)
1054 {
1055 case 0 :
1056 return YaspEntityPointer<codim,GridImp>(this,g,
1057 TSI(g->cell_overlap, this->getRealImplementation(seed).coord()));
1058 case dim :
1059 return YaspEntityPointer<codim,GridImp>(this,g,
1060 TSI(g->vertex_overlap, this->getRealImplementation(seed).coord()));
1061 default :
1062 DUNE_THROW(GridError, "YaspEntityPointer: codim not implemented");
1063 }
1064 }
1065
// overlap width (in cells) on the given level; the stored value is used for
// every codimension
1067 int overlapSize (int level, int codim) const
1068 {
1069 YGridLevelIterator g = begin(level);
1070 return g->overlap;
1071 }
1072
// overlap width (in cells) on the leaf (finest) level
1074 int overlapSize (int codim) const
1075 {
// NOTE(review): the line initializing `g` (presumably
// YGridLevelIterator g = begin(maxLevel());) is not visible in this extract
1077 return g->overlap;
1078 }
1079
// YaspGrid has no ghost entities (overlap is used instead), so this is 0
1081 int ghostSize (int level, int codim) const
1082 {
1083 return 0;
1084 }
1085
// no ghost entities on the leaf level either
1087 int ghostSize (int codim) const
1088 {
1089 return 0;
1090 }
1091
// number of entities of given codim on a level, read from the table
// presumably filled by setsizes() (see init())
1093 int size (int level, int codim) const
1094 {
1095 return sizes[level][codim];
1096 }
1097
// number of leaf entities of given codim (leaf == finest level)
1099 int size (int codim) const
1100 {
1101 return sizes[maxLevel()][codim];
1102 }
1103
1105 int size (int level, GeometryType type) const
1106 {
1107 return (type.isCube()) ? sizes[level][dim-type.dim()] : 0;
1108 }
1109
// number of leaf entities of the given geometry type
1111 int size (GeometryType type) const
1112 {
1113 return size(maxLevel(),type);
1114 }
1115
// number of boundary segments of the local macro grid, precomputed in
// boundarysegmentssize()
1117 size_t numBoundarySegments () const
1118 {
1119 return nBSegments;
1120 }
1121
1126 template<class DataHandleImp, class DataType>
1128 {
1129 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,level);
1130 }
1131
1136 template<class DataHandleImp, class DataType>
1138 {
1139 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,this->maxLevel());
1140 }
1141
// Communicate data for one codimension on one level.
// Phases: (1) select the send/recv intersection lists matching `iftype`,
// (2) determine message sizes (locally for fixed-size data, via an extra
// size exchange for variable-size data), (3) gather into send buffers,
// exchange through the torus, scatter from recv buffers.
// Uses the class-level typedef ISIT (intersection-list const_iterator),
// declared elsewhere in the class.
1146 template<class DataHandle, int codim>
1147 void communicateCodim (DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level) const
1148 {
1149 // check input
1150 if (!data.contains(dim,codim)) return; // should have been checked outside
1151
1152 // data types
1153 typedef typename DataHandle::DataType DataType;
1154
1155 // access to grid level
1156 YGridLevelIterator g = begin(level);
1157
1158 // find send/recv lists or throw error
1159 const std::deque<Intersection>* sendlist=0;
1160 const std::deque<Intersection>* recvlist=0;
1161 if (codim==0) // the elements
1162 {
// NOTE(review): the condition line (original line 1163, presumably
// `if (iftype==InteriorBorder_InteriorBorder_Interface)`) is missing from
// this rendering — confirm against the original header.
1164 return; // there is nothing to do in this case
1165 if (iftype==InteriorBorder_All_Interface)
1166 {
1167 sendlist = &g->send_cell_interior_overlap;
1168 recvlist = &g->recv_cell_overlap_interior;
1169 }
// NOTE(review): the condition line (original line 1170, presumably the
// Overlap_*/All_All interface case) is missing from this rendering.
1171 {
1172 sendlist = &g->send_cell_overlap_overlap;
1173 recvlist = &g->recv_cell_overlap_overlap;
1174 }
1175 }
1176 if (codim==dim) // the vertices
1177 {
// NOTE(review): the condition line (original line 1178, presumably
// `if (iftype==InteriorBorder_InteriorBorder_Interface)`) is missing.
1179 {
1180 sendlist = &g->send_vertex_interiorborder_interiorborder;
1181 recvlist = &g->recv_vertex_interiorborder_interiorborder;
1182 }
1183
1184 if (iftype==InteriorBorder_All_Interface)
1185 {
1186 sendlist = &g->send_vertex_interiorborder_overlapfront;
1187 recvlist = &g->recv_vertex_overlapfront_interiorborder;
1188 }
// NOTE(review): the condition line (original line 1189, presumably the
// Overlap_OverlapFront/Overlap_All interface case) is missing.
1190 {
1191 sendlist = &g->send_vertex_overlap_overlapfront;
1192 recvlist = &g->recv_vertex_overlapfront_overlap;
1193 }
1194 if (iftype==All_All_Interface)
1195 {
1196 sendlist = &g->send_vertex_overlapfront_overlapfront;
1197 recvlist = &g->recv_vertex_overlapfront_overlapfront;
1198 }
1199 }
1200
1201 // change communication direction?
1202 if (dir==BackwardCommunication)
1203 std::swap(sendlist,recvlist);
1204
1205 int cnt;
1206
1207 // Size computation (requires communication if variable size)
1208 std::vector<int> send_size(sendlist->size(),-1); // map rank to total number of objects (of type DataType) to be sent
1209 std::vector<int> recv_size(recvlist->size(),-1); // map rank to total number of objects (of type DataType) to be recvd
1210 std::vector<size_t*> send_sizes(sendlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be sent
1211 std::vector<size_t*> recv_sizes(recvlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be recvd
1212 if (data.fixedsize(dim,codim))
1213 {
1214 // fixed size: just take a dummy entity, size can be computed without communication
1215 cnt=0;
1216 for (ISIT is=sendlist->begin(); is!=sendlist->end(); ++is)
1217 {
1218 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1219 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1220 send_size[cnt] = is->grid.totalsize() * data.size(*it);
1221 cnt++;
1222 }
1223 cnt=0;
1224 for (ISIT is=recvlist->begin(); is!=recvlist->end(); ++is)
1225 {
1226 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1227 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1228 recv_size[cnt] = is->grid.totalsize() * data.size(*it);
1229 cnt++;
1230 }
1231 }
1232 else
1233 {
1234 // variable size case: sender side determines the size
1235 cnt=0;
1236 for (ISIT is=sendlist->begin(); is!=sendlist->end(); ++is)
1237 {
1238 // allocate send buffer for sizes per entitiy
1239 size_t *buf = new size_t[is->grid.totalsize()];
1240 send_sizes[cnt] = buf;
1241
1242 // loop over entities and ask for size
1243 int i=0; size_t n=0;
1244 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1245 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1246 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1247 tsubend(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubend()));
1248 for ( ; it!=tsubend; ++it)
1249 {
1250 buf[i] = data.size(*it);
1251 n += buf[i];
1252 i++;
1253 }
1254
1255 // now we know the size for this rank
1256 send_size[cnt] = n;
1257
1258 // hand over send request to torus class
1259 torus().send(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1260 cnt++;
1261 }
1262
1263 // allocate recv buffers for sizes and store receive request
1264 cnt=0;
1265 for (ISIT is=recvlist->begin(); is!=recvlist->end(); ++is)
1266 {
1267 // allocate recv buffer
1268 size_t *buf = new size_t[is->grid.totalsize()];
1269 recv_sizes[cnt] = buf;
1270
1271 // hand over recv request to torus class
1272 torus().recv(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1273 cnt++;
1274 }
1275
1276 // exchange all size buffers now
1277 torus().exchange();
1278
1279 // release send size buffers
1280 cnt=0;
1281 for (ISIT is=sendlist->begin(); is!=sendlist->end(); ++is)
1282 {
1283 delete[] send_sizes[cnt];
1284 send_sizes[cnt] = 0;
1285 cnt++;
1286 }
1287
1288 // process receive size buffers
1289 cnt=0;
1290 for (ISIT is=recvlist->begin(); is!=recvlist->end(); ++is)
1291 {
1292 // get recv buffer
1293 size_t *buf = recv_sizes[cnt];
1294
1295 // compute total size
1296 size_t n=0;
1297 for (int i=0; i<is->grid.totalsize(); ++i)
1298 n += buf[i];
1299
1300 // ... and store it
1301 recv_size[cnt] = n;
1302 ++cnt;
1303 }
1304 }
1305
1306
1307 // allocate & fill the send buffers & store send request
1308 std::vector<DataType*> sends(sendlist->size(), static_cast<DataType*>(0)); // store pointers to send buffers
1309 cnt=0;
1310 for (ISIT is=sendlist->begin(); is!=sendlist->end(); ++is)
1311 {
1312 // allocate send buffer
1313 DataType *buf = new DataType[send_size[cnt]];
1314
1315 // remember send buffer
1316 sends[cnt] = buf;
1317
1318 // make a message buffer
1319 MessageBuffer<DataType> mb(buf);
1320
1321 // fill send buffer; iterate over cells in intersection
1322 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1323 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1324 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1325 tsubend(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubend()));
1326 for ( ; it!=tsubend; ++it)
1327 data.gather(mb,*it);
1328
1329 // hand over send request to torus class
1330 torus().send(is->rank,buf,send_size[cnt]*sizeof(DataType));
1331 cnt++;
1332 }
1333
1334 // allocate recv buffers and store receive request
1335 std::vector<DataType*> recvs(recvlist->size(),static_cast<DataType*>(0)); // store pointers to send buffers
1336 cnt=0;
1337 for (ISIT is=recvlist->begin(); is!=recvlist->end(); ++is)
1338 {
1339 // allocate recv buffer
1340 DataType *buf = new DataType[recv_size[cnt]];
1341
1342 // remember recv buffer
1343 recvs[cnt] = buf;
1344
1345 // hand over recv request to torus class
1346 torus().recv(is->rank,buf,recv_size[cnt]*sizeof(DataType));
1347 cnt++;
1348 }
1349
1350 // exchange all buffers now
1351 torus().exchange();
1352
1353 // release send buffers
1354 cnt=0;
1355 for (ISIT is=sendlist->begin(); is!=sendlist->end(); ++is)
1356 {
1357 delete[] sends[cnt];
1358 sends[cnt] = 0;
1359 cnt++;
1360 }
1361
1362 // process receive buffers and delete them
1363 cnt=0;
1364 for (ISIT is=recvlist->begin(); is!=recvlist->end(); ++is)
1365 {
1366 // get recv buffer
1367 DataType *buf = recvs[cnt];
1368
1369 // make a message buffer
1370 MessageBuffer<DataType> mb(buf);
1371
1372 // copy data from receive buffer; iterate over cells in intersection
1373 if (data.fixedsize(dim,codim))
1374 {
1375 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1376 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1377 size_t n=data.size(*it);
1378 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1379 tsubend(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubend()));
1380 for ( ; it!=tsubend; ++it)
1381 data.scatter(mb,*it,n);
1382 }
1383 else
1384 {
1385 int i=0;
1386 size_t *sbuf = recv_sizes[cnt];
1387 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1388 it(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubbegin()));
1389 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1390 tsubend(YaspLevelIterator<codim,All_Partition,GridImp>(this,g,is->grid.tsubend()));
1391 for ( ; it!=tsubend; ++it)
1392 data.scatter(mb,*it,sbuf[i++]);
1393 delete[] sbuf;
1394 }
1395
1396 // delete buffer
1397 delete[] buf; // "it crashes here!" (debugging note, translated from German)
1398 cnt++;
1399 }
1400 }
1401
1402 // The new index sets from DDM 11.07.2005
// access to the grid-wide unique id set (member theglobalidset)
1403 const typename Traits::GlobalIdSet& globalIdSet() const
1404 {
1405 return theglobalidset;
1406 }
1407
// access to the local id set; YaspGrid reuses the global id set object
// as its local id set (same member is returned by globalIdSet())
1408 const typename Traits::LocalIdSet& localIdSet() const
1409 {
1410 return theglobalidset;
1411 }
1412
1413 const typename Traits::LevelIndexSet& levelIndexSet(int level) const
1414 {
1415 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1416 return *(indexsets[level]);
1417 }
1418
// access to the leaf index set (member leafIndexSet_, kept for the finest level)
1419 const typename Traits::LeafIndexSet& leafIndexSet() const
1420 {
1421 return leafIndexSet_;
1422 }
1423
// return the collective communication object (member ccobj).
// NOTE(review): both signature lines (original lines 1427 and 1434) are
// missing from this rendering; per the reference index the MPI variant reads
//   const CollectiveCommunication<MPI_Comm>& comm () const
// and the sequential variant returns CollectiveCommunication<YaspGrid>.
1424#if HAVE_MPI
1428 {
1429 return ccobj;
1430 }
1431#else
1435 {
1436 return ccobj;
1437 }
1438#endif
1439
1440 private:
1441
1442 // number of boundary segments of the level 0 grid
1443 int nBSegments;
1444
1445 // Index classes need access to the real entity
1446 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim>, true >;
1447 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim>, false >;
1448 friend class Dune::YaspGlobalIdSet<const Dune::YaspGrid<dim> >;
1449
// intersection and entity implementations also reach into the grid internals
1450 friend class Dune::YaspIntersectionIterator<const Dune::YaspGrid<dim> >;
1451 friend class Dune::YaspIntersection<const Dune::YaspGrid<dim> >;
1452 friend class Dune::YaspEntity<0, dim, const Dune::YaspGrid<dim> >;
1453
// entity pointers of every codimension may access the grid
1454 template <int codim_, class GridImp_>
1455 friend class Dune::YaspEntityPointer;
1456
// the generic Entity facade needs access to the real implementation
1457 template<int codim_, int dim_, class GridImp_, template<int,int,class> class EntityImp_>
1458 friend class Entity;
1459
1460 template<class DT>
1461 class MessageBuffer {
1462 public:
1463 // Constructor
1464 MessageBuffer (DT *p)
1465 {
1466 a=p;
1467 i=0;
1468 j=0;
1469 }
1470
1471 // write data to message buffer, acts like a stream !
1472 template<class Y>
1473 void write (const Y& data)
1474 {
1475 dune_static_assert(( is_same<DT,Y>::value ), "DataType mismatch");
1476 a[i++] = data;
1477 }
1478
1479 // read data from message buffer, acts like a stream !
1480 template<class Y>
1481 void read (Y& data) const
1482 {
1483 dune_static_assert(( is_same<DT,Y>::value ), "DataType mismatch");
1484 data = a[j++];
1485 }
1486
1487 private:
1488 DT *a;
1489 int i;
1490 mutable int j;
1491 };
1492
1493 void setsizes ()
1494 {
1495 for (YGridLevelIterator g=begin(); g!=end(); ++g)
1496 {
1497 // codim 0 (elements)
1498 sizes[g->level()][0] = 1;
1499 for (int i=0; i<dim; ++i)
1500 sizes[g->level()][0] *= g->cell_overlap.size(i);
1501
1502 // codim 1 (faces)
1503 if (dim>1)
1504 {
1505 sizes[g->level()][1] = 0;
1506 for (int i=0; i<dim; ++i)
1507 {
1508 int s=g->cell_overlap.size(i)+1;
1509 for (int j=0; j<dim; ++j)
1510 if (j!=i)
1511 s *= g->cell_overlap.size(j);
1512 sizes[g->level()][1] += s;
1513 }
1514 }
1515
1516 // codim dim-1 (edges)
1517 if (dim>2)
1518 {
1519 sizes[g->level()][dim-1] = 0;
1520 for (int i=0; i<dim; ++i)
1521 {
1522 int s=g->cell_overlap.size(i);
1523 for (int j=0; j<dim; ++j)
1524 if (j!=i)
1525 s *= g->cell_overlap.size(j)+1;
1526 sizes[g->level()][dim-1] += s;
1527 }
1528 }
1529
1530 // codim dim (vertices)
1531 sizes[g->level()][dim] = 1;
1532 for (int i=0; i<dim; ++i)
1533 sizes[g->level()][dim] *= g->vertex_overlapfront.size(i);
1534 }
1535 }
1536
1538 template<int cd, PartitionIteratorType pitype>
1539 YaspLevelIterator<cd,pitype,GridImp> levelbegin (int level) const
1540 {
1541 dune_static_assert( cd == dim || cd == 0 ,
1542 "YaspGrid only supports Entities with codim=dim and codim=0");
1543 YGridLevelIterator g = begin(level);
1544 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1545 if (pitype==Ghost_Partition)
1546 return levelend <cd, pitype> (level);
1547 if (cd==0) // the elements
1548 {
1549 if (pitype<=InteriorBorder_Partition)
1550 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->cell_interior.tsubbegin());
1551 if (pitype<=All_Partition)
1552 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->cell_overlap.tsubbegin());
1553 }
1554 if (cd==dim) // the vertices
1555 {
1556 if (pitype==Interior_Partition)
1557 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_interior.tsubbegin());
1558 if (pitype==InteriorBorder_Partition)
1559 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_interiorborder.tsubbegin());
1560 if (pitype==Overlap_Partition)
1561 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_overlap.tsubbegin());
1562 if (pitype<=All_Partition)
1563 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_overlapfront.tsubbegin());
1564 }
1565 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1566 }
1567
1569 template<int cd, PartitionIteratorType pitype>
1570 YaspLevelIterator<cd,pitype,GridImp> levelend (int level) const
1571 {
1572 dune_static_assert( cd == dim || cd == 0 ,
1573 "YaspGrid only supports Entities with codim=dim and codim=0");
1574 YGridLevelIterator g = begin(level);
1575 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1576 if (cd==0) // the elements
1577 {
1578 if (pitype<=InteriorBorder_Partition)
1579 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->cell_interior.tsubend());
1580 if (pitype<=All_Partition || pitype == Ghost_Partition)
1581 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->cell_overlap.tsubend());
1582 }
1583 if (cd==dim) // the vertices
1584 {
1585 if (pitype==Interior_Partition)
1586 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_interior.tsubend());
1587 if (pitype==InteriorBorder_Partition)
1588 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_interiorborder.tsubend());
1589 if (pitype==Overlap_Partition)
1590 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_overlap.tsubend());
1591 if (pitype<=All_Partition || pitype == Ghost_Partition)
1592 return YaspLevelIterator<cd,pitype,GridImp>(this,g,g->vertex_overlapfront.tsubend());
1593 }
1594 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1595 }
1596
// collective communication object: MPI-based when available, sequential otherwise
1597#if HAVE_MPI
1598 CollectiveCommunication<MPI_Comm> ccobj;
1599#else
1600 CollectiveCommunication<YaspGrid> ccobj;
1601#endif
1602
// processor topology used for all point-to-point exchanges
1603 Torus<dim> _torus;
1604
// per-level index sets, leaf index set and the (shared global/local) id set
1605 std::vector< shared_ptr< YaspIndexSet<const YaspGrid<dim>, false > > > indexsets;
1606 YaspIndexSet<const YaspGrid<dim>, true> leafIndexSet_;
1607 YaspGlobalIdSet<const YaspGrid<dim> > theglobalidset;
1608
// domain description: lengths, cells per direction, periodicity flags
1609 fTupel _LL;
1610 iTupel _s;
1611 std::bitset<dim> _periodic;
// grid level hierarchy; 32 here matches the first dimension of `sizes` below
1612 ReservedVector<YGridLevel,32> _levels;
1613 int _overlap;
1614 int sizes[32][dim+1]; // total number of entities per level and codim
// refinement state: keep physical overlap on refine, pending mark, adapt in progress
1615 bool keep_ovlp;
1616 int adaptRefCount;
1617 bool adaptActive;
1618 };
1619
1621
1622 template <int d>
1623 inline std::ostream& operator<< (std::ostream& s, YaspGrid<d>& grid)
1624 {
1625 int rank = grid.torus().rank();
1626
1627 s << "[" << rank << "]:" << " YaspGrid maxlevel=" << grid.maxLevel() << std::endl;
1628
1629 for (typename YaspGrid<d>::YGridLevelIterator g=grid.begin(); g!=grid.end(); ++g)
1630 {
1631 s << "[" << rank << "]: " << std::endl;
1632 s << "[" << rank << "]: " << "==========================================" << std::endl;
1633 s << "[" << rank << "]: " << "level=" << g->level() << std::endl;
1634 s << "[" << rank << "]: " << "cell_global=" << g->cell_global << std::endl;
1635 s << "[" << rank << "]: " << "cell_overlap=" << g->cell_overlap << std::endl;
1636 s << "[" << rank << "]: " << "cell_interior=" << g->cell_interior << std::endl;
1637 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_cell_overlap_overlap.begin();
1638 i!=g->send_cell_overlap_overlap.end(); ++i)
1639 {
1640 s << "[" << rank << "]: " << " s_c_o_o "
1641 << i->rank << " " << i->grid << std::endl;
1642 }
1643 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_cell_overlap_overlap.begin();
1644 i!=g->recv_cell_overlap_overlap.end(); ++i)
1645 {
1646 s << "[" << rank << "]: " << " r_c_o_o "
1647 << i->rank << " " << i->grid << std::endl;
1648 }
1649 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_cell_interior_overlap.begin();
1650 i!=g->send_cell_interior_overlap.end(); ++i)
1651 {
1652 s << "[" << rank << "]: " << " s_c_i_o "
1653 << i->rank << " " << i->grid << std::endl;
1654 }
1655 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_cell_overlap_interior.begin();
1656 i!=g->recv_cell_overlap_interior.end(); ++i)
1657 {
1658 s << "[" << rank << "]: " << " r_c_o_i "
1659 << i->rank << " " << i->grid << std::endl;
1660 }
1661
1662 s << "[" << rank << "]: " << "-----------------------------------------------" << std::endl;
1663 s << "[" << rank << "]: " << "vertex_global=" << g->vertex_global << std::endl;
1664 s << "[" << rank << "]: " << "vertex_overlapfront=" << g->vertex_overlapfront << std::endl;
1665 s << "[" << rank << "]: " << "vertex_overlap=" << g->vertex_overlap << std::endl;
1666 s << "[" << rank << "]: " << "vertex_interiorborder=" << g->vertex_interiorborder << std::endl;
1667 s << "[" << rank << "]: " << "vertex_interior=" << g->vertex_interior << std::endl;
1668 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_vertex_overlapfront_overlapfront.begin();
1669 i!=g->send_vertex_overlapfront_overlapfront.end(); ++i)
1670 {
1671 s << "[" << rank << "]: " << " s_v_of_of "
1672 << i->rank << " " << i->grid << std::endl;
1673 }
1674 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_vertex_overlapfront_overlapfront.begin();
1675 i!=g->recv_vertex_overlapfront_overlapfront.end(); ++i)
1676 {
1677 s << "[" << rank << "]: " << " r_v_of_of "
1678 << i->rank << " " << i->grid << std::endl;
1679 }
1680 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_vertex_overlap_overlapfront.begin();
1681 i!=g->send_vertex_overlap_overlapfront.end(); ++i)
1682 {
1683 s << "[" << rank << "]: " << " s_v_o_of "
1684 << i->rank << " " << i->grid << std::endl;
1685 }
1686 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_vertex_overlapfront_overlap.begin();
1687 i!=g->recv_vertex_overlapfront_overlap.end(); ++i)
1688 {
1689 s << "[" << rank << "]: " << " r_v_of_o "
1690 << i->rank << " " << i->grid << std::endl;
1691 }
1692 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_vertex_interiorborder_interiorborder.begin();
1693 i!=g->send_vertex_interiorborder_interiorborder.end(); ++i)
1694 {
1695 s << "[" << rank << "]: " << " s_v_ib_ib "
1696 << i->rank << " " << i->grid << std::endl;
1697 }
1698 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_vertex_interiorborder_interiorborder.begin();
1699 i!=g->recv_vertex_interiorborder_interiorborder.end(); ++i)
1700 {
1701 s << "[" << rank << "]: " << " r_v_ib_ib "
1702 << i->rank << " " << i->grid << std::endl;
1703 }
1704 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->send_vertex_interiorborder_overlapfront.begin();
1705 i!=g->send_vertex_interiorborder_overlapfront.end(); ++i)
1706 {
1707 s << "[" << rank << "]: " << " s_v_ib_of "
1708 << i->rank << " " << i->grid << std::endl;
1709 }
1710 for (typename std::deque<typename YaspGrid<d>::Intersection>::const_iterator i=g->recv_vertex_overlapfront_interiorborder.begin();
1711 i!=g->recv_vertex_overlapfront_interiorborder.end(); ++i)
1712 {
1713 s << "[" << rank << "]: " << " s_v_of_ib "
1714 << i->rank << " " << i->grid << std::endl;
1715 }
1716 }
1717
1718 s << std::endl;
1719
1720 return s;
1721 }
1722
1723 namespace Capabilities
1724 {
1725
// NOTE(review): the struct-name line (original line 1738, per the reference
// index the specialization of hasSingleGeometryType< YaspGrid<dim> > with
// the cube topology id) is missing from this rendering — confirm.
1737 template<int dim>
1739 {
1740 static const bool v = true;
1741 static const unsigned int topologyId = GenericGeometry :: CubeTopology< dim > :: type :: id ;
1742 };
1743
// YaspGrid is a Cartesian (tensor-product, axis-aligned) grid
1747 template<int dim>
1748 struct isCartesian< YaspGrid<dim> >
1749 {
1750 static const bool v = true;
1751 };
1752
// entities exist for codim 0 (elements) ...
1756 template<int dim>
1757 struct hasEntity< YaspGrid<dim>, 0 >
1758 {
1759 static const bool v = true;
1760 };
1761
// ... and for codim dim (vertices)
1765 template<int dim>
1766 struct hasEntity< YaspGrid<dim>, dim >
1767 {
1768 static const bool v = true;
1769 };
1770
// communication is supported on the same two codims
1771 template< int dim >
1772 struct canCommunicate< YaspGrid< dim >, 0 >
1773 {
1774 static const bool v = true;
1775 };
1776
1777 template< int dim >
1778 struct canCommunicate< YaspGrid< dim >, dim >
1779 {
1780 static const bool v = true;
1781 };
1782
1786 template<int dim>
1787 struct isParallel< YaspGrid<dim> >
1788 {
1789 static const bool v = true;
1790 };
1791
// NOTE(review): the struct-name line (original line 1796) is missing;
// presumably isLevelwiseConforming< YaspGrid<dim> > — confirm.
1795 template<int dim>
1797 {
1798 static const bool v = true;
1799 };
1800
// NOTE(review): the struct-name line (original line 1805) is missing;
// presumably isLeafwiseConforming< YaspGrid<dim> > — confirm.
1804 template<int dim>
1806 {
1807 static const bool v = true;
1808 };
1809
1810 }
1811
1812} // end namespace
1813
1814
1815#endif
A geometry implementation for axis-aligned hypercubes.
Portable very large unsigned integers.
Specialization of CollectiveCommunication for MPI.
Definition: mpicollectivecommunication.hh:146
T max(T &in) const
Compute the maximum of the argument over all processes and return the result in every process....
Definition: mpicollectivecommunication.hh:224
Collective communication interface and sequential default implementation.
Definition: collectivecommunication.hh:72
CommDataHandleIF describes the features of a data handle for communication in parallel runs using the...
Definition: datahandleif.hh:75
Iterator begin()
begin iterator
Definition: densevector.hh:296
Iterator end()
end iterator
Definition: densevector.hh:302
Generic class for stl-conforming iterators for container classes with operator[].
Definition: genericiterator.hh:152
Unique label for each type of entities that can occur in DUNE grids.
Definition: type.hh:25
Definition: grid.hh:1017
Traits::template Partition< pitype >::LevelGridView DUNE_DEPRECATED_MSG("The method levelView has been renamed to levelGridView.") levelView(int level) const
View for a grid level.
Definition: grid.hh:1030
static ReturnImplementationType< InterfaceType >::ImplementationType & getRealImplementation(InterfaceType &i)
return real implementation of interface class
Definition: grid.hh:1223
Base class for exceptions in Dune grid modules.
Definition: exceptions.hh:18
MPI_Comm MPICommunicator
The type of the mpi communicator.
Definition: mpihelper.hh:174
[ provides Dune::Grid ]
Definition: yaspgrid.hh:193
int size(int level, int codim) const
number of entities per level and codim in this process
Definition: yaspgrid.hh:1093
YGridLevelIterator end() const
return iterator pointing to one past the finest level
Definition: yaspgrid.hh:287
int size(int codim) const
number of leaf entities per codim in this process
Definition: yaspgrid.hh:1099
void refineOptions(bool keepPhysicalOverlap)
set options for refinement
Definition: yaspgrid.hh:933
int overlapSize(int codim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1074
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lbegin(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1006
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lend(int level) const
Iterator to one past the last entity of given codim on level for partition type.
Definition: yaspgrid.hh:999
YaspGrid(Dune::MPIHelper::MPICommunicator comm, Dune::FieldVector< ctype, dim > L, Dune::FieldVector< int, dim > s, Dune::FieldVector< bool, dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
Definition: yaspgrid.hh:701
void MultiYGridSetup(fTupel L, iTupel s, std::bitset< dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
The constructor of the old MultiYGrid class.
Definition: yaspgrid.hh:634
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1034
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir) const
Definition: yaspgrid.hh:1137
SubYGrid< dim, ctype >::TransformingSubIterator TSI
shorthand for some data types
Definition: yaspgrid.hh:630
YGridLevelIterator begin(int i) const
return iterator pointing to given level
Definition: yaspgrid.hh:279
size_t numBoundarySegments() const
returns the number of boundary segments within the macro grid
Definition: yaspgrid.hh:1117
ReservedVector< YGridLevel, 32 >::const_iterator YGridLevelIterator
Iterator over the grid levels.
Definition: yaspgrid.hh:270
YaspGrid(Dune::FieldVector< ctype, dim > L, Dune::array< int, dim > elements)
Definition: yaspgrid.hh:831
int getMark(const typename Traits::template Codim< 0 >::Entity &e) const
returns adaptation mark for given entity
Definition: yaspgrid.hh:963
YGridLevelIterator begin() const
return iterator pointing to coarsest level
Definition: yaspgrid.hh:273
int ghostSize(int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1087
Traits::template Codim< cd >::template Partition< All_Partition >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1041
void postAdapt()
clean up some markers
Definition: yaspgrid.hh:984
const CollectiveCommunication< MPI_Comm > & comm() const
return a collective communication object
Definition: yaspgrid.hh:1427
int maxLevel() const
Definition: yaspgrid.hh:874
Traits::template Codim< cd >::template Partition< All_Partition >::LevelIterator lend(int level) const
version without second template parameter for convenience
Definition: yaspgrid.hh:1013
void globalRefine(int refCount)
refine the grid refCount times. What about overlap?
Definition: yaspgrid.hh:880
int overlapSize(int level, int codim) const
return size (= distance in graph) of overlap region
Definition: yaspgrid.hh:1067
bool mark(int refCount, const typename Traits::template Codim< 0 >::Entity &e)
Marks an entity to be refined/coarsened in a subsequent adapt.
Definition: yaspgrid.hh:949
void communicateCodim(DataHandle &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1147
bool preAdapt()
returns true, if the grid will be coarsened
Definition: yaspgrid.hh:976
YaspGrid(Dune::MPIHelper::MPICommunicator comm, Dune::FieldVector< ctype, dim > L, Dune::array< int, dim > s, std::bitset< dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
Definition: yaspgrid.hh:770
FieldVector< int, dim > iTupel
define types used for arguments
Definition: yaspgrid.hh:257
YaspGrid(Dune::FieldVector< ctype, dim > L, Dune::FieldVector< int, dim > s, Dune::FieldVector< bool, dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
Definition: yaspgrid.hh:740
Traits::template Codim< cd >::template Partition< pitype >::LevelIterator lbegin(int level) const
one past the end on this level
Definition: yaspgrid.hh:992
int ghostSize(int level, int codim) const
return size (= distance in graph) of ghost region
Definition: yaspgrid.hh:1081
void communicate(CommDataHandleIF< DataHandleImp, DataType > &data, InterfaceType iftype, CommunicationDirection dir, int level) const
Definition: yaspgrid.hh:1127
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafbegin() const
return LeafIterator which points to the first entity in maxLevel
Definition: yaspgrid.hh:1020
YGridLevel makelevel(int level, fTupel L, iTupel s, std::bitset< dim > periodic, iTupel o_interior, iTupel s_interior, int overlap)
Make a new YGridLevel structure.
Definition: yaspgrid.hh:309
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: yaspgrid.hh:1111
Traits::template Codim< cd >::template Partition< pitype >::LeafIterator leafend() const
return LeafIterator which points behind the last entity in maxLevel
Definition: yaspgrid.hh:1027
void intersections(const SubYGrid< dim, ctype > &sendgrid, const SubYGrid< dim, ctype > &recvgrid, const iTupel &size, std::deque< Intersection > &sendlist, std::deque< Intersection > &recvlist)
Construct list of intersections with neighboring processors.
Definition: yaspgrid.hh:467
YaspGrid(Dune::FieldVector< ctype, dim > L, Dune::array< int, dim > s, std::bitset< dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
Definition: yaspgrid.hh:803
YaspGridFamily< dim > GridFamily
the GridFamily of this grid
Definition: yaspgrid.hh:620
bool adapt()
map adapt to global refine
Definition: yaspgrid.hh:969
void MultiYGridSetup(fTupel L, Dune::array< int, dim > s, std::bitset< dim > periodic, int overlap, const YLoadBalance< dim > *lb=defaultLoadbalancer())
The constructor of the old MultiYGrid class.
Definition: yaspgrid.hh:662
int size(int level, GeometryType type) const
number of entities per level and geometry type in this process
Definition: yaspgrid.hh:1105
yaspgrid_ctype ctype
Type used for coordinates.
Definition: yaspgrid.hh:196
const Torus< dim > & torus() const
return reference to torus
Definition: yaspgrid.hh:264
size_type size() const
Return array size.
Definition: array.hh:71
A set of traits classes to store static information about grid implementation.
Different resources needed by all grid implementations.
Describes the parallel communication interface class for MessageBuffers and DataHandles.
#define dune_static_assert(COND, MSG)
Helper template so that compilation fails if condition is not true.
Definition: static_assert.hh:79
std::ostream & operator<<(std::ostream &s, const array< T, N > &e)
Output operator for array.
Definition: array.hh:159
#define DUNE_THROW(E, m)
Definition: exceptions.hh:244
Provides base classes for index and id sets.
Dune namespace.
Definition: alignment.hh:14
@ All_Partition
all entities
Definition: gridenums.hh:135
@ Interior_Partition
only interior entities
Definition: gridenums.hh:131
@ InteriorBorder_Partition
interior and border entities
Definition: gridenums.hh:132
@ Overlap_Partition
interior, border, and overlap entities
Definition: gridenums.hh:133
@ Ghost_Partition
only ghost entities
Definition: gridenums.hh:136
CommunicationDirection
Define a type for communication direction parameter.
Definition: gridenums.hh:164
@ BackwardCommunication
reverse communication direction
Definition: gridenums.hh:166
double yaspgrid_ctype
Definition: yaspgrid.hh:50
InterfaceType
Parameter to be used for the communication functions.
Definition: gridenums.hh:80
@ InteriorBorder_All_Interface
send interior and border, receive all entities
Definition: gridenums.hh:82
@ All_All_Interface
send all and receive all entities
Definition: gridenums.hh:85
@ Overlap_All_Interface
send overlap, receive all entities
Definition: gridenums.hh:84
@ Overlap_OverlapFront_Interface
send overlap, receive overlap and front entities
Definition: gridenums.hh:83
@ InteriorBorder_InteriorBorder_Interface
send/receive interior and border entities
Definition: gridenums.hh:81
STL namespace.
Implements an utility class that provides collective communication methods for sequential programs.
Implements an utility class that provides MPI's collective communication methods.
Helpers for dealing with MPI.
An stl-compliant random-access container which stores everything on the stack.
This file implements the class shared_ptr (a reference counting pointer), for those systems that don't provide it in their standard library.
specialize with 'true' for all codims that a grid can communicate data on (default=false)
Definition: capabilities.hh:78
Specialize with 'true' for all codims that a grid implements entities for. (default=false)
Definition: capabilities.hh:56
Specialize with 'true' for if the codimension 0 entity of the grid has only one possible geometry typ...
Definition: capabilities.hh:25
Specialize with 'true' if the grid is a Cartesian grid. Cartesian grids satisfy the following propert...
Definition: capabilities.hh:46
Specialize with 'true' if implementation guarantees a conforming leaf grid. (default=false)
Definition: capabilities.hh:96
Specialize with 'true' if implementation guarantees conforming level grids. (default=false)
Definition: capabilities.hh:87
Specialize with 'true' if implementation supports parallelism. (default=false)
Definition: capabilities.hh:65
static const bool v
Whether to store by reference.
Definition: geometry.hh:49
A traits struct that collects all associated types of one grid model.
Definition: grid.hh:1261
IdSet< const GridImp, GlobalIdSetImp, GIDType > GlobalIdSet
The type of the global id set.
Definition: grid.hh:1347
IndexSet< const GridImp, LevelIndexSetImp > LevelIndexSet
The type of the level index set.
Definition: grid.hh:1343
IndexSet< const GridImp, LeafIndexSetImp > LeafIndexSet
The type of the leaf index set.
Definition: grid.hh:1345
IdSet< const GridImp, LocalIdSetImp, LIDType > LocalIdSet
The type of the local id set.
Definition: grid.hh:1349
A single grid level within a YaspGrid.
Definition: yaspgrid.hh:211
int level_
The level number within the YaspGrid level hierarchy.
Definition: yaspgrid.hh:253
int level() const
Level number of this level grid.
Definition: yaspgrid.hh:214
Traits for type conversions and type information.
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Nov 12, 23:30, 2024)