DUNE-FEM (unstable)

galerkin.hh
#ifndef DUNE_FEM_SCHEMES_GALERKIN_HH
#define DUNE_FEM_SCHEMES_GALERKIN_HH

#include <cstddef>

#include <tuple>
#include <type_traits>
#include <utility>
#include <shared_mutex>
#include <vector>
#include <memory>

#include <dune/common/hybridutilities.hh>
#include <dune/common/timer.hh>

#include <dune/grid/common/rangegenerators.hh>

#include <dune/fem/function/localfunction/temporary.hh>
#include <dune/fem/io/parameter/reader.hh>
#include <dune/fem/operator/common/automaticdifferenceoperator.hh>
#include <dune/fem/operator/common/differentiableoperator.hh>
#include <dune/fem/operator/common/operator.hh>
#include <dune/fem/operator/common/stencil.hh>
#include <dune/fem/operator/common/temporarylocalmatrix.hh>
#include <dune/fem/quadrature/cachingquadrature.hh>
#include <dune/fem/quadrature/intersectionquadrature.hh>
#include <dune/fem/common/bindguard.hh>

#include <dune/fem/misc/threads/threaditerator.hh>
#include <dune/fem/misc/threads/threadsafevalue.hh>
#include <dune/fem/misc/hasboundaryintersection.hh>

#include <dune/fem/operator/common/localmatrixcolumn.hh>
#include <dune/fem/operator/common/localcontribution.hh>
#include <dune/fem/operator/1order/localmassmatrix.hh>
#include <dune/fem/schemes/integrands.hh>
#include <dune/fem/schemes/dirichletwrapper.hh>
#include <dune/fem/schemes/femscheme.hh>

#include <dune/fem/space/common/capabilities.hh>

// fempy includes
#include <dune/fempy/quadrature/fempyquadratures.hh>

namespace Dune
{

  namespace Fem
  {

    namespace Impl
    {

      template <class M>
      class CallOrder
      {

        template <class F>
        static int callOrder ( const F &f, char )
        {
#ifndef NDEBUG
          std::cerr << "WARNING: no order method available on " << typeid(F).name() << ", defaulting to 1!" << std::endl;
#endif
          return 1;
        }

        template <class F>
        static auto callOrder ( const F &f, int ) -> decltype( f.order() )
        {
          return f.order();
        }

      public:
        template <class F>
        static int order ( const F &f ) { return callOrder( f, 0 ); }
      };
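
      // Usage sketch (illustration only, not part of this header): CallOrder
      // prefers f.order() when such a method exists and otherwise falls back to
      // the char overload above, i.e. order 1. The types 'Dummy' and 'WithOrder'
      // below are hypothetical.
      //
      //   struct Dummy {};                                        // no order(): warns in debug, returns 1
      //   struct WithOrder { int order () const { return 3; } };
      //
      //   int a = CallOrder< Dummy >::order( Dummy{} );           // == 1
      //   int b = CallOrder< WithOrder >::order( WithOrder{} );   // == 3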

      // GalerkinOperator
      // ----------------

      template <class Space>
      struct DefaultGalerkinOperatorQuadratureSelector
      {
        typedef typename Space :: GridPartType GridPartType;
        typedef CachingQuadrature< GridPartType, 0, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > InteriorQuadratureType;
        typedef CachingQuadrature< GridPartType, 1, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > SurfaceQuadratureType;
        // typedef CachingQuadrature< GridPartType, 0, Dune::FemPy::FempyQuadratureTraits > InteriorQuadratureType;
        // typedef CachingQuadrature< GridPartType, 1, Dune::FemPy::FempyQuadratureTraits > SurfaceQuadratureType;
      };
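
      // A user-defined selector can be plugged in through the QuadSelector
      // template parameter of LocalGalerkinOperator below; the commented
      // typedefs above show the FemPy variant. Sketch (an assumption, assuming
      // ElementQuadrature is wanted instead of CachingQuadrature):
      //
      //   template <class Space>
      //   struct ElementQuadratureSelector
      //   {
      //     typedef typename Space::GridPartType GridPartType;
      //     typedef ElementQuadrature< GridPartType, 0 > InteriorQuadratureType;
      //     typedef ElementQuadrature< GridPartType, 1 > SurfaceQuadratureType;
      //   };
      //
      //   // e.g. LocalGalerkinOperator< MyIntegrands, ElementQuadratureSelector >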

      // LocalGalerkinOperator
      // ---------------------

      template< class Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
      struct LocalGalerkinOperator
      {
        typedef LocalGalerkinOperator< Integrands > ThisType;
        typedef std::conditional_t< Fem::IntegrandsTraits< Integrands >::isFull, Integrands, FullIntegrands< Integrands > > IntegrandsType;

        typedef typename IntegrandsType::GridPartType GridPartType;

        typedef typename GridPartType::ctype ctype;
        typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;

        // typedef QuadratureSelector
        template <class Space>
        using QuadratureSelector = QuadSelector< Space >;

        // constructor
        template< class... Args >
        explicit LocalGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
          : gridPart_( gridPart ),
            integrands_( std::forward< Args >( args )... ),
            defaultInteriorOrder_( [] (const int order) { return 2 * order; } ),
            defaultSurfaceOrder_ ( [] (const int order) { return 2 * order + 1; } ),
            interiorQuadOrder_(0), surfaceQuadOrder_(0)
        {
        }

      protected:
        typedef typename IntegrandsType::DomainValueType DomainValueType;
        typedef typename IntegrandsType::RangeValueType RangeValueType;
        typedef std::make_index_sequence< std::tuple_size< DomainValueType >::value > DomainValueIndices;
        typedef std::make_index_sequence< std::tuple_size< RangeValueType >::value > RangeValueIndices;


        template< std::size_t... i >
        static auto makeDomainValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
        {
          return std::make_tuple( std::vector< std::tuple_element_t< i, DomainValueType > >( maxNumLocalDofs )... );
        }

        static auto makeDomainValueVector ( std::size_t maxNumLocalDofs )
        {
          return makeDomainValueVector( maxNumLocalDofs, DomainValueIndices() );
        }

        template< std::size_t... i >
        static auto makeRangeValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
        {
          return std::make_tuple( std::vector< std::tuple_element_t< i, RangeValueType > >( maxNumLocalDofs )... );
        }

        static auto makeRangeValueVector ( std::size_t maxNumLocalDofs )
        {
          return makeRangeValueVector( maxNumLocalDofs, RangeValueIndices() );
        }

        typedef decltype( makeDomainValueVector( 0u ) ) DomainValueVectorType;
        typedef decltype( makeRangeValueVector( 0u ) ) RangeValueVectorType;

        static void resizeDomainValueVector ( DomainValueVectorType &vec, const std::size_t size )
        {
          Hybrid::forEach( DomainValueIndices(), [ &vec, &size ] ( auto i ) {
            std::get< i >( vec ).resize( size );
          } );
        }

        static void resizeRangeValueVector ( RangeValueVectorType &vec, const std::size_t size )
        {
          Hybrid::forEach( RangeValueIndices(), [ &vec, &size ] ( auto i ) {
            std::get< i >( vec ).resize( size );
          } );
        }

      public:
        void prepare ( const std::size_t size ) const
        {
          resizeDomainValueVector( phiIn_, size );
          resizeDomainValueVector( phiOut_, size );
          resizeDomainValueVector( basisValues_, size );
          resizeDomainValueVector( domainValues_, size );
        }

        template< class LocalFunction, class Quadrature >
        static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::RangeType > &phi )
        {
          u.evaluateQuadrature( quad, phi );
        }

        template< class LocalFunction, class Quadrature >
        static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::JacobianRangeType > &phi )
        {
          u.jacobianQuadrature( quad, phi );
        }

        template< class LocalFunction, class Quadrature >
        static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::HessianRangeType > &phi )
        {
          u.hessianQuadrature( quad, phi );
        }

      protected:
        template< class LocalFunction, class Point >
        static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::RangeType &phi )
        {
          u.evaluate( x, phi );
        }

        template< class LocalFunction, class Point >
        static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::JacobianRangeType &phi )
        {
          u.jacobian( x, phi );
        }

        template< class LocalFunction, class Point >
        static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::HessianRangeType &phi )
        {
          u.hessian( x, phi );
        }

        template< class LocalFunction, class Point, class... T >
        static void value ( const LocalFunction &u, const Point &x, std::tuple< T... > &phi )
        {
          Hybrid::forEach( std::index_sequence_for< T... >(), [ &u, &x, &phi ] ( auto i ) { LocalGalerkinOperator::value( u, x, std::get< i >( phi ) ); } );
        }

        template< class Basis, class Point >
        static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::RangeType > &phi )
        {
          basis.evaluateAll( x, phi );
        }

        template< class Basis, class Point >
        static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::JacobianRangeType > &phi )
        {
          basis.jacobianAll( x, phi );
        }

        template< class Basis, class Point >
        static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::HessianRangeType > &phi )
        {
          basis.hessianAll( x, phi );
        }

        template< class Basis, class Point, class... T >
        static void values ( const Basis &basis, const Point &x, std::tuple< std::vector< T >... > &phi )
        {
          Hybrid::forEach( std::index_sequence_for< T... >(), [ &basis, &x, &phi ] ( auto i ) { LocalGalerkinOperator::values( basis, x, std::get< i >( phi ) ); } );
        }

        template< class LocalFunction, class Point >
        static DomainValueType domainValue ( const LocalFunction &u, const Point &x )
        {
          DomainValueType phi;
          value( u, x, phi );
          return phi;
        }

        static DomainValueType domainValue ( const unsigned int qpIdx, DomainValueVectorType &vec )
        {
          DomainValueType phi;
          Hybrid::forEach( DomainValueIndices(), [ &qpIdx, &vec, &phi ] ( auto i ) {
            std::get< i >( phi ) = std::get< i >( vec )[ qpIdx ];
          } );
          return phi;
        }

        template< class LocalFunction, class Quadrature >
        static void domainValue ( const LocalFunction &u, const Quadrature &quadrature, DomainValueVectorType &result )
        {
          Hybrid::forEach( DomainValueIndices(), [ &u, &quadrature, &result ] ( auto i ) {
            auto &vec = std::get< i >( result );
            vec.resize( quadrature.nop() );
            ThisType::evaluateQuadrature( u, quadrature, vec );
          } );
        }

        template< class Phi, std::size_t... i >
        static auto value ( const Phi &phi, std::size_t col, std::index_sequence< i... > )
        {
          return std::make_tuple( std::get< i >( phi )[ col ]... );
        }

        template< class... T >
        static auto value ( const std::tuple< std::vector< T >... > &phi, std::size_t col )
        {
          return value( phi, col, std::index_sequence_for< T... >() );
        }

        static void assignRange ( RangeValueVectorType &ranges, const std::size_t idx, const RangeValueType &range )
        {
          Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range ] ( auto i ) {
            std::get< i >( ranges )[ idx ] = std::get< i >( range );
          });
        }

        template <class W>
        static void assignRange ( RangeValueVectorType &ranges, const std::size_t idx, const RangeValueType &range, const W &weight )
        {
          Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range, &weight ] ( auto i ) {
            std::get< i >( ranges )[ idx ] = std::get< i >( range );
            std::get< i >( ranges )[ idx ] *= weight;
          });
        }

        static void assignDomain ( DomainValueVectorType &domains, const std::size_t idx, const DomainValueType &domain )
        {
          Hybrid::forEach( DomainValueIndices(), [ &domains, &idx, &domain ] ( auto i ) {
            std::get< i >( domains )[ idx ] = std::get< i >( domain );
          });
        }

        template <class W, class Quadrature>
        static void axpyQuadrature ( W &w, const Quadrature &quadrature, RangeValueVectorType &ranges )
        {
          Hybrid::forEach( RangeValueIndices(), [ &w, &quadrature, &ranges ] ( auto i ) {
            w.axpyQuadrature( quadrature, std::get< i >( ranges ) );
          } );
        }

      public:
        // interior integral

        template< class U, class W >
        void addInteriorIntegral ( const U &u, W &w ) const
        {
          if( !integrands().init( u.entity() ) )
            return;

          const auto &geometry = u.geometry();

          typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
          const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder( maxOrder( u, w ) ) );

          // evaluate u for all quadrature points
          DomainValueVectorType &domains = domainValues_;
          domainValue( u, quadrature, domains );

          auto &ranges = values_;
          resizeRangeValueVector( ranges, quadrature.nop() );

          // evaluate integrands for all quadrature points
          for( const auto qp : quadrature )
          {
            const ctype weight = qp.weight() * geometry.integrationElement( qp.position() );
            assignRange( ranges, qp.index(), integrands().interior( qp, domainValue( qp.index(), domains ) ), weight );
          }

          // add to w for all quadrature points
          axpyQuadrature( w, quadrature, ranges );
          integrands().unbind();
        }

        template< class U, class J >
        void addLinearizedInteriorIntegral ( const U &u, J &j ) const
        {
          if( !integrands().init( u.entity() ) )
            return;

          const auto &geometry = u.geometry();
          const auto &domainBasis = j.domainBasisFunctionSet();
          const auto &rangeBasis = j.rangeBasisFunctionSet();

          typedef typename QuadratureSelector< typename J::RangeSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
          const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder( maxOrder( u, domainBasis, rangeBasis ) ) );
          const size_t domainSize = domainBasis.size();
          const size_t quadNop = quadrature.nop();

          auto &basisValues = basisValues_;
          resizeDomainValueVector( basisValues, domainSize );

          // evaluate u for all quadrature points
          auto &rangeValues = rangeValues_;
          DomainValueVectorType &domains = domainValues_;
          domainValue( u, quadrature, domains );

          rangeValues.resize( domainSize );
          for( std::size_t col = 0; col < domainSize; ++col )
          {
            resizeRangeValueVector( rangeValues[ col ], quadNop );
          }

          // evaluate all basis functions and integrands
          for( const auto qp : quadrature )
          {
            values( domainBasis, qp, basisValues );
            const auto weight = qp.weight() * geometry.integrationElement( qp.position() );
            auto integrand = integrands().linearizedInterior( qp, domainValue( qp.index(), domains ) );
            for( std::size_t col = 0; col < domainSize; ++col )
            {
              assignRange( rangeValues[ col ], qp.index(), integrand( value( basisValues, col ) ), weight );
            }
          }

          // add to local matrix for all quadrature points and basis functions
          for( std::size_t col = 0; col < domainSize; ++col )
          {
            LocalMatrixColumn< J > jCol( j, col );
            axpyQuadrature( jCol, quadrature, rangeValues[ col ] );
          }
          integrands().unbind();
        }

        // boundary integral

        template< class Intersection, class U, class W >
        void addBoundaryIntegral ( const Intersection &intersection, const U &u, W &w ) const
        {
          if( !integrands().init( intersection ) )
            return;

          const auto geometry = intersection.geometry();
          typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( maxOrder( u, w ) ), SurfaceQuadratureType::INSIDE );
          for( const auto qp : quadrature )
          {
            const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );

            RangeValueType integrand = integrands().boundary( qp, domainValue( u, qp ) );

            Hybrid::forEach( RangeValueIndices(), [ &qp, &w, &integrand, weight ] ( auto i ) {
              std::get< i >( integrand ) *= weight;
              w.axpy( qp, std::get< i >( integrand ) );
            } );
          }
          integrands().unbind();
        }

        template< class Intersection, class U, class J >
        void addLinearizedBoundaryIntegral ( const Intersection &intersection, const U &u, J &j ) const
        {
          if( !integrands().init( intersection ) )
            return;

          DomainValueVectorType &phi = phiIn_;

          const auto geometry = intersection.geometry();
          const auto &domainBasis = j.domainBasisFunctionSet();
          const auto &rangeBasis = j.rangeBasisFunctionSet();

          typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( maxOrder( u, domainBasis, rangeBasis ) ), SurfaceQuadratureType::INSIDE );
          for( const auto qp : quadrature )
          {
            const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );

            values( domainBasis, qp, phi );
            auto integrand = integrands().linearizedBoundary( qp, domainValue( u, qp ) );

            for( std::size_t col = 0, cols = domainBasis.size(); col < cols; ++col )
            {
              LocalMatrixColumn< J > jCol( j, col );
              RangeValueType intPhi = integrand( value( phi, col ) );

              Hybrid::forEach( RangeValueIndices(), [ &qp, &jCol, &intPhi, weight ] ( auto i ) {
                std::get< i >( intPhi ) *= weight;
                jCol.axpy( qp, std::get< i >( intPhi ) );
              } );
            }
          }
          integrands().unbind();
        }

        // addSkeletonIntegral

      protected:
        template< bool conforming, class Intersection, class U, class W >
        void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn ) const
        {
          const auto geometry = intersection.geometry();

          typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
          const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( maxOrder( uIn, uOut, wIn ) ), false );
          for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
          {
            const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );

            const auto qpIn = quadrature.inside()[ qp ];
            const auto qpOut = quadrature.outside()[ qp ];
            std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );

            Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &integrand, weight ] ( auto i ) {
              std::get< i >( integrand.first ) *= weight;
              wIn.axpy( qpIn, std::get< i >( integrand.first ) );
            } );
          }
        }

        template< bool conforming, class Intersection, class U, class W >
        void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn, W &wOut ) const
        {
          const auto geometry = intersection.geometry();
          typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
          const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( maxOrder( uIn, uOut, wIn, wOut ) ), false );
          for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
          {
            const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );

            const auto qpIn = quadrature.inside()[ qp ];
            const auto qpOut = quadrature.outside()[ qp ];
            std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );

            Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &qpOut, &wOut, &integrand, weight ] ( auto i ) {
              std::get< i >( integrand.first ) *= weight;
              wIn.axpy( qpIn, std::get< i >( integrand.first ) );

              std::get< i >( integrand.second ) *= weight;
              wOut.axpy( qpOut, std::get< i >( integrand.second ) );
            } );
          }
        }

        template< bool conforming, class Intersection, class U, class J >
        void addLinearizedSkeletonIntegral ( const Intersection &intersection,
                                             const U &uIn, const U &uOut, J &jInIn, J &jOutIn ) const
        {
          DomainValueVectorType &phiIn = phiIn_;
          DomainValueVectorType &phiOut = phiOut_;

          const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
          const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();

          const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();

          const int order = std::max( maxOrder( uIn, uOut ), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn ) );

          const auto geometry = intersection.geometry();
          typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
          const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( order ), false );
          for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
          {
            const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );

            const auto qpIn = quadrature.inside()[ qp ];
            const auto qpOut = quadrature.outside()[ qp ];

            values( domainBasisIn, qpIn, phiIn );
            values( domainBasisOut, qpOut, phiOut );

            auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
            for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
            {
              LocalMatrixColumn< J > jInInCol( jInIn, col );
              std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );

              Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &intPhi, weight ] ( auto i ) {
                std::get< i >( intPhi.first ) *= weight;
                jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
              } );
            }
            for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
            {
              LocalMatrixColumn< J > jOutInCol( jOutIn, col );
              std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );

              Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &intPhi, weight ] ( auto i ) {
                std::get< i >( intPhi.first ) *= weight;
                jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
              } );
            }
          }
        }

        template< bool conforming, class Intersection, class U, class J >
        void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut,
                                             J &jInIn, J &jOutIn, J &jInOut, J &jOutOut ) const
        {
          DomainValueVectorType &phiIn = phiIn_;
          DomainValueVectorType &phiOut = phiOut_;

          const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
          const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();

          const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();
          const auto &rangeBasisOut = jInOut.rangeBasisFunctionSet();

          const int order = std::max( maxOrder( uIn, uOut ), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn, rangeBasisOut ) );

          const auto geometry = intersection.geometry();
          typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
          typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
          const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder( order ), false );
          for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
          {
            const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );

            const auto qpIn = quadrature.inside()[ qp ];
            const auto qpOut = quadrature.outside()[ qp ];

            values( domainBasisIn, qpIn, phiIn );
            values( domainBasisOut, qpOut, phiOut );

            auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
            for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
            {
              LocalMatrixColumn< J > jInInCol( jInIn, col );
              LocalMatrixColumn< J > jInOutCol( jInOut, col );
              std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );

              Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &qpOut, &jInOutCol, &intPhi, weight ] ( auto i ) {
                std::get< i >( intPhi.first ) *= weight;
                jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );

                std::get< i >( intPhi.second ) *= weight;
                jInOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
              } );
            }
            for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
            {
              LocalMatrixColumn< J > jOutInCol( jOutIn, col );
              LocalMatrixColumn< J > jOutOutCol( jOutOut, col );
              std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );

              Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &qpOut, &jOutOutCol, &intPhi, weight ] ( auto i ) {
                std::get< i >( intPhi.first ) *= weight;
                jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );

                std::get< i >( intPhi.second ) *= weight;
                jOutOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
              } );
            }
          }
        }

      public:
        template< class Intersection, class U, class... W >
        void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &... w ) const
        {
          if( !integrands().init( intersection ) )
            return;

          if( intersection.conforming() )
            addSkeletonIntegral< true >( intersection, uIn, uOut, w... );
          else
            addSkeletonIntegral< false >( intersection, uIn, uOut, w... );
          integrands().unbind();
        }

        template< class Intersection, class U, class... J >
        void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, J &... j ) const
        {
          if( !integrands().init( intersection ) )
            return;

          if( intersection.conforming() )
            addLinearizedSkeletonIntegral< true >( intersection, uIn, uOut, j... );
          else
            addLinearizedSkeletonIntegral< false >( intersection, uIn, uOut, j... );
          integrands().unbind();
        }

        void setQuadratureOrders ( unsigned int interior, unsigned int surface )
        {
          interiorQuadOrder_ = interior;
          surfaceQuadOrder_ = surface;
        }

        void setQuadratureOrderFunctions ( std::function<int(const int)> interiorOrder,
                                           std::function<int(const int)> surfaceOrder )
        {
          defaultInteriorOrder_ = interiorOrder;
          defaultSurfaceOrder_ = surfaceOrder;
        }
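
        // Usage sketch (illustration only): by default the interior quadrature
        // order for polynomial order p is 2*p and the surface order is 2*p+1;
        // both can be replaced, and a non-zero fixed order set via
        // setQuadratureOrders takes precedence over the order functions.
        // 'localOp' is a hypothetical LocalGalerkinOperator instance.
        //
        //   localOp.setQuadratureOrders( 4, 5 );   // fixed orders, override the defaults
        //   localOp.setQuadratureOrderFunctions(
        //       [] ( const int p ) { return 2 * p + 2; },    // interior
        //       [] ( const int p ) { return 2 * p + 3; } );  // surface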

        IntegrandsType &model () const
        {
          return integrands();
        }
        bool nonlinear () const { return model().nonlinear(); }
        bool hasInterior () const { return model().hasInterior(); }
        bool hasSkeleton () const { return model().hasSkeleton(); }
        bool hasBoundary () const { return model().hasBoundary(); }

      private:
        IntegrandsType &integrands () const
        {
          return integrands_;
        }

      public:
        // accessors
        const GridPartType &gridPart () const { return gridPart_; }

        unsigned int interiorQuadratureOrder ( unsigned int order ) const { return interiorQuadOrder_ == 0 ? defaultInteriorOrder_( order ) : interiorQuadOrder_; }
        unsigned int surfaceQuadratureOrder ( unsigned int order ) const { return surfaceQuadOrder_ == 0 ? defaultSurfaceOrder_ ( order ) : surfaceQuadOrder_; }

      protected:
        template <class U>
        int maxOrder ( const U &u ) const
        {
          return CallOrder< U > :: order( u );
        }

        template< class U, class W >
        int maxOrder ( const U &u, const W &w ) const
        {
          return std::max( maxOrder( u ), maxOrder( w ) );
        }

        template< class U, class V, class W >
        int maxOrder ( const U &u, const V &v, const W &w ) const
        {
          return std::max( maxOrder( u, v ), maxOrder( w ) );
        }

        template< class U, class V, class W, class X >
        int maxOrder ( const U &u, const V &v, const W &w, const X &x ) const
        {
          return std::max( maxOrder( u, v ), maxOrder( w, x ) );
        }

      protected:
        const GridPartType &gridPart_;

        mutable IntegrandsType integrands_;

        mutable std::function<int(const int)> defaultInteriorOrder_;
        mutable std::function<int(const int)> defaultSurfaceOrder_;

        unsigned int interiorQuadOrder_;
        unsigned int surfaceQuadOrder_;

        mutable std::vector< RangeValueVectorType > rangeValues_;
        mutable RangeValueVectorType values_;
        mutable DomainValueVectorType phiIn_;
        mutable DomainValueVectorType phiOut_;
        mutable DomainValueVectorType basisValues_;
        mutable DomainValueVectorType domainValues_;
      };



      // GalerkinOperator
      // ----------------

      template< class GridPart >
      // Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
      struct GalerkinOperator
      {
        typedef GridPart GridPartType;
        typedef GalerkinOperator< GridPartType > ThisType;

        typedef typename GridPartType::ctype ctype;
        typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;

        // constructor
        explicit GalerkinOperator ( const GridPartType &gridPart )
          : gridPart_( gridPart ),
            gridSizeInterior_( 0 )
        {
        }

      protected:
        template <class IntegrandsTuple>
        bool hasBoundary ( const IntegrandsTuple &integrandsTuple ) const
        {
          typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
          bool hasBoundary = false ;
          Hybrid::forEach( Indices(), [ &integrandsTuple, &hasBoundary ] ( auto i ) {
            if( std::get< i >( integrandsTuple ).hasBoundary() )
            {
              hasBoundary = true ;
              return ;
            }
          });
          return hasBoundary;
        }

        template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
        void evaluateImpl ( const GridFunction &u, DiscreteFunction &w, const Iterators &iterators,
                            const IntegrandsTuple &integrandsTuple, Functor &addLocalDofs, std::integral_constant< bool, hasSkeleton > ) const
        {
          Dune::Fem::ConstLocalFunction< GridFunction > uInside( u );
          Dune::Fem::ConstLocalFunction< GridFunction > uOutside( u );

          typedef typename DiscreteFunction::DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
          TemporaryLocalFunction< DiscreteFunctionSpaceType > wInside( w.space() ), wOutside( w.space() );

          // element counter
          gridSizeInterior_ = 0;

          typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;

          // true if one of the integrands has a boundary term
          const bool hasBnd = hasBoundary( integrandsTuple );

          const auto &indexSet = gridPart().indexSet();
          const auto end = iterators.end();
          for( auto it = iterators.begin(); it != end; ++it )
          {
            // assert( iterators.thread( *it ) == MPIManager::thread() );
            const EntityType inside = *it ;

            // increase counter for interior elements
            ++gridSizeInterior_;

            auto uGuard = bindGuard( uInside, inside );
            auto wGuard = bindGuard( wInside, inside );
            wInside.clear();

            auto addInteriorIntegral = [ &integrandsTuple, &uInside, &wInside ] ( auto i )
            {
              const auto &integrands = std::get< i >( integrandsTuple );
              if( integrands.hasInterior() )
                integrands.addInteriorIntegral( uInside, wInside );
            };
            // add interior integral of any integrands
            Hybrid::forEach( Indices(), addInteriorIntegral );

            if( hasSkeleton || (hasBnd && HasBoundaryIntersection< GridPartType >::apply( inside ) ) )
            {
              for( const auto &intersection : intersections( gridPart(), inside ) )
              {
                bool neighbor = false;
                if constexpr ( hasSkeleton )
                {
                  // check neighbor first: on periodic boundaries both neighbor()
                  // and boundary() are true, so the neighbor case must be handled first
                  if( intersection.neighbor() )
                  {
                    neighbor = true;
                    const EntityType outside = intersection.outside();

                    if( outside.partitionType() != InteriorEntity )
                    {
                      auto uOutGuard = bindGuard( uOutside, outside );

                      auto addSkeletonIntegral = [ &integrandsTuple, &intersection, &uInside, &uOutside, &wInside ] ( auto i )
                      {
                        const auto &integrands = std::get< i >( integrandsTuple );
                        if( integrands.hasSkeleton() )
                          integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside );
                      };
                      // add skeleton integral of any integrands
                      Hybrid::forEach( Indices(), addSkeletonIntegral );
                    }
                    else if( indexSet.index( inside ) < indexSet.index( outside ) )
                    {
                      auto uOutGuard = bindGuard( uOutside, outside );
                      auto wOutGuard = bindGuard( wOutside, outside );
                      wOutside.clear();

                      auto addSkeletonIntegral = [ &integrandsTuple, &intersection, &uInside, &uOutside, &wInside, &wOutside ] ( auto i )
                      {
                        const auto &integrands = std::get< i >( integrandsTuple );
                        if( integrands.hasSkeleton() )
                          integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside, wOutside );
                      };
                      // add skeleton integral of any integrands
                      Hybrid::forEach( Indices(), addSkeletonIntegral );

                      // addLocalDofs calls w.addLocalDofs but also
                      // prevents race condition for thread parallel runs
                      addLocalDofs( outside, wOutside );
                    }
                  }
                } // end skeleton

                if( ! neighbor && intersection.boundary() )
                {
                  auto addBoundaryIntegral = [ &integrandsTuple, &intersection, &uInside, &wInside ] ( auto i )
                  {
                    const auto &integrands = std::get< i >( integrandsTuple );
                    if( integrands.hasBoundary() )
                      integrands.addBoundaryIntegral( intersection, uInside, wInside );
                  };
                  // add boundary integral of any integrands
                  Hybrid::forEach( Indices(), addBoundaryIntegral );
                } // end boundary
              }
            } // end intersections

            addLocalDofs( inside, wInside );
          }
        }

        template <class Space>
        struct InsideEntity
        {
          typedef typename Space::EntityType EntityType;
          template <class Iterators>
          InsideEntity ( const Space &space, const Iterators &iterators )
            : space_(space), dofThread_(space.size(),-1)
            , thread_( iterators.thread() )
          {
            const auto &mapper = space_.blockMapper();
            for( const auto &entity : space_ )
            {
              int t = iterators.threadParallel( entity );
              mapper.mapEach( entity, [ this, t ] ( int local, auto global )
                { dofThread_[ global ] = (dofThread_[ global ]==t || dofThread_[ global ]==-1) ?
                                         t : -2 ; } ); // -2: shared dof
            }
          }
          bool operator() ( const EntityType &entity ) const
          {
            bool needsLocking = false;
            space_.blockMapper().mapEach( entity,
              [ this, &needsLocking ] ( int local, auto global )
              { needsLocking = (needsLocking || dofThread_[ global ]!=thread_); });
            return !needsLocking;
          }
          const Space &space_;
          std::vector<int> dofThread_;
          int thread_;
        };
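
        // InsideEntity(space, iterators)(entity) returns true iff every dof of
        // 'entity' is assigned exclusively to the calling thread (its dofThread_
        // entry equals the thread id); dofs shared between threads (-2) or owned
        // by another thread force the callers below to take the exclusive lock.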

        template <class DiscreteFunction>
        struct AddLocalEvaluate
        {
          AddLocalEvaluate ( DiscreteFunction &w )
            : w_(w) {}
          template <class LocalDofs>
          void operator () ( const EntityType &entity, const LocalDofs &wLocal ) const
          {
            w_.addLocalDofs( entity, wLocal.localDofVector() );
          }
          DiscreteFunction &w_;
        };

        template <class DiscreteFunction>
        struct AddLocalEvaluateLocked : public AddLocalEvaluate<DiscreteFunction>
        {
          typedef AddLocalEvaluate<DiscreteFunction> BaseType;

          std::shared_mutex &mutex_;
          InsideEntity<typename DiscreteFunction::DiscreteFunctionSpaceType> inside_;

          template <class Iterators>
          AddLocalEvaluateLocked ( DiscreteFunction &w, std::shared_mutex &mtx, const Iterators &iterators )
            : BaseType(w), mutex_(mtx), inside_(w.space(),iterators) {}

          template <class LocalDofs>
          void operator () ( const EntityType &entity, const LocalDofs &wLocal ) const
          {
            // call addLocalDofs on w
            if( inside_(entity) )
            {
              std::shared_lock< std::shared_mutex > guard ( mutex_ );
              BaseType::operator()( entity, wLocal );
            }
            else
            {
              // lock mutex (unlock on destruction)
              std::lock_guard< std::shared_mutex > guard ( mutex_ );
              BaseType::operator()( entity, wLocal );
            }
          }
        };

        template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor >
        void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators &iterators,
                        const IntegrandsTuple &integrandsTuple, Functor &addLocalDofs ) const
        {
          static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
          static_assert( std::is_same< typename DiscreteFunction::GridPartType, GridPartType >::value, "Argument 'w' and Integrands must be defined on the same grid part." );

          if( hasSkeleton( integrandsTuple ) )
            evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::true_type() );
          else
            evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::false_type() );
        }

      public:
        template <class IntegrandsTuple>
        bool hasSkeleton ( const IntegrandsTuple &integrandsTuple ) const
        {
          typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
          bool hasSkeleton = false ;
          Hybrid::forEach( Indices(), [ &integrandsTuple, &hasSkeleton ] ( auto i ) {
            if( std::get< i >( integrandsTuple ).hasSkeleton() )
            {
              hasSkeleton = true;
              return ;
            }
          });
          return hasSkeleton ;
        }

        template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
        void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators &iterators,
                        const IntegrandsTuple &integrandsTuple, std::shared_mutex &mtx ) const
        {
          AddLocalEvaluateLocked< DiscreteFunction > addLocalEvaluate( w, mtx, iterators );
          evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
        }

        template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
        void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators &iterators, const IntegrandsTuple &integrandsTuple ) const
        {
          AddLocalEvaluate< DiscreteFunction > addLocalEvaluate( w );
          evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
        }

      protected:
        template< class T, int length >
        class FiniteStack
        {
        public :
          // Makes empty stack
          FiniteStack () : _f(0) {}

          // Returns true if the stack is empty
          bool empty () const { return _f <= 0; }

          // Returns true if the stack is full
          bool full () const { return (_f >= length); }

          // clear stack
          void clear () { _f = 0; }

          // Puts a new object onto the stack
          void push ( const T &t )
          {
            assert ( _f < length );
            _s[ _f++ ] = t;
          }

          // Removes and returns the uppermost object from the stack
          T pop () {
            assert ( _f > 0 );
            return _s[ --_f ];
          }

          // Returns the uppermost object on the stack
          T top () const {
            assert ( _f > 0 );
            return _s[ _f-1 ];
          }

          // stacksize
          int size () const { return _f; }

        private:
          T _s[ length ]; // the stack
          int _f;         // actual position in stack
        };
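
        // Usage sketch (illustration only): the stack hands out pre-allocated
        // local matrices in AddLocalAssemble below and never grows beyond its
        // compile-time capacity.
        //
        //   FiniteStack< int, 4 > stack;
        //   stack.push( 1 );
        //   stack.push( 2 );
        //   int top = stack.top();   // == 2, stack unchanged
        //   int v   = stack.pop();   // == 2, size() == 1 afterwards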


        template <class JacobianOperator>
        struct AddLocalAssemble
        {
          typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
          typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;
          typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
          JacobianOperator &jOp_;
          std::vector< TemporaryLocalMatrixType > jOpLocal_;

          FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFinalized_;
          FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFree_;

          std::size_t locked, notLocked, timesLocked;
          AddLocalAssemble ( JacobianOperator &jOp )
            : jOp_(jOp)
            , jOpLocal_(12, TemporaryLocalMatrixType(jOp_.domainSpace(), jOp_.rangeSpace()))
            , jOpLocalFinalized_()
            , jOpLocalFree_()
            , locked(0), notLocked(0), timesLocked(0)
          {
            for( auto &jOpLocal : jOpLocal_ )
              jOpLocalFree_.push( &jOpLocal );
          }

          TemporaryLocalMatrixType &bind ( const EntityType &dE, const EntityType &rE )
          {
            assert( ! jOpLocalFree_.empty() );
            TemporaryLocalMatrixType &lop = *(jOpLocalFree_.pop());
            lop.bind(dE,rE);
            lop.clear();
            return lop;
          }

          void unbind ( TemporaryLocalMatrixType &lop )
          {
            notLocked += 1;
            jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
            lop.unbind();
            jOpLocalFree_.push( &lop );
          }

          void finalize ()
          {
            locked += jOpLocalFinalized_.size();
            while ( ! jOpLocalFinalized_.empty() )
            {
              TemporaryLocalMatrixType &lop = *(jOpLocalFinalized_.pop());
              jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
              lop.unbind();
              jOpLocalFree_.push( &lop );
            }
          }
        };

        template <class JacobianOperator>
        struct AddLocalAssembleLocked : public AddLocalAssemble<JacobianOperator>
        {
          typedef AddLocalAssemble<JacobianOperator> BaseType;
          typedef typename BaseType::TemporaryLocalMatrixType TemporaryLocalMatrixType;
          using BaseType::jOpLocalFinalized_;
          using BaseType::jOpLocalFree_;

          std::shared_mutex &mutex_;
          InsideEntity<typename JacobianOperator::DomainSpaceType> insideDomain_;
          InsideEntity<typename JacobianOperator::RangeSpaceType> insideRange_;

          template <class Iterators>
          AddLocalAssembleLocked ( JacobianOperator &jOp, std::shared_mutex &mtx, const Iterators &iterators )
            : BaseType(jOp)
            , mutex_(mtx)
            , insideDomain_(jOp.domainSpace(),iterators)
            , insideRange_(jOp.rangeSpace(),iterators)
          {}

          void finalize ()
          {
            // lock mutex (unlock on destruction)
            ++BaseType::timesLocked;
            std::lock_guard< std::shared_mutex > guard ( mutex_ );
            BaseType::finalize();
          }

          TemporaryLocalMatrixType &bind ( const EntityType &dE, const EntityType &rE )
          {
            if ( jOpLocalFree_.empty() )
            {
              finalize();
            }
            return BaseType::bind(dE,rE);
          }

          void unbind ( TemporaryLocalMatrixType &lop )
          {
            /* // always lock
            ++BaseType::timesLocked;
            ++BaseType::locked;
            std::lock_guard guard ( mutex_ );
            BaseType::unbind(lop);
            return;
            */
            if ( insideDomain_(lop.domainEntity()) &&
                 insideRange_(lop.rangeEntity()) )
            {
              std::shared_lock< std::shared_mutex > guard ( mutex_ );
              BaseType::unbind(lop);
            }
            else
            {
              jOpLocalFinalized_.push( &lop );
            }
          }
        };

        template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
        void assembleImpl ( const GridFunction &u, JacobianOperator &jOp, const Iterators &iterators, const IntegrandsTuple &integrandsTuple,
                            Functor &addLocalMatrix, std::integral_constant< bool, hasSkeleton > ) const
        {
          typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
          typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;

          typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;

          Dune::Fem::ConstLocalFunction< GridFunction > uIn( u );
          Dune::Fem::ConstLocalFunction< GridFunction > uOut( u );

          typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
          const std::size_t maxNumLocalDofs = jOp.domainSpace().blockMapper().maxNumDofs() * jOp.domainSpace().localBlockSize;

          // initialize local temporary data
          Hybrid::forEach( Indices(), [ &integrandsTuple, &maxNumLocalDofs ] ( auto i ) {
            const auto &integrands = std::get< i >( integrandsTuple );
            integrands.prepare( maxNumLocalDofs );
          });

          // element counter
          gridSizeInterior_ = 0;

          // true if one of the integrands has a boundary term
          const bool hasBnd = hasBoundary( integrandsTuple );

          const auto &indexSet = gridPart().indexSet();
          // threaded iterators provided from outside
          const auto end = iterators.end();
          for( auto it = iterators.begin(); it != end; ++it )
          {
            // increase counter for interior elements
            ++gridSizeInterior_;

            const EntityType inside = *it;

            auto uiGuard = bindGuard( uIn, inside );

            TemporaryLocalMatrixType &jOpInIn = addLocalMatrix.bind( inside, inside );
            auto addLinearizedInteriorIntegral = [ &integrandsTuple, &uIn, &jOpInIn ] ( auto i )
            {
              const auto &integrands = std::get< i >( integrandsTuple );
              if( integrands.hasInterior() )
                integrands.addLinearizedInteriorIntegral( uIn, jOpInIn );
            };
            // add interior integral of any integrands
            Hybrid::forEach( Indices(), addLinearizedInteriorIntegral );

            if( hasSkeleton || (hasBnd && HasBoundaryIntersection< GridPartType >::apply( inside ) ) )
            {
              for( const auto &intersection : intersections( gridPart(), inside ) )
              {
                bool neighbor = false ;
                // check neighbor first: on periodic boundaries both neighbor()
                // and boundary() are true, so the neighbor case must be handled first
                if constexpr ( hasSkeleton )
                {
                  if( intersection.neighbor() )
                  {
                    neighbor = true ;
                    const EntityType &outside = intersection.outside();

                    TemporaryLocalMatrixType &jOpOutIn = addLocalMatrix.bind( outside, inside );

                    auto uoGuard = bindGuard( uOut, outside );

                    if( outside.partitionType() != InteriorEntity )
                    {
                      auto addLinearizedSkeletonIntegral = [ &integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn ] ( auto i )
                      {
                        const auto &integrands = std::get< i >( integrandsTuple );
                        if( integrands.hasSkeleton() )
                          integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn );
                      };
                      // add skeleton integral of any integrands
                      Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );
                    }
                    else if( indexSet.index( inside ) < indexSet.index( outside ) )
                    {
                      TemporaryLocalMatrixType &jOpInOut = addLocalMatrix.bind( inside, outside );
                      TemporaryLocalMatrixType &jOpOutOut = addLocalMatrix.bind( outside, outside );

                      auto addLinearizedSkeletonIntegral = [ &integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn, &jOpInOut, &jOpOutOut ] ( auto i )
                      {
                        const auto &integrands = std::get< i >( integrandsTuple );
                        if( integrands.hasSkeleton() )
                          integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn, jOpInOut, jOpOutOut );
                      };
                      // add skeleton integral of any integrands
                      Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );

                      addLocalMatrix.unbind(jOpInOut);
                      addLocalMatrix.unbind(jOpOutOut);
                    }

                    addLocalMatrix.unbind(jOpOutIn);
                  }
                } // end skeleton

                if( !neighbor && intersection.boundary() )
                {
                  auto addLinearizedBoundaryIntegral = [ &integrandsTuple, &intersection, &uIn, &jOpInIn ] ( auto i )
                  {
                    const auto &integrands = std::get< i >( integrandsTuple );
                    if( integrands.hasBoundary() )
                      integrands.addLinearizedBoundaryIntegral( intersection, uIn, jOpInIn );
                  };
                  // add boundary integral of any integrands
                  Hybrid::forEach( Indices(), addLinearizedBoundaryIntegral );

                } // end boundary
              }
            } // end intersection
            addLocalMatrix.unbind(jOpInIn);
          }

          // complete the matrix build
          addLocalMatrix.finalize();
        }


        template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor >
        void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators &iterators,
                        const IntegrandsTuple &integrandsTuple, Functor &addLocalMatrix, int ) const
        {
          static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
          static_assert( std::is_same< typename JacobianOperator::DomainSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );
          static_assert( std::is_same< typename JacobianOperator::RangeSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );

          if( hasSkeleton( integrandsTuple ) )
            assembleImpl( u, jOp, iterators, integrandsTuple, addLocalMatrix, std::true_type() );
          else
            assembleImpl( u, jOp, iterators, integrandsTuple, addLocalMatrix, std::false_type() );
        }

      public:
        template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple >
        void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators &iterators,
                        const IntegrandsTuple &integrandsTuple, std::shared_mutex &mtx ) const
        {
          AddLocalAssembleLocked< JacobianOperator > addLocalAssemble( jOp, mtx, iterators );
          assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
#if 0 // print information about how many times a lock was used during assemble
          std::lock_guard guard ( mtx );
          std::cout << MPIManager::thread() << " : "
                    << addLocalAssemble.locked << " " << addLocalAssemble.notLocked << " "
                    << addLocalAssemble.timesLocked << std::endl;
#endif
        }

        template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple >
        void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators &iterators, const IntegrandsTuple &integrandsTuple ) const
        {
          AddLocalAssemble< JacobianOperator > addLocalAssemble( jOp );
          assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
        }

        // accessors
        const GridPartType &gridPart () const { return gridPart_; }

        std::size_t gridSizeInterior () const { return gridSizeInterior_; }

      protected:
        const GridPartType &gridPart_;
        mutable std::size_t gridSizeInterior_;
      };


      template <class GalerkinOperator >
      static std::size_t accumulateGridSize ( const ThreadSafeValue< GalerkinOperator > &ops )
      {
        std::size_t s = ops.size();
        std::size_t sum = 0;
        for( std::size_t i=0; i<s; ++i )
          sum += ops[ i ].gridSizeInterior();
        return sum;
      }

    } // namespace Impl



    // GalerkinOperator
    // ----------------

    template< class Integrands, class DomainFunction, class RangeFunction = DomainFunction >
    struct GalerkinOperator
      : public virtual Operator< DomainFunction, RangeFunction >
    {
      typedef DomainFunction DomainFunctionType;
      typedef RangeFunction RangeFunctionType;

      typedef typename RangeFunctionType::GridPartType GridPartType;

      typedef Impl::LocalGalerkinOperator< Integrands > LocalGalerkinOperatorImplType;
      typedef Impl::GalerkinOperator< GridPartType > GalerkinOperatorImplType;

      static_assert( std::is_same< typename DomainFunctionType::GridPartType, typename RangeFunctionType::GridPartType >::value, "DomainFunction and RangeFunction must be defined on the same grid part." );

      typedef ThreadIterator< GridPartType > ThreadIteratorType;

      template< class... Args >
      explicit GalerkinOperator ( const GridPartType &gridPart, Args &&... args )
        : iterators_( gridPart ),
          opImpl_( gridPart ),
          localOp_( gridPart, std::forward< Args >( args )... ),
          gridSizeInterior_( 0 ),
          communicate_( true )
      {
      }

      void setCommunicate ( const bool communicate )
      {
        communicate_ = communicate;
        if( ! communicate_ && Dune::Fem::Parameter::verbose() )
        {
          std::cout << "GalerkinOperator::setCommunicate: communicate was disabled!" << std::endl;
        }
      }

      void setQuadratureOrders ( unsigned int interior, unsigned int surface )
      {
        size_t size = localOp_.size();
        for( size_t i=0; i<size; ++i )
          localOp_[ i ].setQuadratureOrders( interior, surface );
      }

      virtual bool nonlinear () const final override
      {
        return localOperator().nonlinear();
      }

      virtual void operator() ( const DomainFunctionType &u, RangeFunctionType &w ) const final override
      {
        evaluate( u, w );
      }

      template< class GridFunction >
      void operator() ( const GridFunction &u, RangeFunctionType &w ) const
      {
        evaluate( u, w );
      }

      const GridPartType &gridPart () const { return op().gridPart(); }

      typedef Integrands ModelType;
      typedef Integrands DirichletModelType;
      ModelType &model () const { return localOperator().model(); }

      [[deprecated("Use localOperator instead!")]]
      const LocalGalerkinOperatorImplType &impl () const { return localOperator(); }

      const LocalGalerkinOperatorImplType &localOperator () const { return *localOp_; }

      std::size_t gridSizeInterior () const { return gridSizeInterior_; }

    protected:
      const GalerkinOperatorImplType &op () const { return *opImpl_; }

      template < class GridFunction >
      void evaluate ( const GridFunction &u, RangeFunctionType &w ) const
      {
        iterators_.update();
        w.clear();

        std::shared_mutex mutex;

        auto doEval = [ this, &u, &w, &mutex ] ()
        {
          // TODO: Move this to be a class variable
          std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
          this->op().evaluate( u, w, this->iterators_, integrands, mutex );
        };

        try {
          // execute in parallel
          MPIManager :: run ( doEval );

          // update number of interior elements as sum over threads
          gridSizeInterior_ = Impl::accumulateGridSize( opImpl_ );
        }
        catch ( const SingleThreadModeError& e )
        {
          // reset w from previous entries
          w.clear();
          // re-run in single thread mode if previous attempt failed
          std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
          op().evaluate( u, w, iterators_, integrands );

          // update number of interior elements as sum over threads
          gridSizeInterior_ = op().gridSizeInterior();
        }

        // synchronize result
        if( communicate_ )
          w.communicate();
      }

      mutable ThreadIteratorType iterators_;
      // per-thread implementation objects (one instance for each thread)
      mutable ThreadSafeValue< GalerkinOperatorImplType >      opImpl_;
      mutable ThreadSafeValue< LocalGalerkinOperatorImplType > localOp_;

      mutable std::size_t gridSizeInterior_;
      bool communicate_;
    };
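
    // Usage sketch (illustration only; 'gridPart', 'model' and the discrete
    // function type are assumptions, not defined in this header):
    //
    //   typedef GalerkinOperator< Model, DiscreteFunctionType > OperatorType;
    //   OperatorType op( gridPart, model );   // forwards 'model' to the integrands
    //   op( u, w );                           // w = op(u), communicated across processes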
1455
1456
1457
1458 // DifferentiableGalerkinOperator
1459 // ------------------------------
1460
1461 template< class Integrands, class JacobianOperator >
1462 class DifferentiableGalerkinOperator
1463 : public GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType >,
1464 public DifferentiableOperator< JacobianOperator >
1465 {
1466 typedef GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType > BaseType;
1467
1468 typedef typename BaseType :: LocalGalerkinOperatorImplType LocalGalerkinOperatorImplType;
1469 public:
1470 typedef JacobianOperator JacobianOperatorType;
1471
1472 typedef typename BaseType::DomainFunctionType DomainFunctionType;
1473 typedef typename BaseType::RangeFunctionType RangeFunctionType;
1474 typedef typename DomainFunctionType::DiscreteFunctionSpaceType DomainDiscreteFunctionSpaceType;
1475 typedef typename RangeFunctionType::DiscreteFunctionSpaceType RangeDiscreteFunctionSpaceType;
1476
1477 typedef DiagonalAndNeighborStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalAndNeighborStencilType;
1478 typedef DiagonalStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalStencilType;
1479
1480 typedef typename BaseType::GridPartType GridPartType;
1481
1482 template< class... Args >
1483 explicit DifferentiableGalerkinOperator ( const DomainDiscreteFunctionSpaceType &dSpace,
1484 const RangeDiscreteFunctionSpaceType &rSpace,
1485 Args &&... args )
1486 : BaseType( rSpace.gridPart(), std::forward< Args >( args )... ),
1487 dSpace_(dSpace), rSpace_(rSpace),
1488 domainSpaceSequence_(dSpace.sequence()),
1489 rangeSpaceSequence_(rSpace.sequence()),
1490 stencilDAN_(), stencilD_()
1491 {
1492 if( hasSkeleton() )
1493 stencilDAN_.reset( new DiagonalAndNeighborStencilType( dSpace_, rSpace_ ) );
1494 else
1495 stencilD_.reset( new DiagonalStencilType( dSpace_, rSpace_ ) );
1496 }
1497
1498 virtual void jacobian ( const DomainFunctionType &u, JacobianOperatorType &jOp ) const final override
1499 {
1500 assemble( u, jOp );
1501 }
1502
1503 template< class GridFunction >
1504 void jacobian ( const GridFunction &u, JacobianOperatorType &jOp ) const
1505 {
1506 assemble( u, jOp );
1507 }
1508
1509 const DomainDiscreteFunctionSpaceType& domainSpace() const
1510 {
1511 return dSpace_;
1512 }
1513 const RangeDiscreteFunctionSpaceType& rangeSpace() const
1514 {
1515 return rSpace_;
1516 }
1517
1518 using BaseType::localOperator;
1519 using BaseType::nonlinear;
1520
1521 protected:
1522 using BaseType::op;
1523
1524 bool hasSkeleton() const
1525 {
1526 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1527 return op().hasSkeleton( integrands );
1528 }
1529
1530 void prepare( JacobianOperatorType& jOp ) const
1531 {
1532 if ( domainSpaceSequence_ != domainSpace().sequence()
1533 || rangeSpaceSequence_ != rangeSpace().sequence() )
1534 {
1535 domainSpaceSequence_ = domainSpace().sequence();
1536 rangeSpaceSequence_ = rangeSpace().sequence();
1537 if( hasSkeleton() )
1538 {
1539 assert( stencilDAN_ );
1540 stencilDAN_->update();
1541 }
1542 else
1543 {
1544 assert( stencilD_ );
1545 stencilD_->update();
1546 }
1547 }
1548 if( hasSkeleton() )
1549 jOp.reserve( *stencilDAN_ );
1550 else
1551 jOp.reserve( *stencilD_ );
1552 // set all entries to zero
1553 jOp.clear();
1554 }
1555
1556 template < class GridFunction >
1557 void assemble( const GridFunction &u, JacobianOperatorType &jOp ) const
1558 {
1559 // reserve memory and clear entries
1560 {
1561 prepare( jOp );
1562 iterators_.update();
1563 }
1564
1565 std::shared_mutex mutex;
1566
1567 auto doAssemble = [this, &u, &jOp, &mutex] ()
1568 {
1569 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1570 this->op().assemble( u, jOp, this->iterators_, integrands, mutex );
1571 };
1572
1573 try {
1574 // execute in parallel
1575 MPIManager :: run ( doAssemble );
1576
1577 // update number of interior elements as sum over threads
1578 gridSizeInterior_ = Impl::accumulateGridSize( this->opImpl_ );
1579 }
1580 catch ( const SingleThreadModeError& e )
1581 {
1582 // re-run the assembly in single-thread mode since the parallel attempt failed
1583 jOp.clear();
1584 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1585 op().assemble( u, jOp, iterators_, integrands );
1586 // update number of interior elements (single-thread run, no summation over threads needed)
1587 gridSizeInterior_ = op().gridSizeInterior();
1588 }
1589
1590 // note: the assembly above bypasses the LocalContribution interface,
1591 // so flushAssembly() has to be called explicitly here
1592 jOp.flushAssembly();
1593 }
1594
1595 using BaseType::iterators_;
1596 using BaseType::gridSizeInterior_;
1597
1598 const DomainDiscreteFunctionSpaceType &dSpace_;
1599 const RangeDiscreteFunctionSpaceType &rSpace_;
1600
1601 mutable int domainSpaceSequence_, rangeSpaceSequence_;
1602
1603 mutable std::unique_ptr< DiagonalAndNeighborStencilType > stencilDAN_;
1604 mutable std::unique_ptr< DiagonalStencilType > stencilD_;
1605 };
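// Usage sketch: assembling the Jacobian of the operator into a linear operator.
// `space`, `integrands` and `u` are placeholders; SparseRowLinearOperator and its
// constructor arguments are one possible (assumed) choice for JacobianOperator.
//
// \code
//   typedef Dune::Fem::SparseRowLinearOperator< DiscreteFunctionType, DiscreteFunctionType > LinearOperatorType;
//   DifferentiableGalerkinOperator< MyIntegrands, LinearOperatorType > diffOp( space, space, integrands );
//
//   LinearOperatorType jOp( "jacobian", space, space );
//   diffOp.jacobian( u, jOp );   // prepare() reserves the stencil, assemble() fills and flushes jOp
// \endcode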
1606
1607
1608
1609 // AutomaticDifferenceGalerkinOperator
1610 // -----------------------------------
1611
1612 template< class Integrands, class DomainFunction, class RangeFunction >
1613 class AutomaticDifferenceGalerkinOperator
1614 : public GalerkinOperator< Integrands, DomainFunction, RangeFunction >,
1615 public AutomaticDifferenceOperator< DomainFunction, RangeFunction >
1616 {
1617 typedef GalerkinOperator< Integrands, DomainFunction, RangeFunction > BaseType;
1618 typedef AutomaticDifferenceOperator< DomainFunction, RangeFunction > AutomaticDifferenceOperatorType;
1619
1620 public:
1621 typedef typename BaseType::GridPartType GridPartType;
1622
1623 template< class... Args >
1624 explicit AutomaticDifferenceGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
1625 : BaseType( gridPart, std::forward< Args >( args )... ), AutomaticDifferenceOperatorType()
1626 {}
1627 };
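// Note: no Jacobian matrix is assembled here; the AutomaticDifferenceOperator base
// provides Jacobian applications through finite-difference approximations of the
// (possibly nonlinear) Galerkin operator.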
1628
1629
1630
1631 // ModelDifferentiableGalerkinOperator
1632 // -----------------------------------
1633
1634 template < class LinearOperator, class ModelIntegrands >
1635 struct ModelDifferentiableGalerkinOperator
1636 : public DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator >
1637 {
1638 typedef DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator > BaseType;
1639
1640 typedef typename ModelIntegrands::ModelType ModelType;
1641
1642 typedef typename LinearOperator::DomainFunctionType RangeFunctionType;
1643 typedef typename LinearOperator::RangeSpaceType DiscreteFunctionSpaceType;
1644
1645 ModelDifferentiableGalerkinOperator ( ModelType &model, const DiscreteFunctionSpaceType &dfSpace )
1646 : BaseType( dfSpace.gridPart(), model )
1647 {}
1648
1649 template< class GridFunction >
1650 void apply ( const GridFunction &u, RangeFunctionType &w ) const
1651 {
1652 (*this)( u, w );
1653 }
1654
1655 template< class GridFunction >
1656 void apply ( const GridFunction &u, LinearOperator &jOp ) const
1657 {
1658 (*this).jacobian( u, jOp );
1659 }
1660 };
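// Usage sketch: `model`, `space`, `u`, `w` and `jOp` are placeholders. The apply()
// overloads simply forward to operator() and jacobian() of the base class.
//
// \code
//   ModelDifferentiableGalerkinOperator< LinearOperatorType, ModelIntegrandsType > modelOp( model, space );
//   modelOp.apply( u, w );     // residual evaluation, equivalent to modelOp( u, w )
//   modelOp.apply( u, jOp );   // Jacobian assembly, equivalent to modelOp.jacobian( u, jOp )
// \endcode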
1661
1662 namespace Impl
1663 {
1664
1665 // GalerkinSchemeTraits and GalerkinSchemeImpl
1666 // -------------------------------------------
1667 template< class Integrands, class LinearOperator, bool addDirichletBC,
1668 template <class,class> class DifferentiableGalerkinOperatorImpl >
1669 struct GalerkinSchemeTraits
1670 {
1671 template <class O, bool addDBC>
1672 struct DirichletBlockSelector { using type = void; };
1673 template <class O>
1674 struct DirichletBlockSelector<O,true> { using type = typename O::DirichletBlockVector; };
1675
1676 using DifferentiableOperatorType = std::conditional_t< addDirichletBC,
1677 DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1678 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator > >;
1679 using DirichletBlockVector = typename DirichletBlockSelector<
1680 DirichletWrapperOperator<
1681 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1682 addDirichletBC>::type;
1683
1684 typedef DifferentiableOperatorType type;
1685 };
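// Illustration of the std::conditional_t selection above (types are placeholders):
// with addDirichletBC == true the differentiable operator is wrapped in a
// DirichletWrapperOperator, otherwise it is used directly.
//
// \code
//   using TraitsWithBC = GalerkinSchemeTraits< MyIntegrands, LinearOperatorType, true,
//                                              DifferentiableGalerkinOperator >;
//   static_assert( std::is_same_v< TraitsWithBC::type,
//       DirichletWrapperOperator< DifferentiableGalerkinOperator< MyIntegrands, LinearOperatorType > > > );
// \endcode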
1686
1687 template< class Integrands, class LinearOperator, class LinearInverseOperator, bool addDirichletBC,
1688 template <class,class> class DifferentiableGalerkinOperatorImpl = DifferentiableGalerkinOperator >
1689 struct GalerkinSchemeImpl : public FemScheme< typename
1690 GalerkinSchemeTraits< Integrands, LinearOperator,
1691 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1692 LinearInverseOperator > // LinearInverseOperator
1693 {
1694 typedef FemScheme< typename GalerkinSchemeTraits< Integrands, LinearOperator,
1695 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1696 LinearInverseOperator > // LinearInverseOperator
1697 BaseType;
1698
1699 typedef typename BaseType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
1700
1701 GalerkinSchemeImpl ( const DiscreteFunctionSpaceType &dfSpace,
1702 const Integrands &integrands,
1703 const ParameterReader& parameter = Parameter::container() )
1704 : BaseType(dfSpace,
1705 parameter,
1706 std::move(integrands))
1707 {}
1708 };
1709
1710 } // end namespace Impl
1711
1712 // GalerkinScheme
1713 // --------------
1714
1715 template< class Integrands, class LinearOperator, class InverseOperator, bool addDirichletBC >
1716 using GalerkinScheme = Impl::GalerkinSchemeImpl< Integrands, LinearOperator, InverseOperator, addDirichletBC,
1717 DifferentiableGalerkinOperator >;
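// Usage sketch: constructing and solving with the scheme. The linear operator and
// inverse operator types as well as the solve() call inherited from FemScheme are
// assumptions; `space`, `integrands` and `solution` are placeholders.
//
// \code
//   typedef Dune::Fem::GalerkinScheme< MyIntegrands, LinearOperatorType,
//                                      InverseOperatorType, false > SchemeType;
//   SchemeType scheme( space, integrands );
//   scheme.solve( solution );   // Newton/linear solve provided by the FemScheme base class
// \endcode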
1718
1719 } // namespace Fem
1720
1721} // namespace Dune
1722
1723#endif // #ifndef DUNE_FEM_SCHEMES_GALERKIN_HH