DUNE-FEM (unstable)

galerkin.hh
1#ifndef DUNE_FEM_SCHEMES_GALERKIN_HH
2#define DUNE_FEM_SCHEMES_GALERKIN_HH
3
4#include <cstddef>
5
6#include <tuple>
7#include <type_traits>
8#include <utility>
9#include <shared_mutex>
10#include <vector>
11#include <memory>
12
13#include <dune/common/hybridutilities.hh>
14#include <dune/common/timer.hh>
15
16#include <dune/grid/common/rangegenerators.hh>
17
18#include <dune/fem/function/localfunction/temporary.hh>
19#include <dune/fem/io/parameter/reader.hh>
20#include <dune/fem/operator/common/automaticdifferenceoperator.hh>
21#include <dune/fem/operator/common/differentiableoperator.hh>
22#include <dune/fem/operator/common/operator.hh>
23#include <dune/fem/operator/common/stencil.hh>
24#include <dune/fem/operator/common/temporarylocalmatrix.hh>
25#include <dune/fem/quadrature/cachingquadrature.hh>
26#include <dune/fem/quadrature/intersectionquadrature.hh>
27#include <dune/fem/common/bindguard.hh>
28
29#include <dune/fem/misc/threads/threaditerator.hh>
30#include <dune/fem/misc/threads/threadsafevalue.hh>
31
32#include <dune/fem/operator/common/localmatrixcolumn.hh>
33#include <dune/fem/operator/common/localcontribution.hh>
34#include <dune/fem/operator/1order/localmassmatrix.hh>
35#include <dune/fem/schemes/integrands.hh>
36#include <dune/fem/schemes/dirichletwrapper.hh>
37#include <dune/fem/schemes/femscheme.hh>
38
39#include <dune/fem/space/common/capabilities.hh>
40
41// fempy includes
42#include <dune/fempy/quadrature/fempyquadratures.hh>
43
44namespace Dune
45{
46
47 namespace Fem
48 {
49
50 namespace Impl
51 {
52 template <class M>
53 class CallOrder
54 {
55
56 template <class F>
57 static int callOrder(const F& f, char)
58 {
59#ifndef NDEBUG
60 std::cerr << "WARNING: no order method available on " << typeid(F).name() << ", defaulting to 1!" << std::endl;
61#endif
62 return 1;
63 }
64
65 template <class F>
66 static auto callOrder(const F& f, int) -> decltype( f.order() )
67 {
68 return f.order();
69 }
70
71 public:
72 template <class F>
73 static int order (const F& f ) { return callOrder(f, 0); }
74 };
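 // Illustrative sketch (added, not part of the original header): CallOrder
 // detects an order() method via SFINAE and falls back to 1 (with a warning
 // in debug builds) when no such method exists. Assuming two hypothetical
 // function types:
 //
 //   struct WithOrder { int order () const { return 3; } };
 //   struct WithoutOrder {};
 //
 //   int a = CallOrder< WithOrder >::order( WithOrder{} );       // a == 3
 //   int b = CallOrder< WithoutOrder >::order( WithoutOrder{} ); // b == 1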
75
76 // GalerkinOperator
77 // ----------------
78
79 template <class Space>
80 struct DefaultGalerkinOperatorQuadratureSelector
81 {
82 typedef typename Space :: GridPartType GridPartType;
83 typedef CachingQuadrature< GridPartType, 0, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > InteriorQuadratureType;
84 typedef CachingQuadrature< GridPartType, 1, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > SurfaceQuadratureType;
85 // typedef CachingQuadrature< GridPartType, 0, Dune::FemPy::FempyQuadratureTraits > InteriorQuadratureType;
86 // typedef CachingQuadrature< GridPartType, 1, Dune::FemPy::FempyQuadratureTraits > SurfaceQuadratureType;
87 };
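 // Illustrative sketch (added, not part of the original header): the commented
 // typedefs above hint at how a different quadrature implementation can be
 // chosen. Any selector providing the same two typedefs can be passed as the
 // QuadSelector template argument of LocalGalerkinOperator below, e.g.
 //
 //   template< class Space >
 //   struct FempyQuadratureSelector   // hypothetical name
 //   {
 //     typedef typename Space::GridPartType GridPartType;
 //     typedef CachingQuadrature< GridPartType, 0, Dune::FemPy::FempyQuadratureTraits > InteriorQuadratureType;
 //     typedef CachingQuadrature< GridPartType, 1, Dune::FemPy::FempyQuadratureTraits > SurfaceQuadratureType;
 //   };
 //
 //   // LocalGalerkinOperator< Integrands, FempyQuadratureSelector > then uses these quadratures.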
88
89 // LocalGalerkinOperator
90 // ---------------------
91
92 template< class Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
93 struct LocalGalerkinOperator
94 {
95 typedef LocalGalerkinOperator<Integrands> ThisType;
96 typedef std::conditional_t< Fem::IntegrandsTraits< Integrands >::isFull, Integrands, FullIntegrands< Integrands > > IntegrandsType;
97
98 typedef typename IntegrandsType::GridPartType GridPartType;
99
100 typedef typename GridPartType::ctype ctype;
101 typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;
102
103 // typedef QuadratureSelector
104 template <class Space>
105 using QuadratureSelector = QuadSelector< Space >;
106
107 // constructor
108 template< class... Args >
109 explicit LocalGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
110 : gridPart_( gridPart ),
111 integrands_( std::forward< Args >( args )... ),
112 defaultInteriorOrder_( [] (const int order) { return 2 * order; } ),
113 defaultSurfaceOrder_ ( [] (const int order) { return 2 * order + 1; } ),
114 interiorQuadOrder_(0), surfaceQuadOrder_(0)
115 {
116 }
117
118 protected:
119 typedef typename IntegrandsType::DomainValueType DomainValueType;
120 typedef typename IntegrandsType::RangeValueType RangeValueType;
121 typedef std::make_index_sequence< std::tuple_size< DomainValueType >::value > DomainValueIndices;
122 typedef std::make_index_sequence< std::tuple_size< RangeValueType >::value > RangeValueIndices;
123
124
125 template< std::size_t... i >
126 static auto makeDomainValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
127 {
128 return std::make_tuple( std::vector< std::tuple_element_t< i, DomainValueType > >( maxNumLocalDofs )... );
129 }
130
131 static auto makeDomainValueVector ( std::size_t maxNumLocalDofs )
132 {
133 return makeDomainValueVector( maxNumLocalDofs, DomainValueIndices() );
134 }
135
136 template< std::size_t... i >
137 static auto makeRangeValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
138 {
139 return std::make_tuple( std::vector< std::tuple_element_t< i, RangeValueType > >( maxNumLocalDofs )... );
140 }
141
142 static auto makeRangeValueVector ( std::size_t maxNumLocalDofs )
143 {
144 return makeRangeValueVector( maxNumLocalDofs, RangeValueIndices() );
145 }
146
147 typedef decltype( makeDomainValueVector( 0u ) ) DomainValueVectorType;
148 typedef decltype( makeRangeValueVector( 0u ) ) RangeValueVectorType;
149
150 static void resizeDomainValueVector ( DomainValueVectorType& vec, const std::size_t size )
151 {
152 Hybrid::forEach( DomainValueIndices(), [ &vec, &size ] ( auto i ) {
153 std::get< i >( vec ).resize( size );
154 } );
155 }
156
157 static void resizeRangeValueVector ( RangeValueVectorType& vec, const std::size_t size )
158 {
159 Hybrid::forEach( RangeValueIndices(), [ &vec, &size ] ( auto i ) {
160 std::get< i >( vec ).resize( size );
161 } );
162 }
163
164 public:
165 void prepare( const std::size_t size ) const
166 {
167 resizeDomainValueVector( phiIn_, size );
168 resizeDomainValueVector( phiOut_, size );
169 resizeDomainValueVector( basisValues_, size );
170 resizeDomainValueVector( domainValues_, size );
171 }
172
173 template< class LocalFunction, class Quadrature >
174 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::RangeType > &phi )
175 {
176 u.evaluateQuadrature( quad, phi );
177 }
178
179 template< class LocalFunction, class Quadrature>
180 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::JacobianRangeType > &phi )
181 {
182 u.jacobianQuadrature( quad, phi );
183 }
184
185 template< class LocalFunction, class Quadrature >
186 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::HessianRangeType > &phi )
187 {
188 u.hessianQuadrature( quad, phi );
189 }
190
191 protected:
192 template< class LocalFunction, class Point >
193 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::RangeType &phi )
194 {
195 u.evaluate( x, phi );
196 }
197
198 template< class LocalFunction, class Point >
199 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::JacobianRangeType &phi )
200 {
201 u.jacobian( x, phi );
202 }
203
204 template< class LocalFunction, class Point >
205 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::HessianRangeType &phi )
206 {
207 u.hessian( x, phi );
208 }
209
210 template< class LocalFunction, class Point, class... T >
211 static void value ( const LocalFunction &u, const Point &x, std::tuple< T... > &phi )
212 {
213 Hybrid::forEach( std::index_sequence_for< T... >(), [ &u, &x, &phi ] ( auto i ) { LocalGalerkinOperator::value( u, x, std::get< i >( phi ) ); } );
214 }
215
216 template< class Basis, class Point >
217 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::RangeType > &phi )
218 {
219 basis.evaluateAll( x, phi );
220 }
221
222 template< class Basis, class Point >
223 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::JacobianRangeType > &phi )
224 {
225 basis.jacobianAll( x, phi );
226 }
227
228 template< class Basis, class Point >
229 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::HessianRangeType > &phi )
230 {
231 basis.hessianAll( x, phi );
232 }
233
234 template< class Basis, class Point, class... T >
235 static void values ( const Basis &basis, const Point &x, std::tuple< std::vector< T >... > &phi )
236 {
237 Hybrid::forEach( std::index_sequence_for< T... >(), [ &basis, &x, &phi ] ( auto i ) { LocalGalerkinOperator::values( basis, x, std::get< i >( phi ) ); } );
238 }
239
240 template< class LocalFunction, class Point >
241 static DomainValueType domainValue ( const LocalFunction &u, const Point &x )
242 {
243 DomainValueType phi;
244 value( u, x, phi );
245 return phi;
246 }
247
248 static DomainValueType domainValue ( const unsigned int qpIdx, DomainValueVectorType& vec)
249 {
250 DomainValueType phi;
251 Hybrid::forEach( DomainValueIndices(), [ &qpIdx, &vec, &phi ] ( auto i ) {
252 std::get< i > ( phi ) = std::get< i >( vec )[ qpIdx ];
253 } );
254 return phi;
255 }
256
257 template< class LocalFunction, class Quadrature >
258 static void domainValue ( const LocalFunction &u, const Quadrature& quadrature, DomainValueVectorType &result )
259 {
260 Hybrid::forEach( DomainValueIndices(), [ &u, &quadrature, &result ] ( auto i ) {
261 auto& vec = std::get< i >( result );
262 vec.resize( quadrature.nop() );
263 ThisType::evaluateQuadrature( u, quadrature, vec );
264 } );
265 }
266
267 template< class Phi, std::size_t... i >
268 static auto value ( const Phi &phi, std::size_t col, std::index_sequence< i... > )
269 {
270 return std::make_tuple( std::get< i >( phi )[ col ]... );
271 }
272
273 template< class... T >
274 static auto value ( const std::tuple< std::vector< T >... > &phi, std::size_t col )
275 {
276 return value( phi, col, std::index_sequence_for< T... >() );
277 }
278
279 static void assignRange( RangeValueVectorType& ranges, const std::size_t idx, const RangeValueType& range )
280 {
281 Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range ] ( auto i ) {
282 std::get< i >( ranges )[ idx ] = std::get< i >( range );
283 });
284 }
285 template <class W>
286 static void assignRange( RangeValueVectorType& ranges, const std::size_t idx, const RangeValueType& range, const W &weight )
287 {
288 Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range, &weight ] ( auto i ) {
289 std::get< i >( ranges )[ idx ] = std::get< i >( range );
290 std::get< i >( ranges )[ idx ] *= weight;
291 });
292 }
293
294 static void assignDomain( DomainValueVectorType& domains, const std::size_t idx, const DomainValueType& domain )
295 {
296 Hybrid::forEach( DomainValueIndices(), [ &domains, &idx, &domain ] ( auto i ) {
297 std::get< i >( domains )[ idx ] = std::get< i >( domain );
298 });
299 }
300
301 template <class W, class Quadrature>
302 static void axpyQuadrature( W& w, const Quadrature& quadrature, RangeValueVectorType& ranges )
303 {
304 Hybrid::forEach( RangeValueIndices(), [ &w, &quadrature, &ranges ] ( auto i ) {
305 w.axpyQuadrature( quadrature, std::get< i >( ranges ) );
306 } );
307 }
308
309 public:
310 // interior integral
311
312 template< class U, class W >
313 void addInteriorIntegral ( const U &u, W &w ) const
314 {
315 if( !integrands().init( u.entity() ) )
316 return;
317
318 const auto& geometry = u.geometry();
319
320 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
321 const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder(maxOrder(u, w)) );
322
323 // evaluate u for all quadrature points
324 DomainValueVectorType& domains = domainValues_;
325 domainValue( u, quadrature, domains );
326
327 auto& ranges = values_;
328 resizeRangeValueVector( ranges, quadrature.nop() );
329
330 // evaluate integrands for all quadrature points
331 for( const auto qp : quadrature )
332 {
333 const ctype weight = qp.weight() * geometry.integrationElement( qp.position() );
334 assignRange( ranges, qp.index(), integrands().interior( qp, domainValue( qp.index(), domains ) ), weight );
335 }
336
337 // add to w for all quadrature points
338 axpyQuadrature( w, quadrature, ranges );
339 integrands().unbind();
340 }
341
342 template< class U, class J >
343 void addLinearizedInteriorIntegral ( const U &u, J &j ) const
344 {
345 if( !integrands().init( u.entity() ) )
346 return;
347
348 const auto &geometry = u.geometry();
349 const auto &domainBasis = j.domainBasisFunctionSet();
350 const auto &rangeBasis = j.rangeBasisFunctionSet();
351
352 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
353 const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder( maxOrder( u, domainBasis, rangeBasis )) );
354 const size_t domainSize = domainBasis.size();
355 const size_t quadNop = quadrature.nop();
356
357 auto& basisValues = basisValues_;
358 resizeDomainValueVector( basisValues, domainSize );
359
360 // evaluate u for all quadrature points
361 auto& rangeValues = rangeValues_;
362 DomainValueVectorType& domains = domainValues_;
363 domainValue( u, quadrature, domains );
364
365 rangeValues.resize( domainSize );
366 for( std::size_t col = 0; col < domainSize; ++col )
367 {
368 resizeRangeValueVector( rangeValues[ col ], quadNop );
369 }
370
371 // evaluate all basis functions and integrands
372 for( const auto qp : quadrature )
373 {
374 values( domainBasis, qp, basisValues );
375 const auto weight = qp.weight() * geometry.integrationElement( qp.position() );
376 auto integrand = integrands().linearizedInterior( qp, domainValue( qp.index(), domains ) );
377 for( std::size_t col = 0; col < domainSize; ++col )
378 {
379 assignRange( rangeValues[ col ], qp.index(), integrand( value( basisValues, col ) ), weight );
380 }
381 }
382
383 // add to local matrix for all quadrature points and basis functions
384 for( std::size_t col = 0; col < domainSize; ++col )
385 {
386 LocalMatrixColumn< J > jCol( j, col );
387 axpyQuadrature( jCol, quadrature, rangeValues[ col ] );
388 }
389 integrands().unbind();
390 }
391
392 // boundary integral
393
394 template< class Intersection, class U, class W >
395 void addBoundaryIntegral ( const Intersection &intersection, const U &u, W &w ) const
396 {
397 if( !integrands().init( intersection ) )
398 return;
399
400 const auto geometry = intersection.geometry();
401 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
402 const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( u, w )), SurfaceQuadratureType::INSIDE );
403 for( const auto qp : quadrature )
404 {
405 const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );
406
407 RangeValueType integrand = integrands().boundary( qp, domainValue( u, qp ) );
408
409 Hybrid::forEach( RangeValueIndices(), [ &qp, &w, &integrand, weight ] ( auto i ) {
410 std::get< i >( integrand ) *= weight;
411 w.axpy( qp, std::get< i >( integrand ) );
412 } );
413 }
414 integrands().unbind();
415 }
416
417 template< class Intersection, class U, class J >
418 void addLinearizedBoundaryIntegral ( const Intersection &intersection, const U &u, J &j ) const
419 {
420 if( !integrands().init( intersection ) )
421 return;
422
423 DomainValueVectorType &phi = phiIn_;
424
425 const auto geometry = intersection.geometry();
426 const auto &domainBasis = j.domainBasisFunctionSet();
427 const auto &rangeBasis = j.rangeBasisFunctionSet();
428
429 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
430 const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder(u, domainBasis, rangeBasis )), SurfaceQuadratureType::INSIDE );
431 for( const auto qp : quadrature )
432 {
433 const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );
434
435 values( domainBasis, qp, phi );
436 auto integrand = integrands().linearizedBoundary( qp, domainValue( u, qp ) );
437
438 for( std::size_t col = 0, cols = domainBasis.size(); col < cols; ++col )
439 {
440 LocalMatrixColumn< J > jCol( j, col );
441 RangeValueType intPhi = integrand( value( phi, col ) );
442
443 Hybrid::forEach( RangeValueIndices(), [ &qp, &jCol, &intPhi, weight ] ( auto i ) {
444 std::get< i >( intPhi ) *= weight;
445 jCol.axpy( qp, std::get< i >( intPhi ) );
446 } );
447 }
448 }
449 integrands().unbind();
450 }
451
452 // addSkeletonIntegral
453
454 protected:
455 template< bool conforming, class Intersection, class U, class W >
456 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn ) const
457 {
458 const auto geometry = intersection.geometry();
459
460 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
461 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
462 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( uIn, uOut, wIn)), false );
463 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
464 {
465 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
466
467 const auto qpIn = quadrature.inside()[ qp ];
468 const auto qpOut = quadrature.outside()[ qp ];
469 std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
470
471 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &integrand, weight ] ( auto i ) {
472 std::get< i >( integrand.first ) *= weight;
473 wIn.axpy( qpIn, std::get< i >( integrand.first ) );
474 } );
475 }
476 }
477
478 template< bool conforming, class Intersection, class U, class W >
479 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn, W &wOut ) const
480 {
481 const auto geometry = intersection.geometry();
482 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
483 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
484 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( uIn, uOut, wIn, wOut)), false );
485 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
486 {
487 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
488
489 const auto qpIn = quadrature.inside()[ qp ];
490 const auto qpOut = quadrature.outside()[ qp ];
491 std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
492
493 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &qpOut, &wOut, &integrand, weight ] ( auto i ) {
494 std::get< i >( integrand.first ) *= weight;
495 wIn.axpy( qpIn, std::get< i >( integrand.first ) );
496
497 std::get< i >( integrand.second ) *= weight;
498 wOut.axpy( qpOut, std::get< i >( integrand.second ) );
499 } );
500 }
501 }
502
503 template< bool conforming, class Intersection, class U, class J >
504 void addLinearizedSkeletonIntegral ( const Intersection &intersection,
505 const U &uIn, const U &uOut, J &jInIn, J &jOutIn ) const
506 {
507 DomainValueVectorType &phiIn = phiIn_;
508 DomainValueVectorType &phiOut = phiOut_;
509
510 const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
511 const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();
512
513 const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();
514
515 const int order = std::max( maxOrder(uIn, uOut), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn ));
516
517 const auto geometry = intersection.geometry();
518 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
519 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
520 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(order), false );
521 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
522 {
523 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
524
525 const auto qpIn = quadrature.inside()[ qp ];
526 const auto qpOut = quadrature.outside()[ qp ];
527
528 values( domainBasisIn, qpIn, phiIn );
529 values( domainBasisOut, qpOut, phiOut );
530
531 auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
532 for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
533 {
534 LocalMatrixColumn< J > jInInCol( jInIn, col );
535 std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );
536
537 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &intPhi, weight ] ( auto i ) {
538 std::get< i >( intPhi.first ) *= weight;
539 jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
540 } );
541 }
542 for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
543 {
544 LocalMatrixColumn< J > jOutInCol( jOutIn, col );
545 std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );
546
547 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &intPhi, weight ] ( auto i ) {
548 std::get< i >( intPhi.first ) *= weight;
549 jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
550 } );
551 }
552 }
553 }
554
555 template< bool conforming, class Intersection, class U, class J >
556 void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut,
557 J &jInIn, J &jOutIn, J &jInOut, J &jOutOut ) const
558 {
559 DomainValueVectorType &phiIn = phiIn_;
560 DomainValueVectorType &phiOut = phiOut_;
561
562 const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
563 const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();
564
565 const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();
566 const auto &rangeBasisOut = jInOut.rangeBasisFunctionSet();
567
568 const int order = std::max( maxOrder(uIn, uOut), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn, rangeBasisOut ));
569
570 const auto geometry = intersection.geometry();
571 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
572 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
573 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(order), false );
574 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
575 {
576 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
577
578 const auto qpIn = quadrature.inside()[ qp ];
579 const auto qpOut = quadrature.outside()[ qp ];
580
581 values( domainBasisIn, qpIn, phiIn );
582 values( domainBasisOut, qpOut, phiOut );
583
584 auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
585 for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
586 {
587 LocalMatrixColumn< J > jInInCol( jInIn, col );
588 LocalMatrixColumn< J > jInOutCol( jInOut, col );
589 std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );
590
591 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &qpOut, &jInOutCol, &intPhi, weight ] ( auto i ) {
592 std::get< i >( intPhi.first ) *= weight;
593 jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
594
595 std::get< i >( intPhi.second ) *= weight;
596 jInOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
597 } );
598 }
599 for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
600 {
601 LocalMatrixColumn< J > jOutInCol( jOutIn, col );
602 LocalMatrixColumn< J > jOutOutCol( jOutOut, col );
603 std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );
604
605 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &qpOut, &jOutOutCol, &intPhi, weight ] ( auto i ) {
606 std::get< i >( intPhi.first ) *= weight;
607 jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
608
609 std::get< i >( intPhi.second ) *= weight;
610 jOutOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
611 } );
612 }
613 }
614 }
615
616 public:
617 template< class Intersection, class U, class... W >
618 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &... w ) const
619 {
620 if( !integrands().init( intersection ) )
621 return;
622
623 if( intersection.conforming() )
624 addSkeletonIntegral< true >( intersection, uIn, uOut, w... );
625 else
626 addSkeletonIntegral< false >( intersection, uIn, uOut, w... );
627 integrands().unbind();
628 }
629
630 template< class Intersection, class U, class... J >
631 void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, J &... j ) const
632 {
633 if( !integrands().init( intersection ) )
634 return;
635
636 if( intersection.conforming() )
637 addLinearizedSkeletonIntegral< true >( intersection, uIn, uOut, j... );
638 else
639 addLinearizedSkeletonIntegral< false >( intersection, uIn, uOut, j... );
640 integrands().unbind();
641 }
642
643 void setQuadratureOrders(unsigned int interior, unsigned int surface)
644 {
645 interiorQuadOrder_ = interior;
646 surfaceQuadOrder_ = surface;
647 }
648
649 void setQuadratureOrderFunctions( std::function<int(const int)> interiorOrder,
650 std::function<int(const int)> surfaceOrder )
651 {
652 defaultInteriorOrder_ = interiorOrder;
653 defaultSurfaceOrder_ = surfaceOrder;
654 }
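 // Illustrative sketch (added, not part of the original header): quadrature
 // orders are either fixed values or functions of the maximal polynomial
 // order p of the arguments (defaults: 2*p for interior, 2*p+1 for surface).
 // Assuming 'localOp' is a LocalGalerkinOperator instance:
 //
 //   localOp.setQuadratureOrders( 5, 6 );             // fixed interior/surface orders
 //   localOp.setQuadratureOrderFunctions(
 //       [] ( const int p ) { return 2*p + 2; },      // interior order rule
 //       [] ( const int p ) { return 2*p + 3; } );    // surface order rule
 //
 // Note that a fixed order of 0 means "use the order function" (see the
 // interiorQuadratureOrder / surfaceQuadratureOrder accessors below).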
655
656 IntegrandsType& model() const
657 {
658 return integrands();
659 }
660 bool nonlinear() const { return model().nonlinear(); }
661 bool hasInterior() const { return model().hasInterior(); }
662 bool hasSkeleton() const { return model().hasSkeleton(); }
663 bool hasBoundary() const { return model().hasBoundary(); }
664
665 private:
666 IntegrandsType& integrands() const
667 {
668 return integrands_;
669 }
670
671 public:
672 // accessors
673 const GridPartType &gridPart () const { return gridPart_; }
674
675 unsigned int interiorQuadratureOrder(unsigned int order) const { return interiorQuadOrder_ == 0 ? defaultInteriorOrder_(order) : interiorQuadOrder_; }
676 unsigned int surfaceQuadratureOrder(unsigned int order) const { return surfaceQuadOrder_ == 0 ? defaultSurfaceOrder_ (order) : surfaceQuadOrder_; }
677
678 protected:
679 template <class U>
680 int maxOrder( const U& u ) const
681 {
682 return CallOrder< U > :: order( u );
683 }
684
685 template< class U, class W >
686 int maxOrder( const U& u, const W& w ) const
687 {
688 return std::max( maxOrder( u ), maxOrder( w ) );
689 }
690
691 template< class U, class V, class W >
692 int maxOrder( const U& u, const V& v, const W& w ) const
693 {
694 return std::max( maxOrder( u, v ), maxOrder( w ) );
695 }
696
697 template< class U, class V, class W, class X >
698 int maxOrder( const U& u, const V& v, const W& w, const X& x ) const
699 {
700 return std::max( maxOrder( u, v ), maxOrder( w, x) );
701 }
702
703 protected:
704 const GridPartType &gridPart_;
705
706 mutable IntegrandsType integrands_;
707
708 mutable std::function<int(const int)> defaultInteriorOrder_;
709 mutable std::function<int(const int)> defaultSurfaceOrder_;
710
711 unsigned int interiorQuadOrder_;
712 unsigned int surfaceQuadOrder_;
713
714 mutable std::vector< RangeValueVectorType > rangeValues_;
715 mutable RangeValueVectorType values_;
716 mutable DomainValueVectorType phiIn_;
717 mutable DomainValueVectorType phiOut_;
718 mutable DomainValueVectorType basisValues_;
719 mutable DomainValueVectorType domainValues_;
720 };
721
722
723
724 // GalerkinOperator
725 // ----------------
726
727 template< class GridPart >
728 // Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
729 struct GalerkinOperator
730 {
731 typedef GridPart GridPartType;
732 typedef GalerkinOperator< GridPartType > ThisType;
733
734 typedef typename GridPartType::ctype ctype;
735 typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;
736
737 // constructor
738 explicit GalerkinOperator ( const GridPartType &gridPart )
739 : gridPart_( gridPart ),
740 gridSizeInterior_( 0 )
741 {
742 }
743
744 protected:
745 template <class IntegrandsTuple>
746 bool hasBoundary( const IntegrandsTuple& integrandsTuple ) const
747 {
748 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
749 bool hasBoundary = false ;
750 Hybrid::forEach( Indices(), [&integrandsTuple, &hasBoundary]( auto i ) {
751 if( std::get< i > (integrandsTuple).hasBoundary() )
752 {
753 hasBoundary = true ;
754 return ;
755 }
756 });
757 return hasBoundary;
758 }
759
760 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
761 void evaluateImpl ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
762 const IntegrandsTuple& integrandsTuple, Functor& addLocalDofs, std::integral_constant<bool, hasSkeleton> ) const
763 {
764 Dune::Fem::ConstLocalFunction< GridFunction > uInside( u );
765 Dune::Fem::ConstLocalFunction< GridFunction > uOutside( u );
766
767 typedef typename DiscreteFunction::DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
768 TemporaryLocalFunction< DiscreteFunctionSpaceType > wInside( w.space() ), wOutside( w.space() );
769
770 // element counter
771 gridSizeInterior_ = 0;
772
773 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
774
775 // true if one of the integrands has a boundary term
776 const bool hasBnd = hasBoundary( integrandsTuple );
777
778 const auto &indexSet = gridPart().indexSet();
779 const auto end = iterators.end();
780 for( auto it = iterators.begin(); it != end; ++it )
781 {
782 // assert( iterators.thread( *it ) == MPIManager::thread() );
783 const EntityType inside = *it ;
784
785 // increase counter for interior elements
786 ++gridSizeInterior_;
787
788 auto uGuard = bindGuard( uInside, inside );
789 auto wGuard = bindGuard( wInside, inside );
790 wInside.clear();
791
792 auto addInteriorIntegral = [&integrandsTuple, &uInside, &wInside]( auto i )
793 {
794 const auto& integrands = std::get< i >( integrandsTuple );
795 if( integrands.hasInterior() )
796 integrands.addInteriorIntegral( uInside, wInside );
797 };
798 // add interior integral of any integrands
799 Hybrid::forEach( Indices(), addInteriorIntegral );
800
801 if( hasSkeleton || (hasBnd && inside.hasBoundaryIntersections() ) )
802 {
803 for( const auto &intersection : intersections( gridPart(), inside ) )
804 {
805 bool neighbor = false;
806 if constexpr ( hasSkeleton )
807 {
808 // check the neighbor case first: on periodic boundaries both neighbor()
809 // and boundary() are true, so the neighbor case takes precedence
810 if( intersection.neighbor() )
811 {
812 neighbor = true;
813 const EntityType outside = intersection.outside();
814
815 if( outside.partitionType() != InteriorEntity )
816 {
817 auto uOutGuard = bindGuard( uOutside, outside );
818
819 auto addSkeletonIntegral = [&integrandsTuple, &intersection, &uInside, &uOutside, &wInside] ( auto i )
820 {
821 const auto& integrands = std::get< i >( integrandsTuple );
822 if( integrands.hasSkeleton() )
823 integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside );
824 };
825 // add skeleton integral of any integrands
826 Hybrid::forEach( Indices(), addSkeletonIntegral );
827 }
828 else if( indexSet.index( inside ) < indexSet.index( outside ) )
829 {
830 auto uOutGuard = bindGuard( uOutside, outside );
831 auto wOutGuard = bindGuard( wOutside, outside );
832 wOutside.clear();
833
834 auto addSkeletonIntegral = [&integrandsTuple, &intersection, &uInside, &uOutside, &wInside, &wOutside] ( auto i )
835 {
836 const auto& integrands = std::get< i >( integrandsTuple );
837 if( integrands.hasSkeleton() )
838 integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside, wOutside );
839 };
840 // add skeleton integral of any integrands
841 Hybrid::forEach( Indices(), addSkeletonIntegral );
842
843 // addLocalDofs calls w.addLocalDofs but also
844 // prevents race conditions in thread-parallel runs
845 addLocalDofs( outside, wOutside );
846 }
847 }
848 } // end skeleton
849
850 if( ! neighbor && intersection.boundary() )
851 {
852 auto addBoundaryIntegral = [&integrandsTuple, &intersection, &uInside, &wInside]( auto i )
853 {
854 const auto& integrands = std::get< i >( integrandsTuple );
855 if( integrands.hasBoundary() )
856 integrands.addBoundaryIntegral( intersection, uInside, wInside );
857 };
858 // add boundary integral of any integrands
859 Hybrid::forEach( Indices(), addBoundaryIntegral );
860 } // end boundary
861 }
862 } // end intersections
863
864 addLocalDofs( inside, wInside );
865 }
866 }
867
868 template <class Space>
869 struct InsideEntity
870 {
871 typedef typename Space::EntityType EntityType;
872 template <class Iterators>
873 InsideEntity(const Space &space, const Iterators& iterators)
874 : space_(space), dofThread_(space.size(),-1)
875 , thread_( iterators.thread() )
876 {
877 const auto& mapper = space_.blockMapper();
878 for (const auto &entity : space_)
879 {
880 int t=iterators.threadParallel(entity);
881 mapper.mapEach(entity, [ this, t ] ( int local, auto global )
882 { dofThread_[global] = (dofThread_[global]==t || dofThread_[global]==-1)?
883 t : -2 ; } ); // -2: shared dof
884 }
885 }
886 bool operator()(const EntityType &entity) const
887 {
888 bool needsLocking = false;
889 space_.blockMapper().mapEach(entity,
890 [ this, &needsLocking ] ( int local, auto global )
891 { needsLocking = (needsLocking || dofThread_[global]!=thread_); });
892 return !needsLocking;
893 }
894 const Space &space_;
895 std::vector<int> dofThread_;
896 int thread_;
897 };
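 // Note (added for clarity): InsideEntity records for every block dof the
 // thread that touches it, marking dofs reached from several threads with -2;
 // operator() then reports whether an entity only touches dofs of the current
 // thread, i.e. whether its contributions can be added without an exclusive lock.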
898
899 template <class DiscreteFunction>
900 struct AddLocalEvaluate
901 {
902 AddLocalEvaluate(DiscreteFunction &w)
903 : w_(w) {}
904 template <class LocalDofs>
905 void operator () (const EntityType& entity, const LocalDofs& wLocal ) const
906 {
907 w_.addLocalDofs( entity, wLocal.localDofVector() );
908 }
909 DiscreteFunction &w_;
910 };
911
912 template <class DiscreteFunction>
913 struct AddLocalEvaluateLocked : public AddLocalEvaluate<DiscreteFunction>
914 {
915 typedef AddLocalEvaluate<DiscreteFunction> BaseType;
916
917 std::shared_mutex& mutex_;
918 InsideEntity<typename DiscreteFunction::DiscreteFunctionSpaceType> inside_;
919
920 template <class Iterators>
921 AddLocalEvaluateLocked(DiscreteFunction &w, std::shared_mutex& mtx, const Iterators &iterators)
922 : BaseType(w), mutex_(mtx), inside_(w.space(),iterators) {}
923
924 template <class LocalDofs>
925 void operator () (const EntityType& entity, const LocalDofs& wLocal ) const
926 {
927 // call addLocalDofs on w
928 if (inside_(entity))
929 {
930 std::shared_lock<std::shared_mutex> guard ( mutex_ );
931 BaseType::operator()( entity, wLocal );
932 }
933 else
934 {
935 // lock mutex (unlock on destruction)
936 std::lock_guard<std::shared_mutex> guard ( mutex_ );
937 BaseType::operator()( entity, wLocal );
938 }
939 }
940 };
941
942 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor >
943 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
944 const IntegrandsTuple& integrandsTuple, Functor& addLocalDofs ) const
945 {
946 static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
947 static_assert( std::is_same< typename DiscreteFunction::GridPartType, GridPartType >::value, "Argument 'w' and Integrands must be defined on the same grid part." );
948
949 if( hasSkeleton( integrandsTuple ) )
950 evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::true_type() );
951 else
952 evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::false_type() );
953 }
954
955 public:
956 template <class IntegrandsTuple>
957 bool hasSkeleton( const IntegrandsTuple& integrandsTuple ) const
958 {
959 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
960 bool hasSkeleton = false ;
961 Hybrid::forEach( Indices(), [&integrandsTuple, &hasSkeleton] ( auto i ) {
962 if( std::get< i >( integrandsTuple ).hasSkeleton() )
963 {
964 hasSkeleton = true;
965 return ;
966 }
967 });
968 return hasSkeleton ;
969 }
970
971 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
972 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
973 const IntegrandsTuple& integrandsTuple, std::shared_mutex& mtx ) const
974 {
975 AddLocalEvaluateLocked<DiscreteFunction> addLocalEvaluate(w,mtx,iterators);
976 evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
977 }
978
979 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
980 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators, const IntegrandsTuple& integrandsTuple ) const
981 {
982 AddLocalEvaluate<DiscreteFunction> addLocalEvaluate(w);
983 evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
984 }
985
986 protected:
987 template<class T, int length>
988 class FiniteStack
989 {
990 public :
991 // Makes empty stack
992 FiniteStack () : _f(0) {}
993
994 // Returns true if the stack is empty
995 bool empty () const { return _f <= 0; }
996
997 // Returns true if the stack is full
998 bool full () const { return (_f >= length); }
999
1000 // clear stack
1001 void clear() { _f = 0; }
1002
1003 // Puts a new object onto the stack
1004 void push (const T& t)
1005 {
1006 assert ( _f < length );
1007 _s[_f++] = t;
1008 }
1009
1010 // Removes and returns the uppermost object from the stack
1011 T pop () {
1012 assert ( _f > 0 );
1013 return _s[--_f];
1014 }
1015
1016 // Returns the uppermost object on the stack
1017 T top () const {
1018 assert ( _f > 0 );
1019 return _s[_f-1];
1020 }
1021
1022 // stacksize
1023 int size () const { return _f; }
1024
1025 private:
1026 T _s[length]; // the stack
1027 int _f; // actual position in stack
1028 };
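 // Illustrative sketch (added, not part of the original header): FiniteStack
 // is a plain fixed-capacity LIFO used below to recycle temporary local
 // matrices, e.g.
 //
 //   FiniteStack< int, 4 > stack;
 //   stack.push( 1 );
 //   stack.push( 2 );
 //   int t = stack.top();   // t == 2, stack unchanged
 //   int v = stack.pop();   // v == 2, size() == 1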
1029
1030
1031 template <class JacobianOperator>
1032 struct AddLocalAssemble
1033 {
1034 typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
1035 typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;
1036 typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
1037 JacobianOperator &jOp_;
1038 std::vector< TemporaryLocalMatrixType > jOpLocal_;
1039
1040 FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFinalized_;
1041 FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFree_;
1042
1043 std::size_t locked, notLocked, timesLocked;
1044 AddLocalAssemble(JacobianOperator& jOp)
1045 : jOp_(jOp)
1046 , jOpLocal_(12, TemporaryLocalMatrixType(jOp_.domainSpace(), jOp_.rangeSpace()))
1047 , jOpLocalFinalized_()
1048 , jOpLocalFree_()
1049 , locked(0), notLocked(0), timesLocked(0)
1050 {
1051 for( auto& jOpLocal : jOpLocal_ )
1052 jOpLocalFree_.push( &jOpLocal );
1053 }
1054
1055 TemporaryLocalMatrixType& bind(const EntityType& dE, const EntityType& rE)
1056 {
1057 assert( ! jOpLocalFree_.empty() );
1058 TemporaryLocalMatrixType& lop = *(jOpLocalFree_.pop());
1059 lop.bind(dE,rE);
1060 lop.clear();
1061 return lop;
1062 }
1063
1064 void unbind(TemporaryLocalMatrixType &lop)
1065 {
1066 notLocked += 1;
1067 jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
1068 lop.unbind();
1069 jOpLocalFree_.push( &lop );
1070 }
1071
1072 void finalize()
1073 {
1074 locked += jOpLocalFinalized_.size();
1075 while ( ! jOpLocalFinalized_.empty() )
1076 {
1077 TemporaryLocalMatrixType &lop = *(jOpLocalFinalized_.pop());
1078 jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
1079 lop.unbind();
1080 jOpLocalFree_.push( &lop );
1081 }
1082 }
1083 };
1084
1085 template <class JacobianOperator>
1086 struct AddLocalAssembleLocked : public AddLocalAssemble<JacobianOperator>
1087 {
1088 typedef AddLocalAssemble<JacobianOperator> BaseType;
1089 typedef typename BaseType::TemporaryLocalMatrixType TemporaryLocalMatrixType;
1090 using BaseType::jOpLocalFinalized_;
1091 using BaseType::jOpLocalFree_;
1092
1093 std::shared_mutex& mutex_;
1094 InsideEntity<typename JacobianOperator::DomainSpaceType> insideDomain_;
1095 InsideEntity<typename JacobianOperator::RangeSpaceType> insideRange_;
1096
1097 template <class Iterators>
1098 AddLocalAssembleLocked(JacobianOperator &jOp, std::shared_mutex &mtx, const Iterators &iterators)
1099 : BaseType(jOp)
1100 , mutex_(mtx)
1101 , insideDomain_(jOp.domainSpace(),iterators)
1102 , insideRange_(jOp.rangeSpace(),iterators)
1103 {}
1104
1105 void finalize()
1106 {
1107 // lock mutex (unlock on destruction)
1108 ++BaseType::timesLocked;
1109 std::lock_guard<std::shared_mutex> guard ( mutex_ );
1110 BaseType::finalize();
1111 }
1112
1113 TemporaryLocalMatrixType& bind(const EntityType& dE, const EntityType& rE)
1114 {
1115 if ( jOpLocalFree_.empty() )
1116 {
1117 finalize();
1118 }
1119 return BaseType::bind(dE,rE);
1120 }
1121
1122 void unbind(TemporaryLocalMatrixType &lop)
1123 {
1124 /* // always lock
1125 ++BaseType::timesLocked;
1126 ++BaseType::locked;
1127 std::lock_guard guard ( mutex_ );
1128 BaseType::unbind(lop);
1129 return;
1130 */
1131 if ( insideDomain_(lop.domainEntity()) &&
1132 insideRange_(lop.rangeEntity()) )
1133 {
1134 std::shared_lock<std::shared_mutex> guard ( mutex_ );
1135 BaseType::unbind(lop);
1136 }
1137 else
1138 {
1139 jOpLocalFinalized_.push( &lop );
1140 }
1141 }
1142 };
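 // Note (added for clarity): AddLocalAssembleLocked adds a local matrix under
 // a shared lock when all its dofs belong to the current thread; otherwise the
 // local matrix is parked on jOpLocalFinalized_ and flushed later in finalize()
 // under an exclusive lock (finalize is also triggered when the pool of free
 // local matrices runs empty in bind()).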
1143
1144 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
1145 void assembleImpl ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators, const IntegrandsTuple& integrandsTuple,
1146 Functor& addLocalMatrix, std::integral_constant<bool, hasSkeleton> ) const
1147 {
1148 typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
1149 typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;
1150
1151 typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
1152
1153 Dune::Fem::ConstLocalFunction< GridFunction > uIn( u );
1154 Dune::Fem::ConstLocalFunction< GridFunction > uOut( u );
1155
1156 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
1157 const std::size_t maxNumLocalDofs = jOp.domainSpace().blockMapper().maxNumDofs() * jOp.domainSpace().localBlockSize;
1158
1159 // initialize local temporary data
1160 Hybrid::forEach( Indices(), [&integrandsTuple, &maxNumLocalDofs] ( auto i ) {
1161 const auto& integrands = std::get< i >( integrandsTuple );
1162 integrands.prepare( maxNumLocalDofs );
1163 });
1164
1165 // element counter
1166 gridSizeInterior_ = 0;
1167
1168 // true if one of the integrands has a boundary term
1169 const bool hasBnd = hasBoundary( integrandsTuple );
1170
1171 const auto &indexSet = gridPart().indexSet();
1172 // threaded iterators provided from outside
1173 const auto end = iterators.end();
1174 for( auto it = iterators.begin(); it != end; ++it )
1175 {
1176 // increase counter for interior elements
1177 ++gridSizeInterior_;
1178
1179 const EntityType inside = *it;
1180
1181 auto uiGuard = bindGuard( uIn, inside );
1182
1183 TemporaryLocalMatrixType& jOpInIn = addLocalMatrix.bind( inside, inside );
1184 auto addLinearizedInteriorIntegral = [&integrandsTuple, &uIn, &jOpInIn]( auto i )
1185 {
1186 const auto& integrands = std::get< i >( integrandsTuple );
1187 if( integrands.hasInterior() )
1188 integrands.addLinearizedInteriorIntegral( uIn, jOpInIn );
1189 };
1190 // add interior integral of any integrands
1191 Hybrid::forEach( Indices(), addLinearizedInteriorIntegral );
1192
1193 if( hasSkeleton || (hasBnd && inside.hasBoundaryIntersections() ) )
1194 {
1195 for( const auto &intersection : intersections( gridPart(), inside ) )
1196 {
1197 bool neighbor = false ;
1198 // check the neighbor case first: on periodic boundaries both neighbor()
1199 // and boundary() are true, so the neighbor case takes precedence
1200 if constexpr ( hasSkeleton )
1201 {
1202 if( intersection.neighbor() )
1203 {
1204 neighbor = true ;
1205 const EntityType &outside = intersection.outside();
1206
1207 TemporaryLocalMatrixType &jOpOutIn = addLocalMatrix.bind( outside, inside );
1208
1209 auto uoGuard = bindGuard( uOut, outside );
1210
1211 if( outside.partitionType() != InteriorEntity )
1212 {
1213 auto addLinearizedSkeletonIntegral = [&integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn]( auto i )
1214 {
1215 const auto& integrands = std::get< i >( integrandsTuple );
1216 if( integrands.hasSkeleton() )
1217 integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn );
1218 };
1219 // add skeleton integral of any integrands
1220 Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );
1221 }
1222 else if( indexSet.index( inside ) < indexSet.index( outside ) )
1223 {
1224 TemporaryLocalMatrixType &jOpInOut = addLocalMatrix.bind( inside, outside );
1225 TemporaryLocalMatrixType &jOpOutOut = addLocalMatrix.bind( outside, outside );
1226
1227 auto addLinearizedSkeletonIntegral = [&integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn, &jOpInOut, &jOpOutOut]( auto i )
1228 {
1229 const auto& integrands = std::get< i >( integrandsTuple );
1230 if( integrands.hasSkeleton() )
1231 integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn, jOpInOut, jOpOutOut );
1232 };
1233 // add skeleton integral of any integrands
1234 Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );
1235
1236 addLocalMatrix.unbind(jOpInOut);
1237 addLocalMatrix.unbind(jOpOutOut);
1238 }
1239
1240 addLocalMatrix.unbind(jOpOutIn);
1241 }
1242 } // end skeleton
1243
1244 if( !neighbor && intersection.boundary() )
1245 {
1246 auto addLinearizedBoundaryIntegral = [&integrandsTuple, &intersection, &uIn, &jOpInIn]( auto i )
1247 {
1248 const auto& integrands = std::get< i >( integrandsTuple );
1249 if( integrands.hasBoundary() )
1250 integrands.addLinearizedBoundaryIntegral( intersection, uIn, jOpInIn );
1251 };
1252 // add boundary integral of any integrands
1253 Hybrid::forEach( Indices(), addLinearizedBoundaryIntegral );
1254
1255 } // end boundary
1256 }
1257 } // end intersection
1258 addLocalMatrix.unbind(jOpInIn);
1259 }
1260
1261 // complete the matrix build
1262 addLocalMatrix.finalize();
1263 }
1264
1265
1266 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor >
1267 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators,
1268 const IntegrandsTuple& integrandsTuple, Functor& addLocalMatrix, int ) const
1269 {
1270 static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
1271 static_assert( std::is_same< typename JacobianOperator::DomainSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );
1272 static_assert( std::is_same< typename JacobianOperator::RangeSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );
1273
1274 if( hasSkeleton( integrandsTuple ) )
1275 assembleImpl( u, jOp, iterators, integrandsTuple ,addLocalMatrix, std::true_type() );
1276 else
1277 assembleImpl( u, jOp, iterators, integrandsTuple, addLocalMatrix, std::false_type() );
1278 }
1279
1280 public:
1281 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple>
1282 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators,
1283 const IntegrandsTuple& integrandsTuple, std::shared_mutex& mtx) const
1284 {
1285 AddLocalAssembleLocked<JacobianOperator> addLocalAssemble( jOp, mtx, iterators);
1286 assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
1287 #if 0 // print information about how many times a lock was used during assemble
1288 std::lock_guard guard ( mtx );
1289 std::cout << MPIManager::thread() << " : "
1290 << addLocalAssemble.locked << " " << addLocalAssemble.notLocked << " "
1291 << addLocalAssemble.timesLocked << std::endl;
1292 #endif
1293 }
1294
1295 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple>
1296 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators, const IntegrandsTuple& integrandsTuple ) const
1297 {
1298 AddLocalAssemble<JacobianOperator> addLocalAssemble(jOp);
1299 assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
1300 }
1301
1302 // accessors
1303 const GridPartType &gridPart () const { return gridPart_; }
1304
1305 std::size_t gridSizeInterior () const { return gridSizeInterior_; }
1306
1307 protected:
1308 const GridPartType &gridPart_;
1309 mutable std::size_t gridSizeInterior_;
1310 };
1311
1312
1313 template <class GalerkinOperator >
1314 static std::size_t accumulateGridSize( const ThreadSafeValue< GalerkinOperator >& ops )
1315 {
1316 std::size_t s = ops.size();
1317 std::size_t sum = 0;
1318 for( std::size_t i=0; i<s; ++i )
1319 sum += ops[ i ].gridSizeInterior();
1320 return sum;
1321 }
1322
1323 } // namespace Impl
1324
1327
1328
1329 // GalerkinOperator
1330 // ----------------
1331
1332 template< class Integrands, class DomainFunction, class RangeFunction = DomainFunction >
1333 struct GalerkinOperator
1334 : public virtual Operator< DomainFunction, RangeFunction >
1335 {
1336 typedef DomainFunction DomainFunctionType;
1337 typedef RangeFunction RangeFunctionType;
1338
1339 typedef typename RangeFunctionType::GridPartType GridPartType;
1340
1341 typedef Impl::LocalGalerkinOperator< Integrands > LocalGalerkinOperatorImplType;
1342 typedef Impl::GalerkinOperator< GridPartType > GalerkinOperatorImplType;
1343
1344 static_assert( std::is_same< typename DomainFunctionType::GridPartType, typename RangeFunctionType::GridPartType >::value, "DomainFunction and RangeFunction must be defined on the same grid part." );
1345
1346 typedef ThreadIterator< GridPartType > ThreadIteratorType;
1347
1348 template< class... Args >
1349 explicit GalerkinOperator ( const GridPartType &gridPart, Args &&... args )
1350 : iterators_( gridPart ),
1351 opImpl_( gridPart ),
1352 localOp_( gridPart, std::forward< Args >( args )... ),
1353 gridSizeInterior_( 0 ),
1354 communicate_( true )
1355 {
1356 }
1357
1358 void setCommunicate( const bool communicate )
1359 {
1360 communicate_ = communicate;
1361 if( ! communicate_ && Dune::Fem::Parameter::verbose() )
1362 {
1363 std::cout << "GalerkinOperator::setCommunicate: communicate was disabled!" << std::endl;
1364 }
1365 }
1366
1367 void setQuadratureOrders(unsigned int interior, unsigned int surface)
1368 {
1369 size_t size = localOp_.size();
1370 for( size_t i=0; i<size; ++i )
1371 localOp_[ i ].setQuadratureOrders(interior,surface);
1372 }
1373
1374 virtual bool nonlinear() const final override
1375 {
1376 return localOperator().nonlinear();
1377 }
1378
1379 virtual void operator() ( const DomainFunctionType &u, RangeFunctionType &w ) const final override
1380 {
1381 evaluate( u, w );
1382 }
1383
1384 template< class GridFunction >
1385 void operator() ( const GridFunction &u, RangeFunctionType &w ) const
1386 {
1387 evaluate( u, w );
1388 }
1389
1390 const GridPartType &gridPart () const { return op().gridPart(); }
1391
1392 typedef Integrands ModelType;
1393 typedef Integrands DirichletModelType;
1394 ModelType &model() const { return localOperator().model(); }
1395
1396 [[deprecated("Use localOperator instead!")]]
1397 const LocalGalerkinOperatorImplType& impl() const { return localOperator(); }
1398
1400 const LocalGalerkinOperatorImplType& localOperator() const { return *localOp_; }
1401
1402 std::size_t gridSizeInterior () const { return gridSizeInterior_; }
1403
1404 protected:
1406 const GalerkinOperatorImplType& op() const { return *opImpl_; }
1407
1408 template < class GridFunction >
1409 void evaluate( const GridFunction &u, RangeFunctionType &w ) const
1410 {
1411 iterators_.update();
1412 w.clear();
1413
1414 std::shared_mutex mutex;
1415
1416 auto doEval = [this, &u, &w, &mutex] ()
1417 {
1418 // TODO: Move this to be a class variable
1419 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1420 this->op().evaluate( u, w, this->iterators_, integrands, mutex );
1421 };
1422
1423 try {
1424 // execute in parallel
1425 MPIManager :: run ( doEval );
1426
1427 // update number of interior elements as sum over threads
1428 gridSizeInterior_ = Impl::accumulateGridSize( opImpl_ );
1429 }
1430 catch ( const SingleThreadModeError& e )
1431 {
1432 // reset w from previous entries
1433 w.clear();
1434 // re-run in single thread mode if previous attempt failed
1435 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1436 op().evaluate( u, w, iterators_, integrands );
1437
1438 // update number of interior elements (single-threaded fallback)
1439 gridSizeInterior_ = op().gridSizeInterior();
1440 }
1441
1442 // synchronize result
1443 if( communicate_ )
1444 w.communicate();
1445 }
1446
1447 mutable ThreadIteratorType iterators_;
1448 mutable ThreadSafeValue< GalerkinOperatorImplType > opImpl_;
1449 mutable ThreadSafeValue< LocalGalerkinOperatorImplType > localOp_;
1450
1451 mutable std::size_t gridSizeInterior_;
1452 bool communicate_;
1453 };
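 // Illustrative sketch (assumption, added, not part of the original header):
 // typical use of GalerkinOperator as the nonlinear operator w = L[u], where
 // 'integrands' is a model fulfilling the integrands interface and u, w are
 // discrete functions over the same grid part:
 //
 //   GalerkinOperator< Integrands, DiscreteFunctionType > op( gridPart, integrands );
 //   op( u, w );                      // evaluate w = L[u]
 //   const bool nl = op.nonlinear();  // query whether the model is nonlinear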
1454
1455
1456
1457 // DifferentiableGalerkinOperator
1458 // ------------------------------
1459
1460 template< class Integrands, class JacobianOperator >
1461 class DifferentiableGalerkinOperator
1462 : public GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType >,
1463 public DifferentiableOperator< JacobianOperator >
1464 {
1465 typedef GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType > BaseType;
1466
1467 typedef typename BaseType :: LocalGalerkinOperatorImplType LocalGalerkinOperatorImplType;
1468 public:
1469 typedef JacobianOperator JacobianOperatorType;
1470
1471 typedef typename BaseType::DomainFunctionType DomainFunctionType;
1472 typedef typename BaseType::RangeFunctionType RangeFunctionType;
1473 typedef typename DomainFunctionType::DiscreteFunctionSpaceType DomainDiscreteFunctionSpaceType;
1474 typedef typename RangeFunctionType::DiscreteFunctionSpaceType RangeDiscreteFunctionSpaceType;
1475
1476 typedef DiagonalAndNeighborStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalAndNeighborStencilType;
1477 typedef DiagonalStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalStencilType;
1478
1479 typedef typename BaseType::GridPartType GridPartType;
1480
1481 template< class... Args >
1482 explicit DifferentiableGalerkinOperator ( const DomainDiscreteFunctionSpaceType &dSpace,
1483 const RangeDiscreteFunctionSpaceType &rSpace,
1484 Args &&... args )
1485 : BaseType( rSpace.gridPart(), std::forward< Args >( args )... ),
1486 dSpace_(dSpace), rSpace_(rSpace),
1487 domainSpaceSequence_(dSpace.sequence()),
1488 rangeSpaceSequence_(rSpace.sequence()),
1489 stencilDAN_(), stencilD_()
1490 {
1491 if( hasSkeleton() )
1492 stencilDAN_.reset( new DiagonalAndNeighborStencilType( dSpace_, rSpace_ ) );
1493 else
1494 stencilD_.reset( new DiagonalStencilType( dSpace_, rSpace_ ) );
1495 }
1496
1497 virtual void jacobian ( const DomainFunctionType &u, JacobianOperatorType &jOp ) const final override
1498 {
1499 assemble( u, jOp );
1500 }
1501
1502 template< class GridFunction >
1503 void jacobian ( const GridFunction &u, JacobianOperatorType &jOp ) const
1504 {
1505 assemble( u, jOp );
1506 }
1507
1508 const DomainDiscreteFunctionSpaceType& domainSpace() const
1509 {
1510 return dSpace_;
1511 }
1512 const RangeDiscreteFunctionSpaceType& rangeSpace() const
1513 {
1514 return rSpace_;
1515 }
1516
1517 using BaseType::localOperator;
1518 using BaseType::nonlinear;
1519
1520 protected:
1521 using BaseType::op;
1522
1523 bool hasSkeleton() const
1524 {
1525 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1526 return op().hasSkeleton( integrands );
1527 }
1528
1529 void prepare( JacobianOperatorType& jOp ) const
1530 {
1531 if ( domainSpaceSequence_ != domainSpace().sequence()
1532 || rangeSpaceSequence_ != rangeSpace().sequence() )
1533 {
1534 domainSpaceSequence_ = domainSpace().sequence();
1535 rangeSpaceSequence_ = rangeSpace().sequence();
1536 if( hasSkeleton() )
1537 {
1538 assert( stencilDAN_ );
1539 stencilDAN_->update();
1540 }
1541 else
1542 {
1543 assert( stencilD_ );
1544 stencilD_->update();
1545 }
1546 }
1547 if( hasSkeleton() )
1548 jOp.reserve( *stencilDAN_ );
1549 else
1550 jOp.reserve( *stencilD_ );
1551 // set all entries to zero
1552 jOp.clear();
1553 }
1554
1555 template < class GridFunction >
1556 void assemble( const GridFunction &u, JacobianOperatorType &jOp ) const
1557 {
1558 // reserve memory and clear entries
1559 {
1560 prepare( jOp );
1561 iterators_.update();
1562 }
1563
1564 std::shared_mutex mutex;
1565
1566 auto doAssemble = [this, &u, &jOp, &mutex] ()
1567 {
1568 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1569 this->op().assemble( u, jOp, this->iterators_, integrands, mutex );
1570 };
1571
1572 try {
1573 // execute in parallel
1574 MPIManager :: run ( doAssemble );
1575
1576 // update number of interior elements as sum over threads
1577 gridSizeInterior_ = Impl::accumulateGridSize( this->opImpl_ );
1578 }
1579 catch ( const SingleThreadModeError& e )
1580 {
1581 // re-run the assembly in single-thread mode since the parallel attempt failed
1582 jOp.clear();
1583 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1584 op().assemble( u, jOp, iterators_, integrands );
1585 // update number of interior elements (single-thread run)
1586 gridSizeInterior_ = op().gridSizeInterior();
1587 }
1588
1589 // note: the assembly does not use local contribution objects, so we
1590 // need to call flushAssembly() explicitly
1591 jOp.flushAssembly();
1592 }
1593
1594 using BaseType::iterators_;
1595 using BaseType::gridSizeInterior_;
1596
1597 const DomainDiscreteFunctionSpaceType &dSpace_;
1598 const RangeDiscreteFunctionSpaceType &rSpace_;
1599
1600 mutable int domainSpaceSequence_, rangeSpaceSequence_;
1601
1602 mutable std::unique_ptr< DiagonalAndNeighborStencilType > stencilDAN_;
1603 mutable std::unique_ptr< DiagonalStencilType > stencilD_;
1604 };
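/* Illustrative usage sketch (not part of this header): MyIntegrands, DiscreteFunctionType,
 * dSpace, rSpace, integrands and u are hypothetical placeholders; the sparse-row linear
 * operator is only one possible choice, and the constructor signature of the linear
 * operator may differ between implementations.
 * \code
 * typedef Dune::Fem::SparseRowLinearOperator< DiscreteFunctionType, DiscreteFunctionType > LinearOperatorType;
 * typedef Dune::Fem::DifferentiableGalerkinOperator< MyIntegrands, LinearOperatorType > DifferentiableOperatorType;
 *
 * DifferentiableOperatorType diffOp( dSpace, rSpace, integrands );
 * LinearOperatorType jOp( "jacobian", dSpace, rSpace );
 * diffOp.jacobian( u, jOp );  // assemble the linearization of the operator around u
 * \endcode
 */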
1605
1606
1607
1608 // AutomaticDifferenceGalerkinOperator
1609 // -----------------------------------
1610
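// In this variant no Jacobian matrix is assembled; the AutomaticDifferenceOperator
// base class approximates Jacobian-vector products by finite differences of
// operator applications.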
1611 template< class Integrands, class DomainFunction, class RangeFunction >
1612 class AutomaticDifferenceGalerkinOperator
1613 : public GalerkinOperator< Integrands, DomainFunction, RangeFunction >,
1614 public AutomaticDifferenceOperator< DomainFunction, RangeFunction >
1615 {
1616 typedef GalerkinOperator< Integrands, DomainFunction, RangeFunction > BaseType;
1617 typedef AutomaticDifferenceOperator< DomainFunction, RangeFunction > AutomaticDifferenceOperatorType;
1618
1619 public:
1620 typedef typename BaseType::GridPartType GridPartType;
1621
1622 template< class... Args >
1623 explicit AutomaticDifferenceGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
1624 : BaseType( gridPart, std::forward< Args >( args )... ), AutomaticDifferenceOperatorType()
1625 {}
1626 };
1627
1628
1629
1630 // ModelDifferentiableGalerkinOperator
1631 // -----------------------------------
1632
1633 template < class LinearOperator, class ModelIntegrands >
1634 struct ModelDifferentiableGalerkinOperator
1635 : public DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator >
1636 {
1637 typedef DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator > BaseType;
1638
1639 typedef typename ModelIntegrands::ModelType ModelType;
1640
1641 typedef typename LinearOperator::DomainFunctionType RangeFunctionType;
1642 typedef typename LinearOperator::RangeSpaceType DiscreteFunctionSpaceType;
1643
1644 ModelDifferentiableGalerkinOperator ( ModelType &model, const DiscreteFunctionSpaceType &dfSpace )
1645 : BaseType( dfSpace.gridPart(), model )
1646 {}
1647
1648 template< class GridFunction >
1649 void apply ( const GridFunction &u, RangeFunctionType &w ) const
1650 {
1651 (*this)( u, w );
1652 }
1653
1654 template< class GridFunction >
1655 void apply ( const GridFunction &u, LinearOperator &jOp ) const
1656 {
1657 (*this).jacobian( u, jOp );
1658 }
1659 };
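/* Illustrative usage sketch (not part of this header): ModelIntegrandsType, LinearOperatorType,
 * model, space, u, w and jOp are hypothetical placeholders.
 * \code
 * typedef Dune::Fem::ModelDifferentiableGalerkinOperator< LinearOperatorType, ModelIntegrandsType > ModelOperatorType;
 *
 * ModelOperatorType modelOp( model, space );
 * modelOp.apply( u, w );    // evaluate the operator: w = L[u]
 * modelOp.apply( u, jOp );  // assemble the Jacobian of L around u into the linear operator
 * \endcode
 */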
1660
1661 namespace Impl
1662 {
1663
1664 // GalerkinSchemeTraits and GalerkinSchemeImpl
1665 // -------------------------------------------
1666 template< class Integrands, class LinearOperator, bool addDirichletBC,
1667 template <class,class> class DifferentiableGalerkinOperatorImpl >
1668 struct GalerkinSchemeTraits
1669 {
1670 template <class O, bool addDBC>
1671 struct DirichletBlockSelector { using type = void; };
1672 template <class O>
1673 struct DirichletBlockSelector<O,true> { using type = typename O::DirichletBlockVector; };
1674
1675 using DifferentiableOperatorType = std::conditional_t< addDirichletBC,
1676 DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1677 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator > >;
1678 using DirichletBlockVector = typename DirichletBlockSelector<
1679 DirichletWrapperOperator<
1680 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1681 addDirichletBC>::type;
1682
1683 typedef DifferentiableOperatorType type;
1684 };
1685
1686 template< class Integrands, class LinearOperator, class LinearInverseOperator, bool addDirichletBC,
1687 template <class,class> class DifferentiableGalerkinOperatorImpl = DifferentiableGalerkinOperator >
1688 struct GalerkinSchemeImpl : public FemScheme< typename
1689 GalerkinSchemeTraits< Integrands, LinearOperator,
1690 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1691 LinearInverseOperator > // LinearInverseOperator
1692 {
1693 typedef FemScheme< typename GalerkinSchemeTraits< Integrands, LinearOperator,
1694 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1695 LinearInverseOperator > // LinearInverseOperator
1696 BaseType;
1697
1698 typedef typename BaseType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
1699
1700 GalerkinSchemeImpl ( const DiscreteFunctionSpaceType &dfSpace,
1701 const Integrands &integrands,
1702 const ParameterReader& parameter = Parameter::container() )
1703 : BaseType(dfSpace,
1704 parameter,
1705 std::move(integrands))
1706 {}
1707 };
1708
1709 } // end namespace Impl
1710
1711 // GalerkinScheme
1712 // --------------
1713
1714 template< class Integrands, class LinearOperator, class InverseOperator, bool addDirichletBC >
1715 using GalerkinScheme = Impl::GalerkinSchemeImpl< Integrands, LinearOperator, InverseOperator, addDirichletBC,
1716 DifferentiableGalerkinOperator >;
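/* Illustrative usage sketch (not part of this header): MyIntegrands, DiscreteFunctionType,
 * space, integrands and uh are hypothetical placeholders, and the linear/inverse operator
 * choices are only examples; the solving interface itself comes from the FemScheme base class.
 * \code
 * typedef Dune::Fem::SparseRowLinearOperator< DiscreteFunctionType, DiscreteFunctionType > LinearOperatorType;
 * typedef Dune::Fem::CGInverseOperator< DiscreteFunctionType > InverseOperatorType;
 * typedef Dune::Fem::GalerkinScheme< MyIntegrands, LinearOperatorType, InverseOperatorType, false > SchemeType;
 *
 * SchemeType scheme( space, integrands );
 * scheme.solve( uh );  // assuming FemScheme provides a solve() taking the solution function
 * \endcode
 */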
1717
1718 } // namespace Fem
1719
1720} // namespace Dune
1721
1722#endif // #ifndef DUNE_FEM_SCHEMES_GALERKIN_HH