DUNE-FEM (unstable)

galerkin.hh
1#ifndef DUNE_FEM_SCHEMES_GALERKIN_HH
2#define DUNE_FEM_SCHEMES_GALERKIN_HH
3
4#include <cstddef>
5
6#include <tuple>
7#include <type_traits>
8#include <utility>
9#include <shared_mutex>
10#include <vector>
11#include <memory>
12
13#include <dune/common/hybridutilities.hh>
14#include <dune/common/timer.hh>
15
16#include <dune/grid/common/rangegenerators.hh>
17
18#include <dune/fem/function/localfunction/temporary.hh>
19#include <dune/fem/io/parameter/reader.hh>
20#include <dune/fem/operator/common/automaticdifferenceoperator.hh>
21#include <dune/fem/operator/common/differentiableoperator.hh>
22#include <dune/fem/operator/common/operator.hh>
23#include <dune/fem/operator/common/stencil.hh>
24#include <dune/fem/operator/common/temporarylocalmatrix.hh>
25#include <dune/fem/quadrature/cachingquadrature.hh>
26#include <dune/fem/quadrature/intersectionquadrature.hh>
27#include <dune/fem/common/bindguard.hh>
28
29#include <dune/fem/misc/threads/threaditerator.hh>
30#include <dune/fem/misc/threads/threadsafevalue.hh>
31
32#include <dune/fem/operator/common/localmatrixcolumn.hh>
33#include <dune/fem/operator/common/localcontribution.hh>
34#include <dune/fem/operator/1order/localmassmatrix.hh>
35#include <dune/fem/schemes/integrands.hh>
36#include <dune/fem/schemes/dirichletwrapper.hh>
37#include <dune/fem/schemes/femscheme.hh>
38
39#include <dune/fem/space/common/capabilities.hh>
40
41// fempy includes
42#include <dune/fempy/quadrature/fempyquadratures.hh>
43
44namespace Dune
45{
46
47 namespace Fem
48 {
49
50 namespace Impl
51 {
52 template <class M>
53 class CallOrder
54 {
55
56 template <class F>
57 static int callOrder(const F& f, char)
58 {
59#ifndef NDEBUG
60 std::cerr << "WARNING: no order method available on " << typeid(F).name() << ", defaulting to 1!" << std::endl;
61#endif
62 return 1;
63 }
64
65 template <class F>
66 static auto callOrder(const F& f, int) -> decltype( f.order() )
67 {
68 return f.order();
69 }
70
71 public:
72 template <class F>
73 static int order (const F& f ) { return callOrder(f, 0); }
74 };
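 // CallOrder probes for an order() method via overload resolution: the int
 // overload is preferred whenever decltype( f.order() ) is well formed,
 // otherwise the char overload is chosen and the order defaults to 1 (with a
 // warning in debug builds). A minimal sketch with hypothetical types, not
 // part of this header:
 //
 //   struct WithOrder    { int order () const { return 3; } };
 //   struct WithoutOrder {};
 //
 //   int a = CallOrder< WithOrder    >::order( WithOrder{} );    // 3
 //   int b = CallOrder< WithoutOrder >::order( WithoutOrder{} ); // 1, warning unless NDEBUG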
75
76 // GalerkinOperator
77 // ----------------
78
79 template <class Space>
80 struct DefaultGalerkinOperatorQuadratureSelector
81 {
82 typedef typename Space :: GridPartType GridPartType;
83 typedef CachingQuadrature< GridPartType, 0, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > InteriorQuadratureType;
84 typedef CachingQuadrature< GridPartType, 1, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > SurfaceQuadratureType;
85 // typedef CachingQuadrature< GridPartType, 0, Dune::FemPy::FempyQuadratureTraits > InteriorQuadratureType;
86 // typedef CachingQuadrature< GridPartType, 1, Dune::FemPy::FempyQuadratureTraits > SurfaceQuadratureType;
87 };
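 // A quadrature selector only has to export the InteriorQuadratureType and
 // SurfaceQuadratureType typedefs; an alternative selector, e.g. based on the
 // FempyQuadratureTraits variant commented out above, can be passed to
 // LocalGalerkinOperator through its QuadSelector template parameter. A hedged
 // sketch (MyIntegrands is a placeholder), not part of this header:
 //
 //   template< class Space >
 //   struct FempyQuadratureSelector
 //   {
 //     typedef typename Space::GridPartType GridPartType;
 //     typedef CachingQuadrature< GridPartType, 0, Dune::FemPy::FempyQuadratureTraits > InteriorQuadratureType;
 //     typedef CachingQuadrature< GridPartType, 1, Dune::FemPy::FempyQuadratureTraits > SurfaceQuadratureType;
 //   };
 //
 //   // used as: LocalGalerkinOperator< MyIntegrands, FempyQuadratureSelector >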
88
89 // LocalGalerkinOperator
90 // ---------------------
91
92 template< class Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
93 struct LocalGalerkinOperator
94 {
95 typedef LocalGalerkinOperator<Integrands> ThisType;
96 typedef std::conditional_t< Fem::IntegrandsTraits< Integrands >::isFull, Integrands, FullIntegrands< Integrands > > IntegrandsType;
97
98 typedef typename IntegrandsType::GridPartType GridPartType;
99
100 typedef typename GridPartType::ctype ctype;
101 typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;
102
103 // typedef QuadratureSelector
104 template <class Space>
105 using QuadratureSelector = QuadSelector< Space >;
106
107 // constructor
108 template< class... Args >
109 explicit LocalGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
110 : gridPart_( gridPart ),
111 integrands_( std::forward< Args >( args )... ),
112 defaultInteriorOrder_( [] (const int order) { return 2 * order; } ),
113 defaultSurfaceOrder_ ( [] (const int order) { return 2 * order + 1; } ),
114 interiorQuadOrder_(0), surfaceQuadOrder_(0)
115 {
116 }
117
118 protected:
119 typedef typename IntegrandsType::DomainValueType DomainValueType;
120 typedef typename IntegrandsType::RangeValueType RangeValueType;
121 typedef std::make_index_sequence< std::tuple_size< DomainValueType >::value > DomainValueIndices;
122 typedef std::make_index_sequence< std::tuple_size< RangeValueType >::value > RangeValueIndices;
123
124
125 template< std::size_t... i >
126 static auto makeDomainValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
127 {
128 return std::make_tuple( std::vector< std::tuple_element_t< i, DomainValueType > >( maxNumLocalDofs )... );
129 }
130
131 static auto makeDomainValueVector ( std::size_t maxNumLocalDofs )
132 {
133 return makeDomainValueVector( maxNumLocalDofs, DomainValueIndices() );
134 }
135
136 template< std::size_t... i >
137 static auto makeRangeValueVector ( std::size_t maxNumLocalDofs, std::index_sequence< i... > )
138 {
139 return std::make_tuple( std::vector< std::tuple_element_t< i, RangeValueType > >( maxNumLocalDofs )... );
140 }
141
142 static auto makeRangeValueVector ( std::size_t maxNumLocalDofs )
143 {
144 return makeRangeValueVector( maxNumLocalDofs, RangeValueIndices() );
145 }
146
147 typedef decltype( makeDomainValueVector( 0u ) ) DomainValueVectorType;
148 typedef decltype( makeRangeValueVector( 0u ) ) RangeValueVectorType;
149
150 static void resizeDomainValueVector ( DomainValueVectorType& vec, const std::size_t size )
151 {
152 Hybrid::forEach( DomainValueIndices(), [ &vec, &size ] ( auto i ) {
153 std::get< i >( vec ).resize( size );
154 } );
155 }
156
157 static void resizeRangeValueVector ( RangeValueVectorType& vec, const std::size_t size )
158 {
159 Hybrid::forEach( RangeValueIndices(), [ &vec, &size ] ( auto i ) {
160 std::get< i >( vec ).resize( size );
161 } );
162 }
163
164 public:
165 void prepare( const std::size_t size ) const
166 {
167 resizeDomainValueVector( phiIn_, size );
168 resizeDomainValueVector( phiOut_, size );
169 resizeDomainValueVector( basisValues_, size );
170 resizeDomainValueVector( domainValues_, size );
171 }
172
173 template< class LocalFunction, class Quadrature >
174 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::RangeType > &phi )
175 {
176 u.evaluateQuadrature( quad, phi );
177 }
178
179 template< class LocalFunction, class Quadrature>
180 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::JacobianRangeType > &phi )
181 {
182 u.jacobianQuadrature( quad, phi );
183 }
184
185 template< class LocalFunction, class Quadrature >
186 static void evaluateQuadrature ( const LocalFunction &u, const Quadrature &quad, std::vector< typename LocalFunction::HessianRangeType > &phi )
187 {
188 u.hessianQuadrature( quad, phi );
189 }
190
191 protected:
192 template< class LocalFunction, class Point >
193 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::RangeType &phi )
194 {
195 u.evaluate( x, phi );
196 }
197
198 template< class LocalFunction, class Point >
199 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::JacobianRangeType &phi )
200 {
201 u.jacobian( x, phi );
202 }
203
204 template< class LocalFunction, class Point >
205 static void value ( const LocalFunction &u, const Point &x, typename LocalFunction::HessianRangeType &phi )
206 {
207 u.hessian( x, phi );
208 }
209
210 template< class LocalFunction, class Point, class... T >
211 static void value ( const LocalFunction &u, const Point &x, std::tuple< T... > &phi )
212 {
213 Hybrid::forEach( std::index_sequence_for< T... >(), [ &u, &x, &phi ] ( auto i ) { LocalGalerkinOperator::value( u, x, std::get< i >( phi ) ); } );
214 }
215
216 template< class Basis, class Point >
217 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::RangeType > &phi )
218 {
219 basis.evaluateAll( x, phi );
220 }
221
222 template< class Basis, class Point >
223 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::JacobianRangeType > &phi )
224 {
225 basis.jacobianAll( x, phi );
226 }
227
228 template< class Basis, class Point >
229 static void values ( const Basis &basis, const Point &x, std::vector< typename Basis::HessianRangeType > &phi )
230 {
231 basis.hessianAll( x, phi );
232 }
233
234 template< class Basis, class Point, class... T >
235 static void values ( const Basis &basis, const Point &x, std::tuple< std::vector< T >... > &phi )
236 {
237 Hybrid::forEach( std::index_sequence_for< T... >(), [ &basis, &x, &phi ] ( auto i ) { LocalGalerkinOperator::values( basis, x, std::get< i >( phi ) ); } );
238 }
239
240 template< class LocalFunction, class Point >
241 static DomainValueType domainValue ( const LocalFunction &u, const Point &x )
242 {
243 DomainValueType phi;
244 value( u, x, phi );
245 return phi;
246 }
247
248 static DomainValueType domainValue ( const unsigned int qpIdx, DomainValueVectorType& vec)
249 {
250 DomainValueType phi;
251 Hybrid::forEach( DomainValueIndices(), [ &qpIdx, &vec, &phi ] ( auto i ) {
252 std::get< i > ( phi ) = std::get< i >( vec )[ qpIdx ];
253 } );
254 return phi;
255 }
256
257 template< class LocalFunction, class Quadrature >
258 static void domainValue ( const LocalFunction &u, const Quadrature& quadrature, DomainValueVectorType &result )
259 {
260 Hybrid::forEach( DomainValueIndices(), [ &u, &quadrature, &result ] ( auto i ) {
261 auto& vec = std::get< i >( result );
262 vec.resize( quadrature.nop() );
263 ThisType::evaluateQuadrature( u, quadrature, vec );
264 } );
265 }
266
267 template< class Phi, std::size_t... i >
268 static auto value ( const Phi &phi, std::size_t col, std::index_sequence< i... > )
269 {
270 return std::make_tuple( std::get< i >( phi )[ col ]... );
271 }
272
273 template< class... T >
274 static auto value ( const std::tuple< std::vector< T >... > &phi, std::size_t col )
275 {
276 return value( phi, col, std::index_sequence_for< T... >() );
277 }
278
279 static void assignRange( RangeValueVectorType& ranges, const std::size_t idx, const RangeValueType& range )
280 {
281 Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range ] ( auto i ) {
282 std::get< i >( ranges )[ idx ] = std::get< i >( range );
283 });
284 }
285 template <class W>
286 static void assignRange( RangeValueVectorType& ranges, const std::size_t idx, const RangeValueType& range, const W &weight )
287 {
288 Hybrid::forEach( RangeValueIndices(), [ &ranges, &idx, &range, &weight ] ( auto i ) {
289 std::get< i >( ranges )[ idx ] = std::get< i >( range );
290 std::get< i >( ranges )[ idx ] *= weight;
291 });
292 }
293
294 static void assignDomain( DomainValueVectorType& domains, const std::size_t idx, const DomainValueType& domain )
295 {
296 Hybrid::forEach( DomainValueIndices(), [ &domains, &idx, &domain ] ( auto i ) {
297 std::get< i >( domains )[ idx ] = std::get< i >( domain );
298 });
299 }
300
301 template <class W, class Quadrature>
302 static void axpyQuadrature( W& w, const Quadrature& quadrature, RangeValueVectorType& ranges )
303 {
304 Hybrid::forEach( RangeValueIndices(), [ &w, &quadrature, &ranges ] ( auto i ) {
305 w.axpyQuadrature( quadrature, std::get< i >( ranges ) );
306 } );
307 }
308
309 public:
310 // interior integral
311
312 template< class U, class W >
313 void addInteriorIntegral ( const U &u, W &w ) const
314 {
315 if( !integrands().init( u.entity() ) )
316 return;
317
318 const auto geometry = u.entity().geometry();
319
320 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
321 const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder(maxOrder(u, w)) );
322
323 // evaluate u for all quadrature points
324 DomainValueVectorType& domains = domainValues_;
325 domainValue( u, quadrature, domains );
326
327 auto& ranges = values_;
328 resizeRangeValueVector( ranges, quadrature.nop() );
329
330 // evaluate integrands for all quadrature points
331 for( const auto qp : quadrature )
332 {
333 const ctype weight = qp.weight() * geometry.integrationElement( qp.position() );
334 assignRange( ranges, qp.index(), integrands().interior( qp, domainValue( qp.index(), domains ) ), weight );
335 }
336
337 // add to w for all quadrature points
338 axpyQuadrature( w, quadrature, ranges );
339 integrands().unbind();
340 }
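 // In effect, addInteriorIntegral accumulates for each quadrature point x_q
 // with reference weight w_q the weighted integrand value
 //   w_q * integrationElement( x_q ) * interior( x_q, u(x_q) ),
 // which axpyQuadrature then tests against the basis functions of w, i.e. it
 // adds the element contribution of the interior integral to the local dofs of w.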
341
342 template< class U, class J >
343 void addLinearizedInteriorIntegral ( const U &u, J &j ) const
344 {
345 if( !integrands().init( u.entity() ) )
346 return;
347
348 const auto geometry = u.entity().geometry();
349 const auto &domainBasis = j.domainBasisFunctionSet();
350 const auto &rangeBasis = j.rangeBasisFunctionSet();
351
352 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: InteriorQuadratureType InteriorQuadratureType;
353 const InteriorQuadratureType quadrature( u.entity(), interiorQuadratureOrder( maxOrder( u, domainBasis, rangeBasis )) );
354 const size_t domainSize = domainBasis.size();
355 const size_t quadNop = quadrature.nop();
356
357 auto& basisValues = basisValues_;
358 resizeDomainValueVector( basisValues, domainSize );
359
360 // evaluate u for all quadrature points
361 auto& rangeValues = rangeValues_;
362 DomainValueVectorType& domains = domainValues_;
363 domainValue( u, quadrature, domains );
364
365 rangeValues.resize( domainSize );
366 for( std::size_t col = 0; col < domainSize; ++col )
367 {
368 resizeRangeValueVector( rangeValues[ col ], quadNop );
369 }
370
371 // evaluate all basis functions and integrands
372 for( const auto qp : quadrature )
373 {
374 values( domainBasis, qp, basisValues );
375 const auto weight = qp.weight() * geometry.integrationElement( qp.position() );
376 auto integrand = integrands().linearizedInterior( qp, domainValue( qp.index(), domains ) );
377 for( std::size_t col = 0; col < domainSize; ++col )
378 {
379 assignRange( rangeValues[ col ], qp.index(), integrand( value( basisValues, col ) ), weight );
380 }
381 }
382
383 // add to local matrix for all quadrature points and basis functions
384 for( std::size_t col = 0; col < domainSize; ++col )
385 {
386 LocalMatrixColumn< J > jCol( j, col );
387 axpyQuadrature( jCol, quadrature, rangeValues[ col ] );
388 }
389 integrands().unbind();
390 }
391
392 // boundary integral
393
394 template< class Intersection, class U, class W >
395 void addBoundaryIntegral ( const Intersection &intersection, const U &u, W &w ) const
396 {
397 if( !integrands().init( intersection ) )
398 return;
399
400 const auto geometry = intersection.geometry();
401 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
402 const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( u, w )), SurfaceQuadratureType::INSIDE );
403 for( const auto qp : quadrature )
404 {
405 const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );
406
407 RangeValueType integrand = integrands().boundary( qp, domainValue( u, qp ) );
408
409 Hybrid::forEach( RangeValueIndices(), [ &qp, &w, &integrand, weight ] ( auto i ) {
410 std::get< i >( integrand ) *= weight;
411 w.axpy( qp, std::get< i >( integrand ) );
412 } );
413 }
414 integrands().unbind();
415 }
416
417 template< class Intersection, class U, class J >
418 void addLinearizedBoundaryIntegral ( const Intersection &intersection, const U &u, J &j ) const
419 {
420 if( !integrands().init( intersection ) )
421 return;
422
423 DomainValueVectorType &phi = phiIn_;
424
425 const auto geometry = intersection.geometry();
426 const auto &domainBasis = j.domainBasisFunctionSet();
427 const auto &rangeBasis = j.rangeBasisFunctionSet();
428
429 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
430 const SurfaceQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder(u, domainBasis, rangeBasis )), SurfaceQuadratureType::INSIDE );
431 for( const auto qp : quadrature )
432 {
433 const ctype weight = qp.weight() * geometry.integrationElement( qp.localPosition() );
434
435 values( domainBasis, qp, phi );
436 auto integrand = integrands().linearizedBoundary( qp, domainValue( u, qp ) );
437
438 for( std::size_t col = 0, cols = domainBasis.size(); col < cols; ++col )
439 {
440 LocalMatrixColumn< J > jCol( j, col );
441 RangeValueType intPhi = integrand( value( phi, col ) );
442
443 Hybrid::forEach( RangeValueIndices(), [ &qp, &jCol, &intPhi, weight ] ( auto i ) {
444 std::get< i >( intPhi ) *= weight;
445 jCol.axpy( qp, std::get< i >( intPhi ) );
446 } );
447 }
448 }
449 integrands().unbind();
450 }
451
452 // addSkeletonIntegral
453
454 protected:
455 template< bool conforming, class Intersection, class U, class W >
456 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn ) const
457 {
458 const auto geometry = intersection.geometry();
459
460 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
461 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
462 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( uIn, uOut, wIn)), false );
463 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
464 {
465 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
466
467 const auto qpIn = quadrature.inside()[ qp ];
468 const auto qpOut = quadrature.outside()[ qp ];
469 std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
470
471 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &integrand, weight ] ( auto i ) {
472 std::get< i >( integrand.first ) *= weight;
473 wIn.axpy( qpIn, std::get< i >( integrand.first ) );
474 } );
475 }
476 }
477
478 template< bool conforming, class Intersection, class U, class W >
479 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &wIn, W &wOut ) const
480 {
481 const auto geometry = intersection.geometry();
482 typedef typename QuadratureSelector< typename W::DiscreteFunctionSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
483 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
484 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(maxOrder( uIn, uOut, wIn, wOut)), false );
485 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
486 {
487 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
488
489 const auto qpIn = quadrature.inside()[ qp ];
490 const auto qpOut = quadrature.outside()[ qp ];
491 std::pair< RangeValueType, RangeValueType > integrand = integrands().skeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
492
493 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &wIn, &qpOut, &wOut, &integrand, weight ] ( auto i ) {
494 std::get< i >( integrand.first ) *= weight;
495 wIn.axpy( qpIn, std::get< i >( integrand.first ) );
496
497 std::get< i >( integrand.second ) *= weight;
498 wOut.axpy( qpOut, std::get< i >( integrand.second ) );
499 } );
500 }
501 }
502
503 template< bool conforming, class Intersection, class U, class J >
504 void addLinearizedSkeletonIntegral ( const Intersection &intersection,
505 const U &uIn, const U &uOut, J &jInIn, J &jOutIn ) const
506 {
507 DomainValueVectorType &phiIn = phiIn_;
508 DomainValueVectorType &phiOut = phiOut_;
509
510 const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
511 const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();
512
513 const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();
514
515 const int order = std::max( maxOrder(uIn, uOut), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn ));
516
517 const auto geometry = intersection.geometry();
518 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
519 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
520 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(order), false );
521 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
522 {
523 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
524
525 const auto qpIn = quadrature.inside()[ qp ];
526 const auto qpOut = quadrature.outside()[ qp ];
527
528 values( domainBasisIn, qpIn, phiIn );
529 values( domainBasisOut, qpOut, phiOut );
530
531 auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
532 for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
533 {
534 LocalMatrixColumn< J > jInInCol( jInIn, col );
535 std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );
536
537 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &intPhi, weight ] ( auto i ) {
538 std::get< i >( intPhi.first ) *= weight;
539 jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
540 } );
541 }
542 for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
543 {
544 LocalMatrixColumn< J > jOutInCol( jOutIn, col );
545 std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );
546
547 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &intPhi, weight ] ( auto i ) {
548 std::get< i >( intPhi.first ) *= weight;
549 jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
550 } );
551 }
552 }
553 }
554
555 template< bool conforming, class Intersection, class U, class J >
556 void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut,
557 J &jInIn, J &jOutIn, J &jInOut, J &jOutOut ) const
558 {
559 DomainValueVectorType &phiIn = phiIn_;
560 DomainValueVectorType &phiOut = phiOut_;
561
562 const auto &domainBasisIn = jInIn.domainBasisFunctionSet();
563 const auto &domainBasisOut = jOutIn.domainBasisFunctionSet();
564
565 const auto &rangeBasisIn = jInIn.rangeBasisFunctionSet();
566 const auto &rangeBasisOut = jInOut.rangeBasisFunctionSet();
567
568 const int order = std::max( maxOrder(uIn, uOut), maxOrder( domainBasisIn, domainBasisOut, rangeBasisIn, rangeBasisOut ));
569
570 const auto geometry = intersection.geometry();
571 typedef typename QuadratureSelector< typename J::RangeSpaceType > :: SurfaceQuadratureType SurfaceQuadratureType;
572 typedef IntersectionQuadrature< SurfaceQuadratureType, conforming > IntersectionQuadratureType;
573 const IntersectionQuadratureType quadrature( gridPart(), intersection, surfaceQuadratureOrder(order), false );
574 for( std::size_t qp = 0, nop = quadrature.nop(); qp != nop; ++qp )
575 {
576 const ctype weight = quadrature.weight( qp ) * geometry.integrationElement( quadrature.localPoint( qp ) );
577
578 const auto qpIn = quadrature.inside()[ qp ];
579 const auto qpOut = quadrature.outside()[ qp ];
580
581 values( domainBasisIn, qpIn, phiIn );
582 values( domainBasisOut, qpOut, phiOut );
583
584 auto integrand = integrands().linearizedSkeleton( qpIn, domainValue( uIn, qpIn ), qpOut, domainValue( uOut, qpOut ) );
585 for( std::size_t col = 0, cols = domainBasisIn.size(); col < cols; ++col )
586 {
587 LocalMatrixColumn< J > jInInCol( jInIn, col );
588 LocalMatrixColumn< J > jInOutCol( jInOut, col );
589 std::pair< RangeValueType, RangeValueType > intPhi = integrand.first( value( phiIn, col ) );
590
591 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jInInCol, &qpOut, &jInOutCol, &intPhi, weight ] ( auto i ) {
592 std::get< i >( intPhi.first ) *= weight;
593 jInInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
594
595 std::get< i >( intPhi.second ) *= weight;
596 jInOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
597 } );
598 }
599 for( std::size_t col = 0, cols = domainBasisOut.size(); col < cols; ++col )
600 {
601 LocalMatrixColumn< J > jOutInCol( jOutIn, col );
602 LocalMatrixColumn< J > jOutOutCol( jOutOut, col );
603 std::pair< RangeValueType, RangeValueType > intPhi = integrand.second( value( phiOut, col ) );
604
605 Hybrid::forEach( RangeValueIndices(), [ &qpIn, &jOutInCol, &qpOut, &jOutOutCol, &intPhi, weight ] ( auto i ) {
606 std::get< i >( intPhi.first ) *= weight;
607 jOutInCol.axpy( qpIn, std::get< i >( intPhi.first ) );
608
609 std::get< i >( intPhi.second ) *= weight;
610 jOutOutCol.axpy( qpOut, std::get< i >( intPhi.second ) );
611 } );
612 }
613 }
614 }
615
616 public:
617 template< class Intersection, class U, class... W >
618 void addSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, W &... w ) const
619 {
620 if( !integrands().init( intersection ) )
621 return;
622
623 if( intersection.conforming() )
624 addSkeletonIntegral< true >( intersection, uIn, uOut, w... );
625 else
626 addSkeletonIntegral< false >( intersection, uIn, uOut, w... );
627 integrands().unbind();
628 }
629
630 template< class Intersection, class U, class... J >
631 void addLinearizedSkeletonIntegral ( const Intersection &intersection, const U &uIn, const U &uOut, J &... j ) const
632 {
633 if( !integrands().init( intersection ) )
634 return;
635
636 if( intersection.conforming() )
637 addLinearizedSkeletonIntegral< true >( intersection, uIn, uOut, j... );
638 else
639 addLinearizedSkeletonIntegral< false >( intersection, uIn, uOut, j... );
640 integrands().unbind();
641 }
642
643 void setQuadratureOrders(unsigned int interior, unsigned int surface)
644 {
645 interiorQuadOrder_ = interior;
646 surfaceQuadOrder_ = surface;
647 }
648
649 IntegrandsType& model() const
650 {
651 return integrands();
652 }
653 bool nonlinear() const { return model().nonlinear(); }
654 bool hasInterior() const { return model().hasInterior(); }
655 bool hasSkeleton() const { return model().hasSkeleton(); }
656 bool hasBoundary() const { return model().hasBoundary(); }
657
658 private:
659 IntegrandsType& integrands() const
660 {
661 return integrands_;
662 }
663
664 public:
665 // accessors
666 const GridPartType &gridPart () const { return gridPart_; }
667
668 unsigned int interiorQuadratureOrder(unsigned int order) const { return interiorQuadOrder_ == 0 ? defaultInteriorOrder_(order) : interiorQuadOrder_; }
669 unsigned int surfaceQuadratureOrder(unsigned int order) const { return surfaceQuadOrder_ == 0 ? defaultSurfaceOrder_ (order) : surfaceQuadOrder_; }
670
671 protected:
672 template <class U>
673 int maxOrder( const U& u ) const
674 {
675 return CallOrder< U > :: order( u );
676 }
677
678 template< class U, class W >
679 int maxOrder( const U& u, const W& w ) const
680 {
681 return std::max( maxOrder( u ), maxOrder( w ) );
682 }
683
684 template< class U, class V, class W >
685 int maxOrder( const U& u, const V& v, const W& w ) const
686 {
687 return std::max( maxOrder( u, v ), maxOrder( w ) );
688 }
689
690 template< class U, class V, class W, class X >
691 int maxOrder( const U& u, const V& v, const W& w, const X& x ) const
692 {
693 return std::max( maxOrder( u, v ), maxOrder( w, x) );
694 }
695
696 protected:
697 const GridPartType &gridPart_;
698
699 mutable IntegrandsType integrands_;
700
701 mutable std::function<int(const int)> defaultInteriorOrder_;
702 mutable std::function<int(const int)> defaultSurfaceOrder_;
703
704 unsigned int interiorQuadOrder_;
705 unsigned int surfaceQuadOrder_;
706
707 mutable std::vector< RangeValueVectorType > rangeValues_;
708 mutable RangeValueVectorType values_;
709 mutable DomainValueVectorType phiIn_;
710 mutable DomainValueVectorType phiOut_;
711 mutable DomainValueVectorType basisValues_;
712 mutable DomainValueVectorType domainValues_;
713 };
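 // Default quadrature orders are 2*k for interior and 2*k+1 for surface
 // integrals, where k is the maximal order of the arguments; non-zero orders
 // set via setQuadratureOrders take precedence. A hedged usage sketch with a
 // hypothetical integrands type MyIntegrands, not part of this header:
 //
 //   Impl::LocalGalerkinOperator< MyIntegrands > localOp( gridPart, /* integrands args */ );
 //   localOp.setQuadratureOrders( 4, 5 );              // override the 2*k / 2*k+1 defaults
 //   if( localOp.hasInterior() )
 //     localOp.addInteriorIntegral( uLocal, wLocal );  // uLocal bound to an entity, wLocal cleared beforehand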
714
715
716
717 // GalerkinOperator
718 // ----------------
719
720 template< class GridPart >
721 // Integrands, template <class> class QuadSelector = DefaultGalerkinOperatorQuadratureSelector >
722 struct GalerkinOperator
723 {
724 typedef GridPart GridPartType;
725 typedef GalerkinOperator< GridPartType > ThisType;
726
727 typedef typename GridPartType::ctype ctype;
728 typedef typename GridPartType::template Codim< 0 >::EntityType EntityType;
729
730 // constructor
731 explicit GalerkinOperator ( const GridPartType &gridPart )
732 : gridPart_( gridPart ),
733 gridSizeInterior_( 0 )
734 {
735 }
736
737 protected:
738 template <class IntegrandsTuple>
739 bool hasBoundary( const IntegrandsTuple& integrandsTuple ) const
740 {
741 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
742 bool hasBoundary = false ;
743 Hybrid::forEach( Indices(), [&integrandsTuple, &hasBoundary]( auto i ) {
744 if( std::get< i > (integrandsTuple).hasBoundary() )
745 {
746 hasBoundary = true ;
747 return ;
748 }
749 });
750 return hasBoundary;
751 }
752
753 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
754 void evaluateImpl ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
755 const IntegrandsTuple& integrandsTuple, Functor& addLocalDofs, std::integral_constant<bool, hasSkeleton> ) const
756 {
757 Dune::Fem::ConstLocalFunction< GridFunction > uInside( u );
758 Dune::Fem::ConstLocalFunction< GridFunction > uOutside( u );
759
760 typedef typename DiscreteFunction::DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
761 TemporaryLocalFunction< DiscreteFunctionSpaceType > wInside( w.space() ), wOutside( w.space() );
762
763 // element counter
764 gridSizeInterior_ = 0;
765
766 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
767
768 // true if one of the integrands has a boundary term
769 const bool hasBnd = hasBoundary( integrandsTuple );
770
771 const auto &indexSet = gridPart().indexSet();
772 const auto end = iterators.end();
773 for( auto it = iterators.begin(); it != end; ++it )
774 {
775 // assert( iterators.thread( *it ) == MPIManager::thread() );
776 const EntityType inside = *it ;
777
778 // increase counter for interior elements
779 ++gridSizeInterior_;
780
781 auto uGuard = bindGuard( uInside, inside );
782 auto wGuard = bindGuard( wInside, inside );
783 wInside.clear();
784
785 auto addInteriorIntegral = [&integrandsTuple, &uInside, &wInside]( auto i )
786 {
787 const auto& integrands = std::get< i >( integrandsTuple );
788 if( integrands.hasInterior() )
789 integrands.addInteriorIntegral( uInside, wInside );
790 };
791 // add interior integral of any integrands
792 Hybrid::forEach( Indices(), addInteriorIntegral );
793
794 if( hasSkeleton || (hasBnd && inside.hasBoundaryIntersections() ) )
795 {
796 for( const auto &intersection : intersections( gridPart(), inside ) )
797 {
798 bool neighbor = false;
799 if constexpr ( hasSkeleton )
800 {
801 // check neighbor first since on periodic boundaries both
802 // neighbor() and boundary() are true, so we treat neighbor first
803 if( intersection.neighbor() )
804 {
805 neighbor = true;
806 const EntityType outside = intersection.outside();
807
808 if( outside.partitionType() != InteriorEntity )
809 {
810 auto uOutGuard = bindGuard( uOutside, outside );
811
812 auto addSkeletonIntegral = [&integrandsTuple, &intersection, &uInside, &uOutside, &wInside] ( auto i )
813 {
814 const auto& integrands = std::get< i >( integrandsTuple );
815 if( integrands.hasSkeleton() )
816 integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside );
817 };
818 // add skeleton integral of any integrands
819 Hybrid::forEach( Indices(), addSkeletonIntegral );
820 }
821 else if( indexSet.index( inside ) < indexSet.index( outside ) )
822 {
823 auto uOutGuard = bindGuard( uOutside, outside );
824 auto wOutGuard = bindGuard( wOutside, outside );
825 wOutside.clear();
826
827 auto addSkeletonIntegral = [&integrandsTuple, &intersection, &uInside, &uOutside, &wInside, &wOutside] ( auto i )
828 {
829 const auto& integrands = std::get< i >( integrandsTuple );
830 if( integrands.hasSkeleton() )
831 integrands.addSkeletonIntegral( intersection, uInside, uOutside, wInside, wOutside );
832 };
833 // add skeleton integral of any integrands
834 Hybrid::forEach( Indices(), addSkeletonIntegral );
835
836 // addLocalDofs calls w.addLocalDofs but also
837 // prevents race conditions in thread-parallel runs
838 addLocalDofs( outside, wOutside );
839 }
840 }
841 } // end skeleton
842
843 if( ! neighbor && intersection.boundary() )
844 {
845 auto addBoundaryIntegral = [&integrandsTuple, &intersection, &uInside, &wInside]( auto i )
846 {
847 const auto& integrands = std::get< i >( integrandsTuple );
848 if( integrands.hasBoundary() )
849 integrands.addBoundaryIntegral( intersection, uInside, wInside );
850 };
851 // add boundary integral of any integrands
852 Hybrid::forEach( Indices(), addBoundaryIntegral );
853 } // end boundary
854 }
855 } // end intersections
856
857 addLocalDofs( inside, wInside );
858 }
859 }
860
861 template <class Space>
862 struct InsideEntity
863 {
864 typedef typename Space::EntityType EntityType;
865 template <class Iterators>
866 InsideEntity(const Space &space, const Iterators& iterators)
867 : space_(space), dofThread_(space.size(),-1)
868 , thread_(MPIManager::thread())
869 {
870 const auto& mapper = space_.blockMapper();
871 for (const auto &entity : space_)
872 {
873 int t=iterators.threadParallel(entity);
874 mapper.mapEach(entity, [ this, t ] ( int local, auto global )
875 { dofThread_[global] = (dofThread_[global]==t || dofThread_[global]==-1)?
876 t : -2 ; } ); // -2: shared dof
877 }
878 }
879 bool operator()(const EntityType &entity) const
880 {
881 bool needsLocking = false;
882 space_.blockMapper().mapEach(entity,
883 [ this, &needsLocking ] ( int local, auto global )
884 { needsLocking = (needsLocking || dofThread_[global]!=thread_); });
885 return !needsLocking;
886 }
887 const Space &space_;
888 std::vector<int> dofThread_;
889 int thread_;
890 };
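 // InsideEntity records, per block dof, the thread whose iterator range
 // contains all entities touching that dof, or -2 if two threads share it.
 // operator() then returns true only if every dof of the given entity belongs
 // to the current thread, so the caller may add local dofs under a shared
 // lock instead of an exclusive one.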
891
892 template <class DiscreteFunction>
893 struct AddLocalEvaluate
894 {
895 AddLocalEvaluate(DiscreteFunction &w)
896 : w_(w) {}
897 template <class LocalDofs>
898 void operator () (const EntityType& entity, const LocalDofs& wLocal ) const
899 {
900 w_.addLocalDofs( entity, wLocal.localDofVector() );
901 }
902 DiscreteFunction &w_;
903 };
904
905 template <class DiscreteFunction>
906 struct AddLocalEvaluateLocked : public AddLocalEvaluate<DiscreteFunction>
907 {
908 typedef AddLocalEvaluate<DiscreteFunction> BaseType;
909
910 std::shared_mutex& mutex_;
911 InsideEntity<typename DiscreteFunction::DiscreteFunctionSpaceType> inside_;
912
913 template <class Iterators>
914 AddLocalEvaluateLocked(DiscreteFunction &w, std::shared_mutex& mtx, const Iterators &iterators)
915 : BaseType(w), mutex_(mtx), inside_(w.space(),iterators) {}
916
917 template <class LocalDofs>
918 void operator () (const EntityType& entity, const LocalDofs& wLocal ) const
919 {
920 // call addLocalDofs on w
921 if (inside_(entity))
922 {
923 std::shared_lock<std::shared_mutex> guard ( mutex_ );
924 BaseType::operator()( entity, wLocal );
925 }
926 else
927 {
928 // lock mutex (unlock on destruction)
929 std::lock_guard<std::shared_mutex> guard ( mutex_ );
930 BaseType::operator()( entity, wLocal );
931 }
932 }
933 };
934
935 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple, class Functor >
936 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
937 const IntegrandsTuple& integrandsTuple, Functor& addLocalDofs ) const
938 {
939 static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
940 static_assert( std::is_same< typename DiscreteFunction::GridPartType, GridPartType >::value, "Argument 'w' and Integrands must be defined on the same grid part." );
941
942 if( hasSkeleton( integrandsTuple ) )
943 evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::true_type() );
944 else
945 evaluateImpl( u, w, iterators, integrandsTuple, addLocalDofs, std::false_type() );
946 }
947
948 public:
949 template <class IntegrandsTuple>
950 bool hasSkeleton( const IntegrandsTuple& integrandsTuple ) const
951 {
952 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
953 bool hasSkeleton = false ;
954 Hybrid::forEach( Indices(), [&integrandsTuple, &hasSkeleton] ( auto i ) {
955 if( std::get< i >( integrandsTuple ).hasSkeleton() )
956 {
957 hasSkeleton = true;
958 return ;
959 }
960 });
961 return hasSkeleton ;
962 }
963
964 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
965 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators,
966 const IntegrandsTuple& integrandsTuple, std::shared_mutex& mtx ) const
967 {
968 AddLocalEvaluateLocked<DiscreteFunction> addLocalEvaluate(w,mtx,iterators);
969 evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
970 }
971
972 template< class GridFunction, class DiscreteFunction, class Iterators, class IntegrandsTuple >
973 void evaluate ( const GridFunction &u, DiscreteFunction &w, const Iterators& iterators, const IntegrandsTuple& integrandsTuple ) const
974 {
975 AddLocalEvaluate<DiscreteFunction> addLocalEvaluate(w);
976 evaluate( u, w, iterators, integrandsTuple, addLocalEvaluate );
977 }
978
979 protected:
980 template<class T, int length>
981 class FiniteStack
982 {
983 public :
984 // Makes empty stack
985 FiniteStack () : _f(0) {}
986
987 // Returns true if the stack is empty
988 bool empty () const { return _f <= 0; }
989
990 // Returns true if the stack is full
991 bool full () const { return (_f >= length); }
992
993 // clear stack
994 void clear() { _f = 0; }
995
996 // Puts a new object onto the stack
997 void push (const T& t)
998 {
999 assert ( _f < length );
1000 _s[_f++] = t;
1001 }
1002
1003 // Removes and returns the uppermost object from the stack
1004 T pop () {
1005 assert ( _f > 0 );
1006 return _s[--_f];
1007 }
1008
1009 // Returns the uppermost object on the stack
1010 T top () const {
1011 assert ( _f > 0 );
1012 return _s[_f-1];
1013 }
1014
1015 // stacksize
1016 int size () const { return _f; }
1017
1018 private:
1019 T _s[length]; // the stack
1020 int _f; // actual position in stack
1021 };
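 // FiniteStack is a fixed-capacity LIFO buffer, used below as a free list of
 // temporary local matrices. Minimal sketch:
 //
 //   FiniteStack< int, 4 > stack;
 //   stack.push( 1 );
 //   stack.push( 2 );
 //   assert( stack.top() == 2 );   // peek
 //   assert( stack.pop() == 2 );   // remove
 //   assert( stack.size() == 1 );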
1022
1023
1024 template <class JacobianOperator>
1025 struct AddLocalAssemble
1026 {
1027 typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
1028 typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;
1029 typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
1030 JacobianOperator &jOp_;
1031 std::vector< TemporaryLocalMatrixType > jOpLocal_;
1032
1033 FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFinalized_;
1034 FiniteStack< TemporaryLocalMatrixType*, 12 > jOpLocalFree_;
1035
1036 std::size_t locked, notLocked, timesLocked;
1037 AddLocalAssemble(JacobianOperator& jOp)
1038 : jOp_(jOp)
1039 , jOpLocal_(12, TemporaryLocalMatrixType(jOp_.domainSpace(), jOp_.rangeSpace()))
1040 , jOpLocalFinalized_()
1041 , jOpLocalFree_()
1042 , locked(0), notLocked(0), timesLocked(0)
1043 {
1044 for( auto& jOpLocal : jOpLocal_ )
1045 jOpLocalFree_.push( &jOpLocal );
1046 }
1047
1048 TemporaryLocalMatrixType& bind(const EntityType& dE, const EntityType& rE)
1049 {
1050 assert( ! jOpLocalFree_.empty() );
1051 TemporaryLocalMatrixType& lop = *(jOpLocalFree_.pop());
1052 lop.bind(dE,rE);
1053 lop.clear();
1054 return lop;
1055 }
1056
1057 void unbind(TemporaryLocalMatrixType &lop)
1058 {
1059 notLocked += 1;
1060 jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
1061 lop.unbind();
1062 jOpLocalFree_.push( &lop );
1063 }
1064
1065 void finalize()
1066 {
1067 locked += jOpLocalFinalized_.size();
1068 while ( ! jOpLocalFinalized_.empty() )
1069 {
1070 TemporaryLocalMatrixType &lop = *(jOpLocalFinalized_.pop());
1071 jOp_.addLocalMatrix( lop.domainEntity(), lop.rangeEntity(), lop );
1072 lop.unbind();
1073 jOpLocalFree_.push( &lop );
1074 }
1075 }
1076 };
1077
1078 template <class JacobianOperator>
1079 struct AddLocalAssembleLocked : public AddLocalAssemble<JacobianOperator>
1080 {
1081 typedef AddLocalAssemble<JacobianOperator> BaseType;
1082 typedef typename BaseType::TemporaryLocalMatrixType TemporaryLocalMatrixType;
1083 using BaseType::jOpLocalFinalized_;
1084 using BaseType::jOpLocalFree_;
1085
1086 std::shared_mutex& mutex_;
1087 InsideEntity<typename JacobianOperator::DomainSpaceType> insideDomain_;
1088 InsideEntity<typename JacobianOperator::RangeSpaceType> insideRange_;
1089
1090 template <class Iterators>
1091 AddLocalAssembleLocked(JacobianOperator &jOp, std::shared_mutex &mtx, const Iterators &iterators)
1092 : BaseType(jOp)
1093 , mutex_(mtx)
1094 , insideDomain_(jOp.domainSpace(),iterators)
1095 , insideRange_(jOp.rangeSpace(),iterators)
1096 {}
1097
1098 void finalize()
1099 {
1100 // lock mutex (unlock on destruction)
1101 ++BaseType::timesLocked;
1102 std::lock_guard<std::shared_mutex> guard ( mutex_ );
1103 BaseType::finalize();
1104 }
1105
1106 TemporaryLocalMatrixType& bind(const EntityType& dE, const EntityType& rE)
1107 {
1108 if ( jOpLocalFree_.empty() )
1109 {
1110 finalize();
1111 }
1112 return BaseType::bind(dE,rE);
1113 }
1114
1115 void unbind(TemporaryLocalMatrixType &lop)
1116 {
1117 /* // always lock
1118 ++BaseType::timesLocked;
1119 ++BaseType::locked;
1120 std::lock_guard guard ( mutex_ );
1121 BaseType::unbind(lop);
1122 return;
1123 */
1124 if ( insideDomain_(lop.domainEntity()) &&
1125 insideRange_(lop.rangeEntity()) )
1126 {
1127 std::shared_lock<std::shared_mutex> guard ( mutex_ );
1128 BaseType::unbind(lop);
1129 }
1130 else
1131 {
1132 jOpLocalFinalized_.push( &lop );
1133 }
1134 }
1135 };
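 // AddLocalAssembleLocked defers local matrices that touch dofs shared between
 // threads: they are parked in jOpLocalFinalized_ and flushed under an
 // exclusive lock in finalize(), while matrices whose dofs are owned entirely
 // by the current thread are added immediately under a shared lock.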
1136
1137 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor, bool hasSkeleton >
1138 void assembleImpl ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators, const IntegrandsTuple& integrandsTuple,
1139 Functor& addLocalMatrix, std::integral_constant<bool, hasSkeleton> ) const
1140 {
1141 typedef typename JacobianOperator::DomainSpaceType DomainSpaceType;
1142 typedef typename JacobianOperator::RangeSpaceType RangeSpaceType;
1143
1144 typedef TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
1145
1146 Dune::Fem::ConstLocalFunction< GridFunction > uIn( u );
1147 Dune::Fem::ConstLocalFunction< GridFunction > uOut( u );
1148
1149 typedef std::make_index_sequence< std::tuple_size< IntegrandsTuple >::value > Indices;
1150 const std::size_t maxNumLocalDofs = jOp.domainSpace().blockMapper().maxNumDofs() * jOp.domainSpace().localBlockSize;
1151
1152 // initialize local temporary data
1153 Hybrid::forEach( Indices(), [&integrandsTuple, &maxNumLocalDofs] ( auto i ) {
1154 const auto& integrands = std::get< i >( integrandsTuple );
1155 integrands.prepare( maxNumLocalDofs );
1156 });
1157
1158 // element counter
1159 gridSizeInterior_ = 0;
1160
1161 // true if one of the integrands has a boundary term
1162 const bool hasBnd = hasBoundary( integrandsTuple );
1163
1164 const auto &indexSet = gridPart().indexSet();
1165 // threaded iterators provided from outside
1166 const auto end = iterators.end();
1167 for( auto it = iterators.begin(); it != end; ++it )
1168 {
1169 // increase counter for interior elements
1170 ++gridSizeInterior_;
1171
1172 const EntityType inside = *it;
1173
1174 auto uiGuard = bindGuard( uIn, inside );
1175
1176 TemporaryLocalMatrixType& jOpInIn = addLocalMatrix.bind( inside, inside );
1177 auto addLinearizedInteriorIntegral = [&integrandsTuple, &uIn, &jOpInIn]( auto i )
1178 {
1179 const auto& integrands = std::get< i >( integrandsTuple );
1180 if( integrands.hasInterior() )
1181 integrands.addLinearizedInteriorIntegral( uIn, jOpInIn );
1182 };
1183 // add interior integral of any integrands
1184 Hybrid::forEach( Indices(), addLinearizedInteriorIntegral );
1185
1186 if( hasSkeleton || (hasBnd && inside.hasBoundaryIntersections() ) )
1187 {
1188 for( const auto &intersection : intersections( gridPart(), inside ) )
1189 {
1190 bool neighbor = false ;
1191 // check neighbor first since on periodic boundaries both
1192 // neighbor() and boundary() are true, so we treat neighbor first
1193 if constexpr ( hasSkeleton )
1194 {
1195 if( intersection.neighbor() )
1196 {
1197 neighbor = true ;
1198 const EntityType &outside = intersection.outside();
1199
1200 TemporaryLocalMatrixType &jOpOutIn = addLocalMatrix.bind( outside, inside );
1201
1202 auto uoGuard = bindGuard( uOut, outside );
1203
1204 if( outside.partitionType() != InteriorEntity )
1205 {
1206 auto addLinearizedSkeletonIntegral = [&integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn]( auto i )
1207 {
1208 const auto& integrands = std::get< i >( integrandsTuple );
1209 if( integrands.hasSkeleton() )
1210 integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn );
1211 };
1212 // add skeleton integral of any integrands
1213 Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );
1214 }
1215 else if( indexSet.index( inside ) < indexSet.index( outside ) )
1216 {
1217 TemporaryLocalMatrixType &jOpInOut = addLocalMatrix.bind( inside, outside );
1218 TemporaryLocalMatrixType &jOpOutOut = addLocalMatrix.bind( outside, outside );
1219
1220 auto addLinearizedSkeletonIntegral = [&integrandsTuple, &intersection, &uIn, &uOut, &jOpInIn, &jOpOutIn, &jOpInOut, &jOpOutOut]( auto i )
1221 {
1222 const auto& integrands = std::get< i >( integrandsTuple );
1223 if( integrands.hasSkeleton() )
1224 integrands.addLinearizedSkeletonIntegral( intersection, uIn, uOut, jOpInIn, jOpOutIn, jOpInOut, jOpOutOut );
1225 };
1226 // add skeleton integral of any integrands
1227 Hybrid::forEach( Indices(), addLinearizedSkeletonIntegral );
1228
1229 addLocalMatrix.unbind(jOpInOut);
1230 addLocalMatrix.unbind(jOpOutOut);
1231 }
1232
1233 addLocalMatrix.unbind(jOpOutIn);
1234 }
1235 } // end skeleton
1236
1237 if( !neighbor && intersection.boundary() )
1238 {
1239 auto addLinearizedBoundaryIntegral = [&integrandsTuple, &intersection, &uIn, &jOpInIn]( auto i )
1240 {
1241 const auto& integrands = std::get< i >( integrandsTuple );
1242 if( integrands.hasBoundary() )
1243 integrands.addLinearizedBoundaryIntegral( intersection, uIn, jOpInIn );
1244 };
1245 // add boundary integral of any integrands
1246 Hybrid::forEach( Indices(), addLinearizedBoundaryIntegral );
1247
1248 } // end boundary
1249 }
1250 } // end intersection
1251 addLocalMatrix.unbind(jOpInIn);
1252 }
1253
1254 // complete the matrix build
1255 addLocalMatrix.finalize();
1256 }
1257
1258
1259 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple, class Functor >
1260 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators,
1261 const IntegrandsTuple& integrandsTuple, Functor& addLocalMatrix, int ) const
1262 {
1263 static_assert( std::is_same< typename GridFunction::GridPartType, GridPartType >::value, "Argument 'u' and Integrands must be defined on the same grid part." );
1264 static_assert( std::is_same< typename JacobianOperator::DomainSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );
1265 static_assert( std::is_same< typename JacobianOperator::RangeSpaceType::GridPartType, GridPartType >::value, "Argument 'jOp' and Integrands must be defined on the same grid part." );
1266
1267 if( hasSkeleton( integrandsTuple ) )
1268 assembleImpl( u, jOp, iterators, integrandsTuple ,addLocalMatrix, std::true_type() );
1269 else
1270 assembleImpl( u, jOp, iterators, integrandsTuple, addLocalMatrix, std::false_type() );
1271 }
1272
1273 public:
1274 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple>
1275 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators,
1276 const IntegrandsTuple& integrandsTuple, std::shared_mutex& mtx) const
1277 {
1278 AddLocalAssembleLocked<JacobianOperator> addLocalAssemble( jOp, mtx, iterators);
1279 assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
1280 #if 0 // print information about how many times a lock was used during assemble
1281 std::lock_guard guard ( mtx );
1282 std::cout << MPIManager::thread() << " : "
1283 << addLocalAssemble.locked << " " << addLocalAssemble.notLocked << " "
1284 << addLocalAssemble.timesLocked << std::endl;
1285 #endif
1286 }
1287
1288 template< class GridFunction, class JacobianOperator, class Iterators, class IntegrandsTuple>
1289 void assemble ( const GridFunction &u, JacobianOperator &jOp, const Iterators& iterators, const IntegrandsTuple& integrandsTuple ) const
1290 {
1291 AddLocalAssemble<JacobianOperator> addLocalAssemble(jOp);
1292 assemble( u, jOp, iterators, integrandsTuple, addLocalAssemble, 10 );
1293 }
1294
1295 // accessors
1296 const GridPartType &gridPart () const { return gridPart_; }
1297
1298 std::size_t gridSizeInterior () const { return gridSizeInterior_; }
1299
1300 protected:
1301 const GridPartType &gridPart_;
1302 mutable std::size_t gridSizeInterior_;
1303 };
1304
1305
1306 template <class GalerkinOperator >
1307 static std::size_t accumulateGridSize( const ThreadSafeValue< GalerkinOperator >& ops )
1308 {
1309 std::size_t s = ops.size();
1310 std::size_t sum = 0;
1311 for( std::size_t i=0; i<s; ++i )
1312 sum += ops[ i ].gridSizeInterior();
1313 return sum;
1314 }
1315
1316 } // namespace Impl
1317
1320
1321
1322 // GalerkinOperator
1323 // ----------------
1324
1325 template< class Integrands, class DomainFunction, class RangeFunction = DomainFunction >
1326 struct GalerkinOperator
1327 : public virtual Operator< DomainFunction, RangeFunction >
1328 {
1329 typedef DomainFunction DomainFunctionType;
1330 typedef RangeFunction RangeFunctionType;
1331
1332 typedef typename RangeFunctionType::GridPartType GridPartType;
1333
1334 typedef Impl::LocalGalerkinOperator< Integrands > LocalGalerkinOperatorImplType;
1335 typedef Impl::GalerkinOperator< GridPartType > GalerkinOperatorImplType;
1336
1337 static_assert( std::is_same< typename DomainFunctionType::GridPartType, typename RangeFunctionType::GridPartType >::value, "DomainFunction and RangeFunction must be defined on the same grid part." );
1338
1339 typedef ThreadIterator< GridPartType > ThreadIteratorType;
1340
1341 template< class... Args >
1342 explicit GalerkinOperator ( const GridPartType &gridPart, Args &&... args )
1343 : iterators_( gridPart ),
1344 opImpl_( gridPart ),
1345 localOp_( gridPart, std::forward< Args >( args )... ),
1346 gridSizeInterior_( 0 ),
1347 communicate_( true )
1348 {
1349 }
1350
1351 void setCommunicate( const bool communicate )
1352 {
1353 communicate_ = communicate;
1354 if( ! communicate_ && Dune::Fem::Parameter::verbose() )
1355 {
1356 std::cout << "GalerkinOperator::setCommunicate: communicate was disabled!" << std::endl;
1357 }
1358 }
1359
1360 void setQuadratureOrders(unsigned int interior, unsigned int surface)
1361 {
1362 size_t size = localOp_.size();
1363 for( size_t i=0; i<size; ++i )
1364 localOp_[ i ].setQuadratureOrders(interior,surface);
1365 }
1366
1367 virtual bool nonlinear() const final override
1368 {
1369 return localOperator().nonlinear();
1370 }
1371
1372 virtual void operator() ( const DomainFunctionType &u, RangeFunctionType &w ) const final override
1373 {
1374 evaluate( u, w );
1375 }
1376
1377 template< class GridFunction >
1378 void operator() ( const GridFunction &u, RangeFunctionType &w ) const
1379 {
1380 evaluate( u, w );
1381 }
1382
1383 const GridPartType &gridPart () const { return op().gridPart(); }
1384
1385 typedef Integrands ModelType;
1386 typedef Integrands DirichletModelType;
1387 ModelType &model() const { return localOperator().model(); }
1388
1389 [[deprecated("Use localOperator instead!")]]
1390 const LocalGalerkinOperatorImplType& impl() const { return localOperator(); }
1391
1393 const LocalGalerkinOperatorImplType& localOperator() const { return *localOp_; }
1394
1395 std::size_t gridSizeInterior () const { return gridSizeInterior_; }
1396
1397 protected:
1399 const GalerkinOperatorImplType& op() const { return *opImpl_; }
1400
1401 template < class GridFunction >
1402 void evaluate( const GridFunction &u, RangeFunctionType &w ) const
1403 {
1404 iterators_.update();
1405 w.clear();
1406
1407 std::shared_mutex mutex;
1408
1409 auto doEval = [this, &u, &w, &mutex] ()
1410 {
1411 // TODO: Move this to be a class variable
1412 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1413 this->op().evaluate( u, w, this->iterators_, integrands, mutex );
1414 };
1415
1416 try {
1417 // execute in parallel
1418 MPIManager :: run ( doEval );
1419
1420 // update number of interior elements as sum over threads
1421 gridSizeInterior_ = Impl::accumulateGridSize( opImpl_ );
1422 }
1423 catch ( const SingleThreadModeError& e )
1424 {
1425 // reset w from previous entries
1426 w.clear();
1427 // re-run in single thread mode if previous attempt failed
1428 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1429 op().evaluate( u, w, iterators_, integrands );
1430
1431 // update number of interior elements as sum over threads
1432 gridSizeInterior_ = op().gridSizeInterior();
1433 }
1434
1435 // synchronize result
1436 if( communicate_ )
1437 w.communicate();
1438 }
1439
1440 mutable ThreadIteratorType iterators_;
1443
1444 mutable std::size_t gridSizeInterior_;
1445 bool communicate_;
1446 };
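  // A hedged usage sketch (hypothetical model and discrete function types):
  // the operator computes w = L(u), running the assembly loop multi-threaded
  // via MPIManager::run and falling back to a single-threaded sweep if a
  // SingleThreadModeError is thrown.
  //
  //   GalerkinOperator< MyIntegrands, DiscreteFunctionType > op( gridPart, model );
  //   op.setQuadratureOrders( 4, 5 );  // optional; defaults are 2*k and 2*k+1
  //   op( u, w );                      // evaluate; w is communicated afterwards
  //   op.setCommunicate( false );      // disable the final w.communicate()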
1447
1448
1449
1450 // DifferentiableGalerkinOperator
1451 // ------------------------------
1452
1453 template< class Integrands, class JacobianOperator >
1454 class DifferentiableGalerkinOperator
1455 : public GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType >,
1456 public DifferentiableOperator< JacobianOperator >
1457 {
1458 typedef GalerkinOperator< Integrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType > BaseType;
1459
1460 typedef typename BaseType :: LocalGalerkinOperatorImplType LocalGalerkinOperatorImplType;
1461 public:
1462 typedef JacobianOperator JacobianOperatorType;
1463
1464 typedef typename BaseType::DomainFunctionType DomainFunctionType;
1465 typedef typename BaseType::RangeFunctionType RangeFunctionType;
1466 typedef typename DomainFunctionType::DiscreteFunctionSpaceType DomainDiscreteFunctionSpaceType;
1467 typedef typename RangeFunctionType::DiscreteFunctionSpaceType RangeDiscreteFunctionSpaceType;
1468
1469 typedef DiagonalAndNeighborStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalAndNeighborStencilType;
1470 typedef DiagonalStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalStencilType;
1471
1472 typedef typename BaseType::GridPartType GridPartType;
1473
1474 template< class... Args >
1475 explicit DifferentiableGalerkinOperator ( const DomainDiscreteFunctionSpaceType &dSpace,
1476 const RangeDiscreteFunctionSpaceType &rSpace,
1477 Args &&... args )
1478 : BaseType( rSpace.gridPart(), std::forward< Args >( args )... ),
1479 dSpace_(dSpace), rSpace_(rSpace),
1480 domainSpaceSequence_(dSpace.sequence()),
1481 rangeSpaceSequence_(rSpace.sequence()),
1482 stencilDAN_(), stencilD_()
1483 {
1484 if( hasSkeleton() )
1485 stencilDAN_.reset( new DiagonalAndNeighborStencilType( dSpace_, rSpace_ ) );
1486 else
1487 stencilD_.reset( new DiagonalStencilType( dSpace_, rSpace_ ) );
1488 }
1489
1490 virtual void jacobian ( const DomainFunctionType &u, JacobianOperatorType &jOp ) const final override
1491 {
1492 assemble( u, jOp );
1493 }
1494
1495 template< class GridFunction >
1496 void jacobian ( const GridFunction &u, JacobianOperatorType &jOp ) const
1497 {
1498 assemble( u, jOp );
1499 }
1500
1501 const DomainDiscreteFunctionSpaceType& domainSpace() const
1502 {
1503 return dSpace_;
1504 }
1505 const RangeDiscreteFunctionSpaceType& rangeSpace() const
1506 {
1507 return rSpace_;
1508 }
1509
1510 using BaseType::localOperator;
1511 using BaseType::nonlinear;
1512
1513 protected:
1514 using BaseType::op;
1515
1516 bool hasSkeleton() const
1517 {
1518 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1519 return op().hasSkeleton( integrands );
1520 }
1521
1522 void prepare( JacobianOperatorType& jOp ) const
1523 {
1524 if ( domainSpaceSequence_ != domainSpace().sequence()
1525 || rangeSpaceSequence_ != rangeSpace().sequence() )
1526 {
1527 domainSpaceSequence_ = domainSpace().sequence();
1528 rangeSpaceSequence_ = rangeSpace().sequence();
1529 if( hasSkeleton() )
1530 {
1531 assert( stencilDAN_ );
1532 stencilDAN_->update();
1533 }
1534 else
1535 {
1536 assert( stencilD_ );
1537 stencilD_->update();
1538 }
1539 }
1540 if( hasSkeleton() )
1541 jOp.reserve( *stencilDAN_ );
1542 else
1543 jOp.reserve( *stencilD_ );
1544 // set all entries to zero
1545 jOp.clear();
1546 }
1547
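        // Threaded assembly: each thread runs the local assembler over its part
        // of the iterators, with a std::shared_mutex handed to the implementation
        // to guard concurrent writes into jOp. If some component only supports
        // single-threaded use (SingleThreadModeError), the matrix is cleared and
        // the assembly is redone sequentially.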
1548 template < class GridFunction >
1549 void assemble( const GridFunction &u, JacobianOperatorType &jOp ) const
1550 {
1551 // reserve memory and clear entries
1552 {
1553 prepare( jOp );
1554 iterators_.update();
1555 }
1556
1557 std::shared_mutex mutex;
1558
1559 auto doAssemble = [this, &u, &jOp, &mutex] ()
1560 {
1561 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1562 this->op().assemble( u, jOp, this->iterators_, integrands, mutex );
1563 };
1564
1565 try {
1566 // execute in parallel
1567 MPIManager :: run ( doAssemble );
1568
1569 // update number of interior elements as sum over threads
1570 gridSizeInterior_ = Impl::accumulateGridSize( this->opImpl_ );
1571 }
1572 catch ( const SingleThreadModeError& e )
1573 {
1574 // redo the assembly sequentially since the threaded run failed
1575 jOp.clear();
1576 std::tuple< const LocalGalerkinOperatorImplType& > integrands( localOperator() );
1577 op().assemble( u, jOp, iterators_, integrands );
1578 // update number of interior elements (single-thread fallback run)
1579 gridSizeInterior_ = op().gridSizeInterior();
1580 }
1581
1582 // note: the assembly above bypasses the LocalContribution mechanism,
1583 // so flushAssembly() has to be called explicitly
1584 jOp.flushAssembly();
1585 }
1586
1587 using BaseType::iterators_;
1588 using BaseType::gridSizeInterior_;
1589
1590 const DomainDiscreteFunctionSpaceType &dSpace_;
1591 const RangeDiscreteFunctionSpaceType &rSpace_;
1592
1593 mutable int domainSpaceSequence_, rangeSpaceSequence_;
1594
1595 mutable std::unique_ptr< DiagonalAndNeighborStencilType > stencilDAN_;
1596 mutable std::unique_ptr< DiagonalStencilType > stencilD_;
1597 };
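      // Hedged usage sketch (placeholder names, not taken from this header):
      // MyIntegrands, LinearOperatorType, space, u and jOp are assumed to be
      // defined elsewhere; only the constructor and jacobian() interface shown
      // above are used.
      //
      //   using DiffOp = Dune::Fem::DifferentiableGalerkinOperator< MyIntegrands,
      //                                                             LinearOperatorType >;
      //   DiffOp diffOp( space, space, MyIntegrands( /* model data */ ) );
      //   diffOp.jacobian( u, jOp );  // assemble the linearization around u into jOp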
1598
1599
1600
1601 // AutomaticDifferenceGalerkinOperator
1602 // -----------------------------------
1603
1604 template< class Integrands, class DomainFunction, class RangeFunction >
1605 class AutomaticDifferenceGalerkinOperator
1606 : public GalerkinOperator< Integrands, DomainFunction, RangeFunction >,
1607 public AutomaticDifferenceOperator< DomainFunction, RangeFunction >
1608 {
1609 typedef GalerkinOperator< Integrands, DomainFunction, RangeFunction > BaseType;
1610 typedef AutomaticDifferenceOperator< DomainFunction, RangeFunction > AutomaticDifferenceOperatorType;
1611
1612 public:
1613 typedef typename BaseType::GridPartType GridPartType;
1614
1615 template< class... Args >
1616 explicit AutomaticDifferenceGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
1617 : BaseType( gridPart, std::forward< Args >( args )... ), AutomaticDifferenceOperatorType()
1618 {}
1619 };
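      // AutomaticDifferenceGalerkinOperator needs no assembled matrix type: the
      // linearization is provided by the AutomaticDifferenceOperator base class
      // (difference-quotient based). A hedged construction sketch, with
      // MyIntegrands, DiscreteFunctionType and space as placeholders:
      //
      //   Dune::Fem::AutomaticDifferenceGalerkinOperator< MyIntegrands,
      //       DiscreteFunctionType, DiscreteFunctionType >
      //     adOp( space.gridPart(), MyIntegrands( /* model data */ ) );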
1620
1621
1622
1623 // ModelDifferentiableGalerkinOperator
1624 // -----------------------------------
1625
1626 template < class LinearOperator, class ModelIntegrands >
1627 struct ModelDifferentiableGalerkinOperator
1628 : public DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator >
1629 {
1630 typedef DifferentiableGalerkinOperator< ModelIntegrands, LinearOperator > BaseType;
1631
1632 typedef typename ModelIntegrands::ModelType ModelType;
1633
1634 typedef typename LinearOperator::DomainFunctionType RangeFunctionType;
1635 typedef typename LinearOperator::RangeSpaceType DiscreteFunctionSpaceType;
1636
1637 ModelDifferentiableGalerkinOperator ( ModelType &model, const DiscreteFunctionSpaceType &dfSpace )
1638 : BaseType( dfSpace.gridPart(), model )
1639 {}
1640
1641 template< class GridFunction >
1642 void apply ( const GridFunction &u, RangeFunctionType &w ) const
1643 {
1644 (*this)( u, w );
1645 }
1646
1647 template< class GridFunction >
1648 void apply ( const GridFunction &u, LinearOperator &jOp ) const
1649 {
1650 (*this).jacobian( u, jOp );
1651 }
1652 };
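      // ModelDifferentiableGalerkinOperator is a thin convenience wrapper: the
      // apply() overloads forward to operator() for residual evaluation and to
      // jacobian() for assembly into the given LinearOperator.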
1653
1654 namespace Impl
1655 {
1656
1657 // GalerkinSchemeTraits and GalerkinSchemeImpl
1658 // --------------------------------------------
1659 template< class Integrands, class LinearOperator, bool addDirichletBC,
1660 template <class,class> class DifferentiableGalerkinOperatorImpl >
1661 struct GalerkinSchemeTraits
1662 {
1663 template <class O, bool addDBC>
1664 struct DirichletBlockSelector { using type = void; };
1665 template <class O>
1666 struct DirichletBlockSelector<O,true> { using type = typename O::DirichletBlockVector; };
1667
1668 using DifferentiableOperatorType = std::conditional_t< addDirichletBC,
1669 DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1670 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator > >;
1671 using DirichletBlockVector = typename DirichletBlockSelector<
1672 DirichletWrapperOperator<
1673 DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >>,
1674 addDirichletBC>::type;
1675
1676 typedef DifferentiableOperatorType type;
1677 };
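        // The traits resolve the operator type at compile time:
        //   addDirichletBC == false : type = DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator >
        //   addDirichletBC == true  : type = DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, LinearOperator > >
        // DirichletBlockVector is only a real type in the Dirichlet case and void otherwise.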
1678
1679 template< class Integrands, class LinearOperator, class LinearInverseOperator, bool addDirichletBC,
1680 template <class,class> class DifferentiableGalerkinOperatorImpl = DifferentiableGalerkinOperator >
1681 struct GalerkinSchemeImpl : public FemScheme< typename
1682 GalerkinSchemeTraits< Integrands, LinearOperator,
1683 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1684 LinearInverseOperator > // LinearInverseOperator
1685 {
1686 typedef FemScheme< typename GalerkinSchemeTraits< Integrands, LinearOperator,
1687 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
1688 LinearInverseOperator > // LinearInverseOperator
1689 BaseType;
1690
1691 typedef typename BaseType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
1692
1693 GalerkinSchemeImpl ( const DiscreteFunctionSpaceType &dfSpace,
1694 const Integrands &integrands,
1695 const ParameterReader& parameter = Parameter::container() )
1696 : BaseType(dfSpace,
1697 parameter,
1698 std::move(integrands))
1699 {}
1700 };
1701
1702 } // end namespace Impl
1703
1704 // GalerkinScheme
1705 // --------------
1706
1707 template< class Integrands, class LinearOperator, class InverseOperator, bool addDirichletBC >
1708 using GalerkinScheme = Impl::GalerkinSchemeImpl< Integrands, LinearOperator, InverseOperator, addDirichletBC,
1709 DifferentiableGalerkinOperator >;
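    // Hedged usage sketch for the GalerkinScheme alias (placeholder names, not
    // taken from this header): MyIntegrands, LinearOperatorType,
    // InverseOperatorType and space are assumed to be defined elsewhere; only
    // the GalerkinSchemeImpl constructor shown above is used.
    //
    //   using Scheme = Dune::Fem::GalerkinScheme< MyIntegrands, LinearOperatorType,
    //                                             InverseOperatorType,
    //                                             /* addDirichletBC = */ false >;
    //   Scheme scheme( space, MyIntegrands( /* model data */ ) );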
1710
1711 } // namespace Fem
1712
1713} // namespace Dune
1714
1715#endif // #ifndef DUNE_FEM_SCHEMES_GALERKIN_HH