DUNE-FEM (unstable)

masslumping.hh
1#ifndef DUNE_FEM_SCHEMES_MASSLUMPING_HH
2#define DUNE_FEM_SCHEMES_MASSLUMPING_HH
3
4// fem includes
5#include <dune/fem/schemes/galerkin.hh>
6#include <dune/fem/function/localfunction/temporary.hh>
7#include <dune/fem/common/bindguard.hh>
8#include <dune/fem/quadrature/lumpingquadrature.hh>
9
10namespace Dune
11{
12
13 namespace Fem
14 {
15
16#if HAVE_PETSC
17 //forward declaration of PetscLinearOperator
18 template <typename DomainFunction, typename RangeFunction >
19 class PetscLinearOperator ;
20#endif
21
22 // MassLumpingOperator
23 // -------------------
24
25 template< class Integrands, class MassIntegrands, class DomainFunction, class RangeFunction = DomainFunction >
26 struct MassLumpingOperator
27 : public virtual Operator< DomainFunction, RangeFunction >
28 {
29 typedef DomainFunction DomainFunctionType;
30 typedef RangeFunction RangeFunctionType;
31
32
33 static_assert( std::is_same< typename DomainFunctionType::GridPartType, typename RangeFunctionType::GridPartType >::value, "DomainFunction and RangeFunction must be defined on the same grid part." );
34
35 typedef typename RangeFunctionType::GridPartType GridPartType;
36 typedef ThreadIterator< GridPartType > ThreadIteratorType;
37
38 typedef Impl::GalerkinOperator< GridPartType > GalerkinOperatorImplType;
39 typedef Impl::LocalGalerkinOperator< Integrands > LocalGalerkinOperatorImplType;
40
41 template <class Space>
42 struct LumpingQuadratureSelector
43 {
44 typedef typename Space :: GridPartType GridPartType;
45 typedef CachingLumpingQuadrature< GridPartType, 0 > InteriorQuadratureType;
46
47 // not needed, since lumping is only on element terms
48 typedef CachingQuadrature< GridPartType, 1, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > SurfaceQuadratureType;
49 };
50
51 // define Galerkin operator with different quadratures (i.e. LumpingQuadrature)
52 typedef Impl::LocalGalerkinOperator< MassIntegrands, LumpingQuadratureSelector > MassOperatorImplType;
53
54 typedef typename RangeFunctionType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
55
56 template< class... Args >
57 explicit MassLumpingOperator ( const GridPartType &gridPart,
58 const Integrands& integrands,
59 const MassIntegrands& massIntegrands,
60 Args &&... args )
61 : iterators_( gridPart ),
62 opImpl_( gridPart ),
63 localOp_( gridPart, std::move( integrands ), std::forward< Args >( args )... ),
64 mass_( gridPart, std::move( massIntegrands ), std::forward< Args >( args )... ),
65 gridSizeInterior_( 0 ),
66 communicate_( true )
67 {
68 if( mass().hasSkeleton() )
69 DUNE_THROW(InvalidStateException,"MassLumpingOperator: Mass model cannot have a skeleton term");
70
71 // volume order (same as space for lumping)
72 auto interiorOrder = [] (const int order) { return order; };
73 // surface order does not matter for this part
74 auto surfaceOrder = [] (const int order) { return 0; };
75
76 // set quadrature orders mass part to be fixed!
77 size_t size = mass_.size();
78 for( size_t i=0; i<size; ++i )
79 mass_[ i ].setQuadratureOrderFunctions( interiorOrder, surfaceOrder );
80 }
81
82 void setCommunicate( const bool communicate )
83 {
84 communicate_ = communicate;
85 if( ! communicate_ && Dune::Fem::Parameter::verbose() )
86 {
87 std::cout << "MassLumpingOperator::setCommunicate: communicate was disabled!" << std::endl;
88 }
89 }
90
91 void setQuadratureOrders(unsigned int interior, unsigned int surface)
92 {
93 // only set quadrature orders for Galerkin part, lumping quadratures are fixed
94 size_t size = localOp_.size();
95 for( size_t i=0; i<size; ++i )
96 localOp_[ i ].setQuadratureOrders(interior,surface);
97 }
98
99 virtual void operator() ( const DomainFunctionType &u, RangeFunctionType &w ) const final override
100 {
101 evaluate( u, w );
102 }
103
104 template< class GridFunction >
105 void operator() ( const GridFunction &u, RangeFunctionType &w ) const
106 {
107 evaluate( u, w );
108 }
109
110 const GridPartType &gridPart () const { return op().gridPart(); }
111
112 typedef Integrands ModelType;
113 typedef Integrands DirichletModelType;
114 ModelType &model() const { return localOperator().model(); }
115
116 [[deprecated("Use localOperator instead!")]]
117 const LocalGalerkinOperatorImplType& impl() const { return localOperator(); }
118
120 const LocalGalerkinOperatorImplType& localOperator() const { return (*localOp_); }
121 const MassOperatorImplType& mass() const { return (*mass_); }
122
123 std::size_t gridSizeInterior () const { return gridSizeInterior_; }
124
125 protected:
126 const GalerkinOperatorImplType& op() const { return (*opImpl_); }
127
128 template< class GridFunction >
129 void evaluate( const GridFunction &u, RangeFunctionType &w ) const
130 {
131 iterators_.update();
132 w.clear();
133
134 std::shared_mutex mutex;
135
136 auto doEval = [this, &u, &w, &mutex] ()
137 {
138 // version with locking
139 std::tuple< const LocalGalerkinOperatorImplType&,
140 const MassOperatorImplType& > integrands( localOperator(), mass() );
141
142
143 // add galerkin and mass lumped part
144 this->op().evaluate( u, w, this->iterators_, integrands, mutex );
145 };
146
147 bool singleThreadModeError = false ;
148
149 try {
150 // execute in parallel
151 MPIManager :: run ( doEval );
152
153 // update number of interior elements as sum over threads
154 gridSizeInterior_ = Impl::accumulateGridSize( opImpl_ );
155 }
156 catch ( const SingleThreadModeError& e )
157 {
158 singleThreadModeError = true;
159 }
160
161 // if error occurred, redo the whole evaluation
162 if( singleThreadModeError )
163 {
164 // reset w from previous entries
165 w.clear();
166
167 // re-run in single thread mode if previous attempt failed
168 std::tuple< const LocalGalerkinOperatorImplType&,
169 const MassOperatorImplType& > integrands( localOperator(), mass() );
170 // add galerkin part and mass part
171 op().evaluate( u, w, iterators_, integrands );
172
173 // update number of interior elements as sum over threads
174 gridSizeInterior_ = op().gridSizeInterior();
175
176 }
177
178 // synchronize data
179 if( communicate_ )
180 w.communicate();
181 }
182
183 // GalerkinOperator implementation (see galerkin.hh)
184 mutable ThreadIteratorType iterators_;
187 ThreadSafeValue< MassOperatorImplType > mass_;
188
189 mutable std::size_t gridSizeInterior_;
190 bool communicate_;
191 };
192
193
194
195 // DifferentiableMassLumpingOperator
196 // ---------------------------------
197
198 template< class Integrands, class MassIntegrands, class JacobianOperator >
199 class MassLumpingDifferentiableOperator
200 : public MassLumpingOperator< Integrands, MassIntegrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType >,
201 public DifferentiableOperator< JacobianOperator >
202 {
203 typedef MassLumpingOperator< Integrands, MassIntegrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType > BaseType;
204
205 public:
206 typedef typename BaseType :: LocalGalerkinOperatorImplType LocalGalerkinOperatorImplType;
207 typedef typename BaseType :: MassOperatorImplType MassOperatorImplType;
208
209 typedef JacobianOperator JacobianOperatorType;
210
211 typedef typename BaseType::DomainFunctionType DomainFunctionType;
212 typedef typename BaseType::RangeFunctionType RangeFunctionType;
213 typedef typename DomainFunctionType::DiscreteFunctionSpaceType DomainDiscreteFunctionSpaceType;
214 typedef typename RangeFunctionType::DiscreteFunctionSpaceType RangeDiscreteFunctionSpaceType;
215
216 typedef DiagonalAndNeighborStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalAndNeighborStencilType;
217 typedef DiagonalStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalStencilType;
218
219 typedef typename BaseType::GridPartType GridPartType;
220
221 template< class... Args >
222 explicit MassLumpingDifferentiableOperator ( const DomainDiscreteFunctionSpaceType &dSpace,
223 const RangeDiscreteFunctionSpaceType &rSpace,
224 Args &&... args )
225 : BaseType( rSpace.gridPart(), std::forward< Args >( args )... ),
226 dSpace_(dSpace),
227 rSpace_(rSpace),
228 domainSpaceSequence_(dSpace.sequence()),
229 rangeSpaceSequence_(rSpace.sequence()),
230 stencilDAN_(), stencilD_()
231 {
232 if( hasSkeleton() )
233 stencilDAN_.reset( new DiagonalAndNeighborStencilType( dSpace_, rSpace_ ) );
234 else
235 stencilD_.reset( new DiagonalStencilType( dSpace_, rSpace_ ) );
236 }
237
238 virtual void jacobian ( const DomainFunctionType &u, JacobianOperatorType &jOp ) const final override
239 {
240 // assemble Jacobian, same as GalerkinOperator
241 assemble( u, jOp );
242 }
243
244 template< class GridFunction >
245 void jacobian ( const GridFunction &u, JacobianOperatorType &jOp ) const
246 {
247 assemble( u, jOp );
248 }
249
250 const DomainDiscreteFunctionSpaceType& domainSpace() const
251 {
252 return dSpace_;
253 }
254 const RangeDiscreteFunctionSpaceType& rangeSpace() const
255 {
256 return rSpace_;
257 }
258 // needed for DGHelmholtz operator
259 const RangeDiscreteFunctionSpaceType& space() const
260 {
261 return rangeSpace();
262 }
263
264 using BaseType::gridPart;
265 using BaseType::localOperator;
266
267 protected:
268 bool hasSkeleton() const
269 {
270 std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
271 return op().hasSkeleton( integrands );
272 }
273
274 using BaseType::op;
275 using BaseType::mass;
276 using BaseType::iterators_;
277 using BaseType::gridSizeInterior_;
278
279 void prepare( JacobianOperatorType& jOp ) const
280 {
281 if ( domainSpaceSequence_ != domainSpace().sequence()
282 || rangeSpaceSequence_ != rangeSpace().sequence() )
283 {
284 domainSpaceSequence_ = domainSpace().sequence();
285 rangeSpaceSequence_ = rangeSpace().sequence();
286 if( hasSkeleton() )
287 {
288 assert( stencilDAN_ );
289 stencilDAN_->update();
290 }
291 else
292 {
293 assert( stencilD_ );
294 stencilD_->update();
295 }
296 }
297
298 if( hasSkeleton() )
299 jOp.reserve( *stencilDAN_ );
300 else
301 jOp.reserve( *stencilD_ );
302 // set all entries to zero
303 jOp.clear();
304 }
305
306 template < class GridFunction >
307 void assemble( const GridFunction &u, JacobianOperatorType &jOp ) const
308 {
309 prepare( jOp );
310 iterators_.update();
311
312 bool singleThreadModeError = false;
313 std::shared_mutex mutex;
314
315 auto doAssemble = [this, &u, &jOp, &mutex] ()
316 {
317 // assemble Jacobian, same as GalerkinOperator
318 std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
319 this->op().assemble( u, jOp, this->iterators_, integrands, mutex );
320 };
321
322 try {
323 // execute in parallel
324 MPIManager :: run ( doAssemble );
325
326 // update number of interior elements as sum over threads
327 gridSizeInterior_ = Impl::accumulateGridSize( this->opImpl_ );
328 }
329 catch ( const SingleThreadModeError& e )
330 {
331 singleThreadModeError = true;
332 }
333
334 if (singleThreadModeError)
335 {
336 // redo matrix assembly since it failed
337 jOp.clear();
338 // add galerkin terms
339 std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
340 op().assemble( u, jOp, iterators_, integrands );
341
342 // update number of interior elements as sum over threads
343 gridSizeInterior_ = op().gridSizeInterior();
344 }
345
346 // note: assembly done without local contributions so need
347 // to call flush assembly
348 jOp.flushAssembly();
349 }
350
351 const DomainDiscreteFunctionSpaceType &dSpace_;
352 const RangeDiscreteFunctionSpaceType &rSpace_;
353 mutable int domainSpaceSequence_, rangeSpaceSequence_;
354
355 mutable std::unique_ptr< DiagonalAndNeighborStencilType > stencilDAN_;
356 mutable std::unique_ptr< DiagonalStencilType > stencilD_;
357 };
358
359
360
361 // AutomaticDifferenceMassLumpingOperator
362 // --------------------------------------
363
364 template< class Integrands, class DomainFunction, class RangeFunction >
365 class MassLumpingAutomaticDifferenceGalerkinOperator
366 : public MassLumpingOperator< Integrands, DomainFunction, RangeFunction >,
367 public AutomaticDifferenceOperator< DomainFunction, RangeFunction >
368 {
369 typedef MassLumpingOperator< Integrands, DomainFunction, RangeFunction > BaseType;
370 typedef AutomaticDifferenceOperator< DomainFunction, RangeFunction > AutomaticDifferenceOperatorType;
371
372 public:
373 typedef typename BaseType::GridPartType GridPartType;
374
375 template< class... Args >
376 explicit MassLumpingAutomaticDifferenceGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
377 : BaseType( gridPart, std::forward< Args >( args )... ), AutomaticDifferenceOperatorType()
378 {}
379 };
380
381
382
383 namespace Impl
384 {
385
386 // MassLumpingSchemeImpl
387 // ---------------------
388 template< class Integrands, class MassIntegrands,
389 class LinearOperator, bool addDirichletBC,
390 template <class,class,class> class DifferentiableGalerkinOperatorImpl >
391 struct MassLumpingSchemeTraits
392 {
393 template <class O, bool addDBC>
394 struct DirichletBlockSelector { using type = void; };
395 template <class O>
396 struct DirichletBlockSelector<O,true> { using type = typename O::DirichletBlockVector; };
397
398 using DifferentiableOperatorType = std::conditional_t< addDirichletBC,
399 DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator >>,
400 DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator > >;
401
402 using DirichletBlockVector = typename DirichletBlockSelector<
403 DirichletWrapperOperator<
404 DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator >>,
405 addDirichletBC>::type;
406
407 typedef DifferentiableOperatorType type;
408 };
409
410
411 // MassLumpingSchemeImpl
412 // ---------------------
413 template< class Integrands, class MassIntegrands,
414 class LinearOperator, class LinearInverseOperator, bool addDirichletBC,
415 template <class,class,class> class DifferentiableGalerkinOperatorImpl = MassLumpingDifferentiableOperator >
416 struct MassLumpingSchemeImpl : public FemScheme<
417 typename MassLumpingSchemeTraits< Integrands, MassIntegrands, LinearOperator,
418 addDirichletBC, DifferentiableGalerkinOperatorImpl
419 > :: type, // Operator
420 LinearInverseOperator > // LinearInverseOperator
421 {
422 typedef FemScheme< typename MassLumpingSchemeTraits< Integrands, MassIntegrands, LinearOperator,
423 addDirichletBC, DifferentiableGalerkinOperatorImpl>::type, // Operator
424 LinearInverseOperator > // LinearInverseOperator
425 BaseType;
426
427 // type of discrete function space used with this scheme
428 typedef typename BaseType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;
429
430 // needed for registerSchemeConstructor (Python export)
431 typedef MassIntegrands MassModelType;
432
433 MassLumpingSchemeImpl ( const DiscreteFunctionSpaceType &dfSpace,
434 const Integrands &integrands,
435 const MassIntegrands& massIntegrands,
436 const ParameterReader& parameter = Parameter::container() )
437 : BaseType( dfSpace,
438 parameter,
439 std::move(integrands),
440 std::move(massIntegrands) )
441 {}
442 };
443
444 } // end namespace Impl
445
446 // MassLumpingScheme
447 // -----------------
448
449 template< class Integrands, class MassIntegrands, class LinearOperator, class InverseOperator, bool addDirichletBC >
450 using MassLumpingScheme = Impl::MassLumpingSchemeImpl< Integrands, MassIntegrands, LinearOperator, InverseOperator, addDirichletBC,
451 MassLumpingDifferentiableOperator >;
452
453 } // namespace Fem
454
455} // namespace Dune
456
457#endif // #ifndef DUNE_FEM_SCHEMES_MASSLUMPING_HH
static bool verbose()
obtain the cached value for fem.verbose with default verbosity level 2
Definition: parameter.hh:466
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
Dune namespace.
Definition: alignedallocator.hh:13
constexpr std::integral_constant< std::size_t, sizeof...(II)> size(std::integer_sequence< T, II... >)
Return the size of the sequence.
Definition: integersequence.hh:75
STL namespace.
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Nov 21, 23:30, 2024)