DUNE-FEM (unstable)

masslumping.hh
#ifndef DUNE_FEM_SCHEMES_MASSLUMPING_HH
#define DUNE_FEM_SCHEMES_MASSLUMPING_HH

// fem includes
#include <dune/fem/schemes/galerkin.hh>
#include <dune/fem/function/localfunction/temporary.hh>
#include <dune/fem/common/bindguard.hh>
#include <dune/fem/quadrature/lumpingquadrature.hh>

namespace Dune
{

  namespace Fem
  {

#if HAVE_PETSC
    // forward declaration of PetscLinearOperator
    template <typename DomainFunction, typename RangeFunction >
    class PetscLinearOperator ;
#endif

    // MassLumpingOperator
    // -------------------

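    // MassLumpingOperator combines a standard Galerkin part (Integrands) with a
    // mass part (MassIntegrands) that is integrated with a lumping quadrature
    // (see lumpingquadrature.hh). The lumping quadrature evaluates the mass
    // integrands at a fixed set of lumping points, so that for Lagrange-type
    // spaces the mass contribution to the system matrix becomes diagonal.
    // The mass model must not contain skeleton terms; the constructor enforces this.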
    template< class Integrands, class MassIntegrands, class DomainFunction, class RangeFunction = DomainFunction >
    struct MassLumpingOperator
      : public virtual Operator< DomainFunction, RangeFunction >
    {
      typedef DomainFunction DomainFunctionType;
      typedef RangeFunction RangeFunctionType;

      static_assert( std::is_same< typename DomainFunctionType::GridPartType, typename RangeFunctionType::GridPartType >::value, "DomainFunction and RangeFunction must be defined on the same grid part." );

      typedef typename RangeFunctionType::GridPartType GridPartType;
      typedef ThreadIterator< GridPartType > ThreadIteratorType;

      typedef Impl::GalerkinOperator< GridPartType > GalerkinOperatorImplType;
      typedef Impl::LocalGalerkinOperator< Integrands > LocalGalerkinOperatorImplType;

      template <class Space>
      struct LumpingQuadratureSelector
      {
        typedef typename Space :: GridPartType GridPartType;
        typedef CachingLumpingQuadrature< GridPartType, 0 > InteriorQuadratureType;

        // not needed, since lumping is only on element terms
        typedef CachingQuadrature< GridPartType, 1, Capabilities::DefaultQuadrature< Space > :: template DefaultQuadratureTraits > SurfaceQuadratureType;
      };

      // define Galerkin operator with different quadratures (i.e. LumpingQuadrature)
      typedef Impl::LocalGalerkinOperator< MassIntegrands, LumpingQuadratureSelector > MassOperatorImplType;

      typedef typename RangeFunctionType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;

      template< class... Args >
      explicit MassLumpingOperator ( const GridPartType &gridPart,
                                     const Integrands& integrands,
                                     const MassIntegrands& massIntegrands,
                                     Args &&... args )
        : iterators_( gridPart ),
          opImpl_( gridPart ),
          localOp_( gridPart, std::move( integrands ), std::forward< Args >( args )... ),
          mass_( gridPart, std::move( massIntegrands ), std::forward< Args >( args )... ),
          gridSizeInterior_( 0 ),
          communicate_( true )
      {
        if( mass().hasSkeleton() )
          DUNE_THROW(InvalidStateException,"MassLumpingOperator: Mass model cannot have a skeleton term");
      }

      void setCommunicate( const bool communicate )
      {
        communicate_ = communicate;
        if( ! communicate_ && Dune::Fem::Parameter::verbose() )
        {
          std::cout << "MassLumpingOperator::setCommunicate: communicate was disabled!" << std::endl;
        }
      }

      void setQuadratureOrders(unsigned int interior, unsigned int surface)
      {
        // only set quadrature orders for Galerkin part, lumping quadratures are fixed
        size_t size = localOp_.size();
        for( size_t i=0; i<size; ++i )
          localOp_[ i ].setQuadratureOrders(interior,surface);
      }

      virtual void operator() ( const DomainFunctionType &u, RangeFunctionType &w ) const final override
      {
        evaluate( u, w );
      }

      template< class GridFunction >
      void operator() ( const GridFunction &u, RangeFunctionType &w ) const
      {
        evaluate( u, w );
      }

      const GridPartType &gridPart () const { return op().gridPart(); }

      typedef Integrands ModelType;
      typedef Integrands DirichletModelType;
      ModelType &model() const { return localOperator().model(); }

      [[deprecated("Use localOperator instead!")]]
      const LocalGalerkinOperatorImplType& impl() const { return localOperator(); }

      const LocalGalerkinOperatorImplType& localOperator() const { return (*localOp_); }
      const MassOperatorImplType& mass() const { return (*mass_); }

      std::size_t gridSizeInterior () const { return gridSizeInterior_; }

    protected:
      const GalerkinOperatorImplType& op() const { return (*opImpl_); }

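      // evaluate: adds the Galerkin and the mass lumped contributions to w.
      // The grid traversal is run thread-parallel through MPIManager::run, with a
      // shared mutex guarding concurrent writes; if a SingleThreadModeError is
      // thrown, the evaluation is redone in single thread mode. Afterwards the
      // result is communicated, unless setCommunicate( false ) was called.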
      template< class GridFunction >
      void evaluate( const GridFunction &u, RangeFunctionType &w ) const
      {
        iterators_.update();
        w.clear();

        std::shared_mutex mutex;

        auto doEval = [this, &u, &w, &mutex] ()
        {
          // version with locking
          std::tuple< const LocalGalerkinOperatorImplType&,
                      const MassOperatorImplType& > integrands( localOperator(), mass() );

          // add galerkin and mass lumped part
          this->op().evaluate( u, w, this->iterators_, integrands, mutex );
        };

        bool singleThreadModeError = false ;

        try {
          // execute in parallel
          MPIManager :: run ( doEval );

          // update number of interior elements as sum over threads
          gridSizeInterior_ = Impl::accumulateGridSize( opImpl_ );
        }
        catch ( const SingleThreadModeError& e )
        {
          singleThreadModeError = true;
        }

        // if an error occurred, redo the whole evaluation
        if( singleThreadModeError )
        {
          // reset w from previous entries
          w.clear();

          // re-run in single thread mode if previous attempt failed
          std::tuple< const LocalGalerkinOperatorImplType&,
                      const MassOperatorImplType& > integrands( localOperator(), mass() );
          // add galerkin part and mass part
          op().evaluate( u, w, iterators_, integrands );

          // update number of interior elements
          gridSizeInterior_ = op().gridSizeInterior();
        }

        // synchronize data
        if( communicate_ )
          w.communicate();
      }

      // GalerkinOperator implementation (see galerkin.hh)
      mutable ThreadIteratorType iterators_;
      ThreadSafeValue< GalerkinOperatorImplType > opImpl_;
      ThreadSafeValue< LocalGalerkinOperatorImplType > localOp_;
      ThreadSafeValue< MassOperatorImplType > mass_;

      mutable std::size_t gridSizeInterior_;
      bool communicate_;
    };


    // MassLumpingDifferentiableOperator
    // ---------------------------------

    template< class Integrands, class MassIntegrands, class JacobianOperator >
    class MassLumpingDifferentiableOperator
      : public MassLumpingOperator< Integrands, MassIntegrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType >,
        public DifferentiableOperator< JacobianOperator >
    {
      typedef MassLumpingOperator< Integrands, MassIntegrands, typename JacobianOperator::DomainFunctionType, typename JacobianOperator::RangeFunctionType > BaseType;

    public:
      typedef typename BaseType :: LocalGalerkinOperatorImplType LocalGalerkinOperatorImplType;
      typedef typename BaseType :: MassOperatorImplType MassOperatorImplType;

      typedef JacobianOperator JacobianOperatorType;

      typedef typename BaseType::DomainFunctionType DomainFunctionType;
      typedef typename BaseType::RangeFunctionType RangeFunctionType;
      typedef typename DomainFunctionType::DiscreteFunctionSpaceType DomainDiscreteFunctionSpaceType;
      typedef typename RangeFunctionType::DiscreteFunctionSpaceType RangeDiscreteFunctionSpaceType;

      typedef DiagonalAndNeighborStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalAndNeighborStencilType;
      typedef DiagonalStencil< DomainDiscreteFunctionSpaceType, RangeDiscreteFunctionSpaceType > DiagonalStencilType;
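      // the diagonal-and-neighbor stencil is only required if the combined
      // integrands contain skeleton (interface) terms; otherwise the cheaper
      // diagonal stencil is sufficient (see the constructor and prepare())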

      typedef typename BaseType::GridPartType GridPartType;

      template< class... Args >
      explicit MassLumpingDifferentiableOperator ( const DomainDiscreteFunctionSpaceType &dSpace,
                                                   const RangeDiscreteFunctionSpaceType &rSpace,
                                                   Args &&... args )
        : BaseType( rSpace.gridPart(), std::forward< Args >( args )... ),
          dSpace_(dSpace),
          rSpace_(rSpace),
          domainSpaceSequence_(dSpace.sequence()),
          rangeSpaceSequence_(rSpace.sequence()),
          stencilDAN_(), stencilD_()
      {
        if( hasSkeleton() )
          stencilDAN_.reset( new DiagonalAndNeighborStencilType( dSpace_, rSpace_ ) );
        else
          stencilD_.reset( new DiagonalStencilType( dSpace_, rSpace_ ) );
      }

      virtual void jacobian ( const DomainFunctionType &u, JacobianOperatorType &jOp ) const final override
      {
        // assemble Jacobian, same as GalerkinOperator
        assemble( u, jOp );
      }

      template< class GridFunction >
      void jacobian ( const GridFunction &u, JacobianOperatorType &jOp ) const
      {
        assemble( u, jOp );
      }

      const DomainDiscreteFunctionSpaceType& domainSpace() const
      {
        return dSpace_;
      }
      const RangeDiscreteFunctionSpaceType& rangeSpace() const
      {
        return rSpace_;
      }
      // needed for DGHelmholtz operator
      const RangeDiscreteFunctionSpaceType& space() const
      {
        return rangeSpace();
      }

      using BaseType::gridPart;
      using BaseType::localOperator;

    protected:
      bool hasSkeleton() const
      {
        std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
        return op().hasSkeleton( integrands );
      }

      using BaseType::op;
      using BaseType::mass;
      using BaseType::iterators_;
      using BaseType::gridSizeInterior_;
      void prepare( JacobianOperatorType& jOp ) const
      {
        if ( domainSpaceSequence_ != domainSpace().sequence()
             || rangeSpaceSequence_ != rangeSpace().sequence() )
        {
          domainSpaceSequence_ = domainSpace().sequence();
          rangeSpaceSequence_ = rangeSpace().sequence();
          if( hasSkeleton() )
          {
            assert( stencilDAN_ );
            stencilDAN_->update();
          }
          else
          {
            assert( stencilD_ );
            stencilD_->update();
          }
        }

        if( hasSkeleton() )
          jOp.reserve( *stencilDAN_ );
        else
          jOp.reserve( *stencilD_ );
        // set all entries to zero
        jOp.clear();
      }

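      // assemble: thread-parallel Jacobian assembly of the Galerkin and mass
      // lumped contributions, with the same single-thread fallback as evaluate();
      // finishes by calling flushAssembly() on the linear operator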
      template < class GridFunction >
      void assemble( const GridFunction &u, JacobianOperatorType &jOp ) const
      {
        prepare( jOp );
        iterators_.update();

        bool singleThreadModeError = false;
        std::shared_mutex mutex;

        auto doAssemble = [this, &u, &jOp, &mutex] ()
        {
          // assemble Jacobian, same as GalerkinOperator
          std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
          this->op().assemble( u, jOp, this->iterators_, integrands, mutex );
        };

        try {
          // execute in parallel
          MPIManager :: run ( doAssemble );

          // update number of interior elements as sum over threads
          gridSizeInterior_ = Impl::accumulateGridSize( this->opImpl_ );
        }
        catch ( const SingleThreadModeError& e )
        {
          singleThreadModeError = true;
        }

        if (singleThreadModeError)
        {
          // redo matrix assembly since it failed
          jOp.clear();
          // add galerkin and mass terms
          std::tuple< const LocalGalerkinOperatorImplType&, const MassOperatorImplType& > integrands( localOperator(), mass() );
          op().assemble( u, jOp, iterators_, integrands );

          // update number of interior elements
          gridSizeInterior_ = op().gridSizeInterior();
        }

        // note: assembly was done without local contributions,
        // so we need to call flushAssembly here
        jOp.flushAssembly();
      }

      const DomainDiscreteFunctionSpaceType &dSpace_;
      const RangeDiscreteFunctionSpaceType &rSpace_;
      mutable int domainSpaceSequence_, rangeSpaceSequence_;

      mutable std::unique_ptr< DiagonalAndNeighborStencilType > stencilDAN_;
      mutable std::unique_ptr< DiagonalStencilType > stencilD_;
    };


    // MassLumpingAutomaticDifferenceGalerkinOperator
    // ----------------------------------------------

    template< class Integrands, class MassIntegrands, class DomainFunction, class RangeFunction >
    class MassLumpingAutomaticDifferenceGalerkinOperator
      : public MassLumpingOperator< Integrands, MassIntegrands, DomainFunction, RangeFunction >,
        public AutomaticDifferenceOperator< DomainFunction, RangeFunction >
    {
      typedef MassLumpingOperator< Integrands, MassIntegrands, DomainFunction, RangeFunction > BaseType;
      typedef AutomaticDifferenceOperator< DomainFunction, RangeFunction > AutomaticDifferenceOperatorType;

    public:
      typedef typename BaseType::GridPartType GridPartType;

      template< class... Args >
      explicit MassLumpingAutomaticDifferenceGalerkinOperator ( const GridPartType &gridPart, Args &&... args )
        : BaseType( gridPart, std::forward< Args >( args )... ), AutomaticDifferenceOperatorType()
      {}
    };


    namespace Impl
    {

      // MassLumpingSchemeTraits
      // -----------------------
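      // selects the differentiable operator type for the scheme: if addDirichletBC
      // is true, the operator is wrapped in a DirichletWrapperOperator and the
      // corresponding DirichletBlockVector type is exported; otherwise the plain
      // operator is used and DirichletBlockVector is void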
      template< class Integrands, class MassIntegrands,
                class LinearOperator, bool addDirichletBC,
                template <class,class,class> class DifferentiableGalerkinOperatorImpl >
      struct MassLumpingSchemeTraits
      {
        template <class O, bool addDBC>
        struct DirichletBlockSelector { using type = void; };
        template <class O>
        struct DirichletBlockSelector<O,true> { using type = typename O::DirichletBlockVector; };

        using DifferentiableOperatorType = std::conditional_t< addDirichletBC,
              DirichletWrapperOperator< DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator >>,
              DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator > >;

        using DirichletBlockVector = typename DirichletBlockSelector<
              DirichletWrapperOperator<
                  DifferentiableGalerkinOperatorImpl< Integrands, MassIntegrands, LinearOperator >>,
              addDirichletBC >::type;

        typedef DifferentiableOperatorType type;
      };


      // MassLumpingSchemeImpl
      // ---------------------
      template< class Integrands, class MassIntegrands,
                class LinearOperator, class LinearInverseOperator, bool addDirichletBC,
                template <class,class,class> class DifferentiableGalerkinOperatorImpl = MassLumpingDifferentiableOperator >
      struct MassLumpingSchemeImpl
        : public FemScheme< typename MassLumpingSchemeTraits< Integrands, MassIntegrands, LinearOperator,
                                addDirichletBC, DifferentiableGalerkinOperatorImpl >::type, // Operator
                            LinearInverseOperator >                                         // LinearInverseOperator
      {
        typedef FemScheme< typename MassLumpingSchemeTraits< Integrands, MassIntegrands, LinearOperator,
                               addDirichletBC, DifferentiableGalerkinOperatorImpl >::type, // Operator
                           LinearInverseOperator >                                         // LinearInverseOperator
          BaseType;

        // type of discrete function space used with this scheme
        typedef typename BaseType :: DiscreteFunctionSpaceType DiscreteFunctionSpaceType;

        // needed for registerSchemeConstructor (Python export)
        typedef MassIntegrands MassModelType;

        MassLumpingSchemeImpl ( const DiscreteFunctionSpaceType &dfSpace,
                                const Integrands &integrands,
                                const MassIntegrands& massIntegrands,
                                const ParameterReader& parameter = Parameter::container() )
          : BaseType( dfSpace,
                      parameter,
                      std::move(integrands),
                      std::move(massIntegrands) )
        {}
      };

    } // end namespace Impl


    // MassLumpingScheme
    // -----------------

    template< class Integrands, class MassIntegrands, class LinearOperator, class InverseOperator, bool addDirichletBC >
    using MassLumpingScheme = Impl::MassLumpingSchemeImpl< Integrands, MassIntegrands, LinearOperator, InverseOperator, addDirichletBC,
                                                           MassLumpingDifferentiableOperator >;

  } // namespace Fem

} // namespace Dune

#endif // #ifndef DUNE_FEM_SCHEMES_MASSLUMPING_HH
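
A minimal usage sketch (an illustration only, not part of the header): it assumes user-provided integrands types Model and MassModel, a discrete function space space, and compatible LinearOperatorType / InverseOperatorType; the solve and assemble interface is inherited from the FemScheme base class (femscheme.hh).

    // hypothetical types: Model, MassModel, LinearOperatorType, InverseOperatorType
    typedef Dune::Fem::MassLumpingScheme< Model, MassModel,
                                          LinearOperatorType, InverseOperatorType,
                                          /* addDirichletBC = */ false > SchemeType;
    SchemeType scheme( space, model, massModel );  // optional ParameterReader as fourth argument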