
dune-mmesh (1.4)

jacobian_iterative.hh
// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_PYTHON_MMESH_JACOBIAN_HH
#define DUNE_PYTHON_MMESH_JACOBIAN_HH

#include <array>
#include <cmath>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>

// headers providing DUNE_THROW, the index constants _0/_1, PriorityTag,
// stackobject_to_shared_ptr and the ISTL types used below
#include <dune/common/exceptions.hh>
#include <dune/common/indices.hh>
#include <dune/common/shared_ptr.hh>
#include <dune/common/typeutilities.hh>

#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/multitypeblockmatrix.hh>
#include <dune/istl/multitypeblockvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/scalarproducts.hh>
#include <dune/istl/solver.hh>

#include <dune/fem/operator/linear/spoperator.hh>

#include <dune/python/pybind11/functional.h>
#include <dune/python/pybind11/numpy.h>
#include <dune/python/pybind11/pybind11.h>
#include <dune/python/pybind11/stl.h>

namespace Dune
{

  namespace Python
  {

    namespace MMesh
    {
      using Dune::Indices::_0;
      using Dune::Indices::_1;
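
      // Fallback overload: find the intersection of `inside` whose outside
      // entity matches that of the given intersection from another grid part.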
      template< class GridPart, class Intersection, class Entity >
      const typename GridPart::IntersectionType convert( const GridPart& gridPart, const Intersection& intersection, const Entity& inside, Dune::PriorityTag<0> )
      {
        const Entity outside = gridPart.convert( intersection.outside() );

        for( auto is : intersections( gridPart, inside ) )
          if( is.outside() == outside )
            return is;

        DUNE_THROW( InvalidStateException, "Intersection not found!" );
      }

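      // Direct overload: the intersection is already convertible to the grid
      // part's intersection type and can be returned as-is.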
      template< class GridPart, class Intersection, class Entity >
      std::enable_if_t< std::is_convertible_v< Intersection, typename GridPart::IntersectionType >, const typename GridPart::IntersectionType >
      convert( const GridPart& gridPart, const Intersection& intersection, const Entity& inside, Dune::PriorityTag<1> )
      {
        return intersection;
      }

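      // Entry point: prefer the direct overload, fall back to the search above.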
      template< class GridPart, class Intersection, class Entity >
      const typename GridPart::IntersectionType convert( const GridPart& gridPart, const Intersection& intersection, const Entity& inside )
      {
        return convert( gridPart, intersection, inside, Dune::PriorityTag<1>{} );
      }

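      // Stencil for the coupling block B: couples every interface element with
      // the two bulk elements adjacent to the corresponding bulk intersection.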
      template< class DomainSpace, class RangeSpace, class Partition = Dune::Partitions::All >
      struct NeighborInterfaceStencil : public Fem::Stencil< DomainSpace, RangeSpace >
      {
        typedef Fem::Stencil< DomainSpace, RangeSpace > BaseType;

        typedef typename DomainSpace::GridPartType DomainGridPart;
        typedef typename RangeSpace::GridPartType RangeGridPart;

      public:
        NeighborInterfaceStencil( const DomainSpace &dSpace, const RangeSpace &rSpace )
          : BaseType( dSpace, rSpace ),
            domainGridPart_( dSpace.gridPart() ),
            rangeGridPart_( rSpace.gridPart() )
        {}

        void setupStencil() const
        {
          const auto& mmesh = domainGridPart_.grid().getMMesh();
          for( const auto& entity : elements( domainGridPart_, Partition{} ) )
          {
            const auto mmeshIntersection = mmesh.asIntersection( entity );
            const auto inside = rangeGridPart_.convert( mmeshIntersection.inside() );
            const auto intersection = convert( rangeGridPart_, mmeshIntersection, inside );

            BaseType::fill( entity, intersection.inside() );
            BaseType::fill( entity, intersection.outside() );
          }
        }

      private:
        const DomainGridPart& domainGridPart_;
        const RangeGridPart& rangeGridPart_;
      };

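      // Stencil for the coupling block C: couples every bulk element with its
      // adjacent interface elements.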
      template< class DomainSpace, class RangeSpace, class Partition = Dune::Partitions::All >
      struct InterfaceNeighborStencil : public Fem::Stencil< DomainSpace, RangeSpace >
      {
        typedef Fem::Stencil< DomainSpace, RangeSpace > BaseType;

        typedef typename DomainSpace::GridPartType DomainGridPart;
        typedef typename RangeSpace::GridPartType RangeGridPart;

      public:
        InterfaceNeighborStencil( const DomainSpace &dSpace, const RangeSpace &rSpace )
          : BaseType( dSpace, rSpace ),
            domainGridPart_( dSpace.gridPart() ),
            rangeGridPart_( rSpace.gridPart() )
        {}

        void setupStencil() const
        {
          for( const auto& entity : elements( domainGridPart_, Partition{} ) )
          {
            for( const auto& intersection : intersections( domainGridPart_, entity ) )
            {
              if( intersection.neighbor() && domainGridPart_.grid().isInterface( intersection ) )
              {
                auto ientity = domainGridPart_.grid().asInterfaceEntity( intersection );
                BaseType::fill( entity, ientity );
              }
            }
          }
        }

      private:
        const DomainGridPart& domainGridPart_;
        const RangeGridPart& rangeGridPart_;
      };

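      // Scalar product on mixed-dimensional block vectors: copies the blocks
      // into bulk and interface discrete functions and reuses their parallel
      // scalar products.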
      template< class X, class BulkDF, class InterfaceDF >
      class ParallelizedScalarProduct : public ScalarProduct< X >
      {
      public:
        typedef X domain_type;
        typedef typename X::field_type field_type;
        typedef typename FieldTraits< field_type >::real_type real_type;

        ParallelizedScalarProduct( const BulkDF& uh, const InterfaceDF& th )
          : u( uh ), v( uh ), t( th ), s( th )
        {}

        virtual field_type dot( const X& x, const X& y ) const
        {
          u.blockVector() = x[ _0 ];
          t.blockVector() = x[ _1 ];
          v.blockVector() = y[ _0 ];
          s.blockVector() = y[ _1 ];
          return u.scalarProductDofs( v ) + t.scalarProductDofs( s ); // already parallel
        }

        virtual real_type norm( const X& x ) const
        {
          u.blockVector() = x[ _0 ];
          t.blockVector() = x[ _1 ];
          return std::sqrt( u.scalarProductDofs( u ) + t.scalarProductDofs( t ) ); // already parallel
        }

      private:
        mutable BulkDF u, v;
        mutable InterfaceDF t, s;
      };

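      // Linear operator wrapper around the mixed-dimensional block matrix that
      // communicates the result vector after each matrix-vector product.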
      template< class M, class X, class Y, class BulkDF, class InterfaceDF >
      class ParallelizedMatrixAdapter : public AssembledLinearOperator< M, X, Y >
      {
      public:
        //! export types
        typedef M matrix_type;
        typedef X domain_type;
        typedef Y range_type;
        typedef typename X::field_type field_type;

        //! constructor: store a reference to the assembled matrix
        explicit ParallelizedMatrixAdapter( const M& A, const BulkDF& uh, const InterfaceDF& th )
          : _A_( stackobject_to_shared_ptr( A ) ), u( uh ), t( th )
        {}

        //! apply operator to x: y = A(x)
        void apply( const X& x, Y& y ) const override
        {
          _A_->mv( x, y );
          communicate( y );
        }

        //! apply operator to x, scale and add: y += alpha * A(x)
        void applyscaleadd( field_type alpha, const X& x, Y& y ) const override
        {
          _A_->usmv( alpha, x, y );
          communicate( y );
        }

        //! access the assembled matrix
        const M& getmat() const override
        {
          return *_A_;
        }

        //! category of the linear operator
        SolverCategory::Category category() const override
        {
          return SolverCategory::sequential;
        }

      private:
        //! copy the blocks into the discrete functions, communicate, copy back
        void communicate( Y& y ) const
        {
          u.blockVector() = y[ _0 ];
          t.blockVector() = y[ _1 ];
          u.communicate();
          t.communicate();
          y[ _0 ] = u.blockVector();
          y[ _1 ] = t.blockVector();
        }

        const std::shared_ptr< const M > _A_;
        mutable BulkDF u;
        mutable InterfaceDF t;
      };

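      // Mixed-dimensional Jacobian
      //   [ A  B ] [ u ]   [ f ]
      //   [ C  D ] [ t ] = [ g ]
      // A and D are the bulk and interface Jacobians assembled by the schemes;
      // the coupling blocks B and C are approximated by central finite
      // differences with step size eps.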
      template< class Sch, class ISch, class Sol, class ISol >
      class Jacobian
      {
        using ThisType = Jacobian< Sch, ISch, Sol, ISol >;

      public:
        using Scheme = Sch;
        using IScheme = ISch;
        using Solution = Sol;
        using ISolution = ISol;

        using AType = typename Scheme::JacobianOperatorType;
        using BType = Dune::Fem::ISTLLinearOperator< ISolution, Solution >;
        using CType = Dune::Fem::ISTLLinearOperator< Solution, ISolution >;
        using DType = typename IScheme::JacobianOperatorType;

        // block matrix
        using AMatrixBlock = Dune::BCRSMatrix< typename AType::MatrixBlockType >;
        using BMatrixBlock = Dune::BCRSMatrix< typename BType::MatrixBlockType >;
        using CMatrixBlock = Dune::BCRSMatrix< typename CType::MatrixBlockType >;
        using DMatrixBlock = Dune::BCRSMatrix< typename DType::MatrixBlockType >;

        using ABRowType = Dune::MultiTypeBlockVector< AMatrixBlock, BMatrixBlock >;
        using CDRowType = Dune::MultiTypeBlockVector< CMatrixBlock, DMatrixBlock >;
        using BlockMatrix = Dune::MultiTypeBlockMatrix< ABRowType, CDRowType >;

        // block vector
        using AVectorBlock = typename AType::ColumnBlockVectorType;
        using DVectorBlock = typename DType::ColumnBlockVectorType;
        using BlockVector = Dune::MultiTypeBlockVector< AVectorBlock, DVectorBlock >;

        typedef typename AType::DomainSpaceType BulkSpaceType;
        typedef typename DType::DomainSpaceType InterfaceSpaceType;
        using ParameterType = Fem::NewtonParameter< typename IScheme::LinearInverseOperatorType::SolverParameterType >;

        Jacobian( const Scheme &scheme, const IScheme &ischeme, const Solution &uh, const ISolution &th,
                  const double eps, const std::function< void() > &callback )
          : scheme_( scheme ), ischeme_( ischeme ),
            A_( "A", uh.space(), uh.space() ),
            B_( "B", th.space(), uh.space() ),
            C_( "C", uh.space(), th.space() ),
            D_( "D", th.space(), th.space() ),
            n_( uh.size() ),
            m_( th.size() ),
            eps_( eps ),
            callback_( callback )
        {
          x_[ _0 ] = uh.blockVector();
          x_[ _1 ] = th.blockVector();
        }

        const Scheme& scheme() const { return scheme_; }
        const BlockMatrix& M() const { return M_; }

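        // Set up the sparsity patterns of the coupling blocks B and C.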
        void init()
        {
          if( n_ == 0 || m_ == 0 )
            return;

          NeighborInterfaceStencil< InterfaceSpaceType, BulkSpaceType > stencilB( B_.domainSpace(), B_.rangeSpace() );
          stencilB.setupStencil();
          B_.reserve( stencilB );

          InterfaceNeighborStencil< BulkSpaceType, InterfaceSpaceType > stencilC( C_.domainSpace(), C_.rangeSpace() );
          stencilC.setupStencil();
          C_.reserve( stencilC );
        }

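        // Reassemble all four blocks at the current iterate (uh, th).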
        void update( const Solution &uh, const ISolution &th )
        {
          scheme_.jacobian( uh, A_ );
          ischeme_.jacobian( th, D_ );

          if( n_ > 0 && m_ > 0 )
          {
            B_.clear();
            C_.clear();
            assembleB( scheme_, th, uh );
            assembleC( ischeme_, uh, th );
          }
        }

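        // Solve M x = b for the combined system; the result is written to u
        // and t and communicated. Richardson with relaxation 1.0 acts as an
        // identity (i.e. no) preconditioner.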
        void solve( const Solution &f, const ISolution &g, Solution& u, ISolution& t )
        {
          auto setBlock = [ this ]( const auto& block, const auto blockrow, const auto blockcol )
          {
            this->M_[ blockrow ][ blockcol ] = block.exportMatrix();
          };

          if( n_ > 0 )
            setBlock( A_, _0, _0 );
          if( n_ > 0 && m_ > 0 )
          {
            setBlock( B_, _0, _1 );
            setBlock( C_, _1, _0 );
          }
          if( m_ > 0 )
            setBlock( D_, _1, _1 );

          b_[ _0 ] = f.blockVector();
          b_[ _1 ] = g.blockVector();

          auto params = std::make_shared< Fem::ISTLSolverParameter >( ParameterType( scheme_.parameter() ).linear() );
          const size_t numIterations = params->preconditionerIteration();
          const double relaxFactor = params->relaxation();

          using SolverAdapterType = Fem::ISTLSolverAdapter< -1, BlockVector >;
          using ReductionType = typename SolverAdapterType::ReductionType;
          SolverAdapterType solver( ReductionType( params ), params );
          solver.setMaxIterations( params->maxIterations() );

          ParallelizedMatrixAdapter< BlockMatrix, BlockVector, BlockVector, Solution, ISolution > linop( M_, u, t );
          ParallelizedScalarProduct< BlockVector, Solution, ISolution > scp( u, t );
          Dune::Richardson< BlockVector, BlockVector > prec( 1.0 );
          InverseOperatorResult res;

          solver( linop, scp, prec, b_, x_, res );

          u.blockVector() = x_[ _0 ];
          t.blockVector() = x_[ _1 ];
          u.communicate();
          t.communicate();
        }

      private:
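
        // Assemble B = dF/dt by central differences: perturb each interface
        // dof by ±h and re-evaluate the skeleton integral on the adjacent bulk
        // elements, with h = max(dof * eps, eps).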
        template< class Scheme, class DomainGridFunction, class RangeGridFunction >
        void assembleB( const Scheme &scheme, const DomainGridFunction &t, const RangeGridFunction &u )
        {
          typedef InterfaceSpaceType DomainSpaceType;
          typedef BulkSpaceType RangeSpaceType;

          typedef Fem::TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
          typedef typename RangeGridFunction::DiscreteFunctionSpaceType DiscreteFunctionSpaceType;

          const auto& gridPart = t.gridPart();
          const auto& grid = gridPart.grid();
          const auto& mmesh = grid.getMMesh();

          auto dFIn = u;
          auto dFOut = u;

          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > FmTmpIn( u.space() );
          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > FmTmpOut( u.space() );
          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > FpTmpIn( u.space() );
          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > FpTmpOut( u.space() );

          Dune::Fem::MutableLocalFunction< DomainGridFunction > tLocal( t );

          Dune::Fem::ConstLocalFunction< RangeGridFunction > uInside( u );
          Dune::Fem::ConstLocalFunction< RangeGridFunction > uOutside( u );

          Dune::Fem::MutableLocalFunction< RangeGridFunction > dFLocalIn( dFIn );
          Dune::Fem::MutableLocalFunction< RangeGridFunction > dFLocalOut( dFOut );

          TemporaryLocalMatrixType localMatrixIn( B_.domainSpace(), B_.rangeSpace() );
          TemporaryLocalMatrixType localMatrixOut( B_.domainSpace(), B_.rangeSpace() );

          for( const auto &interface : elements( gridPart, Partitions::all ) )
          {
            tLocal.bind( interface );
            auto& tDof = tLocal.localDofVector();

            const auto mmeshIntersection = mmesh.asIntersection( interface );
            const auto inside = u.gridPart().convert( mmeshIntersection.inside() );
            const auto intersection = convert( u.gridPart(), mmeshIntersection, inside );

            const auto& outside = intersection.outside();

            FmTmpIn.bind( inside );
            FmTmpOut.bind( outside );
            FpTmpIn.bind( inside );
            FpTmpOut.bind( outside );

            uInside.bind( inside );
            uOutside.bind( outside );

            dFLocalIn.bind( inside );
            dFLocalOut.bind( outside );

            localMatrixIn.init( interface, inside );
            localMatrixOut.init( interface, outside );

            localMatrixIn.clear();
            localMatrixOut.clear();

            for( std::size_t i = 0; i < tDof.size(); ++i )
            {
              dFIn.clear();
              dFOut.clear();

              FmTmpIn.clear();
              FmTmpOut.clear();
              FpTmpIn.clear();
              FpTmpOut.clear();

              double h = std::max( tDof[ i ] * eps_, eps_ );

              // backward evaluation at t - h
              tDof[ i ] -= h;
              callback_();
              scheme.fullOperator().impl().addSkeletonIntegral( intersection, uInside, uOutside, FmTmpIn, FmTmpOut );
              tDof[ i ] += h;

              dFIn.addLocalDofs( inside, FmTmpIn.localDofVector() );
              dFOut.addLocalDofs( outside, FmTmpOut.localDofVector() );

              dFIn *= -1.;
              dFOut *= -1.;

              // forward evaluation at t + h
              tDof[ i ] += h;
              callback_();
              scheme.fullOperator().impl().addSkeletonIntegral( intersection, uInside, uOutside, FpTmpIn, FpTmpOut );
              tDof[ i ] -= h;

              dFIn.addLocalDofs( inside, FpTmpIn.localDofVector() );
              dFOut.addLocalDofs( outside, FpTmpOut.localDofVector() );

              // central difference quotient
              dFIn /= 2 * h;
              dFOut /= 2 * h;

              for( std::size_t j = 0; j < dFLocalIn.localDofVector().size(); ++j )
                localMatrixIn.set( j, i, dFLocalIn[ j ] );

              for( std::size_t j = 0; j < dFLocalOut.localDofVector().size(); ++j )
                localMatrixOut.set( j, i, dFLocalOut[ j ] );
            }

            B_.addLocalMatrix( interface, inside, localMatrixIn );
            B_.addLocalMatrix( interface, outside, localMatrixOut );
          }
          B_.compress();
        }

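        // Assemble C = dG/du analogously: perturb each bulk dof by ±h and
        // re-evaluate the interior integral on the adjacent interface elements.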
        template< class Scheme, class DomainGridFunction, class RangeGridFunction >
        void assembleC( const Scheme &ischeme, const DomainGridFunction &u, const RangeGridFunction &t )
        {
          typedef BulkSpaceType DomainSpaceType;
          typedef InterfaceSpaceType RangeSpaceType;

          typedef Fem::TemporaryLocalMatrix< DomainSpaceType, RangeSpaceType > TemporaryLocalMatrixType;
          typedef typename RangeGridFunction::DiscreteFunctionSpaceType DiscreteFunctionSpaceType;

          const auto& gridPart = u.gridPart();
          const auto& grid = gridPart.grid();

          auto dG = t;

          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > GmTmp( t.space() );
          Dune::Fem::TemporaryLocalFunction< DiscreteFunctionSpaceType > GpTmp( t.space() );

          Dune::Fem::MutableLocalFunction< DomainGridFunction > uLocal( u );

          Dune::Fem::ConstLocalFunction< RangeGridFunction > tInterface( t );
          Dune::Fem::MutableLocalFunction< RangeGridFunction > dGLocal( dG );

          TemporaryLocalMatrixType localMatrix( C_.domainSpace(), C_.rangeSpace() );

          for( const auto &element : elements( gridPart, Partitions::all ) )
          {
            for( const auto& intersection : intersections( gridPart, element ) )
            {
              if( grid.isInterface( intersection ) )
              {
                const auto& interface = grid.asInterfaceEntity( intersection );

                uLocal.bind( element );
                auto& uDof = uLocal.localDofVector();

                GmTmp.bind( interface );
                GpTmp.bind( interface );
                tInterface.bind( interface );

                dGLocal.bind( interface );
                localMatrix.init( element, interface );
                localMatrix.clear();

                for( std::size_t i = 0; i < uDof.size(); ++i )
                {
                  dG.clear();

                  GmTmp.clear();
                  GpTmp.clear();

                  double h = std::max( uDof[ i ] * eps_, eps_ );

                  // backward evaluation at u - h
                  uDof[ i ] -= h;
                  callback_();
                  ischeme.fullOperator().impl().addInteriorIntegral( tInterface, GmTmp );
                  uDof[ i ] += h;

                  dG.addLocalDofs( interface, GmTmp.localDofVector() );
                  dG *= -1.;

                  // forward evaluation at u + h
                  uDof[ i ] += h;
                  callback_();
                  ischeme.fullOperator().impl().addInteriorIntegral( tInterface, GpTmp );
                  uDof[ i ] -= h;

                  dG.addLocalDofs( interface, GpTmp.localDofVector() );

                  // central difference quotient
                  dG /= 2 * h;

                  for( std::size_t j = 0; j < dGLocal.localDofVector().size(); ++j )
                    localMatrix.set( j, i, dGLocal[ j ] );
                }

                C_.addLocalMatrix( element, interface, localMatrix );
              }
            }
          }
          C_.compress();
        }

        const Scheme& scheme_;
        const IScheme& ischeme_;
        AType A_;
        BType B_;
        CType C_;
        DType D_;
        BlockMatrix M_;
        BlockVector x_, b_;
        std::size_t n_, m_;
        const double eps_;
        const std::function< void() > callback_;
      };

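      // Export init/update/solve of the Jacobian class to Python.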
      template< class Jacobian, class... options >
      inline static auto registerJacobian( pybind11::handle scope, pybind11::class_< Jacobian, options... > cls )
      {
        using Solution = typename Jacobian::Solution;
        using ISolution = typename Jacobian::ISolution;

        cls.def( "init", [] ( Jacobian &self ) {
            self.init();
          },
          R"doc(
            Initialize the mixed-dimensional Jacobian.
          )doc"
        );

        cls.def( "update", [] ( Jacobian &self, const Solution& uh, const ISolution& th ) {
            self.update( uh, th );
          },
          R"doc(
            Update the mixed-dimensional Jacobian.
          )doc"
        );

        cls.def( "solve", [] ( Jacobian &self, const Solution& f, const ISolution& g, Solution& ux, ISolution& tx ) {
            self.solve( f, g, ux, tx );
          },
          R"doc(
            Solve the mixed-dimensional Jacobian system.
          )doc"
        );
      }

    } // namespace MMesh

  } // namespace Python

} // namespace Dune

#endif // DUNE_PYTHON_MMESH_JACOBIAN_HH