
fastamg.hh
1// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_ISTL_FASTAMG_HH
6#define DUNE_ISTL_FASTAMG_HH
7
8#include <memory>
9#include <dune/common/exceptions.hh>
10#include <dune/common/typetraits.hh>
11#include <dune/istl/paamg/smoother.hh>
12#include <dune/istl/paamg/transfer.hh>
13#include <dune/istl/paamg/matrixhierarchy.hh>
14#include <dune/istl/solvers.hh>
15#include <dune/istl/scalarproducts.hh>
16#include <dune/istl/superlu.hh>
17#include <dune/istl/umfpack.hh>
18#include <dune/istl/solvertype.hh>
19#include <dune/istl/io.hh>
20#include <dune/istl/preconditioner.hh>
21
22#include "fastamgsmoother.hh"
23
32namespace Dune
33{
34 namespace Amg
35 {
58 template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
59 class FastAMG : public Preconditioner<X,X>
60 {
61 public:
63 typedef M Operator;
70 typedef PI ParallelInformation;
72 typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
74 typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
75
77 typedef X Domain;
79 typedef X Range;
81 typedef InverseOperator<X,X> CoarseSolver;
82
90 FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
91 const Parameters& parms,
92 bool symmetric=true);
93
105 template<class C>
106 FastAMG(std::shared_ptr<const Operator> fineOperator,
107 const C& criterion,
108 const Parameters& parms=Parameters(),
109 bool symmetric=true,
110 const ParallelInformation& pinfo=ParallelInformation());
111
124 template<class C>
125 FastAMG(const Operator& fineOperator,
126 const C& criterion,
127 const Parameters& parms=Parameters(),
128 bool symmetric=true,
129 const ParallelInformation& pinfo=ParallelInformation())
130 : FastAMG(stackobject_to_shared_ptr(fineOperator),
131 criterion, parms, symmetric, pinfo)
132 {}
133
137 FastAMG(const FastAMG& amg);
138
140 void pre(Domain& x, Range& b);
141
143 void apply(Domain& v, const Range& d);
144
146 virtual SolverCategory::Category category() const
147 {
148 return SolverCategory::sequential;
149 }
150
152 void post(Domain& x);
153
158 template<class A1>
159 void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
160
161 std::size_t levels();
162
163 std::size_t maxlevels();
164
173 void recalculateHierarchy()
174 {
175 matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
176 }
177
182 bool usesDirectCoarseLevelSolver() const;
183
184 private:
191 template<class C>
192 void createHierarchies(C& criterion,
193 std::shared_ptr<const Operator> fineOperator,
194 const PI& pinfo);
195
202 struct LevelContext
203 {
207 typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
211 typename ParallelInformationHierarchy::Iterator pinfo;
215 typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
219 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
223 typename Hierarchy<Domain,A>::Iterator lhs;
227 typename Hierarchy<Domain,A>::Iterator residual;
231 typename Hierarchy<Range,A>::Iterator rhs;
235 std::size_t level;
236 };
237
239 void mgc(LevelContext& levelContext, Domain& x, const Range& b);
240
247 void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
248
255 void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
256
263 void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
264 Domain& fineX);
265
270 bool moveToCoarseLevel(LevelContext& levelContext);
271
276 void initIteratorsWithFineLevel(LevelContext& levelContext);
277
279 std::shared_ptr<OperatorHierarchy> matrices_;
281 std::shared_ptr<CoarseSolver> solver_;
283 std::shared_ptr<Hierarchy<Range,A>> rhs_;
285 std::shared_ptr<Hierarchy<Domain,A>> lhs_;
287 std::shared_ptr<Hierarchy<Domain,A>> residual_;
288
290 typedef Dune::ScalarProduct<X> ScalarProduct;
292 std::shared_ptr<ScalarProduct> scalarProduct_;
294 std::size_t gamma_;
296 std::size_t preSteps_;
298 std::size_t postSteps_;
299 std::size_t level;
300 bool buildHierarchy_;
301 bool symmetric;
302 bool coarsesolverconverged;
303 typedef SeqSSOR<typename M::matrix_type,X,X> Smoother;
304 typedef std::shared_ptr<Smoother> SmootherPointer;
305 SmootherPointer coarseSmoother_;
307 std::size_t verbosity_;
308 };
309
310 template<class M, class X, class PI, class A>
311 FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
312 : matrices_(amg.matrices_), solver_(amg.solver_),
313 rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
314 gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
315 symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
316 coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
317 {}
318
319 template<class M, class X, class PI, class A>
320 FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
321 const Parameters& parms, bool symmetric_)
322 : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
323 rhs_(), lhs_(), residual_(), scalarProduct_(),
324 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
325 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
326 symmetric(symmetric_), coarsesolverconverged(true),
327 coarseSmoother_(), verbosity_(parms.debugLevel())
328 {
329 if(preSteps_>1||postSteps_>1)
330 {
331 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
332 preSteps_=postSteps_=0;
333 }
334 assert(matrices_->isBuilt());
335 static_assert(std::is_same<PI,SequentialInformation>::value,
336 "Currently only sequential runs are supported");
337 }
338 template<class M, class X, class PI, class A>
339 template<class C>
340 FastAMG<M,X,PI,A>::FastAMG(std::shared_ptr<const Operator> fineOperator,
341 const C& criterion,
342 const Parameters& parms,
343 bool symmetric_,
344 const PI& pinfo)
345 : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
346 preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
347 buildHierarchy_(true),
348 symmetric(symmetric_), coarsesolverconverged(true),
349 coarseSmoother_(), verbosity_(criterion.debugLevel())
350 {
351 if(preSteps_>1||postSteps_>1)
352 {
353 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
354 preSteps_=postSteps_=1;
355 }
356 static_assert(std::is_same<PI,SequentialInformation>::value,
357 "Currently only sequential runs are supported");
358 // TODO: reestablish compile time checks.
359 //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
360 // "Matrix and Solver must match in terms of category!");
361 createHierarchies(criterion, std::move(fineOperator), pinfo);
362 }
363
364 template<class M, class X, class PI, class A>
365 template<class C>
366 void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
367 std::shared_ptr<const Operator> fineOperator,
368 const PI& pinfo)
369 {
370 Timer watch;
371 matrices_ = std::make_shared<OperatorHierarchy>(
372 std::const_pointer_cast<Operator>(std::move(fineOperator)),
373 stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));
374
375 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
376
377 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
378 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
379
380 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
381 // We have the coarsest level. Create the coarse solver.
382 typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
383 SmootherArgs sargs;
384 sargs.iterations = 1;
385
386 typename ConstructionTraits<Smoother>::Arguments cargs;
387 cargs.setArgs(sargs);
388 if(matrices_->redistributeInformation().back().isSetup()) {
389 // Solve on the redistributed partitioning
390 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
391 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
392 }else{
393 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
394 cargs.setComm(*matrices_->parallelInformation().coarsest());
395 }
396
397 coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
398 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
399
400#if HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
401#if HAVE_SUITESPARSE_UMFPACK
402#define DIRECTSOLVER UMFPack
403#else
404#define DIRECTSOLVER SuperLU
405#endif
406 // Use a direct solver (UMFPack if available, otherwise SuperLU) if we are purely sequential or have only one process on the coarsest level.
407 if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
408 || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
409 || (matrices_->parallelInformation().coarsest().isRedistributed()
410 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
411 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
412 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
413 std::cout<<"Using superlu"<<std::endl;
414 if(matrices_->parallelInformation().coarsest().isRedistributed())
415 {
416 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
417 // We are still participating on this level
418 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
419 else
420 solver_.reset();
421 }else
422 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
423 }else
424#undef DIRECTSOLVER
425#endif // HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
426 {
427 if(matrices_->parallelInformation().coarsest().isRedistributed())
428 {
429 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
430 // We are still participating on this level
431 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
432 *scalarProduct_,
433 *coarseSmoother_, 1E-2, 1000, 0));
434 else
435 solver_.reset();
436 }else
437 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
438 *scalarProduct_,
439 *coarseSmoother_, 1E-2, 1000, 0));
440 }
441 }
442
443 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
444 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
445 }
446
447
448 template<class M, class X, class PI, class A>
449 void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
450 {
451 Timer watch, watch1;
452 // Detect Matrix rows where all offdiagonal entries are
453 // zero and set x such that A_dd*x_d=b_d
454 // Thus users can be more careless when setting up their linear
455 // systems.
456 typedef typename M::matrix_type Matrix;
457 typedef typename Matrix::ConstRowIterator RowIter;
458 typedef typename Matrix::ConstColIterator ColIter;
459 typedef typename Matrix::block_type Block;
460 Block zero;
461 zero=typename Matrix::field_type();
462
463 const Matrix& mat=matrices_->matrices().finest()->getmat();
464 for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
465 bool isDirichlet = true;
466 bool hasDiagonal = false;
467 ColIter diag;
468 for(ColIter col=row->begin(); col!=row->end(); ++col) {
469 if(row.index()==col.index()) {
470 diag = col;
471 hasDiagonal = (*col != zero);
472 }else{
473 if(*col!=zero)
474 isDirichlet = false;
475 }
476 }
477 if(isDirichlet && hasDiagonal)
478 {
479 if constexpr (Dune::IsNumber<Block>::value)
480 x[row.index()] = b[row.index()]/(*diag);
481 else
482 diag->solve(x[row.index()], b[row.index()]);
483 }
484 }
485 if (verbosity_>0)
486 std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
487 watch1.reset();
488 // No smoother to make x consistent! Do it by hand
489 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
490 rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
491 lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
492 residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
493 matrices_->coarsenVector(*rhs_);
494 matrices_->coarsenVector(*lhs_);
495 matrices_->coarsenVector(*residual_);
496
497 // The preconditioner might change x and b. So we have to
498 // copy the changes to the original vectors.
499 x = *lhs_->finest();
500 b = *rhs_->finest();
501 }
502 template<class M, class X, class PI, class A>
503 std::size_t FastAMG<M,X,PI,A>::levels()
504 {
505 return matrices_->levels();
506 }
507 template<class M, class X, class PI, class A>
508 std::size_t FastAMG<M,X,PI,A>::maxlevels()
509 {
510 return matrices_->maxlevels();
511 }
512
514 template<class M, class X, class PI, class A>
515 void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
516 {
517 LevelContext levelContext;
518 // Init all iterators for the current level
519 initIteratorsWithFineLevel(levelContext);
520
521 assert(v.two_norm()==0);
522
523 level=0;
524 if(matrices_->maxlevels()==1){
525 // The coarse solver might modify d!
526 Range b(d);
527 mgc(levelContext, v, b);
528 }else
529 mgc(levelContext, v, d);
530 if(postSteps_==0||matrices_->maxlevels()==1)
531 levelContext.pinfo->copyOwnerToAll(v, v);
532 }
533
534 template<class M, class X, class PI, class A>
535 void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
536 {
537 levelContext.matrix = matrices_->matrices().finest();
538 levelContext.pinfo = matrices_->parallelInformation().finest();
539 levelContext.redist =
540 matrices_->redistributeInformation().begin();
541 levelContext.aggregates = matrices_->aggregatesMaps().begin();
542 levelContext.lhs = lhs_->finest();
543 levelContext.residual = residual_->finest();
544 levelContext.rhs = rhs_->finest();
545 levelContext.level=0;
546 }
547
548 template<class M, class X, class PI, class A>
549 bool FastAMG<M,X,PI,A>
550 ::moveToCoarseLevel(LevelContext& levelContext)
551 {
552 bool processNextLevel=true;
553
554 if(levelContext.redist->isSetup()) {
555 DUNE_THROW(NotImplemented, "Redistribution is not supported by FastAMG");
556 levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
557 levelContext.residual.getRedistributed());
558 processNextLevel = levelContext.residual.getRedistributed().size()>0;
559 if(processNextLevel) {
560 //restrict defect to coarse level right hand side.
561 ++levelContext.pinfo;
562 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
563 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
564 static_cast<const Range&>(levelContext.residual.getRedistributed()),
565 *levelContext.pinfo);
566 }
567 }else{
568 //restrict defect to coarse level right hand side.
569 ++levelContext.rhs;
570 ++levelContext.pinfo;
571 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
572 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
573 static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
574 }
575
576 if(processNextLevel) {
577 // prepare coarse system
578 ++levelContext.residual;
579 ++levelContext.lhs;
580 ++levelContext.matrix;
581 ++levelContext.level;
582 ++levelContext.redist;
583
584 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
585 // next level is not the globally coarsest one
586 ++levelContext.aggregates;
587 }
588 // prepare the lhs on the next level
589 *levelContext.lhs=0;
590 *levelContext.residual=0;
591 }
592 return processNextLevel;
593 }
594
595 template<class M, class X, class PI, class A>
596 void FastAMG<M,X,PI,A>
597 ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
598 {
599 if(processNextLevel) {
600 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
601 // previous level is not the globally coarsest one
602 --levelContext.aggregates;
603 }
604 --levelContext.redist;
605 --levelContext.level;
606 //prolongate and add the correction (update is in coarse left hand side)
607 --levelContext.matrix;
608 --levelContext.residual;
609
610 }
611
612 typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
613 if(levelContext.redist->isSetup()) {
614
615 // Need to redistribute during prolongate
616 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
617 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
618 levelContext.lhs.getRedistributed(),
619 matrices_->getProlongationDampingFactor(),
620 *levelContext.pinfo, *levelContext.redist);
621 }else{
622 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
623 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
624 matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
625
626 // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
627 }
628
629
630 if(processNextLevel) {
631 --levelContext.rhs;
632 }
633
634 }
635
636
637 template<class M, class X, class PI, class A>
638 void FastAMG<M,X,PI,A>
639 ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
640 {
641 constexpr auto bl = blockLevel<typename M::matrix_type>();
642 GaussSeidelPresmoothDefect<bl>::apply(levelContext.matrix->getmat(),
643 x,
644 *levelContext.residual,
645 b);
646 }
647
648 template<class M, class X, class PI, class A>
649 void FastAMG<M,X,PI,A>
650 ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
651 {
652 constexpr auto bl = blockLevel<typename M::matrix_type>();
653 GaussSeidelPostsmoothDefect<bl>
654 ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
655 }
656
657
658 template<class M, class X, class PI, class A>
659 bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
660 {
661 return IsDirectSolver<CoarseSolver>::value;
662 }
663
664 template<class M, class X, class PI, class A>
665 void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
666
667 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
668 // Solve directly
669 InverseOperatorResult res;
670 res.converged=true; // If we do not compute, this flag will not get updated
671 if(levelContext.redist->isSetup()) {
672 levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
673 if(levelContext.rhs.getRedistributed().size()>0) {
674 // We are still participating in the computation
675 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
676 levelContext.rhs.getRedistributed());
677 solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
678 }
679 levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
680 levelContext.pinfo->copyOwnerToAll(v, v);
681 }else{
682 levelContext.pinfo->copyOwnerToAll(b, b);
683 solver_->apply(v, const_cast<Range&>(b), res);
684 }
685
686 // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
687 // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
688 if (!res.converged)
689 coarsesolverconverged = false;
690 }else{
691 // presmoothing
692 presmooth(levelContext, v, b);
693 // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
694 // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
695#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
696 bool processNextLevel = moveToCoarseLevel(levelContext);
697
698 if(processNextLevel) {
699 // next level
700 for(std::size_t i=0; i<gamma_; i++)
701 mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
702 }
703
704 moveToFineLevel(levelContext, processNextLevel, v);
705#else
706 *levelContext.lhs=0;
707#endif
708
709 if(levelContext.matrix == matrices_->matrices().finest()) {
710 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
711 if(!coarsesolverconverged)
712 DUNE_THROW(MathError, "Coarse solver did not converge");
713 }
714
715 postsmooth(levelContext, v, b);
716 }
717 }
718
719
721 template<class M, class X, class PI, class A>
722 void FastAMG<M,X,PI,A>::post([[maybe_unused]] Domain& x)
723 {
724 lhs_=nullptr;
725 rhs_=nullptr;
726 residual_=nullptr;
727 }
728
729 template<class M, class X, class PI, class A>
730 template<class A1>
731 void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
732 {
733 matrices_->getCoarsestAggregatesOnFinest(cont);
734 }
735
736 } // end namespace Amg
737} // end namespace Dune
738
739#endif
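
A minimal usage sketch (not part of this header): it shows how FastAMG is typically plugged into a Krylov solver from solvers.hh. The matrix and vector types, the criterion numbers, and the function name solveWithFastAMG are illustrative assumptions, not definitions made by this file.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/fastamg.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>

using Matrix   = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> >;
using Vector   = Dune::BlockVector<Dune::FieldVector<double,1> >;
using Operator = Dune::MatrixAdapter<Matrix,Vector,Vector>;

// Solve A x = b with CG, using one FastAMG cycle as the preconditioner.
void solveWithFastAMG(const Matrix& A, Vector& x, Vector& b)
{
  auto op = std::make_shared<Operator>(A);

  // Aggregation criterion for a symmetric problem; the numbers are example values.
  using Criterion = Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix, Dune::Amg::FirstDiagonal> >;
  Criterion criterion(15, 2000);                          // at most 15 levels, coarsen to ~2000 rows

  Dune::Amg::FastAMG<Operator,Vector> amg(op, criterion); // the constructor builds the hierarchy

  Dune::CGSolver<Vector> cg(*op, amg, 1e-8, 200, 2);      // reduction, max. iterations, verbosity
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);                                    // note: apply() may modify b
}

Because FastAMG currently only supports sequential runs (see the static_assert in the constructors), no parallel index set is passed here.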
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:60
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:220
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:223
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:61
All parameters for AMG.
Definition: parameters.hh:416
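
For illustration, a hedged sketch of the Dune::Amg::Parameters setters that feed the FastAMG constructors above, reusing the Operator, Vector, op, and criterion names from the usage sketch above:

Dune::Amg::Parameters parms;
parms.setGamma(1);             // stored as gamma_: 1 gives a V-cycle, 2 a W-type cycle
parms.setNoPreSmoothSteps(1);  // FastAMG warns if more than one smoothing step is requested
parms.setNoPostSmoothSteps(1);
parms.setDebugLevel(1);        // debug output; the criterion-based constructor reads verbosity_ from the criterion instead
Dune::Amg::FastAMG<Operator,Vector> amg(op, criterion, parms, /*symmetric=*/true);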
ConstIterator class for sequential access.
Definition: matrix.hh:404
A generic dynamic dense matrix.
Definition: matrix.hh:561
typename Imp::BlockTraits< T >::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:565
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:589
T block_type
Export the type representing the components.
Definition: matrix.hh:568
The negation of a set. An item is contained in the set if and only if it is not contained in the negated set.
Definition: enumset.hh:96
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:33
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:52
Sequential SSOR preconditioner.
Definition: preconditioners.hh:142
A simple stop watch.
Definition: timer.hh:31
void reset() noexcept
Reset timer while keeping the running/stopped state.
Definition: timer.hh:47
double elapsed() const noexcept
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:67
A few common exception classes.
Some generic functions for pretty printing vectors and matrices.
Define base class for scalar product and norm.
Classes for using UMFPack with ISTL matrices.
#define DUNE_THROW(E,...)
Definition: exceptions.hh:312
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:207
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:731
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:211
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:173
void post(Domain &x)
Clean up.
Definition: fastamg.hh:722
X Domain
The domain type.
Definition: fastamg.hh:77
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:227
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: fastamg.hh:146
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:72
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:215
X Range
The range type.
Definition: fastamg.hh:79
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the parallel data distribution and providing communication methods.
Definition: fastamg.hh:70
M Operator
The matrix operator type.
Definition: fastamg.hh:63
FastAMG(const Operator &fineOperator, const C &criterion, const Parameters &parms=Parameters(), bool symmetric=true, const ParallelInformation &pinfo=ParallelInformation())
Construct an AMG with an inexact coarse solver based on the smoother.
Definition: fastamg.hh:125
InverseOperator< X, X > CoarseSolver
the type of the coarse solver.
Definition: fastamg.hh:81
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:659
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:223
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:231
std::size_t level
The level index.
Definition: fastamg.hh:235
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:515
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:74
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:449
FastAMG(OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new amg with a specific coarse solver.
Definition: fastamg.hh:320
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:219
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:406
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
static std::shared_ptr< T > construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:428
Provides classes representing the hierarchies in AMG.
Dune namespace.
Definition: alignedallocator.hh:13
std::shared_ptr< T > stackobject_to_shared_ptr(T &t)
Create a shared_ptr for a stack-allocated object.
Definition: shared_ptr.hh:72
Define general preconditioner interface.
Classes for the generic construction and application of the smoothers.
Implementations of the inverse operator interface.
Templates characterizing the type of a solver.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:66
Statistics about the application of an inverse operator.
Definition: solver.hh:50
bool converged
True if convergence criterion has been met.
Definition: solver.hh:75
Whether this type acts as a scalar in the context of (hierarchically blocked) containers.
Definition: typetraits.hh:194
Category
Definition: solvercategory.hh:23
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:25
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Traits for type conversions and type information.