#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH

// ...
#include "fastamgsmoother.hh"
  template<class M, class X, class PI=SequentialInformation,
           class A=std::allocator<X> >
  class FastAMG : public Preconditioner<X,X>
  {
    // ...
    std::size_t levels();

    std::size_t maxlevels();

    // ...

    template<class C>
    void createHierarchies(C& criterion, Operator& matrix,
                           const PI& pinfo);
    struct LevelContext
    {
      // ...
      typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
      // ...
      typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
      // ...
    };
    void mgc(LevelContext& levelContext, Domain& x, const Range& b);

    void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

    void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

    void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
                         Domain& x);

    bool moveToCoarseLevel(LevelContext& levelContext);

    void initIteratorsWithFineLevel(LevelContext& levelContext);
    std::shared_ptr<OperatorHierarchy> matrices_;
    std::shared_ptr<CoarseSolver> solver_;
    // ...
    std::shared_ptr<ScalarProduct> scalarProduct_;
    // ...
    std::size_t preSteps_;
    std::size_t postSteps_;
    bool buildHierarchy_;
    // ...
    bool coarsesolverconverged;
    typedef std::shared_ptr<Smoother> SmootherPointer;
    SmootherPointer coarseSmoother_;
    std::size_t verbosity_;
  };
  template<class M, class X, class PI, class A>
  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
    : matrices_(amg.matrices_), solver_(amg.solver_),
      rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
      gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
      symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
      coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
  {}
  template<class M, class X, class PI, class A>
  FastAMG<M,X,PI,A>::FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                             const Parameters& parms, bool symmetric_)
    : matrices_(&matrices), solver_(&coarseSolver),
      rhs_(), lhs_(), residual_(), scalarProduct_(),
      gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
      postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
      symmetric(symmetric_), coarsesolverconverged(true),
      coarseSmoother_(), verbosity_(parms.debugLevel())
  {
    if(preSteps_>1||postSteps_>1)
    {
      std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
      preSteps_=postSteps_=1;
    }
    assert(matrices_->isBuilt());
    static_assert(std::is_same<PI,SequentialInformation>::value,
                  "Currently only sequential runs are supported");
  }
  template<class M, class X, class PI, class A>
  template<class C>
  FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix, const C& criterion,
                             const Parameters& parms, bool symmetric_,
                             const ParallelInformation& pinfo)
    : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
      preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
      buildHierarchy_(true),
      symmetric(symmetric_), coarsesolverconverged(true),
      coarseSmoother_(), verbosity_(criterion.debugLevel())
  {
    if(preSteps_>1||postSteps_>1)
    {
      std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
      preSteps_=postSteps_=1;
    }
    static_assert(std::is_same<PI,SequentialInformation>::value,
                  "Currently only sequential runs are supported");
    // ...
    createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
  }
  template<class M, class X, class PI, class A>
  FastAMG<M,X,PI,A>::~FastAMG()
  {
    if(buildHierarchy_) {
      // ...
      coarseSmoother_.reset();
      // ...
    }
  }
  template<class M, class X, class PI, class A>
  template<class C>
  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
                                            const PI& pinfo)
  {
    Timer watch;
    matrices_.reset(new OperatorHierarchy(matrix, pinfo));

    matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

    if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
      std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
               <<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;

    if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
      typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
      SmootherArgs sargs;
      sargs.iterations = 1;

      typename ConstructionTraits<Smoother>::Arguments cargs;
      cargs.setArgs(sargs);
      if(matrices_->redistributeInformation().back().isSetup()) {
        cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
        cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
      }else{
        cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
        cargs.setComm(*matrices_->parallelInformation().coarsest());
      }
      // ...
      scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
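
      // The block below picks the coarse level solver: a sparse direct solver
      // (UMFPack if available, otherwise SuperLU) whenever the coarse problem
      // lives on a single process, and otherwise a BiCGSTAB iteration
      // preconditioned with the coarse smoother as the fallback.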
#if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
#if HAVE_SUITESPARSE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
      if(std::is_same<ParallelInformation,SequentialInformation>::value
         || matrices_->parallelInformation().coarsest()->communicator().size()==1
         || (matrices_->parallelInformation().coarsest().isRedistributed()
             && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
             && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) {
        if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
          std::cout<<"Using superlu"<<std::endl;
        if(matrices_->parallelInformation().coarsest().isRedistributed())
        {
          if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
            solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                            matrices_->matrices().coarsest().getRedistributed().getmat(),
                            false, false));
          // ...
        }else
          solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                          matrices_->matrices().coarsest()->getmat(),
                          false, false));
      }else
#endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
      {
        if(matrices_->parallelInformation().coarsest().isRedistributed())
        {
          if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
            solver_.reset(new BiCGSTABSolver<X>(
                            const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                            *scalarProduct_,
                            *coarseSmoother_, 1E-2, 1000, 0));
          // ...
        }else
          solver_.reset(new BiCGSTABSolver<X>(
                          const_cast<M&>(*matrices_->matrices().coarsest()),
                          *scalarProduct_,
                          *coarseSmoother_, 1E-2, 1000, 0));
      }
    }

    if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
      std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
               <<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
  {
    // ...
    Timer watch1;
    typedef typename M::matrix_type Matrix;
    typedef typename Matrix::ConstRowIterator RowIter;
    typedef typename Matrix::ConstColIterator ColIter;
    typedef typename Matrix::block_type Block;
    Block zero;
    zero=typename Matrix::field_type();
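
    // Rows whose only nonzero entry is the diagonal block are treated as
    // Dirichlet rows: for them the unknown is computed directly from the
    // diagonal in the loop below, so the initial guess already satisfies
    // those trivial equations.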
    const Matrix& mat=matrices_->matrices().finest()->getmat();
    for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
      bool isDirichlet = true;
      bool hasDiagonal = false;
      ColIter diag = row->begin();
      for(ColIter col=row->begin(); col!=row->end(); ++col) {
        if(row.index()==col.index()) {
          diag = col;
          hasDiagonal = (*col != zero);
        }else if(*col != zero)
          isDirichlet = false;
      }
      if(isDirichlet && hasDiagonal)
        diag->solve(x[row.index()], b[row.index()]);
    }

    if(verbosity_>0)
      std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
    // ...
    matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);

    // ...
    matrices_->coarsenVector(*rhs_);
    matrices_->coarsenVector(*lhs_);
    matrices_->coarsenVector(*residual_);
    // ...
  }
  template<class M, class X, class PI, class A>
  std::size_t FastAMG<M,X,PI,A>::levels()
  {
    return matrices_->levels();
  }
  template<class M, class X, class PI, class A>
  std::size_t FastAMG<M,X,PI,A>::maxlevels()
  {
    return matrices_->maxlevels();
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
  {
    LevelContext levelContext;

    initIteratorsWithFineLevel(levelContext);
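
    // As a preconditioner, FastAMG expects the correction v to be zero on
    // entry; the assert below checks exactly this.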
    assert(v.two_norm()==0);

    if(matrices_->maxlevels()==1){
      // the coarse solver may modify its right hand side, so work on a copy of d
      Range b(d);
      mgc(levelContext, v, b);
    }else
      mgc(levelContext, v, d);
    if(postSteps_==0||matrices_->maxlevels()==1)
      levelContext.pinfo->copyOwnerToAll(v, v);
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
  {
    levelContext.matrix = matrices_->matrices().finest();
    levelContext.pinfo = matrices_->parallelInformation().finest();
    levelContext.redist =
      matrices_->redistributeInformation().begin();
    levelContext.aggregates = matrices_->aggregatesMaps().begin();
    levelContext.lhs = lhs_->finest();
    levelContext.residual = residual_->finest();
    levelContext.rhs = rhs_->finest();
    levelContext.level=0;
  }
  template<class M, class X, class PI, class A>
  bool FastAMG<M,X,PI,A>
  ::moveToCoarseLevel(LevelContext& levelContext)
  {
    bool processNextLevel=true;

    if(levelContext.redist->isSetup()) {
      // ...
      levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                        levelContext.residual.getRedistributed());
      processNextLevel = levelContext.residual.getRedistributed().size()>0;
      if(processNextLevel) {
        // ...
        ++levelContext.pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                         static_cast<const Range&>(levelContext.residual.getRedistributed()),
                         *levelContext.pinfo);
      }
    }else{
      // ...
      ++levelContext.pinfo;
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
      ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                       static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
    }

    if(processNextLevel) {
      // ...
      ++levelContext.residual;
      // ...
      ++levelContext.matrix;
      ++levelContext.level;
      ++levelContext.redist;

      if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
        // ...
        ++levelContext.aggregates;
      }
      // ...
      *levelContext.residual=0;
    }

    return processNextLevel;
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
  {
    if(processNextLevel) {
      if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
        // ...
        --levelContext.aggregates;
      }
      --levelContext.redist;
      --levelContext.level;
      // ...
      --levelContext.matrix;
      --levelContext.residual;
    }

    typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
    if(levelContext.redist->isSetup()) {
      // ...
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
      ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                         levelContext.lhs.getRedistributed(),
                         matrices_->getProlongationDampingFactor(),
                         *levelContext.pinfo, *levelContext.redist);
    }else{
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
      ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                         matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
    }
    // ...

    if(processNextLevel) {
      // ...
    }
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
  {
    GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
                                                                  x,
                                                                  *levelContext.residual,
                                                                  b);
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
  {
    GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
    ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
  }
  template<class M, class X, class PI, class A>
  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
  {
    return IsDirectSolver<CoarseSolver>::value;
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
  {
    if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
      InverseOperatorResult res;
      res.converged=true;
      if(levelContext.redist->isSetup()) {
        levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
        if(levelContext.rhs.getRedistributed().size()>0) {
          levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                               levelContext.rhs.getRedistributed());
          solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
        }
        levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
        levelContext.pinfo->copyOwnerToAll(v, v);
      }else{
        levelContext.pinfo->copyOwnerToAll(b, b);
        solver_->apply(v, const_cast<Range&>(b), res);
      }

      if(!res.converged)
        coarsesolverconverged = false;
    }else{
      presmooth(levelContext, v, b);
      // ...
#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
      bool processNextLevel = moveToCoarseLevel(levelContext);
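
      // gamma_ sets the cycle type: one recursive call per level yields a
      // V-cycle, two calls a W-cycle.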
      if(processNextLevel) {
        for(std::size_t i=0; i<gamma_; i++)
          mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
      }
      moveToFineLevel(levelContext, processNextLevel, v);
      // ...
#endif
      if(levelContext.matrix == matrices_->matrices().finest()) {
        coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
        if(!coarsesolverconverged)
          DUNE_THROW(MathError, "Coarse solver did not converge");
      }
      postsmooth(levelContext, v, b);
    }
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::post(Domain& x)
  {
    // ...
  }
  template<class M, class X, class PI, class A>
  template<class A1>
  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
  {
    matrices_->getCoarsestAggregatesOnFinest(cont);
  }
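
  /*
    A minimal usage sketch (illustrative only, not part of this header): set up
    FastAMG as the preconditioner of a CG iteration. The matrix `mat`, the
    vectors `x` and `b`, and the chosen criterion parameters are assumptions;
    the usual dune-istl headers (bcrsmatrix.hh, operators.hh, solvers.hh,
    paamg/fastamg.hh) are presumed to be included.

      typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
      typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
      typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
      typedef Dune::Amg::CoarsenCriterion<
        Dune::Amg::UnSymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;

      Operator fineOperator(mat);
      Criterion criterion(15, 2000);              // max. levels, coarsen target
      Dune::Amg::FastAMG<Operator,Vector> amg(fineOperator, criterion);

      Dune::InverseOperatorResult res;
      Dune::CGSolver<Vector> cg(fineOperator, amg, 1e-8, 200, 2);
      cg.apply(x, b, res);
  */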