DUNE PDELab (2.9)

fastamg.hh
1// SPDX-FileCopyrightText: Copyright (C) DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_ISTL_FASTAMG_HH
6#define DUNE_ISTL_FASTAMG_HH
7
8#include <memory>
14#include <dune/istl/solvers.hh>
16#include <dune/istl/superlu.hh>
17#include <dune/istl/umfpack.hh>
19#include <dune/istl/io.hh>
21
22#include "fastamgsmoother.hh"
23
32namespace Dune
33{
34 namespace Amg
35 {
58 template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
59 class FastAMG : public Preconditioner<X,X>
60 {
61 public:
63 typedef M Operator;
70 typedef PI ParallelInformation;
72 typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
74 typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
75
77 typedef X Domain;
79 typedef X Range;
81 typedef InverseOperator<X,X> CoarseSolver;
82
90 FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
91 const Parameters& parms,
92 bool symmetric=true);
93
105 template<class C>
106 FastAMG(const Operator& fineOperator, const C& criterion,
107 const Parameters& parms=Parameters(),
108 bool symmetric=true,
109 const PI& pinfo=PI());
110
114 FastAMG(const FastAMG& amg);
115
117 void pre(Domain& x, Range& b);
118
120 void apply(Domain& v, const Range& d);
121
123 virtual SolverCategory::Category category() const
124 {
125 return SolverCategory::sequential;
126 }
127
129 void post(Domain& x);
130
135 template<class A1>
136 void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
137
138 std::size_t levels();
139
140 std::size_t maxlevels();
141
150 void recalculateHierarchy()
151 {
152 matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
153 }
154
159 bool usesDirectCoarseLevelSolver() const;
160
161 private:
168 template<class C>
169 void createHierarchies(C& criterion,
170 const std::shared_ptr<const Operator>& matrixptr,
171 const PI& pinfo);
172
179 struct LevelContext
180 {
184 typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
188 typename ParallelInformationHierarchy::Iterator pinfo;
192 typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
196 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
200 typename Hierarchy<Domain,A>::Iterator lhs;
204 typename Hierarchy<Domain,A>::Iterator residual;
208 typename Hierarchy<Range,A>::Iterator rhs;
212 std::size_t level;
213 };
214
216 void mgc(LevelContext& levelContext, Domain& x, const Range& b);
217
224 void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
225
232 void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
233
240 void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
241 Domain& fineX);
242
247 bool moveToCoarseLevel(LevelContext& levelContext);
248
253 void initIteratorsWithFineLevel(LevelContext& levelContext);
254
256 std::shared_ptr<OperatorHierarchy> matrices_;
258 std::shared_ptr<CoarseSolver> solver_;
260 std::shared_ptr<Hierarchy<Range,A>> rhs_;
262 std::shared_ptr<Hierarchy<Domain,A>> lhs_;
264 std::shared_ptr<Hierarchy<Domain,A>> residual_;
265
269 std::shared_ptr<ScalarProduct> scalarProduct_;
271 std::size_t gamma_;
273 std::size_t preSteps_;
275 std::size_t postSteps_;
276 std::size_t level;
277 bool buildHierarchy_;
278 bool symmetric;
279 bool coarsesolverconverged;
281 typedef std::shared_ptr<Smoother> SmootherPointer;
282 SmootherPointer coarseSmoother_;
284 std::size_t verbosity_;
285 };
286
287 template<class M, class X, class PI, class A>
288 FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
289 : matrices_(amg.matrices_), solver_(amg.solver_),
290 rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
291 gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
292 symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
293 coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
294 {}
295
296 template<class M, class X, class PI, class A>
297 FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
298 const Parameters& parms, bool symmetric_)
299 : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
300 rhs_(), lhs_(), residual_(), scalarProduct_(),
301 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
302 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
303 symmetric(symmetric_), coarsesolverconverged(true),
304 coarseSmoother_(), verbosity_(parms.debugLevel())
305 {
306 if(preSteps_>1||postSteps_>1)
307 {
308 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
309 preSteps_=postSteps_=1;
310 }
311 assert(matrices_->isBuilt());
312 static_assert(std::is_same<PI,SequentialInformation>::value,
313 "Currently only sequential runs are supported");
314 }
315 template<class M, class X, class PI, class A>
316 template<class C>
317 FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix,
318 const C& criterion,
319 const Parameters& parms,
320 bool symmetric_,
321 const PI& pinfo)
322 : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
323 preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
324 buildHierarchy_(true),
325 symmetric(symmetric_), coarsesolverconverged(true),
326 coarseSmoother_(), verbosity_(criterion.debugLevel())
327 {
328 if(preSteps_>1||postSteps_>1)
329 {
330 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
331 preSteps_=postSteps_=1;
332 }
333 static_assert(std::is_same<PI,SequentialInformation>::value,
334 "Currently only sequential runs are supported");
335 // TODO: reestablish compile time checks.
336 //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
337 // "Matrix and Solver must match in terms of category!");
338 auto matrixptr = stackobject_to_shared_ptr(matrix);
339 createHierarchies(criterion, matrixptr, pinfo);
340 }
341
342 template<class M, class X, class PI, class A>
343 template<class C>
344 void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
345 const std::shared_ptr<const Operator>& matrixptr,
346 const PI& pinfo)
347 {
348 Timer watch;
349 matrices_ = std::make_shared<OperatorHierarchy>(
350 std::const_pointer_cast<Operator>(matrixptr),
351 stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));
352
353 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
354
355 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
356 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
357
358 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
359 // We have the coarsest level. Create the coarse solver.
360 typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
361 SmootherArgs sargs;
362 sargs.iterations = 1;
363
364 typename ConstructionTraits<Smoother>::Arguments cargs;
365 cargs.setArgs(sargs);
366 if(matrices_->redistributeInformation().back().isSetup()) {
367 // Solve on the redistributed partitioning
368 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
369 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
370 }else{
371 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
372 cargs.setComm(*matrices_->parallelInformation().coarsest());
373 }
374
375 coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
376 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
377
378#if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
379#if HAVE_SUITESPARSE_UMFPACK
380#define DIRECTSOLVER UMFPack
381#else
382#define DIRECTSOLVER SuperLU
383#endif
384 // Use the direct solver if we are purely sequential or have only one processor on the coarsest level.
385 if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
386 || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
387 || (matrices_->parallelInformation().coarsest().isRedistributed()
388 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
389 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
390 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
391 std::cout<<"Using superlu"<<std::endl;
392 if(matrices_->parallelInformation().coarsest().isRedistributed())
393 {
394 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
395 // We are still participating on this level
396 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
397 else
398 solver_.reset();
399 }else
400 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
401 }else
402#undef DIRECTSOLVER
403#endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
404 {
405 if(matrices_->parallelInformation().coarsest().isRedistributed())
406 {
407 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
408 // We are still participating on this level
409 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
410 *scalarProduct_,
411 *coarseSmoother_, 1E-2, 1000, 0));
412 else
413 solver_.reset();
414 }else
415 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
416 *scalarProduct_,
417 *coarseSmoother_, 1E-2, 1000, 0));
418 }
419 }
420
421 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
422 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
423 }
424
425
426 template<class M, class X, class PI, class A>
427 void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
428 {
429 Timer watch, watch1;
430 // Detect Matrix rows where all offdiagonal entries are
431 // zero and set x such that A_dd*x_d=b_d
432 // Thus users can be more careless when setting up their linear
433 // systems.
434 typedef typename M::matrix_type Matrix;
435 typedef typename Matrix::ConstRowIterator RowIter;
436 typedef typename Matrix::ConstColIterator ColIter;
437 typedef typename Matrix::block_type Block;
438 Block zero;
439 zero=typename Matrix::field_type();
440
441 const Matrix& mat=matrices_->matrices().finest()->getmat();
442 for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
443 bool isDirichlet = true;
444 bool hasDiagonal = false;
445 ColIter diag;
446 for(ColIter col=row->begin(); col!=row->end(); ++col) {
447 if(row.index()==col.index()) {
448 diag = col;
449 hasDiagonal = (*col != zero);
450 }else{
451 if(*col!=zero)
452 isDirichlet = false;
453 }
454 }
455 if(isDirichlet && hasDiagonal)
456 diag->solve(x[row.index()], b[row.index()]);
457 }
458 if (verbosity_>0)
459 std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
460 watch1.reset();
461 // No smoother to make x consistent! Do it by hand
462 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
463 rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
464 lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
465 residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
466 matrices_->coarsenVector(*rhs_);
467 matrices_->coarsenVector(*lhs_);
468 matrices_->coarsenVector(*residual_);
469
470 // The preconditioner might change x and b. So we have to
471 // copy the changes to the original vectors.
472 x = *lhs_->finest();
473 b = *rhs_->finest();
474 }
475 template<class M, class X, class PI, class A>
476 std::size_t FastAMG<M,X,PI,A>::levels()
477 {
478 return matrices_->levels();
479 }
480 template<class M, class X, class PI, class A>
481 std::size_t FastAMG<M,X,PI,A>::maxlevels()
482 {
483 return matrices_->maxlevels();
484 }
485
487 template<class M, class X, class PI, class A>
488 void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
489 {
490 LevelContext levelContext;
491 // Init all iterators for the current level
492 initIteratorsWithFineLevel(levelContext);
493
494 assert(v.two_norm()==0);
495
496 level=0;
497 if(matrices_->maxlevels()==1){
498 // The coarse solver might modify d!
499 Range b(d);
500 mgc(levelContext, v, b);
501 }else
502 mgc(levelContext, v, d);
503 if(postSteps_==0||matrices_->maxlevels()==1)
504 levelContext.pinfo->copyOwnerToAll(v, v);
505 }
506
507 template<class M, class X, class PI, class A>
508 void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
509 {
510 levelContext.matrix = matrices_->matrices().finest();
511 levelContext.pinfo = matrices_->parallelInformation().finest();
512 levelContext.redist =
513 matrices_->redistributeInformation().begin();
514 levelContext.aggregates = matrices_->aggregatesMaps().begin();
515 levelContext.lhs = lhs_->finest();
516 levelContext.residual = residual_->finest();
517 levelContext.rhs = rhs_->finest();
518 levelContext.level=0;
519 }
520
521 template<class M, class X, class PI, class A>
522 bool FastAMG<M,X,PI,A>
523 ::moveToCoarseLevel(LevelContext& levelContext)
524 {
525 bool processNextLevel=true;
526
527 if(levelContext.redist->isSetup()) {
528 DUNE_THROW(NotImplemented, "FastAMG: redistribution of the coarse levels is not implemented");
529 levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
530 levelContext.residual.getRedistributed());
531 processNextLevel = levelContext.residual.getRedistributed().size()>0;
532 if(processNextLevel) {
533 //restrict defect to coarse level right hand side.
534 ++levelContext.pinfo;
535 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
536 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
537 static_cast<const Range&>(levelContext.residual.getRedistributed()),
538 *levelContext.pinfo);
539 }
540 }else{
541 //restrict defect to coarse level right hand side.
542 ++levelContext.rhs;
543 ++levelContext.pinfo;
544 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
545 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
546 static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
547 }
548
549 if(processNextLevel) {
550 // prepare coarse system
551 ++levelContext.residual;
552 ++levelContext.lhs;
553 ++levelContext.matrix;
554 ++levelContext.level;
555 ++levelContext.redist;
556
557 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
558 // next level is not the globally coarsest one
559 ++levelContext.aggregates;
560 }
561 // prepare the lhs on the next level
562 *levelContext.lhs=0;
563 *levelContext.residual=0;
564 }
565 return processNextLevel;
566 }
567
568 template<class M, class X, class PI, class A>
569 void FastAMG<M,X,PI,A>
570 ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
571 {
572 if(processNextLevel) {
573 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
574 // previous level is not the globally coarsest one
575 --levelContext.aggregates;
576 }
577 --levelContext.redist;
578 --levelContext.level;
579 //prolongate and add the correction (update is in coarse left hand side)
580 --levelContext.matrix;
581 --levelContext.residual;
582
583 }
584
585 typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
586 if(levelContext.redist->isSetup()) {
587
588 // Need to redistribute during prolongate
589 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
590 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
591 levelContext.lhs.getRedistributed(),
592 matrices_->getProlongationDampingFactor(),
593 *levelContext.pinfo, *levelContext.redist);
594 }else{
595 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
596 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
597 matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
598
599 // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
600 }
601
602
603 if(processNextLevel) {
604 --levelContext.rhs;
605 }
606
607 }
608
609
610 template<class M, class X, class PI, class A>
611 void FastAMG<M,X,PI,A>
612 ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
613 {
614 constexpr auto bl = blockLevel<typename M::matrix_type>();
615 GaussSeidelPresmoothDefect<bl>::apply(levelContext.matrix->getmat(),
616 x,
617 *levelContext.residual,
618 b);
619 }
620
621 template<class M, class X, class PI, class A>
622 void FastAMG<M,X,PI,A>
623 ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
624 {
625 constexpr auto bl = blockLevel<typename M::matrix_type>();
626 GaussSeidelPostsmoothDefect<bl>
627 ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
628 }
629
630
631 template<class M, class X, class PI, class A>
632 bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
633 {
634 return IsDirectSolver< CoarseSolver>::value;
635 }
636
637 template<class M, class X, class PI, class A>
638 void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
639
640 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
641 // Solve directly
642 InverseOperatorResult res;
643 res.converged=true; // if we do not compute, this flag will not get updated
644 if(levelContext.redist->isSetup()) {
645 levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
646 if(levelContext.rhs.getRedistributed().size()>0) {
647 // We are still participating in the computation
648 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
649 levelContext.rhs.getRedistributed());
650 solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
651 }
652 levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
653 levelContext.pinfo->copyOwnerToAll(v, v);
654 }else{
655 levelContext.pinfo->copyOwnerToAll(b, b);
656 solver_->apply(v, const_cast<Range&>(b), res);
657 }
658
659 // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
660 // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
661 if (!res.converged)
662 coarsesolverconverged = false;
663 }else{
664 // presmoothing
665 presmooth(levelContext, v, b);
666 // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
667 // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
668#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
669 bool processNextLevel = moveToCoarseLevel(levelContext);
670
671 if(processNextLevel) {
672 // next level
673 for(std::size_t i=0; i<gamma_; i++)
674 mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
675 }
676
677 moveToFineLevel(levelContext, processNextLevel, v);
678#else
679 *levelContext.lhs=0;
680#endif
681
682 if(levelContext.matrix == matrices_->matrices().finest()) {
683 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
684 if(!coarsesolverconverged)
685 DUNE_THROW(MathError, "Coarse solver did not converge");
686 }
687
688 postsmooth(levelContext, v, b);
689 }
690 }
691
692
694 template<class M, class X, class PI, class A>
695 void FastAMG<M,X,PI,A>::post([[maybe_unused]] Domain& x)
696 {
697 lhs_=nullptr;
698 rhs_=nullptr;
699 residual_=nullptr;
700 }
701
702 template<class M, class X, class PI, class A>
703 template<class A1>
704 void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
705 {
706 matrices_->getCoarsestAggregatesOnFinest(cont);
707 }
708
709 } // end namespace Amg
710} // end namespace Dune
711
712#endif
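
Usage sketch (not part of the header): the following fragment shows how FastAMG is typically plugged into an ISTL Krylov solver, using the constructor that takes a fine operator and a coarsening criterion (declared at line 106 above). The helper function solveWithFastAMG, the choice of GeneralizedPCGSolver, the UnSymmetricCriterion/FirstDiagonal coarsening criterion and all numeric parameters are illustrative assumptions, not prescribed by this header.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/fastamg.hh>

using Matrix   = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
using Vector   = Dune::BlockVector<Dune::FieldVector<double,1>>;
using Operator = Dune::MatrixAdapter<Matrix,Vector,Vector>;

// Solve A x = b, with A and b assembled elsewhere.
void solveWithFastAMG(const Matrix& A, Vector& x, Vector& b)
{
  Operator fineOperator(A);

  // Coarsening criterion on the (unsymmetric) matrix graph; the numeric
  // arguments (maximum level count, coarsening target) are illustrative.
  using Criterion = Dune::Amg::CoarsenCriterion<
      Dune::Amg::UnSymmetricCriterion<Matrix, Dune::Amg::FirstDiagonal>>;
  Criterion criterion(15, 2000);

  Dune::Amg::Parameters parms;
  parms.setNoPreSmoothSteps(1);   // FastAMG applies exactly one smoothing step per level
  parms.setNoPostSmoothSteps(1);

  Dune::Amg::FastAMG<Operator,Vector> amg(fineOperator, criterion, parms);

  // Use the AMG hierarchy as preconditioner of a Krylov solver.
  Dune::GeneralizedPCGSolver<Vector> solver(fineOperator, amg, 1e-8, 200, 2);
  Dune::InverseOperatorResult res;
  solver.apply(x, b, res);
}
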
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:60
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:216
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:219
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:61
All parameters for AMG.
Definition: parameters.hh:393
ConstIterator class for sequential access.
Definition: matrix.hh:404
A generic dynamic dense matrix.
Definition: matrix.hh:561
typename Imp::BlockTraits< T >::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:565
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:589
T block_type
Export the type representing the components.
Definition: matrix.hh:568
The negation of a set. An item is contained in the set if and only if it is not contained in the negated set.
Definition: enumset.hh:96
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:32
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:52
Sequential SSOR preconditioner.
Definition: preconditioners.hh:141
A simple stop watch.
Definition: timer.hh:43
void reset() noexcept
Reset timer while keeping the running/stopped state.
Definition: timer.hh:57
double elapsed() const noexcept
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:77
A few common exception classes.
Traits for type conversions and type information.
Some generic functions for pretty printing vectors and matrices.
Implementations of the inverse operator interface.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:184
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:704
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:188
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:150
void post(Domain &x)
Clean up.
Definition: fastamg.hh:695
X Domain
The domain type.
Definition: fastamg.hh:77
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:204
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: fastamg.hh:123
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:72
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:192
X Range
The range type.
Definition: fastamg.hh:79
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the parallel data distribution and providing communication methods.
Definition: fastamg.hh:70
M Operator
The matrix operator type.
Definition: fastamg.hh:63
InverseOperator< X, X > CoarseSolver
the type of the coarse solver.
Definition: fastamg.hh:81
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:632
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:200
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:208
std::size_t level
The level index.
Definition: fastamg.hh:212
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:488
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:74
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:427
FastAMG(OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new amg with a specific coarse solver.
Definition: fastamg.hh:297
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:196
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:406
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
static std::shared_ptr< T > construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:428
Provides classes representing the hierarchies in AMG.
Dune namespace.
Definition: alignedallocator.hh:13
std::shared_ptr< T > stackobject_to_shared_ptr(T &t)
Create a shared_ptr for a stack-allocated object.
Definition: shared_ptr.hh:72
Define general preconditioner interface.
Define base class for scalar product and norm.
Classes for the generic construction and application of the smoothers.
Templates characterizing the type of a solver.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:66
Statistics about the application of an inverse operator.
Definition: solver.hh:48
bool converged
True if convergence criterion has been met.
Definition: solver.hh:73
Category
Definition: solvercategory.hh:23
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:25
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Classes for using UMFPack with ISTL matrices.
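
As a second, purely illustrative sketch, the bare Preconditioner interface documented above (pre, apply, post) can also be driven by hand. Here amg, A, x and b are assumed to be set up as in the previous example, and using mmv for the defect computation is an assumption about the matrix interface.

// One manually driven cycle of the preconditioner.
amg.pre(x, b);               // prepare hierarchy vectors, pre-set Dirichlet rows of x

Vector v(b.size());
v = 0.0;                     // apply() asserts that the correction starts from zero
Vector d(b);
A.mmv(x, d);                 // current defect d = b - A*x
amg.apply(v, d);             // one multigrid cycle: v approximately solves A v = d
x += v;                      // add the correction to the iterate

amg.post(x);                 // release the hierarchy vectors again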