Dune Core Modules (2.8.0)

fastamg.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_ISTL_FASTAMG_HH
4#define DUNE_ISTL_FASTAMG_HH
5
6#include <memory>
12#include <dune/istl/solvers.hh>
14#include <dune/istl/superlu.hh>
15#include <dune/istl/umfpack.hh>
17#include <dune/istl/io.hh>
19
20#include "fastamgsmoother.hh"
21
30namespace Dune
31{
32 namespace Amg
33 {
56 template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
57 class FastAMG : public Preconditioner<X,X>
58 {
59 public:
61 typedef M Operator;
68 typedef PI ParallelInformation;
70 typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
72 typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
73
75 typedef X Domain;
77 typedef X Range;
79 typedef InverseOperator<X,X> CoarseSolver;
80
88 FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
89 const Parameters& parms,
90 bool symmetric=true);
91
103 template<class C>
104 FastAMG(const Operator& fineOperator, const C& criterion,
105 const Parameters& parms=Parameters(),
106 bool symmetric=true,
107 const ParallelInformation& pinfo=ParallelInformation());
108
112 FastAMG(const FastAMG& amg);
113
115 void pre(Domain& x, Range& b);
116
118 void apply(Domain& v, const Range& d);
119
121 virtual SolverCategory::Category category() const
122 {
123 return SolverCategory::sequential;
124 }
125
127 void post(Domain& x);
128
133 template<class A1>
134 void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
135
136 std::size_t levels();
137
138 std::size_t maxlevels();
139
148 void recalculateHierarchy()
149 {
150 matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
151 }
152
157 bool usesDirectCoarseLevelSolver() const;
158
159 private:
166 template<class C>
167 void createHierarchies(C& criterion,
168 const std::shared_ptr<const Operator>& matrixptr,
169 const PI& pinfo);
170
177 struct LevelContext
178 {
182 typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
186 typename ParallelInformationHierarchy::Iterator pinfo;
190 typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
194 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
198 typename Hierarchy<Domain,A>::Iterator lhs;
202 typename Hierarchy<Domain,A>::Iterator residual;
206 typename Hierarchy<Range,A>::Iterator rhs;
210 std::size_t level;
211 };
212
214 void mgc(LevelContext& levelContext, Domain& x, const Range& b);
215
222 void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
223
230 void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
231
238 void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
239 Domain& fineX);
240
245 bool moveToCoarseLevel(LevelContext& levelContext);
246
251 void initIteratorsWithFineLevel(LevelContext& levelContext);
252
254 std::shared_ptr<OperatorHierarchy> matrices_;
256 std::shared_ptr<CoarseSolver> solver_;
258 std::shared_ptr<Hierarchy<Range,A>> rhs_;
260 std::shared_ptr<Hierarchy<Domain,A>> lhs_;
262 std::shared_ptr<Hierarchy<Domain,A>> residual_;
263
267 std::shared_ptr<ScalarProduct> scalarProduct_;
269 std::size_t gamma_;
271 std::size_t preSteps_;
273 std::size_t postSteps_;
274 std::size_t level;
275 bool buildHierarchy_;
276 bool symmetric;
277 bool coarsesolverconverged;
279 typedef std::shared_ptr<Smoother> SmootherPointer;
280 SmootherPointer coarseSmoother_;
282 std::size_t verbosity_;
283 };
284
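For orientation, the following is a minimal usage sketch of the class declared above (not part of this header). The matrix/vector types, tolerances and coarsening numbers are illustrative assumptions: a sequential BCRSMatrix is wrapped in a MatrixAdapter, an aggregation criterion is chosen, and the resulting FastAMG object is used as the preconditioner of a CG solver, which drives pre(), apply() and post() internally.

  #include <dune/common/fmatrix.hh>
  #include <dune/common/fvector.hh>
  #include <dune/istl/bcrsmatrix.hh>
  #include <dune/istl/bvector.hh>
  #include <dune/istl/operators.hh>
  #include <dune/istl/solvers.hh>
  #include <dune/istl/paamg/fastamg.hh>

  using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
  using Vec = Dune::BlockVector<Dune::FieldVector<double,1>>;
  using Op  = Dune::MatrixAdapter<Mat,Vec,Vec>;

  void solveWithFastAMG(const Mat& A, Vec& x, Vec& b)
  {
    Op op(A);

    // Aggregation-based coarsening criterion; 15 levels / coarsen target 2000 are illustrative values.
    using Criterion = Dune::Amg::CoarsenCriterion<
        Dune::Amg::SymmetricCriterion<Mat, Dune::Amg::FirstDiagonal>>;
    Criterion criterion(15, 2000);

    Dune::Amg::Parameters parms;
    Dune::Amg::FastAMG<Op,Vec> amg(op, criterion, parms);

    // Use the AMG as preconditioner of a Krylov solver.
    Dune::CGSolver<Vec> cg(op, amg, 1e-8, 100, 2);
    Dune::InverseOperatorResult res;
    cg.apply(x, b, res);
  }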
285 template<class M, class X, class PI, class A>
286 FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
287 : matrices_(amg.matrices_), solver_(amg.solver_),
288 rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
289 gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
290 symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
291 coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
292 {}
293
294 template<class M, class X, class PI, class A>
295 FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
296 const Parameters& parms, bool symmetric_)
297 : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
298 rhs_(), lhs_(), residual_(), scalarProduct_(),
299 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
300 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
301 symmetric(symmetric_), coarsesolverconverged(true),
302 coarseSmoother_(), verbosity_(parms.debugLevel())
303 {
304 if(preSteps_>1||postSteps_>1)
305 {
306 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
307 preSteps_=postSteps_=1;
308 }
309 assert(matrices_->isBuilt());
310 static_assert(std::is_same<PI,SequentialInformation>::value,
311 "Currently only sequential runs are supported");
312 }
313 template<class M, class X, class PI, class A>
314 template<class C>
315 FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix,
316 const C& criterion,
317 const Parameters& parms,
318 bool symmetric_,
319 const PI& pinfo)
320 : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
321 preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
322 buildHierarchy_(true),
323 symmetric(symmetric_), coarsesolverconverged(true),
324 coarseSmoother_(), verbosity_(criterion.debugLevel())
325 {
326 if(preSteps_>1||postSteps_>1)
327 {
328 std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
329 preSteps_=postSteps_=1;
330 }
331 static_assert(std::is_same<PI,SequentialInformation>::value,
332 "Currently only sequential runs are supported");
333 // TODO: reestablish compile time checks.
334 //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
335 // "Matrix and Solver must match in terms of category!");
336 auto matrixptr = stackobject_to_shared_ptr(matrix);
337 createHierarchies(criterion, matrixptr, pinfo);
338 }
339
340 template<class M, class X, class PI, class A>
341 template<class C>
342 void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
343 const std::shared_ptr<const Operator>& matrixptr,
344 const PI& pinfo)
345 {
346 Timer watch;
347 matrices_ = std::make_shared<OperatorHierarchy>(
348 std::const_pointer_cast<Operator>(matrixptr),
349 stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));
350
351 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
352
353 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
354 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
355
356 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
357 // We have the coarsest level. Create the coarse solver.
358 typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
359 SmootherArgs sargs;
360 sargs.iterations = 1;
361
362 typename ConstructionTraits<Smoother>::Arguments cargs;
363 cargs.setArgs(sargs);
364 if(matrices_->redistributeInformation().back().isSetup()) {
365 // Solve on the redistributed partitioning
366 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
367 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
368 }else{
369 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
370 cargs.setComm(*matrices_->parallelInformation().coarsest());
371 }
372
373 coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
374 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
375
376#if HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
377#if HAVE_SUITESPARSE_UMFPACK
378#define DIRECTSOLVER UMFPack
379#else
380#define DIRECTSOLVER SuperLU
381#endif
382 // Use superlu if we are purely sequential or with only one processor on the coarsest level.
383 if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
384 || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
385 || (matrices_->parallelInformation().coarsest().isRedistributed()
386 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
387 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
388 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
389 std::cout<<"Using superlu"<<std::endl;
390 if(matrices_->parallelInformation().coarsest().isRedistributed())
391 {
392 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
393 // We are still participating on this level
394 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
395 else
396 solver_.reset();
397 }else
398 solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
399 }else
400#undef DIRECTSOLVER
401#endif // HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
402 {
403 if(matrices_->parallelInformation().coarsest().isRedistributed())
404 {
405 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
406 // We are still participating on this level
407 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
408 *scalarProduct_,
409 *coarseSmoother_, 1E-2, 1000, 0));
410 else
411 solver_.reset();
412 }else
413 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
414 *scalarProduct_,
415 *coarseSmoother_, 1E-2, 1000, 0));
416 }
417 }
418
419 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
420 std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
421 }
422
423
424 template<class M, class X, class PI, class A>
425 void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
426 {
427 Timer watch, watch1;
428 // Detect matrix rows where all off-diagonal entries are
429 // zero and set x such that A_dd * x_d = b_d.
430 // This allows users to be less careful when setting up their
431 // linear systems.
432 typedef typename M::matrix_type Matrix;
433 typedef typename Matrix::ConstRowIterator RowIter;
434 typedef typename Matrix::ConstColIterator ColIter;
435 typedef typename Matrix::block_type Block;
436 Block zero;
437 zero=typename Matrix::field_type();
438
439 const Matrix& mat=matrices_->matrices().finest()->getmat();
440 for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
441 bool isDirichlet = true;
442 bool hasDiagonal = false;
443 ColIter diag;
444 for(ColIter col=row->begin(); col!=row->end(); ++col) {
445 if(row.index()==col.index()) {
446 diag = col;
447 hasDiagonal = (*col != zero);
448 }else{
449 if(*col!=zero)
450 isDirichlet = false;
451 }
452 }
453 if(isDirichlet && hasDiagonal)
454 diag->solve(x[row.index()], b[row.index()]);
455 }
456 if (verbosity_>0)
457 std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
458 watch1.reset();
459 // No smoother to make x consistent! Do it by hand
460 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
461 rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
462 lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
463 residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
464 matrices_->coarsenVector(*rhs_);
465 matrices_->coarsenVector(*lhs_);
466 matrices_->coarsenVector(*residual_);
467
468 // The preconditioner might change x and b. So we have to
469 // copy the changes to the original vectors.
470 x = *lhs_->finest();
471 b = *rhs_->finest();
472 }
473 template<class M, class X, class PI, class A>
474 std::size_t FastAMG<M,X,PI,A>::levels()
475 {
476 return matrices_->levels();
477 }
478 template<class M, class X, class PI, class A>
479 std::size_t FastAMG<M,X,PI,A>::maxlevels()
480 {
481 return matrices_->maxlevels();
482 }
483
485 template<class M, class X, class PI, class A>
486 void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
487 {
488 LevelContext levelContext;
489 // Init all iterators for the current level
490 initIteratorsWithFineLevel(levelContext);
491
492 assert(v.two_norm()==0);
493
494 level=0;
495 if(matrices_->maxlevels()==1){
496 // The coarse solver might modify the defect d!
497 Range b(d);
498 mgc(levelContext, v, b);
499 }else
500 mgc(levelContext, v, d);
501 if(postSteps_==0||matrices_->maxlevels()==1)
502 levelContext.pinfo->copyOwnerToAll(v, v);
503 }
504
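To make the definitions of pre() above and apply() just shown easier to follow, this is the call sequence a Krylov solver issues against the Preconditioner interface (a sketch only; amg, x, b and a defect d are assumed to exist as in the usage sketch after the class declaration, and in practice Dune::CGSolver and friends perform these calls for you):

  amg.pre(x, b);      // build the level vectors, preprocess trivial Dirichlet rows
  Vec v(x.size());
  v = 0;              // apply() asserts a zero initial guess for the correction
  amg.apply(v, d);    // one multigrid cycle approximating A*v = d
  x += v;             // the Krylov solver combines the correction with its iterate
  amg.post(x);        // release the level vectors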
505 template<class M, class X, class PI, class A>
506 void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
507 {
508 levelContext.matrix = matrices_->matrices().finest();
509 levelContext.pinfo = matrices_->parallelInformation().finest();
510 levelContext.redist =
511 matrices_->redistributeInformation().begin();
512 levelContext.aggregates = matrices_->aggregatesMaps().begin();
513 levelContext.lhs = lhs_->finest();
514 levelContext.residual = residual_->finest();
515 levelContext.rhs = rhs_->finest();
516 levelContext.level=0;
517 }
518
519 template<class M, class X, class PI, class A>
520 bool FastAMG<M,X,PI,A>
521 ::moveToCoarseLevel(LevelContext& levelContext)
522 {
523 bool processNextLevel=true;
524
525 if(levelContext.redist->isSetup()) {
526 DUNE_THROW(NotImplemented, "Redistribution to a coarse level solver is not implemented in FastAMG");
527 levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
528 levelContext.residual.getRedistributed());
529 processNextLevel = levelContext.residual.getRedistributed().size()>0;
530 if(processNextLevel) {
531 //restrict defect to coarse level right hand side.
532 ++levelContext.pinfo;
533 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
534 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
535 static_cast<const Range&>(levelContext.residual.getRedistributed()),
536 *levelContext.pinfo);
537 }
538 }else{
539 //restrict defect to coarse level right hand side.
540 ++levelContext.rhs;
541 ++levelContext.pinfo;
542 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
543 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
544 static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
545 }
546
547 if(processNextLevel) {
548 // prepare coarse system
549 ++levelContext.residual;
550 ++levelContext.lhs;
551 ++levelContext.matrix;
552 ++levelContext.level;
553 ++levelContext.redist;
554
555 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
556 // next level is not the globally coarsest one
557 ++levelContext.aggregates;
558 }
559 // prepare the lhs on the next level
560 *levelContext.lhs=0;
561 *levelContext.residual=0;
562 }
563 return processNextLevel;
564 }
565
566 template<class M, class X, class PI, class A>
567 void FastAMG<M,X,PI,A>
568 ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
569 {
570 if(processNextLevel) {
571 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
572 // previous level is not the globally coarsest one
573 --levelContext.aggregates;
574 }
575 --levelContext.redist;
576 --levelContext.level;
577 //prolongate and add the correction (update is in coarse left hand side)
578 --levelContext.matrix;
579 --levelContext.residual;
580
581 }
582
583 typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
584 if(levelContext.redist->isSetup()) {
585
586 // Need to redistribute during prolongate
587 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
588 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
589 levelContext.lhs.getRedistributed(),
590 matrices_->getProlongationDampingFactor(),
591 *levelContext.pinfo, *levelContext.redist);
592 }else{
593 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
594 ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
595 matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
596
597 // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
598 }
599
600
601 if(processNextLevel) {
602 --levelContext.rhs;
603 }
604
605 }
606
607
608 template<class M, class X, class PI, class A>
609 void FastAMG<M,X,PI,A>
610 ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
611 {
612 constexpr auto bl = blockLevel<typename M::matrix_type>();
613 GaussSeidelPresmoothDefect<bl>::apply(levelContext.matrix->getmat(),
614 x,
615 *levelContext.residual,
616 b);
617 }
618
619 template<class M, class X, class PI, class A>
620 void FastAMG<M,X,PI,A>
621 ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
622 {
623 constexpr auto bl = blockLevel<typename M::matrix_type>();
624 GaussSeidelPostsmoothDefect<bl>
625 ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
626 }
627
628
629 template<class M, class X, class PI, class A>
630 bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
631 {
632 return IsDirectSolver< CoarseSolver>::value;
633 }
634
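An illustrative query of the member defined above (assuming an already constructed FastAMG object named amg and an included <iostream>). Note that it reports a compile-time property of the CoarseSolver type, not which branch createHierarchies() actually took:

  bool direct = amg.usesDirectCoarseLevelSolver();
  std::cout << "FastAMG uses a direct coarse level solver: " << std::boolalpha << direct << std::endl;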
635 template<class M, class X, class PI, class A>
636 void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
637
638 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
639 // Solve directly
640 InverseOperatorResult res;
641 res.converged=true; // If we do not compute, this flag will not get updated
642 if(levelContext.redist->isSetup()) {
643 levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
644 if(levelContext.rhs.getRedistributed().size()>0) {
645 // We are still participating in the computation
646 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
647 levelContext.rhs.getRedistributed());
648 solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
649 }
650 levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
651 levelContext.pinfo->copyOwnerToAll(v, v);
652 }else{
653 levelContext.pinfo->copyOwnerToAll(b, b);
654 solver_->apply(v, const_cast<Range&>(b), res);
655 }
656
657 // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
658 // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
659 if (!res.converged)
660 coarsesolverconverged = false;
661 }else{
662 // presmoothing
663 presmooth(levelContext, v, b);
664 // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
665 // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
666#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
667 bool processNextLevel = moveToCoarseLevel(levelContext);
668
669 if(processNextLevel) {
670 // next level
671 for(std::size_t i=0; i<gamma_; i++)
672 mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
673 }
674
675 moveToFineLevel(levelContext, processNextLevel, v);
676#else
677 *levelContext.lhs=0;
678#endif
679
680 if(levelContext.matrix == matrices_->matrices().finest()) {
681 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
682 if(!coarsesolverconverged)
683 DUNE_THROW(MathError, "Coarse solver did not converge");
684 }
685
686 postsmooth(levelContext, v, b);
687 }
688 }
689
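The cycle that mgc() implements is steered through Dune::Amg::Parameters before construction (a sketch, assuming the usual getter/setter pairs of that class; the values are illustrative):

  Dune::Amg::Parameters parms;
  parms.setGamma(1);              // one recursive call per level: V-cycle (2 would give a W-cycle)
  parms.setNoPreSmoothSteps(1);   // FastAMG supports exactly one pre-/post-smoothing step
  parms.setNoPostSmoothSteps(1);
  parms.setDebugLevel(1);         // controls the verbosity_ output used above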
690
692 template<class M, class X, class PI, class A>
693 void FastAMG<M,X,PI,A>::post([[maybe_unused]] Domain& x)
694 {
695 lhs_=nullptr;
696 rhs_=nullptr;
697 residual_=nullptr;
698 }
699
700 template<class M, class X, class PI, class A>
701 template<class A1>
702 void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
703 {
704 matrices_->getCoarsestAggregatesOnFinest(cont);
705 }
706
707 } // end namespace Amg
708} // end namespace Dune
709
710#endif
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:58
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:214
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:217
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:59
All parameters for AMG.
Definition: parameters.hh:391
ConstIterator class for sequential access.
Definition: matrix.hh:402
A generic dynamic dense matrix.
Definition: matrix.hh:559
typename Imp::BlockTraits< T >::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:563
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:587
T block_type
Export the type representing the components.
Definition: matrix.hh:566
The negation of a set. An item is contained in the set if and only if it is not contained in the nega...
Definition: enumset.hh:94
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:30
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:50
Sequential SSOR preconditioner.
Definition: preconditioners.hh:139
A simple stop watch.
Definition: timer.hh:41
void reset() noexcept
Reset timer while keeping the running/stopped state.
Definition: timer.hh:55
double elapsed() const noexcept
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:75
A few common exception classes.
Traits for type conversions and type information.
Some generic functions for pretty printing vectors and matrices.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:182
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:702
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:186
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:148
void post(Domain &x)
Clean up.
Definition: fastamg.hh:693
X Domain
The domain type.
Definition: fastamg.hh:75
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:202
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: fastamg.hh:121
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:70
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:190
X Range
The range type.
Definition: fastamg.hh:77
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the...
Definition: fastamg.hh:68
M Operator
The matrix operator type.
Definition: fastamg.hh:61
InverseOperator< X, X > CoarseSolver
The type of the coarse solver.
Definition: fastamg.hh:79
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:630
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:198
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:206
std::size_t level
The level index.
Definition: fastamg.hh:210
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:486
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:72
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:425
FastAMG(OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new AMG with a specific coarse solver.
Definition: fastamg.hh:295
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:194
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:404
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:42
static std::shared_ptr< T > construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:50
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:426
Provides classes representing the hierarchies in AMG.
Dune namespace.
Definition: alignedallocator.hh:11
std::shared_ptr< T > stackobject_to_shared_ptr(T &t)
Create a shared_ptr for a stack-allocated object.
Definition: shared_ptr.hh:70
Define general preconditioner interface.
Define base class for scalar product and norm.
Classes for the generic construction and application of the smoothers.
Implementations of the inverse operator interface.
Templates characterizing the type of a solver.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:64
Statistics about the application of an inverse operator.
Definition: solver.hh:46
bool converged
True if convergence criterion has been met.
Definition: solver.hh:71
Category
Definition: solvercategory.hh:21
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:23
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Classes for using UMFPack with ISTL matrices.