DUNE PDELab (2.7)

fastamg.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH

#include <memory>

#include <dune/common/exceptions.hh>
#include <dune/common/typetraits.hh>
#include <dune/common/unused.hh>
#include <dune/istl/paamg/smoother.hh>
#include <dune/istl/paamg/transfer.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/scalarproducts.hh>
#include <dune/istl/superlu.hh>
#include <dune/istl/umfpack.hh>
#include <dune/istl/solvertype.hh>
#include <dune/istl/io.hh>
#include <dune/istl/preconditioners.hh>

#include "fastamgsmoother.hh"
namespace Dune
{
  namespace Amg
  {
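    // A minimal usage sketch (not part of the original header). It assumes a
    // matrix 'mat' and vectors 'x', 'b' assembled elsewhere; the matrix and
    // vector types are illustrative choices, only the FastAMG interface
    // below is taken from this file.
    //
    //   typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
    //   typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
    //   typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
    //   typedef Dune::Amg::CoarsenCriterion<
    //     Dune::Amg::UnSymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
    //
    //   Operator fineOperator(mat);
    //   Criterion criterion(15, 2000);   // max. levels, coarsening target
    //   Dune::Amg::FastAMG<Operator,Vector> amg(fineOperator, criterion);
    //
    //   Dune::CGSolver<Vector> cg(fineOperator, amg, 1e-8, 100, 2);
    //   Dune::InverseOperatorResult res;
    //   cg.apply(x, b, res);             // solve mat*x = b with AMG-preconditioned CG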
    /**
     * @brief A fast (sequential) algebraic multigrid based on agglomeration
     *        that saves memory bandwidth.
     */
    template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
    class FastAMG : public Preconditioner<X,X>
    {
    public:
      /** @brief The matrix operator type. */
      typedef M Operator;

      /**
       * @brief The type of the parallel information.
       *
       * Either OwnerOverlapCommunication or another type describing the
       * parallel data distribution.
       */
      typedef PI ParallelInformation;
      /** @brief The operator hierarchy type. */
      typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
      /** @brief The parallel data distribution hierarchy type. */
      typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;

      /** @brief The domain type. */
      typedef X Domain;
      /** @brief The range type. */
      typedef X Range;
      /** @brief The type of the coarse solver. */
      typedef InverseOperator<X,X> CoarseSolver;

      /** @brief Construct a new AMG with a specific coarse solver. */
      FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
              const Parameters& parms,
              bool symmetric=true);

      /**
       * @brief Construct an AMG from a fine-level operator, building the
       *        hierarchy of coarse matrices according to the given criterion.
       */
      template<class C>
      FastAMG(const Operator& fineOperator, const C& criterion,
              const Parameters& parms=Parameters(),
              bool symmetric=true,
              const ParallelInformation& pinfo=ParallelInformation());

      /** @brief Copy constructor. */
      FastAMG(const FastAMG& amg);

      /** @brief Prepare the preconditioner. */
      void pre(Domain& x, Range& b);

      /** @brief Apply one step of the preconditioner to the system A(v)=d. */
      void apply(Domain& v, const Range& d);

      /** @brief Category of the preconditioner (see SolverCategory::Category). */
      virtual SolverCategory::Category category() const
      {
        return SolverCategory::sequential;
      }

      /** @brief Clean up. */
      void post(Domain& x);

      /** @brief Get the aggregate number of each unknown on the coarsest level. */
      template<class A1>
      void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
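
      // A minimal sketch of retrieving the aggregate numbers (assumes a
      // fully set-up FastAMG object named 'amg'):
      //
      //   std::vector<std::size_t> aggregates;
      //   amg.getCoarsestAggregateNumbers(aggregates);
      //   // aggregates[i]: coarsest-level aggregate of fine-level unknown i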

      std::size_t levels();

      std::size_t maxlevels();

      /** @brief Recalculate the matrix hierarchy. */
      void recalculateHierarchy()
      {
        matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
      }
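
      // Usage note (a sketch, not from the original documentation): call
      // this after the fine-level matrix entries have changed while the
      // sparsity pattern and the aggregation stay the same, e.g.
      //
      //   reassemble(fineMatrix);       // hypothetical reassembly step
      //   amg.recalculateHierarchy();   // recompute the Galerkin products
      //
      // The coarse-level operators are then rebuilt without re-coarsening.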

      /** @brief Check whether the coarse solver used is a direct solver. */
      bool usesDirectCoarseLevelSolver() const;

    private:
      /** @brief Create the matrix and parallel information hierarchies. */
      template<class C>
      void createHierarchies(C& criterion,
                             const std::shared_ptr<const Operator>& matrixptr,
                             const PI& pinfo);

      /**
       * @brief Holds the iterators pointing to the matrix, the parallel
       *        information and the vectors of the current level.
       */
      struct LevelContext
      {
        /** @brief The iterator over the matrices. */
        typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
        /** @brief The iterator over the parallel information. */
        typename ParallelInformationHierarchy::Iterator pinfo;
        /** @brief The iterator over the redistribution information. */
        typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
        /** @brief The iterator over the aggregates maps. */
        typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
        /** @brief The iterator over the left hand side. */
        typename Hierarchy<Domain,A>::Iterator lhs;
        /** @brief The iterator over the residuals. */
        typename Hierarchy<Domain,A>::Iterator residual;
        /** @brief The iterator over the right hand side. */
        typename Hierarchy<Range,A>::Iterator rhs;
        /** @brief The level index. */
        std::size_t level;
      };

      /** @brief Perform one multigrid cycle on the current level. */
      void mgc(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Apply pre smoothing on the current level. */
      void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Apply post smoothing on the current level. */
      void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Move the iterators to the finer level and prolongate the correction. */
      void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
                           Domain& fineX);

      /** @brief Move the iterators to the coarser level and restrict the defect. */
      bool moveToCoarseLevel(LevelContext& levelContext);

      /** @brief Initialize the iterators of the level context with the fine level. */
      void initIteratorsWithFineLevel(LevelContext& levelContext);

      /** @brief The matrix hierarchy the AMG is working on. */
      std::shared_ptr<OperatorHierarchy> matrices_;
      /** @brief The solver of the coarsest level. */
      std::shared_ptr<CoarseSolver> solver_;
      /** @brief The right hand sides on all levels. */
      std::shared_ptr<Hierarchy<Range,A>> rhs_;
      /** @brief The left hand sides (solutions) on all levels. */
      std::shared_ptr<Hierarchy<Domain,A>> lhs_;
      /** @brief The residuals on all levels. */
      std::shared_ptr<Hierarchy<Domain,A>> residual_;

      /** @brief The type of the scalar product for the coarse solver. */
      typedef Dune::ScalarProduct<X> ScalarProduct;
      /** @brief The scalar product used on the coarse level. */
      std::shared_ptr<ScalarProduct> scalarProduct_;
      /** @brief The number of coarse-level corrections: 1 for a V-cycle, 2 for a W-cycle. */
      std::size_t gamma_;
      /** @brief The number of pre-smoothing steps. */
      std::size_t preSteps_;
      /** @brief The number of post-smoothing steps. */
      std::size_t postSteps_;
      std::size_t level;
      bool buildHierarchy_;
      bool symmetric;
      bool coarsesolverconverged;
      /** @brief The smoother used by the iterative coarse solver. */
      typedef SeqSSOR<typename M::matrix_type,X,X> Smoother;
      typedef std::shared_ptr<Smoother> SmootherPointer;
      SmootherPointer coarseSmoother_;
      /** @brief The verbosity level. */
      std::size_t verbosity_;
    };

    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
      : matrices_(amg.matrices_), solver_(amg.solver_),
        rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
        gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
        buildHierarchy_(amg.buildHierarchy_),
        symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
        coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
    {}

    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                               const Parameters& parms, bool symmetric_)
      : matrices_(stackobject_to_shared_ptr(matrices)),
        solver_(stackobject_to_shared_ptr(coarseSolver)),
        rhs_(), lhs_(), residual_(), scalarProduct_(),
        gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
        postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(parms.debugLevel())
    {
      if(preSteps_>1||postSteps_>1)
      {
        std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=1;
      }
      assert(matrices_->isBuilt());
      static_assert(std::is_same<PI,SequentialInformation>::value,
                    "Currently only sequential runs are supported");
    }

    template<class M, class X, class PI, class A>
    template<class C>
    FastAMG<M,X,PI,A>::FastAMG(const Operator& fineOperator,
                               const C& criterion,
                               const Parameters& parms,
                               bool symmetric_,
                               const PI& pinfo)
      : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
        preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
        buildHierarchy_(true),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(criterion.debugLevel())
    {
      if(preSteps_>1||postSteps_>1)
      {
        std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=1;
      }
      static_assert(std::is_same<PI,SequentialInformation>::value,
                    "Currently only sequential runs are supported");
      // TODO: reestablish compile time checks.
      //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
      //              "Matrix and Solver must match in terms of category!");
      auto matrixptr = stackobject_to_shared_ptr(fineOperator);
      createHierarchies(criterion, matrixptr, pinfo);
    }

    template<class M, class X, class PI, class A>
    template<class C>
    void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
                                              const std::shared_ptr<const Operator>& matrixptr,
                                              const PI& pinfo)
    {
      Timer watch;
      matrices_ = std::make_shared<OperatorHierarchy>(
        std::const_pointer_cast<Operator>(matrixptr),
        stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));

      matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building hierarchy of "<<matrices_->maxlevels()<<" levels took "
                 <<watch.elapsed()<<" seconds."<<std::endl;

      if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
        // We have reached the coarsest level. Create the coarse solver.
        typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
        SmootherArgs sargs;
        sargs.iterations = 1;

        typename ConstructionTraits<Smoother>::Arguments cargs;
        cargs.setArgs(sargs);
        if(matrices_->redistributeInformation().back().isSetup()) {
          // Solve on the redistributed partitioning
          cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
          cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
        }else{
          cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
          cargs.setComm(*matrices_->parallelInformation().coarsest());
        }

        coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
        scalarProduct_ = createScalarProduct<X>(cargs.getComm(), category());

#if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
#if HAVE_SUITESPARSE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
        // Use a direct solver if we are purely sequential or if there is
        // only one process on the coarsest level.
        if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
           || matrices_->parallelInformation().coarsest()->communicator().size()==1 // parallel mode with only one process
           || (matrices_->parallelInformation().coarsest().isRedistributed()
               && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1)) { // redistributed to one process
          if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
            std::cout<<"Using a direct coarse solver"<<std::endl;
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              // We are still participating on this level
              solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
            else
              solver_.reset();
          }else
            solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
        }else
#undef DIRECTSOLVER
#endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
        {
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              // We are still participating on this level
              solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                                                  *scalarProduct_,
                                                  *coarseSmoother_, 1E-2, 1000, 0));
            else
              solver_.reset();
          }else
            solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
                                                *scalarProduct_,
                                                *coarseSmoother_, 1E-2, 1000, 0));
        }
      }

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building the hierarchy including the coarse solver took "
                 <<watch.elapsed()<<" seconds."<<std::endl;
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
    {
      Timer watch, watch1;
      // Detect matrix rows where all offdiagonal entries are zero and set x
      // such that A_dd*x_d=b_d. Thus users can be more careless when setting
      // up their linear systems.
      typedef typename M::matrix_type Matrix;
      typedef typename Matrix::ConstRowIterator RowIter;
      typedef typename Matrix::ConstColIterator ColIter;
      typedef typename Matrix::block_type Block;
      Block zero;
      zero=typename Matrix::field_type();

      const Matrix& mat=matrices_->matrices().finest()->getmat();
      for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
        bool isDirichlet = true;
        bool hasDiagonal = false;
        ColIter diag;
        for(ColIter col=row->begin(); col!=row->end(); ++col) {
          if(row.index()==col.index()) {
            diag = col;
            hasDiagonal = (*col != zero);
          }else{
            if(*col!=zero)
              isDirichlet = false;
          }
        }
        if(isDirichlet && hasDiagonal)
          diag->solve(x[row.index()], b[row.index()]);
      }
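
      // Illustration (not from the original source): for a scalar row with
      // diagonal entry a_dd and all offdiagonal entries zero, the block
      // solve above reduces to x_d = b_d / a_dd, so such Dirichlet rows are
      // satisfied exactly before the first multigrid cycle.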

      if(verbosity_>0)
        std::cout<<"Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
      watch1.reset();
      // No smoother to make x consistent! Do it by hand.
      matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
      rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
      lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
      residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
      matrices_->coarsenVector(*rhs_);
      matrices_->coarsenVector(*lhs_);
      matrices_->coarsenVector(*residual_);

      // The preconditioner might change x and b, so we have to copy the
      // changes back to the original vectors.
      x = *lhs_->finest();
      b = *rhs_->finest();
    }

    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::levels()
    {
      return matrices_->levels();
    }

    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::maxlevels()
    {
      return matrices_->maxlevels();
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
    {
      LevelContext levelContext;
      // Init all iterators for the current level
      initIteratorsWithFineLevel(levelContext);

      // The preconditioner requires a zero initial guess.
      assert(v.two_norm()==0);

      level=0;
      if(matrices_->maxlevels()==1){
        // The coarse solver might modify d!
        Range b(d);
        mgc(levelContext, v, b);
      }else
        mgc(levelContext, v, d);
      if(postSteps_==0||matrices_->maxlevels()==1)
        levelContext.pinfo->copyOwnerToAll(v, v);
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
    {
      levelContext.matrix = matrices_->matrices().finest();
      levelContext.pinfo = matrices_->parallelInformation().finest();
      levelContext.redist = matrices_->redistributeInformation().begin();
      levelContext.aggregates = matrices_->aggregatesMaps().begin();
      levelContext.lhs = lhs_->finest();
      levelContext.residual = residual_->finest();
      levelContext.rhs = rhs_->finest();
      levelContext.level=0;
    }

    template<class M, class X, class PI, class A>
    bool FastAMG<M,X,PI,A>
    ::moveToCoarseLevel(LevelContext& levelContext)
    {
      bool processNextLevel=true;

      if(levelContext.redist->isSetup()) {
        // Data redistribution is not supported yet; only sequential runs are.
        DUNE_THROW(NotImplemented, "Redistribution is not supported by FastAMG");
        levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                          levelContext.residual.getRedistributed());
        processNextLevel = levelContext.residual.getRedistributed().size()>0;
        if(processNextLevel) {
          // restrict defect to coarse level right hand side.
          ++levelContext.pinfo;
          Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
          ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                           static_cast<const Range&>(levelContext.residual.getRedistributed()),
                           *levelContext.pinfo);
        }
      }else{
        // restrict defect to coarse level right hand side.
        ++levelContext.rhs;
        ++levelContext.pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                         static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
      }

      if(processNextLevel) {
        // prepare the coarse system
        ++levelContext.residual;
        ++levelContext.lhs;
        ++levelContext.matrix;
        ++levelContext.level;
        ++levelContext.redist;

        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          // next level is not the globally coarsest one
          ++levelContext.aggregates;
        }
        // prepare the lhs on the next level
        *levelContext.lhs=0;
        *levelContext.residual=0;
      }
      return processNextLevel;
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
    {
      if(processNextLevel) {
        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          // previous level is not the globally coarsest one
          --levelContext.aggregates;
        }
        --levelContext.redist;
        --levelContext.level;
        // prolongate and add the correction (the update is in the coarse left hand side)
        --levelContext.matrix;
        --levelContext.residual;
      }

      typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
      if(levelContext.redist->isSetup()) {
        // Need to redistribute during the prolongation
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           levelContext.lhs.getRedistributed(),
                           matrices_->getProlongationDampingFactor(),
                           *levelContext.pinfo, *levelContext.redist);
      }else{
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           matrices_->getProlongationDampingFactor(), *levelContext.pinfo);

        // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
      }

      if(processNextLevel) {
        --levelContext.rhs;
      }
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
    {
      GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
                                                                    x,
                                                                    *levelContext.residual,
                                                                    b);
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
    {
      GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
      ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
    }

    template<class M, class X, class PI, class A>
    bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
    {
      return IsDirectSolver<CoarseSolver>::value;
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
    {
      if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
        // Solve directly
        InverseOperatorResult res;
        res.converged=true; // If we do not compute, this flag will not get updated
        if(levelContext.redist->isSetup()) {
          levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
          if(levelContext.rhs.getRedistributed().size()>0) {
            // We are still participating in the computation
            levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                                 levelContext.rhs.getRedistributed());
            solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
          }
          levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
          levelContext.pinfo->copyOwnerToAll(v, v);
        }else{
          levelContext.pinfo->copyOwnerToAll(b, b);
          solver_->apply(v, const_cast<Range&>(b), res);
        }

        // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
        // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
        if (!res.converged)
          coarsesolverconverged = false;
      }else{
        // presmoothing
        presmooth(levelContext, v, b);
        // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
        // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
        bool processNextLevel = moveToCoarseLevel(levelContext);

        if(processNextLevel) {
          // next level: gamma_==1 yields a V-cycle, gamma_==2 a W-cycle
          for(std::size_t i=0; i<gamma_; i++)
            mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
        }

        moveToFineLevel(levelContext, processNextLevel, v);
#else
        *levelContext.lhs=0;
#endif

        if(levelContext.matrix == matrices_->matrices().finest()) {
          coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
          if(!coarsesolverconverged)
            DUNE_THROW(MathError, "Coarse solver did not converge");
        }

        postsmooth(levelContext, v, b);
      }
    }
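
    // Cycle structure of mgc() in pseudocode (an illustration, not code from
    // the original file):
    //
    //   if on the coarsest level: solve directly
    //   else:
    //     presmooth(v, b)                      // one Gauss-Seidel sweep
    //     restrict residual to coarser level   // moveToCoarseLevel()
    //     repeat gamma_ times: mgc()           // gamma_=1: V-cycle, 2: W-cycle
    //     prolongate and add the correction    // moveToFineLevel()
    //     postsmooth(v, b)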

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::post(Domain& x)
    {
      DUNE_UNUSED_PARAMETER(x);
      lhs_=nullptr;
      rhs_=nullptr;
      residual_=nullptr;
    }

    template<class M, class X, class PI, class A>
    template<class A1>
    void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
    {
      matrices_->getCoarsestAggregatesOnFinest(cont);
    }

  } // end namespace Amg
} // end namespace Dune

#endif // DUNE_ISTL_FASTAMG_HH