Dune Core Modules (2.5.1)

fastamg.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_ISTL_FASTAMG_HH
4 #define DUNE_ISTL_FASTAMG_HH
5 
6 #include <memory>
7 #include <dune/common/exceptions.hh>
8 #include <dune/common/typetraits.hh>
9 #include <dune/common/unused.hh>
10 #include <dune/istl/paamg/smoother.hh>
11 #include <dune/istl/paamg/transfer.hh>
12 #include <dune/istl/paamg/hierarchy.hh>
13 #include <dune/istl/solvers.hh>
14 #include <dune/istl/scalarproducts.hh>
15 #include <dune/istl/superlu.hh>
16 #include <dune/istl/umfpack.hh>
17 #include <dune/istl/solvertype.hh>
18 #include <dune/istl/io.hh>
19 #include <dune/istl/preconditioners.hh>
20 
21 #include "fastamgsmoother.hh"
22 
31 namespace Dune
32 {
33  namespace Amg
34  {
57  template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
58  class FastAMG : public Preconditioner<X,X>
59  {
60  public:
62  typedef M Operator;
69  typedef PI ParallelInformation;
71  typedef MatrixHierarchy<M,ParallelInformation,A> OperatorHierarchy;
73  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
74 
76  typedef X Domain;
78  typedef X Range;
80  typedef InverseOperator<X,X> CoarseSolver;
81 
82  enum {
84  category = SolverCategory::sequential
85  };
86 
94  FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
95  const Parameters& parms,
96  bool symmetric=true);
97 
109  template<class C>
110  FastAMG(const Operator& fineOperator, const C& criterion,
111  const Parameters& parms=Parameters(),
112  bool symmetric=true,
113  const PI& pinfo=PI());
114 
118  FastAMG(const FastAMG& amg);
119 
120  ~FastAMG();
121 
123  void pre(Domain& x, Range& b);
124 
126  void apply(Domain& v, const Range& d);
127 
129  void post(Domain& x);
130 
135  template<class A1>
136  void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
137 
138  std::size_t levels();
139 
140  std::size_t maxlevels();
141 
150  void recalculateHierarchy()
151  {
152  matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
153  }
154 
159  bool usesDirectCoarseLevelSolver() const;
160 
161  private:
168  template<class C>
169  void createHierarchies(C& criterion, Operator& matrix,
170  const PI& pinfo);
171 
178  struct LevelContext
179  {
183  typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
187  typename ParallelInformationHierarchy::Iterator pinfo;
191  typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
195  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
199  typename Hierarchy<Domain,A>::Iterator lhs;
203  typename Hierarchy<Domain,A>::Iterator residual;
207  typename Hierarchy<Range,A>::Iterator rhs;
211  std::size_t level;
212  };
213 
215  void mgc(LevelContext& levelContext, Domain& x, const Range& b);
216 
223  void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
224 
231  void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
232 
239  void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
240  Domain& fineX);
241 
246  bool moveToCoarseLevel(LevelContext& levelContext);
247 
252  void initIteratorsWithFineLevel(LevelContext& levelContext);
253 
255  std::shared_ptr<OperatorHierarchy> matrices_;
257  std::shared_ptr<CoarseSolver> solver_;
259  Hierarchy<Range,A>* rhs_;
261  Hierarchy<Domain,A>* lhs_;
263  Hierarchy<Domain,A>* residual_;
264 
266  typedef Dune::ScalarProductChooser<X,PI,M::category> ScalarProductChooserType;
268  typedef typename ScalarProductChooserType::ScalarProduct ScalarProduct;
269  typedef std::shared_ptr<ScalarProduct> ScalarProductPointer;
271  ScalarProductPointer scalarProduct_;
273  std::size_t gamma_;
275  std::size_t preSteps_;
277  std::size_t postSteps_;
278  std::size_t level;
279  bool buildHierarchy_;
280  bool symmetric;
281  bool coarsesolverconverged;
283  typedef std::shared_ptr<Smoother> SmootherPointer;
284  SmootherPointer coarseSmoother_;
286  std::size_t verbosity_;
287  };
288 
289  template<class M, class X, class PI, class A>
290  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
291  : matrices_(amg.matrices_), solver_(amg.solver_),
292  rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
293  gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
294  symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
295  coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
296  {
297  if(amg.rhs_)
298  rhs_=new Hierarchy<Range,A>(*amg.rhs_);
299  if(amg.lhs_)
300  lhs_=new Hierarchy<Domain,A>(*amg.lhs_);
301  if(amg.residual_)
302  residual_=new Hierarchy<Domain,A>(*amg.residual_);
303  }
304 
305  template<class M, class X, class PI, class A>
306  FastAMG<M,X,PI,A>::FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
307  const Parameters& parms, bool symmetric_)
308  : matrices_(&matrices), solver_(&coarseSolver),
309  rhs_(), lhs_(), residual_(), scalarProduct_(),
310  gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
311  postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
312  symmetric(symmetric_), coarsesolverconverged(true),
313  coarseSmoother_(), verbosity_(parms.debugLevel())
314  {
315  if(preSteps_>1||postSteps_>1)
316  {
317  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
318  preSteps_=postSteps_=1;
319  }
320  assert(matrices_->isBuilt());
321  static_assert(std::is_same<PI,SequentialInformation>::value,
322  "Currently only sequential runs are supported");
323  }
324  template<class M, class X, class PI, class A>
325  template<class C>
326  FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix,
327  const C& criterion,
328  const Parameters& parms,
329  bool symmetric_,
330  const PI& pinfo)
331  : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
332  preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
333  buildHierarchy_(true),
334  symmetric(symmetric_), coarsesolverconverged(true),
335  coarseSmoother_(), verbosity_(criterion.debugLevel())
336  {
337  if(preSteps_>1||postSteps_>1)
338  {
339  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
340  preSteps_=postSteps_=1;
341  }
342  static_assert(std::is_same<PI,SequentialInformation>::value,
343  "Currently only sequential runs are supported");
344  // TODO: reestablish compile time checks.
345  //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
346  // "Matrix and Solver must match in terms of category!");
347  createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
348  }
349 
350  template<class M, class X, class PI, class A>
351  FastAMG<M,X,PI,A>::~FastAMG()
352  {
353  if(buildHierarchy_) {
354  if(solver_)
355  solver_.reset();
356  if(coarseSmoother_)
357  coarseSmoother_.reset();
358  }
359  if(lhs_)
360  delete lhs_;
361  lhs_=nullptr;
362  if(residual_)
363  delete residual_;
364  residual_=nullptr;
365  if(rhs_)
366  delete rhs_;
367  rhs_=nullptr;
368  }
369 
370  template<class M, class X, class PI, class A>
371  template<class C>
372  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
373  const PI& pinfo)
374  {
375  Timer watch;
376  matrices_.reset(new OperatorHierarchy(matrix, pinfo));
377 
378  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
379 
380  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
381  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
382 
383  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
384  // We have the coarsest level. Create the coarse Solver
385  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
386  SmootherArgs sargs;
387  sargs.iterations = 1;
388 
389  typename ConstructionTraits<Smoother>::Arguments cargs;
390  cargs.setArgs(sargs);
391  if(matrices_->redistributeInformation().back().isSetup()) {
392  // Solve on the redistributed partitioning
393  cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
394  cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
395  }else{
396  cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
397  cargs.setComm(*matrices_->parallelInformation().coarsest());
398  }
399 
400  coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
401  scalarProduct_.reset(ScalarProductChooserType::construct(cargs.getComm()));
402 
403 #if HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
404 #if HAVE_SUITESPARSE_UMFPACK
405 #define DIRECTSOLVER UMFPack
406 #else
407 #define DIRECTSOLVER SuperLU
408 #endif
409  // Use superlu if we are purely sequential or with only one processor on the coarsest level.
410  if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
411  || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
412  || (matrices_->parallelInformation().coarsest().isRedistributed()
413  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
414  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
415  if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
416  std::cout<<"Using superlu"<<std::endl;
417  if(matrices_->parallelInformation().coarsest().isRedistributed())
418  {
419  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
420  // We are still participating on this level
421  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
422  else
423  solver_.reset();
424  }else
425  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
426  }else
427 #undef DIRECTSOLVER
428 #endif // HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
429  {
430  if(matrices_->parallelInformation().coarsest().isRedistributed())
431  {
432  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
433  // We are still participating on this level
434  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
435  *scalarProduct_,
436  *coarseSmoother_, 1E-2, 1000, 0));
437  else
438  solver_.reset();
439  }else
440  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
441  *scalarProduct_,
442  *coarseSmoother_, 1E-2, 1000, 0));
443  }
444  }
445 
446  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
447  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
448  }
449 
450 
451  template<class M, class X, class PI, class A>
452  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
453  {
454  Timer watch, watch1;
455  // Detect Matrix rows where all offdiagonal entries are
456  // zero and set x such that A_dd*x_d=b_d
457  // Thus users can be more careless when setting up their linear
458  // systems.
459  typedef typename M::matrix_type Matrix;
460  typedef typename Matrix::ConstRowIterator RowIter;
461  typedef typename Matrix::ConstColIterator ColIter;
462  typedef typename Matrix::block_type Block;
463  Block zero;
464  zero=typename Matrix::field_type();
465 
466  const Matrix& mat=matrices_->matrices().finest()->getmat();
467  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
468  bool isDirichlet = true;
469  bool hasDiagonal = false;
470  ColIter diag;
471  for(ColIter col=row->begin(); col!=row->end(); ++col) {
472  if(row.index()==col.index()) {
473  diag = col;
474  hasDiagonal = (*col != zero);
475  }else{
476  if(*col!=zero)
477  isDirichlet = false;
478  }
479  }
480  if(isDirichlet && hasDiagonal)
481  diag->solve(x[row.index()], b[row.index()]);
482  }
483  if (verbosity_>0)
484  std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
485  watch1.reset();
486  // No smoother to make x consistent! Do it by hand
487  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
488  Range* copy = new Range(b);
489  if(rhs_)
490  delete rhs_;
491  rhs_ = new Hierarchy<Range,A>(copy);
492  Domain* dcopy = new Domain(x);
493  if(lhs_)
494  delete lhs_;
495  lhs_ = new Hierarchy<Domain,A>(dcopy);
496  dcopy = new Domain(x);
497  residual_ = new Hierarchy<Domain,A>(dcopy);
498  matrices_->coarsenVector(*rhs_);
499  matrices_->coarsenVector(*lhs_);
500  matrices_->coarsenVector(*residual_);
501 
502  // The preconditioner might change x and b. So we have to
503  // copy the changes to the original vectors.
504  x = *lhs_->finest();
505  b = *rhs_->finest();
506  }
507  template<class M, class X, class PI, class A>
508  std::size_t FastAMG<M,X,PI,A>::levels()
509  {
510  return matrices_->levels();
511  }
512  template<class M, class X, class PI, class A>
513  std::size_t FastAMG<M,X,PI,A>::maxlevels()
514  {
515  return matrices_->maxlevels();
516  }
517 
519  template<class M, class X, class PI, class A>
520  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
521  {
522  LevelContext levelContext;
523  // Init all iterators for the current level
524  initIteratorsWithFineLevel(levelContext);
525 
526  assert(v.two_norm()==0);
527 
528  level=0;
529  if(matrices_->maxlevels()==1){
530  // The coarse solver might modify the d!
531  Range b(d);
532  mgc(levelContext, v, b);
533  }else
534  mgc(levelContext, v, d);
535  if(postSteps_==0||matrices_->maxlevels()==1)
536  levelContext.pinfo->copyOwnerToAll(v, v);
537  }
538 
539  template<class M, class X, class PI, class A>
540  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
541  {
542  levelContext.matrix = matrices_->matrices().finest();
543  levelContext.pinfo = matrices_->parallelInformation().finest();
544  levelContext.redist =
545  matrices_->redistributeInformation().begin();
546  levelContext.aggregates = matrices_->aggregatesMaps().begin();
547  levelContext.lhs = lhs_->finest();
548  levelContext.residual = residual_->finest();
549  levelContext.rhs = rhs_->finest();
550  levelContext.level=0;
551  }
552 
553  template<class M, class X, class PI, class A>
554  bool FastAMG<M,X,PI,A>
555  ::moveToCoarseLevel(LevelContext& levelContext)
556  {
557  bool processNextLevel=true;
558 
559  if(levelContext.redist->isSetup()) {
560  throw "bla";
561  levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
562  levelContext.residual.getRedistributed());
563  processNextLevel = levelContext.residual.getRedistributed().size()>0;
564  if(processNextLevel) {
565  //restrict defect to coarse level right hand side.
566  ++levelContext.pinfo;
567  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
568  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
569  static_cast<const Range&>(levelContext.residual.getRedistributed()),
570  *levelContext.pinfo);
571  }
572  }else{
573  //restrict defect to coarse level right hand side.
574  ++levelContext.rhs;
575  ++levelContext.pinfo;
576  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
577  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
578  static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
579  }
580 
581  if(processNextLevel) {
582  // prepare coarse system
583  ++levelContext.residual;
584  ++levelContext.lhs;
585  ++levelContext.matrix;
586  ++levelContext.level;
587  ++levelContext.redist;
588 
589  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
590  // next level is not the globally coarsest one
591  ++levelContext.aggregates;
592  }
593  // prepare the lhs on the next level
594  *levelContext.lhs=0;
595  *levelContext.residual=0;
596  }
597  return processNextLevel;
598  }
599 
600  template<class M, class X, class PI, class A>
601  void FastAMG<M,X,PI,A>
602  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
603  {
604  if(processNextLevel) {
605  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
606  // previous level is not the globally coarsest one
607  --levelContext.aggregates;
608  }
609  --levelContext.redist;
610  --levelContext.level;
611  //prolongate and add the correction (update is in coarse left hand side)
612  --levelContext.matrix;
613  --levelContext.residual;
614 
615  }
616 
617  typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
618  if(levelContext.redist->isSetup()) {
619 
620  // Need to redistribute during prolongate
621  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
622  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
623  levelContext.lhs.getRedistributed(),
624  matrices_->getProlongationDampingFactor(),
625  *levelContext.pinfo, *levelContext.redist);
626  }else{
627  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
628  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
629  matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
630 
631  // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
632  }
633 
634 
635  if(processNextLevel) {
636  --levelContext.rhs;
637  }
638 
639  }
640 
641 
642  template<class M, class X, class PI, class A>
643  void FastAMG<M,X,PI,A>
644  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
645  {
646  GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
647  x,
648  *levelContext.residual,
649  b);
650  }
651 
652  template<class M, class X, class PI, class A>
653  void FastAMG<M,X,PI,A>
654  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
655  {
656  GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
657  ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
658  }
659 
660 
661  template<class M, class X, class PI, class A>
662  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
663  {
664  return IsDirectSolver< CoarseSolver>::value;
665  }
666 
667  template<class M, class X, class PI, class A>
668  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
669 
670  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
671  // Solve directly
672  InverseOperatorResult res;
673  res.converged=true; // If we do not compute this flag will not get updated
674  if(levelContext.redist->isSetup()) {
675  levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
676  if(levelContext.rhs.getRedistributed().size()>0) {
677  // We are still participating in the computation
678  levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
679  levelContext.rhs.getRedistributed());
680  solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
681  }
682  levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
683  levelContext.pinfo->copyOwnerToAll(v, v);
684  }else{
685  levelContext.pinfo->copyOwnerToAll(b, b);
686  solver_->apply(v, const_cast<Range&>(b), res);
687  }
688 
689  // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
690  // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
691  if (!res.converged)
692  coarsesolverconverged = false;
693  }else{
694  // presmoothing
695  presmooth(levelContext, v, b);
696  // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
697  // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
698 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
699  bool processNextLevel = moveToCoarseLevel(levelContext);
700 
701  if(processNextLevel) {
702  // next level
703  for(std::size_t i=0; i<gamma_; i++)
704  mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
705  }
706 
707  moveToFineLevel(levelContext, processNextLevel, v);
708 #else
709  *lhs=0;
710 #endif
711 
712  if(levelContext.matrix == matrices_->matrices().finest()) {
713  coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
714  if(!coarsesolverconverged)
715  DUNE_THROW(MathError, "Coarse solver did not converge");
716  }
717 
718  // printvector(std::cout, *lhs, "update corrected", "u", 10, 10, 10);
719  // postsmoothing
720  postsmooth(levelContext, v, b);
721  // printvector(std::cout, *lhs, "update postsmoothed", "u", 10, 10, 10);
722 
723  }
724  }
725 
726 
728  template<class M, class X, class PI, class A>
729  void FastAMG<M,X,PI,A>::post(Domain& x)
730  {
731  DUNE_UNUSED_PARAMETER(x);
732  delete lhs_;
733  lhs_=nullptr;
734  delete rhs_;
735  rhs_=nullptr;
736  delete residual_;
737  residual_=nullptr;
738  }
739 
740  template<class M, class X, class PI, class A>
741  template<class A1>
742  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
743  {
744  matrices_->getCoarsestAggregatesOnFinest(cont);
745  }
746 
747  } // end namespace Amg
748 } // end namespace Dune
749 
750 #endif
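
FastAMG is a fast, purely sequential AMG preconditioner based on agglomeration that saves memory bandwidth. The sketch below shows one plausible way to use it together with an ISTL Krylov solver. It is not part of fastamg.hh; the matrix and vector types, the coarsening criterion, the header paths and all numeric parameters (coarsening targets, reduction, iteration counts) are illustrative assumptions only.

// Usage sketch (not part of fastamg.hh): assumes a scalar BCRSMatrix problem and
// the usual dune-istl headers; names and parameter values are illustrative only.
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/fastamg.hh>

typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;

// Solve A x = b with a preconditioned CG-type method using FastAMG as preconditioner.
void solveWithFastAMG(const Matrix& A, Vector& x, Vector& b)
{
  Operator fineOperator(A);

  // Aggregation criterion for building the hierarchy; the symmetric criterion
  // matches the symmetric=true default of the constructors above.
  typedef Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
  Criterion criterion(15, 2000);   // illustrative max level / coarsen target

  Dune::Amg::Parameters parms;
  parms.setNoPreSmoothSteps(1);    // FastAMG supports only one smoothing step
  parms.setNoPostSmoothSteps(1);

  Dune::Amg::FastAMG<Operator,Vector> amg(fineOperator, criterion, parms);

  Dune::GeneralizedPCGSolver<Vector> solver(fineOperator, amg, 1e-8, 200, 2);
  Dune::InverseOperatorResult res;
  solver.apply(x, b, res);         // note: the right hand side b may be overwritten
}

A flexible method such as GeneralizedPCGSolver is a safe default here; with the symmetric=true constructor default a plain CGSolver is commonly used as well.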