Dune Core Modules (2.3.1)

fastamg.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 // $Id$
4 #ifndef DUNE_ISTL_FASTAMG_HH
5 #define DUNE_ISTL_FASTAMG_HH
6 
7 #include <memory>
10 #include <dune/common/unused.hh>
14 #include <dune/istl/solvers.hh>
16 #include <dune/istl/superlu.hh>
17 #include <dune/istl/umfpack.hh>
18 #include <dune/istl/solvertype.hh>
19 #include <dune/istl/io.hh>
21 
22 #include "fastamgsmoother.hh"
23 
32 namespace Dune
33 {
34  namespace Amg
35  {
58  template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
59  class FastAMG : public Preconditioner<X,X>
60  {
61  public:
63  typedef M Operator;
70  typedef PI ParallelInformation;
72  typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
74  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
75 
77  typedef X Domain;
79  typedef X Range;
81  typedef InverseOperator<X,X> CoarseSolver;
82 
83  enum {
85  category = SolverCategory::sequential
86  };
87 
95  FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
96  const Parameters& parms,
97  bool symmetric=true);
98 
110  template<class C>
111  FastAMG(const Operator& fineOperator, const C& criterion,
112  const Parameters& parms=Parameters(),
113  bool symmetric=true,
114  const ParallelInformation& pinfo=ParallelInformation());
115 
119  FastAMG(const FastAMG& amg);
120 
121  ~FastAMG();
122 
124  void pre(Domain& x, Range& b);
125 
127  void apply(Domain& v, const Range& d);
128 
130  void post(Domain& x);
131 
136  template<class A1>
137  void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
138 
139  std::size_t levels();
140 
141  std::size_t maxlevels();
142 
152  {
153  matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
154  }
155 
160  bool usesDirectCoarseLevelSolver() const;
161 
162  private:
169  template<class C>
170  void createHierarchies(C& criterion, Operator& matrix,
171  const PI& pinfo);
172 
179  struct LevelContext
180  {
184  typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
188  typename ParallelInformationHierarchy::Iterator pinfo;
192  typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
196  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
200  typename Hierarchy<Domain,A>::Iterator lhs;
204  typename Hierarchy<Domain,A>::Iterator residual;
208  typename Hierarchy<Range,A>::Iterator rhs;
212  std::size_t level;
213  };
214 
216  void mgc(LevelContext& levelContext, Domain& x, const Range& b);
217 
224  void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
225 
232  void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
233 
240  void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
241  Domain& fineX);
242 
247  bool moveToCoarseLevel(LevelContext& levelContext);
248 
253  void initIteratorsWithFineLevel(LevelContext& levelContext);
254 
260  Hierarchy<Range,A>* rhs_;
262  Hierarchy<Domain,A>* lhs_;
264  Hierarchy<Domain,A>* residual_;
265 
269  typedef typename ScalarProductChooserType::ScalarProduct ScalarProduct;
272  ScalarProductPointer scalarProduct_;
274  std::size_t gamma_;
276  std::size_t preSteps_;
278  std::size_t postSteps_;
279  std::size_t level;
280  bool buildHierarchy_;
281  bool symmetric;
282  bool coarsesolverconverged;
285  SmootherPointer coarseSmoother_;
287  std::size_t verbosity_;
288  };
289 
290  template<class M, class X, class PI, class A>
291  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
292  : matrices_(amg.matrices_), solver_(amg.solver_),
293  rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
294  gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
295  symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
296  coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
297  {
298  if(amg.rhs_)
299  rhs_=new Hierarchy<Range,A>(*amg.rhs_);
300  if(amg.lhs_)
301  lhs_=new Hierarchy<Domain,A>(*amg.lhs_);
302  if(amg.residual_)
303  residual_=new Hierarchy<Domain,A>(*amg.residual_);
304  }
305 
306  template<class M, class X, class PI, class A>
307  FastAMG<M,X,PI,A>::FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
308  const Parameters& parms, bool symmetric_)
309  : matrices_(&matrices), solver_(&coarseSolver),
310  rhs_(), lhs_(), residual_(), scalarProduct_(),
311  gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
312  postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
313  symmetric(symmetric_), coarsesolverconverged(true),
314  coarseSmoother_(), verbosity_(parms.debugLevel())
315  {
316  if(preSteps_>1||postSteps_>1)
317  {
318  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
319  preSteps_=postSteps_=1;
320  }
321  assert(matrices_->isBuilt());
322  dune_static_assert((is_same<PI,SequentialInformation>::value), "Currently only sequential runs are supported");
323  }
324  template<class M, class X, class PI, class A>
325  template<class C>
326  FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix,
327  const C& criterion,
328  const Parameters& parms,
329  bool symmetric_,
330  const PI& pinfo)
331  : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
332  preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
333  buildHierarchy_(true),
334  symmetric(symmetric_), coarsesolverconverged(true),
335  coarseSmoother_(), verbosity_(criterion.debugLevel())
336  {
337  if(preSteps_>1||postSteps_>1)
338  {
339  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
340  preSteps_=postSteps_=1;
341  }
342  dune_static_assert((is_same<PI,SequentialInformation>::value), "Currently only sequential runs are supported");
343  // TODO: reestablish compile time checks.
344  //dune_static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
345  // "Matrix and Solver must match in terms of category!");
346  createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
347  }
348 
349  template<class M, class X, class PI, class A>
350  FastAMG<M,X,PI,A>::~FastAMG()
351  {
352  if(buildHierarchy_) {
353  if(solver_)
354  solver_.reset();
355  if(coarseSmoother_)
356  coarseSmoother_.reset();
357  }
358  if(lhs_)
359  delete lhs_;
360  lhs_=nullptr;
361  if(residual_)
362  delete residual_;
363  residual_=nullptr;
364  if(rhs_)
365  delete rhs_;
366  rhs_=nullptr;
367  }
368 
369  template<class M, class X, class PI, class A>
370  template<class C>
371  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
372  const PI& pinfo)
373  {
374  Timer watch;
375  matrices_.reset(new OperatorHierarchy(matrix, pinfo));
376 
377  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
378 
379  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
380  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
381 
382  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
383  // We have reached the coarsest level. Create the coarse solver.
384  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
385  SmootherArgs sargs;
386  sargs.iterations = 1;
387 
388  typename ConstructionTraits<Smoother>::Arguments cargs;
389  cargs.setArgs(sargs);
390  if(matrices_->redistributeInformation().back().isSetup()) {
391  // Solve on the redistributed partitioning
392  cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
393  cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
394  }else{
395  cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
396  cargs.setComm(*matrices_->parallelInformation().coarsest());
397  }
398 
399  coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
400  scalarProduct_.reset(ScalarProductChooserType::construct(cargs.getComm()));
401 
402 #if HAVE_SUPERLU || HAVE_UMFPACK
403 #if HAVE_UMFPACK
404 #define DIRECTSOLVER UMFPack
405 #else
406 #define DIRECTSOLVER SuperLU
407 #endif
408  // Use the direct solver if we are purely sequential or have only one process on the coarsest level.
409  if(is_same<ParallelInformation,SequentialInformation>::value // sequential mode
410  || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
411  || (matrices_->parallelInformation().coarsest().isRedistributed()
412  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
413  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
414  if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
415  std::cout<<"Using superlu"<<std::endl;
416  if(matrices_->parallelInformation().coarsest().isRedistributed())
417  {
418  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
419  // We are still participating on this level
420  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
421  else
422  solver_.reset();
423  }else
424  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
425  }else
426 #undef DIRECTSOLVER
427 #endif
428  {
429  if(matrices_->parallelInformation().coarsest().isRedistributed())
430  {
431  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
432  // We are still participating on this level
433  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
434  *scalarProduct_,
435  *coarseSmoother_, 1E-2, 1000, 0));
436  else
437  solver_.reset();
438  }else
439  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
440  *scalarProduct_,
441  *coarseSmoother_, 1E-2, 1000, 0));
442  }
443  }
444 
445  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
446  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
447  }
448 
449 
450  template<class M, class X, class PI, class A>
451  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
452  {
453  Timer watch, watch1;
454  // Detect matrix rows where all off-diagonal entries are
455  // zero and set x such that A_dd*x_d=b_d.
456  // Thus users can be more careless when setting up their linear
457  // systems.
458  typedef typename M::matrix_type Matrix;
459  typedef typename Matrix::ConstRowIterator RowIter;
460  typedef typename Matrix::ConstColIterator ColIter;
461  typedef typename Matrix::block_type Block;
462  Block zero;
463  zero=typename Matrix::field_type();
464 
465  const Matrix& mat=matrices_->matrices().finest()->getmat();
466  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
467  bool isDirichlet = true;
468  bool hasDiagonal = false;
469  ColIter diag;
470  for(ColIter col=row->begin(); col!=row->end(); ++col) {
471  if(row.index()==col.index()) {
472  diag = col;
473  hasDiagonal = (*col!=zero);
474  }else{
475  if(*col!=zero)
476  isDirichlet = false;
477  }
478  }
479  if(isDirichlet && hasDiagonal)
480  diag->solve(x[row.index()], b[row.index()]);
481  }
482  std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
483  watch1.reset();
484  // No smoother to make x consistent! Do it by hand
485  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
486  Range* copy = new Range(b);
487  if(rhs_)
488  delete rhs_;
489  rhs_ = new Hierarchy<Range,A>(copy);
490  Domain* dcopy = new Domain(x);
491  if(lhs_)
492  delete lhs_;
493  lhs_ = new Hierarchy<Domain,A>(dcopy);
494  dcopy = new Domain(x);
495  residual_ = new Hierarchy<Domain,A>(dcopy);
496  matrices_->coarsenVector(*rhs_);
497  matrices_->coarsenVector(*lhs_);
498  matrices_->coarsenVector(*residual_);
499 
500  // The preconditioner might change x and b. So we have to
501  // copy the changes to the original vectors.
502  x = *lhs_->finest();
503  b = *rhs_->finest();
504  }
505  template<class M, class X, class PI, class A>
506  std::size_t FastAMG<M,X,PI,A>::levels()
507  {
508  return matrices_->levels();
509  }
510  template<class M, class X, class PI, class A>
511  std::size_t FastAMG<M,X,PI,A>::maxlevels()
512  {
513  return matrices_->maxlevels();
514  }
515 
517  template<class M, class X, class PI, class A>
518  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
519  {
520  LevelContext levelContext;
521  // Init all iterators for the current level
522  initIteratorsWithFineLevel(levelContext);
523 
524  assert(v.two_norm()==0);
525 
526  level=0;
527  if(matrices_->maxlevels()==1){
528  // The coarse solver might modify d!
529  Range b(d);
530  mgc(levelContext, v, b);
531  }else
532  mgc(levelContext, v, d);
533  if(postSteps_==0||matrices_->maxlevels()==1)
534  levelContext.pinfo->copyOwnerToAll(v, v);
535  }
536 
537  template<class M, class X, class PI, class A>
538  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
539  {
540  levelContext.matrix = matrices_->matrices().finest();
541  levelContext.pinfo = matrices_->parallelInformation().finest();
542  levelContext.redist =
543  matrices_->redistributeInformation().begin();
544  levelContext.aggregates = matrices_->aggregatesMaps().begin();
545  levelContext.lhs = lhs_->finest();
546  levelContext.residual = residual_->finest();
547  levelContext.rhs = rhs_->finest();
548  levelContext.level=0;
549  }
550 
551  template<class M, class X, class PI, class A>
552  bool FastAMG<M,X,PI,A>
553  ::moveToCoarseLevel(LevelContext& levelContext)
554  {
555  bool processNextLevel=true;
556 
557  if(levelContext.redist->isSetup()) {
558  DUNE_THROW(NotImplemented, "Redistribution of the coarse levels is not supported by FastAMG yet!");
559  levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
560  levelContext.residual.getRedistributed());
561  processNextLevel = levelContext.residual.getRedistributed().size()>0;
562  if(processNextLevel) {
563  //restrict defect to coarse level right hand side.
564  ++levelContext.pinfo;
565  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
566  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
567  static_cast<const Range&>(levelContext.residual.getRedistributed()),
568  *levelContext.pinfo);
569  }
570  }else{
571  //restrict defect to coarse level right hand side.
572  ++levelContext.rhs;
573  ++levelContext.pinfo;
574  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
575  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
576  static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
577  }
578 
579  if(processNextLevel) {
580  // prepare coarse system
581  ++levelContext.residual;
582  ++levelContext.lhs;
583  ++levelContext.matrix;
584  ++levelContext.level;
585  ++levelContext.redist;
586 
587  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
588  // next level is not the globally coarsest one
589  ++levelContext.aggregates;
590  }
591  // prepare the lhs on the next level
592  *levelContext.lhs=0;
593  *levelContext.residual=0;
594  }
595  return processNextLevel;
596  }
597 
598  template<class M, class X, class PI, class A>
599  void FastAMG<M,X,PI,A>
600  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
601  {
602  if(processNextLevel) {
603  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
604  // previous level is not the globally coarsest one
605  --levelContext.aggregates;
606  }
607  --levelContext.redist;
608  --levelContext.level;
609  //prolongate and add the correction (update is in coarse left hand side)
610  --levelContext.matrix;
611  --levelContext.residual;
612 
613  }
614 
615  typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
616  if(levelContext.redist->isSetup()) {
617 
618  // Need to redistribute during prolongate
619  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
620  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
621  levelContext.lhs.getRedistributed(),
622  matrices_->getProlongationDampingFactor(),
623  *levelContext.pinfo, *levelContext.redist);
624  }else{
625  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
626  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
627  matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
628 
629  // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
630  }
631 
632 
633  if(processNextLevel) {
634  --levelContext.rhs;
635  }
636 
637  }
638 
639 
640  template<class M, class X, class PI, class A>
641  void FastAMG<M,X,PI,A>
642  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
643  {
644  GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
645  x,
646  *levelContext.residual,
647  b);
648  }
649 
650  template<class M, class X, class PI, class A>
651  void FastAMG<M,X,PI,A>
652  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
653  {
654  GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
655  ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
656  }
657 
658 
659  template<class M, class X, class PI, class A>
660  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
661  {
662  return IsDirectSolver<CoarseSolver>::value;
663  }
664 
665  template<class M, class X, class PI, class A>
666  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
667 
668  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
669  // Solve directly
670  InverseOperatorResult res;
671  res.converged=true; // If we do not compute, this flag will not get updated.
672  if(levelContext.redist->isSetup()) {
673  levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
674  if(levelContext.rhs.getRedistributed().size()>0) {
675  // We are still participating in the computation
676  levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
677  levelContext.rhs.getRedistributed());
678  solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
679  }
680  levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
681  levelContext.pinfo->copyOwnerToAll(v, v);
682  }else{
683  levelContext.pinfo->copyOwnerToAll(b, b);
684  solver_->apply(v, const_cast<Range&>(b), res);
685  }
686 
687  // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
688  // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
689  if (!res.converged)
690  coarsesolverconverged = false;
691  }else{
692  // presmoothing
693  presmooth(levelContext, v, b);
694  // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
695  // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
696 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
697  bool processNextLevel = moveToCoarseLevel(levelContext);
698 
699  if(processNextLevel) {
700  // next level
701  for(std::size_t i=0; i<gamma_; i++)
702  mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
703  }
704 
705  moveToFineLevel(levelContext, processNextLevel, v);
706 #else
707  *levelContext.lhs=0;
708 #endif
709 
710  if(levelContext.matrix == matrices_->matrices().finest()) {
711  coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
712  if(!coarsesolverconverged)
713  DUNE_THROW(MathError, "Coarse solver did not converge");
714  }
715 
716  // printvector(std::cout, *lhs, "update corrected", "u", 10, 10, 10);
717  // postsmoothing
718  postsmooth(levelContext, v, b);
719  // printvector(std::cout, *lhs, "update postsmoothed", "u", 10, 10, 10);
720 
721  }
722  }
723 
724 
726  template<class M, class X, class PI, class A>
727  void FastAMG<M,X,PI,A>::post(Domain& x)
728  {
729  DUNE_UNUSED_PARAMETER(x);
730  delete lhs_;
731  lhs_=nullptr;
732  delete rhs_;
733  rhs_=nullptr;
734  delete residual_;
735  residual_=nullptr;
736  }
737 
738  template<class M, class X, class PI, class A>
739  template<class A1>
740  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
741  {
742  matrices_->getCoarsestAggregatesOnFinest(cont);
743  }
744 
745  } // end namespace Amg
746 } // end namespace Dune
747 
748 #endif
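
The listing above is only the header. As a rough usage sketch (not part of fastamg.hh itself), the snippet below shows one way FastAMG can be plugged into an ISTL Krylov solver for a scalar BCRSMatrix problem. The include paths, the CoarsenCriterion instantiation and all numeric parameters (hierarchy depth 15, coarsen target 2000, reduction 1e-8, 100 iterations, verbosity 2) are illustrative assumptions, not values prescribed by this file.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/fastamg.hh>

typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;

void solveWithFastAMG(const Matrix& A, Vector& x, Vector& b)
{
  Operator op(A);

  // Coarsening criterion: symmetric strength-of-connection heuristic.
  // (Hypothetical parameter choices: at most 15 levels, coarsen until
  // roughly 2000 unknowns remain on the coarsest level.)
  typedef Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
  Criterion criterion(15, 2000);

  // Default AMG parameters; note that FastAMG supports only one
  // pre-/post-smoothing step (see the warnings in the constructors).
  Dune::Amg::Parameters parms;

  // Build the hierarchy and use the AMG as preconditioner of a CG solver.
  Dune::Amg::FastAMG<Operator,Vector> amg(op, criterion, parms);
  Dune::CGSolver<Vector> cg(op, amg, 1e-8, 100, 2);

  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);
}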
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:60
LevelIterator< Hierarchy< Domain, A >, Domain > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:258
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:261
The hierarchies built by the coarsening process.
Definition: hierarchy.hh:318
All parameters for AMG.
Definition: parameters.hh:391
Enables iteration over all entities of a given codimension and level of a grid. See also the document...
Definition: leveliterator.hh:31
A generic dynamic dense matrix.
Definition: matrix.hh:25
T::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:29
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:53
T block_type
Export the type representing the components.
Definition: matrix.hh:32
The negation of a set. An item is contained in the set if and only if it is not contained in the nega...
Definition: enumset.hh:95
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:26
Sequential SSOR preconditioner.
Definition: preconditioners.hh:127
A simple stop watch.
Definition: timer.hh:52
void reset()
Reset timer while keeping the running/stopped state.
Definition: timer.hh:66
double elapsed() const
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:86
ConstIterator class for sequential access.
Definition: vbvector.hh:647
A reference counting smart pointer.
Definition: shared_ptr.hh:64
A few common exception classes.
#define dune_static_assert(COND, MSG)
Helper template so that compilation fails if condition is not true.
Definition: static_assert.hh:79
#define DUNE_THROW(E, m)
Definition: exceptions.hh:244
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:184
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:740
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:188
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:151
void post(Domain &x)
Clean up.
Definition: fastamg.hh:727
X Domain
The domain type.
Definition: fastamg.hh:77
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:204
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:72
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:192
X Range
The range type.
Definition: fastamg.hh:79
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the...
Definition: fastamg.hh:70
M Operator
The matrix operator type.
Definition: fastamg.hh:63
InverseOperator< X, X > CoarseSolver
The type of the coarse solver.
Definition: fastamg.hh:81
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:660
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:200
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:208
std::size_t level
The level index.
Definition: fastamg.hh:212
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:518
FastAMG(const OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new amg with a specific coarse solver.
Definition: fastamg.hh:307
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:74
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:451
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:196
@ category
The solver category.
Definition: fastamg.hh:85
static T * construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:53
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:408
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:45
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:430
Provides a classes representing the hierarchies in AMG.
Some generic functions for pretty printing vectors and matrices.
Dune namespace.
Definition: alignment.hh:14
Define general preconditioner interface.
Define base class for scalar product and norm.
Classes for the generic construction and application of the smoothers.
Implementations of the inverse operator interface.
Templates characterizing the type of a solver.
Statistics about the application of an inverse operator.
Definition: solver.hh:32
bool converged
True if convergence criterion has been met.
Definition: solver.hh:56
Choose the appropriate scalar product for a solver category.
Definition: scalarproducts.hh:76
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:22
Compile time test for testing whether two types are the same.
Definition: typetraits.hh:360
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Traits for type conversions and type information.
Classes for using UMFPack with ISTL matrices.
Definition of the DUNE_UNUSED macro for the case that config.h is not available.
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentional unused function parameters with.
Definition: unused.hh:18
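
Regarding recalculateHierarchy() (fastamg.hh:151): when the entries of the fine-level matrix change but its sparsity pattern stays fixed, the coarse-level operators can be refreshed without rebuilding the whole hierarchy. A minimal, hypothetical sketch; the helper name refreshAfterMatrixUpdate is an illustration, not part of the library:

// Hypothetical helper: refresh the coarse-level operators of an existing
// FastAMG instance after the fine-level matrix entries were updated in place.
template<class AMG>
void refreshAfterMatrixUpdate(AMG& amg)
{
  amg.recalculateHierarchy();   // recomputes the Galerkin products of the hierarchy
}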