Dune Core Modules (2.6.0)

amg.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_AMG_AMG_HH
4 #define DUNE_AMG_AMG_HH
5 
6 #include <memory>
11 #include <dune/istl/solvers.hh>
13 #include <dune/istl/superlu.hh>
14 #include <dune/istl/umfpack.hh>
15 #include <dune/istl/solvertype.hh>
18 
19 namespace Dune
20 {
21  namespace Amg
22  {
40  template<class M, class X, class S, class P, class K, class A>
41  class KAMG;
42 
43  template<class T>
44  class KAmgTwoGrid;
45 
56  template<class M, class X, class S, class PI=SequentialInformation,
57  class A=std::allocator<X> >
58  class AMG : public Preconditioner<X,X>
59  {
60  template<class M1, class X1, class S1, class P1, class K1, class A1>
61  friend class KAMG;
62 
63  friend class KAmgTwoGrid<AMG>;
64 
65  public:
67  typedef M Operator;
74  typedef PI ParallelInformation;
76  typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
78  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
79 
81  typedef X Domain;
83  typedef X Range;
85  typedef InverseOperator<X,X> CoarseSolver;
91  typedef S Smoother;
92 
94  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
95 
105  AMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
106  const SmootherArgs& smootherArgs, const Parameters& parms);
107 
119  template<class C>
120  AMG(const Operator& fineOperator, const C& criterion,
121  const SmootherArgs& smootherArgs=SmootherArgs(),
122  const ParallelInformation& pinfo=ParallelInformation());
123 
127  AMG(const AMG& amg);
128 
129  ~AMG();
130 
132  void pre(Domain& x, Range& b);
133 
135  void apply(Domain& v, const Range& d);
136 
138  virtual SolverCategory::Category category() const
139  {
140  return category_;
141  }
142 
144  void post(Domain& x);
145 
150  template<class A1>
151  void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
152 
153  std::size_t levels();
154 
155  std::size_t maxlevels();
156 
165  void recalculateHierarchy()
166  {
167  matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
168  }
169 
174  bool usesDirectCoarseLevelSolver() const;
175 
176  private:
183  template<class C>
184  void createHierarchies(C& criterion, Operator& matrix,
185  const PI& pinfo);
192  struct LevelContext
193  {
194  typedef Smoother SmootherType;
198  typename Hierarchy<Smoother,A>::Iterator smoother;
202  typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
206  typename ParallelInformationHierarchy::Iterator pinfo;
210  typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
214  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
218  typename Hierarchy<Domain,A>::Iterator lhs;
222  typename Hierarchy<Domain,A>::Iterator update;
226  typename Hierarchy<Range,A>::Iterator rhs;
230  std::size_t level;
231  };
232 
233 
238  void mgc(LevelContext& levelContext);
239 
240  void additiveMgc();
241 
248  void moveToFineLevel(LevelContext& levelContext,bool processedFineLevel);
249 
254  bool moveToCoarseLevel(LevelContext& levelContext);
255 
260  void initIteratorsWithFineLevel(LevelContext& levelContext);
261 
263  std::shared_ptr<OperatorHierarchy> matrices_;
265  SmootherArgs smootherArgs_;
267  std::shared_ptr<Hierarchy<Smoother,A> > smoothers_;
269  std::shared_ptr<CoarseSolver> solver_;
271  Hierarchy<Range,A>* rhs_;
273  Hierarchy<Domain,A>* lhs_;
275  Hierarchy<Domain,A>* update_;
279  std::shared_ptr<ScalarProduct> scalarProduct_;
281  std::size_t gamma_;
283  std::size_t preSteps_;
285  std::size_t postSteps_;
286  bool buildHierarchy_;
287  bool additive;
288  bool coarsesolverconverged;
289  std::shared_ptr<Smoother> coarseSmoother_;
291  SolverCategory::Category category_;
293  std::size_t verbosity_;
294  };
295 
296  template<class M, class X, class S, class PI, class A>
297  inline AMG<M,X,S,PI,A>::AMG(const AMG& amg)
298  : matrices_(amg.matrices_), smootherArgs_(amg.smootherArgs_),
299  smoothers_(amg.smoothers_), solver_(amg.solver_),
300  rhs_(), lhs_(), update_(),
301  scalarProduct_(amg.scalarProduct_), gamma_(amg.gamma_),
302  preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
303  buildHierarchy_(amg.buildHierarchy_),
304  additive(amg.additive), coarsesolverconverged(amg.coarsesolverconverged),
305  coarseSmoother_(amg.coarseSmoother_),
306  category_(amg.category_),
307  verbosity_(amg.verbosity_)
308  {
309  if(amg.rhs_)
310  rhs_=new Hierarchy<Range,A>(*amg.rhs_);
311  if(amg.lhs_)
312  lhs_=new Hierarchy<Domain,A>(*amg.lhs_);
313  if(amg.update_)
314  update_=new Hierarchy<Domain,A>(*amg.update_);
315  }
316 
317  template<class M, class X, class S, class PI, class A>
318  AMG<M,X,S,PI,A>::AMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
319  const SmootherArgs& smootherArgs,
320  const Parameters& parms)
321  : matrices_(stackobject_to_shared_ptr(matrices)), smootherArgs_(smootherArgs),
322  smoothers_(new Hierarchy<Smoother,A>), solver_(&coarseSolver),
323  rhs_(), lhs_(), update_(), scalarProduct_(0),
324  gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
325  postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
326  additive(parms.getAdditive()), coarsesolverconverged(true),
327  coarseSmoother_(),
328 // #warning should category be retrieved from matrices?
329  category_(SolverCategory::category(*smoothers_->coarsest())),
330  verbosity_(parms.debugLevel())
331  {
332  assert(matrices_->isBuilt());
333 
334  // build the necessary smoother hierarchies
335  matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
336  }
337 
338  template<class M, class X, class S, class PI, class A>
339  template<class C>
340  AMG<M,X,S,PI,A>::AMG(const Operator& matrix,
341  const C& criterion,
342  const SmootherArgs& smootherArgs,
343  const PI& pinfo)
344  : smootherArgs_(smootherArgs),
345  smoothers_(new Hierarchy<Smoother,A>), solver_(),
346  rhs_(), lhs_(), update_(), scalarProduct_(),
347  gamma_(criterion.getGamma()), preSteps_(criterion.getNoPreSmoothSteps()),
348  postSteps_(criterion.getNoPostSmoothSteps()), buildHierarchy_(true),
349  additive(criterion.getAdditive()), coarsesolverconverged(true),
350  coarseSmoother_(),
351  category_(SolverCategory::category(pinfo)),
352  verbosity_(criterion.debugLevel())
353  {
354  if(SolverCategory::category(matrix) != SolverCategory::category(pinfo))
355  DUNE_THROW(InvalidSolverCategory, "Matrix and Communication must have the same SolverCategory!");
356  // TODO: reestablish compile time checks.
357  //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
358  // "Matrix and Solver must match in terms of category!");
359  createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
360  }
361 
362 
363  template<class M, class X, class S, class PI, class A>
364  AMG<M,X,S,PI,A>::~AMG()
365  {
366  if(buildHierarchy_) {
367  if(solver_)
368  solver_.reset();
369  if(coarseSmoother_)
370  coarseSmoother_.reset();
371  }
372  if(lhs_)
373  delete lhs_;
374  lhs_=nullptr;
375  if(update_)
376  delete update_;
377  update_=nullptr;
378  if(rhs_)
379  delete rhs_;
380  rhs_=nullptr;
381  }
382 
383  template <class Matrix,
384  class Vector>
385  struct DirectSolverSelector
386  {
387  typedef typename Matrix :: field_type field_type;
388  enum SolverType { umfpack, superlu, none };
389 
390  static constexpr SolverType solver =
391 #if DISABLE_AMG_DIRECTSOLVER
392  none;
393 #elif HAVE_SUITESPARSE_UMFPACK
394  UMFPackMethodChooser< field_type > :: valid ? umfpack : none ;
395 #elif HAVE_SUPERLU
396  superlu ;
397 #else
398  none;
399 #endif
400 
401  template <class M, SolverType>
402  struct Solver
403  {
404  typedef InverseOperator<Vector,Vector> type;
405  static type* create(const M& mat, bool verbose, bool reusevector )
406  {
407  DUNE_THROW(NotImplemented,"DirectSolver not selected");
408  return nullptr;
409  }
410  static std::string name () { return "None"; }
411  };
412 #if HAVE_SUITESPARSE_UMFPACK
413  template <class M>
414  struct Solver< M, umfpack >
415  {
416  typedef UMFPack< M > type;
417  static type* create(const M& mat, bool verbose, bool reusevector )
418  {
419  return new type(mat, verbose, reusevector );
420  }
421  static std::string name () { return "UMFPack"; }
422  };
423 #endif
424 #if HAVE_SUPERLU
425  template <class M>
426  struct Solver< M, superlu >
427  {
428  typedef SuperLU< M > type;
429  static type* create(const M& mat, bool verbose, bool reusevector )
430  {
431  return new type(mat, verbose, reusevector );
432  }
433  static std::string name () { return "SuperLU"; }
434  };
435 #endif
436 
437  // define direct solver type to be used
438  typedef Solver< Matrix, solver > SelectedSolver ;
439  typedef typename SelectedSolver :: type DirectSolver;
440  static constexpr bool isDirectSolver = solver != none;
441  static std::string name() { return SelectedSolver :: name (); }
442  static DirectSolver* create(const Matrix& mat, bool verbose, bool reusevector )
443  {
444  return SelectedSolver :: create( mat, verbose, reusevector );
445  }
446  };
447 
448  template<class M, class X, class S, class PI, class A>
449  template<class C>
450  void AMG<M,X,S,PI,A>::createHierarchies(C& criterion, Operator& matrix,
451  const PI& pinfo)
452  {
453  Timer watch;
454  matrices_.reset(new OperatorHierarchy(matrix, pinfo));
455 
456  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
457 
458  // build the necessary smoother hierarchies
459  matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
460 
461  // Test whether we should solve on the coarse level. That is the case if we
462  // have that level and, if there was a redistribution on this level, our
463  // communicator has to be valid (size()>0), as the smoother might try to
464  // communicate in the constructor.
465  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()
466  && ( ! matrices_->redistributeInformation().back().isSetup() ||
467  matrices_->parallelInformation().coarsest().getRedistributed().communicator().size() ) )
468  {
469  // We have the coarsest level. Create the coarse solver.
470  SmootherArgs sargs(smootherArgs_);
471  sargs.iterations = 1;
472 
473  typename ConstructionTraits<Smoother>::Arguments cargs;
474  cargs.setArgs(sargs);
475  if(matrices_->redistributeInformation().back().isSetup()) {
476  // Solve on the redistributed partitioning
477  cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
478  cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
479  }else{
480  cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
481  cargs.setComm(*matrices_->parallelInformation().coarsest());
482  }
483 
484  coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
485  scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
486 
487  typedef DirectSolverSelector< typename M::matrix_type, X > SolverSelector;
488 
489  // Use a direct solver if we are purely sequential or if there is only one process on the coarsest level.
490  if( SolverSelector::isDirectSolver &&
491  (std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
492  || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
493  || (matrices_->parallelInformation().coarsest().isRedistributed()
494  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
495  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0) )
496  )
497  { // redistribute and 1 proc
498  if(matrices_->parallelInformation().coarsest().isRedistributed())
499  {
500  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
501  {
502  // We are still participating on this level
503  solver_.reset(SolverSelector::create(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
504  }
505  else
506  solver_.reset();
507  }
508  else
509  {
510  solver_.reset(SolverSelector::create(matrices_->matrices().coarsest()->getmat(), false, false));
511  }
512  if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
513  std::cout<< "Using a direct coarse solver (" << SolverSelector::name() << ")" << std::endl;
514  }
515  else
516  {
517  if(matrices_->parallelInformation().coarsest().isRedistributed())
518  {
519  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
520  // We are still participating on this level
521 
522  // we have to allocate these types using the rebound allocator
523  // in order to ensure that we fulfill the alignment requirements
524  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
525  *scalarProduct_,
526  *coarseSmoother_, 1E-2, 1000, 0));
527  else
528  solver_.reset();
529  }else
530  {
531  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
532  *scalarProduct_,
533  *coarseSmoother_, 1E-2, 1000, 0));
534  // // we have to allocate these types using the rebound allocator
535  // // in order to ensure that we fulfill the alignment requirements
536  // using Alloc = typename A::template rebind<BiCGSTABSolver<X>>::other;
537  // Alloc alloc;
538  // auto p = alloc.allocate(1);
539  // alloc.construct(p,
540  // const_cast<M&>(*matrices_->matrices().coarsest()),
541  // *scalarProduct_,
542  // *coarseSmoother_, 1E-2, 1000, 0);
543  // solver_.reset(p,[](BiCGSTABSolver<X>* p){
544  // Alloc alloc;
545  // alloc.destroy(p);
546  // alloc.deallocate(p,1);
547  // });
548  }
549  }
550  }
551 
552  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
553  std::cout<<"Building hierarchy of "<<matrices_->maxlevels()<<" levels "
554  <<"(inclusive coarse solver) took "<<watch.elapsed()<<" seconds."<<std::endl;
555  }
556 
557 
558  template<class M, class X, class S, class PI, class A>
559  void AMG<M,X,S,PI,A>::pre(Domain& x, Range& b)
560  {
561  // Detect Matrix rows where all offdiagonal entries are
562  // zero and set x such that A_dd*x_d=b_d
563  // Thus users can be more careless when setting up their linear
564  // systems.
565  typedef typename M::matrix_type Matrix;
566  typedef typename Matrix::ConstRowIterator RowIter;
567  typedef typename Matrix::ConstColIterator ColIter;
568  typedef typename Matrix::block_type Block;
569  Block zero;
570  zero=typename Matrix::field_type();
571 
572  const Matrix& mat=matrices_->matrices().finest()->getmat();
573  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
574  bool isDirichlet = true;
575  bool hasDiagonal = false;
576  Block diagonal;
577  for(ColIter col=row->begin(); col!=row->end(); ++col) {
578  if(row.index()==col.index()) {
579  diagonal = *col;
580  hasDiagonal = diagonal != zero;
581  }else{
582  if(*col!=zero)
583  isDirichlet = false;
584  }
585  }
586  if(isDirichlet && hasDiagonal)
587  diagonal.solve(x[row.index()], b[row.index()]);
588  }
589 
590  if(smoothers_->levels()>0)
591  smoothers_->finest()->pre(x,b);
592  else
593  // No smoother to make x consistent! Do it by hand
594  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
595  Range* copy = new Range(b);
596  if(rhs_)
597  delete rhs_;
598  rhs_ = new Hierarchy<Range,A>(copy);
599  Domain* dcopy = new Domain(x);
600  if(lhs_)
601  delete lhs_;
602  lhs_ = new Hierarchy<Domain,A>(dcopy);
603  dcopy = new Domain(x);
604  if(update_)
605  delete update_;
606  update_ = new Hierarchy<Domain,A>(dcopy);
607  matrices_->coarsenVector(*rhs_);
608  matrices_->coarsenVector(*lhs_);
609  matrices_->coarsenVector(*update_);
610 
611  // Preprocess all smoothers
612  typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
613  typedef typename Hierarchy<Range,A>::Iterator RIterator;
614  typedef typename Hierarchy<Domain,A>::Iterator DIterator;
615  Iterator coarsest = smoothers_->coarsest();
616  Iterator smoother = smoothers_->finest();
617  RIterator rhs = rhs_->finest();
618  DIterator lhs = lhs_->finest();
619  if(smoothers_->levels()>0) {
620 
621  assert(lhs_->levels()==rhs_->levels());
622  assert(smoothers_->levels()==lhs_->levels() || matrices_->levels()==matrices_->maxlevels());
623  assert(smoothers_->levels()+1==lhs_->levels() || matrices_->levels()<matrices_->maxlevels());
624 
625  if(smoother!=coarsest)
626  for(++smoother, ++lhs, ++rhs; smoother != coarsest; ++smoother, ++lhs, ++rhs)
627  smoother->pre(*lhs,*rhs);
628  smoother->pre(*lhs,*rhs);
629  }
630 
631 
632  // The preconditioner might change x and b. So we have to
633  // copy the changes to the original vectors.
634  x = *lhs_->finest();
635  b = *rhs_->finest();
636 
637  }
638  template<class M, class X, class S, class PI, class A>
639  std::size_t AMG<M,X,S,PI,A>::levels()
640  {
641  return matrices_->levels();
642  }
643  template<class M, class X, class S, class PI, class A>
644  std::size_t AMG<M,X,S,PI,A>::maxlevels()
645  {
646  return matrices_->maxlevels();
647  }
648 
650  template<class M, class X, class S, class PI, class A>
651  void AMG<M,X,S,PI,A>::apply(Domain& v, const Range& d)
652  {
653  LevelContext levelContext;
654 
655  if(additive) {
656  *(rhs_->finest())=d;
657  additiveMgc();
658  v=*lhs_->finest();
659  }else{
660  // Init all iterators for the current level
661  initIteratorsWithFineLevel(levelContext);
662 
663 
664  *levelContext.lhs = v;
665  *levelContext.rhs = d;
666  *levelContext.update=0;
667  levelContext.level=0;
668 
669  mgc(levelContext);
670 
671  if(postSteps_==0||matrices_->maxlevels()==1)
672  levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
673 
674  v=*levelContext.update;
675  }
676 
677  }
678 
679  template<class M, class X, class S, class PI, class A>
680  void AMG<M,X,S,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
681  {
682  levelContext.smoother = smoothers_->finest();
683  levelContext.matrix = matrices_->matrices().finest();
684  levelContext.pinfo = matrices_->parallelInformation().finest();
685  levelContext.redist =
686  matrices_->redistributeInformation().begin();
687  levelContext.aggregates = matrices_->aggregatesMaps().begin();
688  levelContext.lhs = lhs_->finest();
689  levelContext.update = update_->finest();
690  levelContext.rhs = rhs_->finest();
691  }
692 
693  template<class M, class X, class S, class PI, class A>
694  bool AMG<M,X,S,PI,A>
695  ::moveToCoarseLevel(LevelContext& levelContext)
696  {
697 
698  bool processNextLevel=true;
699 
700  if(levelContext.redist->isSetup()) {
701  levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.rhs),
702  levelContext.rhs.getRedistributed());
703  processNextLevel = levelContext.rhs.getRedistributed().size()>0;
704  if(processNextLevel) {
705  //restrict defect to coarse level right hand side.
706  typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
707  ++levelContext.pinfo;
708  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
709  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
710  static_cast<const Range&>(fineRhs.getRedistributed()),
711  *levelContext.pinfo);
712  }
713  }else{
714  //restrict defect to coarse level right hand side.
715  typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
716  ++levelContext.pinfo;
717  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
718  ::restrictVector(*(*levelContext.aggregates),
719  *levelContext.rhs, static_cast<const Range&>(*fineRhs),
720  *levelContext.pinfo);
721  }
722 
723  if(processNextLevel) {
724  // prepare coarse system
725  ++levelContext.lhs;
726  ++levelContext.update;
727  ++levelContext.matrix;
728  ++levelContext.level;
729  ++levelContext.redist;
730 
731  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
732  // next level is not the globally coarsest one
733  ++levelContext.smoother;
734  ++levelContext.aggregates;
735  }
736  // prepare the update on the next level
737  *levelContext.update=0;
738  }
739  return processNextLevel;
740  }
741 
742  template<class M, class X, class S, class PI, class A>
743  void AMG<M,X,S,PI,A>
744  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel)
745  {
746  if(processNextLevel) {
747  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
748  // previous level is not the globally coarsest one
749  --levelContext.smoother;
750  --levelContext.aggregates;
751  }
752  --levelContext.redist;
753  --levelContext.level;
754  //prolongate and add the correction (update is in coarse left hand side)
755  --levelContext.matrix;
756 
757  //typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--;
758  --levelContext.lhs;
759  --levelContext.pinfo;
760  }
761  if(levelContext.redist->isSetup()) {
762  // Need to redistribute during prolongateVector
763  levelContext.lhs.getRedistributed()=0;
764  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
765  ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
766  levelContext.lhs.getRedistributed(),
767  matrices_->getProlongationDampingFactor(),
768  *levelContext.pinfo, *levelContext.redist);
769  }else{
770  *levelContext.lhs=0;
771  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
772  ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
773  matrices_->getProlongationDampingFactor(),
774  *levelContext.pinfo);
775  }
776 
777 
778  if(processNextLevel) {
779  --levelContext.update;
780  --levelContext.rhs;
781  }
782 
783  *levelContext.update += *levelContext.lhs;
784  }
785 
786  template<class M, class X, class S, class PI, class A>
787  bool AMG<M,X,S,PI,A>::usesDirectCoarseLevelSolver() const
788  {
789  return IsDirectSolver< CoarseSolver>::value;
790  }
791 
792  template<class M, class X, class S, class PI, class A>
793  void AMG<M,X,S,PI,A>::mgc(LevelContext& levelContext){
794  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
795  // Solve directly
796  InverseOperatorResult res;
797  res.converged=true; // If we do not compute, this flag will not get updated
798  if(levelContext.redist->isSetup()) {
799  levelContext.redist->redistribute(*levelContext.rhs, levelContext.rhs.getRedistributed());
800  if(levelContext.rhs.getRedistributed().size()>0) {
801  // We are still participating in the computation
802  levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
803  levelContext.rhs.getRedistributed());
804  solver_->apply(levelContext.update.getRedistributed(),
805  levelContext.rhs.getRedistributed(), res);
806  }
807  levelContext.redist->redistributeBackward(*levelContext.update, levelContext.update.getRedistributed());
808  levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
809  }else{
810  levelContext.pinfo->copyOwnerToAll(*levelContext.rhs, *levelContext.rhs);
811  solver_->apply(*levelContext.update, *levelContext.rhs, res);
812  }
813 
814  if (!res.converged)
815  coarsesolverconverged = false;
816  }else{
817  // presmoothing
818  presmooth(levelContext, preSteps_);
819 
820 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
821  bool processNextLevel = moveToCoarseLevel(levelContext);
822 
823  if(processNextLevel) {
824  // next level
825  for(std::size_t i=0; i<gamma_; i++)
826  mgc(levelContext);
827  }
828 
829  moveToFineLevel(levelContext, processNextLevel);
830 #else
831  *lhs=0;
832 #endif
833 
834  if(levelContext.matrix == matrices_->matrices().finest()) {
835  coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
836  if(!coarsesolverconverged)
837  DUNE_THROW(MathError, "Coarse solver did not converge");
838  }
839  // postsmoothing
840  postsmooth(levelContext, postSteps_);
841 
842  }
843  }
844 
845  template<class M, class X, class S, class PI, class A>
846  void AMG<M,X,S,PI,A>::additiveMgc(){
847 
848  // restrict residual to all levels
849  typename ParallelInformationHierarchy::Iterator pinfo=matrices_->parallelInformation().finest();
850  typename Hierarchy<Range,A>::Iterator rhs=rhs_->finest();
851  typename Hierarchy<Domain,A>::Iterator lhs = lhs_->finest();
852  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates=matrices_->aggregatesMaps().begin();
853 
854  for(typename Hierarchy<Range,A>::Iterator fineRhs=rhs++; fineRhs != rhs_->coarsest(); fineRhs=rhs++, ++aggregates) {
855  ++pinfo;
856  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
857  ::restrictVector(*(*aggregates), *rhs, static_cast<const Range&>(*fineRhs), *pinfo);
858  }
859 
860  // pinfo is invalid, set to coarsest level
861  //pinfo = matrices_->parallelInformation().coarsest
862  // calculate correction for all levels
863  lhs = lhs_->finest();
864  typename Hierarchy<Smoother,A>::Iterator smoother = smoothers_->finest();
865 
866  for(rhs=rhs_->finest(); rhs != rhs_->coarsest(); ++lhs, ++rhs, ++smoother) {
867  // presmoothing
868  *lhs=0;
869  smoother->apply(*lhs, *rhs);
870  }
871 
872  // Coarse level solve
873 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
874  InverseOperatorResult res;
875  pinfo->copyOwnerToAll(*rhs, *rhs);
876  solver_->apply(*lhs, *rhs, res);
877 
878  if(!res.converged)
879  DUNE_THROW(MathError, "Coarse solver did not converge");
880 #else
881  *lhs=0;
882 #endif
883  // Prolongate and add up corrections from all levels
884  --pinfo;
885  --aggregates;
886 
887  for(typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--; coarseLhs != lhs_->finest(); coarseLhs = lhs--, --aggregates, --pinfo) {
888  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
889  ::prolongateVector(*(*aggregates), *coarseLhs, *lhs, 1.0, *pinfo);
890  }
891  }
892 
893 
895  template<class M, class X, class S, class PI, class A>
896  void AMG<M,X,S,PI,A>::post(Domain& x)
897  {
898  DUNE_UNUSED_PARAMETER(x);
899  // Postprocess all smoothers
900  typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
901  typedef typename Hierarchy<Domain,A>::Iterator DIterator;
902  Iterator coarsest = smoothers_->coarsest();
903  Iterator smoother = smoothers_->finest();
904  DIterator lhs = lhs_->finest();
905  if(smoothers_->levels()>0) {
906  if(smoother != coarsest || matrices_->levels()<matrices_->maxlevels())
907  smoother->post(*lhs);
908  if(smoother!=coarsest)
909  for(++smoother, ++lhs; smoother != coarsest; ++smoother, ++lhs)
910  smoother->post(*lhs);
911  smoother->post(*lhs);
912  }
913  //delete &(*lhs_->finest());
914  delete lhs_;
915  lhs_=nullptr;
916  //delete &(*update_->finest());
917  delete update_;
918  update_=nullptr;
919  //delete &(*rhs_->finest());
920  delete rhs_;
921  rhs_=nullptr;
922  }
923 
924  template<class M, class X, class S, class PI, class A>
925  template<class A1>
926  void AMG<M,X,S,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
927  {
928  matrices_->getCoarsestAggregatesOnFinest(cont);
929  }
930 
931  } // end namespace Amg
932 } // end namespace Dune
933 
934 #endif
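For orientation, the following is a minimal usage sketch showing how the AMG class defined above is typically combined with an ISTL Krylov solver. The matrix and vector types, the SSOR smoother, the coarsening criterion and its parameters are illustrative assumptions for a sequential scalar problem; they are not prescribed by amg.hh.

// Minimal sketch (assumed types and parameters, not part of amg.hh):
// solve A x = b with AMG-preconditioned CG for a scalar problem.
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/amg.hh>

using Block    = Dune::FieldMatrix<double,1,1>;
using Matrix   = Dune::BCRSMatrix<Block>;
using Vector   = Dune::BlockVector<Dune::FieldVector<double,1> >;
using Operator = Dune::MatrixAdapter<Matrix,Vector,Vector>;
using Smoother = Dune::SeqSSOR<Matrix,Vector,Vector>;
using AMG      = Dune::Amg::AMG<Operator,Vector,Smoother>; // PI defaults to SequentialInformation

void solveWithAMG(Matrix& A, Vector& x, Vector& b)
{
  Operator op(A);

  // Coarsening criterion: symmetric problem, at most 15 levels,
  // coarsen until roughly 2000 unknowns remain on the coarsest level.
  typedef Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
  Criterion criterion(15, 2000);

  // One SSOR sweep per pre-/post-smoothing step.
  Dune::Amg::SmootherTraits<Smoother>::Arguments smootherArgs;
  smootherArgs.iterations = 1;
  smootherArgs.relaxationFactor = 1.0;

  // Sequential constructor from amg.hh (builds the hierarchy internally).
  AMG amg(op, criterion, smootherArgs);

  // Use the AMG object as preconditioner of a CG solver.
  Dune::CGSolver<Vector> cg(op, amg, 1e-8, 200, 2); // reduction, max. iterations, verbosity
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);
}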
Parallel algebraic multigrid based on agglomeration.
Definition: amg.hh:59
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:250
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:253
An algebraic multigrid method using a Krylov cycle.
Definition: kamg.hh:142
Two grid operator for AMG with Krylov cycle.
Definition: kamg.hh:31
The hierarchies build by the coarsening process.
Definition: hierarchy.hh:310
All parameters for AMG.
Definition: parameters.hh:391
ConstIterator class for sequential access.
Definition: matrix.hh:398
A generic dynamic dense matrix.
Definition: matrix.hh:555
T::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:559
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:583
T block_type
Export the type representing the components.
Definition: matrix.hh:562
The negation of a set. An item is contained in the set if and only if it is not contained in the negated set.
Definition: enumset.hh:95
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:30
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:46
A few common exception classes.
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters.
Definition: unused.hh:25
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
constexpr GeometryType none(unsigned int dim)
Returns a GeometryType representing a singular of dimension dim.
Definition: type.hh:714
AMG(const AMG &amg)
Copy constructor.
Definition: amg.hh:297
AMG(const OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const SmootherArgs &smootherArgs, const Parameters &parms)
Construct a new amg with a specific coarse solver.
Definition: amg.hh:318
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: amg.hh:559
Hierarchy< Domain, A >::Iterator update
The iterator over the updates.
Definition: amg.hh:222
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: amg.hh:226
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: amg.hh:787
static T * construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
X Domain
The domain type.
Definition: amg.hh:81
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: amg.hh:206
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: amg.hh:214
SmootherTraits< Smoother >::Arguments SmootherArgs
The argument type for the construction of the smoother.
Definition: amg.hh:94
S Smoother
The type of the smoother.
Definition: amg.hh:91
Hierarchy< Smoother, A >::Iterator smoother
The iterator over the smoothers.
Definition: amg.hh:198
M Operator
The matrix operator type.
Definition: amg.hh:67
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: amg.hh:202
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: amg.hh:210
X Range
The range type.
Definition: amg.hh:83
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:454
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: amg.hh:926
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: amg.hh:218
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: amg.hh:165
Iterator coarsest()
Get an iterator positioned at the coarsest level.
Definition: hierarchy.hh:1369
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: amg.hh:78
InverseOperator< X, X > CoarseSolver
The type of the coarse solver.
Definition: amg.hh:85
void post(Domain &x)
Clean up.
Definition: amg.hh:896
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:476
std::size_t level
The level index.
Definition: amg.hh:230
AMG(const Operator &fineOperator, const C &criterion, const SmootherArgs &smootherArgs=SmootherArgs(), const ParallelInformation &pinfo=ParallelInformation())
Construct an AMG with an inexact coarse solver based on the smoother.
Definition: amg.hh:340
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: amg.hh:651
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: amg.hh:76
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: amg.hh:138
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the parallel data distribution and providing communication methods.
Definition: amg.hh:74
Provides classes representing the hierarchies in AMG.
Dune namespace.
Definition: alignedallocator.hh:10
shared_ptr< T > stackobject_to_shared_ptr(T &t)
Create a shared_ptr for a stack-allocated object.
Definition: shared_ptr.hh:75
Define base class for scalar product and norm.
Classes for the generic construction and application of the smoothers.
Implementations of the inverse operator interface.
Templates characterizing the type of a solver.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:64
Statistics about the application of an inverse operator.
Definition: solver.hh:41
bool converged
True if convergence criterion has been met.
Definition: solver.hh:65
Categories for the solvers.
Definition: solvercategory.hh:20
Category
Definition: solvercategory.hh:21
static Category category(const OP &op, decltype(op.category()) *=nullptr)
Helper function to extract the solver category, either from an enum or from the newly introduced virtual method.
Definition: solvercategory.hh:32
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Traits for type conversions and type information.
Classes for using UMFPack with ISTL matrices.