DUNE PDELab (2.7)

amg.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_AMG_AMG_HH
4#define DUNE_AMG_AMG_HH
5
6#include <memory>
7#include <dune/common/exceptions.hh>
8#include <dune/istl/paamg/smoother.hh>
9#include <dune/istl/paamg/transfer.hh>
10#include <dune/istl/paamg/matrixhierarchy.hh>
11#include <dune/istl/solvers.hh>
12#include <dune/istl/scalarproducts.hh>
13#include <dune/istl/superlu.hh>
14#include <dune/istl/umfpack.hh>
15#include <dune/istl/solvertype.hh>
16#include <dune/common/typetraits.hh>
17#include <dune/common/unused.hh>
18#include <dune/common/parametertree.hh>
19#include <dune/common/scalarvectorview.hh>
20#include <dune/common/scalarmatrixview.hh>
21
22namespace Dune
23{
24 namespace Amg
25 {
43 template<class M, class X, class S, class P, class K, class A>
44 class KAMG;
45
46 template<class T>
47 class KAmgTwoGrid;
48
59 template<class M, class X, class S, class PI=SequentialInformation,
60 class A=std::allocator<X> >
61 class AMG : public Preconditioner<X,X>
62 {
63 template<class M1, class X1, class S1, class P1, class K1, class A1>
64 friend class KAMG;
65
66 friend class KAmgTwoGrid<AMG>;
67
68 public:
70 typedef M Operator;
77 typedef PI ParallelInformation;
79 typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
81 typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
82
84 typedef X Domain;
86 typedef X Range;
88 typedef InverseOperator<X,X> CoarseSolver;
94 typedef S Smoother;
95
97 typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
98
108 AMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
109 const SmootherArgs& smootherArgs, const Parameters& parms);
110
122 template<class C>
123 AMG(const Operator& fineOperator, const C& criterion,
124 const SmootherArgs& smootherArgs=SmootherArgs(),
125 const ParallelInformation& pinfo=ParallelInformation());
126
151 AMG(std::shared_ptr<const Operator> fineOperator, const ParameterTree& configuration, const ParallelInformation& pinfo=ParallelInformation());
152
156 AMG(const AMG& amg);
157
159 void pre(Domain& x, Range& b);
160
162 void apply(Domain& v, const Range& d);
163
165 virtual SolverCategory::Category category() const
166 {
167 return category_;
168 }
169
171 void post(Domain& x);
172
177 template<class A1>
178 void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
179
180 std::size_t levels();
181
182 std::size_t maxlevels();
183
192 void recalculateHierarchy()
193 {
194 matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
195 }
196
201 bool usesDirectCoarseLevelSolver() const;
202
203 private:
210 template<class C>
211 void createHierarchies(C& criterion,
212 const std::shared_ptr<const Operator>& matrixptr,
213 const PI& pinfo);
220 struct LevelContext
221 {
222 typedef Smoother SmootherType;
226 typename Hierarchy<Smoother,A>::Iterator smoother;
230 typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
234 typename ParallelInformationHierarchy::Iterator pinfo;
238 typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
242 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
246 typename Hierarchy<Domain,A>::Iterator lhs;
250 typename Hierarchy<Domain,A>::Iterator update;
254 typename Hierarchy<Range,A>::Iterator rhs;
258 std::size_t level;
259 };
260
261
266 void mgc(LevelContext& levelContext);
267
268 void additiveMgc();
269
276 void moveToFineLevel(LevelContext& levelContext,bool processedFineLevel);
277
282 bool moveToCoarseLevel(LevelContext& levelContext);
283
288 void initIteratorsWithFineLevel(LevelContext& levelContext);
289
291 std::shared_ptr<OperatorHierarchy> matrices_;
293 SmootherArgs smootherArgs_;
295 std::shared_ptr<Hierarchy<Smoother,A> > smoothers_;
297 std::shared_ptr<CoarseSolver> solver_;
299 std::shared_ptr<Hierarchy<Range,A>> rhs_;
301 std::shared_ptr<Hierarchy<Domain,A>> lhs_;
303 std::shared_ptr<Hierarchy<Domain,A>> update_;
307 std::shared_ptr<ScalarProduct> scalarProduct_;
309 std::size_t gamma_;
311 std::size_t preSteps_;
313 std::size_t postSteps_;
314 bool buildHierarchy_;
315 bool additive;
316 bool coarsesolverconverged;
317 std::shared_ptr<Smoother> coarseSmoother_;
319 SolverCategory::Category category_;
321 std::size_t verbosity_;
322 };
323
324 template<class M, class X, class S, class PI, class A>
325 inline AMG<M,X,S,PI,A>::AMG(const AMG& amg)
326 : matrices_(amg.matrices_), smootherArgs_(amg.smootherArgs_),
327 smoothers_(amg.smoothers_), solver_(amg.solver_),
328 rhs_(), lhs_(), update_(),
329 scalarProduct_(amg.scalarProduct_), gamma_(amg.gamma_),
330 preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
331 buildHierarchy_(amg.buildHierarchy_),
332 additive(amg.additive), coarsesolverconverged(amg.coarsesolverconverged),
333 coarseSmoother_(amg.coarseSmoother_),
334 category_(amg.category_),
335 verbosity_(amg.verbosity_)
336 {}
337
338 template<class M, class X, class S, class PI, class A>
339 AMG<M,X,S,PI,A>::AMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
340 const SmootherArgs& smootherArgs,
341 const Parameters& parms)
342 : matrices_(stackobject_to_shared_ptr(matrices)), smootherArgs_(smootherArgs),
343 smoothers_(new Hierarchy<Smoother,A>), solver_(&coarseSolver),
344 rhs_(), lhs_(), update_(), scalarProduct_(0),
345 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
346 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
347 additive(parms.getAdditive()), coarsesolverconverged(true),
348 coarseSmoother_(),
349// #warning should category be retrieved from matrices?
350 category_(SolverCategory::category(*smoothers_->coarsest())),
351 verbosity_(parms.debugLevel())
352 {
353 assert(matrices_->isBuilt());
354
355 // build the necessary smoother hierarchies
356 matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
357 }
358
359 template<class M, class X, class S, class PI, class A>
360 template<class C>
361 AMG<M,X,S,PI,A>::AMG(const Operator& matrix,
362 const C& criterion,
363 const SmootherArgs& smootherArgs,
364 const PI& pinfo)
365 : smootherArgs_(smootherArgs),
366 smoothers_(new Hierarchy<Smoother,A>), solver_(),
367 rhs_(), lhs_(), update_(), scalarProduct_(),
368 gamma_(criterion.getGamma()), preSteps_(criterion.getNoPreSmoothSteps()),
369 postSteps_(criterion.getNoPostSmoothSteps()), buildHierarchy_(true),
370 additive(criterion.getAdditive()), coarsesolverconverged(true),
371 coarseSmoother_(),
372 category_(SolverCategory::category(pinfo)),
373 verbosity_(criterion.debugLevel())
374 {
375 if(SolverCategory::category(matrix) != SolverCategory::category(pinfo))
376 DUNE_THROW(InvalidSolverCategory, "Matrix and Communication must have the same SolverCategory!");
377 // TODO: reestablish compile time checks.
378 //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
379 // "Matrix and Solver must match in terms of category!");
380 auto matrixptr = stackobject_to_shared_ptr(matrix);
381 createHierarchies(criterion, matrixptr, pinfo);
382 }
383
384 template<class M, class X, class S, class PI, class A>
385 AMG<M,X,S,PI,A>::AMG(std::shared_ptr<const Operator> matrixptr,
386 const ParameterTree& configuration,
387 const ParallelInformation& pinfo) :
388 smoothers_(new Hierarchy<Smoother,A>),
389 solver_(), rhs_(), lhs_(), update_(), scalarProduct_(), buildHierarchy_(true),
390 coarsesolverconverged(true), coarseSmoother_(),
391 category_(SolverCategory::category(pinfo))
392 {
393
394 if (configuration.hasKey ("smootherIterations"))
395 smootherArgs_.iterations = configuration.get<int>("smootherIterations");
396
397 if (configuration.hasKey ("smootherRelaxation"))
398 smootherArgs_.relaxationFactor = configuration.get<typename SmootherArgs::RelaxationFactor>("smootherRelaxation");
399
400 typedef Amg::RowSum Norm;
401 typedef Dune::Amg::CoarsenCriterion<Dune::Amg::UnSymmetricCriterion<typename M::matrix_type,Norm>> Criterion;
402
403 Criterion criterion (configuration.get<int>("maxLevel"));
404
405 if (configuration.hasKey ("coarsenTarget"))
406 criterion.setCoarsenTarget (configuration.get<int>("coarsenTarget"));
407 if (configuration.hasKey ("prolongationDampingFactor"))
408 criterion.setProlongationDampingFactor (configuration.get<double>("prolongationDampingFactor"));
409
410 if (configuration.hasKey ("alpha"))
411 criterion.setAlpha (configuration.get<double> ("alpha"));
412
413 if (configuration.hasKey ("beta"))
414 criterion.setBeta (configuration.get<double> ("beta"));
415
416 if (configuration.hasKey ("gamma"))
417 criterion.setGamma (configuration.get<std::size_t> ("gamma"));
418 gamma_ = criterion.getGamma();
419
420 if (configuration.hasKey ("additive"))
421 criterion.setAdditive (configuration.get<bool>("additive"));
422 additive = criterion.getAdditive();
423
424 if (configuration.hasKey ("preSteps"))
425 criterion.setNoPreSmoothSteps (configuration.get<std::size_t> ("preSteps"));
426 preSteps_ = criterion.getNoPreSmoothSteps ();
427
428 if (configuration.hasKey ("postSteps"))
429 criterion.setNoPostSmoothSteps (configuration.get<std::size_t> ("postSteps"));
430 postSteps_ = criterion.getNoPostSmoothSteps ();
431
432 verbosity_ = configuration.get("verbosity",2);
433 criterion.setDebugLevel (verbosity_);
434
435 createHierarchies(criterion, matrixptr, pinfo);
436 }
437
438 template <class Matrix,
439 class Vector>
440 struct DirectSolverSelector
441 {
442 typedef typename Matrix :: field_type field_type;
443 enum SolverType { umfpack, superlu, none };
444
445 static constexpr SolverType solver =
446#if DISABLE_AMG_DIRECTSOLVER
447 none;
448#elif HAVE_SUITESPARSE_UMFPACK
449 UMFPackMethodChooser< field_type > :: valid ? umfpack : none ;
450#elif HAVE_SUPERLU
451 superlu ;
452#else
453 none;
454#endif
455
456 template <class M, SolverType>
457 struct Solver
458 {
459 typedef InverseOperator<Vector,Vector> type;
460 static type* create(const M& mat, bool verbose, bool reusevector )
461 {
462 DUNE_THROW(NotImplemented,"DirectSolver not selected");
463 return nullptr;
464 }
465 static std::string name () { return "None"; }
466 };
467#if HAVE_SUITESPARSE_UMFPACK
468 template <class M>
469 struct Solver< M, umfpack >
470 {
471 typedef UMFPack< M > type;
472 static type* create(const M& mat, bool verbose, bool reusevector )
473 {
474 return new type(mat, verbose, reusevector );
475 }
476 static std::string name () { return "UMFPack"; }
477 };
478#endif
479#if HAVE_SUPERLU
480 template <class M>
481 struct Solver< M, superlu >
482 {
483 typedef SuperLU< M > type;
484 static type* create(const M& mat, bool verbose, bool reusevector )
485 {
486 return new type(mat, verbose, reusevector );
487 }
488 static std::string name () { return "SuperLU"; }
489 };
490#endif
491
492 // define direct solver type to be used
493 typedef Solver< Matrix, solver > SelectedSolver ;
494 typedef typename SelectedSolver :: type DirectSolver;
495 static constexpr bool isDirectSolver = solver != none;
496 static std::string name() { return SelectedSolver :: name (); }
497 static DirectSolver* create(const Matrix& mat, bool verbose, bool reusevector )
498 {
499 return SelectedSolver :: create( mat, verbose, reusevector );
500 }
501 };
502
503 template<class M, class X, class S, class PI, class A>
504 template<class C>
505 void AMG<M,X,S,PI,A>::createHierarchies(C& criterion,
506 const std::shared_ptr<const Operator>& matrixptr,
507 const PI& pinfo)
508 {
509 Timer watch;
510 matrices_ = std::make_shared<OperatorHierarchy>(
511 std::const_pointer_cast<Operator>(matrixptr),
512 stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));
513
514 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
515
516 // build the necessary smoother hierarchies
517 matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
518
519 // Test whether we should solve on the coarse level. That is the case if we
520 // have that level. If there was a redistribution on this level, our
521 // communicator has to be valid (size()>0), as the smoother might try to
522 // communicate in its constructor.
523 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()
524 && ( ! matrices_->redistributeInformation().back().isSetup() ||
525 matrices_->parallelInformation().coarsest().getRedistributed().communicator().size() ) )
526 {
527 // We have the coarsest level. Create the coarse solver.
528 SmootherArgs sargs(smootherArgs_);
529 sargs.iterations = 1;
530
531 typename ConstructionTraits<Smoother>::Arguments cargs;
532 cargs.setArgs(sargs);
533 if(matrices_->redistributeInformation().back().isSetup()) {
534 // Solve on the redistributed partitioning
535 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
536 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
537 }else{
538 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
539 cargs.setComm(*matrices_->parallelInformation().coarsest());
540 }
541
542 coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
543 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
544
545 typedef DirectSolverSelector< typename M::matrix_type, X > SolverSelector;
546
547 // Use a direct solver if we are purely sequential or have only one processor on the coarsest level.
548 if( SolverSelector::isDirectSolver &&
549 (std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
550 || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
551 || (matrices_->parallelInformation().coarsest().isRedistributed()
552 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
553 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0) )
554 )
555 { // redistribute and 1 proc
556 if(matrices_->parallelInformation().coarsest().isRedistributed())
557 {
558 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
559 {
560 // We are still participating on this level
561 solver_.reset(SolverSelector::create(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
562 }
563 else
564 solver_.reset();
565 }
566 else
567 {
568 solver_.reset(SolverSelector::create(matrices_->matrices().coarsest()->getmat(), false, false));
569 }
570 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
571 std::cout<< "Using a direct coarse solver (" << SolverSelector::name() << ")" << std::endl;
572 }
573 else
574 {
575 if(matrices_->parallelInformation().coarsest().isRedistributed())
576 {
577 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
578 // We are still participating on this level
579
580 // we have to allocate these types using the rebound allocator
581 // in order to ensure that we fulfill the alignment requirements
582 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
583 *scalarProduct_,
584 *coarseSmoother_, 1E-2, 1000, 0));
585 else
586 solver_.reset();
587 }else
588 {
589 solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
590 *scalarProduct_,
591 *coarseSmoother_, 1E-2, 1000, 0));
592 // // we have to allocate these types using the rebound allocator
593 // // in order to ensure that we fulfill the alignment requirements
594 // using Alloc = typename std::allocator_traits<A>::template rebind_alloc<BiCGSTABSolver<X>>;
595 // Alloc alloc;
596 // auto p = alloc.allocate(1);
597 // std::allocator_traits<Alloc>::construct(alloc, p,
598 // const_cast<M&>(*matrices_->matrices().coarsest()),
599 // *scalarProduct_,
600 // *coarseSmoother_, 1E-2, 1000, 0);
601 // solver_.reset(p,[](BiCGSTABSolver<X>* p){
602 // Alloc alloc;
603 // std::allocator_traits<Alloc>::destroy(alloc, p);
604 // alloc.deallocate(p,1);
605 // });
606 }
607 }
608 }
609
610 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
611 std::cout<<"Building hierarchy of "<<matrices_->maxlevels()<<" levels "
612 <<"(including coarse solver) took "<<watch.elapsed()<<" seconds."<<std::endl;
613 }
614
615
616 template<class M, class X, class S, class PI, class A>
617 void AMG<M,X,S,PI,A>::pre(Domain& x, Range& b)
618 {
619 // Detect matrix rows where all off-diagonal entries are
620 // zero and set x such that A_dd*x_d=b_d.
621 // Thus users can be more careless when setting up their linear
622 // systems.
623 typedef typename M::matrix_type Matrix;
624 typedef typename Matrix::ConstRowIterator RowIter;
625 typedef typename Matrix::ConstColIterator ColIter;
626 typedef typename Matrix::block_type Block;
627 Block zero;
628 zero=typename Matrix::field_type();
629
630 const Matrix& mat=matrices_->matrices().finest()->getmat();
631 for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
632 bool isDirichlet = true;
633 bool hasDiagonal = false;
634 Block diagonal{};
635 for(ColIter col=row->begin(); col!=row->end(); ++col) {
636 if(row.index()==col.index()) {
637 diagonal = *col;
638 hasDiagonal = true;
639 }else{
640 if(*col!=zero)
641 isDirichlet = false;
642 }
643 }
644 if(isDirichlet && hasDiagonal)
645 {
646 auto&& xEntry = Impl::asVector(x[row.index()]);
647 auto&& bEntry = Impl::asVector(b[row.index()]);
648 Impl::asMatrix(diagonal).solve(xEntry, bEntry);
649 }
650 }
651
652 if(smoothers_->levels()>0)
653 smoothers_->finest()->pre(x,b);
654 else
655 // No smoother to make x consistent! Do it by hand
656 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
657 rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
658 lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
659 update_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
660 matrices_->coarsenVector(*rhs_);
661 matrices_->coarsenVector(*lhs_);
662 matrices_->coarsenVector(*update_);
663
664 // Preprocess all smoothers
665 typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
666 typedef typename Hierarchy<Range,A>::Iterator RIterator;
667 typedef typename Hierarchy<Domain,A>::Iterator DIterator;
668 Iterator coarsest = smoothers_->coarsest();
669 Iterator smoother = smoothers_->finest();
670 RIterator rhs = rhs_->finest();
671 DIterator lhs = lhs_->finest();
672 if(smoothers_->levels()>1) {
673
674 assert(lhs_->levels()==rhs_->levels());
675 assert(smoothers_->levels()==lhs_->levels() || matrices_->levels()==matrices_->maxlevels());
676 assert(smoothers_->levels()+1==lhs_->levels() || matrices_->levels()<matrices_->maxlevels());
677
678 if(smoother!=coarsest)
679 for(++smoother, ++lhs, ++rhs; smoother != coarsest; ++smoother, ++lhs, ++rhs)
680 smoother->pre(*lhs,*rhs);
681 smoother->pre(*lhs,*rhs);
682 }
683
684
685 // The preconditioner might change x and b. So we have to
686 // copy the changes to the original vectors.
687 x = *lhs_->finest();
688 b = *rhs_->finest();
689
690 }
691 template<class M, class X, class S, class PI, class A>
692 std::size_t AMG<M,X,S,PI,A>::levels()
693 {
694 return matrices_->levels();
695 }
696 template<class M, class X, class S, class PI, class A>
697 std::size_t AMG<M,X,S,PI,A>::maxlevels()
698 {
699 return matrices_->maxlevels();
700 }
701
703 template<class M, class X, class S, class PI, class A>
704 void AMG<M,X,S,PI,A>::apply(Domain& v, const Range& d)
705 {
706 LevelContext levelContext;
707
708 if(additive) {
709 *(rhs_->finest())=d;
710 additiveMgc();
711 v=*lhs_->finest();
712 }else{
713 // Init all iterators for the current level
714 initIteratorsWithFineLevel(levelContext);
715
716
717 *levelContext.lhs = v;
718 *levelContext.rhs = d;
719 *levelContext.update=0;
720 levelContext.level=0;
721
722 mgc(levelContext);
723
724 if(postSteps_==0||matrices_->maxlevels()==1)
725 levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
726
727 v=*levelContext.update;
728 }
729
730 }
731
732 template<class M, class X, class S, class PI, class A>
733 void AMG<M,X,S,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
734 {
735 levelContext.smoother = smoothers_->finest();
736 levelContext.matrix = matrices_->matrices().finest();
737 levelContext.pinfo = matrices_->parallelInformation().finest();
738 levelContext.redist =
739 matrices_->redistributeInformation().begin();
740 levelContext.aggregates = matrices_->aggregatesMaps().begin();
741 levelContext.lhs = lhs_->finest();
742 levelContext.update = update_->finest();
743 levelContext.rhs = rhs_->finest();
744 }
745
746 template<class M, class X, class S, class PI, class A>
747 bool AMG<M,X,S,PI,A>
748 ::moveToCoarseLevel(LevelContext& levelContext)
749 {
750
751 bool processNextLevel=true;
752
753 if(levelContext.redist->isSetup()) {
754 levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.rhs),
755 levelContext.rhs.getRedistributed());
756 processNextLevel = levelContext.rhs.getRedistributed().size()>0;
757 if(processNextLevel) {
758 //restrict defect to coarse level right hand side.
759 typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
760 ++levelContext.pinfo;
761 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
762 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
763 static_cast<const Range&>(fineRhs.getRedistributed()),
764 *levelContext.pinfo);
765 }
766 }else{
767 //restrict defect to coarse level right hand side.
768 typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
769 ++levelContext.pinfo;
770 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
771 ::restrictVector(*(*levelContext.aggregates),
772 *levelContext.rhs, static_cast<const Range&>(*fineRhs),
773 *levelContext.pinfo);
774 }
775
776 if(processNextLevel) {
777 // prepare coarse system
778 ++levelContext.lhs;
779 ++levelContext.update;
780 ++levelContext.matrix;
781 ++levelContext.level;
782 ++levelContext.redist;
783
784 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
785 // next level is not the globally coarsest one
786 ++levelContext.smoother;
787 ++levelContext.aggregates;
788 }
789 // prepare the update on the next level
790 *levelContext.update=0;
791 }
792 return processNextLevel;
793 }
794
795 template<class M, class X, class S, class PI, class A>
796 void AMG<M,X,S,PI,A>
797 ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel)
798 {
799 if(processNextLevel) {
800 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
801 // previous level is not the globally coarsest one
802 --levelContext.smoother;
803 --levelContext.aggregates;
804 }
805 --levelContext.redist;
806 --levelContext.level;
807 //prolongate and add the correction (update is in coarse left hand side)
808 --levelContext.matrix;
809
810 //typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--;
811 --levelContext.lhs;
812 --levelContext.pinfo;
813 }
814 if(levelContext.redist->isSetup()) {
815 // Need to redistribute during prolongateVector
816 levelContext.lhs.getRedistributed()=0;
817 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
818 ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
819 levelContext.lhs.getRedistributed(),
820 matrices_->getProlongationDampingFactor(),
821 *levelContext.pinfo, *levelContext.redist);
822 }else{
823 *levelContext.lhs=0;
824 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
825 ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
826 matrices_->getProlongationDampingFactor(),
827 *levelContext.pinfo);
828 }
829
830
831 if(processNextLevel) {
832 --levelContext.update;
833 --levelContext.rhs;
834 }
835
836 *levelContext.update += *levelContext.lhs;
837 }
838
839 template<class M, class X, class S, class PI, class A>
840 bool AMG<M,X,S,PI,A>::usesDirectCoarseLevelSolver() const
841 {
842 return IsDirectSolver< CoarseSolver>::value;
843 }
844
845 template<class M, class X, class S, class PI, class A>
846 void AMG<M,X,S,PI,A>::mgc(LevelContext& levelContext){
847 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
848 // Solve directly
849 InverseOperatorResult res;
850 res.converged=true; // If we do not compute, this flag will not get updated
851 if(levelContext.redist->isSetup()) {
852 levelContext.redist->redistribute(*levelContext.rhs, levelContext.rhs.getRedistributed());
853 if(levelContext.rhs.getRedistributed().size()>0) {
854 // We are still participating in the computation
855 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
856 levelContext.rhs.getRedistributed());
857 solver_->apply(levelContext.update.getRedistributed(),
858 levelContext.rhs.getRedistributed(), res);
859 }
860 levelContext.redist->redistributeBackward(*levelContext.update, levelContext.update.getRedistributed());
861 levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
862 }else{
863 levelContext.pinfo->copyOwnerToAll(*levelContext.rhs, *levelContext.rhs);
864 solver_->apply(*levelContext.update, *levelContext.rhs, res);
865 }
866
867 if (!res.converged)
868 coarsesolverconverged = false;
869 }else{
870 // presmoothing
871 presmooth(levelContext, preSteps_);
872
873#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
874 bool processNextLevel = moveToCoarseLevel(levelContext);
875
876 if(processNextLevel) {
877 // next level
878 for(std::size_t i=0; i<gamma_; i++)
879 mgc(levelContext);
880 }
881
882 moveToFineLevel(levelContext, processNextLevel);
883#else
884 *lhs=0;
885#endif
886
887 if(levelContext.matrix == matrices_->matrices().finest()) {
888 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
889 if(!coarsesolverconverged)
890 DUNE_THROW(MathError, "Coarse solver did not converge");
891 }
892 // postsmoothing
893 postsmooth(levelContext, postSteps_);
894
895 }
896 }
897
898 template<class M, class X, class S, class PI, class A>
899 void AMG<M,X,S,PI,A>::additiveMgc(){
900
901 // restrict residual to all levels
902 typename ParallelInformationHierarchy::Iterator pinfo=matrices_->parallelInformation().finest();
903 typename Hierarchy<Range,A>::Iterator rhs=rhs_->finest();
904 typename Hierarchy<Domain,A>::Iterator lhs = lhs_->finest();
905 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates=matrices_->aggregatesMaps().begin();
906
907 for(typename Hierarchy<Range,A>::Iterator fineRhs=rhs++; fineRhs != rhs_->coarsest(); fineRhs=rhs++, ++aggregates) {
908 ++pinfo;
909 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
910 ::restrictVector(*(*aggregates), *rhs, static_cast<const Range&>(*fineRhs), *pinfo);
911 }
912
913 // pinfo is invalid, set to coarsest level
914 //pinfo = matrices_->parallelInformation().coarsest
915 // calculate correction for all levels
916 lhs = lhs_->finest();
917 typename Hierarchy<Smoother,A>::Iterator smoother = smoothers_->finest();
918
919 for(rhs=rhs_->finest(); rhs != rhs_->coarsest(); ++lhs, ++rhs, ++smoother) {
920 // presmoothing
921 *lhs=0;
922 smoother->apply(*lhs, *rhs);
923 }
924
925 // Coarse level solve
926#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
927 InverseOperatorResult res;
928 pinfo->copyOwnerToAll(*rhs, *rhs);
929 solver_->apply(*lhs, *rhs, res);
930
931 if(!res.converged)
932 DUNE_THROW(MathError, "Coarse solver did not converge");
933#else
934 *lhs=0;
935#endif
936 // Prolongate and add up corrections from all levels
937 --pinfo;
938 --aggregates;
939
940 for(typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--; coarseLhs != lhs_->finest(); coarseLhs = lhs--, --aggregates, --pinfo) {
941 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
942 ::prolongateVector(*(*aggregates), *coarseLhs, *lhs, 1.0, *pinfo);
943 }
944 }
945
946
948 template<class M, class X, class S, class PI, class A>
949 void AMG<M,X,S,PI,A>::post(Domain& x)
950 {
951 DUNE_UNUSED_PARAMETER(x);
952 // Postprocess all smoothers
953 typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
954 typedef typename Hierarchy<Domain,A>::Iterator DIterator;
955 Iterator coarsest = smoothers_->coarsest();
956 Iterator smoother = smoothers_->finest();
957 DIterator lhs = lhs_->finest();
958 if(smoothers_->levels()>0) {
959 if(smoother != coarsest || matrices_->levels()<matrices_->maxlevels())
960 smoother->post(*lhs);
961 if(smoother!=coarsest)
962 for(++smoother, ++lhs; smoother != coarsest; ++smoother, ++lhs)
963 smoother->post(*lhs);
964 smoother->post(*lhs);
965 }
966 lhs_ = nullptr;
967 update_ = nullptr;
968 rhs_ = nullptr;
969 }
970
971 template<class M, class X, class S, class PI, class A>
972 template<class A1>
973 void AMG<M,X,S,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
974 {
975 matrices_->getCoarsestAggregatesOnFinest(cont);
976 }
977
978 } // end namespace Amg
979
980 struct AMGCreator{
981 template<class> struct isValidBlockType : std::false_type{};
982 template<class T, int n, int m> struct isValidBlockType<FieldMatrix<T,n,m>> : std::true_type{};
983
984 template<typename TL, typename M>
985 std::shared_ptr<Dune::Preconditioner<typename Dune::TypeListElement<1, TL>::type,
986 typename Dune::TypeListElement<2, TL>::type>>
987 operator() (TL tl, const M& mat, const Dune::ParameterTree& config,
988 std::enable_if_t<isValidBlockType<typename M::block_type>::value,int> = 0) const
989 {
990 using D = typename Dune::TypeListElement<1, decltype(tl)>::type;
991 using R = typename Dune::TypeListElement<2, decltype(tl)>::type;
992 std::shared_ptr<Preconditioner<D,R>> amg;
993 std::string smoother = config.get("smoother", "ssor");
994 typedef MatrixAdapter<M,D,R> OP;
995 std::shared_ptr<OP> op = std::make_shared<OP>(mat);
996 if(smoother == "ssor")
997 return std::make_shared<Amg::AMG<OP, D, SeqSSOR<M,D,R>>>(op, config);
998 if(smoother == "sor")
999 return std::make_shared<Amg::AMG<OP, D, SeqSOR<M,D,R>>>(op, config);
1000 if(smoother == "jac")
1001 return std::make_shared<Amg::AMG<OP, D, SeqJac<M,D,R>>>(op, config);
1002 if(smoother == "gs")
1003 return std::make_shared<Amg::AMG<OP, D, SeqGS<M,D,R>>>(op, config);
1004 if(smoother == "ilu")
1005 return std::make_shared<Amg::AMG<OP, D, SeqILU<M,D,R>>>(op, config);
1006 else
1007 DUNE_THROW(Dune::Exception, "Unknown smoother for AMG");
1008 }
1009
1010 template<typename TL, typename M>
1011 std::shared_ptr<Dune::Preconditioner<typename Dune::TypeListElement<1, TL>::type,
1012 typename Dune::TypeListElement<2, TL>::type>>
1013 operator() (TL /*tl*/, const M& /*mat*/, const Dune::ParameterTree& /*config*/,
1014 std::enable_if_t<!isValidBlockType<typename M::block_type>::value,int> = 0) const
1015 {
1016 DUNE_THROW(UnsupportedType, "AMG needs a FieldMatrix as Matrix block_type");
1017 }
1018 };
1019
1020 DUNE_REGISTER_PRECONDITIONER("amg", AMGCreator());
1021} // end namespace Dune
1022
1023#endif
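
Usage sketch (not part of amg.hh): the following illustrates how the class defined above is typically wired into an ISTL solve, using the criterion-based constructor (amg.hh:361) with a sequential SSOR smoother. The matrix/vector types, the smoother choice, and all numerical parameters are illustrative assumptions, not taken from this header.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/amg.hh>

using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
using Vec = Dune::BlockVector<Dune::FieldVector<double,1>>;

// Solve A x = b with CG preconditioned by the sequential AMG defined above.
void solveWithAMG(const Mat& A, Vec& x, Vec& b)
{
  using Operator = Dune::MatrixAdapter<Mat,Vec,Vec>;
  using Smoother = Dune::SeqSSOR<Mat,Vec,Vec>;
  using AMG      = Dune::Amg::AMG<Operator,Vec,Smoother>;

  Operator op(A);

  // Coarsening criterion: unsymmetric criterion with the row-sum norm,
  // matching the norm chosen in the ParameterTree constructor (amg.hh:400).
  using Criterion = Dune::Amg::CoarsenCriterion<
      Dune::Amg::UnSymmetricCriterion<Mat,Dune::Amg::RowSum>>;
  Criterion criterion(15, 2000);          // maxLevel, coarsenTarget (example values)
  criterion.setDefaultValuesIsotropic(2); // isotropic 2D defaults

  // One SSOR sweep with relaxation 1.0 as pre-/post-smoother.
  Dune::Amg::SmootherTraits<Smoother>::Arguments smootherArgs;
  smootherArgs.iterations = 1;
  smootherArgs.relaxationFactor = 1.0;

  AMG amg(op, criterion, smootherArgs);   // constructor defined at amg.hh:361

  Dune::CGSolver<Vec> cg(op, amg, 1e-8, 200, 2);
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);
}

Note that the operator op must outlive the preconditioner, since the AMG keeps only a non-owning pointer to it (via stackobject_to_shared_ptr).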
Parallel algebraic multigrid based on agglomeration.
Definition: amg.hh:62
The criterion describing the stop criteria for the coarsening process.
Definition: matrixhierarchy.hh:283
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:215
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:218
an algebraic multigrid method using a Krylov-cycle.
Definition: kamg.hh:142
Two grid operator for AMG with Krylov cycle.
Definition: kamg.hh:31
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:59
All parameters for AMG.
Definition: parameters.hh:391
Base class for Dune-Exceptions.
Definition: exceptions.hh:94
ConstIterator class for sequential access.
Definition: matrix.hh:401
A generic dynamic dense matrix.
Definition: matrix.hh:558
typename Imp::BlockTraits< T >::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:562
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:586
T block_type
Export the type representing the components.
Definition: matrix.hh:565
The negation of a set. An item is contained in the set if and only if it is not contained in the negated set.
Definition: enumset.hh:95
Default exception for dummy implementations.
Definition: exceptions.hh:261
Hierarchical structure of string parameters.
Definition: parametertree.hh:35
std::string get(const std::string &key, const std::string &defaultValue) const
get value as string
Definition: parametertree.cc:183
bool hasKey(const std::string &key) const
test for key
Definition: parametertree.cc:46
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:30
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:48
A few common exception classes.
Traits for type conversions and type information.
Implementations of the inverse operator interface.
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters with.
Definition: unused.hh:25
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
constexpr GeometryType none(unsigned int dim)
Returns a GeometryType representing a singular of dimension dim.
Definition: type.hh:785
AMG(const AMG &amg)
Copy constructor.
Definition: amg.hh:325
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: amg.hh:617
Hierarchy< Domain, A >::Iterator update
The iterator over the updates.
Definition: amg.hh:250
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: amg.hh:254
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: amg.hh:840
X Domain
The domain type.
Definition: amg.hh:84
AMG(OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const SmootherArgs &smootherArgs, const Parameters &parms)
Construct a new amg with a specific coarse solver.
Definition: amg.hh:339
AMG(std::shared_ptr< const Operator > fineOperator, const ParameterTree &configuration, const ParallelInformation &pinfo=ParallelInformation())
Construct an AMG via ParameterTree.
Definition: amg.hh:385
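A hedged sketch of a ParameterTree carrying the keys this constructor reads (amg.hh:394-433). Only "maxLevel" is read unconditionally; every other key is optional, and the values shown are illustrative examples, not defaults taken from this header.

#include <dune/common/parametertree.hh>

// Build a configuration for the ParameterTree constructor above.
Dune::ParameterTree makeAmgConfig()
{
  Dune::ParameterTree config;
  config["maxLevel"]                  = "15";    // read unconditionally (amg.hh:403)
  config["coarsenTarget"]             = "2000";
  config["prolongationDampingFactor"] = "1.6";
  config["alpha"]                     = "0.33";
  config["beta"]                      = "1e-5";
  config["gamma"]                     = "1";     // cycle index: 1 = V-cycle, 2 = W-cycle
  config["additive"]                  = "false"; // multiplicative cycle
  config["preSteps"]                  = "2";
  config["postSteps"]                 = "2";
  config["smootherIterations"]        = "1";
  config["smootherRelaxation"]        = "1.0";
  config["verbosity"]                 = "2";     // default if omitted (amg.hh:432)
  return config;
}

An AMG instance would then be built as Dune::Amg::AMG<OP,V,Smoother> amg(opPtr, makeAmgConfig()); where opPtr is a std::shared_ptr<const OP> to the assembled fine-level operator.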
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: amg.hh:234
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: amg.hh:242
SmootherTraits< Smoother >::Arguments SmootherArgs
The argument type for the construction of the smoother.
Definition: amg.hh:97
S Smoother
The type of the smoother.
Definition: amg.hh:94
Hierarchy< Smoother, A >::Iterator smoother
The iterator over the smoothers.
Definition: amg.hh:226
M Operator
The matrix operator type.
Definition: amg.hh:70
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: amg.hh:230
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: amg.hh:238
X Range
The range type.
Definition: amg.hh:86
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:468
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: amg.hh:973
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: amg.hh:246
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
static std::shared_ptr< T > construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: amg.hh:192
Iterator coarsest()
Get an iterator positioned at the coarsest level.
Definition: hierarchy.hh:382
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: amg.hh:81
InverseOperator< X, X > CoarseSolver
The type of the coarse solver.
Definition: amg.hh:88
void post(Domain &x)
Clean up.
Definition: amg.hh:949
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:490
std::size_t level
The level index.
Definition: amg.hh:258
AMG(const Operator &fineOperator, const C &criterion, const SmootherArgs &smootherArgs=SmootherArgs(), const ParallelInformation &pinfo=ParallelInformation())
Construct an AMG with an inexact coarse solver based on the smoother.
Definition: amg.hh:361
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: amg.hh:704
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: amg.hh:79
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: amg.hh:165
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the...
Definition: amg.hh:77
Provides a classes representing the hierarchies in AMG.
Dune namespace.
Definition: alignedallocator.hh:14
shared_ptr< T > stackobject_to_shared_ptr(T &t)
Create a shared_ptr for a stack-allocated object.
Definition: shared_ptr.hh:75
A hierarchical structure of string parameters.
Implements a scalar matrix view wrapper around an existing scalar.
Define base class for scalar product and norm.
Implements a scalar vector view wrapper around an existing scalar.
Classes for the generic construction and application of the smoothers.
Templates characterizing the type of a solver.
Functor using the row sum (infinity) norm to determine strong couplings.
Definition: aggregates.hh:459
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:65
Statistics about the application of an inverse operator.
Definition: solver.hh:46
bool converged
True if convergence criterion has been met.
Definition: solver.hh:71
Categories for the solvers.
Definition: solvercategory.hh:20
Category
Definition: solvercategory.hh:21
static Category category(const OP &op, decltype(op.category()) *=nullptr)
Helper function to extract the solver category either from an enum, or from the newly introduced virtu...
Definition: solvercategory.hh:32
Classes for using SuperLU with ISTL matrices.
Prolongation and restriction for amg.
Classes for using UMFPack with ISTL matrices.