Dune Core Modules (2.5.0)

hierarchy.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_AMGHIERARCHY_HH
4#define DUNE_AMGHIERARCHY_HH
5
6#include <list>
7#include <memory>
8#include <limits>
9#include <algorithm>
10#include <tuple>
11#include "aggregates.hh"
12#include "graph.hh"
13#include "galerkin.hh"
14#include "renumberer.hh"
15#include "graphcreator.hh"
17#include <dune/common/unused.hh>
18#include <dune/common/timer.hh>
20#include <dune/istl/bvector.hh>
31
32namespace Dune
33{
34 namespace Amg
35 {
47 enum {
55 MAX_PROCESSES = 72000
56 };
57
65 template<typename T, typename A=std::allocator<T> >
66 class Hierarchy
67 {
68 public:
72 typedef T MemberType;
73
74 template<typename T1, typename T2>
75 class LevelIterator;
76
77 private:
81 struct Element
82 {
83 friend class LevelIterator<Hierarchy<T,A>, T>;
84 friend class LevelIterator<const Hierarchy<T,A>, const T>;
85
87 Element* coarser_;
88
90 Element* finer_;
91
93 MemberType* element_;
94
96 MemberType* redistributed_;
97 };
98 public:
99 // enum{
100 // /**
101 // * @brief If true only the method addCoarser will be usable
102 // * otherwise only the method addFiner will be usable.
103 // */
104 // coarsen = b
105 // };
106
110 typedef typename A::template rebind<Element>::other Allocator;
111
112 typedef typename ConstructionTraits<T>::Arguments Arguments;
113
118 Hierarchy();
119
126 Hierarchy(MemberType& first);
127
131 Hierarchy(MemberType* first);
132
136 Hierarchy(const Hierarchy& other);
141 void addCoarser(Arguments& args);
142
143 void addRedistributedOnCoarsest(Arguments& args);
144
149 void addFiner(Arguments& args);
150
157 template<class C, class T1>
158 class LevelIterator
159 : public BidirectionalIteratorFacade<LevelIterator<C,T1>,T1,T1&>
160 {
161 friend class LevelIterator<typename std::remove_const<C>::type,
162 typename std::remove_const<T1>::type >;
163 friend class LevelIterator<const typename std::remove_const<C>::type,
164 const typename std::remove_const<T1>::type >;
165
166 public:
168 LevelIterator()
169 : element_(0)
170 {}
171
172 LevelIterator(Element* element)
173 : element_(element)
174 {}
175
177 LevelIterator(const LevelIterator<typename std::remove_const<C>::type,
178 typename std::remove_const<T1>::type>& other)
179 : element_(other.element_)
180 {}
181
183 LevelIterator(const LevelIterator<const typename std::remove_const<C>::type,
184 const typename std::remove_const<T1>::type>& other)
185 : element_(other.element_)
186 {}
187
191 bool equals(const LevelIterator<typename std::remove_const<C>::type,
192 typename std::remove_const<T1>::type>& other) const
193 {
194 return element_ == other.element_;
195 }
196
200 bool equals(const LevelIterator<const typename std::remove_const<C>::type,
201 const typename std::remove_const<T1>::type>& other) const
202 {
203 return element_ == other.element_;
204 }
205
207 T1& dereference() const
208 {
209 return *(element_->element_);
210 }
211
213 void increment()
214 {
215 element_ = element_->coarser_;
216 }
217
219 void decrement()
220 {
221 element_ = element_->finer_;
222 }
223
228 bool isRedistributed() const
229 {
230 return element_->redistributed_;
231 }
232
237 T1& getRedistributed() const
238 {
239 assert(element_->redistributed_);
240 return *element_->redistributed_;
241 }
242 void addRedistributed(T1* t)
243 {
244 element_->redistributed_ = t;
245 }
246
247 void deleteRedistributed()
248 {
249 element_->redistributed_ = nullptr;
250 }
251
252 private:
253 Element* element_;
254 };
255
257 typedef LevelIterator<Hierarchy<T,A>,T> Iterator;
258
260 typedef LevelIterator<const Hierarchy<T,A>, const T> ConstIterator;
261
266 Iterator finest();
267
272 Iterator coarsest();
273
274
279 ConstIterator finest() const;
280
285 ConstIterator coarsest() const;
286
291 std::size_t levels() const;
292
294 ~Hierarchy();
295
296 private:
298 Element* finest_;
300 Element* coarsest_;
302 Element* nonAllocated_;
304 Allocator allocator_;
306 int levels_;
307 };
308
315 template<class M, class PI, class A=std::allocator<M> >
316 class MatrixHierarchy
317 {
318 public:
320 typedef M MatrixOperator;
321
323 typedef typename MatrixOperator::matrix_type Matrix;
324
326 typedef PI ParallelInformation;
327
329 typedef A Allocator;
330
332 typedef Dune::Amg::AggregatesMap<typename MatrixGraph<Matrix>::VertexDescriptor> AggregatesMap;
333
335 typedef Dune::Amg::Hierarchy<MatrixOperator,Allocator> ParallelMatrixHierarchy;
336
338 typedef Dune::Amg::Hierarchy<ParallelInformation,Allocator> ParallelInformationHierarchy;
339
341 typedef typename Allocator::template rebind<AggregatesMap*>::other AAllocator;
342
344 typedef std::list<AggregatesMap*,AAllocator> AggregatesMapList;
345
347 typedef RedistributeInformation<ParallelInformation> RedistributeInfoType;
348
350 typedef typename Allocator::template rebind<RedistributeInfoType>::other RILAllocator;
351
353 typedef std::list<RedistributeInfoType,RILAllocator> RedistributeInfoList;
354
360 MatrixHierarchy(const MatrixOperator& fineMatrix,
361 const ParallelInformation& pinfo=ParallelInformation());
362
363
364 ~MatrixHierarchy();
365
371 template<typename O, typename T>
372 void build(const T& criterion);
373
381 template<class F>
382 void recalculateGalerkin(const F& copyFlags);
383
388 template<class V, class TA>
389 void coarsenVector(Hierarchy<BlockVector<V,TA> >& hierarchy) const;
390
396 template<class S, class TA>
397 void coarsenSmoother(Hierarchy<S,TA>& smoothers,
398 const typename SmootherTraits<S>::Arguments& args) const;
399
404 std::size_t levels() const;
405
410 std::size_t maxlevels() const;
411
412 bool hasCoarsest() const;
413
418 bool isBuilt() const;
419
424 const ParallelMatrixHierarchy& matrices() const;
425
430 const ParallelInformationHierarchy& parallelInformation() const;
431
436 const AggregatesMapList& aggregatesMaps() const;
437
443 const RedistributeInfoList& redistributeInformation() const;
444
445
446 typename MatrixOperator::field_type getProlongationDampingFactor() const
447 {
448 return prolongDamp_;
449 }
450
461 void getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const;
462
463 private:
464 typedef typename ConstructionTraits<MatrixOperator>::Arguments MatrixArgs;
465 typedef typename ConstructionTraits<ParallelInformation>::Arguments CommunicationArgs;
467 AggregatesMapList aggregatesMaps_;
469 RedistributeInfoList redistributes_;
471 ParallelMatrixHierarchy matrices_;
473 ParallelInformationHierarchy parallelInformation_;
474
476 bool built_;
477
479 int maxlevels_;
480
481 typename MatrixOperator::field_type prolongDamp_;
482
486 template<class Matrix, bool print>
487 struct MatrixStats
488 {
489
493 static void stats(const Matrix& matrix)
494 {
495 DUNE_UNUSED_PARAMETER(matrix);
496 }
497 };
498
499 template<class Matrix>
500 struct MatrixStats<Matrix,true>
501 {
502 struct calc
503 {
504 typedef typename Matrix::size_type size_type;
505 typedef typename Matrix::row_type matrix_row;
506
507 calc()
508 {
509 min=std::numeric_limits<size_type>::max();
510 max=0;
511 sum=0;
512 }
513
514 void operator()(const matrix_row& row)
515 {
516 min=std::min(min, row.size());
517 max=std::max(max, row.size());
518 sum += row.size();
519 }
520
521 size_type min;
522 size_type max;
523 size_type sum;
524 };
528 static void stats(const Matrix& matrix)
529 {
530 calc c= for_each(matrix.begin(), matrix.end(), calc());
531 dinfo<<"Matrix row: min="<<c.min<<" max="<<c.max
532 <<" average="<<static_cast<double>(c.sum)/matrix.N()
533 <<std::endl;
534 }
535 };
536 };
537
541 template<class T>
542 class CoarsenCriterion : public T
543 {
544 public:
549 typedef T AggregationCriterion;
550
561 CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2,
562 double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
563 : AggregationCriterion(Dune::Amg::Parameters(maxLevel, coarsenTarget, minCoarsenRate, prolongDamp, accumulate))
564 {}
565
566 CoarsenCriterion(const Dune::Amg::Parameters& parms)
567 : AggregationCriterion(parms)
568 {}
569
570 };
571
572 template<typename M, typename C1>
573 bool repartitionAndDistributeMatrix(const M& origMatrix, M& newMatrix,
574 SequentialInformation& origComm,
575 SequentialInformation*& newComm,
576 RedistributeInformation<SequentialInformation>& ri,
577 int nparts, C1& criterion)
578 {
579 DUNE_UNUSED_PARAMETER(origMatrix);
580 DUNE_UNUSED_PARAMETER(newMatrix);
581 DUNE_UNUSED_PARAMETER(origComm);
582 DUNE_UNUSED_PARAMETER(newComm);
583 DUNE_UNUSED_PARAMETER(ri);
584 DUNE_UNUSED_PARAMETER(nparts);
585 DUNE_UNUSED_PARAMETER(criterion);
586 DUNE_THROW(NotImplemented, "Redistribution does not make sense in sequential code!");
587 }
588
589
590 template<typename M, typename C, typename C1>
591 bool repartitionAndDistributeMatrix(const M& origMatrix, M& newMatrix, C& origComm, C*& newComm,
592 RedistributeInformation<C>& ri,
593 int nparts, C1& criterion)
594 {
595 Timer time;
596#ifdef AMG_REPART_ON_COMM_GRAPH
597 // Do not repartition the matrix graph, but rather the graph of the communication scheme.
598 bool existentOnRedist=Dune::commGraphRepartition(origMatrix, origComm, nparts, newComm,
599 ri.getInterface(),
600 criterion.debugLevel()>1);
601
602#else
603 typedef Dune::Amg::MatrixGraph<const M> MatrixGraph;
604 typedef Dune::Amg::PropertiesGraph<MatrixGraph,
605 VertexProperties,
606 EdgeProperties,
607 IdentityMap,
608 IdentityMap> PropertiesGraph;
609 MatrixGraph graph(origMatrix);
610 PropertiesGraph pgraph(graph);
611 buildDependency(pgraph, origMatrix, criterion, false);
612
613#ifdef DEBUG_REPART
614 if(origComm.communicator().rank()==0)
615 std::cout<<"Original matrix"<<std::endl;
616 origComm.communicator().barrier();
617 printGlobalSparseMatrix(origMatrix, origComm, std::cout);
618#endif
619 bool existentOnRedist=Dune::graphRepartition(pgraph, origComm, nparts,
620 newComm, ri.getInterface(),
621 criterion.debugLevel()>1);
622#endif // if else AMG_REPART
623
624 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
625 std::cout<<"Repartitioning took "<<time.elapsed()<<" seconds."<<std::endl;
626
627 ri.setSetup();
628
629#ifdef DEBUG_REPART
630 ri.checkInterface(origComm.indexSet(), newComm->indexSet(), origComm.communicator());
631#endif
632
633 redistributeMatrix(const_cast<M&>(origMatrix), newMatrix, origComm, *newComm, ri);
634
635#ifdef DEBUG_REPART
636 if(origComm.communicator().rank()==0)
637 std::cout<<"Original matrix"<<std::endl;
638 origComm.communicator().barrier();
639 if(newComm->communicator().size()>0)
640 printGlobalSparseMatrix(newMatrix, *newComm, std::cout);
641 origComm.communicator().barrier();
642#endif
643
644 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
645 std::cout<<"Redistributing matrix took "<<time.elapsed()<<" seconds."<<std::endl;
646 return existentOnRedist;
647
648 }
649
650 template<typename M>
651 bool repartitionAndDistributeMatrix(M& origMatrix, M& newMatrix,
652 SequentialInformation& origComm,
653 SequentialInformation& newComm,
654 RedistributeInformation<SequentialInformation>& ri)
655 {
656 return true;
657 }
658
659 template<class M, class IS, class A>
660 MatrixHierarchy<M,IS,A>::MatrixHierarchy(const MatrixOperator& fineOperator,
661 const ParallelInformation& pinfo)
662 : matrices_(const_cast<MatrixOperator&>(fineOperator)),
663 parallelInformation_(const_cast<ParallelInformation&>(pinfo))
664 {
665 static_assert((static_cast<int>(MatrixOperator::category) ==
666 static_cast<int>(SolverCategory::sequential)
667 || static_cast<int>(MatrixOperator::category) ==
668 static_cast<int>(SolverCategory::overlapping)
669 || static_cast<int>(MatrixOperator::category) ==
670 static_cast<int>(SolverCategory::nonoverlapping)),
671 "MatrixOperator must be of category sequential or overlapping or nonoverlapping");
672 if (static_cast<int>(MatrixOperator::category) != static_cast<int>(pinfo.getSolverCategory()))
673 DUNE_THROW(ISTLError, "MatrixOperator and ParallelInformation must belong to the same category!");
674
675 }
676
677 template<class M, class IS, class A>
678 template<typename O, typename T>
679 void MatrixHierarchy<M,IS,A>::build(const T& criterion)
680 {
681 prolongDamp_ = criterion.getProlongationDampingFactor();
682 typedef O OverlapFlags;
683 typedef typename ParallelMatrixHierarchy::Iterator MatIterator;
684 typedef typename ParallelInformationHierarchy::Iterator PInfoIterator;
685
686 static const int noints=(Dune::Amg::MAX_PROCESSES/4096>0) ? (Dune::Amg::MAX_PROCESSES/4096) : 1;
687
688 typedef bigunsignedint<sizeof(int)*8*noints> BIGINT;
689 GalerkinProduct<ParallelInformation> productBuilder;
690 MatIterator mlevel = matrices_.finest();
691 MatrixStats<typename M::matrix_type,MINIMAL_DEBUG_LEVEL<=INFO_DEBUG_LEVEL>::stats(mlevel->getmat());
692
693 PInfoIterator infoLevel = parallelInformation_.finest();
694 BIGINT finenonzeros=countNonZeros(mlevel->getmat());
695 finenonzeros = infoLevel->communicator().sum(finenonzeros);
696 BIGINT allnonzeros = finenonzeros;
697
698
699 int level = 0;
700 int rank = 0;
701
702 BIGINT unknowns = mlevel->getmat().N();
703
704 unknowns = infoLevel->communicator().sum(unknowns);
705 double dunknowns=unknowns.todouble();
706 infoLevel->buildGlobalLookup(mlevel->getmat().N());
707 redistributes_.push_back(RedistributeInfoType());
708
709 for(; level < criterion.maxLevel(); ++level, ++mlevel) {
710 assert(matrices_.levels()==redistributes_.size());
711 rank = infoLevel->communicator().rank();
712 if(rank==0 && criterion.debugLevel()>1)
713 std::cout<<"Level "<<level<<" has "<<dunknowns<<" unknowns, "<<dunknowns/infoLevel->communicator().size()
714 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
715
716 MatrixOperator* matrix=&(*mlevel);
717 ParallelInformation* info =&(*infoLevel);
718
719 if((
720#if HAVE_PARMETIS
721 criterion.accumulate()==successiveAccu
722#else
723 false
724#endif
725 || (criterion.accumulate()==atOnceAccu
726 && dunknowns < 30*infoLevel->communicator().size()))
727 && infoLevel->communicator().size()>1 &&
728 dunknowns/infoLevel->communicator().size() <= criterion.coarsenTarget())
729 {
730 // accumulate to fewer processors
731 Matrix* redistMat= new Matrix();
732 ParallelInformation* redistComm=0;
733 std::size_t nodomains = (std::size_t)std::ceil(dunknowns/(criterion.minAggregateSize()
734 *criterion.coarsenTarget()));
735 if( nodomains<=criterion.minAggregateSize()/2 ||
736 dunknowns <= criterion.coarsenTarget() )
737 nodomains=1;
738
739 bool existentOnNextLevel =
740 repartitionAndDistributeMatrix(mlevel->getmat(), *redistMat, *infoLevel,
741 redistComm, redistributes_.back(), nodomains,
742 criterion);
743 BIGINT unknownsRedist = redistMat->N();
744 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
745 dunknowns= unknownsRedist.todouble();
746 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1)
747 std::cout<<"Level "<<level<<" (redistributed) has "<<dunknowns<<" unknowns, "<<dunknowns/redistComm->communicator().size()
748 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
749 MatrixArgs args(*redistMat, *redistComm);
750 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
751 assert(mlevel.isRedistributed());
752 infoLevel.addRedistributed(redistComm);
753 infoLevel->freeGlobalLookup();
754
755 if(!existentOnNextLevel)
756 // We do not hold any data on the redistributed partitioning
757 break;
758
759 // Work on the redistributed Matrix from now on
760 matrix = &(mlevel.getRedistributed());
761 info = &(infoLevel.getRedistributed());
762 info->buildGlobalLookup(matrix->getmat().N());
763 }
764
765 rank = info->communicator().rank();
766 if(dunknowns <= criterion.coarsenTarget())
767 // No further coarsening needed
768 break;
769
770 typedef PropertiesGraphCreator<MatrixOperator> GraphCreator;
771 typedef typename GraphCreator::PropertiesGraph PropertiesGraph;
772 typedef typename GraphCreator::GraphTuple GraphTuple;
773
774 typedef typename PropertiesGraph::VertexDescriptor Vertex;
775
776 std::vector<bool> excluded(matrix->getmat().N(), false);
777
778 GraphTuple graphs = GraphCreator::create(*matrix, excluded, *info, OverlapFlags());
779
780 AggregatesMap* aggregatesMap=new AggregatesMap(std::get<1>(graphs)->maxVertex()+1);
781
782 aggregatesMaps_.push_back(aggregatesMap);
783
784 Timer watch;
785 watch.reset();
786 int noAggregates, isoAggregates, oneAggregates, skippedAggregates;
787
788 std::tie(noAggregates, isoAggregates, oneAggregates, skippedAggregates) =
789 aggregatesMap->buildAggregates(matrix->getmat(), *(std::get<1>(graphs)), criterion, level==0);
790
791 if(rank==0 && criterion.debugLevel()>2)
792 std::cout<<" Have built "<<noAggregates<<" aggregates totally ("<<isoAggregates<<" isolated aggregates, "<<
793 oneAggregates<<" aggregates of one vertex, and skipped "<<
794 skippedAggregates<<" aggregates)."<<std::endl;
795#ifdef TEST_AGGLO
796 {
797 // calculate size of local matrix in the distributed direction
798 int start, end, overlapStart, overlapEnd;
799 int procs=info->communicator().size();
800 int n = UNKNOWNS/procs; // number of unknowns per process
801 int bigger = UNKNOWNS%procs; // number of processes with n+1 unknowns
802
803 // Compute owner region
804 if(rank<bigger) {
805 start = rank*(n+1);
806 end = (rank+1)*(n+1);
807 }else{
808 start = bigger + rank * n;
809 end = bigger + (rank + 1) * n;
810 }
811
812 // Compute overlap region
813 if(start>0)
814 overlapStart = start - 1;
815 else
816 overlapStart = start;
817
818 if(end<UNKNOWNS)
819 overlapEnd = end + 1;
820 else
821 overlapEnd = end;
822
823 assert((UNKNOWNS)*(overlapEnd-overlapStart)==aggregatesMap->noVertices());
824 for(int j=0; j< UNKNOWNS; ++j)
825 for(int i=0; i < UNKNOWNS; ++i)
826 {
827 if(i>=overlapStart && i<overlapEnd)
828 {
829 int no = (j/2)*((UNKNOWNS)/2)+i/2;
830 (*aggregatesMap)[j*(overlapEnd-overlapStart)+i-overlapStart]=no;
831 }
832 }
833 }
834#endif
835 if(criterion.debugLevel()>1 && info->communicator().rank()==0)
836 std::cout<<"aggregating finished."<<std::endl;
837
838 BIGINT gnoAggregates=noAggregates;
839 gnoAggregates = info->communicator().sum(gnoAggregates);
840 double dgnoAggregates = gnoAggregates.todouble();
841#ifdef TEST_AGGLO
842 BIGINT gnoAggregates=((UNKNOWNS)/2)*((UNKNOWNS)/2);
843#endif
844
845 if(criterion.debugLevel()>2 && rank==0)
846 std::cout << "Building "<<dgnoAggregates<<" aggregates took "<<watch.elapsed()<<" seconds."<<std::endl;
847
848 if(dgnoAggregates==0 || dunknowns/dgnoAggregates<criterion.minCoarsenRate())
849 {
850 if(rank==0)
851 {
852 if(dgnoAggregates>0)
853 std::cerr << "Stopped coarsening because of rate breakdown "<<dunknowns<<"/"<<dgnoAggregates
854 <<"="<<dunknowns/dgnoAggregates<<"<"
855 <<criterion.minCoarsenRate()<<std::endl;
856 else
857 std::cerr<< "Could not build any aggregates. Probably no connected nodes."<<std::endl;
858 }
859 aggregatesMap->free();
860 delete aggregatesMap;
861 aggregatesMaps_.pop_back();
862
863 if(criterion.accumulate() && mlevel.isRedistributed() && info->communicator().size()>1) {
864 // coarse level matrix was already redistributed, but to more than 1 process
865 // Therefore need to delete the redistribution. Further down it will
866 // then be redistributed to 1 process
867 delete &(mlevel.getRedistributed().getmat());
868 mlevel.deleteRedistributed();
869 delete &(infoLevel.getRedistributed());
870 infoLevel.deleteRedistributed();
871 redistributes_.back().resetSetup();
872 }
873
874 break;
875 }
876 unknowns = noAggregates;
877 dunknowns = dgnoAggregates;
878
879 CommunicationArgs commargs(info->communicator(),info->getSolverCategory());
880 parallelInformation_.addCoarser(commargs);
881
882 ++infoLevel; // parallel information on coarse level
883
884 typename PropertyMapTypeSelector<VertexVisitedTag,PropertiesGraph>::Type visitedMap =
885 get(VertexVisitedTag(), *(std::get<1>(graphs)));
886
887 watch.reset();
888 int aggregates = IndicesCoarsener<ParallelInformation,OverlapFlags>
889 ::coarsen(*info,
890 *(std::get<1>(graphs)),
891 visitedMap,
892 *aggregatesMap,
893 *infoLevel,
894 noAggregates);
895 GraphCreator::free(graphs);
896
897 if(criterion.debugLevel()>2) {
898 if(rank==0)
899 std::cout<<"Coarsening of index sets took "<<watch.elapsed()<<" seconds."<<std::endl;
900 }
901
902 watch.reset();
903
904 infoLevel->buildGlobalLookup(aggregates);
905 AggregatesPublisher<Vertex,OverlapFlags,ParallelInformation>::publish(*aggregatesMap,
906 *info,
907 infoLevel->globalLookup());
908
909
910 if(criterion.debugLevel()>2) {
911 if(rank==0)
912 std::cout<<"Communicating global aggregate numbers took "<<watch.elapsed()<<" seconds."<<std::endl;
913 }
914
915 watch.reset();
916 std::vector<bool>& visited=excluded;
917
918 typedef std::vector<bool>::iterator Iterator;
919 typedef IteratorPropertyMap<Iterator, IdentityMap> VisitedMap2;
920 Iterator end = visited.end();
921 for(Iterator iter= visited.begin(); iter != end; ++iter)
922 *iter=false;
923
924 VisitedMap2 visitedMap2(visited.begin(), Dune::IdentityMap());
925
926 typename MatrixOperator::matrix_type* coarseMatrix;
927
928 coarseMatrix = productBuilder.build(*(std::get<0>(graphs)), visitedMap2,
929 *info,
930 *aggregatesMap,
931 aggregates,
932 OverlapFlags());
933 dverb<<"Building of sparsity pattern took "<<watch.elapsed()<<std::endl;
934 watch.reset();
935 info->freeGlobalLookup();
936
937 delete std::get<0>(graphs);
938 productBuilder.calculate(matrix->getmat(), *aggregatesMap, *coarseMatrix, *infoLevel, OverlapFlags());
939
940 if(criterion.debugLevel()>2) {
941 if(rank==0)
942 std::cout<<"Calculation entries of Galerkin product took "<<watch.elapsed()<<" seconds."<<std::endl;
943 }
944
945 BIGINT nonzeros = countNonZeros(*coarseMatrix);
946 allnonzeros = allnonzeros + infoLevel->communicator().sum(nonzeros);
947 MatrixArgs args(*coarseMatrix, *infoLevel);
948
949 matrices_.addCoarser(args);
950 redistributes_.push_back(RedistributeInfoType());
951 } // end level loop
952
953
954 infoLevel->freeGlobalLookup();
955
956 built_=true;
957 AggregatesMap* aggregatesMap=new AggregatesMap(0);
958 aggregatesMaps_.push_back(aggregatesMap);
959
960 if(criterion.debugLevel()>0) {
961 if(level==criterion.maxLevel()) {
962 BIGINT unknownsLevel = mlevel->getmat().N();
963 unknownsLevel = infoLevel->communicator().sum(unknownsLevel);
964 double dunknownsLevel = unknownsLevel.todouble();
965 if(rank==0 && criterion.debugLevel()>1) {
966 std::cout<<"Level "<<level<<" has "<<dunknownsLevel<<" unknowns, "<<dunknownsLevel/infoLevel->communicator().size()
967 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
968 }
969 }
970 }
971
972 if(criterion.accumulate() && !redistributes_.back().isSetup() &&
973 infoLevel->communicator().size()>1) {
974#if HAVE_MPI && !HAVE_PARMETIS
975 if(criterion.accumulate()==successiveAccu &&
976 infoLevel->communicator().rank()==0)
977 std::cerr<<"Successive accumulation of data on coarse levels only works with ParMETIS installed."
978 <<" Fell back to accumulation to one domain on coarsest level"<<std::endl;
979#endif
980
981 // accumulate to fewer processors
982 Matrix* redistMat= new Matrix();
983 ParallelInformation* redistComm=0;
984 int nodomains = 1;
985
986 repartitionAndDistributeMatrix(mlevel->getmat(), *redistMat, *infoLevel,
987 redistComm, redistributes_.back(), nodomains,criterion);
988 MatrixArgs args(*redistMat, *redistComm);
989 BIGINT unknownsRedist = redistMat->N();
990 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
991
992 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1) {
993 double dunknownsRedist = unknownsRedist.todouble();
994 std::cout<<"Level "<<level<<" redistributed has "<<dunknownsRedist<<" unknowns, "<<dunknownsRedist/redistComm->communicator().size()
995 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
996 }
997 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
998 infoLevel.addRedistributed(redistComm);
999 infoLevel->freeGlobalLookup();
1000 }
1001
1002 int levels = matrices_.levels();
1003 maxlevels_ = parallelInformation_.finest()->communicator().max(levels);
1004 assert(matrices_.levels()==redistributes_.size());
1005 if(hasCoarsest() && rank==0 && criterion.debugLevel()>1)
1006 std::cout<<"operator complexity: "<<allnonzeros.todouble()/finenonzeros.todouble()<<std::endl;
1007
1008 }
1009
1010 template<class M, class IS, class A>
1011 const typename MatrixHierarchy<M,IS,A>::ParallelMatrixHierarchy&
1012 MatrixHierarchy<M,IS,A>::matrices() const
1013 {
1014 return matrices_;
1015 }
1016
1017 template<class M, class IS, class A>
1018 const typename MatrixHierarchy<M,IS,A>::ParallelInformationHierarchy&
1019 MatrixHierarchy<M,IS,A>::parallelInformation() const
1020 {
1021 return parallelInformation_;
1022 }
1023
1024 template<class M, class IS, class A>
1025 void MatrixHierarchy<M,IS,A>::getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const
1026 {
1027 int levels=aggregatesMaps().size();
1028 int maxlevels=parallelInformation_.finest()->communicator().max(levels);
1029 std::size_t size=(*(aggregatesMaps().begin()))->noVertices();
1030 // We need an auxiliary vector for the consecutive prolongation.
1031 std::vector<std::size_t> tmp;
1032 std::vector<std::size_t> *coarse, *fine;
1033
1034 // make sure the allocated space suffices.
1035 tmp.reserve(size);
1036 data.reserve(size);
1037
1038 // Correctly assign coarse and fine for the first prolongation such that
1039 // we end up in data in the end.
1040 if(levels%2==0) {
1041 coarse=&tmp;
1042 fine=&data;
1043 }else{
1044 coarse=&data;
1045 fine=&tmp;
1046 }
1047
1048 // Number the unknowns on the coarsest level consecutively for each process.
1049 if(levels==maxlevels) {
1050 const AggregatesMap& map = *(*(++aggregatesMaps().rbegin()));
1051 std::size_t m=0;
1052
1053 for(typename AggregatesMap::const_iterator iter = map.begin(); iter != map.end(); ++iter)
1054 if(*iter< AggregatesMap::ISOLATED)
1055 m=std::max(*iter,m);
1056
1057 coarse->resize(m+1);
1058 std::size_t i=0;
1059 srand((unsigned)std::clock());
1060 std::set<size_t> used;
1061 for(typename std::vector<std::size_t>::iterator iter=coarse->begin(); iter != coarse->end();
1062 ++iter, ++i)
1063 {
1064 std::pair<std::set<std::size_t>::iterator,bool> ibpair
1065 = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
1066
1067 while(!ibpair.second)
1068 ibpair = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
1069 *iter=*(ibpair.first);
1070 }
1071 }
1072
1073 typename ParallelInformationHierarchy::Iterator pinfo = parallelInformation().coarsest();
1074 --pinfo;
1075
1076 // Now consecutively project the numbers to the finest level.
1077 for(typename AggregatesMapList::const_reverse_iterator aggregates=++aggregatesMaps().rbegin();
1078 aggregates != aggregatesMaps().rend(); ++aggregates,--levels) {
1079
1080 fine->resize((*aggregates)->noVertices());
1081 fine->assign(fine->size(), 0);
1082 Transfer<typename AggregatesMap::AggregateDescriptor, std::vector<std::size_t>, ParallelInformation>
1083 ::prolongateVector(*(*aggregates), *coarse, *fine, static_cast<std::size_t>(1), *pinfo);
1084 --pinfo;
1085 std::swap(coarse, fine);
1086 }
1087
1088 // Assertion to check that we really projected to data on the last step.
1089 assert(coarse==&data);
1090 }
1091
1092 template<class M, class IS, class A>
1093 const typename MatrixHierarchy<M,IS,A>::AggregatesMapList&
1094 MatrixHierarchy<M,IS,A>::aggregatesMaps() const
1095 {
1096 return aggregatesMaps_;
1097 }
1098 template<class M, class IS, class A>
1099 const typename MatrixHierarchy<M,IS,A>::RedistributeInfoList&
1100 MatrixHierarchy<M,IS,A>::redistributeInformation() const
1101 {
1102 return redistributes_;
1103 }
1104
1105 template<class M, class IS, class A>
1106 MatrixHierarchy<M,IS,A>::~MatrixHierarchy()
1107 {
1108 typedef typename AggregatesMapList::reverse_iterator AggregatesMapIterator;
1109 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
1110 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
1111
1112 AggregatesMapIterator amap = aggregatesMaps_.rbegin();
1113 InfoIterator info = parallelInformation_.coarsest();
1114 for(Iterator level=matrices_.coarsest(), finest=matrices_.finest(); level != finest; --level, --info, ++amap) {
1115 (*amap)->free();
1116 delete *amap;
1117 delete &level->getmat();
1118 if(level.isRedistributed())
1119 delete &(level.getRedistributed().getmat());
1120 }
1121 delete *amap;
1122 }
1123
1124 template<class M, class IS, class A>
1125 template<class V, class TA>
1126 void MatrixHierarchy<M,IS,A>::coarsenVector(Hierarchy<BlockVector<V,TA> >& hierarchy) const
1127 {
1128 assert(hierarchy.levels()==1);
1129 typedef typename ParallelMatrixHierarchy::ConstIterator Iterator;
1130 typedef typename RedistributeInfoList::const_iterator RIter;
1131 RIter redist = redistributes_.begin();
1132
1133 Iterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
1134 int level=0;
1135 if(redist->isSetup())
1136 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
1137 Dune::dvverb<<"Level "<<level<<" has "<<matrices_.finest()->getmat().N()<<" unknowns!"<<std::endl;
1138
1139 while(matrix != coarsest) {
1140 ++matrix; ++level; ++redist;
1141 Dune::dvverb<<"Level "<<level<<" has "<<matrix->getmat().N()<<" unknowns!"<<std::endl;
1142
1143 hierarchy.addCoarser(matrix->getmat().N());
1144 if(redist->isSetup())
1145 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
1146
1147 }
1148
1149 }
1150
1151 template<class M, class IS, class A>
1152 template<class S, class TA>
1153 void MatrixHierarchy<M,IS,A>::coarsenSmoother(Hierarchy<S,TA>& smoothers,
1154 const typename SmootherTraits<S>::Arguments& sargs) const
1155 {
1156 assert(smoothers.levels()==0);
1157 typedef typename ParallelMatrixHierarchy::ConstIterator MatrixIterator;
1158 typedef typename ParallelInformationHierarchy::ConstIterator PinfoIterator;
1159 typedef typename AggregatesMapList::const_iterator AggregatesIterator;
1160
1161 typename ConstructionTraits<S>::Arguments cargs;
1162 cargs.setArgs(sargs);
1163 PinfoIterator pinfo = parallelInformation_.finest();
1164 AggregatesIterator aggregates = aggregatesMaps_.begin();
1165 int level=0;
1166 for(MatrixIterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
1167 matrix != coarsest; ++matrix, ++pinfo, ++aggregates, ++level) {
1168 cargs.setMatrix(matrix->getmat(), **aggregates);
1169 cargs.setComm(*pinfo);
1170 smoothers.addCoarser(cargs);
1171 }
1172 if(maxlevels()>levels()) {
1173 // This is not the globally coarsest level and therefore smoothing is needed
1174 cargs.setMatrix(matrices_.coarsest()->getmat(), **aggregates);
1175 cargs.setComm(*pinfo);
1176 smoothers.addCoarser(cargs);
1177 ++level;
1178 }
1179 }
1180
1181 template<class M, class IS, class A>
1182 template<class F>
1183 void MatrixHierarchy<M,IS,A>::recalculateGalerkin(const F& copyFlags)
1184 {
1185 typedef typename AggregatesMapList::iterator AggregatesMapIterator;
1186 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
1187 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
1188
1189 AggregatesMapIterator amap = aggregatesMaps_.begin();
1190 BaseGalerkinProduct productBuilder;
1191 InfoIterator info = parallelInformation_.finest();
1192 typename RedistributeInfoList::iterator riIter = redistributes_.begin();
1193 Iterator level = matrices_.finest(), coarsest=matrices_.coarsest();
1194 if(level.isRedistributed()) {
1195 info->buildGlobalLookup(level->getmat().N());
1196 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
1197 const_cast<Matrix&>(level.getRedistributed().getmat()),
1198 *info,info.getRedistributed(), *riIter);
1199 info->freeGlobalLookup();
1200 }
1201
1202 for(; level!=coarsest; ++amap) {
1203 const Matrix& fine = (level.isRedistributed() ? level.getRedistributed() : *level).getmat();
1204 ++level;
1205 ++info;
1206 ++riIter;
1207 productBuilder.calculate(fine, *(*amap), const_cast<Matrix&>(level->getmat()), *info, copyFlags);
1208 if(level.isRedistributed()) {
1209 info->buildGlobalLookup(level->getmat().N());
1210 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
1211 const_cast<Matrix&>(level.getRedistributed().getmat()), *info,
1212 info.getRedistributed(), *riIter);
1213 info->freeGlobalLookup();
1214 }
1215 }
1216 }
1217
1218 template<class M, class IS, class A>
1219 std::size_t MatrixHierarchy<M,IS,A>::levels() const
1220 {
1221 return matrices_.levels();
1222 }
1223
1224 template<class M, class IS, class A>
1225 std::size_t MatrixHierarchy<M,IS,A>::maxlevels() const
1226 {
1227 return maxlevels_;
1228 }
1229
1230 template<class M, class IS, class A>
1231 bool MatrixHierarchy<M,IS,A>::hasCoarsest() const
1232 {
1233 return levels()==maxlevels() &&
1234 (!matrices_.coarsest().isRedistributed() ||matrices_.coarsest()->getmat().N()>0);
1235 }
1236
1237 template<class M, class IS, class A>
1238 bool MatrixHierarchy<M,IS,A>::isBuilt() const
1239 {
1240 return built_;
1241 }
1242
1243 template<class T, class A>
1244 Hierarchy<T,A>::Hierarchy()
1245 : finest_(0), coarsest_(0), nonAllocated_(0), allocator_(), levels_(0)
1246 {}
1247
1248 template<class T, class A>
1249 Hierarchy<T,A>::Hierarchy(MemberType& first)
1250 : allocator_()
1251 {
1252 finest_ = allocator_.allocate(1,0);
1253 finest_->element_ = &first;
1254 finest_->redistributed_ = nullptr;
1255 nonAllocated_ = finest_;
1256 coarsest_ = finest_;
1257 coarsest_->coarser_ = coarsest_->finer_ = nullptr;
1258 levels_ = 1;
1259 }
1260
1261 template<class T, class A>
1262 Hierarchy<T,A>::Hierarchy(MemberType* first)
1263 : allocator_()
1264 {
1265 finest_ = allocator_.allocate(1,0);
1266 finest_->element_ = first;
1267 finest_->redistributed_ = nullptr;
1268 nonAllocated_ = nullptr;
1269 coarsest_ = finest_;
1270 coarsest_->coarser_ = coarsest_->finer_ = nullptr;
1271 levels_ = 1;
1272 }
1273 template<class T, class A>
1274 Hierarchy<T,A>::~Hierarchy()
1275 {
1276 while(coarsest_) {
1277 Element* current = coarsest_;
1278 coarsest_ = coarsest_->finer_;
1279 if(current != nonAllocated_) {
1280 if(current->redistributed_)
1281 ConstructionTraits<T>::deconstruct(current->redistributed_);
1282 ConstructionTraits<T>::deconstruct(current->element_);
1283 }
1284 allocator_.deallocate(current, 1);
1285 current=nullptr;
1286 //coarsest_->coarser_ = nullptr;
1287 }
1288 }
1289
1290 template<class T, class A>
1291 Hierarchy<T,A>::Hierarchy(const Hierarchy& other)
1292 : nonAllocated_(), allocator_(other.allocator_),
1293 levels_(other.levels_)
1294 {
1295 if(!other.finest_)
1296 {
1297 finest_=coarsest_=nonAllocated_=nullptr;
1298 return;
1299 }
1300 finest_=allocator_.allocate(1,0);
1301 Element* finer_ = nullptr;
1302 Element* current_ = finest_;
1303 Element* otherCurrent_ = other.finest_;
1304
1305 while(otherCurrent_)
1306 {
1307 T* t=new T(*(otherCurrent_->element_));
1308 current_->element_=t;
1309 current_->finer_=finer_;
1310 if(otherCurrent_->redistributed_)
1311 current_->redistributed_ = new T(*otherCurrent_->redistributed_);
1312 else
1313 current_->redistributed_= nullptr;
1314 finer_=current_;
1315 if(otherCurrent_->coarser_)
1316 {
1317 current_->coarser_=allocator_.allocate(1,0);
1318 current_=current_->coarser_;
1319 }else
1320 current_->coarser_=nullptr;
1321 otherCurrent_=otherCurrent_->coarser_;
1322 }
1323 coarsest_=current_;
1324 }
1325
1326 template<class T, class A>
1327 std::size_t Hierarchy<T,A>::levels() const
1328 {
1329 return levels_;
1330 }
1331
1332 template<class T, class A>
1333 void Hierarchy<T,A>::addRedistributedOnCoarsest(Arguments& args)
1334 {
1335 coarsest_->redistributed_ = ConstructionTraits<MemberType>::construct(args);
1336 }
1337
1338 template<class T, class A>
1339 void Hierarchy<T,A>::addCoarser(Arguments& args)
1340 {
1341 if(!coarsest_) {
1342 assert(!finest_);
1343 coarsest_ = allocator_.allocate(1,0);
1344 coarsest_->element_ = ConstructionTraits<MemberType>::construct(args);
1345 finest_ = coarsest_;
1346 coarsest_->finer_ = nullptr;
1347 }else{
1348 coarsest_->coarser_ = allocator_.allocate(1,0);
1349 coarsest_->coarser_->finer_ = coarsest_;
1350 coarsest_ = coarsest_->coarser_;
1351 coarsest_->element_ = ConstructionTraits<MemberType>::construct(args);
1352 }
1353 coarsest_->redistributed_ = nullptr;
1354 coarsest_->coarser_=nullptr;
1355 ++levels_;
1356 }
1357
1358
1359 template<class T, class A>
1360 void Hierarchy<T,A>::addFiner(Arguments& args)
1361 {
1362 if(!finest_) {
1363 assert(!coarsest_);
1364 finest_ = allocator_.allocate(1,0);
1365 finest_->element_ = ConstructionTraits<T>::construct(args);
1366 coarsest_ = finest_;
1367 coarsest_->coarser_ = coarsest_->finer_ = nullptr;
1368 }else{
1369 finest_->finer_ = allocator_.allocate(1,0);
1370 finest_->finer_->coarser_ = finest_;
1371 finest_ = finest_->finer_;
1372 finest_->finer_ = nullptr;
1373 finest_->element_ = ConstructionTraits<T>::construct(args);
1374 }
1375 ++levels_;
1376 }
1377
1378 template<class T, class A>
1379 typename Hierarchy<T,A>::Iterator Hierarchy<T,A>::finest()
1380 {
1381 return Iterator(finest_);
1382 }
1383
1384 template<class T, class A>
1385 typename Hierarchy<T,A>::Iterator Hierarchy<T,A>::coarsest()
1386 {
1387 return Iterator(coarsest_);
1388 }
1389
1390 template<class T, class A>
1391 typename Hierarchy<T,A>::ConstIterator Hierarchy<T,A>::finest() const
1392 {
1393 return ConstIterator(finest_);
1394 }
1395
1396 template<class T, class A>
1397 typename Hierarchy<T,A>::ConstIterator Hierarchy<T,A>::coarsest() const
1398 {
1399 return ConstIterator(coarsest_);
1400 }
1402 } // namespace Amg
1403} // namespace Dune
1404
1405#endif
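For orientation, the classes in this file are typically used together as in the short sketch below. The sketch is illustrative and not part of hierarchy.hh: the matrix, vector and smoother types, the aggregation criterion and the OverlapFlags typedef are assumptions for a purely sequential setting.

// Illustrative sketch only; all concrete types below are assumptions.
#include <dune/common/fmatrix.hh>
#include <dune/common/enumset.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/paamg/hierarchy.hh>
#include <dune/istl/paamg/pinfo.hh>
#include <dune/istl/paamg/smoother.hh>

typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > BCRSMat;
typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;   // sequential operator category
typedef Dune::Amg::SequentialInformation Comm;
typedef Dune::Amg::MatrixHierarchy<Operator,Comm> MHierarchy;
typedef Dune::Amg::CoarsenCriterion<
    Dune::Amg::UnSymmetricCriterion<BCRSMat,Dune::Amg::FirstDiagonal> > Criterion;
// Attribute set marking overlap/copy rows; assumed here, analogous to what the AMG preconditioner uses.
typedef Dune::NegateSet<Comm::OwnerSet> OverlapFlags;

void setupHierarchies(const BCRSMat& A, Vector& x, Vector& b)
{
  Operator fineOperator(A);
  Criterion criterion(15, 2000);             // maxLevel, coarsenTarget
  MHierarchy hierarchy(fineOperator);        // only the finest operator so far
  hierarchy.build<OverlapFlags>(criterion);  // aggregation + Galerkin products

  // Vector and smoother hierarchies follow the matrix hierarchy level by level.
  Dune::Amg::Hierarchy<Vector> lhs(x), rhs(b);
  hierarchy.coarsenVector(lhs);
  hierarchy.coarsenVector(rhs);

  typedef Dune::SeqSSOR<BCRSMat,Vector,Vector> Smoother;
  Dune::Amg::SmootherTraits<Smoother>::Arguments smootherArgs;  // defaults: 1 iteration, relaxation 1.0
  Dune::Amg::Hierarchy<Smoother> smoothers;
  hierarchy.coarsenSmoother(smoothers, smootherArgs);
}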
Provides classes for the Coloring process of AMG.
Portable very large unsigned integers.
This file implements a vector space as a tensor product of a given vector space. The number of compon...
Class providing information about the mapping of the vertices onto aggregates.
Definition: aggregates.hh:543
Base class of all aggregation criterions.
Definition: aggregates.hh:47
The criterion describing the stop criteria for the coarsening process.
Definition: hierarchy.hh:543
T AggregationCriterion
The criterion for tagging connections as strong and nodes as isolated. This might be e....
Definition: hierarchy.hh:549
CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2, double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
Constructor.
Definition: hierarchy.hh:561
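As an illustration (not taken from this file), a criterion is usually assembled from one of the aggregation criteria in aggregates.hh and then tuned through the inherited Parameters interface; the criterion, norm and matrix types below are assumptions.

// Sketch: configure a coarsening criterion (BCRSMat as in the sketch after the file listing).
typedef Dune::Amg::CoarsenCriterion<
    Dune::Amg::SymmetricCriterion<BCRSMat,Dune::Amg::RowSum> > Criterion;
Criterion criterion(10,     // at most 10 levels
                    2000);  // stop coarsening once a level is this small
criterion.setMinCoarsenRate(1.2);        // give up if the unknowns shrink slower than this
criterion.setDefaultValuesIsotropic(2);  // aggregate sizes for an isotropic 2d stencil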
Traits class for generically constructing non default constructable types.
Definition: construction.hh:38
Iterator over the levels in the hierarchy.
Definition: hierarchy.hh:160
LevelIterator(const LevelIterator< typename std::remove_const< C >::type, typename std::remove_const< T1 >::type > &other)
Copy constructor.
Definition: hierarchy.hh:177
T1 & dereference() const
Dereference the iterator.
Definition: hierarchy.hh:207
bool equals(const LevelIterator< typename std::remove_const< C >::type, typename std::remove_const< T1 >::type > &other) const
Equality check.
Definition: hierarchy.hh:191
bool isRedistributed() const
Check whether there was a redistribution at the current level.
Definition: hierarchy.hh:228
bool equals(const LevelIterator< const typename std::remove_const< C >::type, const typename std::remove_const< T1 >::type > &other) const
Equality check.
Definition: hierarchy.hh:200
void increment()
Move to the next coarser level.
Definition: hierarchy.hh:213
LevelIterator(const LevelIterator< const typename std::remove_const< C >::type, const typename std::remove_const< T1 >::type > &other)
Copy constructor.
Definition: hierarchy.hh:183
void decrement()
Move to the next fine level.
Definition: hierarchy.hh:219
T1 & getRedistributed() const
Get the redistributed container.
Definition: hierarchy.hh:237
LevelIterator()
Constructor.
Definition: hierarchy.hh:168
A hierarchy of containers (e.g. matrices or vectors).
Definition: hierarchy.hh:67
T MemberType
The type of the container we store.
Definition: hierarchy.hh:72
LevelIterator< Hierarchy< T, A >, T > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:257
LevelIterator< const Hierarchy< T, A >, const T > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:260
A::template rebind< Element >::other Allocator
The allocator to use for the list elements.
Definition: hierarchy.hh:110
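A small usage illustration (not taken from this file): a Hierarchy owns every level added via addCoarser()/addFiner(), but not the object it was constructed from by reference, and its LevelIterator walks from finest to coarsest. The vector type and sizes below are made up.

// Sketch: a two-level vector hierarchy, walked from finest to coarsest.
typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
Vector fine(100);                            // finest level, owned by the caller
Dune::Amg::Hierarchy<Vector> hierarchy(fine);
int coarseBlocks = 25;                       // construction argument: number of blocks
hierarchy.addCoarser(coarseBlocks);          // this level is allocated and owned by the hierarchy

Dune::Amg::Hierarchy<Vector>::Iterator lvl = hierarchy.finest();
for(std::size_t i = 0; i < hierarchy.levels(); ++i, ++lvl)
  std::cout << "level " << i << ": " << lvl->N() << " blocks" << std::endl;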
The hierarchies built by the coarsening process.
Definition: hierarchy.hh:317
Dune::Amg::Hierarchy< ParallelInformation, Allocator > ParallelInformationHierarchy
The type of the parallel information hierarchy.
Definition: hierarchy.hh:338
std::list< AggregatesMap *, AAllocator > AggregatesMapList
The type of the aggregates maps list.
Definition: hierarchy.hh:344
PI ParallelInformation
The type of the index set.
Definition: hierarchy.hh:326
Dune::Amg::Hierarchy< MatrixOperator, Allocator > ParallelMatrixHierarchy
The type of the parallel matrix hierarchy.
Definition: hierarchy.hh:335
A Allocator
The allocator to use.
Definition: hierarchy.hh:329
RedistributeInformation< ParallelInformation > RedistributeInfoType
The type of the redistribute information.
Definition: hierarchy.hh:347
std::list< RedistributeInfoType, RILAllocator > RedistributeInfoList
The type of the list of redistribute information.
Definition: hierarchy.hh:353
Dune::Amg::AggregatesMap< typename MatrixGraph< Matrix >::VertexDescriptor > AggregatesMap
The type of the aggregates map we use.
Definition: hierarchy.hh:332
Allocator::template rebind< AggregatesMap * >::other AAllocator
Allocator for pointers.
Definition: hierarchy.hh:341
MatrixOperator::matrix_type Matrix
The type of the matrix.
Definition: hierarchy.hh:323
Allocator::template rebind< RedistributeInfoType >::other RILAllocator
Allocator for RedistributeInfoType.
Definition: hierarchy.hh:350
M MatrixOperator
The type of the matrix operator.
Definition: hierarchy.hh:320
All parameters for AMG.
Definition: parameters.hh:391
Attaches properties to the edges and vertices of a graph.
Definition: graph.hh:976
Graph::VertexDescriptor VertexDescriptor
The vertex descriptor.
Definition: graph.hh:986
Facade class for stl conformant bidirectional iterators.
Definition: iteratorfacades.hh:275
A vector of blocks with memory management.
Definition: bvector.hh:313
Error class derived from the base class in dune-common.
Definition: istlexception.hh:16
Adapter to turn a random access iterator into a property map.
Definition: propertymap.hh:106
A generic dynamic dense matrix.
Definition: matrix.hh:555
A::size_type size_type
Type for indices and sizes.
Definition: matrix.hh:571
MatrixImp::DenseMatrixBase< T, A >::window_type row_type
The type implementing a matrix row.
Definition: matrix.hh:568
A simple stop watch.
Definition: timer.hh:52
void reset()
Reset timer while keeping the running/stopped state.
Definition: timer.hh:66
double elapsed() const
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:86
Portable very large unsigned integers.
Definition: bigunsignedint.hh:70
Helper classes for the construction of classes without empty constructor.
Provides classes for initializing the link attributes of a matrix graph.
Provides a class for building the Galerkin product based on an aggregation scheme.
Provides a class for identifying aggregates globally.
Provides classes for building the matrix graph.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
const AggregatesMapList & aggregatesMaps() const
Get the hierarchy of the mappings of the nodes onto aggregates.
Definition: hierarchy.hh:1094
bool isBuilt() const
Whether the hierarchy was built.
Definition: hierarchy.hh:1238
Hierarchy(const Hierarchy &other)
Copy constructor.
Definition: hierarchy.hh:1291
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: hierarchy.hh:1327
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: hierarchy.hh:1219
ConstIterator coarsest() const
Get an iterator positioned at the coarsest level.
Definition: hierarchy.hh:1397
void addCoarser(Arguments &args)
Add an element on a coarser level.
Definition: hierarchy.hh:1339
const RedistributeInfoList & redistributeInformation() const
Get the hierarchy of the information about redistributions,.
Definition: hierarchy.hh:1100
void coarsenVector(Hierarchy< BlockVector< V, TA > > &hierarchy) const
Coarsen the vector hierarchy according to the matrix hierarchy.
Definition: hierarchy.hh:1126
const ParallelInformationHierarchy & parallelInformation() const
Get the hierarchy of the parallel data distribution information.
Definition: hierarchy.hh:1019
void addFiner(Arguments &args)
Add an element on a finer level.
Definition: hierarchy.hh:1360
const ParallelMatrixHierarchy & matrices() const
Get the matrix hierarchy.
Definition: hierarchy.hh:1012
std::size_t maxlevels() const
Get the max number of levels in the hierarchy of processors.
Definition: hierarchy.hh:1225
static const V ISOLATED
Identifier of isolated vertices.
Definition: aggregates.hh:554
void recalculateGalerkin(const F &copyFlags)
Recalculate the galerkin products.
Definition: hierarchy.hh:1183
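For example, when the entries of the finest matrix change but its sparsity pattern does not, the coarse matrices can be refreshed without rebuilding the aggregates; the flag type is the same placeholder as in the build() sketch after the file listing and is an assumption.

// Sketch: refresh the Galerkin products after the finest matrix was modified in place.
hierarchy.recalculateGalerkin(OverlapFlags());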
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
std::size_t noVertices() const
Get the number of vertices.
Iterator coarsest()
Get an iterator positioned at the coarsest level.
Definition: hierarchy.hh:1385
Hierarchy(MemberType &first)
Construct a new hierarchy.
Definition: hierarchy.hh:1249
ConstIterator finest() const
Get an iterator positioned at the finest level.
Definition: hierarchy.hh:1391
Hierarchy(MemberType *first)
Construct a new hierarchy.
Definition: hierarchy.hh:1262
Hierarchy()
Construct a new empty hierarchy.
Definition: hierarchy.hh:1244
~Hierarchy()
Destructor.
Definition: hierarchy.hh:1274
AccumulationMode
Identifiers for the different accumulation modes.
Definition: parameters.hh:230
static T * construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
Iterator finest()
Get an iterator positioned at the finest level.
Definition: hierarchy.hh:1379
void build(const T &criterion)
Build the matrix hierarchy using aggregation.
Definition: hierarchy.hh:679
MatrixHierarchy(const MatrixOperator &fineMatrix, const ParallelInformation &pinfo=ParallelInformation())
Constructor.
Definition: hierarchy.hh:660
void free()
Free the allocated memory.
static void deconstruct(T *t)
Destroys an object.
Definition: construction.hh:61
void coarsenSmoother(Hierarchy< S, TA > &smoothers, const typename SmootherTraits< S >::Arguments &args) const
Coarsen the smoother hierarchy according to the matrix hierarchy.
Definition: hierarchy.hh:1153
void buildDependency(G &graph, const typename C::Matrix &matrix, C criterion, bool finestLevel)
Build the dependency of the matrix graph.
std::tuple< int, int, int, int > buildAggregates(const M &matrix, G &graph, const C &criterion, bool finestLevel)
Build the aggregates.
void getCoarsestAggregatesOnFinest(std::vector< std::size_t > &data) const
Get the mapping of fine level unknowns to coarse level aggregates.
Definition: hierarchy.hh:1025
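A hedged usage sketch (hierarchy as in the sketch after the file listing):

// Sketch: fetch, for every unknown on the finest level, its aggregate number on the coarsest level.
std::vector<std::size_t> aggregateOfUnknown;
hierarchy.getCoarsestAggregatesOnFinest(aggregateOfUnknown);
// aggregateOfUnknown[i] can now be attached to fine unknown i, e.g. to visualise the partition.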
@ MAX_PROCESSES
Hard limit for the number of processes allowed.
Definition: hierarchy.hh:55
@ atOnceAccu
Accumulate data on one process at once.
Definition: parameters.hh:242
@ successiveAccu
Successively accumulate to fewer processes.
Definition: parameters.hh:246
int countNonZeros(const M &matrix)
Get the number of nonzero fields in the matrix.
Definition: matrixutils.hh:153
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:93
DInfoType dinfo(std::cout)
Stream for informative output.
Definition: stdstreams.hh:138
DVerbType dverb(std::cout)
Singleton of verbose debug stream.
Definition: stdstreams.hh:114
Provides a map between global and local indices.
Provides a class for building the index set and remote indices on the coarse level.
Functionality for redistributing a sparse matrix.
Some handy generic functions for ISTL matrices.
Dune namespace.
Definition: alignment.hh:11
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, idxtype nparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > *&outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition: repartition.hh:1256
void redistributeMatrix(M &origMatrix, M &newMatrix, C &origComm, C &newComm, RedistributeInformation< C > &ri)
Redistribute a matrix according to given domain decompositions.
Definition: matrixredistribute.hh:834
STL namespace.
Classes for the generic construction and application of the smoothers.
Standard Dune debug streams.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:64
Tag identifying the visited property of a vertex.
Definition: properties.hh:27
A property map that applies the identity function to integers.
Definition: propertymap.hh:291
Selector for the property map type.
Definition: propertymap.hh:318
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:21
@ nonoverlapping
Category for non-overlapping solvers.
Definition: solvercategory.hh:23
@ overlapping
Category for overlapping solvers.
Definition: solvercategory.hh:25
A simple timing class.
Prolongation and restriction for amg.
Definition of the DUNE_UNUSED macro for the case that config.h is not available.
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters with.
Definition: unused.hh:18