Dune Core Modules (2.7.1)

matrixhierarchy.hh
Go to the documentation of this file.
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_AMG_MATRIXHIERARCHY_HH
4#define DUNE_AMG_MATRIXHIERARCHY_HH
5
6#include <algorithm>
7#include <tuple>
8#include "aggregates.hh"
9#include "graph.hh"
10#include "galerkin.hh"
11#include "renumberer.hh"
12#include "graphcreator.hh"
13#include "hierarchy.hh"
14#include <dune/istl/bvector.hh>
25
26namespace Dune
27{
28 namespace Amg
29 {
40 enum {
48 MAX_PROCESSES = 72000
49 };
50
57 template<class M, class PI, class A=std::allocator<M> >
58 class MatrixHierarchy
59 {
60 public:
62 typedef M MatrixOperator;
63
65 typedef typename MatrixOperator::matrix_type Matrix;
66
68 typedef PI ParallelInformation;
69
71 typedef A Allocator;
72
74 typedef Dune::Amg::AggregatesMap<typename MatrixGraph<Matrix>::VertexDescriptor> AggregatesMap;
75
77 typedef Dune::Amg::Hierarchy<MatrixOperator,Allocator> ParallelMatrixHierarchy;
78
80 typedef Dune::Amg::Hierarchy<ParallelInformation,Allocator> ParallelInformationHierarchy;
81
83 using AAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<AggregatesMap*>;
84
86 typedef std::list<AggregatesMap*,AAllocator> AggregatesMapList;
87
89 typedef RedistributeInformation<ParallelInformation> RedistributeInfoType;
90
92 using RILAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<RedistributeInfoType>;
93
95 typedef std::list<RedistributeInfoType,RILAllocator> RedistributeInfoList;
96
102 MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
103 std::shared_ptr<ParallelInformation> pinfo = std::make_shared<ParallelInformation>());
104
105 ~MatrixHierarchy();
106
112 template<typename O, typename T>
113 void build(const T& criterion);
114
122 template<class F>
123 void recalculateGalerkin(const F& copyFlags);
124
129 template<class V, class BA, class TA>
130 void coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const;
131
137 template<class S, class TA>
138 void coarsenSmoother(Hierarchy<S,TA>& smoothers,
139 const typename SmootherTraits<S>::Arguments& args) const;
140
145 std::size_t levels() const;
146
151 std::size_t maxlevels() const;
152
153 bool hasCoarsest() const;
154
159 bool isBuilt() const;
160
165 const ParallelMatrixHierarchy& matrices() const;
166
171 const ParallelInformationHierarchy& parallelInformation() const;
172
177 const AggregatesMapList& aggregatesMaps() const;
178
184 const RedistributeInfoList& redistributeInformation() const;
185
186 double getProlongationDampingFactor() const
187 {
188 return prolongDamp_;
189 }
190
201 void getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const;
202
203 private:
204 typedef typename ConstructionTraits<MatrixOperator>::Arguments MatrixArgs;
205 typedef typename ConstructionTraits<ParallelInformation>::Arguments CommunicationArgs;
207 AggregatesMapList aggregatesMaps_;
209 RedistributeInfoList redistributes_;
211 ParallelMatrixHierarchy matrices_;
213 ParallelInformationHierarchy parallelInformation_;
214
216 bool built_;
217
219 int maxlevels_;
220
221 double prolongDamp_;
222
226 template<class Matrix, bool print>
227 struct MatrixStats
228 {
229
233 static void stats(const Matrix& matrix)
234 {
235 DUNE_UNUSED_PARAMETER(matrix);
236 }
237 };
238
239 template<class Matrix>
240 struct MatrixStats<Matrix,true>
241 {
242 struct calc
243 {
244 typedef typename Matrix::size_type size_type;
245 typedef typename Matrix::row_type matrix_row;
246
247 calc()
248 {
249 min=std::numeric_limits<size_type>::max();
250 max=0;
251 sum=0;
252 }
253
254 void operator()(const matrix_row& row)
255 {
256 min=std::min(min, row.size());
257 max=std::max(max, row.size());
258 sum += row.size();
259 }
260
261 size_type min;
262 size_type max;
263 size_type sum;
264 };
268 static void stats(const Matrix& matrix)
269 {
270 calc c= for_each(matrix.begin(), matrix.end(), calc());
271 dinfo<<"Matrix row: min="<<c.min<<" max="<<c.max
272 <<" average="<<static_cast<double>(c.sum)/matrix.N()
273 <<std::endl;
274 }
275 };
276 };
277
281 template<class T>
282 class CoarsenCriterion : public T
283 {
284 public:
289 typedef T AggregationCriterion;
290
301 CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2,
302 double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
303 : AggregationCriterion(Dune::Amg::Parameters(maxLevel, coarsenTarget, minCoarsenRate, prolongDamp, accumulate))
304 {}
305
307 : AggregationCriterion(parms)
308 {}
309
310 };
311
312 template<typename M, typename C1>
313 bool repartitionAndDistributeMatrix(const M& origMatrix,
314 std::shared_ptr<M> newMatrix,
315 SequentialInformation& origComm,
316 std::shared_ptr<SequentialInformation>& newComm,
317 RedistributeInformation<SequentialInformation>& ri,
318 int nparts, C1& criterion)
319 {
320 DUNE_UNUSED_PARAMETER(origMatrix);
321 DUNE_UNUSED_PARAMETER(newMatrix);
322 DUNE_UNUSED_PARAMETER(origComm);
323 DUNE_UNUSED_PARAMETER(newComm);
324 DUNE_UNUSED_PARAMETER(ri);
325 DUNE_UNUSED_PARAMETER(nparts);
326 DUNE_UNUSED_PARAMETER(criterion);
327 DUNE_THROW(NotImplemented, "Redistribution does not make sense in sequential code!");
328 }
329
330
331 template<typename M, typename C, typename C1>
332 bool repartitionAndDistributeMatrix(const M& origMatrix,
333 std::shared_ptr<M> newMatrix,
334 C& origComm,
335 std::shared_ptr<C>& newComm,
336 RedistributeInformation<C>& ri,
337 int nparts, C1& criterion)
338 {
339 Timer time;
340#ifdef AMG_REPART_ON_COMM_GRAPH
341 // Do not repartition the matrix graph, but a graph of the communication scheme.
342 bool existentOnRedist=Dune::commGraphRepartition(origMatrix, origComm, nparts, newComm,
343 ri.getInterface(),
344 criterion.debugLevel()>1);
345
346#else
347 typedef Dune::Amg::MatrixGraph<const M> MatrixGraph;
348 typedef Dune::Amg::PropertiesGraph<MatrixGraph,
349 VertexProperties,
350 EdgeProperties,
351 IdentityMap,
352 IdentityMap> PropertiesGraph;
353 MatrixGraph graph(origMatrix);
354 PropertiesGraph pgraph(graph);
355 buildDependency(pgraph, origMatrix, criterion, false);
356
357#ifdef DEBUG_REPART
358 if(origComm.communicator().rank()==0)
359 std::cout<<"Original matrix"<<std::endl;
360 origComm.communicator().barrier();
361 printGlobalSparseMatrix(origMatrix, origComm, std::cout);
362#endif
363 bool existentOnRedist=Dune::graphRepartition(pgraph, origComm, nparts,
364 newComm, ri.getInterface(),
365 criterion.debugLevel()>1);
366#endif // if else AMG_REPART
367
368 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
369 std::cout<<"Repartitioning took "<<time.elapsed()<<" seconds."<<std::endl;
370
371 ri.setSetup();
372
373#ifdef DEBUG_REPART
374 ri.checkInterface(origComm.indexSet(), newComm->indexSet(), origComm.communicator());
375#endif
376
377 redistributeMatrix(const_cast<M&>(origMatrix), *newMatrix, origComm, *newComm, ri);
378
379#ifdef DEBUG_REPART
380 if(origComm.communicator().rank()==0)
381 std::cout<<"Original matrix"<<std::endl;
382 origComm.communicator().barrier();
383 if(newComm->communicator().size()>0)
384 printGlobalSparseMatrix(*newMatrix, *newComm, std::cout);
385 origComm.communicator().barrier();
386#endif
387
388 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
389 std::cout<<"Redistributing matrix took "<<time.elapsed()<<" seconds."<<std::endl;
390 return existentOnRedist;
391
392 }
393
394 template<class M, class IS, class A>
395 MatrixHierarchy<M,IS,A>::MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
396 std::shared_ptr<ParallelInformation> pinfo)
397 : matrices_(fineMatrix),
398 parallelInformation_(pinfo)
399 {
400 if (SolverCategory::category(*fineMatrix) != SolverCategory::category(*pinfo))
401 DUNE_THROW(ISTLError, "MatrixOperator and ParallelInformation must belong to the same category!");
402 }
403
404 template<class M, class IS, class A>
405 template<typename O, typename T>
406 void MatrixHierarchy<M,IS,A>::build(const T& criterion)
407 {
408 prolongDamp_ = criterion.getProlongationDampingFactor();
409 typedef O OverlapFlags;
410 typedef typename ParallelMatrixHierarchy::Iterator MatIterator;
411 typedef typename ParallelInformationHierarchy::Iterator PInfoIterator;
412
413 static const int noints=(Dune::Amg::MAX_PROCESSES/4096>0) ? (Dune::Amg::MAX_PROCESSES/4096) : 1;
414
415 typedef bigunsignedint<sizeof(int)*8*noints> BIGINT;
416 GalerkinProduct<ParallelInformation> productBuilder;
417 MatIterator mlevel = matrices_.finest();
418 MatrixStats<typename M::matrix_type,MINIMAL_DEBUG_LEVEL<=INFO_DEBUG_LEVEL>::stats(mlevel->getmat());
419
420 PInfoIterator infoLevel = parallelInformation_.finest();
421 BIGINT finenonzeros=countNonZeros(mlevel->getmat());
422 finenonzeros = infoLevel->communicator().sum(finenonzeros);
423 BIGINT allnonzeros = finenonzeros;
424
425
426 int level = 0;
427 int rank = 0;
428
429 BIGINT unknowns = mlevel->getmat().N();
430
431 unknowns = infoLevel->communicator().sum(unknowns);
432 double dunknowns=unknowns.todouble();
433 infoLevel->buildGlobalLookup(mlevel->getmat().N());
434 redistributes_.push_back(RedistributeInfoType());
435
436 for(; level < criterion.maxLevel(); ++level, ++mlevel) {
437 assert(matrices_.levels()==redistributes_.size());
438 rank = infoLevel->communicator().rank();
439 if(rank==0 && criterion.debugLevel()>1)
440 std::cout<<"Level "<<level<<" has "<<dunknowns<<" unknowns, "<<dunknowns/infoLevel->communicator().size()
441 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
442
443 MatrixOperator* matrix=&(*mlevel);
444 ParallelInformation* info =&(*infoLevel);
445
446 if((
447#if HAVE_PARMETIS
448 criterion.accumulate()==successiveAccu
449#else
450 false
451#endif
452 || (criterion.accumulate()==atOnceAccu
453 && dunknowns < 30*infoLevel->communicator().size()))
454 && infoLevel->communicator().size()>1 &&
455 dunknowns/infoLevel->communicator().size() <= criterion.coarsenTarget())
456 {
457 // accumulate to fewer processors
458 std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
459 std::shared_ptr<ParallelInformation> redistComm;
460 std::size_t nodomains = (std::size_t)std::ceil(dunknowns/(criterion.minAggregateSize()
461 *criterion.coarsenTarget()));
462 if( nodomains<=criterion.minAggregateSize()/2 ||
463 dunknowns <= criterion.coarsenTarget() )
464 nodomains=1;
465
466 bool existentOnNextLevel =
467 repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
468 redistComm, redistributes_.back(), nodomains,
469 criterion);
470 BIGINT unknownsRedist = redistMat->N();
471 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
472 dunknowns= unknownsRedist.todouble();
473 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1)
474 std::cout<<"Level "<<level<<" (redistributed) has "<<dunknowns<<" unknowns, "<<dunknowns/redistComm->communicator().size()
475 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
476 MatrixArgs args(redistMat, *redistComm);
477 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
478 assert(mlevel.isRedistributed());
479 infoLevel.addRedistributed(redistComm);
480 infoLevel->freeGlobalLookup();
481
482 if(!existentOnNextLevel)
483 // We do not hold any data on the redistributed partitioning
484 break;
485
486 // Work on the redistributed Matrix from now on
487 matrix = &(mlevel.getRedistributed());
488 info = &(infoLevel.getRedistributed());
489 info->buildGlobalLookup(matrix->getmat().N());
490 }
491
492 rank = info->communicator().rank();
493 if(dunknowns <= criterion.coarsenTarget())
494 // No further coarsening needed
495 break;
496
497 typedef PropertiesGraphCreator<MatrixOperator,ParallelInformation> GraphCreator;
498 typedef typename GraphCreator::PropertiesGraph PropertiesGraph;
499 typedef typename GraphCreator::GraphTuple GraphTuple;
500
501 typedef typename PropertiesGraph::VertexDescriptor Vertex;
502
503 std::vector<bool> excluded(matrix->getmat().N(), false);
504
505 GraphTuple graphs = GraphCreator::create(*matrix, excluded, *info, OverlapFlags());
506
507 AggregatesMap* aggregatesMap=new AggregatesMap(std::get<1>(graphs)->maxVertex()+1);
508
509 aggregatesMaps_.push_back(aggregatesMap);
510
511 Timer watch;
512 watch.reset();
513 int noAggregates, isoAggregates, oneAggregates, skippedAggregates;
514
515 std::tie(noAggregates, isoAggregates, oneAggregates, skippedAggregates) =
516 aggregatesMap->buildAggregates(matrix->getmat(), *(std::get<1>(graphs)), criterion, level==0);
517
518 if(rank==0 && criterion.debugLevel()>2)
519 std::cout<<" Have built "<<noAggregates<<" aggregates totally ("<<isoAggregates<<" isolated aggregates, "<<
520 oneAggregates<<" aggregates of one vertex, and skipped "<<
521 skippedAggregates<<" aggregates)."<<std::endl;
522#ifdef TEST_AGGLO
523 {
524 // calculate size of local matrix in the distributed direction
525 int start, end, overlapStart, overlapEnd;
526 int procs=info->communicator().rank();
527 int n = UNKNOWNS/procs; // number of unknowns per process
528 int bigger = UNKNOWNS%procs; // number of processes with n+1 unknowns
529
530 // Compute owner region
531 if(rank<bigger) {
532 start = rank*(n+1);
533 end = (rank+1)*(n+1);
534 }else{
535 start = bigger + rank * n;
536 end = bigger + (rank + 1) * n;
537 }
538
539 // Compute overlap region
540 if(start>0)
541 overlapStart = start - 1;
542 else
543 overlapStart = start;
544
545 if(end<UNKNOWNS)
546 overlapEnd = end + 1;
547 else
548 overlapEnd = end;
549
550 assert((UNKNOWNS)*(overlapEnd-overlapStart)==aggregatesMap->noVertices());
551 for(int j=0; j< UNKNOWNS; ++j)
552 for(int i=0; i < UNKNOWNS; ++i)
553 {
554 if(i>=overlapStart && i<overlapEnd)
555 {
556 int no = (j/2)*((UNKNOWNS)/2)+i/2;
557 (*aggregatesMap)[j*(overlapEnd-overlapStart)+i-overlapStart]=no;
558 }
559 }
560 }
561#endif
562 if(criterion.debugLevel()>1 && info->communicator().rank()==0)
563 std::cout<<"aggregating finished."<<std::endl;
564
565 BIGINT gnoAggregates=noAggregates;
566 gnoAggregates = info->communicator().sum(gnoAggregates);
567 double dgnoAggregates = gnoAggregates.todouble();
568#ifdef TEST_AGGLO
569 BIGINT gnoAggregates=((UNKNOWNS)/2)*((UNKNOWNS)/2);
570#endif
571
572 if(criterion.debugLevel()>2 && rank==0)
573 std::cout << "Building "<<dgnoAggregates<<" aggregates took "<<watch.elapsed()<<" seconds."<<std::endl;
574
575 if(dgnoAggregates==0 || dunknowns/dgnoAggregates<criterion.minCoarsenRate())
576 {
577 if(rank==0)
578 {
579 if(dgnoAggregates>0)
580 std::cerr << "Stopped coarsening because of rate breakdown "<<dunknowns<<"/"<<dgnoAggregates
581 <<"="<<dunknowns/dgnoAggregates<<"<"
582 <<criterion.minCoarsenRate()<<std::endl;
583 else
584 std::cerr<< "Could not build any aggregates. Probably no connected nodes."<<std::endl;
585 }
586 aggregatesMap->free();
587 delete aggregatesMap;
588 aggregatesMaps_.pop_back();
589
590 if(criterion.accumulate() && mlevel.isRedistributed() && info->communicator().size()>1) {
591 // coarse level matrix was already redistributed, but to more than 1 process
592 // Therefore need to delete the redistribution. Further down it will
593 // then be redistributed to 1 process
594 delete &(mlevel.getRedistributed().getmat());
595 mlevel.deleteRedistributed();
596 delete &(infoLevel.getRedistributed());
597 infoLevel.deleteRedistributed();
598 redistributes_.back().resetSetup();
599 }
600
601 break;
602 }
603 unknowns = noAggregates;
604 dunknowns = dgnoAggregates;
605
606 CommunicationArgs commargs(info->communicator(),info->category());
607 parallelInformation_.addCoarser(commargs);
608
609 ++infoLevel; // parallel information on coarse level
610
611 typename PropertyMapTypeSelector<VertexVisitedTag,PropertiesGraph>::Type visitedMap =
612 get(VertexVisitedTag(), *(std::get<1>(graphs)));
613
614 watch.reset();
615 int aggregates = IndicesCoarsener<ParallelInformation,OverlapFlags>
616 ::coarsen(*info,
617 *(std::get<1>(graphs)),
618 visitedMap,
619 *aggregatesMap,
620 *infoLevel,
621 noAggregates);
622 GraphCreator::free(graphs);
623
624 if(criterion.debugLevel()>2) {
625 if(rank==0)
626 std::cout<<"Coarsening of index sets took "<<watch.elapsed()<<" seconds."<<std::endl;
627 }
628
629 watch.reset();
630
631 infoLevel->buildGlobalLookup(aggregates);
632 AggregatesPublisher<Vertex,OverlapFlags,ParallelInformation>::publish(*aggregatesMap,
633 *info,
634 infoLevel->globalLookup());
635
636
637 if(criterion.debugLevel()>2) {
638 if(rank==0)
639 std::cout<<"Communicating global aggregate numbers took "<<watch.elapsed()<<" seconds."<<std::endl;
640 }
641
642 watch.reset();
643 std::vector<bool>& visited=excluded;
644
645 typedef std::vector<bool>::iterator Iterator;
646 typedef IteratorPropertyMap<Iterator, IdentityMap> VisitedMap2;
647 Iterator end = visited.end();
648 for(Iterator iter= visited.begin(); iter != end; ++iter)
649 *iter=false;
650
651 VisitedMap2 visitedMap2(visited.begin(), Dune::IdentityMap());
652
653 std::shared_ptr<typename MatrixOperator::matrix_type>
654 coarseMatrix(productBuilder.build(*(std::get<0>(graphs)), visitedMap2,
655 *info,
656 *aggregatesMap,
657 aggregates,
658 OverlapFlags()));
659 dverb<<"Building of sparsity pattern took "<<watch.elapsed()<<std::endl;
660 watch.reset();
661 info->freeGlobalLookup();
662
663 delete std::get<0>(graphs);
664 productBuilder.calculate(matrix->getmat(), *aggregatesMap, *coarseMatrix, *infoLevel, OverlapFlags());
665
666 if(criterion.debugLevel()>2) {
667 if(rank==0)
668 std::cout<<"Calculation entries of Galerkin product took "<<watch.elapsed()<<" seconds."<<std::endl;
669 }
670
671 BIGINT nonzeros = countNonZeros(*coarseMatrix);
672 allnonzeros = allnonzeros + infoLevel->communicator().sum(nonzeros);
673 MatrixArgs args(coarseMatrix, *infoLevel);
674
675 matrices_.addCoarser(args);
676 redistributes_.push_back(RedistributeInfoType());
677 } // end level loop
678
679
680 infoLevel->freeGlobalLookup();
681
682 built_=true;
683 AggregatesMap* aggregatesMap=new AggregatesMap(0);
684 aggregatesMaps_.push_back(aggregatesMap);
685
686 if(criterion.debugLevel()>0) {
687 if(level==criterion.maxLevel()) {
688 BIGINT unknownsLevel = mlevel->getmat().N();
689 unknownsLevel = infoLevel->communicator().sum(unknownsLevel);
690 double dunknownsLevel = unknownsLevel.todouble();
691 if(rank==0 && criterion.debugLevel()>1) {
692 std::cout<<"Level "<<level<<" has "<<dunknownsLevel<<" unknowns, "<<dunknownsLevel/infoLevel->communicator().size()
693 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
694 }
695 }
696 }
697
698 if(criterion.accumulate() && !redistributes_.back().isSetup() &&
699 infoLevel->communicator().size()>1) {
700#if HAVE_MPI && !HAVE_PARMETIS
701 if(criterion.accumulate()==successiveAccu &&
702 infoLevel->communicator().rank()==0)
703 std::cerr<<"Successive accumulation of data on coarse levels only works with ParMETIS installed."
704 <<" Fell back to accumulation to one domain on coarsest level"<<std::endl;
705#endif
706
707 // accumulate to fewer processors
708 std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
709 std::shared_ptr<ParallelInformation> redistComm;
710 int nodomains = 1;
711
712 repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
713 redistComm, redistributes_.back(), nodomains,criterion);
714 MatrixArgs args(redistMat, *redistComm);
715 BIGINT unknownsRedist = redistMat->N();
716 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
717
718 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1) {
719 double dunknownsRedist = unknownsRedist.todouble();
720 std::cout<<"Level "<<level<<" redistributed has "<<dunknownsRedist<<" unknowns, "<<dunknownsRedist/redistComm->communicator().size()
721 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
722 }
723 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
724 infoLevel.addRedistributed(redistComm);
725 infoLevel->freeGlobalLookup();
726 }
727
728 int levels = matrices_.levels();
729 maxlevels_ = parallelInformation_.finest()->communicator().max(levels);
730 assert(matrices_.levels()==redistributes_.size());
731 if(hasCoarsest() && rank==0 && criterion.debugLevel()>1)
732 std::cout<<"operator complexity: "<<allnonzeros.todouble()/finenonzeros.todouble()<<std::endl;
733
734 }
735
736 template<class M, class IS, class A>
737 const typename MatrixHierarchy<M,IS,A>::ParallelMatrixHierarchy&
738 MatrixHierarchy<M,IS,A>::matrices() const
739 {
740 return matrices_;
741 }
742
743 template<class M, class IS, class A>
744 const typename MatrixHierarchy<M,IS,A>::ParallelInformationHierarchy&
745 MatrixHierarchy<M,IS,A>::parallelInformation() const
746 {
747 return parallelInformation_;
748 }
749
750 template<class M, class IS, class A>
751 void MatrixHierarchy<M,IS,A>::getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const
752 {
753 int levels=aggregatesMaps().size();
754 int maxlevels=parallelInformation_.finest()->communicator().max(levels);
755 std::size_t size=(*(aggregatesMaps().begin()))->noVertices();
756 // We need an auxiliary vector for the consecutive prolongation.
757 std::vector<std::size_t> tmp;
758 std::vector<std::size_t> *coarse, *fine;
759
760 // make sure the allocated space suffices.
761 tmp.reserve(size);
762 data.reserve(size);
763
764 // Correctly assign coarse and fine for the first prolongation such that
765 // we end up in data in the end.
766 if(levels%2==0) {
767 coarse=&tmp;
768 fine=&data;
769 }else{
770 coarse=&data;
771 fine=&tmp;
772 }
773
774 // Number the unknowns on the coarsest level consecutively for each process.
775 if(levels==maxlevels) {
776 const AggregatesMap& map = *(*(++aggregatesMaps().rbegin()));
777 std::size_t m=0;
778
779 for(typename AggregatesMap::const_iterator iter = map.begin(); iter != map.end(); ++iter)
780 if(*iter< AggregatesMap::ISOLATED)
781 m=std::max(*iter,m);
782
783 coarse->resize(m+1);
784 std::size_t i=0;
785 srand((unsigned)std::clock());
786 std::set<size_t> used;
787 for(typename std::vector<std::size_t>::iterator iter=coarse->begin(); iter != coarse->end();
788 ++iter, ++i)
789 {
790 std::pair<std::set<std::size_t>::iterator,bool> ibpair
791 = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0)))*coarse->size());
792
793 while(!ibpair.second)
794 ibpair = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
795 *iter=*(ibpair.first);
796 }
797 }
798
799 typename ParallelInformationHierarchy::Iterator pinfo = parallelInformation().coarsest();
800 --pinfo;
801
802 // Now consecutively project the numbers to the finest level.
803 for(typename AggregatesMapList::const_reverse_iterator aggregates=++aggregatesMaps().rbegin();
804 aggregates != aggregatesMaps().rend(); ++aggregates,--levels) {
805
806 fine->resize((*aggregates)->noVertices());
807 fine->assign(fine->size(), 0);
808 Transfer<typename AggregatesMap::AggregateDescriptor, std::vector<std::size_t>, ParallelInformation>
809 ::prolongateVector(*(*aggregates), *coarse, *fine, static_cast<std::size_t>(1), *pinfo);
810 --pinfo;
811 std::swap(coarse, fine);
812 }
813
814 // Assertion to check that we really projected to data on the last step.
815 assert(coarse==&data);
816 }
817
818 template<class M, class IS, class A>
819 const typename MatrixHierarchy<M,IS,A>::AggregatesMapList&
820 MatrixHierarchy<M,IS,A>::aggregatesMaps() const
821 {
822 return aggregatesMaps_;
823 }
824 template<class M, class IS, class A>
825 const typename MatrixHierarchy<M,IS,A>::RedistributeInfoList&
826 MatrixHierarchy<M,IS,A>::redistributeInformation() const
827 {
828 return redistributes_;
829 }
830
831 template<class M, class IS, class A>
832 MatrixHierarchy<M,IS,A>::~MatrixHierarchy()
833 {
834 typedef typename AggregatesMapList::reverse_iterator AggregatesMapIterator;
835 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
836 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
837
838 AggregatesMapIterator amap = aggregatesMaps_.rbegin();
839 InfoIterator info = parallelInformation_.coarsest();
840 for(Iterator level=matrices_.coarsest(), finest=matrices_.finest(); level != finest; --level, --info, ++amap) {
841 (*amap)->free();
842 delete *amap;
843 }
844 delete *amap;
845 }
846
847 template<class M, class IS, class A>
848 template<class V, class BA, class TA>
849 void MatrixHierarchy<M,IS,A>::coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const
850 {
851 assert(hierarchy.levels()==1);
852 typedef typename ParallelMatrixHierarchy::ConstIterator Iterator;
853 typedef typename RedistributeInfoList::const_iterator RIter;
854 RIter redist = redistributes_.begin();
855
856 Iterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
857 int level=0;
858 if(redist->isSetup())
859 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
860 Dune::dvverb<<"Level "<<level<<" has "<<matrices_.finest()->getmat().N()<<" unknowns!"<<std::endl;
861
862 while(matrix != coarsest) {
863 ++matrix; ++level; ++redist;
864 Dune::dvverb<<"Level "<<level<<" has "<<matrix->getmat().N()<<" unknowns!"<<std::endl;
865
866 hierarchy.addCoarser(matrix->getmat().N());
867 if(redist->isSetup())
868 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
869
870 }
871
872 }
873
874 template<class M, class IS, class A>
875 template<class S, class TA>
876 void MatrixHierarchy<M,IS,A>::coarsenSmoother(Hierarchy<S,TA>& smoothers,
877 const typename SmootherTraits<S>::Arguments& sargs) const
878 {
879 assert(smoothers.levels()==0);
880 typedef typename ParallelMatrixHierarchy::ConstIterator MatrixIterator;
881 typedef typename ParallelInformationHierarchy::ConstIterator PinfoIterator;
882 typedef typename AggregatesMapList::const_iterator AggregatesIterator;
883
884 typename ConstructionTraits<S>::Arguments cargs;
885 cargs.setArgs(sargs);
886 PinfoIterator pinfo = parallelInformation_.finest();
887 AggregatesIterator aggregates = aggregatesMaps_.begin();
888 int level=0;
889 for(MatrixIterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
890 matrix != coarsest; ++matrix, ++pinfo, ++aggregates, ++level) {
891 cargs.setMatrix(matrix->getmat(), **aggregates);
892 cargs.setComm(*pinfo);
893 smoothers.addCoarser(cargs);
894 }
895 if(maxlevels()>levels()) {
896 // This is not the globally coarsest level and therefore smoothing is needed
897 cargs.setMatrix(matrices_.coarsest()->getmat(), **aggregates);
898 cargs.setComm(*pinfo);
899 smoothers.addCoarser(cargs);
900 ++level;
901 }
902 }
903
904 template<class M, class IS, class A>
905 template<class F>
906 void MatrixHierarchy<M,IS,A>::recalculateGalerkin(const F& copyFlags)
907 {
908 typedef typename AggregatesMapList::iterator AggregatesMapIterator;
909 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
910 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
911
912 AggregatesMapIterator amap = aggregatesMaps_.begin();
913 BaseGalerkinProduct productBuilder;
914 InfoIterator info = parallelInformation_.finest();
915 typename RedistributeInfoList::iterator riIter = redistributes_.begin();
916 Iterator level = matrices_.finest(), coarsest=matrices_.coarsest();
917 if(level.isRedistributed()) {
918 info->buildGlobalLookup(level->getmat().N());
919 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
920 const_cast<Matrix&>(level.getRedistributed().getmat()),
921 *info,info.getRedistributed(), *riIter);
922 info->freeGlobalLookup();
923 }
924
925 for(; level!=coarsest; ++amap) {
926 const Matrix& fine = (level.isRedistributed() ? level.getRedistributed() : *level).getmat();
927 ++level;
928 ++info;
929 ++riIter;
930 productBuilder.calculate(fine, *(*amap), const_cast<Matrix&>(level->getmat()), *info, copyFlags);
931 if(level.isRedistributed()) {
932 info->buildGlobalLookup(level->getmat().N());
933 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
934 const_cast<Matrix&>(level.getRedistributed().getmat()), *info,
935 info.getRedistributed(), *riIter);
936 info->freeGlobalLookup();
937 }
938 }
939 }
940
941 template<class M, class IS, class A>
942 std::size_t MatrixHierarchy<M,IS,A>::levels() const
943 {
944 return matrices_.levels();
945 }
946
947 template<class M, class IS, class A>
948 std::size_t MatrixHierarchy<M,IS,A>::maxlevels() const
949 {
950 return maxlevels_;
951 }
952
953 template<class M, class IS, class A>
954 bool MatrixHierarchy<M,IS,A>::hasCoarsest() const
955 {
956 return levels()==maxlevels() &&
957 (!matrices_.coarsest().isRedistributed() ||matrices_.coarsest()->getmat().N()>0);
958 }
959
960 template<class M, class IS, class A>
961 bool MatrixHierarchy<M,IS,A>::isBuilt() const
962 {
963 return built_;
964 }
965
967 } // namespace Amg
968} // namespace Dune
969
970#endif // end DUNE_AMG_MATRIXHIERARCHY_HH
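The listing above is the header itself. As a rough orientation, the following sketch shows how MatrixHierarchy and CoarsenCriterion are typically driven for a sequential matrix. It mirrors the pattern used internally by Dune::Amg::AMG and the paamg tests; the concrete matrix and vector types, the criterion parameters and the NegateSet<...> overlap flags are illustrative assumptions, not part of this file.

// Usage sketch (not part of matrixhierarchy.hh): build a matrix hierarchy by
// aggregation for a sequential BCRSMatrix. Types, parameters and the overlap
// flags are assumptions chosen for illustration.
#include <iostream>
#include <memory>
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/common/enumset.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>
#include <dune/istl/paamg/aggregates.hh>

using Matrix    = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> >;
using Vector    = Dune::BlockVector<Dune::FieldVector<double,1> >;
using Operator  = Dune::MatrixAdapter<Matrix,Vector,Vector>;
using Comm      = Dune::Amg::SequentialInformation;
using Criterion = Dune::Amg::CoarsenCriterion<
  Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> >;

void buildHierarchy(Matrix& A)          // A: an assembled fine-level matrix
{
  auto fineOperator = std::make_shared<Operator>(A);

  // maxLevel=10, coarsenTarget=2000; the remaining parameters keep their defaults.
  Criterion criterion(10, 2000);

  // The sequential parallel information is created by the default argument.
  Dune::Amg::MatrixHierarchy<Operator,Comm> hierarchy(fineOperator);

  // The template argument selects the overlap flags; the sequential AMG code
  // path uses the negation of the owner set.
  hierarchy.build<Dune::NegateSet<Comm::OwnerSet> >(criterion);

  std::cout << "levels: " << hierarchy.levels()
            << ", coarsest reached: " << std::boolalpha << hierarchy.hasCoarsest()
            << std::endl;
}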
Provides classes for the Coloring process of AMG.
This file implements a vector space as a tensor product of a given vector space. The number of compon...
Class providing information about the mapping of the vertices onto aggregates.
Definition: aggregates.hh:556
Base class of all aggregation criterions.
Definition: aggregates.hh:48
The criterion describing the stop criteria for the coarsening process.
Definition: matrixhierarchy.hh:283
T AggregationCriterion
The criterion for tagging connections as strong and nodes as isolated. This might be e....
Definition: matrixhierarchy.hh:289
CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2, double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
Constructor.
Definition: matrixhierarchy.hh:301
Traits class for generically constructing non default constructable types.
Definition: construction.hh:38
LevelIterator< Hierarchy< MatrixOperator, Allocator >, MatrixOperator > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:215
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:218
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:59
typename std::allocator_traits< Allocator >::template rebind_alloc< AggregatesMap * > AAllocator
Allocator for pointers.
Definition: matrixhierarchy.hh:83
Dune::Amg::Hierarchy< ParallelInformation, Allocator > ParallelInformationHierarchy
The type of the parallel information hierarchy.
Definition: matrixhierarchy.hh:80
std::list< AggregatesMap *, AAllocator > AggregatesMapList
The type of the aggregates maps list.
Definition: matrixhierarchy.hh:86
PI ParallelInformation
The type of the index set.
Definition: matrixhierarchy.hh:68
Dune::Amg::Hierarchy< MatrixOperator, Allocator > ParallelMatrixHierarchy
The type of the parallel matrix hierarchy.
Definition: matrixhierarchy.hh:77
A Allocator
The allocator to use.
Definition: matrixhierarchy.hh:71
RedistributeInformation< ParallelInformation > RedistributeInfoType
The type of the redistribute information.
Definition: matrixhierarchy.hh:89
typename std::allocator_traits< Allocator >::template rebind_alloc< RedistributeInfoType > RILAllocator
Allocator for RedistributeInfoType.
Definition: matrixhierarchy.hh:92
std::list< RedistributeInfoType, RILAllocator > RedistributeInfoList
The type of the list of redistribute information.
Definition: matrixhierarchy.hh:95
Dune::Amg::AggregatesMap< typename MatrixGraph< Matrix >::VertexDescriptor > AggregatesMap
The type of the aggregates map we use.
Definition: matrixhierarchy.hh:74
MatrixOperator::matrix_type Matrix
The type of the matrix.
Definition: matrixhierarchy.hh:65
M MatrixOperator
The type of the matrix operator.
Definition: matrixhierarchy.hh:62
All parameters for AMG.
Definition: parameters.hh:391
Attaches properties to the edges and vertices of a graph.
Definition: graph.hh:976
Graph::VertexDescriptor VertexDescriptor
The vertex descriptor.
Definition: graph.hh:986
A vector of blocks with memory management.
Definition: bvector.hh:403
derive error class from the base class in common
Definition: istlexception.hh:16
Adapter to turn a random access iterator into a property map.
Definition: propertymap.hh:106
A generic dynamic dense matrix.
Definition: matrix.hh:558
A::size_type size_type
Type for indices and sizes.
Definition: matrix.hh:574
MatrixImp::DenseMatrixBase< T, A >::window_type row_type
The type implementing a matrix row.
Definition: matrix.hh:571
A simple stop watch.
Definition: timer.hh:41
void reset() noexcept
Reset timer while keeping the running/stopped state.
Definition: timer.hh:55
double elapsed() const noexcept
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:75
Portable very large unsigned integers.
Definition: bigunsignedint.hh:71
Helper classes for the construction of classes without empty constructor.
Provides classes for initializing the link attributes of a matrix graph.
Provides a class for building the Galerkin product based on an aggregation scheme.
Provides a class for identifying aggregates globally.
Provides classes for building the matrix graph.
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters.
Definition: unused.hh:25
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
T accumulate(Range &&range, T value, F &&f)
Accumulate values.
Definition: hybridutilities.hh:290
const AggregatesMapList & aggregatesMaps() const
Get the hierarchy of the mappings of the nodes onto aggregates.
Definition: matrixhierarchy.hh:820
bool isBuilt() const
Whether the hierarchy was built.
Definition: matrixhierarchy.hh:961
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: hierarchy.hh:321
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: matrixhierarchy.hh:942
void addCoarser(Arguments &args)
Add an element on a coarser level.
Definition: hierarchy.hh:333
const RedistributeInfoList & redistributeInformation() const
Get the hierarchy of the information about redistributions.
Definition: matrixhierarchy.hh:826
const ParallelInformationHierarchy & parallelInformation() const
Get the hierarchy of the parallel data distribution information.
Definition: matrixhierarchy.hh:745
const ParallelMatrixHierarchy & matrices() const
Get the matrix hierarchy.
Definition: matrixhierarchy.hh:738
std::size_t maxlevels() const
Get the max number of levels in the hierarchy of processors.
Definition: matrixhierarchy.hh:948
static const V ISOLATED
Identifier of isolated vertices.
Definition: aggregates.hh:567
void recalculateGalerkin(const F &copyFlags)
Recalculate the Galerkin products.
Definition: matrixhierarchy.hh:906
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
std::size_t noVertices() const
Get the number of vertices.
void coarsenVector(Hierarchy< BlockVector< V, BA >, TA > &hierarchy) const
Coarsen the vector hierarchy according to the matrix hierarchy.
Definition: matrixhierarchy.hh:849
MatrixHierarchy(std::shared_ptr< MatrixOperator > fineMatrix, std::shared_ptr< ParallelInformation > pinfo=std::make_shared< ParallelInformation >())
Constructor.
Definition: matrixhierarchy.hh:395
AccumulationMode
Identifiers for the different accumulation modes.
Definition: parameters.hh:230
void build(const T &criterion)
Build the matrix hierarchy using aggregation.
Definition: matrixhierarchy.hh:406
void free()
Free the allocated memory.
void coarsenSmoother(Hierarchy< S, TA > &smoothers, const typename SmootherTraits< S >::Arguments &args) const
Coarsen the smoother hierarchy according to the matrix hierarchy.
Definition: matrixhierarchy.hh:876
void buildDependency(G &graph, const typename C::Matrix &matrix, C criterion, bool finestLevel)
Build the dependency of the matrix graph.
std::tuple< int, int, int, int > buildAggregates(const M &matrix, G &graph, const C &criterion, bool finestLevel)
Build the aggregates.
void getCoarsestAggregatesOnFinest(std::vector< std::size_t > &data) const
Get the mapping of fine level unknowns to coarse level aggregates.
Definition: matrixhierarchy.hh:751
@ MAX_PROCESSES
Hard limit for the number of processes allowed.
Definition: matrixhierarchy.hh:48
@ atOnceAccu
Accumulate data to one process at once.
Definition: parameters.hh:242
@ successiveAccu
Successively accumulate to fewer processes.
Definition: parameters.hh:246
auto countNonZeros(const M &matrix, typename std::enable_if_t< Dune::IsNumber< M >::value > *sfinae=nullptr)
Get the number of nonzero fields in the matrix.
Definition: matrixutils.hh:119
auto min(ADLTag< 0 >, const V &v1, const V &v2)
implements binary Simd::min()
Definition: defaults.hh:87
auto max(ADLTag< 0 >, const V &v1, const V &v2)
implements binary Simd::max()
Definition: defaults.hh:79
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:93
DInfoType dinfo(std::cout)
Stream for informative output.
Definition: stdstreams.hh:138
DVerbType dverb(std::cout)
Singleton of verbose debug stream.
Definition: stdstreams.hh:114
Provides a classes representing the hierarchies in AMG.
Provides a map between global and local indices.
Provides a class for building the index set and remote indices on the coarse level.
Functionality for redistributing a sparse matrix.
Some handy generic functions for ISTL matrices.
Dune namespace.
Definition: alignedallocator.hh:14
void redistributeMatrix(M &origMatrix, M &newMatrix, C &origComm, C &newComm, RedistributeInformation< C > &ri)
Redistribute a matrix according to given domain decompositions.
Definition: matrixredistribute.hh:836
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 > > &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition: repartition.hh:1268
Classes for the generic construction and application of the smoothers.
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:65
Tag identifying the visited property of a vertex.
Definition: properties.hh:27
A property map that applies the identity function to integers.
Definition: propertymap.hh:291
Selector for the property map type.
Definition: propertymap.hh:318
static Category category(const OP &op, decltype(op.category()) *=nullptr)
Helper function to extract the solver category either from an enum, or from the newly introduced virtu...
Definition: solvercategory.hh:32
Prolongation and restriction for amg.
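Continuing the sketch above: once build() has run, the coarsened matrix hierarchy is typically used to set up matching vector and smoother hierarchies via coarsenVector() and coarsenSmoother(), again following what Dune::Amg::AMG does internally. The smoother type and its argument values below are assumptions for illustration.

// Continuation of the sketch above; assumes the Matrix/Vector/Operator/Comm
// aliases defined there. Smoother choice and arguments are illustrative.
#include <dune/common/shared_ptr.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/paamg/hierarchy.hh>
#include <dune/istl/paamg/smoother.hh>

using Smoother = Dune::SeqSSOR<Matrix,Vector,Vector>;

void setupLevelData(const Dune::Amg::MatrixHierarchy<Operator,Comm>& hierarchy,
                    Vector& fineRhs)    // right-hand side on the finest level
{
  // One vector per level, resized to match the coarse matrices.
  Dune::Amg::Hierarchy<Vector> rhs(Dune::stackobject_to_shared_ptr(fineRhs));
  hierarchy.coarsenVector(rhs);

  // One smoother per level, constructed from the level matrices and aggregates.
  Dune::Amg::SmootherTraits<Smoother>::Arguments smootherArgs;
  smootherArgs.iterations = 1;
  smootherArgs.relaxationFactor = 1.0;

  Dune::Amg::Hierarchy<Smoother> smoothers;
  hierarchy.coarsenSmoother(smoothers, smootherArgs);
}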