
matrixhierarchy.hh
1 // SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2 // SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4 // vi: set et ts=4 sw=2 sts=2:
5 #ifndef DUNE_AMG_MATRIXHIERARCHY_HH
6 #define DUNE_AMG_MATRIXHIERARCHY_HH
7 
8 #include <algorithm>
9 #include <tuple>
10 #include "aggregates.hh"
11 #include "graph.hh"
12 #include "galerkin.hh"
13 #include "renumberer.hh"
14 #include "graphcreator.hh"
15 #include "hierarchy.hh"
16 #include <dune/istl/bvector.hh>
18 #include <dune/istl/matrixutils.hh>
21 #include <dune/istl/paamg/graph.hh>
27 
28 namespace Dune
29 {
30  namespace Amg
31  {
42  enum {
50  MAX_PROCESSES = 72000
51  };
52 
59  template<class M, class PI, class A=std::allocator<M> >
60  class MatrixHierarchy
61  {
62  public:
64  typedef M MatrixOperator;
65 
67  typedef typename MatrixOperator::matrix_type Matrix;
68 
70  typedef PI ParallelInformation;
71 
73  typedef A Allocator;
74 
76  typedef Dune::Amg::AggregatesMap<typename MatrixGraph<Matrix>::VertexDescriptor> AggregatesMap;
77 
79  typedef Dune::Amg::Hierarchy<MatrixOperator,Allocator> ParallelMatrixHierarchy;
80 
82  typedef Dune::Amg::Hierarchy<ParallelInformation,Allocator> ParallelInformationHierarchy;
83 
85  using AAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<AggregatesMap*>;
86 
88  typedef std::list<AggregatesMap*,AAllocator> AggregatesMapList;
89 
91  typedef RedistributeInformation<ParallelInformation> RedistributeInfoType;
92 
94  using RILAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<RedistributeInfoType>;
95 
97  typedef std::list<RedistributeInfoType,RILAllocator> RedistributeInfoList;
98 
104  MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
105  std::shared_ptr<ParallelInformation> pinfo = std::make_shared<ParallelInformation>());
106 
107  ~MatrixHierarchy();
108 
114  template<typename O, typename T>
115  void build(const T& criterion);
116 
124  template<class F>
125  void recalculateGalerkin(const F& copyFlags);
126 
131  template<class V, class BA, class TA>
132  void coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const;
133 
139  template<class S, class TA>
140  void coarsenSmoother(Hierarchy<S,TA>& smoothers,
141  const typename SmootherTraits<S>::Arguments& args) const;
142 
147  std::size_t levels() const;
148 
153  std::size_t maxlevels() const;
154 
155  bool hasCoarsest() const;
156 
161  bool isBuilt() const;
162 
167  const ParallelMatrixHierarchy& matrices() const;
168 
173  const ParallelInformationHierarchy& parallelInformation() const;
174 
179  const AggregatesMapList& aggregatesMaps() const;
180 
186  const RedistributeInfoList& redistributeInformation() const;
187 
188  double getProlongationDampingFactor() const
189  {
190  return prolongDamp_;
191  }
192 
203  void getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const;
204 
205  private:
206  typedef typename ConstructionTraits<MatrixOperator>::Arguments MatrixArgs;
207  typedef typename ConstructionTraits<ParallelInformation>::Arguments CommunicationArgs;
209  AggregatesMapList aggregatesMaps_;
211  RedistributeInfoList redistributes_;
213  ParallelMatrixHierarchy matrices_;
215  ParallelInformationHierarchy parallelInformation_;
216 
218  bool built_;
219 
221  int maxlevels_;
222 
223  double prolongDamp_;
224 
228  template<class Matrix, bool print>
229  struct MatrixStats
230  {
231 
235  static void stats([[maybe_unused]] const Matrix& matrix)
236  {}
237  };
238 
239  template<class Matrix>
240  struct MatrixStats<Matrix,true>
241  {
242  struct calc
243  {
244  typedef typename Matrix::size_type size_type;
245  typedef typename Matrix::row_type matrix_row;
246 
247  calc()
248  {
249  min=std::numeric_limits<size_type>::max();
250  max=0;
251  sum=0;
252  }
253 
254  void operator()(const matrix_row& row)
255  {
256  min=std::min(min, row.size());
257  max=std::max(max, row.size());
258  sum += row.size();
259  }
260 
261  size_type min;
262  size_type max;
263  size_type sum;
264  };
268  static void stats(const Matrix& matrix)
269  {
270  calc c= for_each(matrix.begin(), matrix.end(), calc());
271  dinfo<<"Matrix row: min="<<c.min<<" max="<<c.max
272  <<" average="<<static_cast<double>(c.sum)/matrix.N()
273  <<std::endl;
274  }
275  };
276  };
277 
281  template<class T>
282  class CoarsenCriterion : public T
283  {
284  public:
289  typedef T AggregationCriterion;
290 
304  CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2,
305  double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu, bool useFixedOrder = false)
306  : AggregationCriterion(Dune::Amg::Parameters(maxLevel, coarsenTarget, minCoarsenRate, prolongDamp, accumulate, useFixedOrder))
307  {}
308 
309  CoarsenCriterion(const Parameters& parms)
310  : AggregationCriterion(parms)
311  {}
312 
313  };
314 
315  template<typename M, typename C1>
316  bool repartitionAndDistributeMatrix([[maybe_unused]] const M& origMatrix,
317  [[maybe_unused]] std::shared_ptr<M> newMatrix,
318  [[maybe_unused]] SequentialInformation& origComm,
319  [[maybe_unused]] std::shared_ptr<SequentialInformation>& newComm,
320  [[maybe_unused]] RedistributeInformation<SequentialInformation>& ri,
321  [[maybe_unused]] int nparts,
322  [[maybe_unused]] C1& criterion)
323  {
324  DUNE_THROW(NotImplemented, "Redistribution does not make sense in sequential code!");
325  }
326 
327 
328  template<typename M, typename C, typename C1>
329  bool repartitionAndDistributeMatrix(const M& origMatrix,
330  std::shared_ptr<M> newMatrix,
331  C& origComm,
332  std::shared_ptr<C>& newComm,
333  RedistributeInformation<C>& ri,
334  int nparts, C1& criterion)
335  {
336  Timer time;
337 #ifdef AMG_REPART_ON_COMM_GRAPH
338  // Do not repartition the matrix graph, but a graph of the communication scheme.
339  bool existentOnRedist=Dune::commGraphRepartition(origMatrix, origComm, nparts, newComm,
340  ri.getInterface(),
341  criterion.debugLevel()>1);
342 
343 #else
344  typedef Dune::Amg::MatrixGraph<const M> MatrixGraph;
345  typedef Dune::Amg::PropertiesGraph<MatrixGraph,
346  VertexProperties,
347  EdgeProperties,
348  IdentityMap,
349  IdentityMap> PropertiesGraph;
350  MatrixGraph graph(origMatrix);
351  PropertiesGraph pgraph(graph);
352  buildDependency(pgraph, origMatrix, criterion, false);
353 
354 #ifdef DEBUG_REPART
355  if(origComm.communicator().rank()==0)
356  std::cout<<"Original matrix"<<std::endl;
357  origComm.communicator().barrier();
358  printGlobalSparseMatrix(origMatrix, origComm, std::cout);
359 #endif
360  bool existentOnRedist=Dune::graphRepartition(pgraph, origComm, nparts,
361  newComm, ri.getInterface(),
362  criterion.debugLevel()>1);
363 #endif // if else AMG_REPART
364 
365  if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
366  std::cout<<"Repartitioning took "<<time.elapsed()<<" seconds."<<std::endl;
367 
368  ri.setSetup();
369 
370 #ifdef DEBUG_REPART
371  ri.checkInterface(origComm.indexSet(), newComm->indexSet(), origComm.communicator());
372 #endif
373 
374  redistributeMatrix(const_cast<M&>(origMatrix), *newMatrix, origComm, *newComm, ri);
375 
376 #ifdef DEBUG_REPART
377  if(origComm.communicator().rank()==0)
378  std::cout<<"Redistributed matrix"<<std::endl;
379  origComm.communicator().barrier();
380  if(newComm->communicator().size()>0)
381  printGlobalSparseMatrix(*newMatrix, *newComm, std::cout);
382  origComm.communicator().barrier();
383 #endif
384 
385  if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
386  std::cout<<"Redistributing matrix took "<<time.elapsed()<<" seconds."<<std::endl;
387  return existentOnRedist;
388 
389  }
390 
391  template<class M, class IS, class A>
392  MatrixHierarchy<M,IS,A>::MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
393  std::shared_ptr<ParallelInformation> pinfo)
394  : matrices_(fineMatrix),
395  parallelInformation_(pinfo)
396  {
397  if (SolverCategory::category(*fineMatrix) != SolverCategory::category(*pinfo))
398  DUNE_THROW(ISTLError, "MatrixOperator and ParallelInformation must belong to the same category!");
399  }
400 
401  template<class M, class IS, class A>
402  template<typename O, typename T>
403  void MatrixHierarchy<M,IS,A>::build(const T& criterion)
404  {
405  prolongDamp_ = criterion.getProlongationDampingFactor();
406  typedef O OverlapFlags;
407  typedef typename ParallelMatrixHierarchy::Iterator MatIterator;
408  typedef typename ParallelInformationHierarchy::Iterator PInfoIterator;
409 
410  static const int noints=(Dune::Amg::MAX_PROCESSES/4096>0) ? (Dune::Amg::MAX_PROCESSES/4096) : 1;
411 
412  typedef bigunsignedint<sizeof(int)*8*noints> BIGINT;
413  GalerkinProduct<ParallelInformation> productBuilder;
414  MatIterator mlevel = matrices_.finest();
415  MatrixStats<typename M::matrix_type,MINIMAL_DEBUG_LEVEL<=INFO_DEBUG_LEVEL>::stats(mlevel->getmat());
416 
417  PInfoIterator infoLevel = parallelInformation_.finest();
418  BIGINT finenonzeros=countNonZeros(mlevel->getmat());
419  finenonzeros = infoLevel->communicator().sum(finenonzeros);
420  BIGINT allnonzeros = finenonzeros;
421 
422 
423  int level = 0;
424  int rank = 0;
425 
426  BIGINT unknowns = mlevel->getmat().N();
427 
428  unknowns = infoLevel->communicator().sum(unknowns);
429  double dunknowns=unknowns.todouble();
430  infoLevel->buildGlobalLookup(mlevel->getmat().N());
431  redistributes_.push_back(RedistributeInfoType());
432 
433  for(; level < criterion.maxLevel(); ++level, ++mlevel) {
434  assert(matrices_.levels()==redistributes_.size());
435  rank = infoLevel->communicator().rank();
436  if(rank==0 && criterion.debugLevel()>1)
437  std::cout<<"Level "<<level<<" has "<<dunknowns<<" unknowns, "<<dunknowns/infoLevel->communicator().size()
438  <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
439 
440  MatrixOperator* matrix=&(*mlevel);
441  ParallelInformation* info =&(*infoLevel);
442 
443  if((
444 #if HAVE_PARMETIS
445  criterion.accumulate()==successiveAccu
446 #else
447  false
448 #endif
449  || (criterion.accumulate()==atOnceAccu
450  && dunknowns < 30*infoLevel->communicator().size()))
451  && infoLevel->communicator().size()>1 &&
452  dunknowns/infoLevel->communicator().size() <= criterion.coarsenTarget())
453  {
454  // accumulate to fewer processors
455  std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
456  std::shared_ptr<ParallelInformation> redistComm;
457  std::size_t nodomains = (std::size_t)std::ceil(dunknowns/(criterion.minAggregateSize()
458  *criterion.coarsenTarget()));
459  if( nodomains<=criterion.minAggregateSize()/2 ||
460  dunknowns <= criterion.coarsenTarget() )
461  nodomains=1;
462 
463  bool existentOnNextLevel =
464  repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
465  redistComm, redistributes_.back(), nodomains,
466  criterion);
467  BIGINT unknownsRedist = redistMat->N();
468  unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
469  dunknowns= unknownsRedist.todouble();
470  if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1)
471  std::cout<<"Level "<<level<<" (redistributed) has "<<dunknowns<<" unknowns, "<<dunknowns/redistComm->communicator().size()
472  <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
473  MatrixArgs args(redistMat, *redistComm);
474  mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
475  assert(mlevel.isRedistributed());
476  infoLevel.addRedistributed(redistComm);
477  infoLevel->freeGlobalLookup();
478 
479  if(!existentOnNextLevel)
480  // We do not hold any data on the redistributed partitioning
481  break;
482 
483  // Work on the redistributed Matrix from now on
484  matrix = &(mlevel.getRedistributed());
485  info = &(infoLevel.getRedistributed());
486  info->buildGlobalLookup(matrix->getmat().N());
487  }
488 
489  rank = info->communicator().rank();
490  if(dunknowns <= criterion.coarsenTarget())
491  // No further coarsening needed
492  break;
493 
494  typedef PropertiesGraphCreator<MatrixOperator,ParallelInformation> GraphCreator;
495  typedef typename GraphCreator::PropertiesGraph PropertiesGraph;
496  typedef typename GraphCreator::GraphTuple GraphTuple;
497 
498  typedef typename PropertiesGraph::VertexDescriptor Vertex;
499 
500  std::vector<bool> excluded(matrix->getmat().N(), false);
501 
502  GraphTuple graphs = GraphCreator::create(*matrix, excluded, *info, OverlapFlags());
503 
504  AggregatesMap* aggregatesMap=new AggregatesMap(std::get<1>(graphs)->maxVertex()+1);
505 
506  aggregatesMaps_.push_back(aggregatesMap);
507 
508  Timer watch;
509  watch.reset();
510  auto [noAggregates, isoAggregates, oneAggregates, skippedAggregates] =
511  aggregatesMap->buildAggregates(matrix->getmat(), *(std::get<1>(graphs)), criterion, level==0);
512 
513  if(rank==0 && criterion.debugLevel()>2)
514  std::cout<<" Have built "<<noAggregates<<" aggregates totally ("<<isoAggregates<<" isolated aggregates, "<<
515  oneAggregates<<" aggregates of one vertex, and skipped "<<
516  skippedAggregates<<" aggregates)."<<std::endl;
517 #ifdef TEST_AGGLO
518  {
519  // calculate size of local matrix in the distributed direction
520  int start, end, overlapStart, overlapEnd;
521  int procs=info->communicator().size();
522  int n = UNKNOWNS/procs; // number of unknowns per process
523  int bigger = UNKNOWNS%procs; // number of processes with n+1 unknowns
524 
525  // Compute owner region
526  if(rank<bigger) {
527  start = rank*(n+1);
528  end = (rank+1)*(n+1);
529  }else{
530  start = bigger + rank * n;
531  end = bigger + (rank + 1) * n;
532  }
533 
534  // Compute overlap region
535  if(start>0)
536  overlapStart = start - 1;
537  else
538  overlapStart = start;
539 
540  if(end<UNKNOWNS)
541  overlapEnd = end + 1;
542  else
543  overlapEnd = end;
544 
545  assert((UNKNOWNS)*(overlapEnd-overlapStart)==aggregatesMap->noVertices());
546  for(int j=0; j< UNKNOWNS; ++j)
547  for(int i=0; i < UNKNOWNS; ++i)
548  {
549  if(i>=overlapStart && i<overlapEnd)
550  {
551  int no = (j/2)*((UNKNOWNS)/2)+i/2;
552  (*aggregatesMap)[j*(overlapEnd-overlapStart)+i-overlapStart]=no;
553  }
554  }
555  }
556 #endif
557  if(criterion.debugLevel()>1 && info->communicator().rank()==0)
558  std::cout<<"aggregating finished."<<std::endl;
559 
560  BIGINT gnoAggregates=noAggregates;
561  gnoAggregates = info->communicator().sum(gnoAggregates);
562  double dgnoAggregates = gnoAggregates.todouble();
563 #ifdef TEST_AGGLO
564  BIGINT gnoAggregates=((UNKNOWNS)/2)*((UNKNOWNS)/2);
565 #endif
566 
567  if(criterion.debugLevel()>2 && rank==0)
568  std::cout << "Building "<<dgnoAggregates<<" aggregates took "<<watch.elapsed()<<" seconds."<<std::endl;
569 
570  if(dgnoAggregates==0 || dunknowns/dgnoAggregates<criterion.minCoarsenRate())
571  {
572  if(rank==0)
573  {
574  if(dgnoAggregates>0)
575  std::cerr << "Stopped coarsening because of rate breakdown "<<dunknowns<<"/"<<dgnoAggregates
576  <<"="<<dunknowns/dgnoAggregates<<"<"
577  <<criterion.minCoarsenRate()<<std::endl;
578  else
579  std::cerr<< "Could not build any aggregates. Probably no connected nodes."<<std::endl;
580  }
581  aggregatesMap->free();
582  delete aggregatesMap;
583  aggregatesMaps_.pop_back();
584 
585  if(criterion.accumulate() && mlevel.isRedistributed() && info->communicator().size()>1) {
586  // The coarse level matrix was already redistributed, but to more than 1 process.
587  // Therefore we need to delete the redistribution. Further down it will
588  // then be redistributed to 1 process.
589  delete &(mlevel.getRedistributed().getmat());
590  mlevel.deleteRedistributed();
591  delete &(infoLevel.getRedistributed());
592  infoLevel.deleteRedistributed();
593  redistributes_.back().resetSetup();
594  }
595 
596  break;
597  }
598  unknowns = noAggregates;
599  dunknowns = dgnoAggregates;
600 
601  CommunicationArgs commargs(info->communicator(),info->category());
602  parallelInformation_.addCoarser(commargs);
603 
604  ++infoLevel; // parallel information on coarse level
605 
606  typename PropertyMapTypeSelector<VertexVisitedTag,PropertiesGraph>::Type visitedMap =
607  get(VertexVisitedTag(), *(std::get<1>(graphs)));
608 
609  watch.reset();
610  int aggregates = IndicesCoarsener<ParallelInformation,OverlapFlags>
611  ::coarsen(*info,
612  *(std::get<1>(graphs)),
613  visitedMap,
614  *aggregatesMap,
615  *infoLevel,
616  noAggregates,
617  criterion.useFixedOrder());
618  GraphCreator::free(graphs);
619 
620  if(criterion.debugLevel()>2) {
621  if(rank==0)
622  std::cout<<"Coarsening of index sets took "<<watch.elapsed()<<" seconds."<<std::endl;
623  }
624 
625  watch.reset();
626 
627  infoLevel->buildGlobalLookup(aggregates);
628  AggregatesPublisher<Vertex,OverlapFlags,ParallelInformation>::publish(*aggregatesMap,
629  *info,
630  infoLevel->globalLookup());
631 
632 
633  if(criterion.debugLevel()>2) {
634  if(rank==0)
635  std::cout<<"Communicating global aggregate numbers took "<<watch.elapsed()<<" seconds."<<std::endl;
636  }
637 
638  watch.reset();
639  std::vector<bool>& visited=excluded;
640 
641  typedef std::vector<bool>::iterator Iterator;
642  typedef IteratorPropertyMap<Iterator, IdentityMap> VisitedMap2;
643  Iterator end = visited.end();
644  for(Iterator iter= visited.begin(); iter != end; ++iter)
645  *iter=false;
646 
647  VisitedMap2 visitedMap2(visited.begin(), Dune::IdentityMap());
648 
649  std::shared_ptr<typename MatrixOperator::matrix_type>
650  coarseMatrix(productBuilder.build(*(std::get<0>(graphs)), visitedMap2,
651  *info,
652  *aggregatesMap,
653  aggregates,
654  OverlapFlags()));
655  dverb<<"Building of sparsity pattern took "<<watch.elapsed()<<std::endl;
656  watch.reset();
657  info->freeGlobalLookup();
658 
659  delete std::get<0>(graphs);
660  productBuilder.calculate(matrix->getmat(), *aggregatesMap, *coarseMatrix, *infoLevel, OverlapFlags());
661 
662  if(criterion.debugLevel()>2) {
663  if(rank==0)
664  std::cout<<"Calculation entries of Galerkin product took "<<watch.elapsed()<<" seconds."<<std::endl;
665  }
666 
667  BIGINT nonzeros = countNonZeros(*coarseMatrix);
668  allnonzeros = allnonzeros + infoLevel->communicator().sum(nonzeros);
669  MatrixArgs args(coarseMatrix, *infoLevel);
670 
671  matrices_.addCoarser(args);
672  redistributes_.push_back(RedistributeInfoType());
673  } // end level loop
674 
675 
676  infoLevel->freeGlobalLookup();
677 
678  built_=true;
679  AggregatesMap* aggregatesMap=new AggregatesMap(0);
680  aggregatesMaps_.push_back(aggregatesMap);
681 
682  if(criterion.debugLevel()>0) {
683  if(level==criterion.maxLevel()) {
684  BIGINT unknownsLevel = mlevel->getmat().N();
685  unknownsLevel = infoLevel->communicator().sum(unknownsLevel);
686  double dunknownsLevel = unknownsLevel.todouble();
687  if(rank==0 && criterion.debugLevel()>1) {
688  std::cout<<"Level "<<level<<" has "<<dunknownsLevel<<" unknowns, "<<dunknownsLevel/infoLevel->communicator().size()
689  <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
690  }
691  }
692  }
693 
694  if(criterion.accumulate() && !redistributes_.back().isSetup() &&
695  infoLevel->communicator().size()>1) {
696 #if HAVE_MPI && !HAVE_PARMETIS
697  if(criterion.accumulate()==successiveAccu &&
698  infoLevel->communicator().rank()==0)
699  std::cerr<<"Successive accumulation of data on coarse levels only works with ParMETIS installed."
700  <<" Fell back to accumulation to one domain on coarsest level"<<std::endl;
701 #endif
702 
703  // accumulate to fewer processors
704  std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
705  std::shared_ptr<ParallelInformation> redistComm;
706  int nodomains = 1;
707 
708  repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
709  redistComm, redistributes_.back(), nodomains,criterion);
710  MatrixArgs args(redistMat, *redistComm);
711  BIGINT unknownsRedist = redistMat->N();
712  unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
713 
714  if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1) {
715  double dunknownsRedist = unknownsRedist.todouble();
716  std::cout<<"Level "<<level<<" redistributed has "<<dunknownsRedist<<" unknowns, "<<dunknownsRedist/redistComm->communicator().size()
717  <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
718  }
719  mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
720  infoLevel.addRedistributed(redistComm);
721  infoLevel->freeGlobalLookup();
722  }
723 
724  int levels = matrices_.levels();
725  maxlevels_ = parallelInformation_.finest()->communicator().max(levels);
726  assert(matrices_.levels()==redistributes_.size());
727  if(hasCoarsest() && rank==0 && criterion.debugLevel()>1)
728  std::cout<<"operator complexity: "<<allnonzeros.todouble()/finenonzeros.todouble()<<std::endl;
729 
730  }
731 
732  template<class M, class IS, class A>
733  const typename MatrixHierarchy<M,IS,A>::ParallelMatrixHierarchy&
734  MatrixHierarchy<M,IS,A>::matrices() const
735  {
736  return matrices_;
737  }
738 
739  template<class M, class IS, class A>
740  const typename MatrixHierarchy<M,IS,A>::ParallelInformationHierarchy&
741  MatrixHierarchy<M,IS,A>::parallelInformation() const
742  {
743  return parallelInformation_;
744  }
745 
746  template<class M, class IS, class A>
747  void MatrixHierarchy<M,IS,A>::getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const
748  {
749  int levels=aggregatesMaps().size();
750  int maxlevels=parallelInformation_.finest()->communicator().max(levels);
751  std::size_t size=(*(aggregatesMaps().begin()))->noVertices();
752  // We need an auxiliary vector for the consecutive prolongation.
753  std::vector<std::size_t> tmp;
754  std::vector<std::size_t> *coarse, *fine;
755 
756  // make sure the allocated space suffices.
757  tmp.reserve(size);
758  data.reserve(size);
759 
760  // Correctly assign coarse and fine for the first prolongation such that
761  // we end up in data in the end.
762  if(levels%2==0) {
763  coarse=&tmp;
764  fine=&data;
765  }else{
766  coarse=&data;
767  fine=&tmp;
768  }
769 
770  // Number the unknowns on the coarsest level consecutively for each process.
771  if(levels==maxlevels) {
772  const AggregatesMap& map = *(*(++aggregatesMaps().rbegin()));
773  std::size_t m=0;
774 
775  for(typename AggregatesMap::const_iterator iter = map.begin(); iter != map.end(); ++iter)
776  if(*iter< AggregatesMap::ISOLATED)
777  m=std::max(*iter,m);
778 
779  coarse->resize(m+1);
780  std::size_t i=0;
781  srand((unsigned)std::clock());
782  std::set<size_t> used;
783  for(typename std::vector<std::size_t>::iterator iter=coarse->begin(); iter != coarse->end();
784  ++iter, ++i)
785  {
786  std::pair<std::set<std::size_t>::iterator,bool> ibpair
787  = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
788 
789  while(!ibpair.second)
790  ibpair = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
791  *iter=*(ibpair.first);
792  }
793  }
794 
795  typename ParallelInformationHierarchy::Iterator pinfo = parallelInformation().coarsest();
796  --pinfo;
797 
798  // Now consecutively project the numbers to the finest level.
799  for(typename AggregatesMapList::const_reverse_iterator aggregates=++aggregatesMaps().rbegin();
800  aggregates != aggregatesMaps().rend(); ++aggregates,--levels) {
801 
802  fine->resize((*aggregates)->noVertices());
803  fine->assign(fine->size(), 0);
804  Transfer<typename AggregatesMap::AggregateDescriptor, std::vector<std::size_t>, ParallelInformation>
805  ::prolongateVector(*(*aggregates), *coarse, *fine, static_cast<std::size_t>(1), *pinfo);
806  --pinfo;
807  std::swap(coarse, fine);
808  }
809 
810  // Assertion to check that we really projected to data on the last step.
811  assert(coarse==&data);
812  }
813 
814  template<class M, class IS, class A>
815  const typename MatrixHierarchy<M,IS,A>::AggregatesMapList&
816  MatrixHierarchy<M,IS,A>::aggregatesMaps() const
817  {
818  return aggregatesMaps_;
819  }
820  template<class M, class IS, class A>
821  const typename MatrixHierarchy<M,IS,A>::RedistributeInfoList&
822  MatrixHierarchy<M,IS,A>::redistributeInformation() const
823  {
824  return redistributes_;
825  }
826 
827  template<class M, class IS, class A>
828  MatrixHierarchy<M,IS,A>::~MatrixHierarchy()
829  {
830  typedef typename AggregatesMapList::reverse_iterator AggregatesMapIterator;
831  typedef typename ParallelMatrixHierarchy::Iterator Iterator;
832  typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
833 
834  AggregatesMapIterator amap = aggregatesMaps_.rbegin();
835  InfoIterator info = parallelInformation_.coarsest();
836  for(Iterator level=matrices_.coarsest(), finest=matrices_.finest(); level != finest; --level, --info, ++amap) {
837  (*amap)->free();
838  delete *amap;
839  }
840  delete *amap;
841  }
842 
843  template<class M, class IS, class A>
844  template<class V, class BA, class TA>
845  void MatrixHierarchy<M,IS,A>::coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const
846  {
847  assert(hierarchy.levels()==1);
848  typedef typename ParallelMatrixHierarchy::ConstIterator Iterator;
849  typedef typename RedistributeInfoList::const_iterator RIter;
850  RIter redist = redistributes_.begin();
851 
852  Iterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
853  int level=0;
854  if(redist->isSetup())
855  hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
856  Dune::dvverb<<"Level "<<level<<" has "<<matrices_.finest()->getmat().N()<<" unknowns!"<<std::endl;
857 
858  while(matrix != coarsest) {
859  ++matrix; ++level; ++redist;
860  Dune::dvverb<<"Level "<<level<<" has "<<matrix->getmat().N()<<" unknowns!"<<std::endl;
861 
862  hierarchy.addCoarser(matrix->getmat().N());
863  if(redist->isSetup())
864  hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
865 
866  }
867 
868  }
869 
870  template<class M, class IS, class A>
871  template<class S, class TA>
872  void MatrixHierarchy<M,IS,A>::coarsenSmoother(Hierarchy<S,TA>& smoothers,
873  const typename SmootherTraits<S>::Arguments& sargs) const
874  {
875  assert(smoothers.levels()==0);
876  typedef typename ParallelMatrixHierarchy::ConstIterator MatrixIterator;
877  typedef typename ParallelInformationHierarchy::ConstIterator PinfoIterator;
878  typedef typename AggregatesMapList::const_iterator AggregatesIterator;
879 
880  typename ConstructionTraits<S>::Arguments cargs;
881  cargs.setArgs(sargs);
882  PinfoIterator pinfo = parallelInformation_.finest();
883  AggregatesIterator aggregates = aggregatesMaps_.begin();
884  int level=0;
885  for(MatrixIterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
886  matrix != coarsest; ++matrix, ++pinfo, ++aggregates, ++level) {
887  cargs.setMatrix(matrix->getmat(), **aggregates);
888  cargs.setComm(*pinfo);
889  smoothers.addCoarser(cargs);
890  }
891  if(maxlevels()>levels()) {
892  // This is not the globally coarsest level and therefore smoothing is needed
893  cargs.setMatrix(matrices_.coarsest()->getmat(), **aggregates);
894  cargs.setComm(*pinfo);
895  smoothers.addCoarser(cargs);
896  ++level;
897  }
898  }
899 
900  template<class M, class IS, class A>
901  template<class F>
902  void MatrixHierarchy<M,IS,A>::recalculateGalerkin(const F& copyFlags)
903  {
904  typedef typename AggregatesMapList::iterator AggregatesMapIterator;
905  typedef typename ParallelMatrixHierarchy::Iterator Iterator;
906  typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
907 
908  AggregatesMapIterator amap = aggregatesMaps_.begin();
909  BaseGalerkinProduct productBuilder;
910  InfoIterator info = parallelInformation_.finest();
911  typename RedistributeInfoList::iterator riIter = redistributes_.begin();
912  Iterator level = matrices_.finest(), coarsest=matrices_.coarsest();
913  if(level.isRedistributed()) {
914  info->buildGlobalLookup(level->getmat().N());
915  redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
916  const_cast<Matrix&>(level.getRedistributed().getmat()),
917  *info,info.getRedistributed(), *riIter);
918  info->freeGlobalLookup();
919  }
920 
921  for(; level!=coarsest; ++amap) {
922  const Matrix& fine = (level.isRedistributed() ? level.getRedistributed() : *level).getmat();
923  ++level;
924  ++info;
925  ++riIter;
926  productBuilder.calculate(fine, *(*amap), const_cast<Matrix&>(level->getmat()), *info, copyFlags);
927  if(level.isRedistributed()) {
928  info->buildGlobalLookup(level->getmat().N());
929  redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
930  const_cast<Matrix&>(level.getRedistributed().getmat()), *info,
931  info.getRedistributed(), *riIter);
932  info->freeGlobalLookup();
933  }
934  }
935  }
936 
937  template<class M, class IS, class A>
938  std::size_t MatrixHierarchy<M,IS,A>::levels() const
939  {
940  return matrices_.levels();
941  }
942 
943  template<class M, class IS, class A>
944  std::size_t MatrixHierarchy<M,IS,A>::maxlevels() const
945  {
946  return maxlevels_;
947  }
948 
949  template<class M, class IS, class A>
950  bool MatrixHierarchy<M,IS,A>::hasCoarsest() const
951  {
952  return levels()==maxlevels() &&
953  (!matrices_.coarsest().isRedistributed() ||matrices_.coarsest()->getmat().N()>0);
954  }
955 
956  template<class M, class IS, class A>
957  bool MatrixHierarchy<M,IS,A>::isBuilt() const
958  {
959  return built_;
960  }
961 
963  } // namespace Amg
964 } // namespace Dune
965 
966 #endif // end DUNE_AMG_MATRIXHIERARCHY_HH
Provides classes for the Coloring process of AMG.
This file implements a vector space as a tensor product of a given vector space. The number of compon...
Class providing information about the mapping of the vertices onto aggregates.
Definition: aggregates.hh:560
Base class of all aggregation criterions.
Definition: aggregates.hh:49
The criterion describing the stop criteria for the coarsening process.
Definition: matrixhierarchy.hh:283
T AggregationCriterion
The criterion for tagging connections as strong and nodes as isolated. This might be e....
Definition: matrixhierarchy.hh:289
CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2, double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu, bool useFixedOrder=false)
Constructor.
Definition: matrixhierarchy.hh:304
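A rough usage sketch (the scalar matrix typedef BCRSMat and the choice of SymmetricCriterion with FirstDiagonal from aggregates.hh are illustrative assumptions, not requirements of this header):

#include <dune/common/fmatrix.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/paamg/aggregates.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>

// Assumed scalar sparse matrix type used throughout the sketches below.
typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > BCRSMat;

// Wrap an aggregation criterion into the stop criterion for coarsening.
typedef Dune::Amg::CoarsenCriterion<
    Dune::Amg::SymmetricCriterion<BCRSMat,Dune::Amg::FirstDiagonal> > Criterion;

// Coarsen for at most 15 levels or until a level has no more than 2000
// unknowns per process; all remaining parameters keep their defaults.
Criterion criterion(15, 2000);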
LevelIterator< Hierarchy< MatrixOperator, Allocator >, MatrixOperator > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:216
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:219
The hierarchies built by the coarsening process.
Definition: matrixhierarchy.hh:61
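A minimal sequential sketch of constructing and building such a hierarchy. Vector and Operator are illustrative typedefs, A is an already assembled BCRSMat, criterion is the CoarsenCriterion from the sketch above, and the overlap-flag set passed to build() follows the pattern the AMG preconditioner uses internally and should be treated as an assumption here:

#include <dune/common/enumset.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>
#include <dune/istl/paamg/pinfo.hh>

typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
typedef Dune::MatrixAdapter<BCRSMat,Vector,Vector> Operator;
typedef Dune::Amg::SequentialInformation Comm;
typedef Dune::Amg::MatrixHierarchy<Operator,Comm> Hierarchy;

// The hierarchy stores shared pointers to the finest operator and to its
// (here trivial, sequential) parallel information.
Hierarchy hierarchy(std::make_shared<Operator>(A), std::make_shared<Comm>());

// The template argument selects the overlap flags used during aggregation;
// in the sequential case nothing is overlapped.
hierarchy.build<Dune::NegateSet<Comm::OwnerSet> >(criterion);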
typename std::allocator_traits< Allocator >::template rebind_alloc< AggregatesMap * > AAllocator
Allocator for pointers.
Definition: matrixhierarchy.hh:85
Dune::Amg::Hierarchy< ParallelInformation, Allocator > ParallelInformationHierarchy
The type of the parallel information hierarchy.
Definition: matrixhierarchy.hh:82
std::list< AggregatesMap *, AAllocator > AggregatesMapList
The type of the aggregates maps list.
Definition: matrixhierarchy.hh:88
PI ParallelInformation
The type of the index set.
Definition: matrixhierarchy.hh:70
Dune::Amg::Hierarchy< MatrixOperator, Allocator > ParallelMatrixHierarchy
The type of the parallel matrix hierarchy.
Definition: matrixhierarchy.hh:79
A Allocator
The allocator to use.
Definition: matrixhierarchy.hh:73
RedistributeInformation< ParallelInformation > RedistributeInfoType
The type of the redistribute information.
Definition: matrixhierarchy.hh:91
typename std::allocator_traits< Allocator >::template rebind_alloc< RedistributeInfoType > RILAllocator
Allocator for RedistributeInfoType.
Definition: matrixhierarchy.hh:94
std::list< RedistributeInfoType, RILAllocator > RedistributeInfoList
The type of the list of redistribute information.
Definition: matrixhierarchy.hh:97
Dune::Amg::AggregatesMap< typename MatrixGraph< Matrix >::VertexDescriptor > AggregatesMap
The type of the aggregates map we use.
Definition: matrixhierarchy.hh:76
MatrixOperator::matrix_type Matrix
The type of the matrix.
Definition: matrixhierarchy.hh:67
M MatrixOperator
The type of the matrix operator.
Definition: matrixhierarchy.hh:64
All parameters for AMG.
Definition: parameters.hh:416
Attaches properties to the edges and vertices of a graph.
Definition: graph.hh:978
Graph::VertexDescriptor VertexDescriptor
The vertex descriptor.
Definition: graph.hh:988
A vector of blocks with memory management.
Definition: bvector.hh:392
derive error class from the base class in common
Definition: istlexception.hh:19
Adapter to turn a random access iterator into a property map.
Definition: propertymap.hh:108
A generic dynamic dense matrix.
Definition: matrix.hh:561
A::size_type size_type
Type for indices and sizes.
Definition: matrix.hh:577
MatrixImp::DenseMatrixBase< T, A >::window_type row_type
The type implementing a matrix row.
Definition: matrix.hh:574
A simple stop watch.
Definition: timer.hh:43
void reset() noexcept
Reset timer while keeping the running/stopped state.
Definition: timer.hh:57
double elapsed() const noexcept
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:77
Portable very large unsigned integers.
Definition: bigunsignedint.hh:73
Helper classes for the construction of classes without empty constructor.
Provides classes for initializing the link attributes of a matrix graph.
Provides a map between global and local indices.
Provides a class for identifying aggregates globally.
Provides classes for building the matrix graph.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
constexpr auto max
Function object that returns the greater of the given values.
Definition: hybridutilities.hh:484
constexpr auto min
Function object that returns the smaller of the given values.
Definition: hybridutilities.hh:506
constexpr T accumulate(Range &&range, T value, F &&f)
Accumulate values.
Definition: hybridutilities.hh:279
const AggregatesMapList & aggregatesMaps() const
Get the hierarchy of the mappings of the nodes onto aggregates.
Definition: matrixhierarchy.hh:816
bool isBuilt() const
Whether the hierarchy was built.
Definition: matrixhierarchy.hh:957
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: hierarchy.hh:322
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition: matrixhierarchy.hh:938
void addCoarser(Arguments &args)
Add an element on a coarser level.
Definition: hierarchy.hh:334
const RedistributeInfoList & redistributeInformation() const
Get the hierarchy of the information about redistributions.
Definition: matrixhierarchy.hh:822
const ParallelInformationHierarchy & parallelInformation() const
Get the hierarchy of the parallel data distribution information.
Definition: matrixhierarchy.hh:741
const ParallelMatrixHierarchy & matrices() const
Get the matrix hierarchy.
Definition: matrixhierarchy.hh:734
std::size_t maxlevels() const
Get the max number of levels in the hierarchy of processors.
Definition: matrixhierarchy.hh:944
static const V ISOLATED
Identifier of isolated vertices.
Definition: aggregates.hh:571
void recalculateGalerkin(const F &copyFlags)
Recalculate the galerkin products.
Definition: matrixhierarchy.hh:902
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
std::size_t noVertices() const
Get the number of vertices.
std::tuple< int, int, int, int > buildAggregates(const M &matrix, G &graph, const C &criterion, bool finestLevel)
Build the aggregates.
void coarsenVector(Hierarchy< BlockVector< V, BA >, TA > &hierarchy) const
Coarsen the vector hierarchy according to the matrix hierarchy.
Definition: matrixhierarchy.hh:845
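Continuing the sketch from above, a vector hierarchy is created from the finest-level vector and coarsened alongside the matrices, mirroring what the AMG preconditioner does for its right-hand-side and update hierarchies (b is an assumed fine-level Vector of matching size):

Dune::Amg::Hierarchy<Vector> vectors(std::make_shared<Vector>(b));
hierarchy.coarsenVector(vectors);   // adds one coarser vector per matrix level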
MatrixHierarchy(std::shared_ptr< MatrixOperator > fineMatrix, std::shared_ptr< ParallelInformation > pinfo=std::make_shared< ParallelInformation >())
Constructor.
Definition: matrixhierarchy.hh:392
AccumulationMode
Identifiers for the different accumulation modes.
Definition: parameters.hh:231
void build(const T &criterion)
Build the matrix hierarchy using aggregation.
Definition: matrixhierarchy.hh:403
void free()
Free the allocated memory.
void coarsenSmoother(Hierarchy< S, TA > &smoothers, const typename SmootherTraits< S >::Arguments &args) const
Coarsen the smoother hierarchy according to the matrix hierarchy.
Definition: matrixhierarchy.hh:872
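Again continuing the sketch, one smoother per level is constructed from a single set of smoother arguments. SeqSSOR is just one possible choice; iterations and relaxationFactor are members of the default smoother arguments defined in smoother.hh:

#include <dune/istl/preconditioners.hh>
#include <dune/istl/paamg/smoother.hh>

typedef Dune::SeqSSOR<BCRSMat,Vector,Vector> Smoother;
typedef Dune::Amg::SmootherTraits<Smoother>::Arguments SmootherArgs;

SmootherArgs smootherArgs;
smootherArgs.iterations = 1;
smootherArgs.relaxationFactor = 1.0;

Dune::Amg::Hierarchy<Smoother> smoothers;   // must start empty (levels()==0)
hierarchy.coarsenSmoother(smoothers, smootherArgs);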
void buildDependency(G &graph, const typename C::Matrix &matrix, C criterion, bool finestLevel)
Build the dependency of the matrix graph.
void getCoarsestAggregatesOnFinest(std::vector< std::size_t > &data) const
Get the mapping of fine level unknowns to coarse level aggregates.
Definition: matrixhierarchy.hh:747
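A short sketch of querying the final aggregation, e.g. to visualise which coarsest-level aggregate each fine-level unknown ends up in:

std::vector<std::size_t> aggregateOfUnknown;
hierarchy.getCoarsestAggregatesOnFinest(aggregateOfUnknown);
// aggregateOfUnknown[i] identifies the coarsest-level aggregate of fine unknown i.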
@ MAX_PROCESSES
Hard limit for the number of processes allowed.
Definition: matrixhierarchy.hh:50
@ atOnceAccu
Accumulate data to one process at once.
Definition: parameters.hh:243
@ successiveAccu
Successively accumulate to fewer processes.
Definition: parameters.hh:247
auto countNonZeros(const M &, [[maybe_unused]] typename std::enable_if_t< Dune::IsNumber< M >::value > *sfinae=nullptr)
Get the number of nonzero fields in the matrix.
Definition: matrixutils.hh:119
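For example, with the assembled matrix A from the sketches above:

const auto nnz = Dune::countNonZeros(A);   // number of nonzero entries stored in A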
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:95
DInfoType dinfo(std::cout)
Stream for informative output.
Definition: stdstreams.hh:140
DVerbType dverb(std::cout)
Singleton of verbose debug stream.
Definition: stdstreams.hh:116
Provides a classes representing the hierarchies in AMG.
Provides a class for building the index set and remote indices on the coarse level.
Functionality for redistributing a sparse matrix.
Some handy generic functions for ISTL matrices.
Dune namespace.
Definition: alignedallocator.hh:13
constexpr std::integral_constant< std::size_t, sizeof...(II)> size(std::integer_sequence< T, II... >)
Return the size of the sequence.
Definition: integersequence.hh:75
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 >> &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition: repartition.hh:1233
void redistributeMatrix(M &origMatrix, M &newMatrix, C &origComm, C &newComm, RedistributeInformation< C > &ri)
Redistribute a matrix according to given domain decompositions.
Definition: matrixredistribute.hh:820
constexpr auto get(std::integer_sequence< T, II... >, std::integral_constant< std::size_t, pos >={})
Return the entry at position pos of the given sequence.
Definition: integersequence.hh:22
Classes for the generic construction and application of the smoothers.
Traits class for generically constructing non default constructable types.
Definition: construction.hh:39
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:66
Tag identifying the visited property of a vertex.
Definition: properties.hh:29
A property map that applies the identity function to integers.
Definition: propertymap.hh:293
Selector for the property map type.
Definition: propertymap.hh:320
static Category category(const OP &op, decltype(op.category()) *=nullptr)
Helper function to extract the solver category either from an enum, or from the newly introduced virtu...
Definition: solvercategory.hh:34
Prolongation and restriction for amg.