DUNE PDELab (2.7)

ovlp_amg_dg_backend.hh
#ifndef DUNE_PDELAB_BACKEND_ISTL_OVLP_AMG_DG_BACKEND_HH
#define DUNE_PDELAB_BACKEND_ISTL_OVLP_AMG_DG_BACKEND_HH

#include <dune/common/parametertree.hh>
#include <dune/common/power.hh>

#include <dune/istl/matrixmatrix.hh>

#include <dune/grid/common/datahandleif.hh>

#include <dune/pdelab/backend/istl/vector.hh>
#include <dune/pdelab/backend/istl/bcrsmatrix.hh>
#include <dune/pdelab/backend/istl/bcrsmatrixbackend.hh>
#include <dune/pdelab/backend/istl/ovlpistlsolverbackend.hh>
#include <dune/pdelab/gridoperator/gridoperator.hh>
#include <dune/pdelab/localoperator/flags.hh>
#include <dune/pdelab/localoperator/idefault.hh>
#include <dune/pdelab/localoperator/pattern.hh>
#include <dune/pdelab/localoperator/defaultimp.hh>
namespace Dune {
  namespace PDELab {

    //***********************************************************
    // A data handle / function to communicate matrix entries
    // in the overlap. It turned out that this is not actually
    // required, but it is kept here anyway.
    //***********************************************************

    //! Data handle that builds up maps between local indices and global ids
    template<class GFS>
    class LocalGlobalMapDataHandle
      : public Dune::CommDataHandleIF<LocalGlobalMapDataHandle<GFS>,int>
    {
    public:
      // some types from the grid view
      typedef typename GFS::Traits::GridView GV;
      typedef typename GV::IndexSet IndexSet;
      typedef typename IndexSet::IndexType IndexType;
      typedef typename GV::Grid Grid;
      typedef typename Grid::Traits::GlobalIdSet GlobalIdSet;
      typedef typename GlobalIdSet::IdType IdType;

      //! export type of data for message buffer
      typedef int DataType;

      // map types
      typedef std::map<IndexType,IdType> LocalToGlobalMap;
      typedef std::map<IdType,IndexType> GlobalToLocalMap;

      //! returns true if data for this codim should be communicated
      bool contains (int dim, int codim) const
      {
        return (codim==0);
      }

      //! returns true if size per entity of given dim and codim is a constant
      bool fixedSize (int dim, int codim) const
      {
        return true;
      }

      //! how many objects of type DataType have to be sent for a given entity
      template<class EntityType>
      size_t size (EntityType& e) const
      {
        return 1;
      }

      //! pack data from user to message buffer
      template<class MessageBuffer, class EntityType>
      void gather (MessageBuffer& buff, const EntityType& e) const
      {
        // fill map
        IndexType myindex = indexSet.index(e);
        IdType myid = globalIdSet.id(e);
        l2g[myindex] = myid;
        g2l[myid] = myindex;
        //std::cout << gv.comm().rank() << ": gather local=" << myindex << " global=" << myid << std::endl;

        buff.write(0); // this is a dummy, we are not interested in the data
      }

      //! unpack data from message buffer to user
      template<class MessageBuffer, class EntityType>
      void scatter (MessageBuffer& buff, const EntityType& e, size_t n)
      {
        DataType x;
        buff.read(x); // this is a dummy, we are not interested in the data

        IndexType myindex = indexSet.index(e);
        IdType myid = globalIdSet.id(e);
        l2g[myindex] = myid;
        g2l[myid] = myindex;
        //std::cout << gv.comm().rank() << ": scatter local=" << myindex << " global=" << myid << std::endl;
      }

      //! constructor
      LocalGlobalMapDataHandle (const GFS& gfs_, LocalToGlobalMap& l2g_, GlobalToLocalMap& g2l_)
        : gfs(gfs_), gv(gfs.gridView()), indexSet(gv.indexSet()),
          grid(gv.grid()), globalIdSet(grid.globalIdSet()),
          l2g(l2g_), g2l(g2l_)
      {
      }

    private:
      const GFS& gfs;
      GV gv;
      const IndexSet& indexSet;
      const Grid& grid;
      const GlobalIdSet& globalIdSet;
      LocalToGlobalMap& l2g;
      GlobalToLocalMap& g2l;
    };

    // A DataHandle class to exchange rows of a sparse matrix
    template<class GFS, class M> // grid function space and matrix type
    class MatrixExchangeDataHandle
      : public Dune::CommDataHandleIF<MatrixExchangeDataHandle<GFS,M>,
                                      std::pair<typename GFS::Traits::GridView::Grid::Traits::GlobalIdSet::IdType,
                                                typename M::block_type> >
    {
    public:
      // some types from the grid view
      typedef typename GFS::Traits::GridView GV;
      typedef typename GV::IndexSet IndexSet;
      typedef typename IndexSet::IndexType IndexType;
      typedef typename GV::Grid Grid;
      typedef typename Grid::Traits::GlobalIdSet GlobalIdSet;
      typedef typename GlobalIdSet::IdType IdType;

      // some types from the matrix
      typedef typename M::block_type B;

      //! export type of data for message buffer
      typedef std::pair<IdType,B> DataType;

      // map types
      typedef std::map<IndexType,IdType> LocalToGlobalMap;
      typedef std::map<IdType,IndexType> GlobalToLocalMap;

      //! returns true if data for this codim should be communicated
      bool contains (int dim, int codim) const
      {
        return (codim==0);
      }

      //! returns true if size per entity of given dim and codim is a constant
      bool fixedSize (int dim, int codim) const
      {
        return false;
      }

      //! how many objects of type DataType have to be sent for a given entity
      template<class EntityType>
      size_t size (EntityType& e) const
      {
        IndexType myindex = indexSet.index(e);
        typename M::row_type myrow = m[myindex];
        typename M::row_type::iterator endit=myrow.end();
        size_t count=0;
        for (typename M::row_type::iterator it=myrow.begin(); it!=endit; ++it)
          {
            typename LocalToGlobalMap::const_iterator find=l2g.find(it.index());
            if (find!=l2g.end())
              {
                count++;
              }
          }
        //std::cout << gv.comm().rank() << ": row=" << myindex << " size=" << count << std::endl;
        return count;
      }

      //! pack data from user to message buffer
      template<class MessageBuffer, class EntityType>
      void gather (MessageBuffer& buff, const EntityType& e) const
      {
        IndexType myindex = indexSet.index(e);
        //std::cout << gv.comm().rank() << ": begin gather local=" << myindex << std::endl;
        typename M::row_type myrow = m[myindex];
        typename M::row_type::iterator endit=myrow.end();
        size_t count = 0;
        for (typename M::row_type::iterator it=myrow.begin(); it!=endit; ++it)
          {
            typename LocalToGlobalMap::const_iterator find=l2g.find(it.index());
            if (find!=l2g.end())
              {
                buff.write(std::make_pair(find->second,*it));
                //std::cout << gv.comm().rank() << ": ==> local=" << find->first << " global=" << find->second << std::endl;
                count++;
              }
          }
        //std::cout << gv.comm().rank() << ": end gather row=" << myindex << " size=" << count << std::endl;
      }

      //! unpack data from message buffer to user
      template<class MessageBuffer, class EntityType>
      void scatter (MessageBuffer& buff, const EntityType& e, size_t n)
      {
        IndexType myindex = indexSet.index(e);
        std::cout << gv.comm().rank() << ": begin scatter local=" << myindex << " size=" << n << std::endl;
        DataType x;
        size_t count = 0;
        for (size_t i=0; i<n; ++i)
          {
            buff.read(x);
            std::cout << gv.comm().rank() << ": --> received global=" << x.first << std::endl;
            typename GlobalToLocalMap::const_iterator find=g2l.find(x.first);
            if (find!=g2l.end())
              {
                IndexType nbindex=find->second;
                if (m.exists(myindex,nbindex))
                  {
                    m[myindex][nbindex] = x.second;
                    B block(x.second);
                    block -= m2[myindex][nbindex];
                    std::cout << gv.comm().rank() << ": compare i=" << myindex << " j=" << nbindex
                              << " norm=" << block.infinity_norm() << std::endl;

                    count++;
                    //std::cout << gv.comm().rank() << ": --> local=" << find->first << " global=" << find->second << std::endl;
                  }
              }
          }
        //std::cout << gv.comm().rank() << ": end scatter row=" << myindex << " size=" << count << std::endl;
      }

      //! constructor
      MatrixExchangeDataHandle (const GFS& gfs_, M& m_, const LocalToGlobalMap& l2g_, const GlobalToLocalMap& g2l_, M& m2_)
        : gfs(gfs_), m(m_), gv(gfs.gridView()), indexSet(gv.indexSet()),
          grid(gv.grid()), globalIdSet(grid.globalIdSet()),
          l2g(l2g_), g2l(g2l_), m2(m2_)
      {
      }

    private:
      const GFS& gfs;
      M& m;
      GV gv;
      const IndexSet& indexSet;
      const Grid& grid;
      const GlobalIdSet& globalIdSet;
      const LocalToGlobalMap& l2g;
      const GlobalToLocalMap& g2l;
      M& m2;
    };


    // Restore the matrix entries in the overlap from the neighboring processes
    template<class GFS, class T, class A, int n, int m>
    void restore_overlap_entries (const GFS& gfs, Dune::BCRSMatrix<Dune::FieldMatrix<T,n,m>,A>& matrix,
                                  Dune::BCRSMatrix<Dune::FieldMatrix<T,n,m>,A>& matrix2)
    {
      typedef Dune::FieldMatrix<T,n,m> B;
      typedef Dune::BCRSMatrix<B,A> M; // matrix is of type M
      typedef typename LocalGlobalMapDataHandle<GFS>::LocalToGlobalMap LocalToGlobalMap;
      typedef typename LocalGlobalMapDataHandle<GFS>::GlobalToLocalMap GlobalToLocalMap;

      // build up two maps to associate local indices and global ids
      LocalToGlobalMap l2g;
      GlobalToLocalMap g2l;
      LocalGlobalMapDataHandle<GFS> lgdh(gfs,l2g,g2l);
      if (gfs.gridView().comm().size()>1)
        gfs.gridView().communicate(lgdh,Dune::InteriorBorder_All_Interface,Dune::ForwardCommunication);

      // exchange matrix entries
      MatrixExchangeDataHandle<GFS,M> mexdh(gfs,matrix,l2g,g2l,matrix2);
      if (gfs.gridView().comm().size()>1)
        gfs.gridView().communicate(mexdh,Dune::InteriorBorder_All_Interface,Dune::ForwardCommunication);
    }

    //***********************************************************
    // The DG/AMG preconditioner in the overlapping case
    //***********************************************************

    //! Hybrid preconditioner for DG discretizations: a few smoothing steps on the
    //! DG matrix are combined with an AMG-preconditioned coarse correction on a
    //! low-order (CG) subspace, coupled via the prolongation P (restriction P^T).
    template<class DGGFS, class DGMatrix, class DGPrec, class DGCC,
             class CGGFS, class CGPrec, class CGCC,
             class P, class DGHelper, class Comm>
    class OvlpDGAMGPrec
      : public Dune::Preconditioner<Dune::PDELab::Backend::Vector<DGGFS,typename DGPrec::domain_type::field_type>,
                                    Dune::PDELab::Backend::Vector<DGGFS,typename DGPrec::range_type::field_type>>
    {
      const DGGFS& dggfs;
      DGMatrix& dgmatrix;
      DGPrec& dgprec;
      const DGCC& dgcc;
      const CGGFS& cggfs;
      CGPrec& cgprec;
      const CGCC& cgcc;
      P& p;
      const DGHelper& dghelper;
      const Comm& comm;
      int n1,n2;

    public:
      using V = Dune::PDELab::Backend::Vector<DGGFS,typename DGPrec::domain_type::field_type>;
      using W = Dune::PDELab::Backend::Vector<DGGFS,typename DGPrec::range_type::field_type>;
      using X = Backend::Native<V>;
      using Y = Backend::Native<W>;
      using CGV = Dune::PDELab::Backend::Vector<CGGFS,typename CGPrec::domain_type::field_type>;
      using CGW = Dune::PDELab::Backend::Vector<CGGFS,typename CGPrec::range_type::field_type>;

      // define the category
      SolverCategory::Category category() const override
      {
        return SolverCategory::overlapping;
      }

      //! Constructor
      OvlpDGAMGPrec (const DGGFS& dggfs_, DGMatrix& dgmatrix_, DGPrec& dgprec_, const DGCC& dgcc_,
                     const CGGFS& cggfs_, CGPrec& cgprec_, const CGCC& cgcc_, P& p_,
                     const DGHelper& dghelper_, const Comm& comm_, int n1_, int n2_)
        : dggfs(dggfs_), dgmatrix(dgmatrix_), dgprec(dgprec_), dgcc(dgcc_),
          cggfs(cggfs_), cgprec(cgprec_), cgcc(cgcc_), p(p_), dghelper(dghelper_),
          comm(comm_), n1(n1_), n2(n2_)
      {
      }

      //! Prepare the preconditioner.
      virtual void pre (V& x, W& b) override
      {
        using Backend::native;
        dgprec.pre(native(x),native(b));
        CGW cgd(cggfs,0.0);
        CGV cgv(cggfs,0.0);
        cgprec.pre(native(cgv),native(cgd));
      }

      //! Apply the preconditioner.
      virtual void apply (V& x, const W& b) override
      {
        using Backend::native;
        // need local copies to store defect and solution
        W d(b);
        Dune::PDELab::set_constrained_dofs(dgcc,0.0,d); // set defect to zero on constrained dofs
        V v(x); // only to get correct size

        // pre-smoothing on DG matrix
        for (int i=0; i<n1; i++)
          {
            using Backend::native;
            v = 0.0;
            dgprec.apply(native(v),native(d));
            Dune::PDELab::AddDataHandle<DGGFS,V> adddh(dggfs,v);
            if (dggfs.gridView().comm().size()>1)
              dggfs.gridView().communicate(adddh,Dune::All_All_Interface,Dune::ForwardCommunication);
            dgmatrix.mmv(native(v),native(d));
            Dune::PDELab::set_constrained_dofs(dgcc,0.0,d); // set defect to zero on constrained dofs
            x += v;
          }

        // restrict defect to CG subspace
        dghelper.maskForeignDOFs(d); // DG defect is additive for overlap 1; mask in case a larger overlap is used
        CGW cgd(cggfs,0.0);
        p.mtv(native(d),native(cgd));
        Dune::PDELab::AddDataHandle<CGGFS,CGW> adddh(cggfs,cgd);
        if (cggfs.gridView().comm().size()>1)
          cggfs.gridView().communicate(adddh,Dune::All_All_Interface,Dune::ForwardCommunication); // now we have a consistent defect on the coarse grid
        Dune::PDELab::set_constrained_dofs(cgcc,0.0,cgd); // set defect to zero on constrained dofs
        comm.project(native(cgd));
        CGV cgv(cggfs,0.0);

        // call preconditioner
        cgprec.apply(native(cgv),native(cgd));

        // prolongate correction
        p.mv(native(cgv),native(v));
        dgmatrix.mmv(native(v),native(d));
        Dune::PDELab::set_constrained_dofs(dgcc,0.0,d); // set defect to zero on constrained dofs
        x += v;

        // post-smoothing on DG matrix
        for (int i=0; i<n2; i++)
          {
            v = 0.0;
            dgprec.apply(native(v),native(d));
            Dune::PDELab::AddDataHandle<DGGFS,V> adddh(dggfs,v);
            if (dggfs.gridView().comm().size()>1)
              dggfs.gridView().communicate(adddh,Dune::All_All_Interface,Dune::ForwardCommunication);
            dgmatrix.mmv(native(v),native(d));
            Dune::PDELab::set_constrained_dofs(dgcc,0.0,d); // set defect to zero on constrained dofs
            x += v;
          }
      }

      //! Clean up.
      virtual void post (V& x) override
      {
        dgprec.post(Backend::native(x));
        CGV cgv(cggfs,0.0);
        cgprec.post(Backend::native(cgv));
      }
    };

    //***********************************************************
    // The DG/AMG linear solver backend in the overlapping case
    //***********************************************************

    //! Overlapping linear solver backend for DG discretizations: a Krylov solver
    //! preconditioned with OvlpDGAMGPrec (DG smoother plus AMG on a low-order subspace).
    template<class DGGO, class DGCC, class CGGFS, class CGCC, class TransferLOP,
             template<class,class,class,int> class DGPrec, template<class> class Solver, int s=96>
    class ISTLBackend_OVLP_AMG_4_DG
      : public Dune::PDELab::OVLPScalarProductImplementation<typename DGGO::Traits::TrialGridFunctionSpace>,
        public Dune::PDELab::LinearResultStorage
    {
    public:
      // DG grid function space
      typedef typename DGGO::Traits::TrialGridFunctionSpace GFS;

      // vectors and matrices on DG level
      typedef typename DGGO::Traits::Jacobian M;  // wrapped istl DG matrix
      typedef typename DGGO::Traits::Domain V;    // wrapped istl DG vector
      typedef Backend::Native<M> Matrix;          // istl DG matrix
      typedef Backend::Native<V> Vector;          // istl DG vector
      typedef typename Vector::field_type field_type;

      // vectors and matrices on CG level
      using CGV = Dune::PDELab::Backend::Vector<CGGFS,field_type>; // wrapped istl CG vector
      typedef Backend::Native<CGV> CGVector;                       // istl CG vector

      // prolongation matrix
      typedef Dune::PDELab::ISTL::BCRSMatrixBackend<> MBE;
      typedef Dune::PDELab::EmptyTransformation CC;
      typedef TransferLOP CGTODGLOP; // local operator
      typedef Dune::PDELab::GridOperator<CGGFS,GFS,CGTODGLOP,MBE,field_type,field_type,field_type,CC,CC> PGO;
      typedef typename PGO::Jacobian PMatrix; // wrapped ISTL prolongation matrix
      typedef Backend::Native<PMatrix> P;     // ISTL prolongation matrix

      // CG subspace matrix
      typedef typename Dune::TransposedMatMultMatResult<P,Matrix>::type PTADG;
      typedef typename Dune::MatMultMatResult<PTADG,P>::type CGMatrix; // istl coarse space matrix

      // AMG in CG-subspace
      typedef typename Dune::PDELab::ISTL::CommSelector<s,Dune::MPIHelper::isFake>::type Comm;
      typedef Dune::OverlappingSchwarzOperator<CGMatrix,CGVector,CGVector,Comm> ParCGOperator;
      typedef Dune::SeqSSOR<CGMatrix,CGVector,CGVector,1> Smoother;
      typedef Dune::BlockPreconditioner<CGVector,CGVector,Comm,Smoother> ParSmoother;
      typedef Dune::Amg::AMG<ParCGOperator,CGVector,ParSmoother,Comm> AMG;
      typedef Dune::Amg::Parameters Parameters;

    private:

      const GFS& gfs;
      DGGO& dggo;
      const DGCC& dgcc;
      CGGFS& cggfs;
      const CGCC& cgcc;
      std::shared_ptr<AMG> amg;
      Parameters amg_parameters;
      unsigned maxiter;
      int verbose;
      bool reuse;
      bool firstapply;
      bool usesuperlu;
      std::size_t low_order_space_entries_per_row;

      CGTODGLOP cgtodglop;  // local operator to assemble prolongation matrix
      PGO pgo;              // grid operator to assemble prolongation matrix
      PMatrix pmatrix;      // wrapped prolongation matrix

      // empty local operator, used only to set up sparsity pattern and constraints
      class EmptyLop : public Dune::PDELab::NumericalJacobianApplyVolume<EmptyLop>,
                       public Dune::PDELab::FullVolumePattern,
                       public Dune::PDELab::LocalOperatorDefaultFlags,
                       public Dune::PDELab::InstationaryLocalOperatorDefaultMethods<double>
      {
      };

      // grid operator with empty local operator for constraints assembly in parallel
      typedef Dune::PDELab::GridOperator<CGGFS,CGGFS,EmptyLop,MBE,field_type,field_type,field_type,CGCC,CGCC> CGGO;

      // CG-subspace matrix
      typedef typename CGGO::Jacobian CGM;
      CGM acg;

    public:

      // access to prolongation matrix
      PMatrix& prolongation_matrix ()
      {
        return pmatrix;
      }

      //! set AMG parameters
      void setParameters(const Parameters& amg_parameters_)
      {
        amg_parameters = amg_parameters_;
      }

      //! get the parameters describing the behaviour of AMG
      const Parameters& parameters() const
      {
        return amg_parameters;
      }

      //! set whether the AMG should be reused during the next call to apply()
      void setReuse(bool reuse_)
      {
        reuse = reuse_;
      }

      //! return whether the AMG is reused during calls to apply()
      bool getReuse() const
      {
        return reuse;
      }

      //! Constructor
      ISTLBackend_OVLP_AMG_4_DG(DGGO& dggo_, const DGCC& dgcc_, CGGFS& cggfs_, const CGCC& cgcc_,
                                unsigned maxiter_=5000, int verbose_=1, bool reuse_=false,
                                bool usesuperlu_=true)
        : Dune::PDELab::OVLPScalarProductImplementation<typename DGGO::Traits::TrialGridFunctionSpace>(dggo_.trialGridFunctionSpace())
        , gfs(dggo_.trialGridFunctionSpace())
        , dggo(dggo_)
        , dgcc(dgcc_)
        , cggfs(cggfs_)
        , cgcc(cgcc_)
        , amg_parameters(15,2000)
        , maxiter(maxiter_)
        , verbose(verbose_)
        , reuse(reuse_)
        , firstapply(true)
        , usesuperlu(usesuperlu_)
        , low_order_space_entries_per_row(StaticPower<3,GFS::Traits::GridView::dimension>::power)
        , cgtodglop()
        , pgo(cggfs,dggo.trialGridFunctionSpace(),cgtodglop,MBE(low_order_space_entries_per_row))
        , pmatrix(pgo)
        , acg(Backend::attached_container())
      {
        amg_parameters.setDefaultValuesIsotropic(GFS::Traits::GridViewType::Traits::Grid::dimension);
        amg_parameters.setDebugLevel(verbose_);
#if !HAVE_SUPERLU
        if (usesuperlu == true)
          {
            if (gfs.gridView().comm().rank()==0)
              std::cout << "WARNING: You are using AMG without SuperLU!"
                        << " Please consider installing SuperLU,"
                        << " or set the usesuperlu flag to false"
                        << " to suppress this warning." << std::endl;
          }
#endif

        // assemble prolongation matrix; this will not change from one apply to the next
        pmatrix = 0.0;
        if (verbose>0 && gfs.gridView().comm().rank()==0)
          std::cout << "allocated prolongation matrix of size " << pmatrix.N() << " x " << pmatrix.M() << std::endl;
        CGV cgx(cggfs,0.0); // need vector to call jacobian
        pgo.jacobian(cgx,pmatrix);
      }


      //! Constructor that reads its parameters from a ParameterTree
      ISTLBackend_OVLP_AMG_4_DG(DGGO& dggo_, const DGCC& dgcc_, CGGFS& cggfs_, const CGCC& cgcc_,
                                const ParameterTree& params)
        : Dune::PDELab::OVLPScalarProductImplementation<typename DGGO::Traits::TrialGridFunctionSpace>(dggo_.trialGridFunctionSpace())
        , gfs(dggo_.trialGridFunctionSpace())
        , dggo(dggo_)
        , dgcc(dgcc_)
        , cggfs(cggfs_)
        , cgcc(cgcc_)
        , amg_parameters(15,2000)
        , maxiter(params.get<int>("max_iterations",5000))
        , verbose(params.get<int>("verbose",1))
        , reuse(params.get<bool>("reuse",false))
        , firstapply(true)
        , usesuperlu(params.get<bool>("use_superlu",true))
        , low_order_space_entries_per_row(params.get<std::size_t>("low_order_space.entries_per_row",StaticPower<3,GFS::Traits::GridView::dimension>::power))
        , cgtodglop()
        , pgo(cggfs,dggo.trialGridFunctionSpace(),cgtodglop,MBE(low_order_space_entries_per_row))
        , pmatrix(pgo)
        , acg(Backend::attached_container())
      {
        amg_parameters.setDefaultValuesIsotropic(GFS::Traits::GridViewType::Traits::Grid::dimension);
        amg_parameters.setDebugLevel(params.get<int>("verbose",1));
#if !HAVE_SUPERLU
        if (usesuperlu == true)
          {
            if (gfs.gridView().comm().rank()==0)
              std::cout << "WARNING: You are using AMG without SuperLU!"
                        << " Please consider installing SuperLU,"
                        << " or set the usesuperlu flag to false"
                        << " to suppress this warning." << std::endl;
          }
#endif

        // assemble prolongation matrix; this will not change from one apply to the next
        pmatrix = 0.0;
        if (verbose>0 && gfs.gridView().comm().rank()==0)
          std::cout << "allocated prolongation matrix of size " << pmatrix.N() << " x " << pmatrix.M() << std::endl;
        CGV cgx(cggfs,0.0); // need vector to call jacobian
        pgo.jacobian(cgx,pmatrix);
      }


      //! solve the given linear system
      void apply (M& A, V& z, V& r, typename Dune::template FieldTraits<typename V::ElementType >::real_type reduction)
      {
        using Backend::native;
        // make operator and scalar product for overlapping solver
        typedef Dune::PDELab::OverlappingOperator<DGCC,M,V,V> POP;
        POP pop(dgcc,A);
        typedef Dune::PDELab::OVLPScalarProduct<GFS,V> PSP;
        PSP psp(*this);

        // compute CG matrix
        // make grid operator with empty local operator => matrix data type and constraints assembly
        EmptyLop emptylop;
        CGGO cggo(cggfs,cgcc,cggfs,cgcc,emptylop,MBE(low_order_space_entries_per_row));

        // do triple matrix product ACG = P^T ADG P; this is purely local
        Dune::Timer watch;
        watch.reset();
        // only do triple matrix product if the matrix changes
        double triple_product_time = 0.0;
        // no need to set acg back to zero here, this is done in matMultMat
        if(reuse == false || firstapply == true) {
          PTADG ptadg;
          Dune::transposeMatMultMat(ptadg,native(pmatrix),native(A)); // 1a
          //Dune::transposeMatMultMat(ptadg,native(pmatrix),native(A2)); // 1b
          Dune::matMultMat(native(acg),ptadg,native(pmatrix));
          triple_product_time = watch.elapsed();
          if (verbose>0 && gfs.gridView().comm().rank()==0)
            std::cout << "=== triple matrix product " << triple_product_time << " s" << std::endl;
          //Dune::printmatrix(std::cout,native(acg),"triple product matrix","row",10,2);
          CGV cgx(cggfs,0.0); // need vector to call jacobian
          cggo.jacobian(cgx,acg); // insert trivial rows at processor boundaries
          //std::cout << "CG constraints: " << cgcc.size() << " out of " << cggfs.globalSize() << std::endl;
        }
        else if(verbose>0 && gfs.gridView().comm().rank()==0)
          std::cout << "=== reuse CG matrix, SKIPPING triple matrix product " << std::endl;

        // NOW we need to insert the processor boundary conditions in the DG matrix
        typedef Dune::PDELab::GridOperator<GFS,GFS,EmptyLop,MBE,field_type,field_type,field_type,DGCC,DGCC> DGGOEmpty;
        DGGOEmpty dggoempty(gfs,dgcc,gfs,dgcc,emptylop,MBE(1 << GFS::Traits::GridView::dimension));
        dggoempty.jacobian(z,A);

        // and in the residual
        Dune::PDELab::set_constrained_dofs(dgcc,0.0,r);

        // now set up parallel AMG solver for the CG subspace
        Comm oocc(gfs.gridView().comm());
        typedef Dune::PDELab::ISTL::ParallelHelper<CGGFS> CGHELPER;
        CGHELPER cghelper(cggfs,2);
        cghelper.createIndexSetAndProjectForAMG(acg,oocc);
        ParCGOperator paroop(native(acg),oocc);
        Dune::OverlappingSchwarzScalarProduct<CGVector,Comm> sp(oocc);

        typedef typename Dune::Amg::SmootherTraits<ParSmoother>::Arguments SmootherArgs;
        SmootherArgs smootherArgs;
        smootherArgs.iterations = 1;
        smootherArgs.relaxationFactor = 1.0;
        typedef Dune::Amg::CoarsenCriterion<Dune::Amg::SymmetricCriterion<CGMatrix,Dune::Amg::FirstDiagonal> > Criterion;
        Criterion criterion(amg_parameters);
        watch.reset();

        // only construct a new AMG for the CG-subspace if the matrix changes
        double amg_setup_time = 0.0;
        if(reuse == false || firstapply == true) {
          amg.reset(new AMG(paroop,criterion,smootherArgs,oocc));
          firstapply = false;
          amg_setup_time = watch.elapsed();
          if (verbose>0 && gfs.gridView().comm().rank()==0)
            std::cout << "=== AMG setup " << amg_setup_time << " s" << std::endl;
        }
        else if (verbose>0 && gfs.gridView().comm().rank()==0)
          std::cout << "=== reuse CG matrix, SKIPPING AMG setup " << std::endl;

        // set up hybrid DG/CG preconditioner
        typedef DGPrec<Matrix,Vector,Vector,1> DGPrecType;
        DGPrecType dgprec(native(A),1,0.92);
        //DGPrecType dgprec(native(A),0.92);
        typedef Dune::PDELab::ISTL::ParallelHelper<GFS> DGHELPER;
        typedef OvlpDGAMGPrec<GFS,Matrix,DGPrecType,DGCC,CGGFS,AMG,CGCC,P,DGHELPER,Comm> HybridPrec;
        HybridPrec hybridprec(gfs,native(A),dgprec,dgcc,cggfs,*amg,cgcc,native(pmatrix),
                              this->parallelHelper(),oocc,3,3);

        // /********/
        // /* Test */
        // /********/
        // Solver<CGVector> testsolver(paroop,sp,amg,1e-8,100,2);
        // CGV cgxx(cggfs,0.0);
        // CGV cgdd(cggfs,1.0);
        // Dune::InverseOperatorResult statstat;
        // testsolver.apply(native(cgxx),native(cgdd),statstat);
        // /********/

        // set up solver
        int verb=verbose;
        if (gfs.gridView().comm().rank()>0) verb=0;
        Solver<V> solver(pop,psp,hybridprec,reduction,maxiter,verb);

        // solve
        Dune::InverseOperatorResult stat;
        watch.reset();
        solver.apply(z,r,stat);
        double amg_solve_time = watch.elapsed();
        if (verbose>0 && gfs.gridView().comm().rank()==0)
          std::cout << "=== Hybrid total solve time " << amg_solve_time+amg_setup_time+triple_product_time << " s" << std::endl;
        res.converged  = stat.converged;
        res.iterations = stat.iterations;
        res.elapsed    = amg_solve_time+amg_setup_time+triple_product_time;
        res.reduction  = stat.reduction;
        res.conv_rate  = stat.conv_rate;
      }

    };

  } // namespace PDELab
} // namespace Dune

#endif // DUNE_PDELAB_BACKEND_ISTL_OVLP_AMG_DG_BACKEND_HH
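
A minimal usage sketch (not part of the header above), showing roughly how this backend can be plugged into a PDELab solve. All application-side names (DGGO/dggo, DGCC/dgcc, CGGFS/cggfs, CGCC/cgcc, TransferLOP, the vector type V and coefficient vector x) are assumed to be set up elsewhere; SeqSSOR and BiCGSTABSolver are example choices for the DG smoother and the outer Krylov solver, not requirements.

// assumed to be available: #include <dune/pdelab/stationary/linearproblem.hh>
// hypothetical application types/objects: DGGO dggo, DGCC dgcc, CGGFS cggfs,
// CGCC cgcc, TransferLOP (CG-to-DG transfer local operator), V x
typedef Dune::PDELab::ISTLBackend_OVLP_AMG_4_DG<
  DGGO, DGCC, CGGFS, CGCC, TransferLOP,
  Dune::SeqSSOR,         // smoother applied on the DG level
  Dune::BiCGSTABSolver   // outer Krylov solver
  > LS;
LS ls(dggo, dgcc, cggfs, cgcc, /*maxiter=*/5000, /*verbose=*/1);

// drive the linear solve with the standard stationary linear problem solver
typedef Dune::PDELab::StationaryLinearProblemSolver<DGGO,LS,V> SLP;
SLP slp(dggo, ls, x, /*reduction=*/1e-10);
slp.apply();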