
dune-composites (unstable)

hypreinterface.hh
/* *************************************************** *
 * Hypre solver interface
 * *************************************************** */
#ifndef HYPREINTERFACE_HH
#define HYPREINTERFACE_HH

#if HAVE_HYPRE

#include <vector>
#include <string>
#include <dune/common/parallel/communication.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/pdelab/gridoperator/gridoperator.hh>
#include <dune/grid/common/indexidset.hh>
#include <dune/grid/common/gridview.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/solvers.hh>

#include <HYPRE.h>
#include <HYPRE_IJ_mv.h>
#include <HYPRE_parcsr_ls.h>
#include <HYPRE_krylov.h>

#include "globalDOFNumbering.hh"

/* *************************************************** *
 * BoomerAMG parameter defaults
 * *************************************************** */
class HypreParameters {
  public:
    int dim = 3; // For blocked aggregation, gives the size of a block
    /*! Type of coarsening strategy
         0  CLJP-coarsening (a parallel coarsening algorithm using independent sets)
         1  classical Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!)
         3  classical Ruge-Stueben coarsening on each processor, followed by a third pass, which adds coarse points on the boundaries
         6  Falgout coarsening (uses 1 first, followed by CLJP using the interior coarse points generated by 1 as its first independent set)
         7  CLJP-coarsening (using a fixed random vector, for debugging purposes only)
         8  PMIS-coarsening (a parallel coarsening algorithm using independent sets, generating lower complexities than CLJP, but possibly slower convergence)
         9  PMIS-coarsening (using a fixed random vector, for debugging purposes only)
        10  HMIS-coarsening (one-pass Ruge-Stueben on each processor independently, followed by PMIS using the interior C-points generated as its first independent set)
        11  one-pass Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!)
        21  CGC coarsening by M. Griebel, B. Metsch and A. Schweitzer
        22  CGC-E coarsening by M. Griebel, B. Metsch and A. Schweitzer
    */
    int coarsentype = 10;

    // Interpolation operator
    int interptype = 6;
    // Maximum number of elements per row of the interpolation operator
    int pmaxelmts = 1000;
    // Number of levels of aggressive coarsening
    int aggnumlevels = 0;
    /*! Defines the smoother
        0  Jacobi
        1  Gauss-Seidel, sequential (very slow!)
        2  Gauss-Seidel, interior points in parallel, boundary sequential (slow!)
        3  hybrid Gauss-Seidel or SOR, forward solve
        4  hybrid Gauss-Seidel or SOR, backward solve
        5  hybrid chaotic Gauss-Seidel (works only with OpenMP)
        6  hybrid symmetric Gauss-Seidel or SSOR
        9  Gaussian elimination (only on the coarsest level)
    */
    int relaxtype = 6;
    /*! Defines the order in which the points are relaxed
        0  lexicographic
        1  CF relaxation
    */
    int relaxorder = 0;
    // Strength threshold used by the coarsening algorithm
    double strongthreshold = 0.75;
    // BoomerAMG verbosity
    int printlevel = 1;
    // Maximum number of multigrid levels
    int maxlevel = 15;
    // Relaxation type on the coarsest level (see relaxtype)
    int coarsesolver = 6;
    // Number of relaxation sweeps on the coarsest level
    int ncoarserelax = 3;
};
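
/* A minimal tuning sketch (not part of the interface): HypreSolver below
 * creates its own HypreParameters instance with the defaults above, so to
 * experiment with other settings one would change the defaults here. For
 * example, the calls made in the constructor would then pick up:
 *
 *   HypreParameters hypre_param;
 *   hypre_param.coarsentype = 8;    // PMIS: lower operator complexity
 *   hypre_param.relaxtype   = 3;    // forward hybrid Gauss-Seidel/SOR
 *   HYPRE_BoomerAMGSetCoarsenType(prec, hypre_param.coarsentype);
 *   HYPRE_BoomerAMGSetRelaxType(prec, hypre_param.relaxtype);
 */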


/* *************************************************** *
 * Solver class
 * *************************************************** */
template <typename GFS, typename Matrix, typename U>
class HypreSolver : public Dune::PDELab::LinearResultStorage {
  public:
    // Number of rows and columns in each block of the matrix
    static constexpr int mBlock = Matrix::block_type::rows;
    static constexpr int nBlock = Matrix::block_type::cols;

    /* Constructor */
    HypreSolver (const GFS& gfs_, U& u_, Matrix& a_, U& b_,
                 const double tolerance_=1e-6) :
      gfs(gfs_), u(u_), b(b_), a(a_),
      gv(gfs_.gridView()),
      tolerance(tolerance_),
      maxiter(5000),
      ParCSRPCG_printlevel(3),
      globalNumbering(gfs_) {
      HypreParameters hypre_param;

      // Set up the BoomerAMG preconditioner: one V-cycle per PCG iteration.
      // (The HYPRE manual recommends a tolerance of 0.0 when BoomerAMG is
      // used as a preconditioner.)
      HYPRE_BoomerAMGCreate(&prec);
      HYPRE_BoomerAMGSetTol(prec, tolerance);
      HYPRE_BoomerAMGSetMaxIter(prec, 1);

      HYPRE_BoomerAMGSetNumFunctions(prec, hypre_param.dim);

      HYPRE_BoomerAMGSetInterpType(prec, hypre_param.interptype);
      HYPRE_BoomerAMGSetPMaxElmts(prec, hypre_param.pmaxelmts);
      HYPRE_BoomerAMGSetCoarsenType(prec, hypre_param.coarsentype);
      HYPRE_BoomerAMGSetAggNumLevels(prec, hypre_param.aggnumlevels);
      HYPRE_BoomerAMGSetRelaxType(prec, hypre_param.relaxtype);
      HYPRE_BoomerAMGSetRelaxOrder(prec, hypre_param.relaxorder);
      HYPRE_BoomerAMGSetMaxLevels(prec, hypre_param.maxlevel);
      // Sweeps and relaxation type on the coarsest level (cycle index 3)
      HYPRE_BoomerAMGSetCycleNumSweeps(prec, hypre_param.ncoarserelax, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(prec, hypre_param.coarsesolver, 3);
      HYPRE_BoomerAMGSetPrintLevel(prec, hypre_param.printlevel);
      HYPRE_BoomerAMGSetStrongThreshold(prec, hypre_param.strongthreshold);

      // Set up the outer preconditioned CG solver
      HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, &solver);
      HYPRE_ParCSRPCGSetTol(solver, tolerance);
      HYPRE_ParCSRPCGSetMaxIter(solver, maxiter);
      HYPRE_ParCSRPCGSetPrintLevel(solver, ParCSRPCG_printlevel);
      HYPRE_PCGSetTwoNorm(solver, 1);

      HYPRE_ParCSRPCGSetPrecond(solver,
                                HYPRE_BoomerAMGSolve,
                                HYPRE_BoomerAMGSetup,
                                prec);
    }

    /* Destructor */
    ~HypreSolver() {
      HYPRE_BoomerAMGDestroy(prec);
      HYPRE_ParCSRPCGDestroy(solver);
      // a_hypre and b_hypre are only created in solve(); guard against
      // destroying handles that were never assembled.
      if (b_hypre) HYPRE_IJVectorDestroy(b_hypre);
      if (a_hypre) HYPRE_IJMatrixDestroy(a_hypre);
    }

    // Solve for a given RHS
    void solve(U& u) {
      Dune::Timer watch;

      // Assemble hypre data structures
      AssembleHypreMatrix(a, a_hypre);
      AssembleHypreVector(b, b_hypre);
      HYPRE_IJVector u_hypre;
      AssembleHypreVector(u, u_hypre);
      if (gv.comm().rank() == 0)
        std::cout << " assembled hypre matrices" << std::endl;
      // Extract the underlying ParCSR data
      HYPRE_ParCSRMatrix a_parcsr;
      HYPRE_ParVector b_parcsr;
      HYPRE_ParVector u_parcsr;
      HYPRE_IJMatrixGetObject(a_hypre, (void **) &a_parcsr);
      HYPRE_IJVectorGetObject(b_hypre, (void **) &b_parcsr);
      HYPRE_IJVectorGetObject(u_hypre, (void **) &u_parcsr);
      if (gv.comm().rank() == 0)
        std::cout << " assembled ParCSR hypre matrices" << std::endl;

      // Setup and solve
      watch.reset();
      HYPRE_ParCSRPCGSetup(solver, a_parcsr, b_parcsr, u_parcsr);
      double timeBoomerAMGSetup = watch.elapsed();
      timeBoomerAMGSetup = gv.comm().max(timeBoomerAMGSetup);
      watch.reset();
      HYPRE_ParCSRPCGSolve(solver, a_parcsr, b_parcsr, u_parcsr);
      double timeBoomerAMGSolve = watch.elapsed();
      timeBoomerAMGSolve = gv.comm().max(timeBoomerAMGSolve);

      // Write the solution back to the dune vector
      ReadHypreVector(u_hypre, u);
      HYPRE_IJVectorDestroy(u_hypre);
      HYPRE_Int iterations;
      double resreduction;
      HYPRE_ParCSRPCGGetNumIterations(solver, &iterations);
      HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(solver, &resreduction);

      // Store solver statistics
      res.converged = (resreduction <= tolerance);
      res.iterations = iterations;
      res.reduction = resreduction;
      res.elapsed = timeBoomerAMGSolve;
      res.conv_rate = pow(resreduction, 1./iterations);
    }
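
    /* Usage sketch (assuming gfs, a, u and b have been set up elsewhere):
     *
     *   HypreSolver<GFS, Matrix, U> solver(gfs, u, a, b);
     *   solver.solve(u);
     *   auto stats = solver.result(); // from Dune::PDELab::LinearResultStorage
     *   std::cout << stats.iterations << " iterations" << std::endl;
     */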

  private:
    /* *************************************************** *
     * Assemble the hypre matrix from the dune matrix a
     * *************************************************** */
    void AssembleHypreMatrix(const Matrix& a, HYPRE_IJMatrix& hypre_matrix) {
      HYPRE_Int ilower, iupper;
      HYPRE_Int jlower, jupper;
      // Row/column ranges owned by this rank (assumes square blocks,
      // i.e. mBlock == nBlock)
      ilower = mBlock*globalNumbering.globalIndexOffset();
      iupper = mBlock*(globalNumbering.globalIndexOffset() + globalNumbering.numberOfLocalDOFs()) - 1;
      jlower = ilower;
      jupper = iupper;
      HYPRE_IJMatrixCreate(gv.comm(),
                           ilower, iupper,
                           jlower, jupper,
                           &hypre_matrix);
      HYPRE_IJMatrixSetObjectType(hypre_matrix, HYPRE_PARCSR);
      HYPRE_IJMatrixInitialize(hypre_matrix);
      using Dune::PDELab::Backend::native;
      int nrows = 1;
      HYPRE_Int ncol = 1;

      // Insert entries one at a time; only the rank that claims a row writes it
      for (auto row_iter = native(a).begin(); row_iter != native(a).end(); row_iter++) {
        if (globalNumbering.claimedByRank(row_iter.index()) == gv.comm().rank()) {
          for (auto col_iter = row_iter->begin(); col_iter != row_iter->end(); col_iter++) {
            for (int block_i = 0; block_i < mBlock; block_i++) {   // iterate over block rows
              for (int block_j = 0; block_j < nBlock; block_j++) { // iterate over block columns
                HYPRE_Int row_idx = mBlock*globalNumbering.globalIndex(row_iter.index()) + block_i;
                HYPRE_Int col_idx = nBlock*globalNumbering.globalIndex(col_iter.index()) + block_j;
                double value = (*col_iter)[block_i][block_j];
                HYPRE_IJMatrixSetValues(hypre_matrix, nrows, &ncol, &row_idx, &col_idx, &value);
              }
            }
          }
        }
      }
      HYPRE_IJMatrixAssemble(hypre_matrix);
    }
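
    /* A possible optimisation (sketch, not used above): HYPRE_IJMatrixSetValues
     * accepts a whole row at a time, so the entries of one matrix row could be
     * collected into arrays and inserted with a single call:
     *
     *   std::vector<HYPRE_Int> cols;   // column indices of one matrix row
     *   std::vector<double>    vals;   // corresponding values
     *   // ... fill cols/vals for row_idx ...
     *   HYPRE_Int ncols = cols.size();
     *   HYPRE_IJMatrixSetValues(hypre_matrix, 1, &ncols, &row_idx,
     *                           cols.data(), vals.data());
     */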
240 /* *************************************************** *
241 * Assemble hypre vector given vector u and return
242 * *************************************************** */
243void AssembleHypreVector(const U& u, HYPRE_IJVector& u_hypre) {
244
245 HYPRE_Int jlower = nBlock*globalNumbering.globalIndexOffset();
246 HYPRE_Int jupper = nBlock*(globalNumbering.globalIndexOffset() + globalNumbering.numberOfLocalDOFs()) -1;
247 HYPRE_IJVectorCreate(gv.comm(), jlower, jupper, &u_hypre);
248 HYPRE_IJVectorSetObjectType(u_hypre, HYPRE_PARCSR);
249 HYPRE_IJVectorInitialize(u_hypre);
250 using Dune::PDELab::Backend::native;
251 int nvalues=1;
252 for (unsigned int i = 0; i < u.N(); i++) {
253 if(globalNumbering.claimedByRank(i) == gv.comm().rank()){
254 for(int block_i = 0; block_i < mBlock; block_i++){ //iterator over block values
255 HYPRE_Int indices = nBlock*globalNumbering.globalIndex(i)+block_i;
256 double values = native(u)[i][block_i];
257 HYPRE_IJVectorSetValues(u_hypre, nvalues, &indices, &values);
258 }
259 }
260 }
261 HYPRE_IJVectorAssemble(u_hypre);
262}

    /* *************************************************** *
     * Extract from hypre vector u and return
     * *************************************************** */
    void ReadHypreVector(const HYPRE_IJVector& u_hypre, U& u) {
      int nvalues = 1;
      double values;
      using Dune::PDELab::Backend::native;
      for (unsigned int i = 0; i < u.N(); i++) {
        if (globalNumbering.claimedByRank(i) == gv.comm().rank()) {
          // Owned entries: read back from hypre
          for (int block_i = 0; block_i < mBlock; block_i++) {
            HYPRE_Int indices = nBlock*globalNumbering.globalIndex(i) + block_i;
            HYPRE_IJVectorGetValues(u_hypre, nvalues, &indices, &values);
            native(u)[i][block_i] = values;
          }
        } else {
          // Non-owned entries: zero them so that the additive communication
          // below leaves exactly the owning rank's value on every process
          for (int block_i = 0; block_i < mBlock; block_i++) {
            native(u)[i][block_i] = 0.0;
          }
        }
      }
      Dune::PDELab::AddDataHandle<GFS,U> adddh(gfs, u);
      gv.communicate(adddh, Dune::All_All_Interface, Dune::ForwardCommunication);
    }
    /* *************************************************** *
     * Class data
     * *************************************************** */

    const GFS& gfs;  // Grid function space
    U& u;            // Point where the Jacobian is evaluated
    U b;             // RHS (copied)
    Matrix& a;       // System matrix

    typedef typename GFS::Traits::GridView GV; // grid view of the trial grid function space
    const GV gv;

    double tolerance;    // ParCSRPCG solver tolerance
    HYPRE_Int maxiter;   // ParCSRPCG maximum number of iterations
    int ParCSRPCG_printlevel;

    GlobalNumbering<U, GFS> globalNumbering;

    HYPRE_IJMatrix a_hypre = nullptr; // Hypre matrix storage, created in solve()
    HYPRE_IJVector b_hypre = nullptr;

    HYPRE_Solver prec;
    HYPRE_Solver solver;
};

#endif // HAVE_HYPRE

#endif // HYPREINTERFACE_HH
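
A minimal usage sketch (not part of the header): assuming a PDELab grid function space, an assembled ISTL matrix and vectors of matching backend types, the solver might be driven as below. The function name solveWithHypre and the template parameters GFS, Matrix and U are placeholders for whatever the surrounding application defines.

// Sketch only: GFS, Matrix and U are application-defined placeholder types.
#include <iostream>
#include "hypreinterface.hh"

template <typename GFS, typename Matrix, typename U>
void solveWithHypre(const GFS& gfs, Matrix& a, U& u, U& b) {
  // Construct the solver: sets up BoomerAMG-preconditioned ParCSR PCG
  HypreSolver<GFS, Matrix, U> solver(gfs, u, a, b, 1e-8);
  // Assemble the hypre data structures and run the preconditioned CG
  solver.solve(u);
  // Solver statistics inherited from Dune::PDELab::LinearResultStorage
  auto stats = solver.result();
  if (gfs.gridView().comm().rank() == 0)
    std::cout << "converged: " << stats.converged
              << ", iterations: " << stats.iterations << std::endl;
}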