dune-composites (unstable)

hypreinterface_sequential.hh
/* *************************************************** *
 * Hypre solver interface
 * *************************************************** */
#ifndef HYPREINTERFACE_SEQUENTIAL_HH
#define HYPREINTERFACE_SEQUENTIAL_HH

#if HAVE_HYPRE

#include <cmath>
#include <iostream>
#include <vector>
#include <string>
#include <dune/common/timer.hh>
#include <dune/common/parallel/communication.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/pdelab/gridoperator/gridoperator.hh>
#include <dune/grid/common/indexidset.hh>
#include <dune/grid/common/gridview.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/solvers.hh>

#include <HYPRE.h>
#include <HYPRE_IJ_mv.h>
#include <HYPRE_parcsr_ls.h>
#include <HYPRE_krylov.h>

#include "globalDOFNumbering.hh"

class SequentialHypreParameters {
 public:
  // TODO: set to number of input EV
  int dim = 1; // For blocked aggregation, gives the size of a block
  /*! Type of coarsening strategy
      0  CLJP-coarsening (a parallel coarsening algorithm using independent sets)
      1  classical Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!)
      3  classical Ruge-Stueben coarsening on each processor, followed by a third pass, which adds coarse points on the boundaries
      6  Falgout coarsening (uses 1 first, followed by CLJP using the interior coarse points generated by 1 as its first independent set)
      7  CLJP-coarsening (using a fixed random vector, for debugging purposes only)
      8  PMIS-coarsening (a parallel coarsening algorithm using independent sets, generating lower complexities than CLJP, but might also lead to slower convergence)
      9  PMIS-coarsening (using a fixed random vector, for debugging purposes only)
     10  HMIS-coarsening (uses one-pass Ruge-Stueben on each processor independently, followed by PMIS using the interior C-points generated as its first independent set)
     11  one-pass Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!)
     21  CGC coarsening by M. Griebel, B. Metsch and A. Schweitzer
     22  CGC-E coarsening by M. Griebel, B. Metsch and A. Schweitzer
  */
  int coarsentype = 0;

  //! Type of interpolation operator
  int interptype = 0;
  //! Maximal number of elements per row for interpolation
  int pmaxelmts = 0;
  //! Number of levels of aggressive coarsening
  int aggnumlevels = 2;
  /*! Defines the smoother
      0  Jacobi
      1  Gauss-Seidel, sequential (very slow!)
      2  Gauss-Seidel, interior points in parallel, boundary sequential (slow!)
      3  hybrid Gauss-Seidel or SOR, forward solve
      4  hybrid Gauss-Seidel or SOR, backward solve
      5  hybrid chaotic Gauss-Seidel (works only with OpenMP)
      6  hybrid symmetric Gauss-Seidel or SSOR
      9  Gaussian elimination (only on the coarsest level)
  */
  int relaxtype = 6;
  /*! Defines the order in which the points are relaxed
      0  lexicographic
      1  CF relaxation
  */
  int relaxorder = 1;
  //! Strength threshold used by the coarsening
  double strongthreshold = 0.9;
  //! BoomerAMG print level
  int printlevel = 0;
  //! Maximal number of AMG levels
  int maxlevel = 5;
  //! Relaxation type on the coarsest level (same enumeration as relaxtype)
  int coarsesolver = 6;
  //! Number of relaxation sweeps on the coarsest level
  int ncoarserelax = 1;
};
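
/* Usage sketch (illustrative, not part of the original interface): the
 * defaults above can be overridden before they are passed on to BoomerAMG,
 * e.g. to try PMIS coarsening with a forward hybrid Gauss-Seidel smoother:
 *
 *   SequentialHypreParameters hypre_param;
 *   hypre_param.coarsentype = 8; // PMIS
 *   hypre_param.relaxtype   = 3; // hybrid Gauss-Seidel or SOR, forward solve
 *
 * Note that SequentialHypreSolver below currently instantiates its own
 * default-constructed SequentialHypreParameters.
 */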

/* *************************************************** *
 * Solver class
 * *************************************************** */
template <typename GV, typename Matrix, typename U>
class SequentialHypreSolver : public Dune::PDELab::LinearResultStorage {
 public:
  // Number of rows and columns in each block of the matrix
  static constexpr int mBlock = Matrix::block_type::rows;
  static constexpr int nBlock = Matrix::block_type::cols;

  /* Constructor */
  SequentialHypreSolver (GV& gv_, U& u_, Matrix& a_, U& b_,
                         const double tolerance_) :
    gv(gv_),
    u(u_), b(b_), a(a_),
    tolerance(tolerance_),
    maxiter(200),
    ParCSRPCG_printlevel(0) {
    SequentialHypreParameters hypre_param;

    // Set up the BoomerAMG preconditioner: used as a preconditioner it
    // performs exactly one V-cycle per application, so tolerance 0, maxiter 1
    HYPRE_BoomerAMGCreate(&prec);
    HYPRE_BoomerAMGSetTol(prec, 0);
    HYPRE_BoomerAMGSetMaxIter(prec, 1);

    HYPRE_BoomerAMGSetNumFunctions(prec, hypre_param.dim);

    HYPRE_BoomerAMGSetInterpType(prec, hypre_param.interptype);
    HYPRE_BoomerAMGSetPMaxElmts(prec, hypre_param.pmaxelmts);
    HYPRE_BoomerAMGSetCoarsenType(prec, hypre_param.coarsentype);
    HYPRE_BoomerAMGSetAggNumLevels(prec, hypre_param.aggnumlevels);
    HYPRE_BoomerAMGSetRelaxType(prec, hypre_param.relaxtype);
    HYPRE_BoomerAMGSetRelaxOrder(prec, hypre_param.relaxorder);
    HYPRE_BoomerAMGSetMaxLevels(prec, hypre_param.maxlevel);
    // Cycle position 3 addresses the coarsest level
    HYPRE_BoomerAMGSetCycleNumSweeps(prec, hypre_param.ncoarserelax, 3);
    HYPRE_BoomerAMGSetCycleRelaxType(prec, hypre_param.coarsesolver, 3);
    HYPRE_BoomerAMGSetPrintLevel(prec, hypre_param.printlevel);
    HYPRE_BoomerAMGSetStrongThreshold(prec, hypre_param.strongthreshold);

    // Set up the PCG solver, preconditioned with BoomerAMG
    HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, &solver);
    HYPRE_ParCSRPCGSetTol(solver, tolerance);
    HYPRE_ParCSRPCGSetMaxIter(solver, maxiter);
    HYPRE_ParCSRPCGSetPrintLevel(solver, ParCSRPCG_printlevel);
    HYPRE_PCGSetTwoNorm(solver, 1);

    HYPRE_ParCSRPCGSetPrecond(solver,
                              HYPRE_BoomerAMGSolve,
                              HYPRE_BoomerAMGSetup,
                              prec);

    // Split the communicator into sequential (single-rank) parts and use the
    // original rank for ordering
    MPI_Comm_split(gv.comm(), gv.comm().rank(), gv.comm().rank(), &row_comm);

    // Assemble hypre data structures
    AssembleHypreMatrix(a, a_hypre);
    AssembleHypreVector(b, b_hypre);
    AssembleHypreVector(u, u_hypre);
    // Extract ParCSR data
    HYPRE_IJMatrixGetObject(a_hypre, (void**) &a_parcsr);
    HYPRE_IJVectorGetObject(b_hypre, (void**) &b_parcsr);
    HYPRE_IJVectorGetObject(u_hypre, (void**) &u_parcsr);
  }

  /* Destructor */
  ~SequentialHypreSolver() {
    HYPRE_BoomerAMGDestroy(prec);
    HYPRE_ParCSRPCGDestroy(solver);
    HYPRE_IJVectorDestroy(u_hypre); // destroyed here rather than in solve()
    HYPRE_IJVectorDestroy(b_hypre);
    HYPRE_IJMatrixDestroy(a_hypre);
    MPI_Comm_free(&row_comm);
  }

  // Solve for a given RHS
  void solve(U& u) {
    Dune::Timer watch;

    // Setup and solve
    watch.reset();
    HYPRE_ParCSRPCGSetup(solver, a_parcsr, b_parcsr, u_parcsr);
    double timeBoomerAMGSetup = watch.elapsed();
    timeBoomerAMGSetup = gv.comm().max(timeBoomerAMGSetup);
    watch.reset();

    HYPRE_ParCSRPCGSolve(solver, a_parcsr, b_parcsr, u_parcsr);
    double timeBoomerAMGSolve = watch.elapsed();
    timeBoomerAMGSolve = gv.comm().max(timeBoomerAMGSolve);
    MPI_Barrier(gv.comm());

    // Write the solution back to the dune vector
    ReadHypreVector(u_hypre, u);
    HYPRE_Int iterations;
    double resreduction;
    HYPRE_ParCSRPCGGetNumIterations(solver, &iterations);
    HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(solver, &resreduction);

    // Store solver statistics
    res.converged = (resreduction <= tolerance);
    if (gv.comm().rank() == 0)
      std::cout << " Converged to " << resreduction << " in " << iterations << " iterations." << std::endl;
    res.iterations = iterations;
    res.reduction = resreduction;
    res.elapsed = timeBoomerAMGSolve;
    res.conv_rate = std::pow(resreduction, 1./iterations);
  }

  /* Read solver tolerance */
  double Tolerance() const { return tolerance; }

 private:
  /* *************************************************** *
   * Assemble hypre matrix given matrix a and return
   * *************************************************** */
  void AssembleHypreMatrix(const Matrix& a, HYPRE_IJMatrix& hypre_matrix) {
    // Inclusive global row and column bounds of the matrix
    HYPRE_Int ilower, iupper;
    HYPRE_Int jlower, jupper;
    ilower = 0;
    iupper = mBlock*a.N() - 1; // mBlock scalar rows per block row, inclusive bound
    jlower = ilower;
    jupper = iupper;           // square system

    HYPRE_IJMatrixCreate(row_comm,
                         ilower, iupper,
                         jlower, jupper,
                         &hypre_matrix);
    HYPRE_IJMatrixSetObjectType(hypre_matrix, HYPRE_PARCSR);
    HYPRE_IJMatrixInitialize(hypre_matrix);
    using Dune::PDELab::Backend::native;
    HYPRE_Int nrows = 1;
    HYPRE_Int ncol = 1;

    // Copy the matrix entry by entry, flattening block indices to scalar ones
    for (auto row_iter = native(a).begin(); row_iter != native(a).end(); row_iter++) {
      for (auto col_iter = row_iter->begin(); col_iter != row_iter->end(); col_iter++) {
        for (int block_i = 0; block_i < mBlock; block_i++) {   // row within the block
          for (int block_j = 0; block_j < nBlock; block_j++) { // column within the block
            HYPRE_Int row_idx = mBlock*row_iter.index() + block_i;
            HYPRE_Int col_idx = nBlock*col_iter.index() + block_j;
            double value = (*col_iter)[block_i][block_j];
            HYPRE_IJMatrixSetValues(hypre_matrix, nrows, &ncol, &row_idx, &col_idx, &value);
          }
        }
      }
    }
    HYPRE_IJMatrixAssemble(hypre_matrix);
  }
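
  /* Note: the loop above calls HYPRE_IJMatrixSetValues once per scalar
   * entry. A sketch of a batched alternative (not in the original code):
   * assuming the column indices and values of one flat row row_idx have been
   * collected into std::vector<HYPRE_Int> cols and std::vector<double> vals,
   * the whole row can be set with a single call:
   *
   *   HYPRE_Int nrows = 1;
   *   HYPRE_Int ncols = static_cast<HYPRE_Int>(cols.size());
   *   HYPRE_IJMatrixSetValues(hypre_matrix, nrows, &ncols, &row_idx,
   *                           cols.data(), vals.data());
   */
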
  /* *************************************************** *
   * Assemble hypre vector given vector u and return
   * *************************************************** */
  void AssembleHypreVector(const U& u, HYPRE_IJVector& u_hypre) {
    // Inclusive global index bounds of the vector
    HYPRE_Int jlower = 0;
    HYPRE_Int jupper = mBlock*u.N() - 1; // matches the matrix row map
    HYPRE_IJVectorCreate(row_comm, jlower, jupper, &u_hypre);
    HYPRE_IJVectorSetObjectType(u_hypre, HYPRE_PARCSR);
    HYPRE_IJVectorInitialize(u_hypre);
    using Dune::PDELab::Backend::native;
    HYPRE_Int nvalues = 1;
    for (unsigned int i = 0; i < u.N(); i++) {
      for (int block_i = 0; block_i < mBlock; block_i++) { // entry within the block
        HYPRE_Int indices = mBlock*i + block_i;
        double values = native(u)[i][block_i];
        HYPRE_IJVectorSetValues(u_hypre, nvalues, &indices, &values);
      }
    }
    HYPRE_IJVectorAssemble(u_hypre);
  }
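
  /* The same batching applies to vectors (a sketch, not in the original
   * code): with all flat indices and values collected into
   * std::vector<HYPRE_Int> idx and std::vector<double> val, one call
   *
   *   HYPRE_IJVectorSetValues(u_hypre, static_cast<HYPRE_Int>(idx.size()),
   *                           idx.data(), val.data());
   *
   * replaces the per-entry loop; HYPRE_IJVectorGetValues supports the same
   * batched form for the read-back in ReadHypreVector below.
   */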

  /* *************************************************** *
   * Extract from hypre vector u_hypre into the dune vector u
   * *************************************************** */
  void ReadHypreVector(const HYPRE_IJVector& u_hypre, U& u) {
    HYPRE_Int nvalues = 1;
    double values;
    using Dune::PDELab::Backend::native;
    for (unsigned int i = 0; i < u.N(); i++) {
      for (int block_i = 0; block_i < mBlock; block_i++) { // entry within the block
        HYPRE_Int indices = mBlock*i + block_i;
        HYPRE_IJVectorGetValues(u_hypre, nvalues, &indices, &values);
        native(u)[i][block_i] = values;
      }
    }
  }
  /* *************************************************** *
   * Class data
   * *************************************************** */

  const GV gv;              // grid view of the trial grid function space
  U& u;                     // point where the Jacobian is evaluated
  U b;                      // right-hand side
  Matrix& a;                // system matrix

  double tolerance;         // ParCSRPCG solver tolerance
  HYPRE_Int maxiter;        // ParCSRPCG maximal number of iterations
  int ParCSRPCG_printlevel;

  MPI_Comm row_comm;        // per-rank (sequential) communicator

  HYPRE_IJMatrix a_hypre;   // Hypre matrix storage
  HYPRE_IJVector b_hypre;
  HYPRE_IJVector u_hypre;

  HYPRE_Solver prec;
  HYPRE_Solver solver;

  HYPRE_ParCSRMatrix a_parcsr;
  HYPRE_ParVector b_parcsr;
  HYPRE_ParVector u_parcsr;

};
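
/* Usage sketch (illustrative; GV, Matrix and U stand for the application's
 * concrete grid view, ISTL matrix and vector types):
 *
 *   SequentialHypreSolver<GV, Matrix, U> solver(gv, u, a, b, 1e-8);
 *   solver.solve(u);              // u now holds the PCG solution
 *   auto stats = solver.result(); // iterations, reduction, conv_rate, ...
 */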

#endif // HAVE_HYPRE

#endif // HYPREINTERFACE_SEQUENTIAL_HH