DUNE PDELab (git)

solvers.hh
1// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5
6#ifndef DUNE_ISTL_SOLVERS_HH
7#define DUNE_ISTL_SOLVERS_HH
8
9#include <array>
10#include <cmath>
11#include <complex>
12#include <iostream>
13#include <memory>
14#include <type_traits>
15#include <vector>
16
17 #include <dune/common/exceptions.hh>
18 #include <dune/common/math.hh>
19 #include <dune/common/simd/io.hh>
20 #include <dune/common/simd/simd.hh>
21#include <dune/common/std/type_traits.hh>
22#include <dune/common/timer.hh>
23
24 #include <dune/istl/allocator.hh>
25 #include <dune/istl/bcrsmatrix.hh>
26#include <dune/istl/eigenvalue/arpackpp.hh>
27 #include <dune/istl/istlexception.hh>
28 #include <dune/istl/operators.hh>
29 #include <dune/istl/preconditioner.hh>
30 #include <dune/istl/scalarproducts.hh>
31#include <dune/istl/solver.hh>
32#include <dune/istl/solverregistry.hh>
33
34namespace Dune {
46 //=====================================================================
47 // Implementation of this interface
48 //=====================================================================
49
58 template<class X>
59 class LoopSolver : public IterativeSolver<X,X> {
60 public:
61 using typename IterativeSolver<X,X>::domain_type;
62 using typename IterativeSolver<X,X>::range_type;
63 using typename IterativeSolver<X,X>::field_type;
64 using typename IterativeSolver<X,X>::real_type;
65
66 // copy base class constructors
67 using IterativeSolver<X,X>::IterativeSolver;
68
69 // don't shadow four-argument version of apply defined in the base class
70 using IterativeSolver<X,X>::apply;
71
73 void apply (X& x, X& b, InverseOperatorResult& res) override
74 {
75 Iteration iteration(*this, res);
76 _prec->pre(x,b);
77
78 // overwrite b with defect
79 _op->applyscaleadd(-1,x,b);
80
81 // compute norm, \todo parallelization
82 real_type def = _sp->norm(b);
83 if(iteration.step(0, def)){
84 _prec->post(x);
85 return;
86 }
87 // prepare preconditioner
88
89 // allocate correction vector
90 X v(x);
91
92 // iteration loop
93 int i=1;
94 for ( ; i<=_maxit; i++ )
95 {
96 v = 0; // clear correction
97 _prec->apply(v,b); // apply preconditioner
98 x += v; // update solution
99 _op->applyscaleadd(-1,v,b); // update defect
100 def=_sp->norm(b); // comp defect norm
101 if(iteration.step(i, def))
102 break;
103 }
104
105 // postprocess preconditioner
106 _prec->post(x);
107 }
108
109 protected:
110 using IterativeSolver<X,X>::_op;
111 using IterativeSolver<X,X>::_prec;
112 using IterativeSolver<X,X>::_sp;
113 using IterativeSolver<X,X>::_reduction;
114 using IterativeSolver<X,X>::_maxit;
115 using IterativeSolver<X,X>::_verbose;
116 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
117 };
118 DUNE_REGISTER_SOLVER("loopsolver", defaultIterativeSolverCreator<Dune::LoopSolver>());
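  /* Illustrative usage sketch (the matrix, vector and preconditioner types below are
     assumptions chosen for the example, not part of this header; they additionally need
     dune/istl/operators.hh and dune/istl/preconditioners.hh).  A LoopSolver performs the
     preconditioned Richardson iteration x += W^{-1}(b - Ax) shown in apply() above:

       using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
       using Vec = Dune::BlockVector<Dune::FieldVector<double,1>>;
       Mat A;                                        // assembled elsewhere
       Vec x(A.N()), b(A.N());                       // initial guess and right-hand side
       Dune::MatrixAdapter<Mat,Vec,Vec> op(A);       // wrap the matrix as a LinearOperator
       Dune::SeqJac<Mat,Vec,Vec> prec(A, 1, 1.0);    // one damped Jacobi sweep per application
       Dune::LoopSolver<Vec> solver(op, prec, 1e-8, 100, 2);   // reduction, maxit, verbose
       Dune::InverseOperatorResult stat;
       solver.apply(x, b, stat);                     // note: b is overwritten with the defect
  */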
119
120
121 // all these solvers are taken from the SUMO library
123 template<class X>
124 class GradientSolver : public IterativeSolver<X,X> {
125 public:
126 using typename IterativeSolver<X,X>::domain_type;
127 using typename IterativeSolver<X,X>::range_type;
128 using typename IterativeSolver<X,X>::field_type;
129 using typename IterativeSolver<X,X>::real_type;
130
131 // copy base class constructors
132 using IterativeSolver<X,X>::IterativeSolver;
133
134 // don't shadow four-argument version of apply defined in the base class
135 using IterativeSolver<X,X>::apply;
136
142 void apply (X& x, X& b, InverseOperatorResult& res) override
143 {
144 Iteration iteration(*this, res);
145 _prec->pre(x,b); // prepare preconditioner
146
147 _op->applyscaleadd(-1,x,b); // overwrite b with defect
148
149 real_type def = _sp->norm(b); // compute norm
150 if(iteration.step(0, def)){
151 _prec->post(x);
152 return;
153 }
154
155 X p(x); // create local vectors
156 X q(b);
157
158 int i=1; // loop variables
159 field_type lambda;
160 for ( ; i<=_maxit; i++ )
161 {
162 p = 0; // clear correction
163 _prec->apply(p,b); // apply preconditioner
164 _op->apply(p,q); // q=Ap
165 auto alpha = _sp->dot(q,p);
166 lambda = Simd::cond(def==field_type(0.),
167 field_type(0.), // no need for minimization if def is already 0
168 _sp->dot(p,b)/alpha); // minimization
169 x.axpy(lambda,p); // update solution
170 b.axpy(-lambda,q); // update defect
171
172 def =_sp->norm(b); // comp defect norm
173 if(iteration.step(i, def))
174 break;
175 }
176 // postprocess preconditioner
177 _prec->post(x);
178 }
179
180 protected:
181 using IterativeSolver<X,X>::_op;
182 using IterativeSolver<X,X>::_prec;
183 using IterativeSolver<X,X>::_sp;
184 using IterativeSolver<X,X>::_reduction;
185 using IterativeSolver<X,X>::_maxit;
186 using IterativeSolver<X,X>::_verbose;
187 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
188 };
189 DUNE_REGISTER_SOLVER("gradientsolver", defaultIterativeSolverCreator<Dune::GradientSolver>());
190
192 template<class X>
193 class CGSolver : public IterativeSolver<X,X> {
194 public:
195 using typename IterativeSolver<X,X>::domain_type;
196 using typename IterativeSolver<X,X>::range_type;
197 using typename IterativeSolver<X,X>::field_type;
198 using typename IterativeSolver<X,X>::real_type;
199
200 // copy base class constructors
201 using IterativeSolver<X,X>::IterativeSolver;
202
203 private:
204 using typename IterativeSolver<X,X>::scalar_real_type;
205
206 protected:
207
208 static constexpr bool enableConditionEstimate = (std::is_same_v<field_type,float> || std::is_same_v<field_type,double>);
209
210 public:
211
212 // don't shadow four-argument version of apply defined in the base class
213 using IterativeSolver<X,X>::apply;
214
222 CGSolver (const LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
223 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose),
224 condition_estimate_(condition_estimate)
225 {
226 if (condition_estimate && !enableConditionEstimate) {
227 condition_estimate_ = false;
228 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
229 }
230 }
231
239 CGSolver (const LinearOperator<X,X>& op, const ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
240 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
241 condition_estimate_(condition_estimate)
242 {
243 if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
244 condition_estimate_ = false;
245 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
246 }
247 }
248
256 CGSolver (std::shared_ptr<const LinearOperator<X,X>> op, std::shared_ptr<ScalarProduct<X>> sp,
257 std::shared_ptr<Preconditioner<X,X>> prec,
258 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
259 : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
260 condition_estimate_(condition_estimate)
261 {
262 if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
263 condition_estimate_ = false;
264 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
265 }
266 }
267
279 void apply (X& x, X& b, InverseOperatorResult& res) override
280 {
281 Iteration iteration(*this,res);
282 _prec->pre(x,b); // prepare preconditioner
283
284 _op->applyscaleadd(-1,x,b); // overwrite b with defect
285
286 real_type def = _sp->norm(b); // compute norm
287 if(iteration.step(0, def)){
288 _prec->post(x);
289 return;
290 }
291
292 X p(x); // the search direction
293 X q(x); // a temporary vector
294
295 // Remember lambda and beta values for condition estimate
296 std::vector<real_type> lambdas(0);
297 std::vector<real_type> betas(0);
298
299 // some local variables
300 field_type rho,rholast,lambda,alpha,beta;
301
302 // determine initial search direction
303 p = 0; // clear correction
304 _prec->apply(p,b); // apply preconditioner
305 rholast = _sp->dot(p,b); // orthogonalization
306
307 // the loop
308 int i=1;
309 for ( ; i<=_maxit; i++ )
310 {
311 // minimize in given search direction p
312 _op->apply(p,q); // q=Ap
313 alpha = _sp->dot(p,q); // scalar product
314 lambda = Simd::cond(def==field_type(0.), field_type(0.), rholast/alpha); // minimization
315 if constexpr (enableConditionEstimate)
316 if (condition_estimate_)
317 lambdas.push_back(std::real(lambda));
318 x.axpy(lambda,p); // update solution
319 b.axpy(-lambda,q); // update defect
320
321 // convergence test
322 def=_sp->norm(b); // comp defect norm
323 if(iteration.step(i, def))
324 break;
325
326 // determine new search direction
327 q = 0; // clear correction
328 _prec->apply(q,b); // apply preconditioner
329 rho = _sp->dot(q,b); // orthogonalization
330 beta = Simd::cond(def==field_type(0.), field_type(0.), rho/rholast); // scaling factor
331 if constexpr (enableConditionEstimate)
332 if (condition_estimate_)
333 betas.push_back(std::real(beta));
334 p *= beta; // scale old search direction
335 p += q; // orthogonalization with correction
336 rholast = rho; // remember rho for recurrence
337 }
338
339 _prec->post(x); // postprocess preconditioner
340
341 if (condition_estimate_) {
342#if HAVE_ARPACKPP
343 if constexpr (enableConditionEstimate) {
344 using std::sqrt;
345
346 // Build T matrix which has extreme eigenvalues approximating
347 // those of the original system
348 // (see Y. Saad, Iterative methods for sparse linear systems)
349
350 COND_MAT T(i, i, COND_MAT::row_wise);
351
352 for (auto row = T.createbegin(); row != T.createend(); ++row) {
353 if (row.index() > 0)
354 row.insert(row.index()-1);
355 row.insert(row.index());
356 if (row.index() < T.N() - 1)
357 row.insert(row.index()+1);
358 }
359 for (int row = 0; row < i; ++row) {
360 if (row > 0) {
361 T[row][row-1] = sqrt(betas[row-1]) / lambdas[row-1];
362 }
363
364 T[row][row] = 1.0 / lambdas[row];
365 if (row > 0) {
366 T[row][row] += betas[row-1] / lambdas[row-1];
367 }
368
369 if (row < i - 1) {
370 T[row][row+1] = sqrt(betas[row]) / lambdas[row];
371 }
372 }
373
374 // Compute largest and smallest eigenvalue of T matrix and return as estimate
375 ArPackPlusPlus_Algorithms<COND_MAT, COND_VEC> arpack(T);
376
377 real_type eps = 0.0;
378 COND_VEC eigv;
379 real_type min_eigv, max_eigv;
380 arpack.computeSymMinMagnitude (eps, eigv, min_eigv);
381 arpack.computeSymMaxMagnitude (eps, eigv, max_eigv);
382
383 res.condition_estimate = max_eigv / min_eigv;
384
385 if (this->_verbose > 0) {
386 std::cout << "Min eigv estimate: " << Simd::io(min_eigv) << '\n';
387 std::cout << "Max eigv estimate: " << Simd::io(max_eigv) << '\n';
388 std::cout << "Condition estimate: "
389 << Simd::io(max_eigv / min_eigv) << std::endl;
390 }
391 }
392#else
393 std::cerr << "WARNING: Condition estimate was requested. This requires ARPACK, but ARPACK was not found!" << std::endl;
394#endif
395 }
396 }
397
398 private:
399 bool condition_estimate_ = false;
400
401 // Matrix and vector types used for condition estimate
402 using COND_MAT = BCRSMatrix<FieldMatrix<real_type,1,1> >;
403 using COND_VEC = BlockVector<FieldVector<real_type,1> >;
404
405 protected:
406 using IterativeSolver<X,X>::_op;
407 using IterativeSolver<X,X>::_prec;
408 using IterativeSolver<X,X>::_sp;
409 using IterativeSolver<X,X>::_reduction;
410 using IterativeSolver<X,X>::_maxit;
411 using IterativeSolver<X,X>::_verbose;
412 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
413 };
414 DUNE_REGISTER_SOLVER("cgsolver", defaultIterativeSolverCreator<Dune::CGSolver>());
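  /* Note on the condition estimate in apply() above (a summary of what the code does,
     plus one assumed usage line reusing op, prec and Vec from the earlier sketch): the CG
     step sizes lambda_k and scaling factors beta_k are recorded and assembled into the
     symmetric tridiagonal Lanczos matrix T with
         T[k][k]   = 1/lambda_k + beta_{k-1}/lambda_{k-1}   (second term only for k > 0)
         T[k][k-1] = T[k-1][k] = sqrt(beta_{k-1})/lambda_{k-1}
     whose extreme eigenvalues approximate those of the original system
     (Y. Saad, Iterative Methods for Sparse Linear Systems).  Their ratio is stored in
     InverseOperatorResult::condition_estimate.  This requires ARPACK (HAVE_ARPACKPP) and a
     float or double field_type:

       Dune::CGSolver<Vec> solver(op, prec, 1e-8, 200, 2, true);   // last argument: condition_estimate
  */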
415
416 // Ronald Kriemanns BiCG-STAB implementation from Sumo
418 template<class X>
419 class BiCGSTABSolver : public IterativeSolver<X,X> {
420 public:
421 using typename IterativeSolver<X,X>::domain_type;
422 using typename IterativeSolver<X,X>::range_type;
423 using typename IterativeSolver<X,X>::field_type;
424 using typename IterativeSolver<X,X>::real_type;
425
426 // copy base class constructors
427 using IterativeSolver<X,X>::IterativeSolver;
428
429 // don't shadow four-argument version of apply defined in the base class
430 using IterativeSolver<X,X>::apply;
431
439 void apply (X& x, X& b, InverseOperatorResult& res) override
440 {
441 using std::abs;
442 const Simd::Scalar<real_type> EPSILON=1e-80;
443 using std::abs;
444 double it;
445 field_type rho, rho_new, alpha, beta, h, omega;
446 real_type norm;
447
448 //
449 // get vectors and matrix
450 //
451 X& r=b;
452 X p(x);
453 X v(x);
454 X t(x);
455 X y(x);
456 X rt(x);
457
458 //
459 // begin iteration
460 //
461
462 // r = r - Ax; rt = r
463 Iteration<double> iteration(*this,res);
464 _prec->pre(x,r); // prepare preconditioner
465
466 _op->applyscaleadd(-1,x,r); // overwrite b with defect
467
468 rt=r;
469
470 norm = _sp->norm(r);
471 if(iteration.step(0, norm)){
472 _prec->post(x);
473 return;
474 }
475 p=0;
476 v=0;
477
478 rho = 1;
479 alpha = 1;
480 omega = 1;
481
482 //
483 // iteration
484 //
485
486 for (it = 0.5; it < _maxit; it+=.5)
487 {
488 //
489 // preprocess, set vecsizes etc.
490 //
491
492 // rho_new = < rt , r >
493 rho_new = _sp->dot(rt,r);
494
495 // check whether a breakdown occurred
496 if (Simd::allTrue(abs(rho) <= EPSILON))
497 DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - rho "
498 << Simd::io(rho) << " <= EPSILON " << EPSILON
499 << " after " << it << " iterations");
500 if (Simd::allTrue(abs(omega) <= EPSILON))
501 DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - omega "
502 << Simd::io(omega) << " <= EPSILON " << EPSILON
503 << " after " << it << " iterations");
504
505
506 if (it<1)
507 p = r;
508 else
509 {
510 beta = Simd::cond(norm==field_type(0.),
511 field_type(0.), // no need for orthogonalization if norm is already 0
512 ( rho_new / rho ) * ( alpha / omega ));
513 p.axpy(-omega,v); // p = r + beta (p - omega*v)
514 p *= beta;
515 p += r;
516 }
517
518 // y = W^-1 * p
519 y = 0;
520 _prec->apply(y,p); // apply preconditioner
521
522 // v = A * y
523 _op->apply(y,v);
524
525 // alpha = rho_new / < rt, v >
526 h = _sp->dot(rt,v);
527
528 if ( Simd::allTrue(abs(h) < EPSILON) )
529 DUNE_THROW(SolverAbort,"abs(h) < EPSILON in BiCGSTAB - abs(h) "
530 << Simd::io(abs(h)) << " < EPSILON " << EPSILON
531 << " after " << it << " iterations");
532
533 alpha = Simd::cond(norm==field_type(0.),
534 field_type(0.),
535 rho_new / h);
536
537 // apply first correction to x
538 // x <- x + alpha y
539 x.axpy(alpha,y);
540
541 // r = r - alpha*v
542 r.axpy(-alpha,v);
543
544 //
545 // test stop criteria
546 //
547
548 norm = _sp->norm(r);
549 if(iteration.step(it, norm)){
550 break;
551 }
552
553 it+=.5;
554
555 // y = W^-1 * r
556 y = 0;
557 _prec->apply(y,r);
558
559 // t = A * y
560 _op->apply(y,t);
561
562 // omega = < t, r > / < t, t >
563 h = _sp->dot(t,t);
564 omega = Simd::cond(norm==field_type(0.),
565 field_type(0.),
566 _sp->dot(t,r)/h);
567
568 // apply second correction to x
569 // x <- x + omega y
570 x.axpy(omega,y);
571
572 // r = s - omega*t (remember : r = s)
573 r.axpy(-omega,t);
574
575 rho = rho_new;
576
577 //
578 // test stop criteria
579 //
580
581 norm = _sp->norm(r);
582 if(iteration.step(it, norm)){
583 break;
584 }
585 } // end for
586
587 _prec->post(x); // postprocess preconditioner
588 }
589
590 protected:
591 using IterativeSolver<X,X>::_op;
592 using IterativeSolver<X,X>::_prec;
593 using IterativeSolver<X,X>::_sp;
594 using IterativeSolver<X,X>::_reduction;
595 using IterativeSolver<X,X>::_maxit;
596 using IterativeSolver<X,X>::_verbose;
597 template<class CountType>
598 using Iteration = typename IterativeSolver<X,X>::template Iteration<CountType>;
599 };
600 DUNE_REGISTER_SOLVER("bicgstabsolver", defaultIterativeSolverCreator<Dune::BiCGSTABSolver>());
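  /* Reading aid for the BiCGSTAB loop above (descriptive only): each pass through the
     for-loop counts as two half-iterations.  The first half builds the search direction p,
     computes the BiCG step alpha and updates x and r; the second half computes the
     stabilization parameter omega and applies the second update.  Each half applies the
     preconditioner and the operator once and tests the convergence criterion, which is why
     the iteration counter advances in steps of 0.5 and is kept as a double.
  */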
601
608 template<class X>
609 class MINRESSolver : public IterativeSolver<X,X> {
610 public:
611 using typename IterativeSolver<X,X>::domain_type;
612 using typename IterativeSolver<X,X>::range_type;
613 using typename IterativeSolver<X,X>::field_type;
614 using typename IterativeSolver<X,X>::real_type;
615
616 // copy base class constructors
617 using IterativeSolver<X,X>::IterativeSolver;
618
619 // don't shadow four-argument version of apply defined in the base class
620 using IterativeSolver<X,X>::apply;
621
627 void apply (X& x, X& b, InverseOperatorResult& res) override
628 {
629 using std::sqrt;
630 using std::abs;
631 Iteration iteration(*this, res);
632 // prepare preconditioner
633 _prec->pre(x,b);
634
635 // overwrite rhs with defect
636 _op->applyscaleadd(-1.0,x,b); // b -= Ax
637
638 // some temporary vectors
639 X z(b), dummy(b);
640 z = 0.0;
641
642 // calculate preconditioned defect
643 _prec->apply(z,b); // r = W^-1 (b - Ax)
644 real_type def = _sp->norm(z);
645 if (iteration.step(0, def)){
646 _prec->post(x);
647 return;
648 }
649
650 // recurrence coefficients as computed in Lanczos algorithm
651 field_type alpha, beta;
652 // diagonal entries of givens rotation
653 std::array<real_type,2> c{{0.0,0.0}};
654 // off-diagonal entries of givens rotation
655 std::array<field_type,2> s{{0.0,0.0}};
656
657 // recurrence coefficients (column k of tridiag matrix T_k)
658 std::array<field_type,3> T{{0.0,0.0,0.0}};
659
660 // the rhs vector of the min problem
661 std::array<field_type,2> xi{{1.0,0.0}};
662
663 // beta is real and positive in exact arithmetic
664 // since it is the norm of the basis vectors (in unpreconditioned case)
665 beta = sqrt(_sp->dot(b,z));
666 field_type beta0 = beta;
667
668 // the search directions
669 std::array<X,3> p{{b,b,b}};
670 p[0] = 0.0;
671 p[1] = 0.0;
672 p[2] = 0.0;
673
674 // orthonormal basis vectors (in unpreconditioned case)
675 std::array<X,3> q{{b,b,b}};
676 q[0] = 0.0;
677 q[1] *= Simd::cond(def==field_type(0.),
678 field_type(0.),
679 real_type(1.0)/beta);
680 q[2] = 0.0;
681
682 z *= Simd::cond(def==field_type(0.),
683 field_type(0.),
684 real_type(1.0)/beta);
685
686 // the loop
687 int i = 1;
688 for( ; i<=_maxit; i++) {
689
690 dummy = z;
691 int i1 = i%3,
692 i0 = (i1+2)%3,
693 i2 = (i1+1)%3;
694
695 // symmetrically preconditioned Lanczos algorithm (see Greenbaum p.121)
696 _op->apply(z,q[i2]); // q[i2] = Az
697 q[i2].axpy(-beta,q[i0]);
698 // alpha is real since it is the diagonal entry of the hermitian tridiagonal matrix
699 // from the Lanczos Algorithm
700 // so the order in the scalar product doesn't matter even for the complex case
701 alpha = _sp->dot(z,q[i2]);
702 q[i2].axpy(-alpha,q[i1]);
703
704 z = 0.0;
705 _prec->apply(z,q[i2]);
706
707 // beta is real and positive in exact arithmetic
708 // since it is the norm of the basis vectors (in unpreconditioned case)
709 beta = sqrt(_sp->dot(q[i2],z));
710
711 q[i2] *= Simd::cond(def==field_type(0.),
712 field_type(0.),
713 real_type(1.0)/beta);
714 z *= Simd::cond(def==field_type(0.),
715 field_type(0.),
716 real_type(1.0)/beta);
717
718 // QR Factorization of recurrence coefficient matrix
719 // apply previous givens rotations to last column of T
720 T[1] = T[2];
721 if(i>2) {
722 T[0] = s[i%2]*T[1];
723 T[1] = c[i%2]*T[1];
724 }
725 if(i>1) {
726 T[2] = c[(i+1)%2]*alpha - s[(i+1)%2]*T[1];
727 T[1] = c[(i+1)%2]*T[1] + s[(i+1)%2]*alpha;
728 }
729 else
730 T[2] = alpha;
731
732 // update QR factorization
733 generateGivensRotation(T[2],beta,c[i%2],s[i%2]);
734 // to last column of T_k
735 T[2] = c[i%2]*T[2] + s[i%2]*beta;
736 // and to the rhs xi of the min problem
737 xi[i%2] = -s[i%2]*xi[(i+1)%2];
738 xi[(i+1)%2] *= c[i%2];
739
740 // compute correction direction
741 p[i2] = dummy;
742 p[i2].axpy(-T[1],p[i1]);
743 p[i2].axpy(-T[0],p[i0]);
744 p[i2] *= real_type(1.0)/T[2];
745
746 // apply correction/update solution
747 x.axpy(beta0*xi[(i+1)%2],p[i2]);
748
749 // remember beta_old
750 T[2] = beta;
751
752 // check for convergence
753 // the last entry in the rhs of the min-problem is the residual
754 def = abs(beta0*xi[i%2]);
755 if(iteration.step(i, def)){
756 break;
757 }
758 } // end for
759
760 // postprocess preconditioner
761 _prec->post(x);
762 }
763
764 private:
765
766 void generateGivensRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
767 {
768 using std::sqrt;
769 using std::abs;
770 using std::max;
771 using std::min;
772 const real_type eps = 1e-15;
773 real_type norm_dx = abs(dx);
774 real_type norm_dy = abs(dy);
775 real_type norm_max = max(norm_dx, norm_dy);
776 real_type norm_min = min(norm_dx, norm_dy);
777 real_type temp = norm_min/norm_max;
778 // we rewrite the code in a vectorizable fashion
779 cs = Simd::cond(norm_dy < eps,
780 real_type(1.0),
781 Simd::cond(norm_dx < eps,
782 real_type(0.0),
783 Simd::cond(norm_dy > norm_dx,
784 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
785 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
786 )));
787 sn = Simd::cond(norm_dy < eps,
788 field_type(0.0),
789 Simd::cond(norm_dx < eps,
790 field_type(1.0),
791 Simd::cond(norm_dy > norm_dx,
792 // dy and dx are real in exact arithmetic
793 // thus dx*dy is real so we can explicitly enforce it
794 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*dy/norm_dx/norm_dy,
795 // dy and dx is real in exact arithmetic
796 // so we don't have to conjugate both of them
797 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dy/dx
798 )));
799 }
800
801 protected:
802 using IterativeSolver<X,X>::_op;
803 using IterativeSolver<X,X>::_prec;
804 using IterativeSolver<X,X>::_sp;
805 using IterativeSolver<X,X>::_reduction;
806 using IterativeSolver<X,X>::_maxit;
807 using IterativeSolver<X,X>::_verbose;
808 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
809 };
810 DUNE_REGISTER_SOLVER("minressolver", defaultIterativeSolverCreator<Dune::MINRESSolver>());
811
825 template<class X, class Y=X, class F = Y>
826 class RestartedGMResSolver : public IterativeSolver<X,Y>
827 {
828 public:
829 using typename IterativeSolver<X,Y>::domain_type;
830 using typename IterativeSolver<X,Y>::range_type;
831 using typename IterativeSolver<X,Y>::field_type;
832 using typename IterativeSolver<X,Y>::real_type;
833
834 protected:
835 using typename IterativeSolver<X,Y>::scalar_real_type;
836
838 using fAlloc = ReboundAllocatorType<X,field_type>;
840 using rAlloc = ReboundAllocatorType<X,real_type>;
841
842 public:
843
850 RestartedGMResSolver (const LinearOperator<X,Y>& op, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
851 IterativeSolver<X,Y>::IterativeSolver(op,prec,reduction,maxit,verbose),
852 _restart(restart)
853 {}
854
861 RestartedGMResSolver (const LinearOperator<X,Y>& op, const ScalarProduct<X>& sp, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
862 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
863 _restart(restart)
864 {}
865
878 RestartedGMResSolver (std::shared_ptr<const LinearOperator<X,Y> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
879 IterativeSolver<X,Y>::IterativeSolver(op,prec,configuration),
880 _restart(configuration.get<int>("restart"))
881 {}
882
883 RestartedGMResSolver (std::shared_ptr<const LinearOperator<X,Y> > op, std::shared_ptr<const ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
884 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,configuration),
885 _restart(configuration.get<int>("restart"))
886 {}
887
894 RestartedGMResSolver (std::shared_ptr<const LinearOperator<X,Y>> op,
895 std::shared_ptr<const ScalarProduct<X>> sp,
896 std::shared_ptr<Preconditioner<X,Y>> prec,
897 scalar_real_type reduction, int restart, int maxit, int verbose) :
898 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
899 _restart(restart)
900 {}
901
910 void apply (X& x, Y& b, InverseOperatorResult& res) override
911 {
912 apply(x,b,Simd::max(_reduction),res);
913 }
914
923 void apply (X& x, Y& b, [[maybe_unused]] double reduction, InverseOperatorResult& res) override
924 {
925 using std::abs;
926 const Simd::Scalar<real_type> EPSILON = 1e-80;
927 const int m = _restart;
928 real_type norm = 0.0;
929 int j = 1;
930 std::vector<field_type,fAlloc> s(m+1), sn(m);
931 std::vector<real_type,rAlloc> cs(m);
932 // need copy of rhs if GMRes has to be restarted
933 Y b2(b);
934 // helper vector
935 Y w(b);
936 std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
937 std::vector<F> v(m+1,b);
938
939 Iteration iteration(*this,res);
940
941 // clear solver statistics and set res.converged to false
942 _prec->pre(x,b);
943
944 // calculate defect and overwrite rhs with it
945 _op->applyscaleadd(-1.0,x,b); // b -= Ax
946 // calculate preconditioned defect
947 v[0] = 0.0; _prec->apply(v[0],b); // r = W^-1 b
948 norm = _sp->norm(v[0]);
949 if(iteration.step(0, norm)){
950 _prec->post(x);
951 return;
952 }
953
954 while(j <= _maxit && res.converged != true) {
955
956 int i = 0;
957 v[0] *= Simd::cond(norm==real_type(0.),
958 real_type(0.),
959 real_type(1.0)/norm);
960 s[0] = norm;
961 for(i=1; i<m+1; i++)
962 s[i] = 0.0;
963
964 for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++) {
965 w = 0.0;
966 // use v[i+1] as temporary vector
967 v[i+1] = 0.0;
968 // do Arnoldi algorithm
969 _op->apply(v[i],v[i+1]);
970 _prec->apply(w,v[i+1]);
971 for(int k=0; k<i+1; k++) {
972 // notice that _sp->dot(v[k],w) = v[k]\adjoint w
973 // so one has to pay attention to the order
974 // in the scalar product for the complex case
975 // doing the modified Gram-Schmidt algorithm
976 H[k][i] = _sp->dot(v[k],w);
977 // w -= H[k][i] * v[k]
978 w.axpy(-H[k][i],v[k]);
979 }
980 H[i+1][i] = _sp->norm(w);
981 if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
983 "breakdown in GMRes - |w| == 0.0 after " << j << " iterations");
984
985 // normalize new vector
986 v[i+1] = w;
987 v[i+1] *= Simd::cond(norm==real_type(0.),
988 field_type(0.),
989 real_type(1.0)/H[i+1][i]);
990
991 // update QR factorization
992 for(int k=0; k<i; k++)
993 applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
994
995 // compute new givens rotation
996 generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
997 // finish updating QR factorization
998 applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
999 applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
1000
1001 // norm of the defect is the last component of the vector s
1002 norm = abs(s[i+1]);
1003
1004 iteration.step(j, norm);
1005
1006 } // end for
1007
1008 // calculate update vector
1009 w = 0.0;
1010 update(w,i,H,s,v);
1011 // and current iterate
1012 x += w;
1013
1014 // restart GMRes if convergence was not achieved,
1015 // i.e. linear defect has not reached desired reduction
1016 // and if j < _maxit (do not restart on last iteration)
1017 if( res.converged != true && j < _maxit ) {
1018
1019 if(_verbose > 0)
1020 std::cout << "=== GMRes::restart" << std::endl;
1021 // get saved rhs
1022 b = b2;
1023 // calculate new defect
1024 _op->applyscaleadd(-1.0,x,b); // b -= Ax;
1025 // calculate preconditioned defect
1026 v[0] = 0.0;
1027 _prec->apply(v[0],b);
1028 norm = _sp->norm(v[0]);
1029 }
1030
1031 } //end while
1032
1033 // postprocess preconditioner
1034 _prec->post(x);
1035 }
1036
1037 protected :
1038
1039 void update(X& w, int i,
1040 const std::vector<std::vector<field_type,fAlloc> >& H,
1041 const std::vector<field_type,fAlloc>& s,
1042 const std::vector<X>& v) {
1043 // solution vector of the upper triangular system
1044 std::vector<field_type,fAlloc> y(s);
1045
1046 // backsolve
1047 for(int a=i-1; a>=0; a--) {
1048 field_type rhs(s[a]);
1049 for(int b=a+1; b<i; b++)
1050 rhs -= H[a][b]*y[b];
1051 y[a] = Simd::cond(rhs==field_type(0.),
1052 field_type(0.),
1053 rhs/H[a][a]);
1054
1055 // compute update on the fly
1056 // w += y[a]*v[a]
1057 w.axpy(y[a],v[a]);
1058 }
1059 }
1060
1061 template<typename T>
1062 typename std::enable_if<std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1063 return t;
1064 }
1065
1066 template<typename T>
1067 typename std::enable_if<!std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1068 using std::conj;
1069 return conj(t);
1070 }
1071
1072 void
1073 generatePlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1074 {
1075 using std::sqrt;
1076 using std::abs;
1077 using std::max;
1078 using std::min;
1079 const real_type eps = 1e-15;
1080 real_type norm_dx = abs(dx);
1081 real_type norm_dy = abs(dy);
1082 real_type norm_max = max(norm_dx, norm_dy);
1083 real_type norm_min = min(norm_dx, norm_dy);
1084 real_type temp = norm_min/norm_max;
1085 // we rewrite the code in a vectorizable fashion
1086 cs = Simd::cond(norm_dy < eps,
1087 real_type(1.0),
1088 Simd::cond(norm_dx < eps,
1089 real_type(0.0),
1090 Simd::cond(norm_dy > norm_dx,
1091 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
1092 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
1093 )));
1094 sn = Simd::cond(norm_dy < eps,
1095 field_type(0.0),
1096 Simd::cond(norm_dx < eps,
1097 field_type(1.0),
1098 Simd::cond(norm_dy > norm_dx,
1099 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*conjugate(dy)/norm_dx/norm_dy,
1100 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*conjugate(dy/dx)
1101 )));
1102 }
1103
1104
1105 void
1106 applyPlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1107 {
1108 field_type temp = cs * dx + sn * dy;
1109 dy = -conjugate(sn) * dx + cs * dy;
1110 dx = temp;
1111 }
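    /* Descriptive note: generatePlaneRotation chooses (cs, sn) such that the unitary 2x2
       matrix
           G = |  cs         sn |
               | -conj(sn)   cs |
       maps (dx, dy) to (r, 0), i.e. it eliminates the subdiagonal entry H[i+1][i];
       applyPlaneRotation applies exactly this G to a pair of entries.  Because the same
       rotations are also applied to the right-hand side vector s, the magnitude of s[i+1]
       equals the current defect norm, which is what the convergence test in apply() uses.
       The eps guard above only protects against division by zero for tiny inputs.
    */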
1112
1113 using IterativeSolver<X,Y>::_op;
1114 using IterativeSolver<X,Y>::_prec;
1115 using IterativeSolver<X,Y>::_sp;
1116 using IterativeSolver<X,Y>::_reduction;
1117 using IterativeSolver<X,Y>::_maxit;
1118 using IterativeSolver<X,Y>::_verbose;
1119 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1120 int _restart;
1121 };
1122 DUNE_REGISTER_SOLVER("restartedgmressolver", defaultIterativeSolverCreator<Dune::RestartedGMResSolver>());
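  /* Illustrative construction via ParameterTree (a sketch: the key "restart" is read by
     this class; "reduction", "maxit" and "verbose" are assumed to be read by the
     IterativeSolver base class; op_ptr and prec_ptr are hypothetical shared_ptr-held
     LinearOperator and Preconditioner objects, Vec as in the earlier example):

       Dune::ParameterTree config;
       config["restart"]   = "30";
       config["reduction"] = "1e-8";
       config["maxit"]     = "500";
       config["verbose"]   = "1";
       auto solver = std::make_shared<Dune::RestartedGMResSolver<Vec>>(op_ptr, prec_ptr, config);
  */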
1123
1137 template<class X, class Y=X, class F = Y>
1138 class RestartedFlexibleGMResSolver : public RestartedGMResSolver<X,Y>
1139 {
1140 public:
1141 using typename RestartedGMResSolver<X,Y>::domain_type;
1142 using typename RestartedGMResSolver<X,Y>::range_type;
1143 using typename RestartedGMResSolver<X,Y>::field_type;
1144 using typename RestartedGMResSolver<X,Y>::real_type;
1145
1146 private:
1147 using typename RestartedGMResSolver<X,Y>::scalar_real_type;
1148
1150 using fAlloc = typename RestartedGMResSolver<X,Y>::fAlloc;
1152 using rAlloc = typename RestartedGMResSolver<X,Y>::rAlloc;
1153
1154 public:
1155 // copy base class constructors
1156 using RestartedGMResSolver<X,Y>::RestartedGMResSolver;
1157
1158 // don't shadow four-argument version of apply defined in the base class
1159 using RestartedGMResSolver<X,Y>::apply;
1160
1169 void apply (X& x, Y& b, [[maybe_unused]] double reduction, InverseOperatorResult& res) override
1170 {
1171 using std::abs;
1172 const Simd::Scalar<real_type> EPSILON = 1e-80;
1173 const int m = _restart;
1174 real_type norm = 0.0;
1175 int i, j = 1, k;
1176 std::vector<field_type,fAlloc> s(m+1), sn(m);
1177 std::vector<real_type,rAlloc> cs(m);
1178 // helper vector
1179 Y tmp(b);
1180 std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
1181 std::vector<F> v(m+1,b);
1182 std::vector<X> w(m+1,b);
1183
1184 Iteration iteration(*this,res);
1185 // setup preconditioner if it does something in pre
1186
1187 // calculate residual and overwrite a copy of the rhs with it
1188 _prec->pre(x, b);
1189 v[0] = b;
1190 _op->applyscaleadd(-1.0, x, v[0]); // b -= Ax
1191
1192 norm = _sp->norm(v[0]); // the residual norm
1193 if(iteration.step(0, norm)){
1194 _prec->post(x);
1195 return;
1196 }
1197
1198 // start iterations
1199 res.converged = false;
1200 while(j <= _maxit && res.converged != true)
1201 {
1202 v[0] *= (1.0 / norm);
1203 s[0] = norm;
1204 for(i=1; i<m+1; ++i)
1205 s[i] = 0.0;
1206
1207 // inner loop
1208 for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++)
1209 {
1210 w[i] = 0.0;
1211 // compute wi = M^-1*vi (also called zi)
1212 _prec->apply(w[i], v[i]);
1213 // compute vi = A*wi
1214 // use v[i+1] as temporary vector for w
1215 _op->apply(w[i], v[i+1]);
1216 // do Arnoldi algorithm
1217 for(int kk=0; kk<i+1; kk++)
1218 {
1219 // notice that _sp->dot(v[k],v[i+1]) = v[k]\adjoint v[i+1]
1220 // so one has to pay attention to the order
1221 // in the scalar product for the complex case
1222 // doing the modified Gram-Schmidt algorithm
1223 H[kk][i] = _sp->dot(v[kk],v[i+1]);
1224 // w -= H[k][i] * v[kk]
1225 v[i+1].axpy(-H[kk][i], v[kk]);
1226 }
1227 H[i+1][i] = _sp->norm(v[i+1]);
1228 if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
1229 DUNE_THROW(SolverAbort, "breakdown in fGMRes - |w| (-> "
1230 << w[i] << ") == 0.0 after "
1231 << j << " iterations");
1232
1233 // v[i+1] = w*1/H[i+1][i]
1234 v[i+1] *= real_type(1.0)/H[i+1][i];
1235
1236 // update QR factorization
1237 for(k=0; k<i; k++)
1238 this->applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
1239
1240 // compute new givens rotation
1241 this->generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1242
1243 // finish updating QR factorization
1244 this->applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1245 this->applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
1246
1247 // norm of the residual is the last component of vector s
1248 using std::abs;
1249 norm = abs(s[i+1]);
1250 iteration.step(j, norm);
1251 } // end inner for loop
1252
1253 // calculate update vector
1254 tmp = 0.0;
1255 this->update(tmp, i, H, s, w);
1256 // and update current iterate
1257 x += tmp;
1258
1259 // restart fGMRes if convergence was not achieved,
1260 // i.e. linear residual has not reached desired reduction
1261 // and if still j < _maxit (do not restart on last iteration)
1262 if( res.converged != true && j < _maxit)
1263 {
1264 if (_verbose > 0)
1265 std::cout << "=== fGMRes::restart" << std::endl;
1266 // get rhs
1267 v[0] = b;
1268 // calculate new defect
1269 _op->applyscaleadd(-1.0, x,v[0]); // b -= Ax;
1270 // calculate preconditioned defect
1271 norm = _sp->norm(v[0]); // update the residual norm
1272 }
1273
1274 } // end outer while loop
1275
1276 // post-process preconditioner
1277 _prec->post(x);
1278 }
1279
1280private:
1281 using RestartedGMResSolver<X,Y>::_op;
1282 using RestartedGMResSolver<X,Y>::_prec;
1283 using RestartedGMResSolver<X,Y>::_sp;
1284 using RestartedGMResSolver<X,Y>::_reduction;
1285 using RestartedGMResSolver<X,Y>::_maxit;
1286 using RestartedGMResSolver<X,Y>::_verbose;
1287 using RestartedGMResSolver<X,Y>::_restart;
1288 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1289 };
1290 DUNE_REGISTER_SOLVER("restartedflexiblegmressolver", defaultIterativeSolverCreator<Dune::RestartedFlexibleGMResSolver>());
1291
1305 template<class X>
1306 class GeneralizedPCGSolver : public IterativeSolver<X,X>
1307 {
1308 public:
1309 using typename IterativeSolver<X,X>::domain_type;
1310 using typename IterativeSolver<X,X>::range_type;
1311 using typename IterativeSolver<X,X>::field_type;
1312 using typename IterativeSolver<X,X>::real_type;
1313
1314 private:
1315 using typename IterativeSolver<X,X>::scalar_real_type;
1316
1318 using fAlloc = ReboundAllocatorType<X,field_type>;
1319
1320 public:
1321
1322 // don't shadow four-argument version of apply defined in the base class
1323 using IterativeSolver<X,X>::apply;
1324
1331 GeneralizedPCGSolver (const LinearOperator<X,X>& op, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1332 IterativeSolver<X,X>::IterativeSolver(op,prec,reduction,maxit,verbose),
1333 _restart(restart)
1334 {}
1335
1343 GeneralizedPCGSolver (const LinearOperator<X,X>& op, const ScalarProduct<X>& sp, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1344 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1345 _restart(restart)
1346 {}
1347
1348
1361 GeneralizedPCGSolver (std::shared_ptr<const LinearOperator<X,X> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1362 IterativeSolver<X,X>::IterativeSolver(op,prec,configuration),
1363 _restart(configuration.get<int>("restart"))
1364 {}
1365
1366 GeneralizedPCGSolver (std::shared_ptr<const LinearOperator<X,X> > op, std::shared_ptr<const ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1367 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,configuration),
1368 _restart(configuration.get<int>("restart"))
1369 {}
1377 GeneralizedPCGSolver (std::shared_ptr<const LinearOperator<X,X>> op,
1378 std::shared_ptr<const ScalarProduct<X>> sp,
1379 std::shared_ptr<Preconditioner<X,X>> prec,
1380 scalar_real_type reduction, int maxit, int verbose,
1381 int restart = 10) :
1382 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1383 _restart(restart)
1384 {}
1385
1391 void apply (X& x, X& b, InverseOperatorResult& res) override
1392 {
1393 Iteration iteration(*this, res);
1394 _prec->pre(x,b); // prepare preconditioner
1395 _op->applyscaleadd(-1,x,b); // overwrite b with defect
1396
1397 std::vector<std::shared_ptr<X> > p(_restart);
1398 std::vector<field_type,fAlloc> pp(_restart);
1399 X q(x); // a temporary vector
1400 X prec_res(x); // a temporary vector for preconditioner output
1401
1402 p[0].reset(new X(x));
1403
1404 real_type def = _sp->norm(b); // compute norm
1405 if(iteration.step(0, def)){
1406 _prec->post(x);
1407 return;
1408 }
1409 // some local variables
1410 field_type rho, lambda;
1411
1412 int i=0;
1413 // determine initial search direction
1414 *(p[0]) = 0; // clear correction
1415 _prec->apply(*(p[0]),b); // apply preconditioner
1416 rho = _sp->dot(*(p[0]),b); // orthogonalization
1417 _op->apply(*(p[0]),q); // q=Ap
1418 pp[0] = _sp->dot(*(p[0]),q); // scalar product
1419 lambda = rho/pp[0]; // minimization
1420 x.axpy(lambda,*(p[0])); // update solution
1421 b.axpy(-lambda,q); // update defect
1422
1423 // convergence test
1424 def=_sp->norm(b); // comp defect norm
1425 ++i;
1426 if(iteration.step(i, def)){
1427 _prec->post(x);
1428 return;
1429 }
1430
1431 while(i<_maxit) {
1432 // the loop
1433 int end=std::min(_restart, _maxit-i+1);
1434 for (int ii = 1; ii < end; ++ii)
1435 {
1436 //std::cout<<" ii="<<ii<<" i="<<i<<std::endl;
1437 // compute next conjugate direction
1438 prec_res = 0; // clear correction
1439 _prec->apply(prec_res,b); // apply preconditioner
1440
1441 p[ii].reset(new X(prec_res));
1442 _op->apply(prec_res, q);
1443
1444 for(int j=0; j<ii; ++j) {
1445 rho =_sp->dot(q,*(p[j]))/pp[j];
1446 p[ii]->axpy(-rho, *(p[j]));
1447 }
1448
1449 // minimize in given search direction
1450 _op->apply(*(p[ii]),q); // q=Ap
1451 pp[ii] = _sp->dot(*(p[ii]),q); // scalar product
1452 rho = _sp->dot(*(p[ii]),b); // orthogonalization
1453 lambda = rho/pp[ii]; // minimization
1454 x.axpy(lambda,*(p[ii])); // update solution
1455 b.axpy(-lambda,q); // update defect
1456
1457 // convergence test
1458 def = _sp->norm(b); // comp defect norm
1459
1460 ++i;
1461 iteration.step(i, def);
1462 }
1463 if(res.converged)
1464 break;
1465 if(end==_restart) {
1466 *(p[0])=*(p[_restart-1]);
1467 pp[0]=pp[_restart-1];
1468 }
1469 }
1470
1471 // postprocess preconditioner
1472 _prec->post(x);
1473
1474 }
1475
1476 private:
1477 using IterativeSolver<X,X>::_op;
1478 using IterativeSolver<X,X>::_prec;
1479 using IterativeSolver<X,X>::_sp;
1480 using IterativeSolver<X,X>::_reduction;
1481 using IterativeSolver<X,X>::_maxit;
1482 using IterativeSolver<X,X>::_verbose;
1483 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1484 int _restart;
1485 };
1486 DUNE_REGISTER_SOLVER("generalizedpcgsolver", defaultIterativeSolverCreator<Dune::GeneralizedPCGSolver>());
1487
1499 template<class X>
1500 class RestartedFCGSolver : public IterativeSolver<X,X> {
1501 public:
1502 using typename IterativeSolver<X,X>::domain_type;
1503 using typename IterativeSolver<X,X>::range_type;
1504 using typename IterativeSolver<X,X>::field_type;
1505 using typename IterativeSolver<X,X>::real_type;
1506
1507 private:
1508 using typename IterativeSolver<X,X>::scalar_real_type;
1509
1510 public:
1511 // don't shadow four-argument version of apply defined in the base class
1512 using IterativeSolver<X,X>::apply;
1518 RestartedFCGSolver (const LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
1519 scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose), _mmax(mmax)
1520 {
1521 }
1522
1528 RestartedFCGSolver (const LinearOperator<X,X>& op, const ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
1529 scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1530 {
1531 }
1532
1538 RestartedFCGSolver (std::shared_ptr<const LinearOperator<X,X>> op,
1539 std::shared_ptr<const ScalarProduct<X>> sp,
1540 std::shared_ptr<Preconditioner<X,X>> prec,
1541 scalar_real_type reduction, int maxit, int verbose,
1542 int mmax = 10)
1543 : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1544 {}
1545
1558 RestartedFCGSolver (std::shared_ptr<const LinearOperator<X,X>> op,
1559 std::shared_ptr<Preconditioner<X,X>> prec,
1560 const ParameterTree& config)
1561 : IterativeSolver<X,X>(op, prec, config), _mmax(config.get("mmax", 10))
1562 {}
1563
1564 RestartedFCGSolver (std::shared_ptr<const LinearOperator<X,X>> op,
1565 std::shared_ptr<const ScalarProduct<X>> sp,
1566 std::shared_ptr<Preconditioner<X,X>> prec,
1567 const ParameterTree& config)
1568 : IterativeSolver<X,X>(op, sp, prec, config), _mmax(config.get("mmax", 10))
1569 {}
1570
1583 void apply (X& x, X& b, InverseOperatorResult& res) override
1584 {
1585 using rAlloc = ReboundAllocatorType<X,field_type>;
1586 res.clear();
1587 Iteration iteration(*this,res);
1588 _prec->pre(x,b); // prepare preconditioner
1589 _op->applyscaleadd(-1,x,b); // overwrite b with defect
1590
1591 //arrays for interim values:
1592 std::vector<X> d(_mmax+1, x); // array for directions
1593 std::vector<X> Ad(_mmax+1, x); // array for Ad[i]
1594 std::vector<field_type,rAlloc> ddotAd(_mmax+1,0); // array for <d[i],Ad[i]>
1595 X w(x);
1596
1597 real_type def = _sp->norm(b); // compute norm
1598 if(iteration.step(0, def)){
1599 _prec->post(x);
1600 return;
1601 }
1602
1603 // some local variables
1604 field_type alpha;
1605
1606 // the loop
1607 int i=1;
1608 int i_bounded=0;
1609 while(i<=_maxit && !res.converged) {
1610 for (; i_bounded <= _mmax && i<= _maxit; i_bounded++) {
1611 d[i_bounded] = 0; // reset search direction
1612 _prec->apply(d[i_bounded], b); // apply preconditioner
1613 w = d[i_bounded]; // copy of current d[i]
1614 // orthogonalization with previous directions
1615 orthogonalizations(i_bounded,Ad,w,ddotAd,d);
1616
1617 // save interim values for later use
1618 _op->apply(d[i_bounded], Ad[i_bounded]); // save Ad[i]
1619 ddotAd[i_bounded]=_sp->dot(d[i_bounded],Ad[i_bounded]); // save <d[i],Ad[i]>
1620 alpha = _sp->dot(d[i_bounded], b)/ddotAd[i_bounded]; // <d[i],b>/<d[i],Ad[i]>
1621
1622 //update solution and defect
1623 x.axpy(alpha, d[i_bounded]);
1624 b.axpy(-alpha, Ad[i_bounded]);
1625
1626 // convergence test
1627 def = _sp->norm(b); // comp defect norm
1628
1629 iteration.step(i, def);
1630 i++;
1631 }
1632 //restart: exchange first and last stored values
1633 cycle(Ad,d,ddotAd,i_bounded);
1634 }
1635
1636 // correct i, which may exceed _maxit if convergence was not achieved
1637 i=std::min(_maxit,i);
1638
1639 _prec->post(x); // postprocess preconditioner
1640 }
1641
1642 private:
1643 //This function is called every iteration to orthogonalize against the last search directions
1644 virtual void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) {
1645 // The RestartedFCGSolver orthogonalizes only against directions with lower array index.
1646 for (int k = 0; k < i_bounded; k++) {
1647 d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1648 }
1649 }
1650
1651 // This function is called every mmax iterations to handle limited array sizes.
1652 virtual void cycle(std::vector<X>& Ad,std::vector<X>& d,std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) {
1653 // Reset loop index and exchange the first and last arrays
1654 i_bounded = 1;
1655 std::swap(Ad[0], Ad[_mmax]);
1656 std::swap(d[0], d[_mmax]);
1657 std::swap(ddotAd[0], ddotAd[_mmax]);
1658 }
1659
1660 protected:
1661 int _mmax;
1662 using IterativeSolver<X,X>::_op;
1663 using IterativeSolver<X,X>::_prec;
1664 using IterativeSolver<X,X>::_sp;
1665 using IterativeSolver<X,X>::_reduction;
1666 using IterativeSolver<X,X>::_maxit;
1667 using IterativeSolver<X,X>::_verbose;
1668 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1669 };
1670 DUNE_REGISTER_SOLVER("restartedfcgsolver", defaultIterativeSolverCreator<Dune::RestartedFCGSolver>());
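  /* Illustrative sketch (op, prec and Vec as assumed in the earlier examples): the last
     constructor argument mmax bounds how many previous search directions are stored and
     orthogonalized against before the direction storage is cycled.

       Dune::RestartedFCGSolver<Vec> solver(op, prec, 1e-8, 300, 1, 25);   // mmax = 25

     With the ParameterTree constructors, mmax is read from the key "mmax" (default 10).
  */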
1671
1678 template<class X>
1679 class CompleteFCGSolver : public RestartedFCGSolver<X> {
1680 public:
1681 using typename RestartedFCGSolver<X>::domain_type;
1682 using typename RestartedFCGSolver<X>::range_type;
1683 using typename RestartedFCGSolver<X>::field_type;
1684 using typename RestartedFCGSolver<X>::real_type;
1685
1686 // copy base class constructors
1687 using RestartedFCGSolver<X>::RestartedFCGSolver;
1688
1689 // don't shadow four-argument version of apply defined in the base class
1690 using RestartedFCGSolver<X>::apply;
1691
1692 // only a minor part of the RestartedFCGSolver apply method is modified
1693 void apply (X& x, X& b, InverseOperatorResult& res) override
1694 {
1695 // reset limiter of orthogonalization loop
1696 _k_limit = 0;
1697 this->RestartedFCGSolver<X>::apply(x,b,res);
1698 };
1699
1700 private:
1701 // This function is called every iteration to orthogonalize against the last search directions.
1702 void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) override {
1703 // This FCGSolver also uses values with higher array indexes, if they exist.
1704 for (int k = 0; k < _k_limit; k++) {
1705 if(i_bounded!=k)
1706 d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1707 }
1708 // The loop limit increases while the array is not yet completely filled.
1709 if(_k_limit<=i_bounded)
1710 _k_limit++;
1711
1712 };
1713
1714 // This function is called every mmax iterations to handle limited array sizes.
1715 void cycle(std::vector<X>& Ad, [[maybe_unused]] std::vector<X>& d, [[maybe_unused]] std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) override {
1716 // Only the loop index i_bounded returns to 0 once it has reached mmax.
1717 i_bounded = 0;
1718 // Now all arrays are filled and the loop in orthogonalizations() can use them in full.
1719 _k_limit = Ad.size();
1720 };
1721
1722 int _k_limit = 0;
1723
1724 protected:
1725 using RestartedFCGSolver<X>::_mmax;
1726 using RestartedFCGSolver<X>::_op;
1727 using RestartedFCGSolver<X>::_prec;
1728 using RestartedFCGSolver<X>::_sp;
1729 using RestartedFCGSolver<X>::_reduction;
1730 using RestartedFCGSolver<X>::_maxit;
1731 using RestartedFCGSolver<X>::_verbose;
1732 };
1733 DUNE_REGISTER_SOLVER("completefcgsolver", defaultIterativeSolverCreator<Dune::CompleteFCGSolver>());
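  /* Descriptive note: the two flexible CG variants above differ only in how they treat the
     stored directions.  RestartedFCGSolver orthogonalizes a new direction only against
     directions with a lower index and, when the storage of mmax+1 directions is exhausted,
     swaps the first and last entries before restarting.  CompleteFCGSolver orthogonalizes
     against every direction currently held in the arrays and merely resets the loop index,
     so once the arrays are full each new direction is orthogonalized against all of them.
  */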
1735 } // end namespace
1736
1737#endif
Wrapper to use a range of ARPACK++ eigenvalue solvers.
Definition: arpackpp.hh:245
void computeSymMaxMagnitude(const Real &epsilon, BlockVector &x, Real &lambda) const
Assume the matrix to be square, symmetric and perform IRLM to compute an approximation lambda of its ...
Definition: arpackpp.hh:289
void computeSymMinMagnitude(const Real &epsilon, BlockVector &x, Real &lambda) const
Assume the matrix to be square, symmetric and perform IRLM to compute an approximation lambda of its ...
Definition: arpackpp.hh:391
A sparse block matrix with compressed row storage.
Definition: bcrsmatrix.hh:466
@ row_wise
Build in a row-wise manner.
Definition: bcrsmatrix.hh:517
CreateIterator createend()
get create iterator pointing to one after the last block
Definition: bcrsmatrix.hh:1100
CreateIterator createbegin()
get initial create iterator
Definition: bcrsmatrix.hh:1094
size_type N() const
number of rows (counted in blocks)
Definition: bcrsmatrix.hh:2001
Bi-conjugate Gradient Stabilized (BiCG-STAB)
Definition: solvers.hh:419
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:439
A vector of blocks with memory management.
Definition: bvector.hh:392
conjugate gradient method
Definition: solvers.hh:193
CGSolver(std::shared_ptr< const LinearOperator< X, X > > op, std::shared_ptr< ScalarProduct< X > > sp, std::shared_ptr< Preconditioner< X, X > > prec, scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
Constructor to initialize a CG solver.
Definition: solvers.hh:256
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:279
CGSolver(const LinearOperator< X, X > &op, const ScalarProduct< X > &sp, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
Constructor to initialize a CG solver.
Definition: solvers.hh:239
CGSolver(const LinearOperator< X, X > &op, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
Constructor to initialize a CG solver.
Definition: solvers.hh:222
Complete flexible conjugate gradient method.
Definition: solvers.hh:1679
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:1693
Generalized preconditioned conjugate gradient solver.
Definition: solvers.hh:1307
GeneralizedPCGSolver(const LinearOperator< X, X > &op, const ScalarProduct< X > &sp, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, int restart=10)
Set up nonlinear preconditioned conjugate gradient solver.
Definition: solvers.hh:1343
GeneralizedPCGSolver(const LinearOperator< X, X > &op, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, int restart=10)
Set up nonlinear preconditioned conjugate gradient solver.
Definition: solvers.hh:1331
GeneralizedPCGSolver(std::shared_ptr< const LinearOperator< X, X > > op, std::shared_ptr< Preconditioner< X, X > > prec, const ParameterTree &configuration)
Constructor.
Definition: solvers.hh:1361
GeneralizedPCGSolver(std::shared_ptr< const LinearOperator< X, X > > op, std::shared_ptr< const ScalarProduct< X > > sp, std::shared_ptr< Preconditioner< X, X > > prec, scalar_real_type reduction, int maxit, int verbose, int restart=10)
Set up nonlinear preconditioned conjugate gradient solver.
Definition: solvers.hh:1377
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:1391
gradient method
Definition: solvers.hh:124
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:142
Simd::Scalar< real_type > scalar_real_type
scalar type underlying the field_type
Definition: solver.hh:116
Y range_type
Type of the range of the operator to be inverted.
Definition: solver.hh:107
X domain_type
Type of the domain of the operator to be inverted.
Definition: solver.hh:104
X::field_type field_type
The field type of the operator.
Definition: solver.hh:110
FieldTraits< field_type >::real_type real_type
The real type of the field type (is the same if using real numbers, but differs for std::complex)
Definition: solver.hh:113
Base class for all implementations of iterative solvers.
Definition: solver.hh:205
IterativeSolver(const LinearOperator< X, X > &op, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose)
General constructor to initialize an iterative solver.
Definition: solver.hh:232
Preconditioned loop solver.
Definition: solvers.hh:59
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:73
Minimal Residual Method (MINRES)
Definition: solvers.hh:609
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:627
Hierarchical structure of string parameters.
Definition: parametertree.hh:37
Accelerated flexible conjugate gradient method.
Definition: solvers.hh:1500
RestartedFCGSolver(const LinearOperator< X, X > &op, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, int mmax=10)
Constructor to initialize a RestartedFCG solver.
Definition: solvers.hh:1518
RestartedFCGSolver(std::shared_ptr< const LinearOperator< X, X > > op, std::shared_ptr< Preconditioner< X, X > > prec, const ParameterTree &config)
Constructor.
Definition: solvers.hh:1558
RestartedFCGSolver(std::shared_ptr< const LinearOperator< X, X > > op, std::shared_ptr< const ScalarProduct< X > > sp, std::shared_ptr< Preconditioner< X, X > > prec, scalar_real_type reduction, int maxit, int verbose, int mmax=10)
Constructor to initialize a RestartedFCG solver.
Definition: solvers.hh:1538
void apply(X &x, X &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:1583
RestartedFCGSolver(const LinearOperator< X, X > &op, const ScalarProduct< X > &sp, Preconditioner< X, X > &prec, scalar_real_type reduction, int maxit, int verbose, int mmax=10)
Constructor to initialize a RestartedFCG solver.
Definition: solvers.hh:1528
implements the Flexible Generalized Minimal Residual (FGMRes) method (right preconditioned)
Definition: solvers.hh:1139
void apply(X &x, Y &b, double reduction, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:1169
implements the Generalized Minimal Residual (GMRes) method
Definition: solvers.hh:827
RestartedGMResSolver(std::shared_ptr< const LinearOperator< X, Y > > op, std::shared_ptr< Preconditioner< X, X > > prec, const ParameterTree &configuration)
Constructor.
Definition: solvers.hh:878
void apply(X &x, Y &b, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:910
ReboundAllocatorType< X, field_type > fAlloc
field_type Allocator retrieved from domain type
Definition: solvers.hh:838
ReboundAllocatorType< X, real_type > rAlloc
real_type Allocator retrieved from domain type
Definition: solvers.hh:840
RestartedGMResSolver(const LinearOperator< X, Y > &op, Preconditioner< X, Y > &prec, scalar_real_type reduction, int restart, int maxit, int verbose)
Set up RestartedGMResSolver solver.
Definition: solvers.hh:850
RestartedGMResSolver(const LinearOperator< X, Y > &op, const ScalarProduct< X > &sp, Preconditioner< X, Y > &prec, scalar_real_type reduction, int restart, int maxit, int verbose)
Set up RestartedGMResSolver solver.
Definition: solvers.hh:861
void apply(X &x, Y &b, double reduction, InverseOperatorResult &res) override
Apply inverse operator.
Definition: solvers.hh:923
RestartedGMResSolver(std::shared_ptr< const LinearOperator< X, Y > > op, std::shared_ptr< const ScalarProduct< X > > sp, std::shared_ptr< Preconditioner< X, Y > > prec, scalar_real_type reduction, int restart, int maxit, int verbose)
Set up RestartedGMResSolver solver.
Definition: solvers.hh:894
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:52
Thrown when a solver aborts due to some problem.
Definition: istlexception.hh:46
A few common exception classes.
IO interface of the SIMD abstraction.
Implementation of the BCRSMatrix class.
Define general, extensible interface for inverse operators.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
constexpr auto max
Function object that returns the greater of the given values.
Definition: hybridutilities.hh:484
constexpr auto min
Function object that returns the smaller of the given values.
Definition: hybridutilities.hh:506
V cond(M &&mask, const V &ifTrue, const V &ifFalse)
Like the ?: operator.
Definition: interface.hh:386
auto io(const V &v)
construct a stream inserter
Definition: io.hh:106
bool allTrue(const Mask &mask)
Whether all entries are true
Definition: interface.hh:439
auto max(const V &v1, const V &v2)
The binary maximum value over two simd objects.
Definition: interface.hh:409
typename Overloads::ScalarType< std::decay_t< V > >::type Scalar
Element type of some SIMD type.
Definition: interface.hh:235
Some useful basic math stuff.
Dune namespace.
Definition: alignedallocator.hh:13
constexpr auto get(std::integer_sequence< T, II... >, std::integral_constant< std::size_t, pos >={})
Return the entry at position pos of the given sequence.
Definition: integersequence.hh:22
Define general, extensible interface for operators. The available implementation wraps a matrix.
Define base class for scalar product and norm.
Include file for users of the SIMD abstraction layer.
Statistics about the application of an inverse operator.
Definition: solver.hh:50
double condition_estimate
Estimate of condition number.
Definition: solver.hh:81
void clear()
Resets all data.
Definition: solver.hh:58
bool converged
True if convergence criterion has been met.
Definition: solver.hh:75
A simple timing class.