Dune Core Modules (2.8.0)

solvers.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3
4 #ifndef DUNE_ISTL_SOLVERS_HH
5 #define DUNE_ISTL_SOLVERS_HH
6
7 #include <array>
8 #include <cmath>
9 #include <complex>
10 #include <iostream>
11 #include <memory>
12 #include <type_traits>
13 #include <vector>
14
15 #include <dune/common/exceptions.hh>
16 #include <dune/common/math.hh>
17 #include <dune/common/simd/io.hh>
18 #include <dune/common/simd/simd.hh>
19 #include <dune/common/std/type_traits.hh>
20 #include <dune/common/timer.hh>
21
22 #include <dune/istl/allocator.hh>
23 #include <dune/istl/bcrsmatrix.hh>
24 #include <dune/istl/eigenvalue/arpackpp.hh>
25 #include <dune/istl/istlexception.hh>
26 #include <dune/istl/operators.hh>
27 #include <dune/istl/preconditioner.hh>
28 #include <dune/istl/scalarproducts.hh>
29 #include <dune/istl/solver.hh>
30 #include <dune/istl/solverregistry.hh>
31
32 namespace Dune {
44 //=====================================================================
45 // Implementation of this interface
46 //=====================================================================
47
56 template<class X>
57 class LoopSolver : public IterativeSolver<X,X> {
58 public:
59 using typename IterativeSolver<X,X>::domain_type;
60 using typename IterativeSolver<X,X>::range_type;
61 using typename IterativeSolver<X,X>::field_type;
62 using typename IterativeSolver<X,X>::real_type;
63
64 // copy base class constructors
65 using IterativeSolver<X,X>::IterativeSolver;
66
67 // don't shadow four-argument version of apply defined in the base class
68 using IterativeSolver<X,X>::apply;
69
71 virtual void apply (X& x, X& b, InverseOperatorResult& res)
72 {
73 Iteration iteration(*this, res);
74 _prec->pre(x,b);
75
76 // overwrite b with defect
77 _op->applyscaleadd(-1,x,b);
78
79 // compute norm, \todo parallelization
80 real_type def = _sp->norm(b);
81 if(iteration.step(0, def)){
82 _prec->post(x);
83 return;
84 }
85 // prepare preconditioner
86
87 // allocate correction vector
88 X v(x);
89
90 // iteration loop
91 int i=1;
92 for ( ; i<=_maxit; i++ )
93 {
94 v = 0; // clear correction
95 _prec->apply(v,b); // apply preconditioner
96 x += v; // update solution
97 _op->applyscaleadd(-1,v,b); // update defect
98 def=_sp->norm(b); // comp defect norm
99 if(iteration.step(i, def))
100 break;
101 }
102
103 // postprocess preconditioner
104 _prec->post(x);
105 }
106
107 protected:
108 using IterativeSolver<X,X>::_op;
109 using IterativeSolver<X,X>::_prec;
110 using IterativeSolver<X,X>::_sp;
111 using IterativeSolver<X,X>::_reduction;
112 using IterativeSolver<X,X>::_maxit;
113 using IterativeSolver<X,X>::_verbose;
114 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
115 };
116 DUNE_REGISTER_ITERATIVE_SOLVER("loopsolver", defaultIterativeSolverCreator<Dune::LoopSolver>());
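
  // Usage sketch for LoopSolver: a minimal, illustrative driver. It assumes the
  // caller has already built a matching LinearOperator/Preconditioner pair
  // (e.g. a MatrixAdapter plus a SeqJac from <dune/istl/preconditioners.hh>);
  // the helper function name and the parameter values are illustrative only.
  template<class Vec>
  void exampleLoopSolverUsage(LinearOperator<Vec,Vec>& op,
                              Preconditioner<Vec,Vec>& prec,
                              Vec& x, Vec& b)
  {
    LoopSolver<Vec> solver(op, prec,
                           1e-8,   // requested defect reduction
                           100,    // maximum number of iterations
                           2);     // verbosity: print one line per iteration
    InverseOperatorResult stat;
    solver.apply(x, b, stat);      // note: b is overwritten with the final defect
  }
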
117
118
119 // all these solvers are taken from the SUMO library
121 template<class X>
122 class GradientSolver : public IterativeSolver<X,X> {
123 public:
124 using typename IterativeSolver<X,X>::domain_type;
125 using typename IterativeSolver<X,X>::range_type;
126 using typename IterativeSolver<X,X>::field_type;
127 using typename IterativeSolver<X,X>::real_type;
128
129 // copy base class constructors
130 using IterativeSolver<X,X>::IterativeSolver;
131
132 // don't shadow four-argument version of apply defined in the base class
133 using IterativeSolver<X,X>::apply;
134
140 virtual void apply (X& x, X& b, InverseOperatorResult& res)
141 {
142 Iteration iteration(*this, res);
143 _prec->pre(x,b); // prepare preconditioner
144
145 _op->applyscaleadd(-1,x,b); // overwrite b with defect
146
147 real_type def = _sp->norm(b); // compute norm
148 if(iteration.step(0, def)){
149 _prec->post(x);
150 return;
151 }
152
153 X p(x); // create local vectors
154 X q(b);
155
156 int i=1; // loop variables
157 field_type lambda;
158 for ( ; i<=_maxit; i++ )
159 {
160 p = 0; // clear correction
161 _prec->apply(p,b); // apply preconditioner
162 _op->apply(p,q); // q=Ap
163 lambda = _sp->dot(p,b)/_sp->dot(q,p); // minimization
164 x.axpy(lambda,p); // update solution
165 b.axpy(-lambda,q); // update defect
166
167 def =_sp->norm(b); // comp defect norm
168 if(iteration.step(i, def))
169 break;
170 }
171 // postprocess preconditioner
172 _prec->post(x);
173 }
174
175 protected:
176 using IterativeSolver<X,X>::_op;
177 using IterativeSolver<X,X>::_prec;
178 using IterativeSolver<X,X>::_sp;
179 using IterativeSolver<X,X>::_reduction;
180 using IterativeSolver<X,X>::_maxit;
181 using IterativeSolver<X,X>::_verbose;
182 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
183 };
184 DUNE_REGISTER_ITERATIVE_SOLVER("gradientsolver", defaultIterativeSolverCreator<Dune::GradientSolver>());
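
  // Usage sketch for GradientSolver, showing in addition how the statistics
  // collected in InverseOperatorResult are typically inspected after apply().
  // Operator and preconditioner setup is left to the caller; the helper name
  // and parameter values are illustrative only.
  template<class Vec>
  void exampleGradientSolverUsage(LinearOperator<Vec,Vec>& op,
                                  Preconditioner<Vec,Vec>& prec,
                                  Vec& x, Vec& b)
  {
    GradientSolver<Vec> solver(op, prec, 1e-6, 1000, 0);
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
    std::cout << "converged:   " << stat.converged  << '\n'
              << "iterations:  " << stat.iterations << '\n'
              << "reduction:   " << stat.reduction  << '\n'
              << "conv. rate:  " << stat.conv_rate  << '\n'
              << "elapsed [s]: " << stat.elapsed    << std::endl;
  }
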
185
187 template<class X>
188 class CGSolver : public IterativeSolver<X,X> {
189 public:
190 using typename IterativeSolver<X,X>::domain_type;
191 using typename IterativeSolver<X,X>::range_type;
192 using typename IterativeSolver<X,X>::field_type;
193 using typename IterativeSolver<X,X>::real_type;
194
195 // copy base class constructors
196 using IterativeSolver<X,X>::IterativeSolver;
197
198 private:
200
201 protected:
202
203 static constexpr bool enableConditionEstimate = (std::is_same_v<field_type,float> || std::is_same_v<field_type,double>);
204 using enableConditionEstimate_t [[deprecated("Use bool enableConditionEstimate instead. Will be removed after Dune 2.8")]]
205 = std::bool_constant<enableConditionEstimate>;
206
207 public:
208
209 // don't shadow four-argument version of apply defined in the base class
210 using IterativeSolver<X,X>::apply;
211
219 CGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
220 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose),
221 condition_estimate_(condition_estimate)
222 {
223 if (condition_estimate && !enableConditionEstimate) {
224 condition_estimate_ = false;
225 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
226 }
227 }
228
236 CGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
237 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
238 condition_estimate_(condition_estimate)
239 {
240 if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
241 condition_estimate_ = false;
242 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
243 }
244 }
245
253 CGSolver (std::shared_ptr<LinearOperator<X,X>> op, std::shared_ptr<ScalarProduct<X>> sp,
254 std::shared_ptr<Preconditioner<X,X>> prec,
255 scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
256 : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
257 condition_estimate_(condition_estimate)
258 {
259 if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
260 condition_estimate_ = false;
261 std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
262 }
263 }
264
276 virtual void apply (X& x, X& b, InverseOperatorResult& res)
277 {
278 Iteration iteration(*this,res);
279 _prec->pre(x,b); // prepare preconditioner
280
281 _op->applyscaleadd(-1,x,b); // overwrite b with defect
282
283 real_type def = _sp->norm(b); // compute norm
284 if(iteration.step(0, def)){
285 _prec->post(x);
286 return;
287 }
288
289 X p(x); // the search direction
290 X q(x); // a temporary vector
291
292 // Remember lambda and beta values for condition estimate
293 std::vector<real_type> lambdas(0);
294 std::vector<real_type> betas(0);
295
296 // some local variables
297 field_type rho,rholast,lambda,alpha,beta;
298
299 // determine initial search direction
300 p = 0; // clear correction
301 _prec->apply(p,b); // apply preconditioner
302 rholast = _sp->dot(p,b); // orthogonalization
303
304 // the loop
305 int i=1;
306 for ( ; i<=_maxit; i++ )
307 {
308 // minimize in given search direction p
309 _op->apply(p,q); // q=Ap
310 alpha = _sp->dot(p,q); // scalar product
311 lambda = rholast/alpha; // minimization
312 if constexpr (enableConditionEstimate)
313 if (condition_estimate_)
314 lambdas.push_back(std::real(lambda));
315 x.axpy(lambda,p); // update solution
316 b.axpy(-lambda,q); // update defect
317
318 // convergence test
319 def=_sp->norm(b); // comp defect norm
320 if(iteration.step(i, def))
321 break;
322
323 // determine new search direction
324 q = 0; // clear correction
325 _prec->apply(q,b); // apply preconditioner
326 rho = _sp->dot(q,b); // orthogonalization
327 beta = rho/rholast; // scaling factor
328 if constexpr (enableConditionEstimate)
329 if (condition_estimate_)
330 betas.push_back(std::real(beta));
331 p *= beta; // scale old search direction
332 p += q; // orthogonalization with correction
333 rholast = rho; // remember rho for recurrence
334 }
335
336 _prec->post(x); // postprocess preconditioner
337
338 if (condition_estimate_) {
339 #if HAVE_ARPACKPP
340 if constexpr (enableConditionEstimate) {
341 using std::sqrt;
342
343 // Build T matrix which has extreme eigenvalues approximating
344 // those of the original system
345 // (see Y. Saad, Iterative methods for sparse linear systems)
346
347 COND_MAT T(i, i, COND_MAT::row_wise);
348
349 for (auto row = T.createbegin(); row != T.createend(); ++row) {
350 if (row.index() > 0)
351 row.insert(row.index()-1);
352 row.insert(row.index());
353 if (row.index() < T.N() - 1)
354 row.insert(row.index()+1);
355 }
356 for (int row = 0; row < i; ++row) {
357 if (row > 0) {
358 T[row][row-1] = sqrt(betas[row-1]) / lambdas[row-1];
359 }
360
361 T[row][row] = 1.0 / lambdas[row];
362 if (row > 0) {
363 T[row][row] += betas[row-1] / lambdas[row-1];
364 }
365
366 if (row < i - 1) {
367 T[row][row+1] = sqrt(betas[row]) / lambdas[row];
368 }
369 }
370
371 // Compute largest and smallest eigenvalue of T matrix and return as estimate
372 ArPackPlusPlus_Algorithms<COND_MAT, COND_VEC> arpack(T);
373
374 real_type eps = 0.0;
375 COND_VEC eigv;
376 real_type min_eigv, max_eigv;
377 arpack.computeSymMinMagnitude (eps, eigv, min_eigv);
378 arpack.computeSymMaxMagnitude (eps, eigv, max_eigv);
379
380 res.condition_estimate = max_eigv / min_eigv;
381
382 if (this->_verbose > 0) {
383 std::cout << "Min eigv estimate: " << Simd::io(min_eigv) << '\n';
384 std::cout << "Max eigv estimate: " << Simd::io(max_eigv) << '\n';
385 std::cout << "Condition estimate: "
386 << Simd::io(max_eigv / min_eigv) << std::endl;
387 }
388 }
389 #else
390 std::cerr << "WARNING: Condition estimate was requested. This requires ARPACK, but ARPACK was not found!" << std::endl;
391 #endif
392 }
393 }
394
395 private:
396 bool condition_estimate_ = false;
397
398 // Matrix and vector types used for condition estimate
399 typedef BCRSMatrix<FieldMatrix<real_type,1,1>> COND_MAT;
400 typedef BlockVector<FieldVector<real_type,1>> COND_VEC;
401
402 protected:
403 using IterativeSolver<X,X>::_op;
404 using IterativeSolver<X,X>::_prec;
405 using IterativeSolver<X,X>::_sp;
406 using IterativeSolver<X,X>::_reduction;
407 using IterativeSolver<X,X>::_maxit;
408 using IterativeSolver<X,X>::_verbose;
409 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
410 };
411 DUNE_REGISTER_ITERATIVE_SOLVER("cgsolver", defaultIterativeSolverCreator<Dune::CGSolver>());
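
  // Usage sketch for CGSolver with the condition estimate enabled. The estimate
  // is computed from the Lanczos coefficients gathered during the iteration and
  // requires ARPACK++ (HAVE_ARPACKPP) as well as a float or double field type.
  // The operator is assumed to be symmetric positive definite; helper name and
  // parameter values are illustrative only.
  template<class Vec>
  void exampleCGSolverUsage(LinearOperator<Vec,Vec>& op,
                            Preconditioner<Vec,Vec>& prec,
                            Vec& x, Vec& b)
  {
    CGSolver<Vec> solver(op, prec, 1e-10, 500, 1,
                         true);    // request a condition estimate
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
    // If ARPACK support was available, stat.condition_estimate now holds the
    // ratio of the largest to the smallest estimated eigenvalue.
    std::cout << "condition estimate: " << stat.condition_estimate << std::endl;
  }
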
412
413 // Ronald Kriemann's BiCG-STAB implementation from Sumo
415 template<class X>
416 class BiCGSTABSolver : public IterativeSolver<X,X> {
417 public:
418 using typename IterativeSolver<X,X>::domain_type;
419 using typename IterativeSolver<X,X>::range_type;
420 using typename IterativeSolver<X,X>::field_type;
421 using typename IterativeSolver<X,X>::real_type;
422
423 // copy base class constructors
424 using IterativeSolver<X,X>::IterativeSolver;
425
426 // don't shadow four-argument version of apply defined in the base class
427 using IterativeSolver<X,X>::apply;
428
436 virtual void apply (X& x, X& b, InverseOperatorResult& res)
437 {
438 using std::abs;
439 const Simd::Scalar<real_type> EPSILON=1e-80;
441 double it;
442 field_type rho, rho_new, alpha, beta, h, omega;
443 real_type norm;
444
445 //
446 // get vectors and matrix
447 //
448 X& r=b;
449 X p(x);
450 X v(x);
451 X t(x);
452 X y(x);
453 X rt(x);
454
455 //
456 // begin iteration
457 //
458
459 // r = r - Ax; rt = r
460 Iteration<double> iteration(*this,res);
461 _prec->pre(x,r); // prepare preconditioner
462
463 _op->applyscaleadd(-1,x,r); // overwrite b with defect
464
465 rt=r;
466
467 norm = _sp->norm(r);
468 if(iteration.step(0, norm)){
469 _prec->post(x);
470 return;
471 }
472 p=0;
473 v=0;
474
475 rho = 1;
476 alpha = 1;
477 omega = 1;
478
479 //
480 // iteration
481 //
482
483 for (it = 0.5; it < _maxit; it+=.5)
484 {
485 //
486 // preprocess, set vecsizes etc.
487 //
488
489 // rho_new = < rt , r >
490 rho_new = _sp->dot(rt,r);
491
492 // look if breakdown occurred
493 if (Simd::allTrue(abs(rho) <= EPSILON))
494 DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - rho "
495 << Simd::io(rho) << " <= EPSILON " << EPSILON
496 << " after " << it << " iterations");
497 if (Simd::allTrue(abs(omega) <= EPSILON))
498 DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - omega "
499 << Simd::io(omega) << " <= EPSILON " << EPSILON
500 << " after " << it << " iterations");
501
502
503 if (it<1)
504 p = r;
505 else
506 {
507 beta = ( rho_new / rho ) * ( alpha / omega );
508 p.axpy(-omega,v); // p = r + beta (p - omega*v)
509 p *= beta;
510 p += r;
511 }
512
513 // y = W^-1 * p
514 y = 0;
515 _prec->apply(y,p); // apply preconditioner
516
517 // v = A * y
518 _op->apply(y,v);
519
520 // alpha = rho_new / < rt, v >
521 h = _sp->dot(rt,v);
522
523 if ( Simd::allTrue(abs(h) < EPSILON) )
524 DUNE_THROW(SolverAbort,"abs(h) < EPSILON in BiCGSTAB - abs(h) "
525 << Simd::io(abs(h)) << " < EPSILON " << EPSILON
526 << " after " << it << " iterations");
527
528 alpha = rho_new / h;
529
530 // apply first correction to x
531 // x <- x + alpha y
532 x.axpy(alpha,y);
533
534 // r = r - alpha*v
535 r.axpy(-alpha,v);
536
537 //
538 // test stop criteria
539 //
540
541 norm = _sp->norm(r);
542 if(iteration.step(it, norm)){
543 break;
544 }
545
546 it+=.5;
547
548 // y = W^-1 * r
549 y = 0;
550 _prec->apply(y,r);
551
552 // t = A * y
553 _op->apply(y,t);
554
555 // omega = < t, r > / < t, t >
556 omega = _sp->dot(t,r)/_sp->dot(t,t);
557
558 // apply second correction to x
559 // x <- x + omega y
560 x.axpy(omega,y);
561
562 // r = s - omega*t (remember : r = s)
563 r.axpy(-omega,t);
564
565 rho = rho_new;
566
567 //
568 // test stop criteria
569 //
570
571 norm = _sp->norm(r);
572 if(iteration.step(it, norm)){
573 break;
574 }
575 } // end for
576
577 _prec->post(x); // postprocess preconditioner
578 }
579
580 protected:
581 using IterativeSolver<X,X>::_op;
582 using IterativeSolver<X,X>::_prec;
583 using IterativeSolver<X,X>::_sp;
584 using IterativeSolver<X,X>::_reduction;
585 using IterativeSolver<X,X>::_maxit;
586 using IterativeSolver<X,X>::_verbose;
587 template<class CountType>
588 using Iteration = typename IterativeSolver<X,X>::template Iteration<CountType>;
589 };
590 DUNE_REGISTER_ITERATIVE_SOLVER("bicgstabsolver", defaultIterativeSolverCreator<Dune::BiCGSTABSolver>());
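
  // Usage sketch for BiCGSTABSolver, which also accepts nonsymmetric operators.
  // This variant passes an explicit ScalarProduct, as needed e.g. in parallel
  // computations where the scalar product must match the operator's category;
  // how op/sp/prec are built is left to the caller, names and values are
  // illustrative only.
  template<class Vec>
  void exampleBiCGSTABUsage(LinearOperator<Vec,Vec>& op,
                            ScalarProduct<Vec>& sp,
                            Preconditioner<Vec,Vec>& prec,
                            Vec& x, Vec& b)
  {
    BiCGSTABSolver<Vec> solver(op, sp, prec, 1e-8, 250, 1);
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
  }
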
591
598 template<class X>
599 class MINRESSolver : public IterativeSolver<X,X> {
600 public:
601 using typename IterativeSolver<X,X>::domain_type;
602 using typename IterativeSolver<X,X>::range_type;
603 using typename IterativeSolver<X,X>::field_type;
604 using typename IterativeSolver<X,X>::real_type;
605
606 // copy base class constructors
607 using IterativeSolver<X,X>::IterativeSolver;
608
609 // don't shadow four-argument version of apply defined in the base class
610 using IterativeSolver<X,X>::apply;
611
617 virtual void apply (X& x, X& b, InverseOperatorResult& res)
618 {
619 using std::sqrt;
620 using std::abs;
621 Iteration iteration(*this, res);
622 // prepare preconditioner
623 _prec->pre(x,b);
624
625 // overwrite rhs with defect
626 _op->applyscaleadd(-1,x,b);
627
628 // compute residual norm
629 real_type def = _sp->norm(b);
630 if(iteration.step(0, def)){
631 _prec->post(x);
632 return;
633 }
634
635 // recurrence coefficients as computed in Lanczos algorithm
636 field_type alpha, beta;
637 // diagonal entries of givens rotation
638 std::array<real_type,2> c{{0.0,0.0}};
639 // off-diagonal entries of givens rotation
640 std::array<field_type,2> s{{0.0,0.0}};
641
642 // recurrence coefficients (column k of tridiag matrix T_k)
643 std::array<field_type,3> T{{0.0,0.0,0.0}};
644
645 // the rhs vector of the min problem
646 std::array<field_type,2> xi{{1.0,0.0}};
647
648 // some temporary vectors
649 X z(b), dummy(b);
650
651 // initialize and clear correction
652 z = 0.0;
653 _prec->apply(z,b);
654
655 // beta is real and positive in exact arithmetic
656 // since it is the norm of the basis vectors (in unpreconditioned case)
657 beta = sqrt(_sp->dot(b,z));
658 field_type beta0 = beta;
659
660 // the search directions
661 std::array<X,3> p{{b,b,b}};
662 p[0] = 0.0;
663 p[1] = 0.0;
664 p[2] = 0.0;
665
666 // orthonormal basis vectors (in unpreconditioned case)
667 std::array<X,3> q{{b,b,b}};
668 q[0] = 0.0;
669 q[1] *= real_type(1.0)/beta;
670 q[2] = 0.0;
671
672 z *= real_type(1.0)/beta;
673
674 // the loop
675 int i = 1;
676 for( ; i<=_maxit; i++) {
677
678 dummy = z;
679 int i1 = i%3,
680 i0 = (i1+2)%3,
681 i2 = (i1+1)%3;
682
683 // symmetrically preconditioned Lanczos algorithm (see Greenbaum p.121)
684 _op->apply(z,q[i2]); // q[i2] = Az
685 q[i2].axpy(-beta,q[i0]);
686 // alpha is real since it is the diagonal entry of the hermitian tridiagonal matrix
687 // from the Lanczos Algorithm
688 // so the order in the scalar product doesn't matter even for the complex case
689 alpha = _sp->dot(z,q[i2]);
690 q[i2].axpy(-alpha,q[i1]);
691
692 z = 0.0;
693 _prec->apply(z,q[i2]);
694
695 // beta is real and positive in exact arithmetic
696 // since it is the norm of the basis vectors (in unpreconditioned case)
697 beta = sqrt(_sp->dot(q[i2],z));
698
699 q[i2] *= real_type(1.0)/beta;
700 z *= real_type(1.0)/beta;
701
702 // QR Factorization of recurrence coefficient matrix
703 // apply previous givens rotations to last column of T
704 T[1] = T[2];
705 if(i>2) {
706 T[0] = s[i%2]*T[1];
707 T[1] = c[i%2]*T[1];
708 }
709 if(i>1) {
710 T[2] = c[(i+1)%2]*alpha - s[(i+1)%2]*T[1];
711 T[1] = c[(i+1)%2]*T[1] + s[(i+1)%2]*alpha;
712 }
713 else
714 T[2] = alpha;
715
716 // update QR factorization
717 generateGivensRotation(T[2],beta,c[i%2],s[i%2]);
718 // to last column of T_k
719 T[2] = c[i%2]*T[2] + s[i%2]*beta;
720 // and to the rhs xi of the min problem
721 xi[i%2] = -s[i%2]*xi[(i+1)%2];
722 xi[(i+1)%2] *= c[i%2];
723
724 // compute correction direction
725 p[i2] = dummy;
726 p[i2].axpy(-T[1],p[i1]);
727 p[i2].axpy(-T[0],p[i0]);
728 p[i2] *= real_type(1.0)/T[2];
729
730 // apply correction/update solution
731 x.axpy(beta0*xi[(i+1)%2],p[i2]);
732
733 // remember beta_old
734 T[2] = beta;
735
736 // check for convergence
737 // the last entry in the rhs of the min-problem is the residual
738 def = abs(beta0*xi[i%2]);
739 if(iteration.step(i, def)){
740 break;
741 }
742 } // end for
743
744 // postprocess preconditioner
745 _prec->post(x);
746 }
747
748 private:
749
750 void generateGivensRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
751 {
752 using std::sqrt;
753 using std::abs;
754 using std::max;
755 using std::min;
756 const real_type eps = 1e-15;
757 real_type norm_dx = abs(dx);
758 real_type norm_dy = abs(dy);
759 real_type norm_max = max(norm_dx, norm_dy);
760 real_type norm_min = min(norm_dx, norm_dy);
761 real_type temp = norm_min/norm_max;
762 // we rewrite the code in a vectorizable fashion
763 cs = Simd::cond(norm_dy < eps,
764 real_type(1.0),
765 Simd::cond(norm_dx < eps,
766 real_type(0.0),
767 Simd::cond(norm_dy > norm_dx,
768 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
769 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
770 )));
771 sn = Simd::cond(norm_dy < eps,
772 field_type(0.0),
773 Simd::cond(norm_dx < eps,
774 field_type(1.0),
775 Simd::cond(norm_dy > norm_dx,
776 // dy and dx are real in exact arithmetic
777 // thus dx*dy is real so we can explicitly enforce it
778 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*dy/norm_dx/norm_dy,
779 // dy and dx is real in exact arithmetic
780 // so we don't have to conjugate both of them
781 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dy/dx
782 )));
783 }
784
785 protected:
786 using IterativeSolver<X,X>::_op;
787 using IterativeSolver<X,X>::_prec;
788 using IterativeSolver<X,X>::_sp;
789 using IterativeSolver<X,X>::_reduction;
790 using IterativeSolver<X,X>::_maxit;
791 using IterativeSolver<X,X>::_verbose;
792 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
793 };
794 DUNE_REGISTER_ITERATIVE_SOLVER("minressolver", defaultIterativeSolverCreator<Dune::MINRESSolver>());
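
  // Usage sketch for MINRESSolver. MINRES targets symmetric (self-adjoint),
  // possibly indefinite problems; when a preconditioner is used it should be
  // symmetric positive definite so that the preconditioned system remains
  // symmetric. Helper name and parameter values are illustrative only.
  template<class Vec>
  void exampleMINRESUsage(LinearOperator<Vec,Vec>& op,
                          Preconditioner<Vec,Vec>& prec,
                          Vec& x, Vec& b)
  {
    MINRESSolver<Vec> solver(op, prec, 1e-8, 300, 1);
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
  }
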
795
809 template<class X, class Y=X, class F = Y>
810 class RestartedGMResSolver : public IterativeSolver<X,Y>
811 {
812 public:
813 using typename IterativeSolver<X,Y>::domain_type;
814 using typename IterativeSolver<X,Y>::range_type;
815 using typename IterativeSolver<X,Y>::field_type;
816 using typename IterativeSolver<X,Y>::real_type;
817
818 protected:
820
822 using fAlloc = ReboundAllocatorType<X,field_type>;
824 using rAlloc = ReboundAllocatorType<X,real_type>;
825
826 public:
827
834 RestartedGMResSolver (LinearOperator<X,Y>& op, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
835 IterativeSolver<X,Y>::IterativeSolver(op,prec,reduction,maxit,verbose),
836 _restart(restart)
837 {}
838
845 RestartedGMResSolver (LinearOperator<X,Y>& op, ScalarProduct<X>& sp, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
846 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
847 _restart(restart)
848 {}
849
862 RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
863 IterativeSolver<X,Y>::IterativeSolver(op,prec,configuration),
864 _restart(configuration.get<int>("restart"))
865 {}
866
867 RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y> > op, std::shared_ptr<ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
868 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,configuration),
869 _restart(configuration.get<int>("restart"))
870 {}
871
878 RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y>> op,
879 std::shared_ptr<ScalarProduct<X>> sp,
880 std::shared_ptr<Preconditioner<X,Y>> prec,
881 scalar_real_type reduction, int restart, int maxit, int verbose) :
882 IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
883 _restart(restart)
884 {}
885
894 virtual void apply (X& x, Y& b, InverseOperatorResult& res)
895 {
896 apply(x,b,Simd::max(_reduction),res);
897 }
898
907 virtual void apply (X& x, Y& b, [[maybe_unused]] double reduction, InverseOperatorResult& res)
908 {
909 using std::abs;
910 const Simd::Scalar<real_type> EPSILON = 1e-80;
911 const int m = _restart;
912 real_type norm = 0.0;
913 int j = 1;
914 std::vector<field_type,fAlloc> s(m+1), sn(m);
915 std::vector<real_type,rAlloc> cs(m);
916 // need copy of rhs if GMRes has to be restarted
917 Y b2(b);
918 // helper vector
919 Y w(b);
920 std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
921 std::vector<F> v(m+1,b);
922
923 Iteration iteration(*this,res);
924
925 // clear solver statistics and set res.converged to false
926 _prec->pre(x,b);
927
928 // calculate defect and overwrite rhs with it
929 _op->applyscaleadd(-1.0,x,b); // b -= Ax
930 // calculate preconditioned defect
931 v[0] = 0.0; _prec->apply(v[0],b); // r = W^-1 b
932 norm = _sp->norm(v[0]);
933 if(iteration.step(0, norm)){
934 _prec->post(x);
935 return;
936 }
937
938 while(j <= _maxit && res.converged != true) {
939
940 int i = 0;
941 v[0] *= real_type(1.0)/norm;
942 s[0] = norm;
943 for(i=1; i<m+1; i++)
944 s[i] = 0.0;
945
946 for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++) {
947 w = 0.0;
948 // use v[i+1] as temporary vector
949 v[i+1] = 0.0;
950 // do Arnoldi algorithm
951 _op->apply(v[i],v[i+1]);
952 _prec->apply(w,v[i+1]);
953 for(int k=0; k<i+1; k++) {
954 // notice that _sp->dot(v[k],w) = v[k]\adjoint w
955 // so one has to pay attention to the order
956 // in the scalar product for the complex case
957 // doing the modified Gram-Schmidt algorithm
958 H[k][i] = _sp->dot(v[k],w);
959 // w -= H[k][i] * v[k]
960 w.axpy(-H[k][i],v[k]);
961 }
962 H[i+1][i] = _sp->norm(w);
963 if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
965 "breakdown in GMRes - |w| == 0.0 after " << j << " iterations");
966
967 // normalize new vector
968 v[i+1] = w; v[i+1] *= real_type(1.0)/H[i+1][i];
969
970 // update QR factorization
971 for(int k=0; k<i; k++)
972 applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
973
974 // compute new givens rotation
975 generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
976 // finish updating QR factorization
977 applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
978 applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
979
980 // norm of the defect is the last component of the vector s
981 norm = abs(s[i+1]);
982
983 iteration.step(j, norm);
984
985 } // end for
986
987 // calculate update vector
988 w = 0.0;
989 update(w,i,H,s,v);
990 // and current iterate
991 x += w;
992
993 // restart GMRes if convergence was not achieved,
994 // i.e. linear defect has not reached desired reduction
995 // and if j < _maxit (do not restart on last iteration)
996 if( res.converged != true && j < _maxit ) {
997
998 if(_verbose > 0)
999 std::cout << "=== GMRes::restart" << std::endl;
1000 // get saved rhs
1001 b = b2;
1002 // calculate new defect
1003 _op->applyscaleadd(-1.0,x,b); // b -= Ax;
1004 // calculate preconditioned defect
1005 v[0] = 0.0;
1006 _prec->apply(v[0],b);
1007 norm = _sp->norm(v[0]);
1008 }
1009
1010 } //end while
1011
1012 // postprocess preconditioner
1013 _prec->post(x);
1014 }
1015
1016 protected :
1017
1018 void update(X& w, int i,
1019 const std::vector<std::vector<field_type,fAlloc> >& H,
1020 const std::vector<field_type,fAlloc>& s,
1021 const std::vector<X>& v) {
1022 // solution vector of the upper triangular system
1023 std::vector<field_type,fAlloc> y(s);
1024
1025 // backsolve
1026 for(int a=i-1; a>=0; a--) {
1027 field_type rhs(s[a]);
1028 for(int b=a+1; b<i; b++)
1029 rhs -= H[a][b]*y[b];
1030 y[a] = rhs/H[a][a];
1031
1032 // compute update on the fly
1033 // w += y[a]*v[a]
1034 w.axpy(y[a],v[a]);
1035 }
1036 }
1037
1038 template<typename T>
1039 typename std::enable_if<std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1040 return t;
1041 }
1042
1043 template<typename T>
1044 typename std::enable_if<!std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1045 using std::conj;
1046 return conj(t);
1047 }
1048
1049 void
1050 generatePlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1051 {
1052 using std::sqrt;
1053 using std::abs;
1054 using std::max;
1055 using std::min;
1056 const real_type eps = 1e-15;
1057 real_type norm_dx = abs(dx);
1058 real_type norm_dy = abs(dy);
1059 real_type norm_max = max(norm_dx, norm_dy);
1060 real_type norm_min = min(norm_dx, norm_dy);
1061 real_type temp = norm_min/norm_max;
1062 // we rewrite the code in a vectorizable fashion
1063 cs = Simd::cond(norm_dy < eps,
1064 real_type(1.0),
1065 Simd::cond(norm_dx < eps,
1066 real_type(0.0),
1067 Simd::cond(norm_dy > norm_dx,
1068 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
1069 real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
1070 )));
1071 sn = Simd::cond(norm_dy < eps,
1072 field_type(0.0),
1073 Simd::cond(norm_dx < eps,
1074 field_type(1.0),
1075 Simd::cond(norm_dy > norm_dx,
1076 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*conjugate(dy)/norm_dx/norm_dy,
1077 field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*conjugate(dy/dx)
1078 )));
1079 }
1080
1081
1082 void
1083 applyPlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1084 {
1085 field_type temp = cs * dx + sn * dy;
1086 dy = -conjugate(sn) * dx + cs * dy;
1087 dx = temp;
1088 }
1089
1090 using IterativeSolver<X,Y>::_op;
1091 using IterativeSolver<X,Y>::_prec;
1092 using IterativeSolver<X,Y>::_sp;
1093 using IterativeSolver<X,Y>::_reduction;
1094 using IterativeSolver<X,Y>::_maxit;
1095 using IterativeSolver<X,Y>::_verbose;
1096 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1097 int _restart;
1098 };
1099 DUNE_REGISTER_ITERATIVE_SOLVER("restartedgmressolver", defaultIterativeSolverCreator<Dune::RestartedGMResSolver>());
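
  // Note on the plane rotations used above: generatePlaneRotation chooses
  // (cs, sn) such that
  //
  //   [  cs        sn ] [ dx ]   [ r ]
  //   [ -conj(sn)  cs ] [ dy ] = [ 0 ],   |r| = sqrt(|dx|^2 + |dy|^2),
  //
  // i.e. the rotation eliminates dy; the min/max scaling only guards against
  // overflow/underflow. Usage sketch for RestartedGMResSolver (helper name and
  // parameter values illustrative only); the restart length trades memory
  // (restart+1 Krylov vectors are stored) against convergence speed:
  template<class Vec>
  void exampleRestartedGMResUsage(LinearOperator<Vec,Vec>& op,
                                  Preconditioner<Vec,Vec>& prec,
                                  Vec& x, Vec& b)
  {
    RestartedGMResSolver<Vec> solver(op, prec,
                                     1e-8,   // requested defect reduction
                                     30,     // restart length
                                     500,    // maximum number of iterations
                                     1);     // verbosity
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
  }
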
1100
1114 template<class X, class Y=X, class F = Y>
1115 class RestartedFlexibleGMResSolver : public RestartedGMResSolver<X,Y>
1116 {
1117 public:
1118 using typename RestartedGMResSolver<X,Y>::domain_type;
1119 using typename RestartedGMResSolver<X,Y>::range_type;
1120 using typename RestartedGMResSolver<X,Y>::field_type;
1121 using typename RestartedGMResSolver<X,Y>::real_type;
1122
1123 private:
1125
1127 using fAlloc = typename RestartedGMResSolver<X,Y>::fAlloc;
1129 using rAlloc = typename RestartedGMResSolver<X,Y>::rAlloc;
1130
1131 public:
1132 // copy base class constructors
1133 using RestartedGMResSolver<X,Y>::RestartedGMResSolver;
1134
1135 // don't shadow four-argument version of apply defined in the base class
1136 using RestartedGMResSolver<X,Y>::apply;
1137
1146 void apply (X& x, Y& b, double reduction, InverseOperatorResult& res) override
1147 {
1148 using std::abs;
1149 const Simd::Scalar<real_type> EPSILON = 1e-80;
1150 const int m = _restart;
1151 real_type norm = 0.0;
1152 int i, j = 1, k;
1153 std::vector<field_type,fAlloc> s(m+1), sn(m);
1154 std::vector<real_type,rAlloc> cs(m);
1155 // helper vector
1156 Y tmp(b);
1157 std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
1158 std::vector<F> v(m+1,b);
1159 std::vector<X> w(m+1,b);
1160
1161 Iteration iteration(*this,res);
1162 // setup preconditioner if it does something in pre
1163
1164 // calculate residual and overwrite a copy of the rhs with it
1165 _prec->pre(x, b);
1166 v[0] = b;
1167 _op->applyscaleadd(-1.0, x, v[0]); // b -= Ax
1168
1169 norm = _sp->norm(v[0]); // the residual norm
1170 if(iteration.step(0, norm)){
1171 _prec->post(x);
1172 return;
1173 }
1174
1175 // start iterations
1176 res.converged = false;
1177 while(j <= _maxit && res.converged != true)
1178 {
1179 v[0] *= (1.0 / norm);
1180 s[0] = norm;
1181 for(i=1; i<m+1; ++i)
1182 s[i] = 0.0;
1183
1184 // inner loop
1185 for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++)
1186 {
1187 w[i] = 0.0;
1188 // compute wi = M^-1*vi (also called zi)
1189 _prec->apply(w[i], v[i]);
1190 // compute vi = A*wi
1191 // use v[i+1] as temporary vector for w
1192 _op->apply(w[i], v[i+1]);
1193 // do Arnoldi algorithm
1194 for(int kk=0; kk<i+1; kk++)
1195 {
1196 // notice that _sp->dot(v[kk],v[i+1]) = v[kk]\adjoint v[i+1]
1197 // so one has to pay attention to the order
1198 // in the scalar product for the complex case
1199 // doing the modified Gram-Schmidt algorithm
1200 H[kk][i] = _sp->dot(v[kk],v[i+1]);
1201 // v[i+1] -= H[kk][i] * v[kk]
1202 v[i+1].axpy(-H[kk][i], v[kk]);
1203 }
1204 H[i+1][i] = _sp->norm(v[i+1]);
1205 if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
1206 DUNE_THROW(SolverAbort, "breakdown in fGMRes - |w| (-> "
1207 << w[i] << ") == 0.0 after "
1208 << j << " iterations");
1209
1210 // v[i+1] = w*1/H[i+1][i]
1211 v[i+1] *= real_type(1.0)/H[i+1][i];
1212
1213 // update QR factorization
1214 for(k=0; k<i; k++)
1215 this->applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
1216
1217 // compute new givens rotation
1218 this->generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1219
1220 // finish updating QR factorization
1221 this->applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1222 this->applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
1223
1224 // norm of the residual is the last component of vector s
1225 using std::abs;
1226 norm = abs(s[i+1]);
1227 iteration.step(j, norm);
1228 } // end inner for loop
1229
1230 // calculate update vector
1231 tmp = 0.0;
1232 this->update(tmp, i, H, s, w);
1233 // and update current iterate
1234 x += tmp;
1235
1236 // restart fGMRes if convergence was not achieved,
1237 // i.e. linear residual has not reached desired reduction
1238 // and if still j < _maxit (do not restart on last iteration)
1239 if( res.converged != true && j < _maxit)
1240 {
1241 if (_verbose > 0)
1242 std::cout << "=== fGMRes::restart" << std::endl;
1243 // get rhs
1244 v[0] = b;
1245 // calculate new defect
1246 _op->applyscaleadd(-1.0, x,v[0]); // b -= Ax;
1247 // calculate preconditioned defect
1248 norm = _sp->norm(v[0]); // update the residual norm
1249 }
1250
1251 } // end outer while loop
1252
1253 // post-process preconditioner
1254 _prec->post(x);
1255 }
1256
1257 private:
1258 using RestartedGMResSolver<X,Y>::_op;
1259 using RestartedGMResSolver<X,Y>::_prec;
1260 using RestartedGMResSolver<X,Y>::_sp;
1261 using RestartedGMResSolver<X,Y>::_reduction;
1262 using RestartedGMResSolver<X,Y>::_maxit;
1263 using RestartedGMResSolver<X,Y>::_verbose;
1264 using RestartedGMResSolver<X,Y>::_restart;
1265 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1266 };
1267 DUNE_REGISTER_ITERATIVE_SOLVER("restartedflexiblegmressolver", defaultIterativeSolverCreator<Dune::RestartedFlexibleGMResSolver>());
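
  // Note: the flexible variant above applies the preconditioner on the right
  // and stores the preconditioned vectors w[i] separately, so the
  // preconditioner may change from one iteration to the next (e.g. an inner
  // iterative solve with a loose tolerance). Construction and apply() look
  // exactly like RestartedGMResSolver, e.g. (illustrative values):
  //
  //   RestartedFlexibleGMResSolver<Vec> solver(op, prec, 1e-8, 30, 500, 1);
  //   InverseOperatorResult stat;
  //   solver.apply(x, b, stat);
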
1268
1282 template<class X>
1283 class GeneralizedPCGSolver : public IterativeSolver<X,X>
1284 {
1285 public:
1286 using typename IterativeSolver<X,X>::domain_type;
1287 using typename IterativeSolver<X,X>::range_type;
1288 using typename IterativeSolver<X,X>::field_type;
1289 using typename IterativeSolver<X,X>::real_type;
1290
1291 private:
1293
1295 using fAlloc = ReboundAllocatorType<X,field_type>;
1296
1297 public:
1298
1299 // don't shadow four-argument version of apply defined in the base class
1300 using IterativeSolver<X,X>::apply;
1301
1308 GeneralizedPCGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1309 IterativeSolver<X,X>::IterativeSolver(op,prec,reduction,maxit,verbose),
1310 _restart(restart)
1311 {}
1312
1320 GeneralizedPCGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1321 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1322 _restart(restart)
1323 {}
1324
1325
1338 GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1339 IterativeSolver<X,X>::IterativeSolver(op,prec,configuration),
1340 _restart(configuration.get<int>("restart"))
1341 {}
1342
1343 GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X> > op, std::shared_ptr<ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1344 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,configuration),
1345 _restart(configuration.get<int>("restart"))
1346 {}
1354 GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1355 std::shared_ptr<ScalarProduct<X>> sp,
1356 std::shared_ptr<Preconditioner<X,X>> prec,
1357 scalar_real_type reduction, int maxit, int verbose,
1358 int restart = 10) :
1359 IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1360 _restart(restart)
1361 {}
1362
1368 virtual void apply (X& x, X& b, InverseOperatorResult& res)
1369 {
1370 Iteration iteration(*this, res);
1371 _prec->pre(x,b); // prepare preconditioner
1372 _op->applyscaleadd(-1,x,b); // overwrite b with defect
1373
1374 std::vector<std::shared_ptr<X> > p(_restart);
1375 std::vector<field_type,fAlloc> pp(_restart);
1376 X q(x); // a temporary vector
1377 X prec_res(x); // a temporary vector for preconditioner output
1378
1379 p[0].reset(new X(x));
1380
1381 real_type def = _sp->norm(b); // compute norm
1382 if(iteration.step(0, def)){
1383 _prec->post(x);
1384 return;
1385 }
1386 // some local variables
1387 field_type rho, lambda;
1388
1389 int i=0;
1390 int ii=0;
1391 // determine initial search direction
1392 *(p[0]) = 0; // clear correction
1393 _prec->apply(*(p[0]),b); // apply preconditioner
1394 rho = _sp->dot(*(p[0]),b); // orthogonalization
1395 _op->apply(*(p[0]),q); // q=Ap
1396 pp[0] = _sp->dot(*(p[0]),q); // scalar product
1397 lambda = rho/pp[0]; // minimization
1398 x.axpy(lambda,*(p[0])); // update solution
1399 b.axpy(-lambda,q); // update defect
1400
1401 // convergence test
1402 def=_sp->norm(b); // comp defect norm
1403 ++i;
1404 if(iteration.step(i, def)){
1405 _prec->post(x);
1406 return;
1407 }
1408
1409 while(i<_maxit) {
1410 // the loop
1411 int end=std::min(_restart, _maxit-i+1);
1412 for (ii=1; ii<end; ++ii )
1413 {
1414 //std::cout<<" ii="<<ii<<" i="<<i<<std::endl;
1415 // compute next conjugate direction
1416 prec_res = 0; // clear correction
1417 _prec->apply(prec_res,b); // apply preconditioner
1418
1419 p[ii].reset(new X(prec_res));
1420 _op->apply(prec_res, q);
1421
1422 for(int j=0; j<ii; ++j) {
1423 rho =_sp->dot(q,*(p[j]))/pp[j];
1424 p[ii]->axpy(-rho, *(p[j]));
1425 }
1426
1427 // minimize in given search direction
1428 _op->apply(*(p[ii]),q); // q=Ap
1429 pp[ii] = _sp->dot(*(p[ii]),q); // scalar product
1430 rho = _sp->dot(*(p[ii]),b); // orthogonalization
1431 lambda = rho/pp[ii]; // minimization
1432 x.axpy(lambda,*(p[ii])); // update solution
1433 b.axpy(-lambda,q); // update defect
1434
1435 // convergence test
1436 def = _sp->norm(b); // comp defect norm
1437
1438 ++i;
1439 iteration.step(i, def);
1440 }
1441 if(res.converged)
1442 break;
1443 if(end==_restart) {
1444 *(p[0])=*(p[_restart-1]);
1445 pp[0]=pp[_restart-1];
1446 }
1447 }
1448
1449 // postprocess preconditioner
1450 _prec->post(x);
1451
1452 }
1453
1454 private:
1455 using IterativeSolver<X,X>::_op;
1456 using IterativeSolver<X,X>::_prec;
1457 using IterativeSolver<X,X>::_sp;
1458 using IterativeSolver<X,X>::_reduction;
1459 using IterativeSolver<X,X>::_maxit;
1460 using IterativeSolver<X,X>::_verbose;
1461 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1462 int _restart;
1463 };
1464 DUNE_REGISTER_ITERATIVE_SOLVER("generalizedpcgsolver", defaultIterativeSolverCreator<Dune::GeneralizedPCGSolver>());
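
  // Usage sketch for the ParameterTree-based construction of
  // GeneralizedPCGSolver. Besides the "restart" key read by this class, the
  // IterativeSolver base reads the usual keys (assumed here to be "reduction",
  // "maxit" and "verbose"); helper name and values are illustrative only.
  template<class Vec>
  void exampleGeneralizedPCGFromConfig(std::shared_ptr<LinearOperator<Vec,Vec>> op,
                                       std::shared_ptr<Preconditioner<Vec,Vec>> prec,
                                       Vec& x, Vec& b)
  {
    ParameterTree config;
    config["reduction"] = "1e-8";
    config["maxit"]     = "500";
    config["verbose"]   = "1";
    config["restart"]   = "10";
    GeneralizedPCGSolver<Vec> solver(op, prec, config);
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
  }
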
1465
1477 template<class X>
1478 class RestartedFCGSolver : public IterativeSolver<X,X> {
1479 public:
1480 using typename IterativeSolver<X,X>::domain_type;
1481 using typename IterativeSolver<X,X>::range_type;
1482 using typename IterativeSolver<X,X>::field_type;
1483 using typename IterativeSolver<X,X>::real_type;
1484
1485 private:
1487
1488 public:
1489 // don't shadow four-argument version of apply defined in the base class
1490 using IterativeSolver<X,X>::apply;
1496 RestartedFCGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
1497 scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose), _mmax(mmax)
1498 {
1499 }
1500
1506 RestartedFCGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
1507 scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1508 {
1509 }
1510
1516 RestartedFCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1517 std::shared_ptr<ScalarProduct<X>> sp,
1518 std::shared_ptr<Preconditioner<X,X>> prec,
1519 scalar_real_type reduction, int maxit, int verbose,
1520 int mmax = 10)
1521 : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1522 {}
1523
1536 RestartedFCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1537 std::shared_ptr<Preconditioner<X,X>> prec,
1538 const ParameterTree& config)
1539 : IterativeSolver<X,X>(op, prec, config), _mmax(config.get("mmax", 10))
1540 {}
1541
1542 RestartedFCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1543 std::shared_ptr<ScalarProduct<X>> sp,
1544 std::shared_ptr<Preconditioner<X,X>> prec,
1545 const ParameterTree& config)
1546 : IterativeSolver<X,X>(op, sp, prec, config), _mmax(config.get("mmax", 10))
1547 {}
1548
1561 virtual void apply (X& x, X& b, InverseOperatorResult& res)
1562 {
1563 using rAlloc = ReboundAllocatorType<X,field_type>;
1564 res.clear();
1565 Iteration iteration(*this,res);
1566 _prec->pre(x,b); // prepare preconditioner
1567 _op->applyscaleadd(-1,x,b); // overwrite b with defect
1568
1569 //arrays for interim values:
1570 std::vector<X> d(_mmax+1, x); // array for directions
1571 std::vector<X> Ad(_mmax+1, x); // array for Ad[i]
1572 std::vector<field_type,rAlloc> ddotAd(_mmax+1,0); // array for <d[i],Ad[i]>
1573 X w(x);
1574
1575 real_type def = _sp->norm(b); // compute norm
1576 if(iteration.step(0, def)){
1577 _prec->post(x);
1578 return;
1579 }
1580
1581 // some local variables
1582 field_type alpha;
1583
1584 // the loop
1585 int i=1;
1586 int i_bounded=0;
1587 while(i<=_maxit && !res.converged) {
1588 for (; i_bounded <= _mmax && i<= _maxit; i_bounded++) {
1589 d[i_bounded] = 0; // reset search direction
1590 _prec->apply(d[i_bounded], b); // apply preconditioner
1591 w = d[i_bounded]; // copy of current d[i]
1592 // orthogonalization with previous directions
1593 orthogonalizations(i_bounded,Ad,w,ddotAd,d);
1594
1595 // save interim values for later use
1596 _op->apply(d[i_bounded], Ad[i_bounded]); // save Ad[i]
1597 ddotAd[i_bounded]=_sp->dot(d[i_bounded],Ad[i_bounded]); // save <d[i],Ad[i]>
1598 alpha = _sp->dot(d[i_bounded], b)/ddotAd[i_bounded]; // <d[i],b>/<d[i],Ad[i]>
1599
1600 //update solution and defect
1601 x.axpy(alpha, d[i_bounded]);
1602 b.axpy(-alpha, Ad[i_bounded]);
1603
1604 // convergence test
1605 def = _sp->norm(b); // comp defect norm
1606
1607 iteration.step(i, def);
1608 i++;
1609 }
1610 //restart: exchange first and last stored values
1611 cycle(Ad,d,ddotAd,i_bounded);
1612 }
1613
1614 // correct i, which overshoots if convergence was not achieved.
1615 i=std::min(_maxit,i);
1616
1617 _prec->post(x); // postprocess preconditioner
1618 }
1619
1620 private:
1621 //This function is called every iteration to orthogonalize against the last search directions
1622 virtual void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) {
1623 // The RestartedFCGSolver orthogonalizes only against directions with a lower array index.
1624 for (int k = 0; k < i_bounded; k++) {
1625 d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1626 }
1627 }
1628
1629 // This function is called every mmax iterations to handle limited array sizes.
1630 virtual void cycle(std::vector<X>& Ad,std::vector<X>& d,std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) {
1631 // Reset loop index and exchange the first and last arrays
1632 i_bounded = 1;
1633 std::swap(Ad[0], Ad[_mmax]);
1634 std::swap(d[0], d[_mmax]);
1635 std::swap(ddotAd[0], ddotAd[_mmax]);
1636 }
1637
1638 protected:
1639 int _mmax;
1640 using IterativeSolver<X,X>::_op;
1641 using IterativeSolver<X,X>::_prec;
1642 using IterativeSolver<X,X>::_sp;
1643 using IterativeSolver<X,X>::_reduction;
1644 using IterativeSolver<X,X>::_maxit;
1645 using IterativeSolver<X,X>::_verbose;
1646 using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1647 };
1648 DUNE_REGISTER_ITERATIVE_SOLVER("restartedfcgsolver", defaultIterativeSolverCreator<Dune::RestartedFCGSolver>());
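
  // Usage sketch for RestartedFCGSolver. The flexible CG variant keeps up to
  // mmax+1 search directions and orthogonalizes each new direction against the
  // stored ones, which makes it robust when the preconditioner varies between
  // iterations. Helper name and parameter values are illustrative only.
  template<class Vec>
  void exampleRestartedFCGUsage(LinearOperator<Vec,Vec>& op,
                                Preconditioner<Vec,Vec>& prec,
                                Vec& x, Vec& b)
  {
    RestartedFCGSolver<Vec> solver(op, prec,
                                   1e-8,   // requested defect reduction
                                   400,    // maximum number of iterations
                                   1,      // verbosity
                                   10);    // mmax: stored search directions
    InverseOperatorResult stat;
    solver.apply(x, b, stat);
  }
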
1649
1656 template<class X>
1657 class CompleteFCGSolver : public RestartedFCGSolver<X> {
1658 public:
1659 using typename RestartedFCGSolver<X>::domain_type;
1660 using typename RestartedFCGSolver<X>::range_type;
1661 using typename RestartedFCGSolver<X>::field_type;
1662 using typename RestartedFCGSolver<X>::real_type;
1663
1664 // copy base class constructors
1665 using RestartedFCGSolver<X>::RestartedFCGSolver;
1666
1667 // don't shadow four-argument version of apply defined in the base class
1668 using RestartedFCGSolver<X>::apply;
1669
1670 // only a minor part of the RestartedFCGSolver apply method is modified
1671 virtual void apply (X& x, X& b, InverseOperatorResult& res) override {
1672 // reset limiter of orthogonalization loop
1673 _k_limit = 0;
1674 this->RestartedFCGSolver<X>::apply(x,b,res);
1675 };
1676
1677 private:
1678 // This function is called every iteration to orthogonalize against the last search directions.
1679 virtual void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) override {
1680 // This solver also uses values with higher array indexes, if they exist.
1681 for (int k = 0; k < _k_limit; k++) {
1682 if(i_bounded!=k)
1683 d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1684 }
1685 // Increase the loop limit while the array is not yet completely filled.
1686 if(_k_limit<=i_bounded)
1687 _k_limit++;
1688
1689 };
1690
1691 // This function is called every mmax iterations to handle limited array sizes.
1692 virtual void cycle(std::vector<X>& Ad,std::vector<X>& d,std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) override {
1693 // Only the loop index i_bounded returns to 0 once it has reached mmax.
1694 i_bounded = 0;
1695 // Now all arrays are filled and the loop in void orthogonalizations can use the whole arrays.
1696 _k_limit = Ad.size();
1697 };
1698
1699 int _k_limit = 0;
1700
1701 protected:
1702 using RestartedFCGSolver<X>::_mmax;
1703 using RestartedFCGSolver<X>::_op;
1704 using RestartedFCGSolver<X>::_prec;
1705 using RestartedFCGSolver<X>::_sp;
1706 using RestartedFCGSolver<X>::_reduction;
1707 using RestartedFCGSolver<X>::_maxit;
1708 using RestartedFCGSolver<X>::_verbose;
1709 };
1710 DUNE_REGISTER_ITERATIVE_SOLVER("completefcgsolver", defaultIterativeSolverCreator<Dune::CompleteFCGSolver>());
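
  // Note: CompleteFCGSolver differs from RestartedFCGSolver only in the two
  // hooks overridden above; instead of discarding history at a restart it
  // orthogonalizes against all stored directions, at the price of more scalar
  // products per iteration. It is constructed exactly like RestartedFCGSolver,
  // e.g. (illustrative values):
  //
  //   CompleteFCGSolver<Vec> solver(op, prec, 1e-8, 400, 1, 10);
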
1712 } // end namespace
1713
1714 #endif