DUNE PDELab (2.7)

solvers.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 
4 #ifndef DUNE_ISTL_SOLVERS_HH
5 #define DUNE_ISTL_SOLVERS_HH
6 
7 #include <array>
8 #include <cmath>
9 #include <complex>
10 #include <iostream>
11 #include <memory>
12 #include <type_traits>
13 #include <vector>
14 
15 #include <dune/common/exceptions.hh>
16 #include <dune/common/hybridutilities.hh>
17 #include <dune/common/math.hh>
18 #include <dune/common/simd/io.hh>
19 #include <dune/common/simd/simd.hh>
20 #include <dune/common/std/type_traits.hh>
21 #include <dune/common/timer.hh>
22 
23 #include <dune/istl/allocator.hh>
24 #include <dune/istl/bcrsmatrix.hh>
25 #include <dune/istl/eigenvalue/arpackpp.hh>
26 #include <dune/istl/istlexception.hh>
27 #include <dune/istl/operators.hh>
28 #include <dune/istl/preconditioner.hh>
29 #include <dune/istl/scalarproducts.hh>
30 #include <dune/istl/solver.hh>
31 #include <dune/istl/solverfactory.hh>
32 
33 namespace Dune {
45  //=====================================================================
46  // Implementation of this interface
47  //=====================================================================
48 
57  template<class X>
58  class LoopSolver : public IterativeSolver<X,X> {
59  public:
60  using typename IterativeSolver<X,X>::domain_type;
61  using typename IterativeSolver<X,X>::range_type;
62  using typename IterativeSolver<X,X>::field_type;
63  using typename IterativeSolver<X,X>::real_type;
64 
65  // copy base class constructors
66  using IterativeSolver<X,X>::IterativeSolver;
67 
68  // don't shadow four-argument version of apply defined in the base class
69  using IterativeSolver<X,X>::apply;
70 
72  virtual void apply (X& x, X& b, InverseOperatorResult& res)
73  {
74  Iteration iteration(*this, res);
75  _prec->pre(x,b);
76 
77  // overwrite b with defect
78  _op->applyscaleadd(-1,x,b);
79 
80  // compute norm, \todo parallelization
81  real_type def = _sp->norm(b);
82  if(iteration.step(0, def)){
83  _prec->post(x);
84  return;
85  }
86  // prepare preconditioner
87 
88  // allocate correction vector
89  X v(x);
90 
91  // iteration loop
92  int i=1;
93  for ( ; i<=_maxit; i++ )
94  {
95  v = 0; // clear correction
96  _prec->apply(v,b); // apply preconditioner
97  x += v; // update solution
98  _op->applyscaleadd(-1,v,b); // update defect
99  def=_sp->norm(b); // comp defect norm
100  if(iteration.step(i, def))
101  break;
102  }
103 
104  // postprocess preconditioner
105  _prec->post(x);
106  }
107 
108  protected:
109  using IterativeSolver<X,X>::_op;
110  using IterativeSolver<X,X>::_prec;
111  using IterativeSolver<X,X>::_sp;
112  using IterativeSolver<X,X>::_reduction;
113  using IterativeSolver<X,X>::_maxit;
114  using IterativeSolver<X,X>::_verbose;
115  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
116  };
117  DUNE_REGISTER_ITERATIVE_SOLVER("loopsolver", defaultIterativeSolverCreator<Dune::LoopSolver>());
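A minimal usage sketch (not part of this header): it assumes a hypothetical helper function, a scalar BCRSMatrix system, and the SeqJac preconditioner from dune/istl/preconditioners.hh; any other LinearOperator/Preconditioner pair is used the same way.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/solvers.hh>

using Matrix = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
using Vector = Dune::BlockVector<Dune::FieldVector<double,1>>;

void solveWithLoopSolver(const Matrix& A, Vector& x, Vector& b)
{
  Dune::MatrixAdapter<Matrix,Vector,Vector> op(A);     // wrap A as a LinearOperator
  Dune::SeqJac<Matrix,Vector,Vector> prec(A, 1, 1.0);  // one Jacobi sweep per application

  // reduction 1e-8, at most 500 iterations, verbosity 1
  Dune::LoopSolver<Vector> solver(op, prec, 1e-8, 500, 1);

  Dune::InverseOperatorResult res;
  solver.apply(x, b, res);   // note: b is overwritten with the final defect
}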
118 
119 
120  // all these solvers are taken from the SUMO library
122  template<class X>
123  class GradientSolver : public IterativeSolver<X,X> {
124  public:
125  using typename IterativeSolver<X,X>::domain_type;
126  using typename IterativeSolver<X,X>::range_type;
127  using typename IterativeSolver<X,X>::field_type;
128  using typename IterativeSolver<X,X>::real_type;
129 
130  // copy base class constructors
131  using IterativeSolver<X,X>::IterativeSolver;
132 
133  // don't shadow four-argument version of apply defined in the base class
134  using IterativeSolver<X,X>::apply;
135 
141  virtual void apply (X& x, X& b, InverseOperatorResult& res)
142  {
143  Iteration iteration(*this, res);
144  _prec->pre(x,b); // prepare preconditioner
145 
146  _op->applyscaleadd(-1,x,b); // overwrite b with defect
147 
148  real_type def = _sp->norm(b); // compute norm
149  if(iteration.step(0, def)){
150  _prec->post(x);
151  return;
152  }
153 
154  X p(x); // create local vectors
155  X q(b);
156 
157  int i=1; // loop variables
158  field_type lambda;
159  for ( ; i<=_maxit; i++ )
160  {
161  p = 0; // clear correction
162  _prec->apply(p,b); // apply preconditioner
163  _op->apply(p,q); // q=Ap
164  lambda = _sp->dot(p,b)/_sp->dot(q,p); // minimization
165  x.axpy(lambda,p); // update solution
166  b.axpy(-lambda,q); // update defect
167 
168  def =_sp->norm(b); // comp defect norm
169  if(iteration.step(i, def))
170  break;
171  }
172  // postprocess preconditioner
173  _prec->post(x);
174  }
175 
176  protected:
177  using IterativeSolver<X,X>::_op;
178  using IterativeSolver<X,X>::_prec;
179  using IterativeSolver<X,X>::_sp;
180  using IterativeSolver<X,X>::_reduction;
181  using IterativeSolver<X,X>::_maxit;
182  using IterativeSolver<X,X>::_verbose;
183  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
184  };
185  DUNE_REGISTER_ITERATIVE_SOLVER("gradientsolver", defaultIterativeSolverCreator<Dune::GradientSolver>());
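For reference (standard steepest-descent theory, not specific to this file): for a symmetric positive definite operator A the step length computed above is the exact line minimizer of the energy functional along the preconditioned direction p,

\[
  \lambda
  \;=\; \operatorname*{arg\,min}_{t\in\mathbb{R}}\;
        \tfrac12\,\langle A(x+tp),\,x+tp\rangle - \langle f,\,x+tp\rangle
  \;=\; \frac{\langle p,\,r\rangle}{\langle Ap,\,p\rangle},
\]

where f is the original right-hand side, r = f - Ax is the defect held in b, and q = Ap; this is exactly lambda = _sp->dot(p,b)/_sp->dot(q,p) in the loop above.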
186 
188  template<class X>
189  class CGSolver : public IterativeSolver<X,X> {
190  public:
191  using typename IterativeSolver<X,X>::domain_type;
192  using typename IterativeSolver<X,X>::range_type;
193  using typename IterativeSolver<X,X>::field_type;
194  using typename IterativeSolver<X,X>::real_type;
195 
196  // copy base class constructors
197  using IterativeSolver<X,X>::IterativeSolver;
198 
199  private:
200  using typename IterativeSolver<X,X>::scalar_real_type;
201 
202  protected:
203 
204  using enableConditionEstimate_t = Dune::Std::bool_constant<(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)>;
205 
206  public:
207 
208  // don't shadow four-argument version of apply defined in the base class
209  using IterativeSolver<X,X>::apply;
210 
218  CGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
219  scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose),
220  condition_estimate_(condition_estimate)
221  {
222  if (condition_estimate && !(enableConditionEstimate_t{})) {
223  condition_estimate_ = false;
224  std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
225  }
226  }
227 
235  CGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
236  scalar_real_type reduction, int maxit, int verbose, bool condition_estimate) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
237  condition_estimate_(condition_estimate)
238  {
239  if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
240  condition_estimate_ = false;
241  std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
242  }
243  }
244 
252  CGSolver (std::shared_ptr<LinearOperator<X,X>> op, std::shared_ptr<ScalarProduct<X>> sp,
253  std::shared_ptr<Preconditioner<X,X>> prec,
254  scalar_real_type reduction, int maxit, int verbose, bool condition_estimate)
255  : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose),
256  condition_estimate_(condition_estimate)
257  {
258  if (condition_estimate && !(std::is_same<field_type,float>::value || std::is_same<field_type,double>::value)) {
259  condition_estimate_ = false;
260  std::cerr << "WARNING: Condition estimate was disabled. It is only available for double and float field types!" << std::endl;
261  }
262  }
263 
275  virtual void apply (X& x, X& b, InverseOperatorResult& res)
276  {
277  Iteration iteration(*this,res);
278  _prec->pre(x,b); // prepare preconditioner
279 
280  _op->applyscaleadd(-1,x,b); // overwrite b with defect
281 
282  real_type def = _sp->norm(b); // compute norm
283  if(iteration.step(0, def)){
284  _prec->post(x);
285  return;
286  }
287 
288  X p(x); // the search direction
289  X q(x); // a temporary vector
290 
291  // Remember lambda and beta values for condition estimate
292  std::vector<real_type> lambdas(0);
293  std::vector<real_type> betas(0);
294 
295  // some local variables
296  field_type rho,rholast,lambda,alpha,beta;
297 
298  // determine initial search direction
299  p = 0; // clear correction
300  _prec->apply(p,b); // apply preconditioner
301  rholast = _sp->dot(p,b); // orthogonalization
302 
303  // the loop
304  int i=1;
305  for ( ; i<=_maxit; i++ )
306  {
307  // minimize in given search direction p
308  _op->apply(p,q); // q=Ap
309  alpha = _sp->dot(p,q); // scalar product
310  lambda = rholast/alpha; // minimization
311  Hybrid::ifElse(enableConditionEstimate_t{}, [&](auto id) {
312  if (condition_estimate_)
313  lambdas.push_back(std::real(id(lambda)));
314  });
315  x.axpy(lambda,p); // update solution
316  b.axpy(-lambda,q); // update defect
317 
318  // convergence test
319  def=_sp->norm(b); // comp defect norm
320  if(iteration.step(i, def))
321  break;
322 
323  // determine new search direction
324  q = 0; // clear correction
325  _prec->apply(q,b); // apply preconditioner
326  rho = _sp->dot(q,b); // orthogonalization
327  beta = rho/rholast; // scaling factor
328  Hybrid::ifElse(enableConditionEstimate_t{}, [&](auto id) {
329  if (condition_estimate_)
330  betas.push_back(std::real(id(beta)));
331  });
332  p *= beta; // scale old search direction
333  p += q; // orthogonalization with correction
334  rholast = rho; // remember rho for recurrence
335  }
336 
337  _prec->post(x); // postprocess preconditioner
338 
339  if (condition_estimate_) {
340 #if HAVE_ARPACKPP
341  Hybrid::ifElse(enableConditionEstimate_t{}, [&](auto id) {
342  using std::sqrt;
343 
344  // Build T matrix which has extreme eigenvalues approximating
345  // those of the original system
346  // (see Y. Saad, Iterative methods for sparse linear systems)
347 
348  COND_MAT T(i, i, COND_MAT::row_wise);
349 
350  for (auto row = T.createbegin(); row != T.createend(); ++row) {
351  if (row.index() > 0)
352  row.insert(row.index()-1);
353  row.insert(row.index());
354  if (row.index() < T.N() - 1)
355  row.insert(row.index()+1);
356  }
357  for (int row = 0; row < i; ++row) {
358  if (row > 0) {
359  T[row][row-1] = sqrt(betas[row-1]) / lambdas[row-1];
360  }
361 
362  T[row][row] = 1.0 / id(lambdas[row]);
363  if (row > 0) {
364  T[row][row] += betas[row-1] / lambdas[row-1];
365  }
366 
367  if (row < i - 1) {
368  T[row][row+1] = sqrt(betas[row]) / lambdas[row];
369  }
370  }
371 
372  // Compute largest and smallest eigenvalue of T matrix and return as estimate
373  ArPackPlusPlus_Algorithms<COND_MAT, COND_VEC> arpack(T);
374 
375  real_type eps = 0.0;
376  COND_VEC eigv;
377  real_type min_eigv, max_eigv;
378  arpack.computeSymMinMagnitude (eps, eigv, min_eigv);
379  arpack.computeSymMaxMagnitude (eps, eigv, max_eigv);
380 
381  res.condition_estimate = id(max_eigv / min_eigv);
382 
383  if (this->_verbose > 0) {
384  std::cout << "Min eigv estimate: " << Simd::io(min_eigv) << '\n';
385  std::cout << "Max eigv estimate: " << Simd::io(max_eigv) << '\n';
386  std::cout << "Condition estimate: "
387  << Simd::io(max_eigv / min_eigv) << std::endl;
388  }
389  });
390 #else
391  std::cerr << "WARNING: Condition estimate was requested. This requires ARPACK, but ARPACK was not found!" << std::endl;
392 #endif
393  }
394  }
395 
396  private:
397  bool condition_estimate_ = false;
398 
399  // Matrix and vector types used for condition estimate
400  typedef BCRSMatrix<FieldMatrix<real_type,1,1> > COND_MAT;
401  typedef BlockVector<FieldVector<real_type,1> > COND_VEC;
402 
403  protected:
404  using IterativeSolver<X,X>::_op;
405  using IterativeSolver<X,X>::_prec;
406  using IterativeSolver<X,X>::_sp;
407  using IterativeSolver<X,X>::_reduction;
408  using IterativeSolver<X,X>::_maxit;
409  using IterativeSolver<X,X>::_verbose;
410  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
411  };
412  DUNE_REGISTER_ITERATIVE_SOLVER("cgsolver", defaultIterativeSolverCreator<Dune::CGSolver>());
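A usage sketch for the condition estimate (not part of this header; the helper function is hypothetical and Vector and the includes are the same assumed setup as in the LoopSolver sketch above, plus <iostream>):

template<class Vector>
void solveWithCG(Dune::LinearOperator<Vector,Vector>& op,
                 Dune::Preconditioner<Vector,Vector>& prec,
                 Vector& x, Vector& b)
{
  // the last constructor argument requests the condition estimate; it only takes
  // effect for float/double field types and needs ARPACK++ (HAVE_ARPACKPP)
  Dune::CGSolver<Vector> cg(op, prec, 1e-10, 1000, 2, /*condition_estimate=*/true);
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);
  std::cout << "condition estimate: " << res.condition_estimate << "\n";
}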
413 
414  // Ronald Kriemann's BiCG-STAB implementation from Sumo
416  template<class X>
417  class BiCGSTABSolver : public IterativeSolver<X,X> {
418  public:
419  using typename IterativeSolver<X,X>::domain_type;
420  using typename IterativeSolver<X,X>::range_type;
421  using typename IterativeSolver<X,X>::field_type;
422  using typename IterativeSolver<X,X>::real_type;
423 
424  // copy base class constructors
425  using IterativeSolver<X,X>::IterativeSolver;
426 
427  // don't shadow four-argument version of apply defined in the base class
428  using IterativeSolver<X,X>::apply;
429 
437  virtual void apply (X& x, X& b, InverseOperatorResult& res)
438  {
439  using std::abs;
440  const Simd::Scalar<real_type> EPSILON=1e-80;
441  using std::abs;
442  double it;
443  field_type rho, rho_new, alpha, beta, h, omega;
444  real_type norm;
445 
446  //
447  // get vectors and matrix
448  //
449  X& r=b;
450  X p(x);
451  X v(x);
452  X t(x);
453  X y(x);
454  X rt(x);
455 
456  //
457  // begin iteration
458  //
459 
460  // r = r - Ax; rt = r
461  Iteration<double> iteration(*this,res);
462  _prec->pre(x,r); // prepare preconditioner
463 
464  _op->applyscaleadd(-1,x,r); // overwrite b with defect
465 
466  rt=r;
467 
468  norm = _sp->norm(r);
469  if(iteration.step(0, norm)){
470  _prec->post(x);
471  return;
472  }
473  p=0;
474  v=0;
475 
476  rho = 1;
477  alpha = 1;
478  omega = 1;
479 
480  //
481  // iteration
482  //
483 
484  for (it = 0.5; it < _maxit; it+=.5)
485  {
486  //
487  // preprocess, set vecsizes etc.
488  //
489 
490  // rho_new = < rt , r >
491  rho_new = _sp->dot(rt,r);
492 
493  // look if breakdown occurred
494  if (Simd::allTrue(abs(rho) <= EPSILON))
495  DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - rho "
496  << Simd::io(rho) << " <= EPSILON " << EPSILON
497  << " after " << it << " iterations");
498  if (Simd::allTrue(abs(omega) <= EPSILON))
499  DUNE_THROW(SolverAbort,"breakdown in BiCGSTAB - omega "
500  << Simd::io(omega) << " <= EPSILON " << EPSILON
501  << " after " << it << " iterations");
502 
503 
504  if (it<1)
505  p = r;
506  else
507  {
508  beta = ( rho_new / rho ) * ( alpha / omega );
509  p.axpy(-omega,v); // p = r + beta (p - omega*v)
510  p *= beta;
511  p += r;
512  }
513 
514  // y = W^-1 * p
515  y = 0;
516  _prec->apply(y,p); // apply preconditioner
517 
518  // v = A * y
519  _op->apply(y,v);
520 
521  // alpha = rho_new / < rt, v >
522  h = _sp->dot(rt,v);
523 
524  if ( Simd::allTrue(abs(h) < EPSILON) )
525  DUNE_THROW(SolverAbort,"abs(h) < EPSILON in BiCGSTAB - abs(h) "
526  << Simd::io(abs(h)) << " < EPSILON " << EPSILON
527  << " after " << it << " iterations");
528 
529  alpha = rho_new / h;
530 
531  // apply first correction to x
532  // x <- x + alpha y
533  x.axpy(alpha,y);
534 
535  // r = r - alpha*v
536  r.axpy(-alpha,v);
537 
538  //
539  // test stop criteria
540  //
541 
542  norm = _sp->norm(r);
543  if(iteration.step(it, norm)){
544  break;
545  }
546 
547  it+=.5;
548 
549  // y = W^-1 * r
550  y = 0;
551  _prec->apply(y,r);
552 
553  // t = A * y
554  _op->apply(y,t);
555 
556  // omega = < t, r > / < t, t >
557  omega = _sp->dot(t,r)/_sp->dot(t,t);
558 
559  // apply second correction to x
560  // x <- x + omega y
561  x.axpy(omega,y);
562 
563  // r = s - omega*t (remember : r = s)
564  r.axpy(-omega,t);
565 
566  rho = rho_new;
567 
568  //
569  // test stop criteria
570  //
571 
572  norm = _sp->norm(r);
573  if(iteration.step(it, norm)){
574  break;
575  }
576  } // end for
577 
578  _prec->post(x); // postprocess preconditioner
579  }
580 
581  protected:
582  using IterativeSolver<X,X>::_op;
583  using IterativeSolver<X,X>::_prec;
584  using IterativeSolver<X,X>::_sp;
585  using IterativeSolver<X,X>::_reduction;
586  using IterativeSolver<X,X>::_maxit;
587  using IterativeSolver<X,X>::_verbose;
588  template<class CountType>
589  using Iteration = typename IterativeSolver<X,X>::template Iteration<CountType>;
590  };
591  DUNE_REGISTER_ITERATIVE_SOLVER("bicgstabsolver", defaultIterativeSolverCreator<Dune::BiCGSTABSolver>());
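A brief sketch (not part of this header; the helper function is hypothetical): unlike CG, BiCGSTAB does not require the operator to be symmetric. Vector is the assumed block-vector type from the LoopSolver sketch above.

template<class Vector>
void solveWithBiCGSTAB(Dune::LinearOperator<Vector,Vector>& op,
                       Dune::Preconditioner<Vector,Vector>& prec,
                       Vector& x, Vector& b)
{
  // the solver advances in half steps internally (note the double-valued loop
  // counter 'it' above), which is also reflected in the verbose iteration output
  Dune::BiCGSTABSolver<Vector> solver(op, prec, 1e-8, 500, 1);
  Dune::InverseOperatorResult res;
  solver.apply(x, b, res);
}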
592 
599  template<class X>
600  class MINRESSolver : public IterativeSolver<X,X> {
601  public:
602  using typename IterativeSolver<X,X>::domain_type;
603  using typename IterativeSolver<X,X>::range_type;
604  using typename IterativeSolver<X,X>::field_type;
605  using typename IterativeSolver<X,X>::real_type;
606 
607  // copy base class constructors
608  using IterativeSolver<X,X>::IterativeSolver;
609 
610  // don't shadow four-argument version of apply defined in the base class
611  using IterativeSolver<X,X>::apply;
612 
618  virtual void apply (X& x, X& b, InverseOperatorResult& res)
619  {
620  using std::sqrt;
621  using std::abs;
622  Iteration iteration(*this, res);
623  // prepare preconditioner
624  _prec->pre(x,b);
625 
626  // overwrite rhs with defect
627  _op->applyscaleadd(-1,x,b);
628 
629  // compute residual norm
630  real_type def = _sp->norm(b);
631  if(iteration.step(0, def)){
632  _prec->post(x);
633  return;
634  }
635 
636  // recurrence coefficients as computed in Lanczos algorithm
637  field_type alpha, beta;
638  // diagonal entries of givens rotation
639  std::array<real_type,2> c{{0.0,0.0}};
640  // off-diagonal entries of givens rotation
641  std::array<field_type,2> s{{0.0,0.0}};
642 
643  // recurrence coefficients (column k of tridiag matrix T_k)
644  std::array<field_type,3> T{{0.0,0.0,0.0}};
645 
646  // the rhs vector of the min problem
647  std::array<field_type,2> xi{{1.0,0.0}};
648 
649  // some temporary vectors
650  X z(b), dummy(b);
651 
652  // initialize and clear correction
653  z = 0.0;
654  _prec->apply(z,b);
655 
656  // beta is real and positive in exact arithmetic
657  // since it is the norm of the basis vectors (in unpreconditioned case)
658  beta = sqrt(_sp->dot(b,z));
659  field_type beta0 = beta;
660 
661  // the search directions
662  std::array<X,3> p{{b,b,b}};
663  p[0] = 0.0;
664  p[1] = 0.0;
665  p[2] = 0.0;
666 
667  // orthonormal basis vectors (in unpreconditioned case)
668  std::array<X,3> q{{b,b,b}};
669  q[0] = 0.0;
670  q[1] *= real_type(1.0)/beta;
671  q[2] = 0.0;
672 
673  z *= real_type(1.0)/beta;
674 
675  // the loop
676  int i = 1;
677  for( ; i<=_maxit; i++) {
678 
679  dummy = z;
680  int i1 = i%3,
681  i0 = (i1+2)%3,
682  i2 = (i1+1)%3;
683 
684  // symmetrically preconditioned Lanczos algorithm (see Greenbaum p.121)
685  _op->apply(z,q[i2]); // q[i2] = Az
686  q[i2].axpy(-beta,q[i0]);
687  // alpha is real since it is the diagonal entry of the hermitian tridiagonal matrix
688  // from the Lanczos Algorithm
689  // so the order in the scalar product doesn't matter even for the complex case
690  alpha = _sp->dot(z,q[i2]);
691  q[i2].axpy(-alpha,q[i1]);
692 
693  z = 0.0;
694  _prec->apply(z,q[i2]);
695 
696  // beta is real and positive in exact arithmetic
697  // since it is the norm of the basis vectors (in unpreconditioned case)
698  beta = sqrt(_sp->dot(q[i2],z));
699 
700  q[i2] *= real_type(1.0)/beta;
701  z *= real_type(1.0)/beta;
702 
703  // QR Factorization of recurrence coefficient matrix
704  // apply previous givens rotations to last column of T
705  T[1] = T[2];
706  if(i>2) {
707  T[0] = s[i%2]*T[1];
708  T[1] = c[i%2]*T[1];
709  }
710  if(i>1) {
711  T[2] = c[(i+1)%2]*alpha - s[(i+1)%2]*T[1];
712  T[1] = c[(i+1)%2]*T[1] + s[(i+1)%2]*alpha;
713  }
714  else
715  T[2] = alpha;
716 
717  // update QR factorization
718  generateGivensRotation(T[2],beta,c[i%2],s[i%2]);
719  // to last column of T_k
720  T[2] = c[i%2]*T[2] + s[i%2]*beta;
721  // and to the rhs xi of the min problem
722  xi[i%2] = -s[i%2]*xi[(i+1)%2];
723  xi[(i+1)%2] *= c[i%2];
724 
725  // compute correction direction
726  p[i2] = dummy;
727  p[i2].axpy(-T[1],p[i1]);
728  p[i2].axpy(-T[0],p[i0]);
729  p[i2] *= real_type(1.0)/T[2];
730 
731  // apply correction/update solution
732  x.axpy(beta0*xi[(i+1)%2],p[i2]);
733 
734  // remember beta_old
735  T[2] = beta;
736 
737  // check for convergence
738  // the last entry in the rhs of the min-problem is the residual
739  def = abs(beta0*xi[i%2]);
740  if(iteration.step(i, def)){
741  break;
742  }
743  } // end for
744 
745  // postprocess preconditioner
746  _prec->post(x);
747  }
748 
749  private:
750 
751  void generateGivensRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
752  {
753  using std::sqrt;
754  using std::abs;
755  using std::max;
756  using std::min;
757  const real_type eps = 1e-15;
758  real_type norm_dx = abs(dx);
759  real_type norm_dy = abs(dy);
760  real_type norm_max = max(norm_dx, norm_dy);
761  real_type norm_min = min(norm_dx, norm_dy);
762  real_type temp = norm_min/norm_max;
763  // we rewrite the code in a vectorizable fashion
764  cs = Simd::cond(norm_dy < eps,
765  real_type(1.0),
766  Simd::cond(norm_dx < eps,
767  real_type(0.0),
768  Simd::cond(norm_dy > norm_dx,
769  real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
770  real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
771  )));
772  sn = Simd::cond(norm_dy < eps,
773  field_type(0.0),
774  Simd::cond(norm_dx < eps,
775  field_type(1.0),
776  Simd::cond(norm_dy > norm_dx,
777  // dy and dx are real in exact arithmetic
778  // thus dx*dy is real so we can explicitly enforce it
779  field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*dy/norm_dx/norm_dy,
780  // dy and dx is real in exact arithmetic
781  // so we don't have to conjugate both of them
782  field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dy/dx
783  )));
784  }
785 
786  protected:
787  using IterativeSolver<X,X>::_op;
788  using IterativeSolver<X,X>::_prec;
789  using IterativeSolver<X,X>::_sp;
790  using IterativeSolver<X,X>::_reduction;
791  using IterativeSolver<X,X>::_maxit;
792  using IterativeSolver<X,X>::_verbose;
793  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
794  };
795  DUNE_REGISTER_ITERATIVE_SOLVER("minressolver", defaultIterativeSolverCreator<Dune::MINRESSolver>());
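For reference (standard Givens-rotation theory, not specific to this implementation): generateGivensRotation above, like generatePlaneRotation in the GMRes solver below, chooses the pair (c, s) so that the corresponding 2x2 rotation annihilates the second component of (dx, dy):

\[
\begin{pmatrix} c & s \\ -\bar{s} & c \end{pmatrix}
\begin{pmatrix} dx \\ dy \end{pmatrix}
=
\begin{pmatrix} r \\ 0 \end{pmatrix},
\qquad
c = \frac{|dx|}{\sqrt{|dx|^{2}+|dy|^{2}}},
\qquad
s = c\,\overline{\left(\frac{dy}{dx}\right)},
\]

where the conjugation is irrelevant in the real-valued MINRES recurrence. The nested Simd::cond branches only guard against division by (near-)zero and against overflow when forming the quotient of |dx| and |dy|.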
796 
810  template<class X, class Y=X, class F = Y>
811  class RestartedGMResSolver : public IterativeSolver<X,Y>
812  {
813  public:
814  using typename IterativeSolver<X,Y>::domain_type;
815  using typename IterativeSolver<X,Y>::range_type;
816  using typename IterativeSolver<X,Y>::field_type;
817  using typename IterativeSolver<X,Y>::real_type;
818 
819  protected:
820  using typename IterativeSolver<X,Y>::scalar_real_type;
821 
823  using fAlloc = ReboundAllocatorType<X,field_type>;
825  using rAlloc = ReboundAllocatorType<X,real_type>;
826 
827  public:
828 
835  RestartedGMResSolver (LinearOperator<X,Y>& op, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
836  IterativeSolver<X,Y>::IterativeSolver(op,prec,reduction,maxit,verbose),
837  _restart(restart)
838  {}
839 
846  RestartedGMResSolver (LinearOperator<X,Y>& op, ScalarProduct<X>& sp, Preconditioner<X,Y>& prec, scalar_real_type reduction, int restart, int maxit, int verbose) :
847  IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
848  _restart(restart)
849  {}
850 
863  RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
864  IterativeSolver<X,Y>::IterativeSolver(op,prec,configuration),
865  _restart(configuration.get<int>("restart"))
866  {}
867 
868  RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y> > op, std::shared_ptr<ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
869  IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,configuration),
870  _restart(configuration.get<int>("restart"))
871  {}
872 
879  RestartedGMResSolver (std::shared_ptr<LinearOperator<X,Y>> op,
880  std::shared_ptr<ScalarProduct<X>> sp,
881  std::shared_ptr<Preconditioner<X,Y>> prec,
882  scalar_real_type reduction, int restart, int maxit, int verbose) :
883  IterativeSolver<X,Y>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
884  _restart(restart)
885  {}
886 
895  virtual void apply (X& x, Y& b, InverseOperatorResult& res)
896  {
897  apply(x,b,Simd::max(_reduction),res);
898  }
899 
908  virtual void apply (X& x, Y& b, double reduction, InverseOperatorResult& res)
909  {
910  using std::abs;
911  const Simd::Scalar<real_type> EPSILON = 1e-80;
912  const int m = _restart;
913  real_type norm = 0.0;
914  int j = 1;
915  std::vector<field_type,fAlloc> s(m+1), sn(m);
916  std::vector<real_type,rAlloc> cs(m);
917  // need copy of rhs if GMRes has to be restarted
918  Y b2(b);
919  // helper vector
920  Y w(b);
921  std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
922  std::vector<F> v(m+1,b);
923 
924  Iteration iteration(*this,res);
925 
926  // clear solver statistics and set res.converged to false
927  _prec->pre(x,b);
928 
929  // calculate defect and overwrite rhs with it
930  _op->applyscaleadd(-1.0,x,b); // b -= Ax
931  // calculate preconditioned defect
932  v[0] = 0.0; _prec->apply(v[0],b); // r = W^-1 b
933  norm = _sp->norm(v[0]);
934  if(iteration.step(0, norm)){
935  _prec->post(x);
936  return;
937  }
938 
939  while(j <= _maxit && res.converged != true) {
940 
941  int i = 0;
942  v[0] *= real_type(1.0)/norm;
943  s[0] = norm;
944  for(i=1; i<m+1; i++)
945  s[i] = 0.0;
946 
947  for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++) {
948  w = 0.0;
949  // use v[i+1] as temporary vector
950  v[i+1] = 0.0;
951  // do Arnoldi algorithm
952  _op->apply(v[i],v[i+1]);
953  _prec->apply(w,v[i+1]);
954  for(int k=0; k<i+1; k++) {
955  // notice that _sp->dot(v[k],w) = v[k]\adjoint w
956  // so one has to pay attention to the order
957  // in the scalar product for the complex case
958  // doing the modified Gram-Schmidt algorithm
959  H[k][i] = _sp->dot(v[k],w);
960  // w -= H[k][i] * v[k]
961  w.axpy(-H[k][i],v[k]);
962  }
963  H[i+1][i] = _sp->norm(w);
964  if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
965  DUNE_THROW(SolverAbort,
966  "breakdown in GMRes - |w| == 0.0 after " << j << " iterations");
967 
968  // normalize new vector
969  v[i+1] = w; v[i+1] *= real_type(1.0)/H[i+1][i];
970 
971  // update QR factorization
972  for(int k=0; k<i; k++)
973  applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
974 
975  // compute new givens rotation
976  generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
977  // finish updating QR factorization
978  applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
979  applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
980 
981  // norm of the defect is the last component of the vector s
982  norm = abs(s[i+1]);
983 
984  iteration.step(j, norm);
985 
986  } // end for
987 
988  // calculate update vector
989  w = 0.0;
990  update(w,i,H,s,v);
991  // and current iterate
992  x += w;
993 
994  // restart GMRes if convergence was not achieved,
995  // i.e. linear defect has not reached desired reduction
996  // and if j < _maxit (do not restart on last iteration)
997  if( res.converged != true && j < _maxit ) {
998 
999  if(_verbose > 0)
1000  std::cout << "=== GMRes::restart" << std::endl;
1001  // get saved rhs
1002  b = b2;
1003  // calculate new defect
1004  _op->applyscaleadd(-1.0,x,b); // b -= Ax;
1005  // calculate preconditioned defect
1006  v[0] = 0.0;
1007  _prec->apply(v[0],b);
1008  norm = _sp->norm(v[0]);
1009  }
1010 
1011  } //end while
1012 
1013  // postprocess preconditioner
1014  _prec->post(x);
1015  }
1016 
1017  protected :
1018 
1019  void update(X& w, int i,
1020  const std::vector<std::vector<field_type,fAlloc> >& H,
1021  const std::vector<field_type,fAlloc>& s,
1022  const std::vector<X>& v) {
1023  // solution vector of the upper triangular system
1024  std::vector<field_type,fAlloc> y(s);
1025 
1026  // backsolve
1027  for(int a=i-1; a>=0; a--) {
1028  field_type rhs(s[a]);
1029  for(int b=a+1; b<i; b++)
1030  rhs -= H[a][b]*y[b];
1031  y[a] = rhs/H[a][a];
1032 
1033  // compute update on the fly
1034  // w += y[a]*v[a]
1035  w.axpy(y[a],v[a]);
1036  }
1037  }
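 // Editor's note (standard GMRes background, added for readability): after i Arnoldi
 // steps the iterate is x_i = x_0 + V_i y, where y solves the small least-squares
 // problem  min_y || s_0 e_1 - Hbar_i y ||_2, with Hbar_i the (i+1) x i Hessenberg
 // matrix accumulated in H and s_0 the defect norm at the start of the cycle.
 // The Givens rotations applied in apply() reduce Hbar_i to upper triangular form,
 // so update() only needs the back substitution above and accumulates the
 // correction w += y[a]*v[a] on the fly.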
1038 
1039  template<typename T>
1040  typename std::enable_if<std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1041  return t;
1042  }
1043 
1044  template<typename T>
1045  typename std::enable_if<!std::is_same<field_type,real_type>::value,T>::type conjugate(const T& t) {
1046  using std::conj;
1047  return conj(t);
1048  }
1049 
1050  void
1051  generatePlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1052  {
1053  using std::sqrt;
1054  using std::abs;
1055  using std::max;
1056  using std::min;
1057  const real_type eps = 1e-15;
1058  real_type norm_dx = abs(dx);
1059  real_type norm_dy = abs(dy);
1060  real_type norm_max = max(norm_dx, norm_dy);
1061  real_type norm_min = min(norm_dx, norm_dy);
1062  real_type temp = norm_min/norm_max;
1063  // we rewrite the code in a vectorizable fashion
1064  cs = Simd::cond(norm_dy < eps,
1065  real_type(1.0),
1066  Simd::cond(norm_dx < eps,
1067  real_type(0.0),
1068  Simd::cond(norm_dy > norm_dx,
1069  real_type(1.0)/sqrt(real_type(1.0) + temp*temp)*temp,
1070  real_type(1.0)/sqrt(real_type(1.0) + temp*temp)
1071  )));
1072  sn = Simd::cond(norm_dy < eps,
1073  field_type(0.0),
1074  Simd::cond(norm_dx < eps,
1075  field_type(1.0),
1076  Simd::cond(norm_dy > norm_dx,
1077  field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*dx*conjugate(dy)/norm_dx/norm_dy,
1078  field_type(1.0)/sqrt(real_type(1.0) + temp*temp)*conjugate(dy/dx)
1079  )));
1080  }
1081 
1082 
1083  void
1084  applyPlaneRotation(field_type &dx, field_type &dy, real_type &cs, field_type &sn)
1085  {
1086  field_type temp = cs * dx + sn * dy;
1087  dy = -conjugate(sn) * dx + cs * dy;
1088  dx = temp;
1089  }
1090 
1091  using IterativeSolver<X,Y>::_op;
1092  using IterativeSolver<X,Y>::_prec;
1093  using IterativeSolver<X,Y>::_sp;
1094  using IterativeSolver<X,Y>::_reduction;
1095  using IterativeSolver<X,Y>::_maxit;
1096  using IterativeSolver<X,Y>::_verbose;
1097  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1098  int _restart;
1099  };
1100  DUNE_REGISTER_ITERATIVE_SOLVER("restartedgmressolver", defaultIterativeSolverCreator<Dune::RestartedGMResSolver>());
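A construction sketch using the ParameterTree constructors above (not part of this header; the helper function and Matrix/Vector types are the assumptions from the LoopSolver sketch). Only the "restart" key is read by this class itself; "reduction", "maxit" and "verbose" are assumed here to be the keys consumed by the IterativeSolver base constructor, so check solver.hh if they differ.

#include <dune/common/parametertree.hh>

void solveWithGMRes(const Matrix& A, Vector& x, Vector& b)
{
  Dune::ParameterTree config;
  config["reduction"] = "1e-8";   // assumed base-class key
  config["maxit"]     = "500";    // assumed base-class key
  config["verbose"]   = "1";      // assumed base-class key
  config["restart"]   = "30";     // read via configuration.get<int>("restart") above

  auto op   = std::make_shared<Dune::MatrixAdapter<Matrix,Vector,Vector>>(A);
  auto prec = std::make_shared<Dune::SeqJac<Matrix,Vector,Vector>>(A, 1, 1.0);

  Dune::RestartedGMResSolver<Vector> gmres(op, prec, config);
  Dune::InverseOperatorResult res;
  gmres.apply(x, b, res);
}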
1101 
1115  template<class X, class Y=X, class F = Y>
1116  class RestartedFlexibleGMResSolver : public RestartedGMResSolver<X,Y>
1117  {
1118  public:
1119  using typename RestartedGMResSolver<X,Y>::domain_type;
1120  using typename RestartedGMResSolver<X,Y>::range_type;
1121  using typename RestartedGMResSolver<X,Y>::field_type;
1122  using typename RestartedGMResSolver<X,Y>::real_type;
1123 
1124  private:
1126 
1128  using fAlloc = typename RestartedGMResSolver<X,Y>::fAlloc;
1130  using rAlloc = typename RestartedGMResSolver<X,Y>::rAlloc;
1131 
1132  public:
1133  // copy base class constructors
1134  using RestartedGMResSolver<X,Y>::RestartedGMResSolver;
1135 
1136  // don't shadow four-argument version of apply defined in the base class
1137  using RestartedGMResSolver<X,Y>::apply;
1138 
1147  void apply (X& x, Y& b, double reduction, InverseOperatorResult& res) override
1148  {
1149  using std::abs;
1150  const Simd::Scalar<real_type> EPSILON = 1e-80;
1151  const int m = _restart;
1152  real_type norm = 0.0;
1153  int i, j = 1, k;
1154  std::vector<field_type,fAlloc> s(m+1), sn(m);
1155  std::vector<real_type,rAlloc> cs(m);
1156  // helper vector
1157  Y tmp(b);
1158  std::vector< std::vector<field_type,fAlloc> > H(m+1,s);
1159  std::vector<F> v(m+1,b);
1160  std::vector<X> w(m+1,b);
1161 
1162  Iteration iteration(*this,res);
1163  // setup preconditioner if it does something in pre
1164 
1165  // calculate residual and overwrite a copy of the rhs with it
1166  _prec->pre(x, b);
1167  v[0] = b;
1168  _op->applyscaleadd(-1.0, x, v[0]); // b -= Ax
1169 
1170  norm = _sp->norm(v[0]); // the residual norm
1171  if(iteration.step(0, norm)){
1172  _prec->post(x);
1173  return;
1174  }
1175 
1176  // start iterations
1177  res.converged = false;
1178  while(j <= _maxit && res.converged != true)
1179  {
1180  v[0] *= (1.0 / norm);
1181  s[0] = norm;
1182  for(i=1; i<m+1; ++i)
1183  s[i] = 0.0;
1184 
1185  // inner loop
1186  for(i=0; i < m && j <= _maxit && res.converged != true; i++, j++)
1187  {
1188  w[i] = 0.0;
1189  // compute wi = M^-1*vi (also called zi)
1190  _prec->apply(w[i], v[i]);
1191  // compute vi = A*wi
1192  // use v[i+1] as temporary vector for w
1193  _op->apply(w[i], v[i+1]);
1194  // do Arnoldi algorithm
1195  for(int k=0; k<i+1; k++)
1196  {
1197  // notice that _sp->dot(v[k],v[i+1]) = v[k]\adjoint v[i+1]
1198  // so one has to pay attention to the order
1199  // in the scalar product for the complex case
1200  // doing the modified Gram-Schmidt algorithm
1201  H[k][i] = _sp->dot(v[k],v[i+1]);
1202  // w -= H[k][i] * v[k]
1203  v[i+1].axpy(-H[k][i], v[k]);
1204  }
1205  H[i+1][i] = _sp->norm(v[i+1]);
1206  if(Simd::allTrue(abs(H[i+1][i]) < EPSILON))
1207  DUNE_THROW(SolverAbort, "breakdown in fGMRes - |w| (-> "
1208  << w[i] << ") == 0.0 after "
1209  << j << " iterations");
1210 
1211  // v[i+1] = w*1/H[i+1][i]
1212  v[i+1] *= real_type(1.0)/H[i+1][i];
1213 
1214  // update QR factorization
1215  for(k=0; k<i; k++)
1216  this->applyPlaneRotation(H[k][i],H[k+1][i],cs[k],sn[k]);
1217 
1218  // compute new givens rotation
1219  this->generatePlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1220 
1221  // finish updating QR factorization
1222  this->applyPlaneRotation(H[i][i],H[i+1][i],cs[i],sn[i]);
1223  this->applyPlaneRotation(s[i],s[i+1],cs[i],sn[i]);
1224 
1225  // norm of the residual is the last component of vector s
1226  using std::abs;
1227  norm = abs(s[i+1]);
1228  iteration.step(j, norm);
1229  } // end inner for loop
1230 
1231  // calculate update vector
1232  tmp = 0.0;
1233  this->update(tmp, i, H, s, w);
1234  // and update current iterate
1235  x += tmp;
1236 
1237  // restart fGMRes if convergence was not achieved,
1238  // i.e. linear residual has not reached desired reduction
1239  // and if still j < _maxit (do not restart on last iteration)
1240  if( res.converged != true && j < _maxit)
1241  {
1242  if (_verbose > 0)
1243  std::cout << "=== fGMRes::restart" << std::endl;
1244  // get rhs
1245  v[0] = b;
1246  // calculate new defect
1247  _op->applyscaleadd(-1.0, x,v[0]); // b -= Ax;
1248  // calculate preconditioned defect
1249  norm = _sp->norm(v[0]); // update the residual norm
1250  }
1251 
1252  } // end outer while loop
1253 
1254  // post-process preconditioner
1255  _prec->post(x);
1256  }
1257 
1258 private:
1259  using RestartedGMResSolver<X,Y>::_op;
1260  using RestartedGMResSolver<X,Y>::_prec;
1261  using RestartedGMResSolver<X,Y>::_sp;
1262  using RestartedGMResSolver<X,Y>::_reduction;
1263  using RestartedGMResSolver<X,Y>::_maxit;
1264  using RestartedGMResSolver<X,Y>::_verbose;
1265  using RestartedGMResSolver<X,Y>::_restart;
1266  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1267  };
1268  DUNE_REGISTER_ITERATIVE_SOLVER("restartedflexiblegmressolver", defaultIterativeSolverCreator<Dune::RestartedFlexibleGMResSolver>());
1269 
1283  template<class X>
1284  class GeneralizedPCGSolver : public IterativeSolver<X,X>
1285  {
1286  public:
1287  using typename IterativeSolver<X,X>::domain_type;
1288  using typename IterativeSolver<X,X>::range_type;
1289  using typename IterativeSolver<X,X>::field_type;
1290  using typename IterativeSolver<X,X>::real_type;
1291 
1292  private:
1293  using typename IterativeSolver<X,X>::scalar_real_type;
1294 
1296  using fAlloc = ReboundAllocatorType<X,field_type>;
1297 
1298  public:
1299 
1300  // don't shadow four-argument version of apply defined in the base class
1301  using IterativeSolver<X,X>::apply;
1302 
1309  GeneralizedPCGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1310  IterativeSolver<X,X>::IterativeSolver(op,prec,reduction,maxit,verbose),
1311  _restart(restart)
1312  {}
1313 
1321  GeneralizedPCGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec, scalar_real_type reduction, int maxit, int verbose, int restart = 10) :
1322  IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1323  _restart(restart)
1324  {}
1325 
1326 
1339  GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X> > op, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1340  IterativeSolver<X,X>::IterativeSolver(op,prec,configuration),
1341  _restart(configuration.get<int>("restart"))
1342  {}
1343 
1344  GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X> > op, std::shared_ptr<ScalarProduct<X> > sp, std::shared_ptr<Preconditioner<X,X> > prec, const ParameterTree& configuration) :
1345  IterativeSolver<X,X>::IterativeSolver(op,sp,prec,configuration),
1346  _restart(configuration.get<int>("restart"))
1347  {}
1355  GeneralizedPCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1356  std::shared_ptr<ScalarProduct<X>> sp,
1357  std::shared_ptr<Preconditioner<X,X>> prec,
1358  scalar_real_type reduction, int maxit, int verbose,
1359  int restart = 10) :
1360  IterativeSolver<X,X>::IterativeSolver(op,sp,prec,reduction,maxit,verbose),
1361  _restart(restart)
1362  {}
1363 
1369  virtual void apply (X& x, X& b, InverseOperatorResult& res)
1370  {
1371  Iteration iteration(*this, res);
1372  _prec->pre(x,b); // prepare preconditioner
1373  _op->applyscaleadd(-1,x,b); // overwrite b with defect
1374 
1375  std::vector<std::shared_ptr<X> > p(_restart);
1376  std::vector<field_type,fAlloc> pp(_restart);
1377  X q(x); // a temporary vector
1378  X prec_res(x); // a temporary vector for preconditioner output
1379 
1380  p[0].reset(new X(x));
1381 
1382  real_type def = _sp->norm(b); // compute norm
1383  if(iteration.step(0, def)){
1384  _prec->post(x);
1385  return;
1386  }
1387  // some local variables
1388  field_type rho, lambda;
1389 
1390  int i=0;
1391  int ii=0;
1392  // determine initial search direction
1393  *(p[0]) = 0; // clear correction
1394  _prec->apply(*(p[0]),b); // apply preconditioner
1395  rho = _sp->dot(*(p[0]),b); // orthogonalization
1396  _op->apply(*(p[0]),q); // q=Ap
1397  pp[0] = _sp->dot(*(p[0]),q); // scalar product
1398  lambda = rho/pp[0]; // minimization
1399  x.axpy(lambda,*(p[0])); // update solution
1400  b.axpy(-lambda,q); // update defect
1401 
1402  // convergence test
1403  def=_sp->norm(b); // comp defect norm
1404  ++i;
1405  if(iteration.step(i, def)){
1406  _prec->post(x);
1407  return;
1408  }
1409 
1410  while(i<_maxit) {
1411  // the loop
1412  int end=std::min(_restart, _maxit-i+1);
1413  for (ii=1; ii<end; ++ii )
1414  {
1415  //std::cout<<" ii="<<ii<<" i="<<i<<std::endl;
1416  // compute next conjugate direction
1417  prec_res = 0; // clear correction
1418  _prec->apply(prec_res,b); // apply preconditioner
1419 
1420  p[ii].reset(new X(prec_res));
1421  _op->apply(prec_res, q);
1422 
1423  for(int j=0; j<ii; ++j) {
1424  rho =_sp->dot(q,*(p[j]))/pp[j];
1425  p[ii]->axpy(-rho, *(p[j]));
1426  }
1427 
1428  // minimize in given search direction
1429  _op->apply(*(p[ii]),q); // q=Ap
1430  pp[ii] = _sp->dot(*(p[ii]),q); // scalar product
1431  rho = _sp->dot(*(p[ii]),b); // orthogonalization
1432  lambda = rho/pp[ii]; // minimization
1433  x.axpy(lambda,*(p[ii])); // update solution
1434  b.axpy(-lambda,q); // update defect
1435 
1436  // convergence test
1437  def = _sp->norm(b); // comp defect norm
1438 
1439  ++i;
1440  iteration.step(i, def);
1441  }
1442  if(res.converged)
1443  break;
1444  if(end==_restart) {
1445  *(p[0])=*(p[_restart-1]);
1446  pp[0]=pp[_restart-1];
1447  }
1448  }
1449 
1450  // postprocess preconditioner
1451  _prec->post(x);
1452 
1453  }
1454 
1455  private:
1456  using IterativeSolver<X,X>::_op;
1457  using IterativeSolver<X,X>::_prec;
1458  using IterativeSolver<X,X>::_sp;
1459  using IterativeSolver<X,X>::_reduction;
1460  using IterativeSolver<X,X>::_maxit;
1461  using IterativeSolver<X,X>::_verbose;
1462  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1463  int _restart;
1464  };
1465  DUNE_REGISTER_ITERATIVE_SOLVER("generalizedpcgsolver", defaultIterativeSolverCreator<Dune::GeneralizedPCGSolver>());
1466 
1478  template<class X>
1479  class RestartedFCGSolver : public IterativeSolver<X,X> {
1480  public:
1481  using typename IterativeSolver<X,X>::domain_type;
1482  using typename IterativeSolver<X,X>::range_type;
1483  using typename IterativeSolver<X,X>::field_type;
1484  using typename IterativeSolver<X,X>::real_type;
1485 
1486  private:
1487  using typename IterativeSolver<X,X>::scalar_real_type;
1488 
1489  public:
1490  // don't shadow four-argument version of apply defined in the base class
1491  using IterativeSolver<X,X>::apply;
1497  RestartedFCGSolver (LinearOperator<X,X>& op, Preconditioner<X,X>& prec,
1498  scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, prec, reduction, maxit, verbose), _mmax(mmax)
1499  {
1500  }
1501 
1507  RestartedFCGSolver (LinearOperator<X,X>& op, ScalarProduct<X>& sp, Preconditioner<X,X>& prec,
1508  scalar_real_type reduction, int maxit, int verbose, int mmax = 10) : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1509  {
1510  }
1511 
1517  RestartedFCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1518  std::shared_ptr<ScalarProduct<X>> sp,
1519  std::shared_ptr<Preconditioner<X,X>> prec,
1520  scalar_real_type reduction, int maxit, int verbose,
1521  int mmax = 10)
1522  : IterativeSolver<X,X>(op, sp, prec, reduction, maxit, verbose), _mmax(mmax)
1523  {}
1524 
1537  RestartedFCGSolver (std::shared_ptr<LinearOperator<X,X>> op,
1538  std::shared_ptr<ScalarProduct<X>> sp,
1539  std::shared_ptr<Preconditioner<X,X>> prec,
1540  const ParameterTree& config)
1541  : IterativeSolver<X,X>(op, sp, prec, config), _mmax(config.get("mmax", 10))
1542  {}
1543 
1556  virtual void apply (X& x, X& b, InverseOperatorResult& res)
1557  {
1558  using rAlloc = ReboundAllocatorType<X,field_type>;
1559  res.clear();
1560  Iteration iteration(*this,res);
1561  _prec->pre(x,b); // prepare preconditioner
1562  _op->applyscaleadd(-1,x,b); // overwrite b with defect
1563 
1564  //arrays for interim values:
1565  std::vector<X> d(_mmax+1, x); // array for directions
1566  std::vector<X> Ad(_mmax+1, x); // array for Ad[i]
1567  std::vector<field_type,rAlloc> ddotAd(_mmax+1,0); // array for <d[i],Ad[i]>
1568  X w(x);
1569 
1570  real_type def = _sp->norm(b); // compute norm
1571  if(iteration.step(0, def)){
1572  _prec->post(x);
1573  return;
1574  }
1575 
1576  // some local variables
1577  field_type alpha;
1578 
1579  // the loop
1580  int i=1;
1581  int i_bounded=0;
1582  while(i<=_maxit && !res.converged) {
1583  for (; i_bounded <= _mmax && i<= _maxit; i_bounded++) {
1584  d[i_bounded] = 0; // reset search direction
1585  _prec->apply(d[i_bounded], b); // apply preconditioner
1586  w = d[i_bounded]; // copy of current d[i]
1587  // orthogonalization with previous directions
1588  orthogonalizations(i_bounded,Ad,w,ddotAd,d);
1589 
1590  // save interim values for later use
1591  _op->apply(d[i_bounded], Ad[i_bounded]); // save Ad[i]
1592  ddotAd[i_bounded]=_sp->dot(d[i_bounded],Ad[i_bounded]); // save <d[i],Ad[i]>
1593  alpha = _sp->dot(d[i_bounded], b)/ddotAd[i_bounded]; // <d[i],b>/<d[i],Ad[i]>
1594 
1595  //update solution and defect
1596  x.axpy(alpha, d[i_bounded]);
1597  b.axpy(-alpha, Ad[i_bounded]);
1598 
1599  // convergence test
1600  def = _sp->norm(b); // comp defect norm
1601 
1602  iteration.step(i, def);
1603  i++;
1604  }
1605  //restart: exchange first and last stored values
1606  cycle(Ad,d,ddotAd,i_bounded);
1607  }
1608 
1609  //correct i which is wrong if convergence was not achieved.
1610  i=std::min(_maxit,i);
1611 
1612  _prec->post(x); // postprocess preconditioner
1613  }
1614 
1615  private:
1616  //This function is called every iteration to orthogonalize against the last search directions
1617  virtual void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) {
1618  // The RestartedFCGSolver only uses values with a lower array index.
1619  for (int k = 0; k < i_bounded; k++) {
1620  d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1621  }
1622  }
1623 
1624  // This function is called every mmax iterations to handle limited array sizes.
1625  virtual void cycle(std::vector<X>& Ad,std::vector<X>& d,std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) {
1626  // Reset loop index and exchange the first and last arrays
1627  i_bounded = 1;
1628  std::swap(Ad[0], Ad[_mmax]);
1629  std::swap(d[0], d[_mmax]);
1630  std::swap(ddotAd[0], ddotAd[_mmax]);
1631  }
1632 
1633  protected:
1634  int _mmax;
1635  using IterativeSolver<X,X>::_op;
1636  using IterativeSolver<X,X>::_prec;
1637  using IterativeSolver<X,X>::_sp;
1638  using IterativeSolver<X,X>::_reduction;
1639  using IterativeSolver<X,X>::_maxit;
1640  using IterativeSolver<X,X>::_verbose;
1641  using Iteration = typename IterativeSolver<X,X>::template Iteration<unsigned int>;
1642  };
1643  DUNE_REGISTER_ITERATIVE_SOLVER("restartedfcgsolver", defaultIterativeSolverCreator<Dune::RestartedFCGSolver>());
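A brief sketch (not part of this header; the helper function is hypothetical and the types are the assumptions from the earlier examples): mmax bounds how many previous search directions are stored and orthogonalized against before cycle() restarts the arrays.

template<class Vector>
void solveWithRestartedFCG(Dune::LinearOperator<Vector,Vector>& op,
                           Dune::Preconditioner<Vector,Vector>& prec,
                           Vector& x, Vector& b)
{
  Dune::RestartedFCGSolver<Vector> fcg(op, prec, /*reduction=*/1e-8,
                                       /*maxit=*/500, /*verbose=*/1, /*mmax=*/10);
  Dune::InverseOperatorResult res;
  fcg.apply(x, b, res);
}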
1644 
1651  template<class X>
1652  class CompleteFCGSolver : public RestartedFCGSolver<X> {
1653  public:
1654  using typename RestartedFCGSolver<X>::domain_type;
1655  using typename RestartedFCGSolver<X>::range_type;
1656  using typename RestartedFCGSolver<X>::field_type;
1657  using typename RestartedFCGSolver<X>::real_type;
1658 
1659  // copy base class constructors
1660  using RestartedFCGSolver<X>::RestartedFCGSolver;
1661 
1662  // don't shadow four-argument version of apply defined in the base class
1663  using RestartedFCGSolver<X>::apply;
1664 
1665  // only a small part of the RestartedFCGSolver apply method is modified here
1666  virtual void apply (X& x, X& b, InverseOperatorResult& res) override {
1667  // reset limiter of orthogonalization loop
1668  _k_limit = 0;
1669  this->RestartedFCGSolver<X>::apply(x,b,res);
1670  };
1671 
1672  private:
1673  // This function is called every iteration to orthogonalize against the last search directions.
1674  virtual void orthogonalizations(const int& i_bounded,const std::vector<X>& Ad, const X& w, const std::vector<field_type,ReboundAllocatorType<X,field_type>>& ddotAd,std::vector<X>& d) override {
1675  // This FCGSolver also uses values with higher array indices, if they exist.
1676  for (int k = 0; k < _k_limit; k++) {
1677  if(i_bounded!=k)
1678  d[i_bounded].axpy(-_sp->dot(Ad[k], w) / ddotAd[k], d[k]); // d[i] -= <<Ad[k],w>/<d[k],Ad[k]>>d[k]
1679  }
1680  // The loop limit increases while the array is not yet completely filled.
1681  if(_k_limit<=i_bounded)
1682  _k_limit++;
1683 
1684  };
1685 
1686  // This function is called every mmax iterations to handle limited array sizes.
1687  virtual void cycle(std::vector<X>& Ad,std::vector<X>& d,std::vector<field_type,ReboundAllocatorType<X,field_type> >& ddotAd,int& i_bounded) override {
1688  // Only the loop index i_bounded returns to 0 when it reaches mmax.
1689  i_bounded = 0;
1690  // Now all arrays are filled and the loop in void orthogonalizations can use the whole arrays.
1691  _k_limit = Ad.size();
1692  };
1693 
1694  int _k_limit = 0;
1695 
1696  protected:
1697  using RestartedFCGSolver<X>::_mmax;
1698  using RestartedFCGSolver<X>::_op;
1699  using RestartedFCGSolver<X>::_prec;
1700  using RestartedFCGSolver<X>::_sp;
1701  using RestartedFCGSolver<X>::_reduction;
1702  using RestartedFCGSolver<X>::_maxit;
1703  using RestartedFCGSolver<X>::_verbose;
1704  };
1705  DUNE_REGISTER_ITERATIVE_SOLVER("completefcgsolver", defaultIterativeSolverCreator<Dune::CompleteFCGSolver>());
1707 } // end namespace
1708 
1709 #endif