DUNE PDELab (2.8)

cholmod.hh
#pragma once

#if HAVE_SUITESPARSE_CHOLMOD

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/solver.hh>
#include <dune/istl/solverfactory.hh>
#include <dune/istl/foreach.hh>

#include <vector>
#include <memory>

namespace Dune {

namespace Impl{

  struct NoIgnore
  {
    const NoIgnore& operator[](std::size_t) const { return *this; }
    explicit operator bool() const { return false; }
    static constexpr std::size_t size() { return 0; }
  };


  template<class BlockedVector, class FlatVector>
  void copyToFlatVector(const BlockedVector& blockedVector, FlatVector& flatVector)
  {
    // traverse the vector once just to compute the size
    std::size_t len = flatVectorForEach(blockedVector, [&](auto&&, auto...){});
    flatVector.resize(len);

    flatVectorForEach(blockedVector, [&](auto&& entry, auto offset){
      flatVector[offset] = entry;
    });
  }

  // special (dummy) case for NoIgnore
  template<class FlatVector>
  void copyToFlatVector(const NoIgnore&, FlatVector&)
  {
    // just do nothing
    return;
  }

  template<class FlatVector, class BlockedVector>
  void copyToBlockedVector(const FlatVector& flatVector, BlockedVector& blockedVector)
  {
    flatVectorForEach(blockedVector, [&](auto& entry, auto offset){
      entry = flatVector[offset];
    });
  }

} //namespace Impl

/** @brief A wrapper for the sparse Cholesky solver CHOLMOD from SuiteSparse */
template<class Vector>
class Cholmod : public InverseOperator<Vector, Vector>
{
public:

  /** @brief Default constructor: initialises the CHOLMOD workspace */
  Cholmod()
  {
    cholmod_start(&c_);
  }

  /** @brief Destructor: frees the factorization (if any) and finishes CHOLMOD */
  ~Cholmod()
  {
    if (L_)
      cholmod_free_factor(&L_, &c_);
    cholmod_finish(&c_);
  }

  // forbid copying to avoid freeing memory twice
  Cholmod(const Cholmod&) = delete;
  Cholmod& operator=(const Cholmod&) = delete;

  /** @brief Solve the system Ax = b (the reduction parameter is ignored by this direct solver) */
  void apply (Vector& x, Vector& b, [[maybe_unused]] double reduction, InverseOperatorResult& res)
  {
    apply(x,b,res);
  }

  /** @brief Solve the system Ax = b using the factorization computed in setMatrix() */
  void apply(Vector& x, Vector& b, InverseOperatorResult& res)
  {
    // do nothing if N=0
    if ( nIsZero_ )
    {
      return;
    }

    if (x.size() != b.size())
      DUNE_THROW(Exception, "Error in apply(): sizes of x and b do not match!");

    // allocate flat double buffers of system size
    auto b2 = std::make_unique<double[]>(L_->n);
    auto x2 = std::make_unique<double[]>(L_->n);

    // copy to cholmod
    auto bp = b2.get();

    flatVectorForEach(b, [&](auto&& entry, auto&& flatIndex){
      if ( subIndices_.empty() )
        bp[ flatIndex ] = entry;
      else
        if( subIndices_[ flatIndex ] != std::numeric_limits<std::size_t>::max() )
          bp[ subIndices_[ flatIndex ] ] = entry;
    });

    // create a cholmod dense object
    auto b3 = make_cholmod_dense(cholmod_allocate_dense(L_->n, 1, L_->n, CHOLMOD_REAL, &c_), &c_);
    // cast because void-ptr
    auto b4 = static_cast<double*>(b3->x);
    std::copy(b2.get(), b2.get() + L_->n, b4);

    // solve for a cholmod x object
    auto x3 = make_cholmod_dense(cholmod_solve(CHOLMOD_A, L_, b3.get(), &c_), &c_);
    // cast because void-ptr
    auto xp = static_cast<double*>(x3->x);

    // copy into x
    flatVectorForEach(x, [&](auto&& entry, auto&& flatIndex){
      if ( subIndices_.empty() )
        entry = xp[ flatIndex ];
      else
        if( subIndices_[ flatIndex ] != std::numeric_limits<std::size_t>::max() )
          entry = xp[ subIndices_[ flatIndex ] ];
    });

    // statistics for a direct solver
    res.iterations = 1;
    res.converged = true;
  }

  /** @brief Set the matrix and compute the factorization (no dofs are ignored) */
  template<class Matrix>
  void setMatrix(const Matrix& matrix)
  {
    const Impl::NoIgnore* noIgnore = nullptr;
    setMatrix(matrix, noIgnore);
  }

  /** @brief Set the matrix and compute the factorization, skipping all dofs flagged in 'ignore' */
  template<class Matrix, class Ignore>
  void setMatrix(const Matrix& matrix, const Ignore* ignore)
  {
    // count the number of entries in the upper triangle and the number of ignored dofs
    int nonZeros = 0;
    int numberOfIgnoredDofs = 0;


    auto [flatRows,flatCols] = flatMatrixForEach( matrix, [&](auto&& /*entry*/, auto&& flatRowIndex, auto&& flatColIndex){
      if( flatRowIndex <= flatColIndex )
        nonZeros++;
    });

    std::vector<bool> flatIgnore;

    if ( ignore )
    {
      Impl::copyToFlatVector(*ignore,flatIgnore);
      numberOfIgnoredDofs = std::count(flatIgnore.begin(),flatIgnore.end(),true);
    }

    // total number of rows of the reduced system
    int N = flatRows - numberOfIgnoredDofs;

    nIsZero_ = (N <= 0);

    if ( nIsZero_ )
    {
      return;
    }

    /*
     * CHOLMOD uses compressed-column sparse matrices, but for symmetric
     * matrices this is the same as the compressed-row sparse matrix used
     * by DUNE. So we can just store Mᵀ instead of M (as M = Mᵀ).
     */
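    //
    // For illustration (values chosen arbitrarily): the 3x3 symmetric matrix
    //
    //     | 4 1 0 |
    // M = | 1 5 2 |
    //     | 0 2 6 |
    //
    // has the stored upper-triangular entries (0,0)=4 (0,1)=1 (1,1)=5 (1,2)=2 (2,2)=6,
    // which end up in the arrays built below as
    //   Ap = [0, 2, 4, 5]    (row starts, last entry = number of stored entries)
    //   Ai = [0, 1, 1, 2, 2] (column indices, row by row)
    //   Ax = [4, 1, 5, 2, 6] (values)
    // Read as compressed-column data these arrays describe the lower triangle
    // of Mᵀ = M, which is what stype = -1 announces to CHOLMOD.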
    const auto deleter = [c = &this->c_](auto* p) {
      cholmod_free_sparse(&p, c);
    };
    auto M = std::unique_ptr<cholmod_sparse, decltype(deleter)>(
      cholmod_allocate_sparse(N,            // # rows
                              N,            // # cols
                              nonZeros,     // # of nonzeros
                              1,            // indices are sorted ( 1 = true )
                              1,            // matrix is "packed" ( 1 = true )
                              -1,           // stype of matrix ( -1 = consider the lower part only )
                              CHOLMOD_REAL, // xtype of matrix ( CHOLMOD_REAL = single array, no complex numbers )
                              &c_           // cholmod_common ptr
                              ), deleter);

    // copy the data of the BCRS matrix into the CHOLMOD sparse matrix
    int* Ap = static_cast<int*>(M->p);
    int* Ai = static_cast<int*>(M->i);
    double* Ax = static_cast<double*>(M->x);


    if ( ignore )
    {
      // init the mapping from flat indices to the indices of the reduced system
      subIndices_.resize(flatRows,std::numeric_limits<std::size_t>::max());

      std::size_t subIndexCounter = 0;

      for ( std::size_t i=0; i<flatRows; i++ )
      {
        if ( not flatIgnore[ i ] )
        {
          subIndices_[ i ] = subIndexCounter++;
        }
      }
    }
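
    // For example, flatIgnore = [false, true, false] yields
    // subIndices_ = [0, max, 1]: ignored dofs keep the 'max' marker and are
    // skipped in apply(), all other dofs are renumbered consecutively.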

    // First we compute the row starts "Ap": count all (not ignored) entries
    // in each row, then accumulate the counts into offsets
    flatMatrixForEach(matrix, [&](auto&& /*entry*/, auto&& flatRowIndex, auto&& flatColIndex){

      // skip if ignored
      if ( ignore and ( flatIgnore[flatRowIndex] or flatIgnore[flatColIndex] ) )
        return;

      // skip if in the lower triangle
      if ( flatRowIndex > flatColIndex )
        return;

      // ok, count the entry
      auto idx = ignore ? subIndices_[flatRowIndex] : flatRowIndex;
      Ap[idx+1]++;

    });

    // now accumulate
    Ap[0] = 0;
    for ( int i=0; i<N; i++ )
    {
      Ap[i+1] += Ap[i];
    }
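
    // With the small example above the per-row counts are [2, 2, 1], held as
    // Ap = [0, 2, 2, 1] before this loop; the prefix sum turns them into the
    // row starts Ap = [0, 2, 4, 5].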

    // we need a per-row position counter for filling in the entries
    std::vector<std::size_t> rowPosition(N,0);

    // now we can set the entries
    flatMatrixForEach(matrix, [&](auto&& entry, auto&& flatRowIndex, auto&& flatColIndex){

      // skip if ignored
      if ( ignore and ( flatIgnore[flatRowIndex] or flatIgnore[flatColIndex] ) )
        return;

      // skip if in the lower triangle
      if ( flatRowIndex > flatColIndex )
        return;

      // ok, set the entry
      auto rowIdx = ignore ? subIndices_[flatRowIndex] : flatRowIndex;
      auto colIdx = ignore ? subIndices_[flatColIndex] : flatColIndex;
      auto rowStart = Ap[rowIdx];
      auto rowPos = rowPosition[rowIdx];
      Ai[ rowStart + rowPos ] = colIdx;
      Ax[ rowStart + rowPos ] = entry;
      rowPosition[rowIdx]++;

    });

    // analyse the pattern and compute a fill-reducing ordering
    L_ = cholmod_analyze(M.get(), &c_);

    // do the numerical factorization (this may take some time)
    cholmod_factorize(M.get(), L_, &c_);
  }

  virtual SolverCategory::Category category() const
  {
    return SolverCategory::Category::sequential;
  }

  /** @brief Return the CHOLMOD common object for fine-grained control of the solver */
  cholmod_common& cholmodCommonObject()
  {
    return c_;
  }

private:

  // wrap a cholmod_dense* in a unique_ptr that frees it via cholmod_free_dense
  auto make_cholmod_dense(cholmod_dense* x, cholmod_common* c)
  {
    const auto deleter = [c](auto* p) {
      cholmod_free_dense(&p, c);
    };
    return std::unique_ptr<cholmod_dense, decltype(deleter)>(x, deleter);
  }

  cholmod_common c_;
  cholmod_factor* L_ = nullptr;

  // indicator for a 0x0 problem (can occur if all dofs are ignored)
  bool nIsZero_ = false;

  // maps each flat index to its index among the non-ignored dofs
  // (or to std::numeric_limits<std::size_t>::max() if the dof is ignored)
  std::vector<std::size_t> subIndices_;
};
  struct CholmodCreator{
    template<class F> struct isValidBlock : std::false_type{};
    template<int k> struct isValidBlock<FieldVector<double,k>> : std::true_type{};
    template<int k> struct isValidBlock<FieldVector<float,k>> : std::true_type{};

    template<class TL, typename M>
    std::shared_ptr<Dune::InverseOperator<typename Dune::TypeListElement<1, TL>::type,
                                          typename Dune::TypeListElement<2, TL>::type>>
    operator()(TL /*tl*/, const M& mat, const Dune::ParameterTree& /*config*/,
               std::enable_if_t<isValidBlock<typename Dune::TypeListElement<1, TL>::type::block_type>::value,int> = 0) const
    {
      using D = typename Dune::TypeListElement<1, TL>::type;
      auto solver = std::make_shared<Dune::Cholmod<D>>();
      solver->setMatrix(mat);
      return solver;
    }

    // second version with SFINAE to validate the template parameters of Cholmod
    template<typename TL, typename M>
    std::shared_ptr<Dune::InverseOperator<typename Dune::TypeListElement<1, TL>::type,
                                          typename Dune::TypeListElement<2, TL>::type>>
    operator() (TL /*tl*/, const M& /*mat*/, const Dune::ParameterTree& /*config*/,
                std::enable_if_t<!isValidBlock<typename Dune::TypeListElement<1, TL>::type::block_type>::value,int> = 0) const
    {
      DUNE_THROW(UnsupportedType, "Unsupported Type in Cholmod");
    }
  };
  DUNE_REGISTER_DIRECT_SOLVER("cholmod", Dune::CholmodCreator());

} /* namespace Dune */

#endif // HAVE_SUITESPARSE_CHOLMOD
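
A minimal usage sketch of the class above (the matrix and vector types and the names A, b, x, ignore are illustrative; it assumes an already assembled symmetric positive definite system):

    // assemble A (Dune::BCRSMatrix<Dune::FieldMatrix<double,2,2>>) and
    // b, x (Dune::BlockVector<Dune::FieldVector<double,2>>) beforehand
    using V = Dune::BlockVector<Dune::FieldVector<double,2>>;

    Dune::Cholmod<V> solver;
    solver.setMatrix(A);              // analyse + factorize once
    // solver.setMatrix(A, &ignore);  // alternative: skip dofs flagged in 'ignore',
                                      // a blocked boolean vector matching the dof layout

    Dune::InverseOperatorResult res;
    solver.apply(x, b, res);          // solve A x = b

The same solver can also be obtained through the ISTL solver factory via the "cholmod" key registered above.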