DUNE PDELab (2.7)

blockmatrixdiagonal.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_PDELAB_BACKEND_ISTL_BLOCKMATRIXDIAGONAL_HH
#define DUNE_PDELAB_BACKEND_ISTL_BLOCKMATRIXDIAGONAL_HH

#include <dune/pdelab/backend/istl/bcrsmatrix.hh>
#include <dune/pdelab/backend/istl/vector.hh>
#include <dune/pdelab/backend/istl/utility.hh>
#include <dune/pdelab/gridfunctionspace/entityindexcache.hh>

namespace Dune {
  namespace PDELab {
    namespace ISTL {

#ifndef DOXYGEN

      // implementation namespace for matrix diagonal

      namespace diagonal {

        // TMP for determining the type of vector to use for the matrix diagonal
        template<typename M>
        struct matrix_element_vector;

        // At FieldMatrix level, we keep the whole matrix
        template<typename E, int n, int m>
        struct matrix_element_vector<
          FieldMatrix<E,n,m>
          >
        {
          typedef FieldMatrix<E,n,m> type;
        };

        // At BCRSMatrix level, we use a BlockVector and recursively apply the
        // TMP to the block type.
        template<typename Block, typename Allocator>
        struct matrix_element_vector<
          Dune::BCRSMatrix<Block,Allocator>
          >
        {
          typedef Dune::BlockVector<
            typename matrix_element_vector<Block>::type,
            Allocator
            > type;
        };
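
        // Example (for illustration): for Dune::BCRSMatrix<Dune::FieldMatrix<double,2,2> >,
        // this TMP yields Dune::BlockVector<Dune::FieldMatrix<double,2,2> >, i.e. one dense
        // 2x2 diagonal block per block row of the matrix.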


        // Function for extracting the diagonal from a matrix.
        // For the FieldMatrix, we just copy the complete matrix.
        template<typename FieldMatrix>
        void matrix_element_vector_from_matrix(tags::field_matrix, FieldMatrix& c, const FieldMatrix& matrix)
        {
          c = matrix;
        }

        // For the BCRSMatrix, we recursively copy diagonal blocks.
        template<typename BlockVector, typename BCRSMatrix>
        void matrix_element_vector_from_matrix(tags::block_vector, BlockVector& c, const BCRSMatrix& m)
        {
          const std::size_t rows = m.N();
          c.resize(rows,false);
          for (std::size_t i = 0; i < rows; ++i)
            matrix_element_vector_from_matrix(container_tag(c[i]),c[i],m[i][i]);
        }


        // Function for inverting the diagonal.
        // The FieldMatrix supports direct inversion.
        template<typename FieldMatrix>
        void invert_blocks(tags::field_matrix, FieldMatrix& c)
        {
          c.invert();
        }

        // For a BCRSMatrix, we recursively invert all diagonal entries.
        template<typename BlockVector>
        void invert_blocks(tags::block_vector, BlockVector& c)
        {
          const std::size_t rows = c.size();
          for (std::size_t i = 0; i < rows; ++i)
            invert_blocks(container_tag(c[i]),c[i]);
        }


        // Matrix-vector product between a matrix consisting of only the
        // diagonal and a matching vector.
        // For the FieldMatrix, simply call its matrix-vector product.
        template<typename FieldMatrix, typename X, typename Y>
        void mv(tags::field_matrix, const FieldMatrix& c, const X& x, Y& y)
        {
          c.mv(x,y);
        }

        // For the BCRSMatrix, recursively apply this function to the
        // individual blocks.
        template<typename BlockVector, typename X, typename Y>
        void mv(tags::block_vector, const BlockVector& c, const X& x, Y& y)
        {
          const std::size_t rows = c.size();
          for (std::size_t i = 0; i < rows; ++i)
            mv(container_tag(c[i]),c[i],x[i],y[i]);
        }
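
        // For a blocked matrix this amounts to computing y[i] = D_ii * x[i] for
        // every block row i, where D_ii is the stored diagonal block of row i.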


        // We don't know the type of the container that stores the actual field values (double, complex,...)
        // Moreover, there might be different containers in case of heterogeneous matrices (multidomain etc.)
        // In order to communicate the matrix blocks, we need to stream the single row inside the lowest-level
        // diagonal matrix block for each DOF.
        // The following set of functions extracts this information from the container and provides a simple
        // interface that uses pointers to the field type as iterators.
        //
        // WARNING: This assumes that matrix blocks at the lowest level are dense and stored in row-major format!
        //

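        // For example, for a lowest-level block of type FieldMatrix<double,3,3>,
        // row_size() returns 3 and row_begin()/row_end() bracket the three
        // consecutive entries of the block row selected by the container index.
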
        template<typename FieldMatrix, typename CI>
        std::size_t row_size(tags::field_matrix, const FieldMatrix& c, const CI& ci, int i)
        {
          return FieldMatrix::cols;
        }

        template<typename FieldMatrix>
        std::size_t row_size(tags::field_matrix, const FieldMatrix& c)
        {
          return FieldMatrix::cols;
        }

        template<typename BlockVector, typename CI>
        std::size_t row_size(tags::block_vector, const BlockVector& c, const CI& ci, int i)
        {
          return row_size(container_tag(c[0]),c[0]);
        }

        template<typename BlockVector>
        std::size_t row_size(tags::block_vector, const BlockVector& c)
        {
          return row_size(container_tag(c[0]),c[0]);
        }

        // FieldMatrix with a single row is special because the last-level index isn't stored, so we have to
        // manually extract row 0.
        template<typename FieldMatrix, typename CI>
        typename FieldMatrix::field_type* row_begin(tags::field_matrix_1_any, FieldMatrix& c, const CI& ci, int i)
        {
          assert(i == -1);
          return &(*c[0].begin());
        }

        template<typename FieldMatrix, typename CI>
        typename FieldMatrix::field_type* row_begin(tags::field_matrix_n_any, FieldMatrix& c, const CI& ci, int i)
        {
          assert(i == 0);
          return &(*c[ci[0]].begin());
        }


        // The end iterators are a little tricky: We want a pointer to the memory location directly after the last
        // entry for the given row. In theory, we could get this location by dereferencing the end() iterator and
        // then taking the address of that location, but we are not allowed to dereference an end() iterator. So
        // we instead decrement the end() iterator by one, take the (valid) address of the element at that location
        // and increment that pointer by 1. Yay for ugly hackery! :-P

        // With a 1x1 matrix, we can simply take the address directly following the begin() iterator's target.
        template<typename FieldMatrix, typename CI>
        typename FieldMatrix::field_type* row_end(tags::field_matrix_1_1, FieldMatrix& c, const CI& ci, int i)
        {
          assert(i == -1);
          return &(*c[0].begin()) + 1;
        }

        // For any other matrix, we perform the decrement iterator / increment address of target dance...
        // Once for the optimized storage scheme of single row matrices...
        template<typename FieldMatrix, typename CI>
        typename FieldMatrix::field_type* row_end(tags::field_matrix_1_any, FieldMatrix& c, const CI& ci, int i)
        {
          assert(i == -1);
          typename FieldMatrix::row_type::iterator it = c[0].end();
          --it;
          return &(*it) + 1;
        }

        // ... and once for the general case.
        template<typename FieldMatrix, typename CI>
        typename FieldMatrix::field_type* row_end(tags::field_matrix_n_any, FieldMatrix& c, const CI& ci, int i)
        {
          assert(i == 0);
          typename FieldMatrix::row_type::iterator it = c[ci[0]].end();
          --it;
          return &(*it) + 1;
        }

        // These are the row_begin() and row_end() overloads for BlockVector. They recursively
        // descend into the container until they reach the lowest-level block structure.

        template<typename BlockVector, typename CI>
        typename BlockVector::field_type* row_begin(tags::block_vector, BlockVector& c, const CI& ci, std::size_t i)
        {
          return row_begin(container_tag(c[ci[i]]),c[ci[i]],ci,i-1);
        }

        template<typename BlockVector, typename CI>
        typename BlockVector::field_type* row_end(tags::block_vector, BlockVector& c, const CI& ci, std::size_t i)
        {
          return row_end(container_tag(c[ci[i]]),c[ci[i]],ci,i-1);
        }


      } // namespace diagonal

#endif // DOXYGEN


      template<typename M>
      struct BlockMatrixDiagonal
      {

        typedef Backend::Native<M> Matrix;

        struct MatrixElementVector
        {

          typedef typename diagonal::matrix_element_vector<Matrix>::type Container;
          typedef typename Container::field_type field_type;
          typedef field_type* iterator;

          Container _container;

          MatrixElementVector(const M& m)
          {
            diagonal::matrix_element_vector_from_matrix(container_tag(_container),_container,Backend::native(m));
          }

          void invert()
          {
            diagonal::invert_blocks(container_tag(_container),_container);
          }

          template<typename X, typename Y>
          void mv(const X& x, Y& y) const
          {
            diagonal::mv(container_tag(_container),_container,Backend::native(x),Backend::native(y));
          }

          template<typename ContainerIndex>
          std::size_t row_size(const ContainerIndex& ci) const
          {
            return diagonal::row_size(container_tag(_container),_container,ci,ci.size()-1);
          }

          template<typename ContainerIndex>
          iterator row_begin(const ContainerIndex& ci)
          {
            return diagonal::row_begin(container_tag(_container),_container,ci,ci.size()-1);
          }

          template<typename ContainerIndex>
          iterator row_end(const ContainerIndex& ci)
          {
            return diagonal::row_end(container_tag(_container),_container,ci,ci.size()-1);
          }

        };


        template<typename GFS>
        class AddMatrixElementVectorDataHandle
          : public Dune::CommDataHandleIF<AddMatrixElementVectorDataHandle<GFS>,typename Matrix::field_type>
        {

        public:

          typedef typename Matrix::field_type DataType;
          typedef typename GFS::Traits::SizeType size_type;

          AddMatrixElementVectorDataHandle(const GFS& gfs, MatrixElementVector& v)
            : _gfs(gfs)
            , _index_cache(gfs)
            , _v(v)
          {}

          bool contains(int dim, int codim) const
          {
            return _gfs.dataHandleContains(codim);
          }

          bool fixedSize(int dim, int codim) const
          {
            return _gfs.dataHandleFixedSize(codim);
          }

          template<typename Entity>
          size_type size(Entity& e) const
          {
            _index_cache.update(e);

            size_type s = 0;
            for (size_type i = 0; i < _index_cache.size(); ++i)
              s += _v.row_size(_index_cache.containerIndex(i));
            return s;
          }

          template<typename MessageBuffer, typename Entity>
          void gather(MessageBuffer& buff, const Entity& e) const
          {
            _index_cache.update(e);
            for (size_type i = 0; i < _index_cache.size(); ++i)
            {
              const CI& ci = _index_cache.containerIndex(i);
              for (RowIterator it = _v.row_begin(ci),
                     end_it = _v.row_end(ci);
                   it != end_it;
                   ++it)
                buff.write(*it);
            }
          }

          template<typename MessageBuffer, typename Entity>
          void scatter(MessageBuffer& buff, const Entity& e, size_type n)
          {
            _index_cache.update(e);
            for (size_type i = 0; i < _index_cache.size(); ++i)
            {
              const CI& ci = _index_cache.containerIndex(i);
              for (RowIterator it = _v.row_begin(ci),
                     end_it = _v.row_end(ci);
                   it != end_it;
                   ++it)
              {
                DataType x;
                buff.read(x);
                *it += x;
              }
            }
          }

        private:

          typedef EntityIndexCache<GFS> IndexCache;
          typedef typename IndexCache::ContainerIndex CI;
          typedef typename MatrixElementVector::iterator RowIterator;

          const GFS& _gfs;
          mutable IndexCache _index_cache;
          MatrixElementVector& _v;

        };

      };

    } // namespace ISTL
  } // namespace PDELab
} // namespace Dune

#endif // DUNE_PDELAB_BACKEND_ISTL_BLOCKMATRIXDIAGONAL_HH
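
For orientation, the sketch below shows one possible way this header is used: extract the block diagonal of an assembled parallel matrix, sum up the contributions from neighboring processes via the data handle, then invert and apply the diagonal blocks (a block-Jacobi step). The function and variable names (apply_block_jacobi, gfs, m, x, y) are illustrative assumptions and not part of the header; only BlockMatrixDiagonal and its members come from the code above.

// Illustrative usage sketch (assumptions: GFS is a parallel PDELab
// GridFunctionSpace, M a matching ISTL matrix container, X/Y compatible
// vector containers; all names are made up for this example).
#include <dune/grid/common/gridenums.hh>
#include <dune/pdelab/backend/istl/blockmatrixdiagonal.hh>

template<typename GFS, typename M, typename X, typename Y>
void apply_block_jacobi(const GFS& gfs, const M& m, const X& x, Y& y)
{
  using Diagonal = Dune::PDELab::ISTL::BlockMatrixDiagonal<M>;

  // Copy the diagonal blocks out of the assembled matrix.
  typename Diagonal::MatrixElementVector d(m);

  // Accumulate off-process contributions so the diagonal blocks of border
  // DOFs are fully assembled on every rank.
  typename Diagonal::template AddMatrixElementVectorDataHandle<GFS> dh(gfs,d);
  gfs.gridView().communicate(dh,
                             Dune::InteriorBorder_InteriorBorder_Interface,
                             Dune::ForwardCommunication);

  // Invert each diagonal block and apply the inverse: y = D^{-1} x.
  d.invert();
  d.mv(x,y);
}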