
coeffmatrix.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
#ifndef DUNE_COEFFMATRIX_HH
#define DUNE_COEFFMATRIX_HH
#include <cassert>
#include <iostream>
#include <vector>

#include <dune/localfunctions/utility/field.hh>
#include <dune/localfunctions/utility/tensor.hh>

namespace Dune
{
  /*************************************************
  * Default class for storing a coefficient matrix
  * for the PolynomialBasis. Basically a simple
  * CRS structure is used. The additional complexity
  * is due to the storage and efficient evaluation
  * of higher order derivatives. See the remarks
  * in tensor.hh which also hold true for this file.
  *************************************************/
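
  // Storage scheme used below: coeff_ holds the nonzero matrix entries row
  // by row, rows_[ r ] points to the first entry of row r (and rows_[ r+1 ]
  // to one past its last), and skip_[ k ] is the number of basis functions
  // the basis iterator advances before coeff_[ k ] is applied, i.e. the
  // column distance to the previously used basis function (counting from
  // column 0 at the start of each row).
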
  template <class Field, class Field2>
  struct Mult
  {
    typedef Field2 BasisEntry;
    static void add(const Field &vec1, const BasisEntry &vec2,
                    BasisEntry &res)
    {
      res += vec1*vec2;
    }
  };

  template <class Field,class Field2, int dimRange>
  struct Mult< Field,FieldVector<Field2,dimRange> >
  {
    typedef FieldVector<Field2,dimRange> BasisEntry;
    static void add(const Field &vec1, const BasisEntry &vec2,
                    BasisEntry &res)
    {
      res.axpy(vec1,vec2);
    }
  };

  template< class F , unsigned int bSize >
  class SparseCoeffMatrix
  {
  public:
    typedef F Field;
    static const unsigned int blockSize = bSize;
    typedef SparseCoeffMatrix<Field,blockSize> This;

    SparseCoeffMatrix()
      : coeff_(0),
        rows_(0),
        skip_(0),
        numRows_(0),
        numCols_(0)
    {}

    ~SparseCoeffMatrix()
    {
      delete [] coeff_;
      delete [] rows_;
      delete [] skip_;
    }

    unsigned int size () const
    {
      return numRows_/blockSize;
    }
    unsigned int baseSize () const
    {
      return numCols_;
    }

    template< class BasisIterator, class FF>
    void mult ( const BasisIterator &x,
                unsigned int numLsg,
                FF *y ) const
    {
      typedef typename BasisIterator::Derivatives XDerivatives;
      assert( numLsg*blockSize <= (size_t)numRows_ );
      unsigned int row = 0;
      Field *pos = rows_[ 0 ];
      unsigned int *skipIt = skip_;
      XDerivatives val;
      for( size_t i = 0; i < numLsg; ++i)
      {
        for( unsigned int r = 0; r < blockSize; ++r, ++row )
        {
          val = 0;
          BasisIterator itx = x;
          for( ; pos != rows_[ row+1 ]; ++pos, ++skipIt )
          {
            itx += *skipIt;
            val.axpy(*pos,*itx);
          }
          DerivativeAssign<XDerivatives,FF>::apply(r,val,*(y+i*XDerivatives::size*blockSize));
        }
      }
    }
    template< class BasisIterator, class Vector>
    void mult ( const BasisIterator &x,
                Vector &y ) const
    {
      typedef typename Vector::value_type YDerivatives;
      typedef typename BasisIterator::Derivatives XDerivatives;
      size_t numLsg = y.size();
      assert( numLsg*blockSize <= (size_t)numRows_ );
      unsigned int row = 0;
      Field *pos = rows_[ 0 ];
      unsigned int *skipIt = skip_;
      XDerivatives val;
      for( size_t i = 0; i < numLsg; ++i)
      {
        for( unsigned int r = 0; r < blockSize; ++r, ++row )
        {
          val = 0;
          BasisIterator itx = x;
          for( ; pos != rows_[ row+1 ]; ++pos, ++skipIt )
          {
            itx += *skipIt;
            val.axpy(*pos,*itx);
          }
          DerivativeAssign<XDerivatives,YDerivatives>::apply(r,val,y[i]);
        }
      }
    }
    template <unsigned int deriv, class BasisIterator, class Vector>
    void mult ( const BasisIterator &x,
                Vector &y ) const
    {
      typedef typename Vector::value_type YDerivatives;
      typedef typename BasisIterator::Derivatives XDerivatives;
      typedef FieldVector<typename XDerivatives::Field,YDerivatives::dimension> XLFETensor;
      size_t numLsg = y.size();
      assert( numLsg*blockSize <= (size_t)numRows_ );
      unsigned int row = 0;
      Field *pos = rows_[ 0 ];
      unsigned int *skipIt = skip_;
      for( size_t i = 0; i < numLsg; ++i)
      {
        XLFETensor val(typename XDerivatives::Field(0));
        for( unsigned int r = 0; r < blockSize; ++r, ++row )
        {
          BasisIterator itx = x;
          for( ; pos != rows_[ row+1 ]; ++pos, ++skipIt )
          {
            itx += *skipIt;
            LFETensorAxpy<XDerivatives,XLFETensor,deriv>::apply(r,*pos,*itx,val);
          }
        }
        field_cast(val,y[i]);
      }
    }

    template< class RowMatrix >
    void fill ( const RowMatrix &mat, bool verbose=false )
    {
      numRows_ = mat.rows();
      numCols_ = mat.cols();
      unsigned int size = numRows_*numCols_;

      delete [] coeff_;
      delete [] rows_;
      delete [] skip_;

      Field* coeff = new Field[ size ];
      // we always initialize the next skip entry to zero,
      // including the one following the end, so allocate
      // size+1 entries so we will stay within the bounds.
      unsigned int *skip = new unsigned int[ size+1 ];
      rows_ = new Field*[ numRows_+1 ];
      std::vector<Field> row( numCols_ );

      rows_[ 0 ] = coeff;
      Field *cit = coeff;
      unsigned int *sit = skip;
      for( unsigned int r = 0; r < numRows_; ++r )
      {
        *sit = 0;
        mat.row( r, row );
        for( unsigned int c = 0; c < numCols_; ++c )
        {
          const Field &val = row[c];
          if (val < Zero<Field>() || Zero<Field>() < val)
          {
            *cit = val;
            ++sit;
            ++cit;
            *sit = 1;
          } else
          {
            ++(*sit);
          }
        }
        rows_[ r+1 ] = cit;
      }
      assert( size_t(rows_[numRows_]-rows_[0]) <= size_t(size) );
      size = rows_[numRows_]-rows_[0];
      coeff_ = new Field[ size ];
      skip_ = new unsigned int[ size ];
      for (unsigned int i=0; i<size; ++i)
      {
        coeff_[i] = coeff[i];
        skip_[i] = skip[i];
      }
      for (unsigned int i=0; i<=numRows_; ++i)
        rows_[ i ] = coeff_ + (rows_[ i ] - coeff);

      delete [] coeff;
      delete [] skip;

      if (verbose)
        std::cout << "Entries: " << (rows_[numRows_]-rows_[0])
                  << " full: " << numCols_*numRows_
                  << std::endl;
    }
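
    // Example of the compression performed by fill(): for the dense rows
    //   { 1, 0, 2 }
    //   { 0, 0, 3 }
    // fill() stores coeff_ = { 1, 2, 3 }, skip_ = { 0, 2, 2 } and
    // rows_ = { coeff_, coeff_+2, coeff_+3 }.
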
    // b += a*C[k]
    template <class Vector>
    void addRow( unsigned int k, const Field &a, Vector &b) const
    {
      assert(k<numRows_);
      unsigned int j=0;
      unsigned int *skipIt = skip_ + (rows_[ k ]-rows_[ 0 ]);
      for( Field *pos = rows_[ k ];
           pos != rows_[ k+1 ];
           ++pos, ++skipIt )
      {
        j += *skipIt;
        assert( j < b.size() );
        b[j] += field_cast<typename Vector::value_type>( (*pos)*a ); // field_cast
      }
    }
  private:
    SparseCoeffMatrix ( const This &other )
      : numRows_( other.numRows_ ),
        numCols_( other.numCols_ )
    {
      const unsigned int size = other.rows_[numRows_]-other.rows_[0];
      coeff_ = new Field[ size ];
      rows_ = new Field*[ numRows_+1 ];
      skip_ = new unsigned int[ size ];
      for (unsigned int i=0; i<size; ++i)
      {
        coeff_[i] = other.coeff_[i];
        skip_[i] = other.skip_[i];
      }
      for (unsigned int i=0; i<=numRows_; ++i)
        rows_[ i ] = coeff_ + (other.rows_[ i ] - other.coeff_);
    }

    This &operator= (const This&);
    Field *coeff_;
    Field **rows_;
    unsigned int *skip_;
    unsigned int numRows_,numCols_;
  };

}

#endif // DUNE_COEFFMATRIX_HH
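
Below is a minimal usage sketch, not taken from the header itself: it assumes the file is installed as dune/localfunctions/utility/coeffmatrix.hh (matching its sibling includes), and DenseRowMatrix is a hypothetical stand-in providing just the rows()/cols()/row() interface that fill() expects. The expected verbose output and the resulting vector are noted in the comments.

#include <vector>
#include <dune/localfunctions/utility/coeffmatrix.hh>

// Hypothetical dense row matrix with the interface used by fill().
struct DenseRowMatrix
{
  std::vector< std::vector<double> > data;
  unsigned int rows () const { return static_cast<unsigned int>( data.size() ); }
  unsigned int cols () const { return data.empty() ? 0u : static_cast<unsigned int>( data[0].size() ); }
  void row ( unsigned int r, std::vector<double> &out ) const { out = data[r]; }
};

int main ()
{
  DenseRowMatrix m;
  m.data = { { 1.0, 0.0, 2.0 },
             { 0.0, 0.0, 3.0 } };

  // scalar-valued basis: blockSize = 1
  Dune::SparseCoeffMatrix< double, 1 > C;
  C.fill( m, true );                      // prints "Entries: 3 full: 6"

  std::vector< double > b( C.baseSize(), 0.0 );
  C.addRow( 0, 2.0, b );                  // b += 2 * row 0  ->  { 2, 0, 4 }
  return 0;
}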