DUNE PDELab (2.8)

subdomainprojectedcoarsespace.hh
#ifndef DUNE_PDELAB_BACKEND_ISTL_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH
#define DUNE_PDELAB_BACKEND_ISTL_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH

#include <algorithm>

#include <dune/common/timer.hh>
#include <dune/common/parallel/mpitraits.hh>

#include <dune/pdelab/backend/istl/geneo/multicommdatahandle.hh>

#include <dune/pdelab/backend/istl/geneo/coarsespace.hh>
#include <dune/pdelab/backend/istl/geneo/subdomainbasis.hh>

namespace Dune {
  namespace PDELab {

    /*! \brief This constructs a coarse space from a per-subdomain local basis.
     *
     * Implements the CoarseSpace interface: a representation of a coarse
     * space intended for two-level Schwarz preconditioners.
     */
    template<class GFS, class M, class X, class PIH>
    class SubdomainProjectedCoarseSpace : public CoarseSpace<X>
    {

      typedef int rank_type;
    public:
      typedef typename CoarseSpace<X>::COARSE_V COARSE_V;
      typedef typename CoarseSpace<X>::COARSE_M COARSE_M;
      typedef typename COARSE_M::field_type field_type;

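      // Note on the construction below: the coarse matrix assembled in
      // setup_coarse_system() is, in effect, the Galerkin product
      // A_H = Phi^T A Phi, where the columns of Phi are all subdomain basis
      // vectors in a consecutive, rank-by-rank global ordering. Entry (i,j)
      // of A_H is the product phi_i^T A phi_j, which can only be nonzero if
      // phi_i and phi_j belong to the same or to neighboring subdomains.
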
      /*! \brief Constructor.
       * \param gfs Grid function space.
       * \param AF_exterior_ Discretization matrix from which the coarse-space products are formed.
       * \param subdomainbasis The per-subdomain coarse basis.
       * \param parallelhelper Parallel helper providing the ranks of neighboring subdomains.
       * \param verbosity Verbosity level.
       */
      SubdomainProjectedCoarseSpace (const GFS& gfs, const M& AF_exterior_, std::shared_ptr<SubdomainBasis<X> > subdomainbasis, const PIH& parallelhelper, int verbosity = 1)
        : gfs_(gfs), AF_exterior_(AF_exterior_),
          verbosity_(verbosity),
          ranks_(gfs.gridView().comm().size()),
          my_rank_(gfs.gridView().comm().rank()),
          subdomainbasis_(subdomainbasis)
      {
        neighbor_ranks_ = parallelhelper.getNeighborRanks();

        setup_coarse_system();
      }

    private:
      void setup_coarse_system() {
        using Dune::PDELab::Backend::native;

        // Barrier for proper time measurement
        gfs_.gridView().comm().barrier();
        if (my_rank_ == 0 && verbosity_ > 0) std::cout << "Matrix setup" << std::endl;
        Dune::Timer timer_setup;

        // Communicate local coarse space dimensions
        local_basis_sizes_.resize(ranks_);
        rank_type local_size = subdomainbasis_->basis_size();
        gfs_.gridView().comm().allgather(&local_size, 1, local_basis_sizes_.data());

        // Count coarse space dimensions
        global_basis_size_ = 0;
        for (rank_type n : local_basis_sizes_) {
          global_basis_size_ += n;
        }
        my_basis_array_offset_ = basis_array_offset(my_rank_);
        rank_type max_local_basis_size = *std::max_element(local_basis_sizes_.begin(),local_basis_sizes_.end());
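        // Every rank takes part in max_local_basis_size communication rounds
        // below, so the collective communicate() calls stay matched even when
        // the subdomains have coarse bases of different sizes.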

        if (my_rank_ == 0 && verbosity_ > 0) std::cout << "Global basis size B=" << global_basis_size_ << std::endl;


        coarse_system_ = std::make_shared<COARSE_M>(global_basis_size_, global_basis_size_, COARSE_M::row_wise);

        // Set up container for storing rows of coarse matrix associated with current rank
        // Hierarchy: Own basis functions -> Current other basis function from each neighbor -> Actual entries
        std::vector<std::vector<std::vector<field_type> > > local_rows(local_basis_sizes_[my_rank_]);
        for (rank_type basis_index = 0; basis_index < local_basis_sizes_[my_rank_]; basis_index++) {
          local_rows[basis_index].resize(neighbor_ranks_.size()+1);
        }
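        // Layout: local_rows[i][n] collects the entries <phi_i, A psi> for the
        // basis vectors psi of neighbor n; the last slot, at index
        // neighbor_ranks_.size(), is reserved for this rank's own basis vectors.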

        // Container for neighbors' basis functions
        std::vector<std::shared_ptr<X> > neighbor_basis(neighbor_ranks_.size());
        for (auto& basis : neighbor_basis) {
          basis = std::make_shared<X>(gfs_, 0.0);
        }

        // Assemble local section of coarse matrix
        for (rank_type basis_index_remote = 0; basis_index_remote < max_local_basis_size; basis_index_remote++) {

          // Communicate one basis vector of every subdomain to all of its neighbors in one go.
          // If the current rank has already communicated all its basis vectors, just pass zeros.
          if (basis_index_remote < local_basis_sizes_[my_rank_]) {
            Dune::PDELab::MultiCommDataHandle<GFS,X,rank_type> commdh(gfs_, *subdomainbasis_->get_basis_vector(basis_index_remote), neighbor_basis, neighbor_ranks_);
            gfs_.gridView().communicate(commdh,Dune::All_All_Interface,Dune::ForwardCommunication);
          } else {
            X dummy(gfs_, 0.0);
            Dune::PDELab::MultiCommDataHandle<GFS,X,rank_type> commdh(gfs_, dummy, neighbor_basis, neighbor_ranks_);
            gfs_.gridView().communicate(commdh,Dune::All_All_Interface,Dune::ForwardCommunication);
          }
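          // At this point neighbor_basis[n] holds the basis_index_remote-th
          // basis vector of neighbor n (or zeros if that neighbor has fewer
          // than basis_index_remote+1 basis vectors).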

          // Compute local products of basis functions with discretization matrix
          if (basis_index_remote < local_basis_sizes_[my_rank_]) {
            auto basis_vector = *subdomainbasis_->get_basis_vector(basis_index_remote);
            X Atimesv(gfs_,0.0);
            native(AF_exterior_).mv(native(basis_vector), native(Atimesv));
            for (rank_type basis_index = 0; basis_index < local_basis_sizes_[my_rank_]; basis_index++) {
              field_type entry = *subdomainbasis_->get_basis_vector(basis_index)*Atimesv;
              local_rows[basis_index][neighbor_ranks_.size()].push_back(entry);
            }
          }
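          // Each entry computed above is phi_i^T A phi_j for two of this
          // rank's own basis vectors, i.e. one entry of the diagonal block of
          // the coarse matrix associated with this rank.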

          // Compute products of discretization matrix with local and remote vectors
          for (std::size_t neighbor_id = 0; neighbor_id < neighbor_ranks_.size(); neighbor_id++) {
            if (basis_index_remote >= local_basis_sizes_[neighbor_ranks_[neighbor_id]])
              continue;

            auto basis_vector = *neighbor_basis[neighbor_id];
            X Atimesv(gfs_,0.0);
            native(AF_exterior_).mv(native(basis_vector), native(Atimesv));

            for (rank_type basis_index = 0; basis_index < local_basis_sizes_[my_rank_]; basis_index++) {

              field_type entry = *subdomainbasis_->get_basis_vector(basis_index)*Atimesv;
              local_rows[basis_index][neighbor_id].push_back(entry);
            }
          }

        }
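        // Each rank now holds the complete rows of the coarse matrix belonging
        // to its own basis functions: couplings with its own vectors and with
        // those of its neighbors; all other couplings are zero.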

        // Construct coarse matrix from local sections
        auto setup_row = coarse_system_->createbegin();
        rank_type row_id = 0;
        for (rank_type rank = 0; rank < ranks_; rank++) {
          for (rank_type basis_index = 0; basis_index < local_basis_sizes_[rank]; basis_index++) {

            // Communicate number of entries in this row
            rank_type couplings = 0;
            if (rank == my_rank_) {
              couplings = local_basis_sizes_[my_rank_];
              for (rank_type neighbor_id : neighbor_ranks_) {
                couplings += local_basis_sizes_[neighbor_id];
              }
            }
            gfs_.gridView().comm().broadcast(&couplings, 1, rank);

            // Communicate row's pattern
            rank_type entries_pos[couplings];
            if (rank == my_rank_) {
              rank_type cnt = 0;
              for (rank_type basis_index2 = 0; basis_index2 < local_basis_sizes_[my_rank_]; basis_index2++) {
                entries_pos[cnt] = my_basis_array_offset_ + basis_index2;
                cnt++;
              }
              for (std::size_t neighbor_id = 0; neighbor_id < neighbor_ranks_.size(); neighbor_id++) {
                rank_type neighbor_offset = basis_array_offset (neighbor_ranks_[neighbor_id]);
                for (rank_type basis_index2 = 0; basis_index2 < local_basis_sizes_[neighbor_ranks_[neighbor_id]]; basis_index2++) {
                  entries_pos[cnt] = neighbor_offset + basis_index2;
                  cnt++;
                }
              }
            }
            gfs_.gridView().comm().broadcast(entries_pos, couplings, rank);

            // Communicate actual entries
            field_type entries[couplings];
            if (rank == my_rank_) {
              rank_type cnt = 0;
              for (rank_type basis_index2 = 0; basis_index2 < local_basis_sizes_[my_rank_]; basis_index2++) {
                entries[cnt] = local_rows[basis_index][neighbor_ranks_.size()][basis_index2];
                cnt++;
              }
              for (std::size_t neighbor_id = 0; neighbor_id < neighbor_ranks_.size(); neighbor_id++) {
                for (rank_type basis_index2 = 0; basis_index2 < local_basis_sizes_[neighbor_ranks_[neighbor_id]]; basis_index2++) {
                  entries[cnt] = local_rows[basis_index][neighbor_id][basis_index2];
                  cnt++;
                }
              }
            }
            gfs_.gridView().comm().broadcast(entries, couplings, rank);

            // Build matrix row based on pattern
            for (rank_type i = 0; i < couplings; i++)
              setup_row.insert(entries_pos[i]);
            ++setup_row;

            // Set matrix entries
            for (rank_type i = 0; i < couplings; i++) {
              (*coarse_system_)[row_id][entries_pos[i]] = entries[i];
            }

            row_id++;
          }
        }
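        // After these broadcasts every rank holds the complete coarse matrix,
        // so the coarse system is fully replicated across all processes.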


        if (my_rank_ == 0 && verbosity_ > 0) std::cout << "Matrix setup finished: M=" << timer_setup.elapsed() << std::endl;
      }

      /*! \brief Offset of a rank's first basis function in the consecutive
       *         global ordering, i.e. the prefix sum of the local basis sizes
       *         of all lower ranks.
       */
      rank_type basis_array_offset (rank_type rank) {
        rank_type offset = 0;
        for (rank_type i = 0; i < rank; i++) {
          offset += local_basis_sizes_[i];
        }
        return offset;
      }

    public:

      /*! \brief Restricts a vector defined on a subdomain to the coarse space. */
      void restrict (const X& fine, COARSE_V& restricted) const override {

        using Dune::PDELab::Backend::native;

        rank_type recvcounts[ranks_];
        rank_type displs[ranks_];
        for (rank_type rank = 0; rank < ranks_; rank++) {
          displs[rank] = 0;
        }
        for (rank_type rank = 0; rank < ranks_; rank++) {
          recvcounts[rank] = local_basis_sizes_[rank];
          for (rank_type i = rank+1; i < ranks_; i++)
            displs[i] += local_basis_sizes_[rank];
        }
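        // recvcounts[r] is the number of coarse components contributed by
        // rank r; displs[r] is the prefix sum of all lower ranks' sizes, i.e.
        // rank r's offset in the gathered coarse vector.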

        field_type buf_defect[global_basis_size_];
        field_type buf_defect_local[local_basis_sizes_[my_rank_]];

        for (rank_type basis_index = 0; basis_index < local_basis_sizes_[my_rank_]; basis_index++) {
          buf_defect_local[basis_index] = 0.0;
          for (std::size_t i = 0; i < native(fine).N(); i++)
            buf_defect_local[basis_index] += native(*subdomainbasis_->get_basis_vector(basis_index))[i] * native(fine)[i];
        }

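        // Gather all local contributions so that every rank obtains the
        // complete coarse defect vector.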
        MPI_Allgatherv(&buf_defect_local, local_basis_sizes_[my_rank_], MPITraits<field_type>::getType(), &buf_defect, recvcounts, displs, MPITraits<field_type>::getType(), gfs_.gridView().comm());
        for (rank_type basis_index = 0; basis_index < global_basis_size_; basis_index++) {
          restricted[basis_index] = buf_defect[basis_index];
        }
      }

      /*! \brief Prolongates a vector defined on the coarse space to the subdomain. */
      void prolongate (const COARSE_V& coarse, X& prolongated) const override {
        using Dune::PDELab::Backend::native;

        prolongated = 0.0;

        // Accumulate this rank's basis functions, weighted by the
        // corresponding section of the coarse vector
        for (rank_type basis_index = 0; basis_index < local_basis_sizes_[my_rank_]; basis_index++) {
          X local_result(*subdomainbasis_->get_basis_vector(basis_index));
          native(local_result) *= coarse[my_basis_array_offset_ + basis_index];
          prolongated += local_result;
        }
      }

      /*! \brief Returns the matrix representing the coarse basis. */
      std::shared_ptr<COARSE_M> get_coarse_system () override {
        return coarse_system_;
      }

      /*! \brief Returns the size of the coarse basis. */
      rank_type basis_size() override {
        return global_basis_size_;
      }

    private:

      const GFS& gfs_;
      const M& AF_exterior_;

      int verbosity_;

      std::vector<rank_type> neighbor_ranks_;

      rank_type ranks_, my_rank_;

      std::shared_ptr<SubdomainBasis<X> > subdomainbasis_;

      std::vector<rank_type> local_basis_sizes_; // Dimensions of local coarse space per subdomain
      rank_type my_basis_array_offset_; // Start of local basis functions in a consecutive global ordering
      rank_type global_basis_size_; // Dimension of entire coarse space

      std::shared_ptr<COARSE_M> coarse_system_; // Coarse space matrix
    };
  }
}

#endif //DUNE_PDELAB_BACKEND_ISTL_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH
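
// Usage sketch (illustrative only, not part of the header): assuming a grid
// function space gfs, a discretization matrix AF_exterior, an ISTL parallel
// helper and a subdomain basis of matching types, a coarse space could be set
// up along these lines; all object names here are placeholders.
//
//   std::shared_ptr<Dune::PDELab::SubdomainBasis<X> > subdomain_basis = ...; // e.g. a GenEO basis
//   Dune::PDELab::SubdomainProjectedCoarseSpace<GFS,M,X,PIH>
//     coarse_space(gfs, AF_exterior, subdomain_basis, parallelhelper);
//   auto coarse_matrix = coarse_space.get_coarse_system();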