DUNE PDELab (git)

multicommdatahandle.hh
1#ifndef DUNE_PDELAB_BACKEND_ISTL_GENEO_MULTICOMMDATAHANDLE_HH
2#define DUNE_PDELAB_BACKEND_ISTL_GENEO_MULTICOMMDATAHANDLE_HH
3
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>

#include <dune/pdelab/gridfunctionspace/entityindexcache.hh>
5
6namespace Dune {
7 namespace PDELab {
8
13 template<typename GFS, typename RankIndex, typename V>
15 {
16
17 typedef Dune::PDELab::EntityIndexCache<GFS> IndexCache;
18 typedef typename V::template LocalView<IndexCache> LocalView;
19
20 public:
21
22 typedef std::size_t size_type;
23
24 template<typename MessageBuffer, typename Entity, typename LocalView>
25 bool gather(MessageBuffer& buff, const Entity& e, LocalView& local_view) const
26 {
27 // Write values
28 for (std::size_t i = 0; i < local_view.size(); ++i) {
29 buff.write (local_view[i]);
30 }
31 return false;
32 }
33
34 template<typename MessageBuffer, typename Entity, typename LocalView>
35 bool scatter(MessageBuffer& buff, std::size_t n, const Entity& e, LocalView& local_view) const
36 {
37 RankIndex remote_rank = buff.senderRank();
38
39 // Get neighbor index from rank
40 int remote_index = std::distance(_neighbor_ranks.begin(), std::find(_neighbor_ranks.begin(), _neighbor_ranks.end(), remote_rank));
41 if (remote_index == static_cast<RankIndex>(_neighbor_ranks.size()))
42 DUNE_THROW(Exception,"Received remote rank " << remote_rank << ", but it's not in the given neighbor set!");
43
44 // Get values
45 auto target_view = LocalView(*_target_vectors[remote_index]);
46 _index_cache.update(e);
47 target_view.bind(_index_cache);
48
49
50 if (target_view.cache().gridFunctionSpace().entitySet().partitions().contains(e.partitionType())) {
51 if (target_view.size() != n)
52 DUNE_THROW(Exception,"size mismatch in GridFunctionSpace data handle, have " << target_view.size() << "DOFs, but received " << n);
53
54 for (size_type i = 0; i < target_view.size(); ++i) {
55 typename LocalView::ElementType x;
56 buff.read(x);
57 target_view[i] = x;
58 }
59 target_view.unbind();
60 return true;
61
62 } else {
63
64 if (target_view.size() != 0)
65 DUNE_THROW(Exception,"expected no DOFs in partition '" << e.partitionType() << "', but have " << target_view.size());
66
67 for (size_type i = 0; i < target_view.size(); ++i) {
68 typename LocalView::ElementType dummy;
69 buff.read(dummy);
70 }
71 target_view.unbind();
72 return false;
73 }
74 }
75
76 // enhanced scatter with support for function spaces with different structure on sender and receiver side
77 template<typename MessageBuffer, typename Offsets, typename Entity, typename LocalView>
78 bool scatter(MessageBuffer& buff, const Offsets& remote_offsets, const Offsets& local_offsets, const Entity& e, LocalView& local_view) const
79 {
80 if (local_view.cache().gridFunctionSpace().entitySet().partitions().contains(e.partitionType()))
81 {
82 // the idea here is this:
83 // the compile time structure of the overall function space (and its ordering) will be identical on both sides
84 // of the communication, but one side may be missing information on some leaf orderings, e.g. because the DOF
85 // belongs to a MultiDomain subdomain that only has an active grid cell on one side of the communication.
86 // So we step through the leaves and simply ignore any block where one of the two sides is of size 0.
87 // Otherwise, it's business as usual: we make sure that the sizes from both sides match and then process all
88 // data with the DOF-local gather / scatter functor.
89
90 RankIndex remote_rank = buff.senderRank();
91
92 // Get neighbor index from rank
93 int remote_index = std::distance(_neighbor_ranks.begin(), std::find(_neighbor_ranks.begin(), _neighbor_ranks.end(), remote_rank));
94 if (remote_index == static_cast<RankIndex>(_neighbor_ranks.size()))
95 DUNE_THROW(Exception,"Received remote rank " << remote_rank << ", but it's not in the given neighbor set!");
96
97 // Get values
98 auto target_view = LocalView(*_target_vectors[remote_index]);
99 _index_cache.update(e);
100 target_view.bind(_index_cache);
101
102
103 size_type remote_i = 0;
104 size_type local_i = 0;
105 bool needs_commit = false;
106 for (size_type block = 1; block < local_offsets.size(); ++block)
107 {
108 // we didn't get any data - just ignore
109 if (remote_offsets[block] == remote_i)
110 {
111 local_i = local_offsets[block];
112 continue;
113 }
114
115 // we got data for DOFs we don't have - drain buffer
116 if (local_offsets[block] == local_i)
117 {
118 for (; remote_i < remote_offsets[block]; ++remote_i)
119 {
120 typename LocalView::ElementType dummy;
121 buff.read(dummy);
122 }
123 continue;
124 }
125
126 if (remote_offsets[block] - remote_i != local_offsets[block] - local_i)
127 DUNE_THROW(Exception,"size mismatch in GridFunctionSpace data handle block " << block << ", have " << local_offsets[block] - local_i << "DOFs, but received " << remote_offsets[block] - remote_i);
128
129 for (; local_i < local_offsets[block]; ++local_i) { // TODO: Correct?
130 typename LocalView::ElementType x;
131 buff.read(x);
132 target_view[local_i] = x;
133 }
134
135 remote_i = remote_offsets[block];
136 needs_commit = true;
137 }
138 target_view.unbind();
139 return needs_commit;
140 }
141 else
142 {
143 if (local_view.size() != 0)
144 DUNE_THROW(Exception,"expected no DOFs in partition '" << e.partitionType() << "', but have " << local_view.size());
145
146 for (std::size_t i = 0; i < remote_offsets.back(); ++i)
147 {
148 typename LocalView::ElementType dummy;
149 buff.read(dummy);
150 }
151 return false;
152 }
153 }
154
161 MultiCommGatherScatter(const GFS& gfs, RankIndex rank,
162 std::vector<std::shared_ptr<V> > target_vectors,
163 std::vector<RankIndex> neighbor_ranks)
164 : _index_cache(gfs)
165 , _rank(rank)
166 , _target_vectors(target_vectors)
167 , _neighbor_ranks(neighbor_ranks)
168 {}
169
170 private:
171
172 mutable IndexCache _index_cache;
173
174 const RankIndex _rank;
175 std::vector<std::shared_ptr<V> > _target_vectors;
176 std::vector<RankIndex> _neighbor_ranks;
177 };
178
184 template<class GFS, class V, typename RankIndex>
186 : public Dune::PDELab::GFSDataHandle<GFS,V,
187 MultiCommGatherScatter<GFS, RankIndex, V>,
188 Dune::PDELab::DOFDataCommunicationDescriptor<typename V::ElementType,true>>
189 {
190 typedef Dune::PDELab::GFSDataHandle<GFS,V,
193
194 public:
195
202 MultiCommDataHandle(const GFS& gfs_, V& v_, std::vector<std::shared_ptr<V> > target_vectors, std::vector<RankIndex> neighbor_ranks)
203 : BaseT(gfs_,v_,MultiCommGatherScatter<GFS, RankIndex, V>(gfs_, gfs_.gridView().comm().rank(), target_vectors, neighbor_ranks),
204 Dune::PDELab::DOFDataCommunicationDescriptor<typename V::ElementType,true>())
205 {
206 }
207 };
208 }
209}
210
211#endif //DUNE_PDELAB_BACKEND_ISTL_GENEO_MULTICOMMDATAHANDLE_HH
Wrapper class for entities.
Definition: entity.hh:66
Base class for all PDELab exceptions.
Definition: exceptions.hh:19
Implement a data handle with a grid function space.
Definition: genericdatahandle.hh:132
Gather/scatter communication that passes a single function from each subdomain to all its neighbors....
Definition: multicommdatahandle.hh:189
MultiCommDataHandle(const GFS &gfs_, V &v_, std::vector< std::shared_ptr< V > > target_vectors, std::vector< RankIndex > neighbor_ranks)
Definition: multicommdatahandle.hh:202
Gather/scatter communication that passes a single function from each subdomain to all its neighbors.
Definition: multicommdatahandle.hh:15
MultiCommGatherScatter(const GFS &gfs, RankIndex rank, std::vector< std::shared_ptr< V > > target_vectors, std::vector< RankIndex > neighbor_ranks)
Definition: multicommdatahandle.hh:161
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
Dune namespace.
Definition: alignedallocator.hh:13
Communication descriptor for sending one item of type E per DOF.
Definition: genericdatahandle.hh:25
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Nov 13, 23:29, 2024)