// dune-mmesh (1.4)
// communication.hh
#pragma GCC diagnostic ignored "-Wattributes"
#ifndef DUNE_MMESH_MISC_COMMUNICATION_HH
#define DUNE_MMESH_MISC_COMMUNICATION_HH

#include <cstddef>
#include <vector>

#include <mpi.h>

#include <dune/common/hybridutilities.hh>
#include <dune/common/parallel/variablesizecommunicator.hh>
#include <dune/grid/common/gridenums.hh>

#include "objectstream.hh"
12
namespace Dune
{

16 template< class Grid, class MMeshType >
17 class MMeshCommunication
18 {
19 typedef MMeshCommunication< Grid, MMeshType > This;
20 typedef PartitionHelper< MMeshType > PartitionHelperType;
21 typedef typename PartitionHelperType::LinksType Links;
22
23 // prohibit copying and assignment
24 MMeshCommunication( const This & );
25 const This &operator= ( const This & );
26
27 template< int codim >
28 struct PackData;
29
30 template< int codim >
31 struct UnpackData;
32
33 public:
34 static const int dimension = Grid::dimension;
35
36 MMeshCommunication ( const PartitionHelperType &partitionHelper )
37 : partitionHelper_( partitionHelper ), tag_( 0 )
38 {}
39
40 template< class PackIterator, class UnpackIterator, class DataHandleImp >
41 void operator() ( const PackIterator packBegin, const PackIterator packEnd,
42 const UnpackIterator unpackBegin, const UnpackIterator unpackEnd,
43 DataHandleImp &dataHandle,
44 const PartitionType sendType, const PartitionType recvType,
45 const bool packAll ) const
46 {
47 typedef MMeshImpl::ObjectStream BufferType;
48 const Links& links = partitionHelper_.links();
49
50 // vector of message buffers
51 std::vector< BufferType > sendBuffers( links.size() ), recvBuffers( links.size() );
52 for( int link = 0; link < links.size(); ++link )
53 {
54 sendBuffers[ link ].clear();
55 recvBuffers[ link ].clear();
56 }
57
58 // pack data on send entities
59 for( PackIterator it = packBegin; it != packEnd; ++it )
60 {
61 const typename PackIterator::Entity &entity = *it;
62 if (entity.partitionType() == sendType && partitionHelper_.connectivity(entity).size() > 0)
63 {
64 Hybrid::forEach(std::make_index_sequence<dimension+1>{}, [&](auto codim){
65 PackData<codim>::apply(links, partitionHelper_, dataHandle, sendBuffers, entity);
66 });
67 }
68 }
69
70 if( packAll )
71 {
72 // pack data on receive entities
73 for( UnpackIterator it = unpackBegin; it != unpackEnd; ++it )
74 {
75 const typename UnpackIterator::Entity &entity = *it;
76 if (entity.partitionType() == recvType && partitionHelper_.connectivity(entity).size() > 0)
77 {
78 Hybrid::forEach(std::make_index_sequence<dimension+1>{}, [&](auto codim){
79 PackData<codim>::apply(links, partitionHelper_, dataHandle, sendBuffers, entity);
80 });
81 }
82 }
83 }
84
85 // Send to all links
86 const auto& comm = partitionHelper_.comm();
87 MPI_Request sendRequests[links.size()];
88 for (int link = 0; link < links.size(); ++link)
89 {
90 BufferType& buf = sendBuffers[ link ];
91 int dest = links[ link ];
92 MPI_Request& request = sendRequests[ link ];
93 MPI_Isend( buf._buf, buf._wb, MPI_BYTE, dest, tag_, comm, &request );
94 }
95
96 // Receive data
97 int count = 0;
98 std::vector<bool> received (links.size(), false);
99 MPI_Request recvRequests[links.size()];
100 while( count < links.size() )
101 {
102 for (int link = 0; link < links.size(); ++link)
103 {
104 if (received[ link ])
105 continue;
106
107 int source = links[ link ];
108
109 int available = 0;
110 MPI_Status status;
111 MPI_Iprobe( source, tag_, comm, &available, &status );
112
113 if (available)
114 {
115 int bufferSize;
116 MPI_Get_count( &status, MPI_BYTE, &bufferSize );
117
118 BufferType& buf = recvBuffers[ link ];
119 buf.reserve( bufferSize );
120 buf.clear();
121
122 MPI_Request& request = recvRequests[ link ];
123 MPI_Irecv( buf._buf, bufferSize, MPI_BYTE, source, tag_, comm, &request );
124 buf.seekp( bufferSize );
125
126 count++;
127 received[ link ] = true;
128 }
129 }
130 }
131
132 MPI_Waitall( links.size(), sendRequests, MPI_STATUSES_IGNORE );
133 MPI_Waitall( links.size(), recvRequests, MPI_STATUSES_IGNORE );
134
135 // unpack data on receive entities
136 for( UnpackIterator it = unpackBegin; it != unpackEnd; ++it )
137 {
138 const typename UnpackIterator::Entity &entity = *it;
139 if (entity.partitionType() == recvType && partitionHelper_.connectivity(entity).size() > 0)
140 {
141 Hybrid::forEach(std::make_index_sequence<dimension+1>{}, [&](auto codim){
142 UnpackData<codim>::apply(links, partitionHelper_, dataHandle, recvBuffers, entity);
143 });
144 }
145 }
146
147 if( packAll )
148 {
149 // unpack data on send entities
150 for( PackIterator it = packBegin; it != packEnd; ++it )
151 {
152 const typename PackIterator::Entity &entity = *it;
153 if (entity.partitionType() == sendType && partitionHelper_.connectivity(entity).size() > 0)
154 {
155 Hybrid::forEach(std::make_index_sequence<dimension+1>{}, [&](auto codim){
156 UnpackData<codim>::apply(links, partitionHelper_, dataHandle, recvBuffers, entity);
157 });
158 }
159 }
160 }
161
162 tag_++;
163 if (tag_ < 0) tag_ = 0;
164 }
165
166 private:
167 const PartitionHelperType &partitionHelper_;
168 mutable int tag_;
169 };

  // MMeshCommunication::PackData
  // -----------------------------------

174 template< class Grid, class MMeshType >
175 template< int codim >
176 struct MMeshCommunication< Grid, MMeshType >::PackData
177 {
178 typedef typename Grid::template Codim< 0 >::Entity Element;
179
180 typedef typename Grid::template Codim< codim >::Entity Entity;
181
182 template< class DataHandleIF, class BufferType >
183 static void apply ( const Links& links,
184 const PartitionHelperType &partitionHelper,
185 DataHandleIF &dataHandle,
186 std::vector< BufferType > &buffer,
187 const Element &element )
188 {
189 // if codim is not contained just go on
190 if( !dataHandle.contains( dimension, codim ) )
191 return;
192
193 const auto& connectivity = partitionHelper.connectivity(element);
194
195 const int numSubEntities = element.subEntities(codim);
196 for( int subEntity = 0; subEntity < numSubEntities; ++subEntity )
197 {
198 // get subentity
199 const Entity &entity = element.template subEntity< codim >( subEntity );
200
201 for (int link = 0; link < links.size(); ++link)
202 {
203 // make sure entity belongs to the link
204 if (connectivity.count(links[link]) > 0 || links[link] == partitionHelper.rank(element))
205 {
206 std::size_t size = dataHandle.size( entity );
207
208 // write size into stream
209 buffer[ link ].write( size );
210
211 // write data to message buffer using data handle
212 dataHandle.gather( buffer[ link ], entity );
213 }
214 }
215 }
216 }
217 };



  // MMeshCommunication::UnpackData
  // -------------------------------------

224 template< class Grid, class MMeshType >
225 template< int codim >
226 struct MMeshCommunication< Grid, MMeshType >::UnpackData
227 {
228 using Element = typename Grid::template Codim< 0 >::Entity;
229
230 template< class DataHandleIF, class BufferType >
231 static void apply ( const Links& links,
232 const PartitionHelperType &partitionHelper,
233 DataHandleIF &dataHandle,
234 std::vector< BufferType > &buffer,
235 const Element &element )
236 {
237 // if codim is not contained just go on
238 if( !dataHandle.contains( dimension, codim ) )
239 return;
240
241 const auto& connectivity = partitionHelper.connectivity(element);
242
243 // get number of sub entities
244 const int numSubEntities = element.subEntities(codim);
245 for( int subEntity = 0; subEntity < numSubEntities; ++subEntity )
246 {
247 // get subentity
248 const auto& entity = element.template subEntity< codim >( subEntity );
249
250 for (int link = 0; link < links.size(); ++link)
251 {
252 // make sure entity belongs to the rank of the link
253 if (links[link] == partitionHelper.rank(element) || connectivity.count(links[link]) > 0)
254 {
255 // read size from stream
256 std::size_t size( 0 );
257 buffer[ link ].read( size );
258
259 // read data from message buffer using data handle
260 dataHandle.scatter( buffer[ link ], entity, size );
261 }
262 }
263 }
264 }
265 };

} // namespace Dune

#endif
// Creative Commons License | Legal Statements / Impressum | Hosted by TU Dresden & Uni Heidelberg