
DUNE-GRID-GLUE (2.10)

ringcomm.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
// SPDX-FileCopyrightInfo: Copyright © DUNE Project contributors, see file LICENSE.md in module root
// SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-GPL-2.0-only-with-dune-grid-glue-exception
/* IMPLEMENTATION OF CLASS G R I D G L U E */

// no-op placeholder: MPI return codes are currently not checked
#define CheckMPIStatus(A,B) {}

#include <mpi.h>

#include <array>
#include <cassert>
#include <functional>
#include <iostream>
#include <tuple>
#include <utility>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/hybridutilities.hh>
#include <dune/common/parallel/mpitraits.hh>

#include <dune/geometry/type.hh>
18
namespace Dune {
namespace Parallel {

  namespace Impl {

    template<typename T>
    struct MPITypeInfo {};

    template<>
    struct MPITypeInfo< int >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return MPI_INT;
      }
    };

    template<typename K, int N>
    struct MPITypeInfo< Dune::FieldVector<K,N> >
    {
      static const unsigned int size = N;
      static inline MPI_Datatype getType()
      {
        return Dune::MPITraits<K>::getType();
      }
    };

    template<>
    struct MPITypeInfo< unsigned int >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return MPI_UNSIGNED;
      }
    };

    template<>
    struct MPITypeInfo< Dune::GeometryType >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return Dune::MPITraits< Dune::GeometryType >::getType();
      }
    };
67
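    /* MPITypeInfo is the extension point for passing further element types
       through the ring communication below: a specialization provides the
       number of MPI entries per object ("size") and the matching MPI_Datatype.
       A minimal sketch of an additional specialization (hypothetical, not part
       of this header) could look like:

         template<>
         struct MPITypeInfo< double >
         {
           static const unsigned int size = 1;
           static inline MPI_Datatype getType()
           {
             return MPI_DOUBLE;
           }
         };
    */
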
    template<typename T>
    void MPI_SetVectorSize(
      std::vector<T> & data,
      MPI_Status & status)
    {
      typedef MPITypeInfo<T> Info;
      int sz;
      MPI_Get_count(&status, Info::getType(), &sz);
      assert(sz%Info::size == 0);
      data.resize(sz/Info::size);
    }
79
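    /* Worked example of the resize logic above (values purely illustrative):
       when receiving into std::vector< Dune::FieldVector<double,3> >,
       MPI_Get_count might report 12 entries of MPITraits<double>'s datatype;
       with Info::size == 3 the vector is resized to 12/3 == 4 field vectors. */
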
    template<typename T>
    void MPI_SendVectorInRing(
      std::vector<T> & data,
      std::vector<T> & next,
      int tag,
      int rightrank,
      int leftrank,
      MPI_Comm comm,
      MPI_Request& r_send,
      MPI_Request& r_recv
      )
    {
      // mpi status stuff
      [[maybe_unused]] int result = 0;
      typedef MPITypeInfo<T> Info;
      // resize next buffer to maximum size
      next.resize(next.capacity());
      // send data (explicitly send data.size elements)
      result =
        MPI_Isend(
          &(data[0]), Info::size*data.size(), Info::getType(), rightrank, tag,
          comm, &r_send);
      // receive up to maximum size. The actual size is stored in the status
      result =
        MPI_Irecv(
          &(next[0]), Info::size*next.size(), Info::getType(), leftrank, tag,
          comm, &r_recv);
      // // check result
      // MPI_Status status;
      // CheckMPIStatus(result, status);
    }
120
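    /* Sketch of the completion protocol for one send/receive pair (names are
       illustrative; MPI_AllApply_impl below does the same for all tuple
       entries at once via MPI_Waitall over arrays of requests):

         MPI_Request r_send, r_recv;
         MPI_SendVectorInRing(data, next, tag, rightrank, leftrank, comm, r_send, r_recv);
         MPI_Status s_recv, s_send;
         MPI_Wait(&r_recv, &s_recv);       // wait for the incoming message
         MPI_SetVectorSize(next, s_recv);  // shrink `next` to the number of received objects
         MPI_Wait(&r_send, &s_send);       // after this, `data` may be reused
    */
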
    template<typename T>
    using ptr_t = T*;

    /* these helper structs are needed as long as we still support
       C++11, as we can't use generic lambdas */
    template<typename... Args>
    struct call_MPI_SendVectorInRing
    {
      std::tuple<Args...> & remotedata;
      std::tuple<Args...> & nextdata;
      int & tag;
      int & rightrank;
      int & leftrank;
      MPI_Comm & mpicomm;
      std::array<MPI_Request,sizeof...(Args)> & requests_send;
      std::array<MPI_Request,sizeof...(Args)> & requests_recv;

      template<typename I>
      void operator()(I i)
      {
        MPI_SendVectorInRing(
          std::get<i>(remotedata),
          std::get<i>(nextdata),
          tag+i,
          rightrank, leftrank, mpicomm,
          requests_send[i],
          requests_recv[i]);
      }
    };

    template<typename... Args>
    struct call_MPI_SetVectorSize
    {
      std::tuple<Args...> & nextdata;
      std::array<MPI_Status,sizeof...(Args)> & status_recv;

      template<typename I>
      void operator()(I i)
      {
        MPI_SetVectorSize(std::get<i>(nextdata),status_recv[i]);
      }
    };

    template<typename OP, std::size_t... Indices, typename... Args>
    void MPI_AllApply_impl(MPI_Comm mpicomm,
                           OP && op,
                           std::index_sequence<Indices...> indices,
                           const Args&... data)
    {
      constexpr std::size_t N = sizeof...(Args);
      int myrank = 0;
      int commsize = 0;
#if HAVE_MPI
      MPI_Comm_rank(mpicomm, &myrank);
      MPI_Comm_size(mpicomm, &commsize);
#endif // HAVE_MPI

      if (commsize > 1)
      {
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
        std::cout << myrank << " Start Communication, size " << commsize << std::endl;
#endif

        // get data sizes
        std::array<unsigned int, N> size({ ((unsigned int)data.size())... });

        // communicate max data size
        std::array<unsigned int, N> maxSize;
        MPI_Allreduce(&size, &maxSize,
                      size.size(), MPI_UNSIGNED, MPI_MAX, mpicomm);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
        std::cout << myrank << " maxSize " << "done... " << std::endl;
#endif

        // allocate receiving buffers with maxSize to ensure sufficient buffer size for communication
        std::tuple<Args...> remotedata { Args(maxSize[Indices])... };

        // copy local data to receiving buffer
        remotedata = std::tie(data...);

        // allocate second set of receiving buffers necessary for async communication
        std::tuple<Args...> nextdata { Args(maxSize[Indices])... };

        // communicate data in the ring
        int rightrank = (myrank + 1 + commsize) % commsize;
        int leftrank  = (myrank - 1 + commsize) % commsize;

        std::cout << myrank << ": size = " << commsize << std::endl;
        std::cout << myrank << ": left = " << leftrank
                  << " right = " << rightrank << std::endl;
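
        // Worked example of the ring layout (values purely illustrative):
        // with commsize = 4 and myrank = 2 we get rightrank = (2+1+4)%4 = 3
        // and leftrank = (2-1+4)%4 = 1; iteration i below then receives data
        // originating from nextrank = (2-i+4)%4, i.e. from ranks 1, 0, 3 in turn.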
210
        // currently the remote data is our own data
        int remoterank = myrank;

        for (int i=1; i<commsize; i++)
        {
          // in this iteration we will receive data from nextrank
          int nextrank = (myrank - i + commsize) % commsize;

          std::cout << myrank << ": next = " << nextrank << std::endl;

          // send remote data to right neighbor and receive from left neighbor
          std::array<MPI_Request,N> requests_send;
          std::array<MPI_Request,N> requests_recv;

          int tag = 0;
          Dune::Hybrid::forEach(indices,
            // [&](auto i){
            //   MPI_SendVectorInRing(
            //     std::get<i>(remotedata),
            //     std::get<i>(nextdata),
            //     tag+i,
            //     rightrank, leftrank, mpicomm,
            //     requests_send[i],
            //     requests_recv[i]);
            // });
            call_MPI_SendVectorInRing<Args...>({
                remotedata,
                nextdata,
                tag,
                rightrank, leftrank, mpicomm,
                requests_send,
                requests_recv
              }));

          // apply operator
          op(remoterank,std::get<Indices>(remotedata)...);

          // wait for communication to finalize
          std::array<MPI_Status,N> status_send;
          std::array<MPI_Status,N> status_recv;
          MPI_Waitall(N,&requests_recv[0],&status_recv[0]);

          // we finished receiving from nextrank and thus remoterank = nextrank
          remoterank = nextrank;

          // get current data sizes
          // and resize vectors
          Dune::Hybrid::forEach(indices,
            // [&](auto i){
            //   MPI_SetVectorSize(std::get<i>(nextdata),status_recv[i]);
            // });
            call_MPI_SetVectorSize<Args...>({
                nextdata, status_recv
              }));

          MPI_Waitall(N,&requests_send[0],&status_send[0]);

          // swap the communication buffers
          std::swap(remotedata,nextdata);
        }

        // last apply (or the only one in the case of sequential application)
        op(remoterank,std::get<Indices>(remotedata)...);
      }
      else // sequential
      {
        op(myrank,data...);
      }
    }

  } // end namespace Impl

template<typename OP, typename... Args>
void MPI_AllApply(MPI_Comm mpicomm,
                  OP && op,
                  const Args& ... data)
{
  Impl::MPI_AllApply_impl(
    mpicomm,
    std::forward<OP>(op),
    std::make_index_sequence<sizeof...(Args)>(),
    data...
    );
}

} // end namespace Parallel
} // end namespace Dune
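
/* A minimal usage sketch of MPI_AllApply (illustrative only: the communicator,
   the data, and the lambda body are assumptions, not part of this header).
   Each rank contributes its local vectors; the callback is invoked exactly
   once per rank in the communicator, starting with the local rank's own data:

     std::vector<Dune::FieldVector<double,2> > corners = ...;  // local data
     std::vector<unsigned int> ids = ...;                      // more local data

     Dune::Parallel::MPI_AllApply(MPI_COMM_WORLD,
       [](int remoterank,
          const std::vector<Dune::FieldVector<double,2> >& remoteCorners,
          const std::vector<unsigned int>& remoteIds) {
         // process the data of rank `remoterank` here
       },
       corners, ids);
*/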