
mpicommunication.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
#ifndef DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
#define DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH

#if HAVE_MPI

#include <algorithm>
#include <functional>
#include <memory>

#include <mpi.h>

#include <dune/common/binaryfunctions.hh>
#include <dune/common/exceptions.hh>
#include <dune/common/parallel/communication.hh>
#include <dune/common/parallel/mpitraits.hh>
#include <dune/common/parallel/mpifuture.hh>
#include <dune/common/parallel/mpidata.hh>

namespace Dune
{

  //=======================================================
  // use singleton pattern and template specialization to
  // generate MPI operations
  //=======================================================

  template<typename Type, typename BinaryFunction, typename Enable=void>
  class Generic_MPI_Op
  {

  public:
    static MPI_Op get ()
    {
      if (!op)
      {
        op = std::make_unique<MPI_Op>();
        // The following line leaks an MPI operation object, because the corresponding
        // `MPI_Op_free` is never called. It is never called because there is no easy
        // way to call it at the right moment: right before the call to MPI_Finalize.
        // See https://gitlab.dune-project.org/core/dune-istl/issues/80
        MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
      }
      return *op;
    }
  private:
    static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
    {
      BinaryFunction func;

      for (int i=0; i< *len; ++i, ++in, ++inout) {
        Type temp;
        temp = func(*in, *inout);
        *inout = temp;
      }
    }
    Generic_MPI_Op () {}
    Generic_MPI_Op (const Generic_MPI_Op& ) {}
    static std::unique_ptr<MPI_Op> op;
  };


  template<typename Type, typename BinaryFunction, typename Enable>
  std::unique_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction, Enable>::op;

#define ComposeMPIOp(func,op) \
  template<class T, class S> \
  class Generic_MPI_Op<T, func<S>, std::enable_if_t<MPITraits<S>::is_intrinsic> >{ \
  public: \
    static MPI_Op get(){ \
      return op; \
    } \
  private: \
    Generic_MPI_Op () {} \
    Generic_MPI_Op (const Generic_MPI_Op & ) {} \
  }


  ComposeMPIOp(std::plus, MPI_SUM);
  ComposeMPIOp(std::multiplies, MPI_PROD);
  ComposeMPIOp(Min, MPI_MIN);
  ComposeMPIOp(Max, MPI_MAX);

#undef ComposeMPIOp

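  // Editorial sketch (not part of the original header): the ComposeMPIOp
  // specializations above map reductions on MPI-intrinsic types directly to the
  // built-in operations, e.g. Generic_MPI_Op<double, std::plus<double>>::get()
  // returns MPI_SUM. Any other Type/BinaryFunction pair falls through to the
  // primary template, which lazily creates (and caches) a user-defined MPI_Op
  // that applies the functor element-wise. The functor below is a hypothetical
  // example of such a pair; it is not provided by DUNE.
  struct GenericOpExampleAbsMax
  {
    double operator() (double a, double b) const
    {
      // compare by magnitude without needing <cmath>
      return (a*a >= b*b) ? a : b;
    }
  };
  // The first call creates the MPI_Op via MPI_Op_create, later calls reuse it:
  //   MPI_Op op = Generic_MPI_Op<double, GenericOpExampleAbsMax>::get();
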
  //=======================================================
  // use singleton pattern and template specialization to
  // generate MPI operations
  //=======================================================

  //! Specialization of Communication for MPI
  template<>
  class Communication<MPI_Comm>
  {
  public:
    //! Instantiation using an MPI communicator
    Communication (const MPI_Comm& c = MPI_COMM_WORLD)
      : communicator(c)
    {
      if(communicator!=MPI_COMM_NULL) {
        int initialized = 0;
        MPI_Initialized(&initialized);
        if (!initialized)
          DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI Communication!");
        MPI_Comm_rank(communicator,&me);
        MPI_Comm_size(communicator,&procs);
      }else{
        procs=0;
        me=-1;
      }
    }

    //! Converting constructor for no-communication that is interpreted as MPI_COMM_SELF
    Communication (const Communication<No_Comm>&)
      : Communication(MPI_COMM_SELF)
    {}

    //! Return rank, is between 0 and size()-1
    int rank () const
    {
      return me;
    }

    //! Number of processes in set, is greater than 0
    int size () const
    {
      return procs;
    }

    //! Sends the data to the dest_rank
    template<class T>
    int send(const T& data, int dest_rank, int tag) const
    {
      auto mpi_data = getMPIData(data);
      return MPI_Send(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
                      dest_rank, tag, communicator);
    }

    //! Sends the data to the dest_rank nonblocking
    template<class T>
    MPIFuture<T> isend(T&& data, int dest_rank, int tag) const
    {
      MPIFuture<T> future(std::forward<T>(data));
      auto mpidata = future.get_mpidata();
      MPI_Isend(mpidata.ptr(), mpidata.size(), mpidata.type(),
                dest_rank, tag, communicator, &future.req_);
      return future;
    }

    //! Receives the data from the source_rank
    template<class T>
    T recv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
    {
      T lvalue_data(std::forward<T>(data));
      auto mpi_data = getMPIData(lvalue_data);
      MPI_Recv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
               source_rank, tag, communicator, status);
      return lvalue_data;
    }

    //! Receives the data from the source_rank nonblocking
    template<class T>
    MPIFuture<T> irecv(T&& data, int source_rank, int tag) const
    {
      MPIFuture<T> future(std::forward<T>(data));
      auto mpidata = future.get_mpidata();
      if (mpidata.size() == 0)
        DUNE_THROW(ParallelError, "Size of irecv data object is zero. Reserve sufficient size for the whole message");
      MPI_Irecv(mpidata.ptr(), mpidata.size(), mpidata.type(),
                source_rank, tag, communicator, &future.req_);
      return future;
    }
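
    // Editorial usage sketch (not part of the original interface). Assuming
    // MPIHelper::instance(argc, argv) has already been called, a simple ring
    // exchange with the calls above could look like this (names are
    // illustrative only):
    //
    //   Dune::Communication<MPI_Comm> comm(MPI_COMM_WORLD);
    //   int right = (comm.rank() + 1) % comm.size();
    //   int left  = (comm.rank() + comm.size() - 1) % comm.size();
    //   std::vector<double> outbuf(8, double(comm.rank()));
    //   std::vector<double> inbuf(8);                  // irecv needs a pre-sized buffer
    //   auto recvFuture = comm.irecv(std::move(inbuf), left, /*tag*/ 0);
    //   comm.send(outbuf, right, /*tag*/ 0);
    //   inbuf = recvFuture.get();                      // wait and take the buffer back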

    template<class T>
    T rrecv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
    {
      MPI_Status _status;
      MPI_Message _message;
      T lvalue_data(std::forward<T>(data));
      auto mpi_data = getMPIData(lvalue_data);
      static_assert(!mpi_data.static_size, "rrecv works only for non-static-sized types.");
      if(status == MPI_STATUS_IGNORE)
        status = &_status;
      MPI_Mprobe(source_rank, tag, communicator, &_message, status);
      int size;
      MPI_Get_count(status, mpi_data.type(), &size);
      mpi_data.resize(size);
      MPI_Mrecv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(), &_message, status);
      return lvalue_data;
    }
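
    // Editorial usage sketch (not part of the original interface): rrecv probes
    // the matching message first and resizes the buffer to the actual message
    // length, so the receiver does not need to know the size up front:
    //
    //   std::vector<int> data;                         // empty, will be resized
    //   data = comm.rrecv(std::move(data), /*source*/ 0, /*tag*/ 1);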

    //! Compute the sum of the argument over all processes and return the result in every process
    template<typename T>
    T sum (const T& in) const
    {
      T out;
      allreduce<std::plus<T> >(&in,&out,1);
      return out;
    }

    //! Compute the sum of the argument over all processes and return the result in every process
    template<typename T>
    int sum (T* inout, int len) const
    {
      return allreduce<std::plus<T> >(inout,len);
    }

    //! Compute the product of the argument over all processes and return the result in every process
    template<typename T>
    T prod (const T& in) const
    {
      T out;
      allreduce<std::multiplies<T> >(&in,&out,1);
      return out;
    }

    //! Compute the product of the argument over all processes and return the result in every process
    template<typename T>
    int prod (T* inout, int len) const
    {
      return allreduce<std::multiplies<T> >(inout,len);
    }

    //! Compute the minimum of the argument over all processes and return the result in every process
    template<typename T>
    T min (const T& in) const
    {
      T out;
      allreduce<Min<T> >(&in,&out,1);
      return out;
    }

    //! Compute the minimum of the argument over all processes and return the result in every process
    template<typename T>
    int min (T* inout, int len) const
    {
      return allreduce<Min<T> >(inout,len);
    }


    //! Compute the maximum of the argument over all processes and return the result in every process
    template<typename T>
    T max (const T& in) const
    {
      T out;
      allreduce<Max<T> >(&in,&out,1);
      return out;
    }

    //! Compute the maximum of the argument over all processes and return the result in every process
    template<typename T>
    int max (T* inout, int len) const
    {
      return allreduce<Max<T> >(inout,len);
    }
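
    // Editorial usage sketch (not part of the original interface): the
    // reductions return the combined value on every rank, e.g.
    //
    //   double localError  = computeLocalError();     // hypothetical helper
    //   double globalError = comm.max(localError);    // same value on all ranks
    //   int globalCells    = comm.sum(localCellCount);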

    //! Wait until all processes have arrived at this point in the program
    int barrier () const
    {
      return MPI_Barrier(communicator);
    }

    //! Nonblocking barrier
    MPIFuture<void> ibarrier () const
    {
      MPIFuture<void> future(true); // make a valid MPIFuture<void>
      MPI_Ibarrier(communicator, &future.req_);
      return future;
    }
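
    // Editorial usage sketch (not part of the original interface): the future
    // lets local work overlap with the barrier:
    //
    //   auto barrierDone = comm.ibarrier();
    //   doLocalWork();                                 // hypothetical helper
    //   barrierDone.wait();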


    //! Distribute an array from the process with rank root to all other processes
    template<typename T>
    int broadcast (T* inout, int len, int root) const
    {
      return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
    }

    //! Distribute an array from the process with rank root to all other processes nonblocking
    template<class T>
    MPIFuture<T> ibroadcast(T&& data, int root) const{
      MPIFuture<T> future(std::forward<T>(data));
      auto mpidata = future.get_mpidata();
      MPI_Ibcast(mpidata.ptr(),
                 mpidata.size(),
                 mpidata.type(),
                 root,
                 communicator,
                 &future.req_);
      return future;
    }
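
    // Editorial usage sketch (not part of the original interface):
    //
    //   std::array<int,3> params{};
    //   if (comm.rank() == 0)
    //     params = {nx, ny, nz};                       // filled on the root only
    //   comm.broadcast(params.data(), 3, /*root*/ 0);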

    //! Gather arrays on root task
    template<typename T>
    int gather (const T* in, T* out, int len, int root) const
    {
      return MPI_Gather(const_cast<T*>(in),len,MPITraits<T>::getType(),
                        out,len,MPITraits<T>::getType(),
                        root,communicator);
    }

    //! Gather arrays on root task nonblocking
    template<class TIN, class TOUT = std::vector<TIN>>
    MPIFuture<TOUT, TIN> igather(TIN&& data_in, TOUT&& data_out, int root) const{
      MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
      auto mpidata_in = future.get_send_mpidata();
      auto mpidata_out = future.get_mpidata();
      assert(root != me || mpidata_in.size()*procs <= mpidata_out.size());
      int outlen = (me==root) * mpidata_in.size();
      MPI_Igather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
                  mpidata_out.ptr(), outlen, mpidata_out.type(),
                  root, communicator, &future.req_);
      return future;
    }

    //! Gather arrays of variable size on root task
    template<typename T>
    int gatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ, int root) const
    {
      return MPI_Gatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
                         out,recvDataLen,displ,MPITraits<T>::getType(),
                         root,communicator);
    }
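
    // Editorial usage sketch (not part of the original interface): every rank
    // contributes len entries, the root provides a buffer for size()*len:
    //
    //   int localValue = comm.rank();
    //   std::vector<int> all(comm.rank() == 0 ? comm.size() : 0);
    //   comm.gather(&localValue, all.data(), 1, /*root*/ 0);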

    //! Scatter array from a root to all other tasks
    template<typename T>
    int scatter (const T* sendData, T* recvData, int len, int root) const
    {
      return MPI_Scatter(const_cast<T*>(sendData),len,MPITraits<T>::getType(),
                         recvData,len,MPITraits<T>::getType(),
                         root,communicator);
    }

    //! Scatter array from a root to all other tasks nonblocking
    template<class TIN, class TOUT = TIN>
    MPIFuture<TOUT, TIN> iscatter(TIN&& data_in, TOUT&& data_out, int root) const
    {
      MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
      auto mpidata_in = future.get_send_mpidata();
      auto mpidata_out = future.get_mpidata();
      int inlen = (me==root) * mpidata_in.size()/procs;
      MPI_Iscatter(mpidata_in.ptr(), inlen, mpidata_in.type(),
                   mpidata_out.ptr(), mpidata_out.size(), mpidata_out.type(),
                   root, communicator, &future.req_);
      return future;
    }

    //! Scatter arrays of variable length from a root to all other tasks
    template<typename T>
    int scatterv (const T* sendData, int* sendDataLen, int* displ, T* recvData, int recvDataLen, int root) const
    {
      return MPI_Scatterv(const_cast<T*>(sendData),sendDataLen,displ,MPITraits<T>::getType(),
                          recvData,recvDataLen,MPITraits<T>::getType(),
                          root,communicator);
    }
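
    // Editorial usage sketch (not part of the original interface): the root
    // holds size()*chunk entries, every rank (including the root) receives
    // chunk entries (chunk is an illustrative variable):
    //
    //   std::vector<double> work(comm.rank() == 0 ? comm.size()*chunk : 0);
    //   std::vector<double> myChunk(chunk);
    //   comm.scatter(work.data(), myChunk.data(), chunk, /*root*/ 0);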


    operator MPI_Comm () const
    {
      return communicator;
    }

    //! Gathers data from all tasks and distributes it to all
    template<typename T, typename T1>
    int allgather(const T* sbuf, int count, T1* rbuf) const
    {
      return MPI_Allgather(const_cast<T*>(sbuf), count, MPITraits<T>::getType(),
                           rbuf, count, MPITraits<T1>::getType(),
                           communicator);
    }

    //! Gathers data from all tasks and distributes it to all nonblocking
    template<class TIN, class TOUT = TIN>
    MPIFuture<TOUT, TIN> iallgather(TIN&& data_in, TOUT&& data_out) const
    {
      MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
      auto mpidata_in = future.get_send_mpidata();
      auto mpidata_out = future.get_mpidata();
      assert(mpidata_in.size()*procs <= mpidata_out.size());
      int outlen = mpidata_in.size();
      MPI_Iallgather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
                     mpidata_out.ptr(), outlen, mpidata_out.type(),
                     communicator, &future.req_);
      return future;
    }

    //! Gathers data of variable length from all tasks and distributes it to all
    template<typename T>
    int allgatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ) const
    {
      return MPI_Allgatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
                            out,recvDataLen,displ,MPITraits<T>::getType(),
                            communicator);
    }
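
    // Editorial usage sketch (not part of the original interface): collect one
    // value per rank on every rank:
    //
    //   int myCount = computeLocalCount();             // hypothetical helper
    //   std::vector<int> counts(comm.size());
    //   comm.allgather(&myCount, 1, counts.data());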

    //! Compute something over all processes for each component of an array and return the result in every process
    template<typename BinaryFunction, typename Type>
    int allreduce(Type* inout, int len) const
    {
      Type* out = new Type[len];
      int ret = allreduce<BinaryFunction>(inout,out,len);
      std::copy(out, out+len, inout);
      delete[] out;
      return ret;
    }

    template<typename BinaryFunction, typename Type>
    Type allreduce(Type&& in) const{
      Type lvalue_data = std::forward<Type>(in);
      auto data = getMPIData(lvalue_data);
      MPI_Allreduce(MPI_IN_PLACE, data.ptr(), data.size(), data.type(),
                    (Generic_MPI_Op<Type, BinaryFunction>::get()),
                    communicator);
      return lvalue_data;
    }

    //! Compute something over all processes nonblocking
    template<class BinaryFunction, class TIN, class TOUT = TIN>
    MPIFuture<TOUT, TIN> iallreduce(TIN&& data_in, TOUT&& data_out) const {
      MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
      auto mpidata_in = future.get_send_mpidata();
      auto mpidata_out = future.get_mpidata();
      assert(mpidata_out.size() == mpidata_in.size());
      assert(mpidata_out.type() == mpidata_in.type());
      MPI_Iallreduce(mpidata_in.ptr(), mpidata_out.ptr(),
                     mpidata_out.size(), mpidata_out.type(),
                     (Generic_MPI_Op<TIN, BinaryFunction>::get()),
                     communicator, &future.req_);
      return future;
    }

    //! Compute something over all processes nonblocking
    template<class BinaryFunction, class T>
    MPIFuture<T> iallreduce(T&& data) const{
      MPIFuture<T> future(std::forward<T>(data));
      auto mpidata = future.get_mpidata();
      MPI_Iallreduce(MPI_IN_PLACE, mpidata.ptr(),
                     mpidata.size(), mpidata.type(),
                     (Generic_MPI_Op<T, BinaryFunction>::get()),
                     communicator, &future.req_);
      return future;
    }

    template<typename BinaryFunction, typename Type>
    int allreduce(const Type* in, Type* out, int len) const
    {
      return MPI_Allreduce(const_cast<Type*>(in), out, len, MPITraits<Type>::getType(),
                           (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
    }
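
    // Editorial usage sketch (not part of the original interface): the
    // BinaryFunction template argument selects the MPI operation via
    // Generic_MPI_Op, e.g. a nonblocking in-place element-wise sum:
    //
    //   std::vector<double> v = assembleLocalData();   // hypothetical helper
    //   auto f = comm.iallreduce<std::plus<double>>(std::move(v));
    //   v = f.get();                                   // every rank gets the sum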

  private:
    MPI_Comm communicator;
    int me;
    int procs;
  };
} // namespace Dune

#endif // HAVE_MPI

#endif