Dune Core Modules (2.7.1)

mpicommunication.hh
Go to the documentation of this file.
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
4#define DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
5
14#if HAVE_MPI
15
16#include <algorithm>
17#include <functional>
18#include <memory>
19
20#include <mpi.h>
21
22#include <dune/common/binaryfunctions.hh>
23#include <dune/common/exceptions.hh>
24#include <dune/common/parallel/communication.hh>
25#include <dune/common/parallel/mpitraits.hh>
26#include <dune/common/parallel/mpifuture.hh>
27#include <dune/common/parallel/mpidata.hh>
28
29namespace Dune
30{
31
32 //=======================================================
33 // use singleton pattern and template specialization to
34 // generate MPI operations
35 //=======================================================
36
37 template<typename Type, typename BinaryFunction, typename Enable=void>
38 class Generic_MPI_Op
39 {
40
41 public:
42 static MPI_Op get ()
43 {
44 if (!op)
45 {
46 op = std::make_unique<MPI_Op>();
47 // The following line leaks an MPI operation object, because the corresponding
48 //`MPI_Op_free` is never called. It is never called because there is no easy
49 // way to call it at the right moment: right before the call to MPI_Finalize.
50 // See https://gitlab.dune-project.org/core/dune-istl/issues/80
51 MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
52 }
53 return *op;
54 }
55 private:
56 static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
57 {
58 BinaryFunction func;
59
60 for (int i=0; i< *len; ++i, ++in, ++inout) {
61 Type temp;
62 temp = func(*in, *inout);
63 *inout = temp;
64 }
65 }
66 Generic_MPI_Op () {}
67 Generic_MPI_Op (const Generic_MPI_Op& ) {}
68 static std::unique_ptr<MPI_Op> op;
69 };
70
71
72 template<typename Type, typename BinaryFunction, typename Enable>
73 std::unique_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction, Enable>::op;
74
75#define ComposeMPIOp(func,op) \
76 template<class T, class S> \
77 class Generic_MPI_Op<T, func<S>, std::enable_if_t<MPITraits<S>::is_intrinsic> >{ \
78 public: \
79 static MPI_Op get(){ \
80 return op; \
81 } \
82 private: \
83 Generic_MPI_Op () {} \
84 Generic_MPI_Op (const Generic_MPI_Op & ) {} \
85 }
86
87
88 ComposeMPIOp(std::plus, MPI_SUM);
89 ComposeMPIOp(std::multiplies, MPI_PROD);
90 ComposeMPIOp(Min, MPI_MIN);
91 ComposeMPIOp(Max, MPI_MAX);
92
93#undef ComposeMPIOp
94
95
96 //=======================================================
97 // use singleton pattern and template specialization to
98 // generate MPI operations
99 //=======================================================
100
104 template<>
105 class Communication<MPI_Comm>
106 {
107 public:
109 Communication (const MPI_Comm& c = MPI_COMM_WORLD)
110 : communicator(c)
111 {
112 if(communicator!=MPI_COMM_NULL) {
113 int initialized = 0;
114 MPI_Initialized(&initialized);
115 if (!initialized)
116 DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI Communication!");
117 MPI_Comm_rank(communicator,&me);
118 MPI_Comm_size(communicator,&procs);
119 }else{
120 procs=0;
121 me=-1;
122 }
123 }
124
126 int rank () const
127 {
128 return me;
129 }
130
132 int size () const
133 {
134 return procs;
135 }
136
138 template<class T>
139 int send(const T& data, int dest_rank, int tag) const
140 {
141 auto mpi_data = getMPIData(data);
142 return MPI_Send(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
143 dest_rank, tag, communicator);
144 }
145
147 template<class T>
148 MPIFuture<const T> isend(const T&& data, int dest_rank, int tag) const
149 {
150 MPIFuture<const T> future(std::forward<const T>(data));
151 auto mpidata = future.get_mpidata();
152 MPI_Isend(mpidata.ptr(), mpidata.size(), mpidata.type(),
153 dest_rank, tag, communicator, &future.req_);
154 return future;
155 }
156
158 template<class T>
159 T recv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
160 {
161 T lvalue_data(std::forward<T>(data));
162 auto mpi_data = getMPIData(lvalue_data);
163 MPI_Recv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
164 source_rank, tag, communicator, status);
165 return lvalue_data;
166 }
167
169 template<class T>
170 MPIFuture<T> irecv(T&& data, int source_rank, int tag) const
171 {
172 MPIFuture<T> future(std::forward<T>(data));
173 auto mpidata = future.get_mpidata();
174 MPI_Irecv(mpidata.ptr(), mpidata.size(), mpidata.type(),
175 source_rank, tag, communicator, &future.req_);
176 return future;
177 }
178
179 template<class T>
180 T rrecv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
181 {
182 MPI_Status _status;
183 MPI_Message _message;
184 T lvalue_data(std::forward<T>(data));
185 auto mpi_data = getMPIData(lvalue_data);
186 static_assert(!mpi_data.static_size, "rrecv works only for non-static-sized types.");
187 if(status == MPI_STATUS_IGNORE)
188 status = &_status;
189 MPI_Mprobe(source_rank, tag, communicator, &_message, status);
190 int size;
191 MPI_Get_count(status, mpi_data.type(), &size);
192 mpi_data.resize(size);
193 MPI_Mrecv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(), &_message, status);
194 return lvalue_data;
195 }
196
198 template<typename T>
199 T sum (const T& in) const
200 {
201 T out;
202 allreduce<std::plus<T> >(&in,&out,1);
203 return out;
204 }
205
207 template<typename T>
208 int sum (T* inout, int len) const
209 {
210 return allreduce<std::plus<T> >(inout,len);
211 }
212
214 template<typename T>
215 T prod (const T& in) const
216 {
217 T out;
218 allreduce<std::multiplies<T> >(&in,&out,1);
219 return out;
220 }
221
223 template<typename T>
224 int prod (T* inout, int len) const
225 {
226 return allreduce<std::multiplies<T> >(inout,len);
227 }
228
230 template<typename T>
231 T min (const T& in) const
232 {
233 T out;
234 allreduce<Min<T> >(&in,&out,1);
235 return out;
236 }
237
239 template<typename T>
240 int min (T* inout, int len) const
241 {
242 return allreduce<Min<T> >(inout,len);
243 }
244
245
247 template<typename T>
248 T max (const T& in) const
249 {
250 T out;
251 allreduce<Max<T> >(&in,&out,1);
252 return out;
253 }
254
256 template<typename T>
257 int max (T* inout, int len) const
258 {
259 return allreduce<Max<T> >(inout,len);
260 }
261
263 int barrier () const
264 {
265 return MPI_Barrier(communicator);
266 }
267
269 MPIFuture<void> ibarrier () const
270 {
271 MPIFuture<void> future(true); // make a valid MPIFuture<void>
272 MPI_Ibarrier(communicator, &future.req_);
273 return future;
274 }
275
276
278 template<typename T>
279 int broadcast (T* inout, int len, int root) const
280 {
281 return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
282 }
283
285 template<class T>
286 MPIFuture<T> ibroadcast(T&& data, int root) const{
287 MPIFuture<T> future(std::forward<T>(data));
288 auto mpidata = future.get_mpidata();
289 MPI_Ibcast(mpidata.ptr(),
290 mpidata.size(),
291 mpidata.type(),
292 root,
293 communicator,
294 &future.req_);
295 return future;
296 }
297
300 template<typename T>
301 int gather (const T* in, T* out, int len, int root) const
302 {
303 return MPI_Gather(const_cast<T*>(in),len,MPITraits<T>::getType(),
304 out,len,MPITraits<T>::getType(),
305 root,communicator);
306 }
307
309 template<class TIN, class TOUT = std::vector<TIN>>
310 MPIFuture<TOUT, TIN> igather(TIN&& data_in, TOUT&& data_out, int root) const{
311 MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
312 auto mpidata_in = future.get_send_mpidata();
313 auto mpidata_out = future.get_mpidata();
314 assert(root != me || mpidata_in.size()*procs <= mpidata_out.size());
315 int outlen = (me==root) * mpidata_in.size();
316 MPI_Igather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
317 mpidata_out.ptr(), outlen, mpidata_out.type(),
318 root, communicator, &future.req_);
319 return future;
320 }
321
323 template<typename T>
324 int gatherv (const T* in, int sendlen, T* out, int* recvlen, int* displ, int root) const
325 {
326 return MPI_Gatherv(const_cast<T*>(in),sendlen,MPITraits<T>::getType(),
327 out,recvlen,displ,MPITraits<T>::getType(),
328 root,communicator);
329 }
330
333 template<typename T>
334 int scatter (const T* send, T* recv, int len, int root) const
335 {
336 return MPI_Scatter(const_cast<T*>(send),len,MPITraits<T>::getType(),
337 recv,len,MPITraits<T>::getType(),
338 root,communicator);
339 }
340
342 template<class TIN, class TOUT = TIN>
343 MPIFuture<TOUT, TIN> iscatter(TIN&& data_in, TOUT&& data_out, int root) const
344 {
345 MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
346 auto mpidata_in = future.get_send_mpidata();
347 auto mpidata_out = future.get_mpidata();
348 int inlen = (me==root) * mpidata_in.size()/procs;
349 MPI_Iscatter(mpidata_in.ptr(), inlen, mpidata_in.type(),
350 mpidata_out.ptr(), mpidata_out.size(), mpidata_out.type(),
351 root, communicator, &future.req_);
352 return future;
353 }
354
356 template<typename T>
357 int scatterv (const T* send, int* sendlen, int* displ, T* recv, int recvlen, int root) const
358 {
359 return MPI_Scatterv(const_cast<T*>(send),sendlen,displ,MPITraits<T>::getType(),
360 recv,recvlen,MPITraits<T>::getType(),
361 root,communicator);
362 }
363
364
365 operator MPI_Comm () const
366 {
367 return communicator;
368 }
369
371 template<typename T, typename T1>
372 int allgather(const T* sbuf, int count, T1* rbuf) const
373 {
374 return MPI_Allgather(const_cast<T*>(sbuf), count, MPITraits<T>::getType(),
375 rbuf, count, MPITraits<T1>::getType(),
376 communicator);
377 }
378
380 template<class TIN, class TOUT = TIN>
381 MPIFuture<TOUT, TIN> iallgather(TIN&& data_in, TOUT&& data_out) const
382 {
383 MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
384 auto mpidata_in = future.get_send_mpidata();
385 auto mpidata_out = future.get_mpidata();
386 assert(mpidata_in.size()*procs <= mpidata_out.size());
387 int outlen = mpidata_in.size();
388 MPI_Iallgather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
389 mpidata_out.ptr(), outlen, mpidata_out.type(),
390 communicator, &future.req_);
391 return future;
392 }
393
395 template<typename T>
396 int allgatherv (const T* in, int sendlen, T* out, int* recvlen, int* displ) const
397 {
398 return MPI_Allgatherv(const_cast<T*>(in),sendlen,MPITraits<T>::getType(),
399 out,recvlen,displ,MPITraits<T>::getType(),
400 communicator);
401 }
402
404 template<typename BinaryFunction, typename Type>
405 int allreduce(Type* inout, int len) const
406 {
407 Type* out = new Type[len];
408 int ret = allreduce<BinaryFunction>(inout,out,len);
409 std::copy(out, out+len, inout);
410 delete[] out;
411 return ret;
412 }
413
414 template<typename BinaryFunction, typename Type>
415 Type allreduce(Type&& in) const{
416 Type lvalue_data = std::forward<Type>(in);
417 auto data = getMPIData(lvalue_data);
418 MPI_Allreduce(MPI_IN_PLACE, data.ptr(), data.size(), data.type(),
419 (Generic_MPI_Op<Type, BinaryFunction>::get()),
420 communicator);
421 return lvalue_data;
422 }
423
425 template<class BinaryFunction, class TIN, class TOUT = TIN>
426 MPIFuture<TOUT, TIN> iallreduce(TIN&& data_in, TOUT&& data_out) const {
427 MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
428 auto mpidata_in = future.get_send_mpidata();
429 auto mpidata_out = future.get_mpidata();
430 assert(mpidata_out.size() == mpidata_in.size());
431 assert(mpidata_out.type() == mpidata_in.type());
432 MPI_Iallreduce(mpidata_in.ptr(), mpidata_out.ptr(),
433 mpidata_out.size(), mpidata_out.type(),
434 (Generic_MPI_Op<TIN, BinaryFunction>::get()),
435 communicator, &future.req_);
436 return future;
437 }
438
440 template<class BinaryFunction, class T>
441 MPIFuture<T> iallreduce(T&& data) const{
442 MPIFuture<T> future(std::forward<T>(data));
443 auto mpidata = future.get_mpidata();
444 MPI_Iallreduce(MPI_IN_PLACE, mpidata.ptr(),
445 mpidata.size(), mpidata.type(),
446 (Generic_MPI_Op<T, BinaryFunction>::get()),
447 communicator, &future.req_);
448 return future;
449 }
450
452 template<typename BinaryFunction, typename Type>
453 int allreduce(const Type* in, Type* out, int len) const
454 {
455 return MPI_Allreduce(const_cast<Type*>(in), out, len, MPITraits<Type>::getType(),
456 (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
457 }
458
459 private:
460 MPI_Comm communicator;
461 int me;
462 int procs;
463 };
464} // namespace dune
465
466#endif // HAVE_MPI
467
468#endif
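The constructor of this specialization throws ParallelError unless MPIHelper::instance() has been called first. The following minimal sketch (not part of the header) shows the intended setup, assuming an MPI-enabled DUNE build and the class name Communication<MPI_Comm> exactly as declared above; all other names are illustrative.

#include <iostream>

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicommunication.hh>

int main(int argc, char** argv)
{
  // MPIHelper::instance() initializes (and later finalizes) MPI; it must be
  // called before a Communication<MPI_Comm> is constructed.
  Dune::MPIHelper::instance(argc, argv);

  Dune::Communication<MPI_Comm> comm(MPI_COMM_WORLD);

  // every rank contributes its rank number; sum() returns the global sum on all ranks
  int globalSum = comm.sum(comm.rank());

  if (comm.rank() == 0)
    std::cout << "sum of ranks = " << globalSum
              << " on " << comm.size() << " processes" << std::endl;

  return 0;
}

The sketches attached to individual members below reuse this setup (plus #include <vector> where needed) and pass the communication object around as const Dune::Communication<MPI_Comm>&.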
Helper classes to provide unique types for standard functions.
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:257
T max(const T &in) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:248
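Illustrative sketch for min()/max() (not from the DUNE sources; assumes the setup from the example above): a typical use is a global time-step restriction.

void restrictTimestep(const Dune::Communication<MPI_Comm>& comm)
{
  double localDt = 0.1 / (comm.rank() + 1);   // some rank-dependent candidate
  double dtMin = comm.min(localDt);           // identical on every rank afterwards
  double dtMax = comm.max(localDt);
  if (comm.rank() == 0)
    std::cout << "dt in [" << dtMin << ", " << dtMax << "]\n";
}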
MPIFuture< T > ibroadcast(T &&data, int root) const
Distribute an array from the process with rank root to all other processes nonblocking.
Definition: mpicommunication.hh:286
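A sketch of the nonblocking broadcast (illustrative, not from the DUNE sources): the data is moved into the returned MPIFuture, and get() is assumed to block until completion and hand the buffer back, following the future interface of <dune/common/parallel/future.hh>. Requires <vector> and <algorithm> in addition to the setup above.

void broadcastTable(const Dune::Communication<MPI_Comm>& comm)
{
  std::vector<double> values(100, 0.0);
  if (comm.rank() == 0)
    std::fill(values.begin(), values.end(), 3.14);   // root fills the table

  auto future = comm.ibroadcast(std::move(values), /*root*/ 0);
  // ... unrelated local work can overlap with the communication here ...
  values = future.get();   // blocks until the broadcast has completed
}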
MPIFuture< void > ibarrier() const
Nonblocking barrier.
Definition: mpicommunication.hh:269
MPIFuture< const T > isend(const T &&data, int dest_rank, int tag) const
Sends the data to the dest_rank nonblocking.
Definition: mpicommunication.hh:148
T recv(T &&data, int source_rank, int tag, MPI_Status *status=MPI_STATUS_IGNORE) const
Receives the data from the source_rank.
Definition: mpicommunication.hh:159
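A sketch of a blocking point-to-point exchange, using send() (documented further below) together with recv(); illustrative only, needs at least two ranks, and the tag value is arbitrary.

void pingExample(const Dune::Communication<MPI_Comm>& comm)
{
  const int tag = 42;
  if (comm.rank() == 0) {
    std::vector<int> payload{1, 2, 3};
    comm.send(payload, /*dest_rank*/ 1, tag);
  }
  else if (comm.rank() == 1) {
    // recv() fills the buffer passed in and returns it by value
    std::vector<int> buffer = comm.recv(std::vector<int>(3), /*source_rank*/ 0, tag);
    // buffer now contains {1, 2, 3}
  }
}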
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: mpicommunication.hh:263
int rank() const
Return rank, is between 0 and size()-1.
Definition: mpicommunication.hh:126
MPIFuture< TOUT, TIN > iallgather(TIN &&data_in, TOUT &&data_out) const
Gathers data from all tasks and distributes it to all nonblocking.
Definition: mpicommunication.hh:381
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:208
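Illustrative sketch of the in-place array variant: each component is reduced separately, so several scalars can be summed with a single call.

void accumulateContributions(const Dune::Communication<MPI_Comm>& comm)
{
  double contrib[2] = { 1.0 * comm.rank(), 2.0 * comm.rank() };   // local contributions
  comm.sum(contrib, 2);   // contrib now holds the component-wise global sums
}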
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: mpicommunication.hh:279
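Illustrative sketch for broadcast(): parameters set up on the root become available on every rank.

void distributeParameters(const Dune::Communication<MPI_Comm>& comm)
{
  double params[4] = { 0.0, 0.0, 0.0, 0.0 };
  if (comm.rank() == 0) {              // pretend rank 0 read these from a file
    params[0] = 1.0; params[1] = 2.0; params[2] = 3.0; params[3] = 4.0;
  }
  comm.broadcast(params, 4, /*root*/ 0);   // afterwards identical on all ranks
}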
MPIFuture< T > iallreduce(T &&data) const
Compute something over all processes nonblocking.
Definition: mpicommunication.hh:441
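Sketch of the in-place nonblocking reduction (illustrative): the binary function is passed as a template argument, here Dune::Max from binaryfunctions.hh; get() is again assumed to block until the operation has finished.

double globalResidual(const Dune::Communication<MPI_Comm>& comm)
{
  double localResidual = 1.0 / (comm.rank() + 1);
  auto future = comm.iallreduce<Dune::Max<double>>(std::move(localResidual));
  // ... overlap with local work ...
  return future.get();   // global maximum, identical on every rank
}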
T sum(const T &in) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:199
int allreduce(const Type *in, Type *out, int len) const
Definition: mpicommunication.hh:453
MPIFuture< TOUT, TIN > iallreduce(TIN &&data_in, TOUT &&data_out) const
Compute something over all processes nonblocking.
Definition: mpicommunication.hh:426
int size() const
Number of processes in set, is greater than 0.
Definition: mpicommunication.hh:132
int gather(const T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: mpicommunication.hh:301
int allreduce(Type *inout, int len) const
Compute something over all processes for each component of an array and return the result in every process.
Definition: mpicommunication.hh:405
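Illustrative sketch: the reduction operation is selected through the BinaryFunction template parameter; for intrinsic types the functors std::plus, std::multiplies, Dune::Min and Dune::Max map directly onto the predefined MPI operations via the ComposeMPIOp specializations above (std::plus comes from <functional>, already included by this header).

void countDofs(const Dune::Communication<MPI_Comm>& comm)
{
  int counts[3] = { comm.rank(), 1, 2 * comm.rank() };
  comm.allreduce<std::plus<int>>(counts, 3);   // component-wise global sums on every rank
}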
int scatterv(const T *send, int *sendlen, int *displ, T *recv, int recvlen, int root) const
Scatter arrays of variable length from a root to all other tasks.
Definition: mpicommunication.hh:357
MPIFuture< T > irecv(T &&data, int source_rank, int tag) const
Receives the data from the source_rank nonblocking.
Definition: mpicommunication.hh:170
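A sketch combining isend() and irecv() (illustrative; at least two ranks). The send buffer is moved into the returned future and kept alive there; wait() and get() are assumed to behave as in the generic future interface.

void nonblockingExchange(const Dune::Communication<MPI_Comm>& comm)
{
  const int tag = 7;
  if (comm.rank() == 0) {
    auto sendFuture = comm.isend(std::vector<int>{1, 2, 3}, /*dest_rank*/ 1, tag);
    sendFuture.wait();   // completes the send; the buffer held by the future may now go away
  }
  else if (comm.rank() == 1) {
    auto recvFuture = comm.irecv(std::vector<int>(3), /*source_rank*/ 0, tag);
    std::vector<int> received = recvFuture.get();   // {1, 2, 3}
  }
}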
int allgatherv(const T *in, int sendlen, T *out, int *recvlen, int *displ) const
Gathers data of variable length from all tasks and distributes it to all.
Definition: mpicommunication.hh:396
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:224
MPIFuture< TOUT, TIN > igather(TIN &&data_in, TOUT &&data_out, int root) const
Gather arrays on root task nonblocking.
Definition: mpicommunication.hh:310
int scatter(const T *send, T *recv, int len, int root) const
Scatter array from a root to all other tasks.
Definition: mpicommunication.hh:334
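Illustrative sketch for scatter(): the root provides size()*len entries, and each rank receives its own block of len entries.

void handOutBlocks(const Dune::Communication<MPI_Comm>& comm)
{
  const int len = 2;
  std::vector<double> sendbuf;               // only inspected on the root
  if (comm.rank() == 0)
    for (int r = 0; r < comm.size(); ++r) {
      sendbuf.push_back(r);                  // first entry for rank r
      sendbuf.push_back(10.0 * r);           // second entry for rank r
    }
  std::vector<double> recvbuf(len);
  comm.scatter(sendbuf.data(), recvbuf.data(), len, /*root*/ 0);
}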
T min(const T &in) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:231
Communication(const MPI_Comm &c=MPI_COMM_WORLD)
Instantiation using a MPI communicator.
Definition: mpicommunication.hh:109
MPIFuture< TOUT, TIN > iscatter(TIN &&data_in, TOUT &&data_out, int root) const
Scatter array from a root to all other tasks nonblocking.
Definition: mpicommunication.hh:343
int gatherv(const T *in, int sendlen, T *out, int *recvlen, int *displ, int root) const
Gather arrays of variable size on root task.
Definition: mpicommunication.hh:324
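Illustrative sketch for gatherv(): every rank contributes rank+1 integers, and recvlen/displ describe where each contribution lands in the root's receive buffer.

void collectVariableData(const Dune::Communication<MPI_Comm>& comm)
{
  const int sendlen = comm.rank() + 1;
  std::vector<int> senddata(sendlen, comm.rank());

  std::vector<int> recvlen(comm.size()), displ(comm.size(), 0), recvbuf;
  for (int r = 0; r < comm.size(); ++r)
    recvlen[r] = r + 1;
  for (int r = 1; r < comm.size(); ++r)
    displ[r] = displ[r-1] + recvlen[r-1];
  if (comm.rank() == 0)
    recvbuf.resize(displ.back() + recvlen.back());   // total number of entries

  comm.gatherv(senddata.data(), sendlen,
               recvbuf.data(), recvlen.data(), displ.data(), /*root*/ 0);
}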
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:240
int allgather(const T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distributes it to all.
Definition: mpicommunication.hh:372
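Illustrative sketch for allgather(): one value per rank, and afterwards every rank holds the complete list.

void exchangeRanks(const Dune::Communication<MPI_Comm>& comm)
{
  int myRank = comm.rank();
  std::vector<int> allRanks(comm.size());
  comm.allgather(&myRank, 1, allRanks.data());   // allRanks = {0, 1, ..., size()-1}
}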
int send(const T &data, int dest_rank, int tag) const
Sends the data to the dest_rank.
Definition: mpicommunication.hh:139
T prod(const T &in) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicommunication.hh:215
Collective communication interface and sequential default implementation.
Definition: communication.hh:81
int send(const T &data, int dest_rank, int tag)
Sends the data to the dest_rank.
Definition: communication.hh:110
T recv(T &&data, int source_rank, int tag, void *status=0)
Receives the data from the source_rank.
Definition: communication.hh:132
int allreduce(Type *inout, int len) const
Compute something over all processes for each component of an array and return the result in every process.
Definition: communication.hh:472
int size() const
Number of processes in set, is greater than 0.
Definition: communication.hh:101
Provides a future-like object for MPI communication. It contains the object that will be received and...
Definition: mpifuture.hh:82
Default exception if an error in the parallel communication of the program occurred.
Definition: exceptions.hh:285
Implements a utility class that provides collective communication methods for sequential programs.
A few common exception classes.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
Interface class to translate objects to a MPI_Datatype, void* and size used for MPI calls.
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignedallocator.hh:14
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:38