Dune Core Modules (2.9.0)

mpicommunication.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 // SPDX-FileCopyrightText: Copyright (C) DUNE Project contributors, see file LICENSE.md in module root
4 // SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
5 #ifndef DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
6 #define DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
7 
16 #if HAVE_MPI
17 
18 #include <algorithm>
19 #include <functional>
20 #include <memory>
21 
22 #include <mpi.h>
23 
28 #include <dune/common/parallel/mpifuture.hh>
30 
31 namespace Dune
32 {
33 
34  //=======================================================
35  // use singleton pattern and template specialization to
36  // generate MPI operations
37  //=======================================================
38 
39  template<typename Type, typename BinaryFunction, typename Enable=void>
40  class Generic_MPI_Op
41  {
42 
43  public:
44  static MPI_Op get ()
45  {
46  if (!op)
47  {
48  op = std::make_unique<MPI_Op>();
49  // The following line leaks an MPI operation object, because the corresponding
50  //`MPI_Op_free` is never called. It is never called because there is no easy
51  // way to call it at the right moment: right before the call to MPI_Finalize.
52  // See https://gitlab.dune-project.org/core/dune-istl/issues/80
53  MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
54  }
55  return *op;
56  }
57  private:
58  static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
59  {
60  BinaryFunction func;
61 
62  for (int i=0; i< *len; ++i, ++in, ++inout) {
63  Type temp;
64  temp = func(*in, *inout);
65  *inout = temp;
66  }
67  }
68  Generic_MPI_Op () {}
69  Generic_MPI_Op (const Generic_MPI_Op& ) {}
70  static std::unique_ptr<MPI_Op> op;
71  };
72 
73 
74  template<typename Type, typename BinaryFunction, typename Enable>
75  std::unique_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction, Enable>::op;
76 
77 #define ComposeMPIOp(func,op) \
78  template<class T, class S> \
79  class Generic_MPI_Op<T, func<S>, std::enable_if_t<MPITraits<S>::is_intrinsic> >{ \
80  public: \
81  static MPI_Op get(){ \
82  return op; \
83  } \
84  private: \
85  Generic_MPI_Op () {} \
86  Generic_MPI_Op (const Generic_MPI_Op & ) {} \
87  }
88 
89 
90  ComposeMPIOp(std::plus, MPI_SUM);
91  ComposeMPIOp(std::multiplies, MPI_PROD);
92  ComposeMPIOp(Min, MPI_MIN);
93  ComposeMPIOp(Max, MPI_MAX);
94 
95 #undef ComposeMPIOp
96 
97 
98  //=======================================================
99  // use singleton pattern and template specialization to
100  // generate MPI operations
101  //=======================================================
102 
106  template<>
107  class Communication<MPI_Comm>
108  {
109  public:
111  Communication (const MPI_Comm& c = MPI_COMM_WORLD)
112  : communicator(c)
113  {
114  if(communicator!=MPI_COMM_NULL) {
115  int initialized = 0;
116  MPI_Initialized(&initialized);
117  if (!initialized)
118  DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI Communication!");
119  MPI_Comm_rank(communicator,&me);
120  MPI_Comm_size(communicator,&procs);
121  }else{
122  procs=0;
123  me=-1;
124  }
125  }
126 
128  int rank () const
129  {
130  return me;
131  }
132 
134  int size () const
135  {
136  return procs;
137  }
138 
140  template<class T>
141  int send(const T& data, int dest_rank, int tag) const
142  {
143  auto mpi_data = getMPIData(data);
144  return MPI_Send(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
145  dest_rank, tag, communicator);
146  }
147 
149  template<class T>
150  MPIFuture<const T> isend(const T&& data, int dest_rank, int tag) const
151  {
152  MPIFuture<const T> future(std::forward<const T>(data));
153  auto mpidata = future.get_mpidata();
154  MPI_Isend(mpidata.ptr(), mpidata.size(), mpidata.type(),
155  dest_rank, tag, communicator, &future.req_);
156  return future;
157  }
158 
160  template<class T>
161  T recv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
162  {
163  T lvalue_data(std::forward<T>(data));
164  auto mpi_data = getMPIData(lvalue_data);
165  MPI_Recv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
166  source_rank, tag, communicator, status);
167  return lvalue_data;
168  }
169 
171  template<class T>
172  MPIFuture<T> irecv(T&& data, int source_rank, int tag) const
173  {
174  MPIFuture<T> future(std::forward<T>(data));
175  auto mpidata = future.get_mpidata();
176  MPI_Irecv(mpidata.ptr(), mpidata.size(), mpidata.type(),
177  source_rank, tag, communicator, &future.req_);
178  return future;
179  }
180 
181  template<class T>
182  T rrecv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
183  {
184  MPI_Status _status;
185  MPI_Message _message;
186  T lvalue_data(std::forward<T>(data));
187  auto mpi_data = getMPIData(lvalue_data);
188  static_assert(!mpi_data.static_size, "rrecv works only for non-static-sized types.");
189  if(status == MPI_STATUS_IGNORE)
190  status = &_status;
191  MPI_Mprobe(source_rank, tag, communicator, &_message, status);
192  int size;
193  MPI_Get_count(status, mpi_data.type(), &size);
194  mpi_data.resize(size);
195  MPI_Mrecv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(), &_message, status);
196  return lvalue_data;
197  }
198 
200  template<typename T>
201  T sum (const T& in) const
202  {
203  T out;
204  allreduce<std::plus<T> >(&in,&out,1);
205  return out;
206  }
207 
209  template<typename T>
210  int sum (T* inout, int len) const
211  {
212  return allreduce<std::plus<T> >(inout,len);
213  }
214 
216  template<typename T>
217  T prod (const T& in) const
218  {
219  T out;
220  allreduce<std::multiplies<T> >(&in,&out,1);
221  return out;
222  }
223 
225  template<typename T>
226  int prod (T* inout, int len) const
227  {
228  return allreduce<std::multiplies<T> >(inout,len);
229  }
230 
232  template<typename T>
233  T min (const T& in) const
234  {
235  T out;
236  allreduce<Min<T> >(&in,&out,1);
237  return out;
238  }
239 
241  template<typename T>
242  int min (T* inout, int len) const
243  {
244  return allreduce<Min<T> >(inout,len);
245  }
246 
247 
249  template<typename T>
250  T max (const T& in) const
251  {
252  T out;
253  allreduce<Max<T> >(&in,&out,1);
254  return out;
255  }
256 
258  template<typename T>
259  int max (T* inout, int len) const
260  {
261  return allreduce<Max<T> >(inout,len);
262  }
263 
265  int barrier () const
266  {
267  return MPI_Barrier(communicator);
268  }
269 
271  MPIFuture<void> ibarrier () const
272  {
273  MPIFuture<void> future(true); // make a valid MPIFuture<void>
274  MPI_Ibarrier(communicator, &future.req_);
275  return future;
276  }
277 
278 
280  template<typename T>
281  int broadcast (T* inout, int len, int root) const
282  {
283  return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
284  }
285 
287  template<class T>
288  MPIFuture<T> ibroadcast(T&& data, int root) const{
289  MPIFuture<T> future(std::forward<T>(data));
290  auto mpidata = future.get_mpidata();
291  MPI_Ibcast(mpidata.ptr(),
292  mpidata.size(),
293  mpidata.type(),
294  root,
295  communicator,
296  &future.req_);
297  return future;
298  }
299 
302  template<typename T>
303  int gather (const T* in, T* out, int len, int root) const
304  {
305  return MPI_Gather(const_cast<T*>(in),len,MPITraits<T>::getType(),
306  out,len,MPITraits<T>::getType(),
307  root,communicator);
308  }
309 
311  template<class TIN, class TOUT = std::vector<TIN>>
312  MPIFuture<TOUT, TIN> igather(TIN&& data_in, TOUT&& data_out, int root) const{
313  MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
314  auto mpidata_in = future.get_send_mpidata();
315  auto mpidata_out = future.get_mpidata();
316  assert(root != me || mpidata_in.size()*procs <= mpidata_out.size());
317  int outlen = (me==root) * mpidata_in.size();
318  MPI_Igather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
319  mpidata_out.ptr(), outlen, mpidata_out.type(),
320  root, communicator, &future.req_);
321  return future;
322  }
323 
325  template<typename T>
326  int gatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ, int root) const
327  {
328  return MPI_Gatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
329  out,recvDataLen,displ,MPITraits<T>::getType(),
330  root,communicator);
331  }
332 
335  template<typename T>
336  int scatter (const T* sendData, T* recvData, int len, int root) const
337  {
338  return MPI_Scatter(const_cast<T*>(sendData),len,MPITraits<T>::getType(),
339  recvData,len,MPITraits<T>::getType(),
340  root,communicator);
341  }
342 
344  template<class TIN, class TOUT = TIN>
345  MPIFuture<TOUT, TIN> iscatter(TIN&& data_in, TOUT&& data_out, int root) const
346  {
347  MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
348  auto mpidata_in = future.get_send_mpidata();
349  auto mpidata_out = future.get_mpidata();
350  int inlen = (me==root) * mpidata_in.size()/procs;
351  MPI_Iscatter(mpidata_in.ptr(), inlen, mpidata_in.type(),
352  mpidata_out.ptr(), mpidata_out.size(), mpidata_out.type(),
353  root, communicator, &future.req_);
354  return future;
355  }
356 
358  template<typename T>
359  int scatterv (const T* sendData, int* sendDataLen, int* displ, T* recvData, int recvDataLen, int root) const
360  {
361  return MPI_Scatterv(const_cast<T*>(sendData),sendDataLen,displ,MPITraits<T>::getType(),
362  recvData,recvDataLen,MPITraits<T>::getType(),
363  root,communicator);
364  }
365 
366 
367  operator MPI_Comm () const
368  {
369  return communicator;
370  }
371 
373  template<typename T, typename T1>
374  int allgather(const T* sbuf, int count, T1* rbuf) const
375  {
376  return MPI_Allgather(const_cast<T*>(sbuf), count, MPITraits<T>::getType(),
377  rbuf, count, MPITraits<T1>::getType(),
378  communicator);
379  }
380 
382  template<class TIN, class TOUT = TIN>
383  MPIFuture<TOUT, TIN> iallgather(TIN&& data_in, TOUT&& data_out) const
384  {
385  MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
386  auto mpidata_in = future.get_send_mpidata();
387  auto mpidata_out = future.get_mpidata();
388  assert(mpidata_in.size()*procs <= mpidata_out.size());
389  int outlen = mpidata_in.size();
390  MPI_Iallgather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
391  mpidata_out.ptr(), outlen, mpidata_out.type(),
392  communicator, &future.req_);
393  return future;
394  }
395 
397  template<typename T>
398  int allgatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ) const
399  {
400  return MPI_Allgatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
401  out,recvDataLen,displ,MPITraits<T>::getType(),
402  communicator);
403  }
404 
406  template<typename BinaryFunction, typename Type>
407  int allreduce(Type* inout, int len) const
408  {
409  Type* out = new Type[len];
410  int ret = allreduce<BinaryFunction>(inout,out,len);
411  std::copy(out, out+len, inout);
412  delete[] out;
413  return ret;
414  }
415 
416  template<typename BinaryFunction, typename Type>
417  Type allreduce(Type&& in) const{
418  Type lvalue_data = std::forward<Type>(in);
419  auto data = getMPIData(lvalue_data);
420  MPI_Allreduce(MPI_IN_PLACE, data.ptr(), data.size(), data.type(),
421  (Generic_MPI_Op<Type, BinaryFunction>::get()),
422  communicator);
423  return lvalue_data;
424  }
425 
427  template<class BinaryFunction, class TIN, class TOUT = TIN>
428  MPIFuture<TOUT, TIN> iallreduce(TIN&& data_in, TOUT&& data_out) const {
429  MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
430  auto mpidata_in = future.get_send_mpidata();
431  auto mpidata_out = future.get_mpidata();
432  assert(mpidata_out.size() == mpidata_in.size());
433  assert(mpidata_out.type() == mpidata_in.type());
434  MPI_Iallreduce(mpidata_in.ptr(), mpidata_out.ptr(),
435  mpidata_out.size(), mpidata_out.type(),
436  (Generic_MPI_Op<TIN, BinaryFunction>::get()),
437  communicator, &future.req_);
438  return future;
439  }
440 
442  template<class BinaryFunction, class T>
443  MPIFuture<T> iallreduce(T&& data) const{
444  MPIFuture<T> future(std::forward<T>(data));
445  auto mpidata = future.get_mpidata();
446  MPI_Iallreduce(MPI_IN_PLACE, mpidata.ptr(),
447  mpidata.size(), mpidata.type(),
448  (Generic_MPI_Op<T, BinaryFunction>::get()),
449  communicator, &future.req_);
450  return future;
451  }
452 
454  template<typename BinaryFunction, typename Type>
455  int allreduce(const Type* in, Type* out, int len) const
456  {
457  return MPI_Allreduce(const_cast<Type*>(in), out, len, MPITraits<Type>::getType(),
458  (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
459  }
460 
461  private:
462  MPI_Comm communicator;
463  int me;
464  int procs;
465  };
466 } // namespace Dune
467 
468 #endif // HAVE_MPI
469 
470 #endif
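
A minimal usage sketch (not part of the header; variable names are illustrative): the class is meant to be used after MPI has been initialised through MPIHelper::instance, as the constructor's error message above requires. The comm object from this sketch is reused in the short examples further below.

  #include <iostream>
  #include <dune/common/parallel/mpihelper.hh>
  #include <dune/common/parallel/mpicommunication.hh>

  int main(int argc, char** argv)
  {
    // Initialize MPI first; the Communication constructor checks MPI_Initialized().
    Dune::MPIHelper::instance(argc, argv);

    Dune::Communication<MPI_Comm> comm(MPI_COMM_WORLD);
    double local = comm.rank() + 1.0;   // some per-process value
    double global = comm.sum(local);    // allreduce with MPI_SUM under the hood
    if (comm.rank() == 0)
      std::cout << "sum over " << comm.size() << " ranks: " << global << "\n";
    return 0;
  }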
Helper classes to provide unique types for standard functions.
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:259
int allgatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ) const
Gathers data of variable length from all tasks and distributes it to all.
Definition: mpicommunication.hh:398
T max(const T &in) const
Compute the maximum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:250
T recv(T &&data, int source_rank, int tag, MPI_Status *status=MPI_STATUS_IGNORE) const
Receives the data from the source_rank.
Definition: mpicommunication.hh:161
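A hedged example of the blocking send()/recv() pair, reusing the comm object from the sketch after the listing above; the literal values and the tag are illustrative.

  const int tag = 42;
  if (comm.rank() == 0 && comm.size() > 1)
    comm.send(3.14, /*dest_rank=*/1, tag);                  // blocking MPI_Send
  else if (comm.rank() == 1) {
    double value = comm.recv(0.0, /*source_rank=*/0, tag);  // blocking MPI_Recv
    // value now holds 3.14 sent by rank 0
  }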
MPIFuture< T > iallreduce(T &&data) const
Compute something over all processes nonblocking.
Definition: mpicommunication.hh:443
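A sketch of the nonblocking in-place reduction, reusing the comm object from above; it assumes MPIFuture behaves as the future-like object described in mpifuture.hh (wait()/get()).

  std::vector<double> local(8, double(comm.rank()));   // same length on every rank
  auto future = comm.iallreduce<std::plus<double>>(std::move(local));
  // ... overlap independent local work with the communication ...
  std::vector<double> summed = future.get();   // blocks until MPI_Iallreduce has completed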
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: mpicommunication.hh:265
int rank() const
Return rank, which is between 0 and size()-1.
Definition: mpicommunication.hh:128
MPIFuture< TOUT, TIN > iallreduce(TIN &&data_in, TOUT &&data_out) const
Compute something over all processes nonblocking.
Definition: mpicommunication.hh:428
MPIFuture< T > irecv(T &&data, int source_rank, int tag) const
Receives the data from the source_rank nonblocking.
Definition: mpicommunication.hh:172
int scatterv(const T *sendData, int *sendDataLen, int *displ, T *recvData, int recvDataLen, int root) const
Scatter arrays of variable length from a root to all other tasks.
Definition: mpicommunication.hh:359
MPIFuture< TOUT, TIN > iallgather(TIN &&data_in, TOUT &&data_out) const
Gathers data from all tasks and distributes it to all nonblocking.
Definition: mpicommunication.hh:383
MPIFuture< T > ibroadcast(T &&data, int root) const
Distribute an array from the process with rank root to all other processes nonblocking.
Definition: mpicommunication.hh:288
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:210
MPIFuture< const T > isend(const T &&data, int dest_rank, int tag) const
Sends the data to the dest_rank nonblocking.
Definition: mpicommunication.hh:150
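A sketch of a nonblocking ring exchange with isend()/irecv(), reusing the comm object from above and the assumed future-like interface of MPIFuture.

  int right = (comm.rank() + 1) % comm.size();
  int left  = (comm.rank() + comm.size() - 1) % comm.size();
  auto sendFuture = comm.isend(double(comm.rank()), right, /*tag=*/7);  // MPI_Isend
  auto recvFuture = comm.irecv(0.0, left, /*tag=*/7);                   // MPI_Irecv
  double received = recvFuture.get();   // rank of the left neighbour
  sendFuture.wait();                    // also wait for the send to complete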
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: mpicommunication.hh:281
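A short example of broadcast(), reusing the comm object from above; the parameter array is illustrative.

  double params[3] = {0.0, 0.0, 0.0};
  if (comm.rank() == 0) { params[0] = 1.0; params[1] = 2.0; params[2] = 3.0; }
  comm.broadcast(params, 3, /*root=*/0);   // MPI_Bcast, datatype taken from MPITraits<double>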
T sum(const T &in) const
Compute the sum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:201
int allreduce(const Type *in, Type *out, int len) const
Definition: mpicommunication.hh:455
int size() const
Number of processes in the set; it is greater than 0.
Definition: mpicommunication.hh:134
int gather(const T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: mpicommunication.hh:303
int allreduce(Type *inout, int len) const
Definition: mpicommunication.hh:407
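A short example of the component-wise in-place reduction, reusing the comm object from above.

  int counts[2] = { comm.rank(), 1 };
  comm.allreduce<std::plus<int>>(counts, 2);
  // counts[0] now holds the sum of all ranks, counts[1] the number of processes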
MPIFuture< TOUT, TIN > igather(TIN &&data_in, TOUT &&data_out, int root) const
Gather arrays on root task nonblocking.
Definition: mpicommunication.hh:312
int scatter(const T *sendData, T *recvData, int len, int root) const
Scatter array from a root to all other tasks.
Definition: mpicommunication.hh:336
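A sketch of scatter(), reusing the comm object from above: the root distributes one value to every rank; buffer names are illustrative.

  std::vector<double> sendBuf;
  if (comm.rank() == 0)
    for (int r = 0; r < comm.size(); ++r)
      sendBuf.push_back(10.0 * r);       // one entry per rank, only needed on the root
  double mine = 0.0;
  comm.scatter(sendBuf.data(), &mine, 1, /*root=*/0);   // rank r ends up with 10.0*r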
MPIFuture< TOUT, TIN > iscatter(TIN &&data_in, TOUT &&data_out, int root) const
Scatter array from a root to all other tasks nonblocking.
Definition: mpicommunication.hh:345
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:226
MPIFuture< void > ibarrier() const
Nonblocking barrier.
Definition: mpicommunication.hh:271
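A sketch of the nonblocking barrier, reusing the comm object from above and the assumed wait() of the future-like MPIFuture<void>.

  auto barrier = comm.ibarrier();   // starts MPI_Ibarrier
  // ... local work that does not depend on the other ranks ...
  barrier.wait();                   // completes the barrier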
T min(const T &in) const
Compute the minimum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:233
Communication(const MPI_Comm &c=MPI_COMM_WORLD)
Instantiation using an MPI communicator.
Definition: mpicommunication.hh:111
int gatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ, int root) const
Gather arrays of variable size on root task.
Definition: mpicommunication.hh:326
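A sketch combining gather() and gatherv(), reusing the comm object from above: each rank contributes rank+1 values and the root assembles them; all names are illustrative.

  int myLen = comm.rank() + 1;
  std::vector<double> local(myLen, double(comm.rank()));
  std::vector<int> lens(comm.size()), displ(comm.size(), 0);
  comm.gather(&myLen, lens.data(), 1, /*root=*/0);        // root learns every length
  for (std::size_t i = 1; i < displ.size(); ++i)
    displ[i] = displ[i-1] + lens[i-1];
  std::vector<double> all;
  if (comm.rank() == 0)
    all.resize(displ.back() + lens.back());
  comm.gatherv(local.data(), myLen, all.data(), lens.data(), displ.data(), /*root=*/0);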
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:242
int allgather(const T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distributes it to all.
Definition: mpicommunication.hh:374
int send(const T &data, int dest_rank, int tag) const
Sends the data to the dest_rank.
Definition: mpicommunication.hh:141
T prod(const T &in) const
Compute the product of the argument over all processes and return the result in every process....
Definition: mpicommunication.hh:217
Collective communication interface and sequential default implementation.
Definition: communication.hh:100
int size() const
Number of processes in the set; it is greater than 0.
Definition: communication.hh:126
int allreduce([[maybe_unused]] Type *inout, [[maybe_unused]] int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: communication.hh:486
Provides a future-like object for MPI communication. It contains the object that will be received and...
Definition: mpifuture.hh:85
Default exception if an error in the parallel communication of the program occurred.
Definition: exceptions.hh:287
A few common exception classes.
Implements a utility class that provides collective communication methods for sequential programs.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
Interface class to translate objects to a MPI_Datatype, void* and size used for MPI calls.
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignedallocator.hh:13
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:41