dune-common  2.3.1-rc1
parallel/mpicollectivecommunication.hh
Go to the documentation of this file.
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
4 #define DUNE_MPICOLLECTIVECOMMUNICATION_HH
5 
14 #include <iostream>
15 #include <complex>
16 #include <algorithm>
17 #include <functional>
18 
22 
24 #include "mpitraits.hh"
25 
26 #if HAVE_MPI
27 // MPI header
28 #include <mpi.h>
29 
30 namespace Dune
31 {
32 
33  //=======================================================
34  // use singleton pattern and template specialization to
35  // generate MPI operations
36  //=======================================================
37 
38  template<typename Type, typename BinaryFunction>
40  {
41 
42  public:
43  static MPI_Op get ()
44  {
45  if (!op)
46  {
47  op = shared_ptr<MPI_Op>(new MPI_Op);
48  MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
49  }
50  return *op;
51  }
52  private:
53  static void operation (Type *in, Type *inout, int *len, MPI_Datatype *dptr)
54  {
55  BinaryFunction func;
56 
57  for (int i=0; i< *len; ++i, ++in, ++inout) {
58  Type temp;
59  temp = func(*in, *inout);
60  *inout = temp;
61  }
62  }
63  Generic_MPI_Op () {}
64  Generic_MPI_Op (const Generic_MPI_Op& ) {}
65  static shared_ptr<MPI_Op> op;
66  };
67 
68 
69  template<typename Type, typename BinaryFunction>
70  shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(static_cast<MPI_Op*>(0));
71 
// For (type, functor) pairs that MPI supports natively, specialize
// Generic_MPI_Op to hand back the built-in MPI_Op directly, skipping
// the MPI_Op_create detour of the generic template above.
#define ComposeMPIOp(type,func,op) \
  template<> \
  class Generic_MPI_Op<type, func<type> >{ \
  public:\
    static MPI_Op get(){ \
      return op; \
    } \
  private:\
    Generic_MPI_Op () {}\
    Generic_MPI_Op (const Generic_MPI_Op & ) {}\
  }
83 
84 
// std::plus maps onto the built-in MPI_SUM for all arithmetic types.
ComposeMPIOp(char, std::plus, MPI_SUM);
ComposeMPIOp(unsigned char, std::plus, MPI_SUM);
ComposeMPIOp(short, std::plus, MPI_SUM);
ComposeMPIOp(unsigned short, std::plus, MPI_SUM);
ComposeMPIOp(int, std::plus, MPI_SUM);
ComposeMPIOp(unsigned int, std::plus, MPI_SUM);
ComposeMPIOp(long, std::plus, MPI_SUM);
ComposeMPIOp(unsigned long, std::plus, MPI_SUM);
ComposeMPIOp(float, std::plus, MPI_SUM);
ComposeMPIOp(double, std::plus, MPI_SUM);
ComposeMPIOp(long double, std::plus, MPI_SUM);

// std::multiplies maps onto MPI_PROD.
ComposeMPIOp(char, std::multiplies, MPI_PROD);
ComposeMPIOp(unsigned char, std::multiplies, MPI_PROD);
ComposeMPIOp(short, std::multiplies, MPI_PROD);
ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
ComposeMPIOp(int, std::multiplies, MPI_PROD);
ComposeMPIOp(unsigned int, std::multiplies, MPI_PROD);
ComposeMPIOp(long, std::multiplies, MPI_PROD);
ComposeMPIOp(unsigned long, std::multiplies, MPI_PROD);
ComposeMPIOp(float, std::multiplies, MPI_PROD);
ComposeMPIOp(double, std::multiplies, MPI_PROD);
ComposeMPIOp(long double, std::multiplies, MPI_PROD);

// Dune's Min functor maps onto MPI_MIN.
ComposeMPIOp(char, Min, MPI_MIN);
ComposeMPIOp(unsigned char, Min, MPI_MIN);
ComposeMPIOp(short, Min, MPI_MIN);
ComposeMPIOp(unsigned short, Min, MPI_MIN);
ComposeMPIOp(int, Min, MPI_MIN);
ComposeMPIOp(unsigned int, Min, MPI_MIN);
ComposeMPIOp(long, Min, MPI_MIN);
ComposeMPIOp(unsigned long, Min, MPI_MIN);
ComposeMPIOp(float, Min, MPI_MIN);
ComposeMPIOp(double, Min, MPI_MIN);
ComposeMPIOp(long double, Min, MPI_MIN);

// Dune's Max functor maps onto MPI_MAX.
ComposeMPIOp(char, Max, MPI_MAX);
ComposeMPIOp(unsigned char, Max, MPI_MAX);
ComposeMPIOp(short, Max, MPI_MAX);
ComposeMPIOp(unsigned short, Max, MPI_MAX);
ComposeMPIOp(int, Max, MPI_MAX);
ComposeMPIOp(unsigned int, Max, MPI_MAX);
ComposeMPIOp(long, Max, MPI_MAX);
ComposeMPIOp(unsigned long, Max, MPI_MAX);
ComposeMPIOp(float, Max, MPI_MAX);
ComposeMPIOp(double, Max, MPI_MAX);
ComposeMPIOp(long double, Max, MPI_MAX);

// The helper macro is only needed for the specializations above.
#undef ComposeMPIOp
134 
135 
 //=======================================================
 // specialization of CollectiveCommunication for MPI_Comm
 //=======================================================
140 
144  template<>
145  class CollectiveCommunication<MPI_Comm>
146  {
147  public:
149  CollectiveCommunication (const MPI_Comm& c)
150  : communicator(c)
151  {
152  if(communicator!=MPI_COMM_NULL) {
153  MPI_Comm_rank(communicator,&me);
154  MPI_Comm_size(communicator,&procs);
155  }else{
156  procs=0;
157  me=-1;
158  }
159  }
160 
162  int rank () const
163  {
164  return me;
165  }
166 
168  int size () const
169  {
170  return procs;
171  }
172 
174  template<typename T>
175  T sum (T& in) const // MPI does not know about const :-(
176  {
177  T out;
178  allreduce<std::plus<T> >(&in,&out,1);
179  return out;
180  }
181 
183  template<typename T>
184  int sum (T* inout, int len) const
185  {
186  return allreduce<std::plus<T> >(inout,len);
187  }
188 
190  template<typename T>
191  T prod (T& in) const // MPI does not know about const :-(
192  {
193  T out;
194  allreduce<std::multiplies<T> >(&in,&out,1);
195  return out;
196  }
197 
199  template<typename T>
200  int prod (T* inout, int len) const
201  {
202  return allreduce<std::multiplies<T> >(inout,len);
203  }
204 
206  template<typename T>
207  T min (T& in) const // MPI does not know about const :-(
208  {
209  T out;
210  allreduce<Min<T> >(&in,&out,1);
211  return out;
212  }
213 
215  template<typename T>
216  int min (T* inout, int len) const
217  {
218  return allreduce<Min<T> >(inout,len);
219  }
220 
221 
223  template<typename T>
224  T max (T& in) const // MPI does not know about const :-(
225  {
226  T out;
227  allreduce<Max<T> >(&in,&out,1);
228  return out;
229  }
230 
232  template<typename T>
233  int max (T* inout, int len) const
234  {
235  return allreduce<Max<T> >(inout,len);
236  }
237 
239  int barrier () const
240  {
241  return MPI_Barrier(communicator);
242  }
243 
245  template<typename T>
246  int broadcast (T* inout, int len, int root) const
247  {
248  return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
249  }
250 
253  template<typename T>
254  int gather (T* in, T* out, int len, int root) const
255  {
256  return MPI_Gather(in,len,MPITraits<T>::getType(),
257  out,len,MPITraits<T>::getType(),
258  root,communicator);
259  }
260 
263  template<typename T>
264  int scatter (T* send, T* recv, int len, int root) const
265  {
266  return MPI_Scatter(send,len,MPITraits<T>::getType(),
267  recv,len,MPITraits<T>::getType(),
268  root,communicator);
269  }
270 
271  operator MPI_Comm () const
272  {
273  return communicator;
274  }
275 
277  template<typename T, typename T1>
278  int allgather(T* sbuf, int count, T1* rbuf) const
279  {
280  return MPI_Allgather(sbuf, count, MPITraits<T>::getType(),
281  rbuf, count, MPITraits<T1>::getType(),
282  communicator);
283  }
284 
286  template<typename BinaryFunction, typename Type>
287  int allreduce(Type* inout, int len) const
288  {
289  Type* out = new Type[len];
290  int ret = allreduce<BinaryFunction>(inout,out,len);
291  std::copy(out, out+len, inout);
292  delete[] out;
293  return ret;
294  }
295 
297  template<typename BinaryFunction, typename Type>
298  int allreduce(Type* in, Type* out, int len) const
299  {
300  return MPI_Allreduce(in, out, len, MPITraits<Type>::getType(),
302  }
303 
304  private:
305  MPI_Comm communicator;
306  int me;
307  int procs;
308  };
} // namespace Dune
310 
311 #endif
312 #endif
char c
Definition: alignment.hh:37
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: parallel/mpicollectivecommunication.hh:239
ComposeMPIOp(char, std::plus, MPI_SUM)
T min(T &in) const
Compute the minimum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:207
Implements a utility class that provides collective communication methods for sequential programs.
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: parallel/mpicollectivecommunication.hh:246
A few common exception classes.
Collective communication interface and sequential default implementation.
Definition: parallel/collectivecommunication.hh:71
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:216
Various helper classes derived from from std::binary_function for stl-style functional programming...
T prod(T &in) const
Compute the product of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:191
CollectiveCommunication(const MPI_Comm &c)
Instantiation using a MPI communicator.
Definition: parallel/mpicollectivecommunication.hh:149
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:200
int size() const
Number of processes in set, is greater than 0.
Definition: parallel/mpicollectivecommunication.hh:168
int gather(T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: parallel/mpicollectivecommunication.hh:254
int allreduce(Type *inout, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: parallel/mpicollectivecommunication.hh:287
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:184
T max(T &in) const
Compute the maximum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:224
int rank() const
Return rank, is between 0 and size()-1.
Definition: parallel/mpicollectivecommunication.hh:162
int allreduce(Type *in, Type *out, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: parallel/mpicollectivecommunication.hh:298
T sum(T &in) const
Compute the sum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:175
element_type * get() const
Access to the raw pointer, if you really want it.
Definition: shared_ptr.hh:147
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: bigunsignedint.hh:29
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:233
int scatter(T *send, T *recv, int len, int root) const
Scatter array from a root to all other tasks.
Definition: parallel/mpicollectivecommunication.hh:264
This file implements the class shared_ptr (a reference counting pointer), for those systems that don't have it in the standard library.
int allgather(T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distribute it to all.
Definition: parallel/mpicollectivecommunication.hh:278
Traits classes for mapping types onto MPI_Datatype.
Definition: parallel/mpicollectivecommunication.hh:39