Dune Core Modules (2.6.0)

mpicollectivecommunication.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
#define DUNE_MPICOLLECTIVECOMMUNICATION_HH

#if HAVE_MPI

#include <algorithm>
#include <functional>
#include <memory>

#include <mpi.h>

#include <dune/common/binaryfunctions.hh>
#include <dune/common/exceptions.hh>
#include "collectivecommunication.hh"
#include "mpitraits.hh"

namespace Dune
{

  //=======================================================
  // use singleton pattern and template specialization to
  // generate MPI operations
  //=======================================================

  template<typename Type, typename BinaryFunction>
  class Generic_MPI_Op
  {

  public:
    static MPI_Op get ()
    {
      if (!op)
      {
        op = std::make_shared<MPI_Op>();
        // register a commutative user-defined operation that applies BinaryFunction
        MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
      }
      return *op;
    }
  private:
    // element-wise reduction kernel handed to MPI_Op_create
    static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
    {
      BinaryFunction func;

      for (int i=0; i< *len; ++i, ++in, ++inout) {
        Type temp;
        temp = func(*in, *inout);
        *inout = temp;
      }
    }
    Generic_MPI_Op () {}
    Generic_MPI_Op (const Generic_MPI_Op& ) {}
    static std::shared_ptr<MPI_Op> op;
  };


  template<typename Type, typename BinaryFunction>
  std::shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = nullptr;

#define ComposeMPIOp(type,func,op) \
  template<> \
  class Generic_MPI_Op<type, func<type> >{ \
  public: \
    static MPI_Op get(){ \
      return op; \
    } \
  private: \
    Generic_MPI_Op () {} \
    Generic_MPI_Op (const Generic_MPI_Op & ) {} \
  }


  ComposeMPIOp(char, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned char, std::plus, MPI_SUM);
  ComposeMPIOp(short, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned short, std::plus, MPI_SUM);
  ComposeMPIOp(int, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned int, std::plus, MPI_SUM);
  ComposeMPIOp(long, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned long, std::plus, MPI_SUM);
  ComposeMPIOp(float, std::plus, MPI_SUM);
  ComposeMPIOp(double, std::plus, MPI_SUM);
  ComposeMPIOp(long double, std::plus, MPI_SUM);

  ComposeMPIOp(char, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned char, std::multiplies, MPI_PROD);
  ComposeMPIOp(short, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
  ComposeMPIOp(int, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned int, std::multiplies, MPI_PROD);
  ComposeMPIOp(long, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned long, std::multiplies, MPI_PROD);
  ComposeMPIOp(float, std::multiplies, MPI_PROD);
  ComposeMPIOp(double, std::multiplies, MPI_PROD);
  ComposeMPIOp(long double, std::multiplies, MPI_PROD);

  ComposeMPIOp(char, Min, MPI_MIN);
  ComposeMPIOp(unsigned char, Min, MPI_MIN);
  ComposeMPIOp(short, Min, MPI_MIN);
  ComposeMPIOp(unsigned short, Min, MPI_MIN);
  ComposeMPIOp(int, Min, MPI_MIN);
  ComposeMPIOp(unsigned int, Min, MPI_MIN);
  ComposeMPIOp(long, Min, MPI_MIN);
  ComposeMPIOp(unsigned long, Min, MPI_MIN);
  ComposeMPIOp(float, Min, MPI_MIN);
  ComposeMPIOp(double, Min, MPI_MIN);
  ComposeMPIOp(long double, Min, MPI_MIN);

  ComposeMPIOp(char, Max, MPI_MAX);
  ComposeMPIOp(unsigned char, Max, MPI_MAX);
  ComposeMPIOp(short, Max, MPI_MAX);
  ComposeMPIOp(unsigned short, Max, MPI_MAX);
  ComposeMPIOp(int, Max, MPI_MAX);
  ComposeMPIOp(unsigned int, Max, MPI_MAX);
  ComposeMPIOp(long, Max, MPI_MAX);
  ComposeMPIOp(unsigned long, Max, MPI_MAX);
  ComposeMPIOp(float, Max, MPI_MAX);
  ComposeMPIOp(double, Max, MPI_MAX);
  ComposeMPIOp(long double, Max, MPI_MAX);

#undef ComposeMPIOp
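
  // Illustrative sketch (not part of the original header; the names
  // BitOr/local/global are hypothetical): any default-constructible binary
  // function object works with the generic template above. For type/functor
  // combinations without a ComposeMPIOp specialization, Generic_MPI_Op
  // lazily creates and caches a commutative user-defined MPI_Op:
  //
  //   struct BitOr {
  //     int operator() (int a, int b) const { return a | b; }
  //   };
  //
  //   int local = /* rank-local flags */ 0, global = 0;
  //   MPI_Allreduce(&local, &global, 1, MPI_INT,
  //                 Dune::Generic_MPI_Op<int, BitOr>::get(), MPI_COMM_WORLD);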


  //=======================================================
  // specialization of CollectiveCommunication for MPI
  //=======================================================

  //! Specialization of CollectiveCommunication for MPI
  template<>
  class CollectiveCommunication<MPI_Comm>
  {
  public:
    //! Instantiation using an MPI communicator
    CollectiveCommunication (const MPI_Comm& c = MPI_COMM_WORLD)
      : communicator(c)
    {
      if(communicator!=MPI_COMM_NULL) {
        int initialized = 0;
        MPI_Initialized(&initialized);
        if (!initialized)
          DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI CollectiveCommunication!");
        MPI_Comm_rank(communicator,&me);
        MPI_Comm_size(communicator,&procs);
      }
      else {
        procs=0;
        me=-1;
      }
    }

    //! Return rank, which is between 0 and size()-1
    int rank () const
    {
      return me;
    }

    //! Number of processes in the set, always greater than 0
    int size () const
    {
      return procs;
    }

    //! Compute the sum of the argument over all processes and
    //! return the result in every process
    template<typename T>
    T sum (const T& in) const
    {
      T out;
      allreduce<std::plus<T> >(&in,&out,1);
      return out;
    }

    //! Compute the sum over all processes for each component of an array
    //! and return the result in every process
    template<typename T>
    int sum (T* inout, int len) const
    {
      return allreduce<std::plus<T> >(inout,len);
    }

    //! Compute the product of the argument over all processes and
    //! return the result in every process
    template<typename T>
    T prod (const T& in) const
    {
      T out;
      allreduce<std::multiplies<T> >(&in,&out,1);
      return out;
    }

    //! Compute the product over all processes for each component of an array
    //! and return the result in every process
    template<typename T>
    int prod (T* inout, int len) const
    {
      return allreduce<std::multiplies<T> >(inout,len);
    }

    //! Compute the minimum of the argument over all processes and
    //! return the result in every process
    template<typename T>
    T min (const T& in) const
    {
      T out;
      allreduce<Min<T> >(&in,&out,1);
      return out;
    }

    //! Compute the minimum over all processes for each component of an array
    //! and return the result in every process
    template<typename T>
    int min (T* inout, int len) const
    {
      return allreduce<Min<T> >(inout,len);
    }

    //! Compute the maximum of the argument over all processes and
    //! return the result in every process
    template<typename T>
    T max (const T& in) const
    {
      T out;
      allreduce<Max<T> >(&in,&out,1);
      return out;
    }

    //! Compute the maximum over all processes for each component of an array
    //! and return the result in every process
    template<typename T>
    int max (T* inout, int len) const
    {
      return allreduce<Max<T> >(inout,len);
    }

    //! Wait until all processes have arrived at this point in the program
    int barrier () const
    {
      return MPI_Barrier(communicator);
    }

    //! Distribute an array from the process with rank root to all other processes
    template<typename T>
    int broadcast (T* inout, int len, int root) const
    {
      return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
    }

    //! Gather arrays on the root task
    template<typename T>
    int gather (const T* in, T* out, int len, int root) const
    {
      return MPI_Gather(const_cast<T*>(in),len,MPITraits<T>::getType(),
                        out,len,MPITraits<T>::getType(),
                        root,communicator);
    }

    //! Gather arrays of variable size on the root task
    template<typename T>
    int gatherv (const T* in, int sendlen, T* out, int* recvlen, int* displ, int root) const
    {
      return MPI_Gatherv(const_cast<T*>(in),sendlen,MPITraits<T>::getType(),
                         out,recvlen,displ,MPITraits<T>::getType(),
                         root,communicator);
    }

    //! Scatter an array from a root to all other tasks
    template<typename T>
    int scatter (const T* send, T* recv, int len, int root) const
    {
      return MPI_Scatter(const_cast<T*>(send),len,MPITraits<T>::getType(),
                         recv,len,MPITraits<T>::getType(),
                         root,communicator);
    }

    //! Scatter arrays of variable length from a root to all other tasks
    template<typename T>
    int scatterv (const T* send, int* sendlen, int* displ, T* recv, int recvlen, int root) const
    {
      return MPI_Scatterv(const_cast<T*>(send),sendlen,displ,MPITraits<T>::getType(),
                          recv,recvlen,MPITraits<T>::getType(),
                          root,communicator);
    }

    //! Conversion to the underlying MPI communicator
    operator MPI_Comm () const
    {
      return communicator;
    }

    //! Gathers data from all tasks and distributes it to all
    template<typename T, typename T1>
    int allgather(const T* sbuf, int count, T1* rbuf) const
    {
      return MPI_Allgather(const_cast<T*>(sbuf), count, MPITraits<T>::getType(),
                           rbuf, count, MPITraits<T1>::getType(),
                           communicator);
    }

    //! Gathers data of variable length from all tasks and distributes it to all
    template<typename T>
    int allgatherv (const T* in, int sendlen, T* out, int* recvlen, int* displ) const
    {
      return MPI_Allgatherv(const_cast<T*>(in),sendlen,MPITraits<T>::getType(),
                            out,recvlen,displ,MPITraits<T>::getType(),
                            communicator);
    }

    //! Compute a reduction over all processes for each component of an array,
    //! in place, and return the result in every process
    template<typename BinaryFunction, typename Type>
    int allreduce(Type* inout, int len) const
    {
      // MPI_Allreduce requires distinct send and receive buffers,
      // so reduce into a temporary array and copy the result back
      Type* out = new Type[len];
      int ret = allreduce<BinaryFunction>(inout,out,len);
      std::copy(out, out+len, inout);
      delete[] out;
      return ret;
    }

    //! Compute a reduction over all processes for each component of an array
    //! and return the result in every process
    template<typename BinaryFunction, typename Type>
    int allreduce(const Type* in, Type* out, int len) const
    {
      return MPI_Allreduce(const_cast<Type*>(in), out, len, MPITraits<Type>::getType(),
                           (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
    }

  private:
    MPI_Comm communicator;
    int me;
    int procs;
  };

} // namespace Dune

#endif // HAVE_MPI

#endif
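
In application code this class is usually obtained via MPIHelper rather than constructed by hand; note that the constructor above throws ParallelError if MPI has not been initialized. The following minimal sketch shows how the collective methods could be exercised (assumptions: MPIHelper lives in dune/common/parallel/mpihelper.hh as in this release, and the variable names local, total, and big are hypothetical):

    #include <iostream>
    #include <dune/common/parallel/mpihelper.hh>

    int main(int argc, char** argv)
    {
      // initializes MPI; required before constructing CollectiveCommunication
      Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);

      Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());

      double local = cc.rank() + 1.0;
      double total = cc.sum(local);   // same result on every rank
      double big   = cc.max(local);   // equals cc.size() on every rank

      cc.barrier();
      if (cc.rank() == 0)
        std::cout << "sum = " << total << ", max = " << big << std::endl;
      return 0;
    }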