Dune Core Modules (2.4.1)

mpicollectivecommunication.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
4#define DUNE_MPICOLLECTIVECOMMUNICATION_HH
5
14#include <iostream>
15#include <complex>
16#include <algorithm>
17#include <functional>
18
19#include <dune/common/exceptions.hh>
20#include <dune/common/binaryfunctions.hh>
21#include <dune/common/shared_ptr.hh>
22
23#include "collectivecommunication.hh"
24#include "mpitraits.hh"
25
26#if HAVE_MPI
27// MPI header
28#include <mpi.h>
29
30namespace Dune
31{
32
33 //=======================================================
34 // use singleton pattern and template specialization to
35 // generate MPI operations
36 //=======================================================
37
38 template<typename Type, typename BinaryFunction>
39 class Generic_MPI_Op
40 {
41
42 public:
43 static MPI_Op get ()
44 {
45 if (!op)
46 {
47 op = shared_ptr<MPI_Op>(new MPI_Op);
48 MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
49 }
50 return *op;
51 }
52 private:
53 static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
54 {
55 BinaryFunction func;
56
57 for (int i=0; i< *len; ++i, ++in, ++inout) {
58 Type temp;
59 temp = func(*in, *inout);
60 *inout = temp;
61 }
62 }
63 Generic_MPI_Op () {}
64 Generic_MPI_Op (const Generic_MPI_Op& ) {}
65 static shared_ptr<MPI_Op> op;
66 };
67
68
69 template<typename Type, typename BinaryFunction>
70 shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(static_cast<MPI_Op*>(0));
71
72#define ComposeMPIOp(type,func,op) \
73 template<> \
74 class Generic_MPI_Op<type, func<type> >{ \
75 public:\
76 static MPI_Op get(){ \
77 return op; \
78 } \
79 private:\
80 Generic_MPI_Op () {}\
81 Generic_MPI_Op (const Generic_MPI_Op & ) {}\
82 }
83
84
85 ComposeMPIOp(char, std::plus, MPI_SUM);
86 ComposeMPIOp(unsigned char, std::plus, MPI_SUM);
87 ComposeMPIOp(short, std::plus, MPI_SUM);
88 ComposeMPIOp(unsigned short, std::plus, MPI_SUM);
89 ComposeMPIOp(int, std::plus, MPI_SUM);
90 ComposeMPIOp(unsigned int, std::plus, MPI_SUM);
91 ComposeMPIOp(long, std::plus, MPI_SUM);
92 ComposeMPIOp(unsigned long, std::plus, MPI_SUM);
93 ComposeMPIOp(float, std::plus, MPI_SUM);
94 ComposeMPIOp(double, std::plus, MPI_SUM);
95 ComposeMPIOp(long double, std::plus, MPI_SUM);
96
97 ComposeMPIOp(char, std::multiplies, MPI_PROD);
98 ComposeMPIOp(unsigned char, std::multiplies, MPI_PROD);
99 ComposeMPIOp(short, std::multiplies, MPI_PROD);
100 ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
101 ComposeMPIOp(int, std::multiplies, MPI_PROD);
102 ComposeMPIOp(unsigned int, std::multiplies, MPI_PROD);
103 ComposeMPIOp(long, std::multiplies, MPI_PROD);
104 ComposeMPIOp(unsigned long, std::multiplies, MPI_PROD);
105 ComposeMPIOp(float, std::multiplies, MPI_PROD);
106 ComposeMPIOp(double, std::multiplies, MPI_PROD);
107 ComposeMPIOp(long double, std::multiplies, MPI_PROD);
108
109 ComposeMPIOp(char, Min, MPI_MIN);
110 ComposeMPIOp(unsigned char, Min, MPI_MIN);
111 ComposeMPIOp(short, Min, MPI_MIN);
112 ComposeMPIOp(unsigned short, Min, MPI_MIN);
113 ComposeMPIOp(int, Min, MPI_MIN);
114 ComposeMPIOp(unsigned int, Min, MPI_MIN);
115 ComposeMPIOp(long, Min, MPI_MIN);
116 ComposeMPIOp(unsigned long, Min, MPI_MIN);
117 ComposeMPIOp(float, Min, MPI_MIN);
118 ComposeMPIOp(double, Min, MPI_MIN);
119 ComposeMPIOp(long double, Min, MPI_MIN);
120
121 ComposeMPIOp(char, Max, MPI_MAX);
122 ComposeMPIOp(unsigned char, Max, MPI_MAX);
123 ComposeMPIOp(short, Max, MPI_MAX);
124 ComposeMPIOp(unsigned short, Max, MPI_MAX);
125 ComposeMPIOp(int, Max, MPI_MAX);
126 ComposeMPIOp(unsigned int, Max, MPI_MAX);
127 ComposeMPIOp(long, Max, MPI_MAX);
128 ComposeMPIOp(unsigned long, Max, MPI_MAX);
129 ComposeMPIOp(float, Max, MPI_MAX);
130 ComposeMPIOp(double, Max, MPI_MAX);
131 ComposeMPIOp(long double, Max, MPI_MAX);
132
133#undef ComposeMPIOp
134
135
136 //=======================================================
137 // specialization of CollectiveCommunication for MPI_Comm,
138 // i.e. the distributed-memory implementation based on MPI
139 //=======================================================
140
144 template<>
145 class CollectiveCommunication<MPI_Comm>
146 {
147 public:
149 CollectiveCommunication (const MPI_Comm& c = MPI_COMM_WORLD)
150 : communicator(c)
151 {
152 if(communicator!=MPI_COMM_NULL) {
153 int initialized = 0;
154 MPI_Initialized(&initialized);
155 if (!initialized)
156 DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI CollectiveCommunication!");
157 MPI_Comm_rank(communicator,&me);
158 MPI_Comm_size(communicator,&procs);
159 }else{
160 procs=0;
161 me=-1;
162 }
163 }
164
166 int rank () const
167 {
168 return me;
169 }
170
172 int size () const
173 {
174 return procs;
175 }
176
178 template<typename T>
179 T sum (T& in) const // MPI does not know about const :-(
180 {
181 T out;
182 allreduce<std::plus<T> >(&in,&out,1);
183 return out;
184 }
185
187 template<typename T>
188 int sum (T* inout, int len) const
189 {
190 return allreduce<std::plus<T> >(inout,len);
191 }
192
194 template<typename T>
195 T prod (T& in) const // MPI does not know about const :-(
196 {
197 T out;
198 allreduce<std::multiplies<T> >(&in,&out,1);
199 return out;
200 }
201
203 template<typename T>
204 int prod (T* inout, int len) const
205 {
206 return allreduce<std::multiplies<T> >(inout,len);
207 }
208
210 template<typename T>
211 T min (T& in) const // MPI does not know about const :-(
212 {
213 T out;
214 allreduce<Min<T> >(&in,&out,1);
215 return out;
216 }
217
219 template<typename T>
220 int min (T* inout, int len) const
221 {
222 return allreduce<Min<T> >(inout,len);
223 }
224
225
227 template<typename T>
228 T max (T& in) const // MPI does not know about const :-(
229 {
230 T out;
231 allreduce<Max<T> >(&in,&out,1);
232 return out;
233 }
234
236 template<typename T>
237 int max (T* inout, int len) const
238 {
239 return allreduce<Max<T> >(inout,len);
240 }
241
243 int barrier () const
244 {
245 return MPI_Barrier(communicator);
246 }
247
249 template<typename T>
250 int broadcast (T* inout, int len, int root) const
251 {
252 return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
253 }
254
257 template<typename T>
258 int gather (T* in, T* out, int len, int root) const
259 {
260 return MPI_Gather(in,len,MPITraits<T>::getType(),
261 out,len,MPITraits<T>::getType(),
262 root,communicator);
263 }
264
266 template<typename T>
267 int gatherv (T* in, int sendlen, T* out, int* recvlen, int* displ, int root) const
268 {
269 return MPI_Gatherv(in,sendlen,MPITraits<T>::getType(),
270 out,recvlen,displ,MPITraits<T>::getType(),
271 root,communicator);
272 }
273
276 template<typename T>
277 int scatter (T* send, T* recv, int len, int root) const
278 {
279 return MPI_Scatter(send,len,MPITraits<T>::getType(),
280 recv,len,MPITraits<T>::getType(),
281 root,communicator);
282 }
283
285 template<typename T>
286 int scatterv (T* send, int* sendlen, int* displ, T* recv, int recvlen, int root) const
287 {
288 return MPI_Scatterv(send,sendlen,displ,MPITraits<T>::getType(),
289 recv,recvlen,MPITraits<T>::getType(),
290 root,communicator);
291 }
292
293
294 operator MPI_Comm () const
295 {
296 return communicator;
297 }
298
300 template<typename T, typename T1>
301 int allgather(T* sbuf, int count, T1* rbuf) const
302 {
303 return MPI_Allgather(sbuf, count, MPITraits<T>::getType(),
304 rbuf, count, MPITraits<T1>::getType(),
305 communicator);
306 }
307
309 template<typename T>
310 int allgatherv (T* in, int sendlen, T* out, int* recvlen, int* displ) const
311 {
312 return MPI_Allgatherv(in,sendlen,MPITraits<T>::getType(),
313 out,recvlen,displ,MPITraits<T>::getType(),
314 communicator);
315 }
316
318 template<typename BinaryFunction, typename Type>
319 int allreduce(Type* inout, int len) const
320 {
321 Type* out = new Type[len];
322 int ret = allreduce<BinaryFunction>(inout,out,len);
323 std::copy(out, out+len, inout);
324 delete[] out;
325 return ret;
326 }
327
329 template<typename BinaryFunction, typename Type>
330 int allreduce(Type* in, Type* out, int len) const
331 {
332 return MPI_Allreduce(in, out, len, MPITraits<Type>::getType(),
333 (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
334 }
335
336 private:
337 MPI_Comm communicator;
338 int me;
339 int procs;
340 };
341} // namespace Dune
342
343#endif
344#endif
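Usage sketch (not part of the header): a minimal program exercising the class, assuming a dune-common 2.4 build with MPI enabled; MPIHelper and its getCommunicator() call are taken from dune/common/parallel/mpihelper.hh of that release.

#include <iostream>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

int main(int argc, char** argv)
{
  // MPIHelper::instance must run before the first CollectiveCommunication
  // is constructed; otherwise the constructor above throws ParallelError.
  Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
  Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());

  // Every rank contributes its rank number; sum() wraps MPI_Allreduce
  // with MPI_SUM, so all ranks receive the same total.
  int value = cc.rank();
  int total = cc.sum(value);

  if (cc.rank() == 0)
    std::cout << "sum of 0.." << cc.size()-1 << " = " << total << std::endl;

  return 0;
}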
Various helper classes derived from std::binary_function for STL-style functional programming.
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:220
int rank() const
Return the rank of the process; it is between 0 and size()-1.
Definition: mpicollectivecommunication.hh:166
int gather(T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: mpicollectivecommunication.hh:258
int allgatherv(T *in, int sendlen, T *out, int *recvlen, int *displ) const
Gathers data of variable length from all tasks and distributes it to all.
Definition: mpicollectivecommunication.hh:310
T max(T &in) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:228
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: mpicollectivecommunication.hh:250
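A broadcast sketch under the same setup as in the first example; the parameter value on rank 0 is hypothetical. broadcast() forwards to MPI_Bcast with the datatype supplied by MPITraits<T>.

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

int main(int argc, char** argv)
{
  Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
  Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());

  // Hypothetical run-time parameter known only on rank 0,
  // e.g. read from a configuration file there.
  double h = 0.0;
  if (cc.rank() == 0)
    h = 0.125;

  cc.broadcast(&h, 1, 0);   // afterwards every rank holds h == 0.125
  return 0;
}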
int allreduce(Type *inout, int len) const
Apply a binary reduction to each component of an array over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:319
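The BinaryFunction parameter is what reaches the Generic_MPI_Op machinery at the top of the file: the functors covered by ComposeMPIOp map to builtin MPI operations, while any other function object triggers a one-time MPI_Op_create. A sketch with a hypothetical functor:

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

// Hypothetical reduction: bitwise OR. It has no ComposeMPIOp
// specialization, so Generic_MPI_Op<int,BitOr>::get() registers a
// custom commutative MPI_Op via MPI_Op_create on first use.
struct BitOr
{
  int operator()(const int& a, const int& b) const { return a | b; }
};

int main(int argc, char** argv)
{
  Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
  Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());

  int flags = 1 << cc.rank();       // one bit per rank (assumes < 32 ranks)
  cc.allreduce<BitOr>(&flags, 1);   // in place: every rank gets the OR
  return 0;
}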
T prod(T &in) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:195
int scatterv(T *send, int *sendlen, int *displ, T *recv, int recvlen, int root) const
Scatter arrays of variable length from a root to all other tasks.
Definition: mpicollectivecommunication.hh:286
int scatter(T *send, T *recv, int len, int root) const
Scatter an array from a root to all other tasks.
Definition: mpicollectivecommunication.hh:277
int size() const
Number of processes in the set; it is greater than 0.
Definition: mpicollectivecommunication.hh:172
CollectiveCommunication(const MPI_Comm &c=MPI_COMM_WORLD)
Instantiation using an MPI communicator.
Definition: mpicollectivecommunication.hh:149
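The communicator argument need not be MPI_COMM_WORLD. A sketch wrapping a sub-communicator obtained from MPI_Comm_split; the even/odd grouping is hypothetical:

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

int main(int argc, char** argv)
{
  Dune::MPIHelper::instance(argc, argv);

  // Hypothetical grouping: split the world into even and odd ranks.
  int worldRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
  MPI_Comm sub;
  MPI_Comm_split(MPI_COMM_WORLD, worldRank % 2, worldRank, &sub);

  // rank() and size() of the wrapper now refer to the sub-group only.
  Dune::CollectiveCommunication<MPI_Comm> subcc(sub);
  int one = 1;
  int groupSize = subcc.sum(one);   // reduces within the group
  (void)groupSize;

  MPI_Comm_free(&sub);
  return 0;
}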
T min(T &in) const
Compute the minimum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:211
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:237
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: mpicollectivecommunication.hh:243
int allreduce(Type *in, Type *out, int len) const
Apply a binary reduction to each component of an array over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:330
T sum(T &in) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:179
int allgather(T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distributes it to all.
Definition: mpicollectivecommunication.hh:301
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:204
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process.
Definition: mpicollectivecommunication.hh:188
int gatherv(T *in, int sendlen, T *out, int *recvlen, int *displ, int root) const
Gather arrays of variable size on root task.
Definition: mpicollectivecommunication.hh:267
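A gatherv sketch (payload sizes hypothetical): the root first gathers the per-rank lengths, then derives displacements and sizes the receive buffer, mirroring the MPI_Gatherv calling convention:

#include <vector>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

int main(int argc, char** argv)
{
  Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
  Dune::CollectiveCommunication<MPI_Comm> cc(helper.getCommunicator());

  // Hypothetical payload: rank r contributes r+1 doubles.
  std::vector<double> mine(cc.rank() + 1, double(cc.rank()));
  int mylen = int(mine.size());

  // The root needs every contribution's length before it can size
  // the receive buffer and compute displacements.
  std::vector<int> lengths(cc.size(), 0);
  cc.gather(&mylen, lengths.data(), 1, 0);

  std::vector<int> displ(cc.size(), 0);
  for (int i = 1; i < cc.size(); ++i)
    displ[i] = displ[i-1] + lengths[i-1];

  std::vector<double> all;            // stays empty on non-root ranks
  if (cc.rank() == 0)
    all.resize(displ.back() + lengths.back());

  // Receive arguments are only read on the root.
  cc.gatherv(mine.data(), mylen, all.data(), lengths.data(), displ.data(), 0);
  return 0;
}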
Collective communication interface and sequential default implementation.
Definition: collectivecommunication.hh:73
Default exception if an error occurred in the parallel communication of the program.
Definition: exceptions.hh:312
Implements a utility class that provides collective communication methods for sequential programs.
A few common exception classes.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:243
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignment.hh:10
This file implements the class shared_ptr (a reference counting pointer), for those systems that don't have it in the standard library.
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:37