3 #ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
4 #define DUNE_MPICOLLECTIVECOMMUNICATION_HH
38 template<typename Type, typename BinaryFunction>
48 MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation, true, op.get());
53 static void operation (Type *in, Type *inout, int *len, MPI_Datatype *dptr)
57 for (int i=0; i< *len; ++i, ++in, ++inout) {
59 temp = func(*in, *inout);
64 Generic_MPI_Op (const Generic_MPI_Op& ) {}
65 static shared_ptr<MPI_Op> op;
69 template<typename Type, typename BinaryFunction>
70 shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(static_cast<MPI_Op*>(0));
72 #define ComposeMPIOp(type,func,op) \
74 class Generic_MPI_Op<type, func<type> >{ \
76 static MPI_Op get(){ \
81 Generic_MPI_Op (const Generic_MPI_Op & ) {}\
100 ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
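For built-in element types combined with the standard functors, the ComposeMPIOp specializations let get() return the corresponding predefined MPI operation, so no user-defined MPI_Op has to be created. The macro body is only partially visible in the listing above; a plausible reading of the expansion for one pair is the following (an assumption, details may differ):

// Hypothetical reconstruction of the expansion of
// ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
template<>
class Generic_MPI_Op<unsigned short, std::multiplies<unsigned short> > {
public:
  static MPI_Op get()
  {
    return MPI_PROD;   // hand back the builtin reduction directly
  }
private:
  Generic_MPI_Op () {}
  Generic_MPI_Op (const Generic_MPI_Op& ) {}
};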
152 if(communicator!=MPI_COMM_NULL) {
153 MPI_Comm_rank(communicator,&me);
154 MPI_Comm_size(communicator,&procs);
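The constructor caches rank and size of the wrapped communicator. A minimal usage sketch, assuming MPI has already been initialized (for example by MPI_Init or Dune::MPIHelper); the variable names are illustrative only:

#include <mpi.h>
#include <dune/common/parallel/mpicollectivecommunication.hh>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  {
    // Wrap an existing communicator; rank and size are queried once on construction.
    Dune::CollectiveCommunication<MPI_Comm> cc(MPI_COMM_WORLD);
    int me    = cc.rank();   // between 0 and cc.size()-1
    int procs = cc.size();   // number of processes, greater than 0
    (void)me; (void)procs;
  }
  MPI_Finalize();
  return 0;
}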
178 allreduce<std::plus<T> >(&in,&out,1);
184 int sum (T* inout, int len) const
186 return allreduce<std::plus<T> >(inout,len);
194 allreduce<std::multiplies<T> >(&in,&out,1);
200 int prod (T* inout, int len) const
202 return allreduce<std::multiplies<T> >(inout,len);
210 allreduce<Min<T> >(&in,&out,1);
216 int min (T* inout, int len) const
218 return allreduce<Min<T> >(inout,len);
227 allreduce<Max<T> >(&in,&out,1);
233 int max (T* inout, int len) const
235 return allreduce<Max<T> >(inout,len);
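All four reductions follow the same pattern: the scalar overloads return the reduced value, while the array overloads reduce component-wise in place and return the MPI error code. A hedged usage sketch (the helper function and variable names are illustrative only):

#include <dune/common/parallel/mpicollectivecommunication.hh>

// Illustrative only: reduce one value per rank in several ways.
void reductionExamples(const Dune::CollectiveCommunication<MPI_Comm>& cc)
{
  double local = static_cast<double>(cc.rank() + 1);

  double total   = cc.sum(local);    // sum over all processes
  double product = cc.prod(local);   // product over all processes
  double lowest  = cc.min(local);    // global minimum
  double highest = cc.max(local);    // global maximum

  // Array variants reduce each component in place and return the MPI error code.
  double values[3] = { total, product, lowest + highest };
  int err = cc.sum(values, 3);
  (void)err;
}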
241 return MPI_Barrier(communicator);
254 int gather (T* in, T* out, int len, int root) const
264 int scatter (T* send, T* recv, int len, int root) const
271 operator MPI_Comm () const
277 template<typename T, typename T1>
286 template<typename BinaryFunction, typename Type>
289 Type* out = new Type[len];
290 int ret = allreduce<BinaryFunction>(inout,out,len);
291 std::copy(out, out+len, inout);
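The in-place overload shown here allocates a temporary buffer, forwards to the two-buffer allreduce, and copies the result back. Any binary functor with a matching MPI operation can be used; a hedged sketch using the Min/Max helpers from binaryfunctions.hh (illustrative only):

#include <dune/common/binaryfunctions.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>

// Illustrative only: component-wise global maximum, computed in place.
void allreduceExample(const Dune::CollectiveCommunication<MPI_Comm>& cc)
{
  double bounds[2] = { 1.0 * cc.rank(), 2.0 * cc.rank() };
  cc.allreduce<Dune::Max<double> >(bounds, 2);   // every rank ends up with the global maxima
}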
297 template<typename BinaryFunction, typename Type>
305 MPI_Comm communicator;
char c
Definition: alignment.hh:37
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: parallel/mpicollectivecommunication.hh:239
ComposeMPIOp(char, std::plus, MPI_SUM)
T min(T &in) const
Compute the minimum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:207
Implements a utility class that provides collective communication methods for sequential programs...
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: parallel/mpicollectivecommunication.hh:246
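broadcast overwrites inout on every rank except root with root's data. A short, illustrative call (the function and variable names are assumptions):

#include <dune/common/parallel/mpicollectivecommunication.hh>

// Illustrative only: rank 0 chooses a parameter and distributes it to everyone.
void broadcastExample(const Dune::CollectiveCommunication<MPI_Comm>& cc)
{
  double dt = (cc.rank() == 0) ? 0.01 : 0.0;
  cc.broadcast(&dt, 1, 0);   // afterwards every rank holds rank 0's value of dt
}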
A few common exception classes.
Collective communication interface and sequential default implementation.
Definition: parallel/collectivecommunication.hh:71
int min(T *inout, int len) const
Compute the minimum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:216
Various helper classes derived from std::binary_function for STL-style functional programming...
T prod(T &in) const
Compute the product of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:191
CollectiveCommunication(const MPI_Comm &c)
Instantiation using an MPI communicator.
Definition: parallel/mpicollectivecommunication.hh:149
int prod(T *inout, int len) const
Compute the product of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:200
int size() const
Number of processes in the set; always greater than 0.
Definition: parallel/mpicollectivecommunication.hh:168
int gather(T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: parallel/mpicollectivecommunication.hh:254
int allreduce(Type *inout, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: parallel/mpicollectivecommunication.hh:287
int sum(T *inout, int len) const
Compute the sum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:184
T max(T &in) const
Compute the maximum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:224
int rank() const
Return rank; it is between 0 and size()-1.
Definition: parallel/mpicollectivecommunication.hh:162
int allreduce(Type *in, Type *out, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: parallel/mpicollectivecommunication.hh:298
T sum(T &in) const
Compute the sum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:175
element_type * get() const
Access to the raw pointer, if you really want it.
Definition: shared_ptr.hh:147
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: bigunsignedint.hh:29
int max(T *inout, int len) const
Compute the maximum of the argument over all processes and return the result in every process...
Definition: parallel/mpicollectivecommunication.hh:233
int scatter(T *send, T *recv, int len, int root) const
Scatter array from a root to all other tasks.
Definition: parallel/mpicollectivecommunication.hh:264
This file implements the class shared_ptr (a reference counting pointer), for those systems that don'...
int allgather(T *sbuf, int count, T1 *rbuf) const
Gathers data from all tasks and distributes it to all.
Definition: parallel/mpicollectivecommunication.hh:278
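allgather combines gather and broadcast: afterwards every rank holds the concatenation of all contributions. A hedged sketch (names are illustrative):

#include <vector>
#include <dune/common/parallel/mpicollectivecommunication.hh>

// Illustrative only: collect one value per rank on every rank.
void allgatherExample(const Dune::CollectiveCommunication<MPI_Comm>& cc)
{
  int mine = cc.rank();
  std::vector<int> all(cc.size());
  cc.allgather(&mine, 1, all.data());   // all[i] == i on every rank afterwards
}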
Traits classes for mapping types onto MPI_Datatype.
Definition: parallel/mpicollectivecommunication.hh:39
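The wrappers above rely on MPITraits to translate the C++ element type into the MPI_Datatype they pass to the MPI calls. Assuming the usual getType() interface of this traits class (and noting that the header location may differ between DUNE versions), the mapping can be queried directly:

#include <mpi.h>
#include <dune/common/parallel/mpitraits.hh>   // assumed location of MPITraits

// Illustrative only: look up the datatype the wrappers use for double payloads.
MPI_Datatype datatypeForDouble()
{
  return Dune::MPITraits<double>::getType();   // expected to equal MPI_DOUBLE
}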