DUNE PDELab (2.8)

communication.hh
Go to the documentation of this file.
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_COMMON_PARALLEL_COMMUNICATION_HH
4#define DUNE_COMMON_PARALLEL_COMMUNICATION_HH
#include <iostream>
#include <complex>
#include <algorithm>
#include <vector>
#include <utility>

#include <dune/common/exceptions.hh>
#include <dune/common/parallel/future.hh>
20
40namespace Dune
41{
42
43 /* define some type that definitely differs from MPI_Comm */
44 struct No_Comm {};
45
50 inline bool operator==(const No_Comm&, const No_Comm&)
51 {
52 return true;
53 }
54
59 inline bool operator!=(const No_Comm&, const No_Comm&)
60 {
61 return false;
62 }
63
96 template<typename Communicator>
98 {
99 public:
102 {}
103
108 Communication (const Communicator&)
109 {}
110
112 int rank () const
113 {
114 return 0;
115 }
116
118 operator No_Comm() const
119 {
120 return {};
121 }
122
124 int size () const
125 {
126 return 1;
127 }
128
132 template<class T>
133 int send([[maybe_unused]] const T& data,
134 [[maybe_unused]] int dest_rank,
135 [[maybe_unused]] int tag)
136 {
137 DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
138 }
139
143 template<class T>
144 PseudoFuture<T> isend([[maybe_unused]] const T&& data,
145 [[maybe_unused]] int dest_rank,
146 [[maybe_unused]] int tag)
147 {
148 DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
149 }
150
154 template<class T>
155 T recv([[maybe_unused]] T&& data,
156 [[maybe_unused]] int source_rank,
157 [[maybe_unused]] int tag,
158 [[maybe_unused]] void* status = 0)
159 {
160 DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
161 }
162
166 template<class T>
167 PseudoFuture<T> irecv([[maybe_unused]] T&& data,
168 [[maybe_unused]] int source_rank,
169 [[maybe_unused]] int tag)
170 {
171 DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
172 }
173
174 template<class T>
175 T rrecv([[maybe_unused]] T&& data,
176 [[maybe_unused]] int source_rank,
177 [[maybe_unused]] int tag,
178 [[maybe_unused]] void* status = 0) const
179 {
180 DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
181 }
185 template<typename T>
186 T sum (const T& in) const
187 {
188 return in;
189 }
190
196 template<typename T>
197 int sum ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
198 {
199 return 0;
200 }
201
205 template<typename T>
206 T prod (const T& in) const
207 {
208 return in;
209 }
210
216 template<typename T>
217 int prod ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
218 {
219 return 0;
220 }
221
225 template<typename T>
226 T min (const T& in) const
227 {
228 return in;
229 }
230
236 template<typename T>
237 int min ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
238 {
239 return 0;
240 }
241
245 template<typename T>
246 T max (const T& in) const
247 {
248 return in;
249 }
250
256 template<typename T>
257 int max ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
258 {
259 return 0;
260 }
261
265 int barrier () const
266 {
267 return 0;
268 }
269
274 {
275 return {true}; // return a valid future
276 }
277
281 template<typename T>
282 int broadcast ([[maybe_unused]] T* inout,
283 [[maybe_unused]] int len,
284 [[maybe_unused]] int root) const
285 {
286 return 0;
287 }
288
292 template<class T>
293 PseudoFuture<T> ibroadcast(T&& data, int root) const{
294 return {std::forward<T>(data)};
295 }
296
297
310 template<typename T>
311 int gather (const T* in, T* out, int len, [[maybe_unused]] int root) const // note out must have same size as in
312 {
313 for (int i=0; i<len; i++)
314 out[i] = in[i];
315 return 0;
316 }
317
321 template<class TIN, class TOUT = std::vector<TIN>>
322 PseudoFuture<TOUT> igather(TIN&& data_in, TOUT&& data_out, int root){
323 *(data_out.begin()) = std::forward<TIN>(data_in);
324 return {std::forward<TOUT>(data_out)};
325 }
326
327
347 template<typename T>
348 int gatherv (const T* in,
349 int sendDataLen,
350 T* out,
351 [[maybe_unused]] int* recvDataLen,
352 int* displ,
353 [[maybe_unused]] int root) const
354 {
355 for (int i=*displ; i<sendDataLen; i++)
356 out[i] = in[i];
357 return 0;
358 }
359
373 template<typename T>
374 int scatter (const T* sendData, T* recvData, int len, [[maybe_unused]] int root) const // note out must have same size as in
375 {
376 for (int i=0; i<len; i++)
377 recvData[i] = sendData[i];
378 return 0;
379 }
380
384 template<class TIN, class TOUT = TIN>
385 PseudoFuture<TOUT> iscatter(TIN&& data_in, TOUT&& data_out, int root){
386 data_out = *(std::forward<TIN>(data_in).begin());
387 return {std::forward<TOUT>(data_out)};
388 }
389
408 template<typename T>
409 int scatterv (const T* sendData,int* sendDataLen, int* displ, T* recvData,
410 [[maybe_unused]] int recvDataLen, [[maybe_unused]] int root) const
411 {
412 for (int i=*displ; i<*sendDataLen; i++)
413 recvData[i] = sendData[i];
414 return 0;
415 }
416
430 template<typename T>
431 int allgather(const T* sbuf, int count, T* rbuf) const
432 {
433 for(const T* end=sbuf+count; sbuf < end; ++sbuf, ++rbuf)
434 *rbuf=*sbuf;
435 return 0;
436 }
437
442 template<class TIN, class TOUT = TIN>
443 PseudoFuture<TOUT> iallgather(TIN&& data_in, TOUT&& data_out){
444 return {std::forward<TOUT>(data_out)};
445 }
446
463 template<typename T>
464 int allgatherv (const T* in, int sendDataLen, T* out, [[maybe_unused]] int* recvDataLen, int* displ) const
465 {
466 for (int i=*displ; i<sendDataLen; i++)
467 out[i] = in[i];
468 return 0;
469 }
470
483 template<typename BinaryFunction, typename Type>
484 int allreduce([[maybe_unused]] Type* inout, [[maybe_unused]] int len) const
485 {
486 return 0;
487 }
488
493 template<class BinaryFunction, class TIN, class TOUT = TIN>
494 PseudoFuture<TOUT> iallreduce(TIN&& data_in, TOUT&& data_out){
495 data_out = std::forward<TIN>(data_in);
496 return {std::forward<TOUT>(data_out)};
497 }
498
503 template<class BinaryFunction, class T>
505 return {std::forward<T>(data)};
506 }
507
508
522 template<typename BinaryFunction, typename Type>
523 int allreduce(const Type* in, Type* out, int len) const
524 {
525 std::copy(in, in+len, out);
526 return 0;
527 }
528
529 };
530
531 template<class T>
532 using CollectiveCommunication
533 // Will be deprecated after the 2.7 release
534 //[[deprecated("CollectiveCommunication is deprecated. Use Communication instead.")]]
535 = Communication<T>;
536}
537
538#endif
helper classes to provide unique types for standard functions
Collective communication interface and sequential default implementation.
Definition: communication.hh:98
PseudoFuture< TOUT > iallreduce(TIN &&data_in, TOUT &&data_out)
Compute something over all processes nonblocking.
Definition: communication.hh:494
int send(const T &data, int dest_rank, int tag)
Sends the data to the dest_rank.
Definition: communication.hh:133
int allreduce(const Type *in, Type *out, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: communication.hh:523
T max(const T &in) const
Compute the maximum of the argument over all processes and return the result in every process....
Definition: communication.hh:246
int rank() const
Return rank, is between 0 and size()-1.
Definition: communication.hh:112
T sum(const T &in) const
Compute the sum of the argument over all processes and return the result in every process....
Definition: communication.hh:186
int scatterv(const T *sendData, int *sendDataLen, int *displ, T *recvData, int recvDataLen, int root) const
Scatter arrays of variable length from a root to all other tasks.
Definition: communication.hh:409
int prod(T *inout, int len) const
Compute the product over all processes for each component of an array and return the result in every ...
Definition: communication.hh:217
T recv(T &&data, int source_rank, int tag, void *status=0)
Receives the data from the source_rank.
Definition: communication.hh:155
PseudoFuture< T > isend(const T &&data, int dest_rank, int tag)
Sends the data to the dest_rank nonblocking.
Definition: communication.hh:144
PseudoFuture< void > ibarrier() const
Nonblocking barrier.
Definition: communication.hh:273
int allreduce(Type *inout, int len) const
Compute something over all processes for each component of an array and return the result in every pr...
Definition: communication.hh:484
int size() const
Number of processes in set, is greater than 0.
Definition: communication.hh:124
int sum(T *inout, int len) const
Compute the sum over all processes for each component of an array and return the result in every proc...
Definition: communication.hh:197
int allgatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ) const
Gathers data of variable length from all tasks and distribute it to all.
Definition: communication.hh:464
T min(const T &in) const
Compute the minimum of the argument over all processes and return the result in every process....
Definition: communication.hh:226
PseudoFuture< T > irecv(T &&data, int source_rank, int tag)
Receives the data from the source_rank nonblocking.
Definition: communication.hh:167
int gatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ, int root) const
Gather arrays of variable size on root task.
Definition: communication.hh:348
PseudoFuture< T > ibroadcast(T &&data, int root) const
Distribute an array from the process with rank root to all other processes nonblocking.
Definition: communication.hh:293
int allgather(const T *sbuf, int count, T *rbuf) const
Gathers data from all tasks and distribute it to all.
Definition: communication.hh:431
int scatter(const T *sendData, T *recvData, int len, int root) const
Scatter array from a root to all other tasks.
Definition: communication.hh:374
int gather(const T *in, T *out, int len, int root) const
Gather arrays on root task.
Definition: communication.hh:311
PseudoFuture< T > iallreduce(T &&data)
Compute something over all processes nonblocking and in-place.
Definition: communication.hh:504
Communication(const Communicator &)
Constructor with a given communicator.
Definition: communication.hh:108
int max(T *inout, int len) const
Compute the maximum over all processes for each component of an array and return the result in every ...
Definition: communication.hh:257
T prod(const T &in) const
Compute the product of the argument over all processes and return the result in every process....
Definition: communication.hh:206
int broadcast(T *inout, int len, int root) const
Distribute an array from the process with rank root to all other processes.
Definition: communication.hh:282
PseudoFuture< TOUT > iscatter(TIN &&data_in, TOUT &&data_out, int root)
Scatter array from a root to all other tasks, nonblocking.
Definition: communication.hh:385
int min(T *inout, int len) const
Compute the minimum over all processes for each component of an array and return the result in every ...
Definition: communication.hh:237
int barrier() const
Wait until all processes have arrived at this point in the program.
Definition: communication.hh:265
PseudoFuture< TOUT > igather(TIN &&data_in, TOUT &&data_out, int root)
Gather arrays on root task nonblocking.
Definition: communication.hh:322
PseudoFuture< TOUT > iallgather(TIN &&data_in, TOUT &&data_out)
Gathers data from all tasks and distribute it to all nonblocking.
Definition: communication.hh:443
Communication()
Construct default object.
Definition: communication.hh:101
Default exception if an error in the parallel communication of the program occurred.
Definition: exceptions.hh:285
A wrapper-class for a object which is ready immediately.
Definition: future.hh:120
A few common exception classes.
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
EnableIfInterOperable< T1, T2, bool >::type operator==(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for equality.
Definition: iteratorfacades.hh:235
EnableIfInterOperable< T1, T2, bool >::type operator!=(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for inequality.
Definition: iteratorfacades.hh:257
Dune namespace.
Definition: alignedallocator.hh:11
Creative Commons License   |  Legal Statements / Impressum  |  Hosted by TU Dresden  |  generated with Hugo v0.111.3 (Dec 21, 23:30, 2024)