Dune Core Modules (2.10.0)

remoteindices.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
4// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
5#ifndef DUNE_COMMON_PARALLEL_REMOTEINDICES_HH
6#define DUNE_COMMON_PARALLEL_REMOTEINDICES_HH
7
8#if HAVE_MPI
9
10#include <cassert>
11#include <iostream>
12#include <ostream>
13#include <map>
14#include <memory>
15#include <set>
16#include <tuple>
17#include <utility>
18#include <vector>
19
20#include <mpi.h>
21
22#include <dune/common/exceptions.hh>
23#include <dune/common/sllist.hh>
24#include <dune/common/stdstreams.hh>
25#include <dune/common/parallel/indexset.hh>
26#include <dune/common/parallel/mpitraits.hh>
27#include <dune/common/parallel/plocalindex.hh>
28
29namespace Dune {
41 template<typename TG, typename TA>
42 class MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >
43 {
44 public:
45 inline static MPI_Datatype getType();
46 private:
47 static MPI_Datatype type;
48 };
49
50
51 template<typename T, typename A>
52 class RemoteIndices;
53
54 template<typename T1, typename T2>
55 class RemoteIndex;
56
57 // forward declaration needed for friend declaration.
58 template<typename T>
59 class IndicesSyncer;
60
61 template<typename T1, typename T2>
62 std::ostream& operator<<(std::ostream& os, const RemoteIndex<T1,T2>& index);
63
64
65 template<typename T, typename A, bool mode>
66 class RemoteIndexListModifier;
67
68
72 template<typename T1, typename T2>
73 class RemoteIndex
74 {
75 template<typename T>
76 friend class IndicesSyncer;
77
78 template<typename T, typename A, typename A1>
79 friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> >&,
80 RemoteIndices<T,A1>&,
81 const T&);
82
83 template<typename T, typename A, bool mode>
84 friend class RemoteIndexListModifier;
85
86 public:
91 typedef T1 GlobalIndex;
100 typedef T2 Attribute;
101
106 typedef IndexPair<GlobalIndex,ParallelLocalIndex<Attribute> > PairType;
107
112 const Attribute attribute() const;
113
119 const PairType& localIndexPair() const;
120
124 RemoteIndex();
125
126
132 RemoteIndex(const T2& attribute,
133 const PairType* local);
134
135
141 RemoteIndex(const T2& attribute);
142
143 bool operator==(const RemoteIndex& ri) const;
144
145 bool operator!=(const RemoteIndex& ri) const;
146 private:
148 const PairType* localIndex_;
149
151 char attribute_;
152 };
153
154 template<class T, class A>
155 std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices);
156
157 class InterfaceBuilder;
158
159 template<class T, class A>
160 class CollectiveIterator;
161
162 // forward declaration needed for friend declaration.
163 template<class T>
164 class IndicesSyncer;
165
166 // forward declaration needed for friend declaration.
167 template<typename T1, typename T2>
168 class OwnerOverlapCopyCommunication;
169
170
187 template<class T, class A=std::allocator<RemoteIndex<typename T::GlobalIndex,
188 typename T::LocalIndex::Attribute> > >
189 class RemoteIndices
190 {
191 friend class InterfaceBuilder;
192 friend class IndicesSyncer<T>;
193 template<typename T1, typename A2, typename A1>
194 friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T1::GlobalIndex, typename T1::LocalIndex::Attribute>,A2> >&,
195 RemoteIndices<T1,A1>&,
196 const T1&);
197
198 template<class G, class T1, class T2>
199 friend void fillIndexSetHoles(const G& graph, Dune::OwnerOverlapCopyCommunication<T1,T2>& oocomm);
200 friend std::ostream& operator<<<>(std::ostream&, const RemoteIndices<T>&);
201
202 public:
203
207 typedef T ParallelIndexSet;
208
211 typedef CollectiveIterator<T,A> CollectiveIteratorT;
212
216 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
217
218
222 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
223
227 typedef typename LocalIndex::Attribute Attribute;
228
232 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
233
234
238 using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
239
242 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
243
245 typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
246 RemoteIndexMap;
247
248 typedef typename RemoteIndexMap::const_iterator const_iterator;
249
267 inline RemoteIndices(const ParallelIndexSet& source, const ParallelIndexSet& destination,
268 const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>(), bool includeSelf=false);
269
270 RemoteIndices();
271
279 void setIncludeSelf(bool includeSelf);
280
297 void setIndexSets(const ParallelIndexSet& source, const ParallelIndexSet& destination,
298 const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>());
299
300 template<typename C>
301 void setNeighbours(const C& neighbours)
302 {
303 neighbourIds.clear();
304 neighbourIds.insert(neighbours.begin(), neighbours.end());
305
306 }
307
308 const std::set<int>& getNeighbours() const
309 {
310 return neighbourIds;
311 }
312
316 ~RemoteIndices();
317
327 template<bool ignorePublic>
328 void rebuild();
329
330 bool operator==(const RemoteIndices& ri) const;
331
339 inline bool isSynced() const;
340
344 inline MPI_Comm communicator() const;
345
360 template<bool mode, bool send>
361 inline RemoteIndexListModifier<T,A,mode> getModifier(int process);
362
369 inline const_iterator find(int proc) const;
370
375 inline const_iterator begin() const;
376
381 inline const_iterator end() const;
382
386 template<bool send>
387 inline CollectiveIteratorT iterator() const;
388
392 inline void free();
393
398 inline int neighbours() const;
399
401 inline const ParallelIndexSet& sourceIndexSet() const;
402
404 inline const ParallelIndexSet& destinationIndexSet() const;
405
406 private:
408 RemoteIndices(const RemoteIndices&) = delete;
409
411 const ParallelIndexSet* source_;
412
414 const ParallelIndexSet* target_;
415
417 MPI_Comm comm_;
418
421 std::set<int> neighbourIds;
422
424 const static int commTag_=333;
425
430 int sourceSeqNo_;
431
436 int destSeqNo_;
437
441 bool publicIgnored;
442
446 bool firstBuild;
447
448 /*
449 * @brief If true, sending from indices of the processor to other
450 * indices on the same processor is enabled even if the same
451 * index set is used on both the sending
452 * and receiving side.
453 */
454 bool includeSelf;
455
457 typedef IndexPair<GlobalIndex, ParallelLocalIndex<Attribute> >
458 PairType;
459
466 RemoteIndexMap remoteIndices_;
467
478 template<bool ignorePublic>
479 inline void buildRemote(bool includeSelf);
480
486 inline int noPublic(const ParallelIndexSet& indexSet);
487
499 template<bool ignorePublic>
500 inline void packEntries(PairType** myPairs, const ParallelIndexSet& indexSet,
501 char* p_out, MPI_Datatype type, int bufferSize,
502 int* position, int n);
503
517 inline void unpackIndices(RemoteIndexList& remote, int remoteEntries,
518 PairType** local, int localEntries, char* p_in,
519 MPI_Datatype type, int* position, int bufferSize,
520 bool fromOurself);
521
522 inline void unpackIndices(RemoteIndexList& send, RemoteIndexList& receive,
523 int remoteEntries, PairType** localSource,
524 int localSourceEntries, PairType** localDest,
525 int localDestEntries, char* p_in,
526 MPI_Datatype type, int* position, int bufferSize);
527
528 void unpackCreateRemote(char* p_in, PairType** sourcePairs, PairType** DestPairs,
529 int remoteProc, int sourcePublish, int destPublish,
530 int bufferSize, bool sendTwo, bool fromOurSelf=false);
531 };
532
548 template<class T, class A, bool mode>
549 class RemoteIndexListModifier
550 {
551
552 template<typename T1, typename A1>
553 friend class RemoteIndices;
554
555 public:
556 class InvalidPosition : public RangeError
557 {};
558
567 constexpr static bool MODIFYINDEXSET = mode;
568
572 typedef T ParallelIndexSet;
573
577 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
578
582 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
583
587 typedef typename LocalIndex::Attribute Attribute;
588
592 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
593
597 typedef A Allocator;
598
601 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
602
606 typedef SLListModifyIterator<RemoteIndex,Allocator> ModifyIterator;
607
611 typedef typename RemoteIndexList::const_iterator ConstIterator;
612
626 void insert(const RemoteIndex& index);
627
628
643 void insert(const RemoteIndex& index, const GlobalIndex& global);
644
652 bool remove(const GlobalIndex& global);
653
666 void repairLocalIndexPointers();
667
668
669 RemoteIndexListModifier(const RemoteIndexListModifier&);
670
675 RemoteIndexListModifier()
676 : glist_()
677 {}
678
679 private:
680
686 RemoteIndexListModifier(const ParallelIndexSet& indexSet,
687 RemoteIndexList& rList);
688
689 typedef SLList<GlobalIndex,Allocator> GlobalList;
690 typedef typename GlobalList::ModifyIterator GlobalModifyIterator;
691 RemoteIndexList* rList_;
692 const ParallelIndexSet* indexSet_;
693 GlobalList glist_;
694 ModifyIterator iter_;
695 GlobalModifyIterator giter_;
696 ConstIterator end_;
697 bool first_;
698 GlobalIndex last_;
699 };
700
705 template<class T, class A>
706 class CollectiveIterator
707 {
708
712 typedef T ParallelIndexSet;
713
717 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
718
722 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
723
727 typedef typename LocalIndex::Attribute Attribute;
728
730 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
731
733 using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
734
736 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
737
739 typedef std::map<int,std::pair<typename RemoteIndexList::const_iterator,
740 const typename RemoteIndexList::const_iterator> >
741 Map;
742
743 public:
744
746 typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
747 RemoteIndexMap;
748
754 inline CollectiveIterator(const RemoteIndexMap& map_, bool send);
755
764 inline void advance(const GlobalIndex& global);
765
775 inline void advance(const GlobalIndex& global, const Attribute& attribute);
776
777 CollectiveIterator& operator++();
778
782 inline bool empty() const;
783
790 class iterator
791 {
792 public:
793 typedef typename Map::iterator RealIterator;
794 typedef typename Map::iterator ConstRealIterator;
795
796
798 iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex& index)
799 : iter_(iter), end_(end), index_(index), hasAttribute(false)
800 {
801 // Move to the first valid entry
802 while(iter_!=end_ && iter_->second.first->localIndexPair().global()!=index_)
803 ++iter_;
804 }
805
806 iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex index,
807 Attribute attribute)
808 : iter_(iter), end_(end), index_(index), attribute_(attribute), hasAttribute(true)
809 {
810 // Move to the first valid entry or the end
811 while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_
812 || iter_->second.first->localIndexPair().local().attribute()!=attribute))
813 ++iter_;
814 }
816 iterator(const iterator& other)
817 : iter_(other.iter_), end_(other.end_), index_(other.index_), attribute_(other.attribute_), hasAttribute(other.hasAttribute)
818 { }
819
821 iterator& operator++()
822 {
823 ++iter_;
824 // If entry is not valid move on
825 while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_ ||
826 (hasAttribute &&
827 iter_->second.first->localIndexPair().local().attribute()!=attribute_)))
828 ++iter_;
829 assert(iter_==end_ ||
830 (iter_->second.first->localIndexPair().global()==index_));
831 assert(iter_==end_ || !hasAttribute ||
832 (iter_->second.first->localIndexPair().local().attribute()==attribute_));
833 return *this;
834 }
835
837 const RemoteIndex& operator*() const
838 {
839 return *(iter_->second.first);
840 }
841
843 int process() const
844 {
845 return iter_->first;
846 }
847
849 const RemoteIndex* operator->() const
850 {
851 return iter_->second.first.operator->();
852 }
853
855 bool operator==(const iterator& other) const
856 {
857 return other.iter_==iter_;
858 }
859
861 bool operator!=(const iterator& other) const
862 {
863 return other.iter_!=iter_;
864 }
865
866 private:
867 iterator();
868
869 RealIterator iter_;
870 RealIterator end_;
871 GlobalIndex index_;
872 Attribute attribute_;
873 bool hasAttribute;
874 };
875
876 iterator begin();
877
878 iterator end();
879
880 private:
881
882 Map map_;
883 GlobalIndex index_;
884 Attribute attribute_;
885 bool noattribute;
886 };
887
888 template<typename TG, typename TA>
889 MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::getType()
890 {
891 if(type==MPI_DATATYPE_NULL) {
892 int length[2] = {1, 1};
893 MPI_Aint base;
894 MPI_Aint disp[2];
895 MPI_Datatype types[2] = {MPITraits<TG>::getType(),
896 MPITraits<ParallelLocalIndex<TA> >::getType()};
897 IndexPair<TG,ParallelLocalIndex<TA> > rep;
898 MPI_Get_address(&rep, &base); // lower bound of the datatype
899 MPI_Get_address(&(rep.global_), &disp[0]);
900 MPI_Get_address(&(rep.local_), &disp[1]);
901 for (MPI_Aint& d : disp)
902 d -= base;
903
904 MPI_Datatype tmp;
905 MPI_Type_create_struct(2, length, disp, types, &tmp);
906
907 MPI_Type_create_resized(tmp, 0, sizeof(IndexPair<TG,ParallelLocalIndex<TA> >), &type);
908 MPI_Type_commit(&type);
909
910 MPI_Type_free(&tmp);
911 }
912 return type;
913 }
914
915 template<typename TG, typename TA>
916 MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::type=MPI_DATATYPE_NULL;
917
918 template<typename T1, typename T2>
919 RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute, const PairType* local)
920 : localIndex_(local), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
921 {}
922
923 template<typename T1, typename T2>
924 RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute)
925 : localIndex_(0), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
926 {}
927
928 template<typename T1, typename T2>
929 RemoteIndex<T1,T2>::RemoteIndex()
930 : localIndex_(0), attribute_()
931 {}
932 template<typename T1, typename T2>
933 inline bool RemoteIndex<T1,T2>::operator==(const RemoteIndex& ri) const
934 {
935 return localIndex_==ri.localIndex_ && attribute_==ri.attribute_;
936 }
937
938 template<typename T1, typename T2>
939 inline bool RemoteIndex<T1,T2>::operator!=(const RemoteIndex& ri) const
940 {
941 return localIndex_!=ri.localIndex_ || attribute_!=ri.attribute_;
942 }
943
944 template<typename T1, typename T2>
945 inline const T2 RemoteIndex<T1,T2>::attribute() const
946 {
947 return T2(attribute_);
948 }
949
950 template<typename T1, typename T2>
951 inline const IndexPair<T1,ParallelLocalIndex<T2> >& RemoteIndex<T1,T2>::localIndexPair() const
952 {
953 return *localIndex_;
954 }
955
956 template<typename T, typename A>
957 inline RemoteIndices<T,A>::RemoteIndices(const ParallelIndexSet& source,
958 const ParallelIndexSet& destination,
959 const MPI_Comm& comm,
960 const std::vector<int>& neighbours,
961 bool includeSelf_)
962 : source_(&source), target_(&destination), comm_(comm),
963 sourceSeqNo_(-1), destSeqNo_(-1), publicIgnored(false), firstBuild(true),
964 includeSelf(includeSelf_)
965 {
966 setNeighbours(neighbours);
967 }
968
969 template<typename T, typename A>
970 void RemoteIndices<T,A>::setIncludeSelf(bool b)
971 {
972 includeSelf=b;
973 }
974
975 template<typename T, typename A>
976 RemoteIndices<T,A>::RemoteIndices()
977 : source_(0), target_(0), sourceSeqNo_(-1),
978 destSeqNo_(-1), publicIgnored(false), firstBuild(true),
979 includeSelf(false)
980 {}
981
982 template<class T, typename A>
983 void RemoteIndices<T,A>::setIndexSets(const ParallelIndexSet& source,
984 const ParallelIndexSet& destination,
985 const MPI_Comm& comm,
986 const std::vector<int>& neighbours)
987 {
988 free();
989 source_ = &source;
990 target_ = &destination;
991 comm_ = comm;
992 firstBuild = true;
993 setNeighbours(neighbours);
994 }
995
996 template<typename T, typename A>
997 inline const typename RemoteIndices<T,A>::ParallelIndexSet&
998 RemoteIndices<T,A>::sourceIndexSet() const
999 {
1000 return *source_;
1001 }
1002
1003
1004 template<typename T, typename A>
1005 inline const typename RemoteIndices<T,A>::ParallelIndexSet&
1006 RemoteIndices<T,A>::destinationIndexSet() const
1007 {
1008 return *target_;
1009 }
1010
1011
1012 template<typename T, typename A>
1013 RemoteIndices<T,A>::~RemoteIndices()
1014 {
1015 free();
1016 }
1017
1018 template<typename T, typename A>
1019 template<bool ignorePublic>
1020 inline void RemoteIndices<T,A>::packEntries(PairType** pairs,
1021 const ParallelIndexSet& indexSet,
1022 char* p_out, MPI_Datatype type,
1023 int bufferSize,
1024 int *position,
1025 [[maybe_unused]] int n)
1026 {
1027 // fill with own indices
1028 const auto end = indexSet.end();
1029
1030 //Now pack the source indices
1031 int i=0;
1032 for(auto index = indexSet.begin(); index != end; ++index)
1033 if(ignorePublic || index->local().isPublic()) {
1034
1035 MPI_Pack(const_cast<PairType*>(&(*index)), 1,
1036 type,
1037 p_out, bufferSize, position, comm_);
1038 pairs[i++] = const_cast<PairType*>(&(*index));
1039
1040 }
1041 assert(i==n);
1042 }
1043
1044 template<typename T, typename A>
1045 inline int RemoteIndices<T,A>::noPublic(const ParallelIndexSet& indexSet)
1046 {
1047
1048 int noPublic=0;
1049
1050 const auto end=indexSet.end();
1051 for(auto index=indexSet.begin(); index!=end; ++index)
1052 if(index->local().isPublic())
1053 noPublic++;
1054
1055 return noPublic;
1056
1057 }
1058
1059
1060 template<typename T, typename A>
1061 inline void RemoteIndices<T,A>::unpackCreateRemote(char* p_in, PairType** sourcePairs,
1062 PairType** destPairs, int remoteProc,
1063 int sourcePublish, int destPublish,
1064 int bufferSize, bool sendTwo,
1065 bool fromOurSelf)
1066 {
1067
1068 // unpack the number of indices we received
1069 int noRemoteSource=-1, noRemoteDest=-1;
1070 char twoIndexSets=0;
1071 int position=0;
1072 // Did we receive two index sets?
1073 MPI_Unpack(p_in, bufferSize, &position, &twoIndexSets, 1, MPI_CHAR, comm_);
1074 // The number of source indices received
1075 MPI_Unpack(p_in, bufferSize, &position, &noRemoteSource, 1, MPI_INT, comm_);
1076 // The number of destination indices received
1077 MPI_Unpack(p_in, bufferSize, &position, &noRemoteDest, 1, MPI_INT, comm_);
1078
1079
1080 // Indices for which we receive
1081 RemoteIndexList* receive= new RemoteIndexList();
1082 // Indices for which we send
1083 RemoteIndexList* send=0;
1084
1085 MPI_Datatype type= MPITraits<PairType>::getType();
1086
1087 if(!twoIndexSets) {
1088 if(sendTwo) {
1089 send = new RemoteIndexList();
1090 // Create both remote index sets simultaneously
1091 unpackIndices(*send, *receive, noRemoteSource, sourcePairs, sourcePublish,
1092 destPairs, destPublish, p_in, type, &position, bufferSize);
1093 }else{
1094 // we only need one list
1095 unpackIndices(*receive, noRemoteSource, sourcePairs, sourcePublish,
1096 p_in, type, &position, bufferSize, fromOurSelf);
1097 send=receive;
1098 }
1099 }else{
1100
1101 int oldPos=position;
1102 // Two index sets received
1103 unpackIndices(*receive, noRemoteSource, destPairs, destPublish,
1104 p_in, type, &position, bufferSize, fromOurSelf);
1105 if(!sendTwo)
1106 //unpack source entries again as destination entries
1107 position=oldPos;
1108
1109 send = new RemoteIndexList();
1110 unpackIndices(*send, noRemoteDest, sourcePairs, sourcePublish,
1111 p_in, type, &position, bufferSize, fromOurSelf);
1112 }
1113
1114 if(receive->empty() && send->empty()) {
1115 if(send==receive) {
1116 delete send;
1117 }else{
1118 delete send;
1119 delete receive;
1120 }
1121 }else{
1122 remoteIndices_.insert(std::make_pair(remoteProc,
1123 std::make_pair(send,receive)));
1124 }
1125 }
1126
1127
1128 template<typename T, typename A>
1129 template<bool ignorePublic>
1130 inline void RemoteIndices<T,A>::buildRemote(bool includeSelf_)
1131 {
1132 // Processor configuration
1133 int rank, procs;
1134 MPI_Comm_rank(comm_, &rank);
1135 MPI_Comm_size(comm_, &procs);
1136
1137 // number of local indices to publish
1138 // The indices of the destination will be sent.
1139 int sourcePublish, destPublish;
1140
1141 // Do we need to send two index sets?
1142 char sendTwo = (source_ != target_);
1143
1144 if(procs==1 && !(sendTwo || includeSelf_))
1145 // Nothing to communicate
1146 return;
1147
1148 sourcePublish = (ignorePublic) ? source_->size() : noPublic(*source_);
1149
1150 if(sendTwo)
1151 destPublish = (ignorePublic) ? target_->size() : noPublic(*target_);
1152 else
1153 // we only need to send one set of indices
1154 destPublish = 0;
1155
1156 int maxPublish, publish=sourcePublish+destPublish;
1157
1158 // Calculate the maximum number of indices sent
1159 MPI_Allreduce(&publish, &maxPublish, 1, MPI_INT, MPI_MAX, comm_);
1160
1161 // allocate buffers
1162 PairType** destPairs;
1163 PairType** sourcePairs = new PairType*[sourcePublish>0 ? sourcePublish : 1];
1164
1165 if(sendTwo)
1166 destPairs = new PairType*[destPublish>0 ? destPublish : 1];
1167 else
1168 destPairs=sourcePairs;
1169
1170 char** buffer = new char*[2];
1171 int bufferSize;
1172 int position=0;
1173 int intSize;
1174 int charSize;
1175
1176 // calculate buffer size
1177 MPI_Datatype type = MPITraits<PairType>::getType();
1178
1179 MPI_Pack_size(maxPublish, type, comm_,
1180 &bufferSize);
1181 MPI_Pack_size(1, MPI_INT, comm_,
1182 &intSize);
1183 MPI_Pack_size(1, MPI_CHAR, comm_,
1184 &charSize);
1185 // Our message will contain the following:
1186 // a bool indicating whether two index sets were sent,
1187 // the sizes of the source and the destination index set,
1188 // then the source and destination indices
1189 bufferSize += 2 * intSize + charSize;
1190
1191 if(bufferSize<=0) bufferSize=1;
1192
1193 buffer[0] = new char[bufferSize];
1194 buffer[1] = new char[bufferSize];
1195
1196
1197 // pack entries into buffer[0], p_out below!
1198 MPI_Pack(&sendTwo, 1, MPI_CHAR, buffer[0], bufferSize, &position,
1199 comm_);
1200
1201 // The number of indices we send for each index set
1202 MPI_Pack(&sourcePublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1203 comm_);
1204 MPI_Pack(&destPublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1205 comm_);
1206
1207 // Now pack the source indices and setup the destination pairs
1208 packEntries<ignorePublic>(sourcePairs, *source_, buffer[0], type,
1209 bufferSize, &position, sourcePublish);
1210 // If necessary send the dest indices and setup the source pairs
1211 if(sendTwo)
1212 packEntries<ignorePublic>(destPairs, *target_, buffer[0], type,
1213 bufferSize, &position, destPublish);
1214
1215
1216 // Update remote indices for ourself
1217 if(sendTwo|| includeSelf_)
1218 unpackCreateRemote(buffer[0], sourcePairs, destPairs, rank, sourcePublish,
1219 destPublish, bufferSize, sendTwo, includeSelf_);
1220
1221 neighbourIds.erase(rank);
1222
1223 if(neighbourIds.size()==0)
1224 {
1225 Dune::dvverb<<rank<<": Sending messages in a ring"<<std::endl;
1226 // send messages in ring
1227 for(int proc=1; proc<procs; proc++) {
1228 // pointers to the current input and output buffers
1229 char* p_out = buffer[1-(proc%2)];
1230 char* p_in = buffer[proc%2];
1231
1232 MPI_Status status;
1233 if(rank%2==0) {
1234 MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1235 commTag_, comm_);
1236 MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1237 commTag_, comm_, &status);
1238 }else{
1239 MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1240 commTag_, comm_, &status);
1241 MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1242 commTag_, comm_);
1243 }
1244
1245
1246 // The process these indices are from
1247 int remoteProc = (rank+procs-proc)%procs;
1248
1249 unpackCreateRemote(p_in, sourcePairs, destPairs, remoteProc, sourcePublish,
1250 destPublish, bufferSize, sendTwo);
1251
1252 }
1253
1254 }
1255 else
1256 {
1257 MPI_Request* requests=new MPI_Request[neighbourIds.size()];
1258 MPI_Request* req=requests;
1259
1260 typedef typename std::set<int>::size_type size_type;
1261 size_type noNeighbours=neighbourIds.size();
1262
1263 // setup sends
1264 for(std::set<int>::iterator neighbour=neighbourIds.begin();
1265 neighbour!= neighbourIds.end(); ++neighbour) {
1266 // Only send the information to the neighbouring processors
1267 MPI_Issend(buffer[0], position , MPI_PACKED, *neighbour, commTag_, comm_, req++);
1268 }
1269
1270 //Test for received messages
1271
1272 for(size_type received=0; received <noNeighbours; ++received)
1273 {
1274 MPI_Status status;
1275 // probe for next message
1276 MPI_Probe(MPI_ANY_SOURCE, commTag_, comm_, &status);
1277 int remoteProc=status.MPI_SOURCE;
1278 int size;
1279 MPI_Get_count(&status, MPI_PACKED, &size);
1280 // receive message
1281 MPI_Recv(buffer[1], size, MPI_PACKED, remoteProc,
1282 commTag_, comm_, &status);
1283
1284 unpackCreateRemote(buffer[1], sourcePairs, destPairs, remoteProc, sourcePublish,
1285 destPublish, bufferSize, sendTwo);
1286 }
1287 // wait for completion of pending requests
1288 MPI_Status* statuses = new MPI_Status[neighbourIds.size()];
1289
1290 if(int(MPI_ERR_IN_STATUS)==MPI_Waitall(neighbourIds.size(), requests, statuses)) {
1291 for(size_type i=0; i < neighbourIds.size(); ++i)
1292 if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
1293 std::cerr<<rank<<": MPI_Error occurred while receiving message."<<std::endl;
1294 MPI_Abort(comm_, 999);
1295 }
1296 }
1297 delete[] requests;
1298 delete[] statuses;
1299 }
1300
1301
1302 // delete allocated memory
1303 if(destPairs!=sourcePairs)
1304 delete[] destPairs;
1305
1306 delete[] sourcePairs;
1307 delete[] buffer[0];
1308 delete[] buffer[1];
1309 delete[] buffer;
1310 }
1311
1312 template<typename T, typename A>
1313 inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& remote,
1314 int remoteEntries,
1315 PairType** local,
1316 int localEntries,
1317 char* p_in,
1318 MPI_Datatype type,
1319 int* position,
1320 int bufferSize,
1321 bool fromOurSelf)
1322 {
1323 if(remoteEntries==0)
1324 return;
1325
1326 PairType index;
1327 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1328 type, comm_);
1329 GlobalIndex oldGlobal=index.global();
1330 int n_in=0, localIndex=0;
1331
1332 //Check if we know the global index
1333 while(localIndex<localEntries) {
1334 if(local[localIndex]->global()==index.global()) {
1335 int oldLocalIndex=localIndex;
1336
1337 while(localIndex<localEntries &&
1338 local[localIndex]->global()==index.global()) {
1339 if(!fromOurSelf || index.local().attribute() !=
1340 local[localIndex]->local().attribute())
1341 // if index is from us it has to have a different attribute
1342 remote.push_back(RemoteIndex(index.local().attribute(),
1343 local[localIndex]));
1344 localIndex++;
1345 }
1346
1347 // unpack next remote index
1348 if((++n_in) < remoteEntries) {
1349 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1350 type, comm_);
1351 if(index.global()==oldGlobal)
1352 // Restart comparison for the same global indices
1353 localIndex=oldLocalIndex;
1354 else
1355 oldGlobal=index.global();
1356 }else{
1357 // No more received indices
1358 break;
1359 }
1360 continue;
1361 }
1362
1363 if (local[localIndex]->global()<index.global()) {
1364 // compare with next entry in our list
1365 ++localIndex;
1366 }else{
1367 // We do not know the index, unpack next
1368 if((++n_in) < remoteEntries) {
1369 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1370 type, comm_);
1371 oldGlobal=index.global();
1372 }else
1373 // No more received indices
1374 break;
1375 }
1376 }
1377
1378 // Unpack the other received indices without doing anything
1379 while(++n_in < remoteEntries)
1380 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1381 type, comm_);
1382 }
1383
1384
1385 template<typename T, typename A>
1386 inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& send,
1387 RemoteIndexList& receive,
1388 int remoteEntries,
1389 PairType** localSource,
1390 int localSourceEntries,
1391 PairType** localDest,
1392 int localDestEntries,
1393 char* p_in,
1394 MPI_Datatype type,
1395 int* position,
1396 int bufferSize)
1397 {
1398 int n_in=0, sourceIndex=0, destIndex=0;
1399
1400 //Check if we know the global index
1401 while(n_in<remoteEntries && (sourceIndex<localSourceEntries || destIndex<localDestEntries)) {
1402 // Unpack next index
1403 PairType index;
1404 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1405 type, comm_);
1406 n_in++;
1407
1408 // Advance until the global indices in localSource and localDest are >= the one in the unpacked index
1409 while(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()<index.global())
1410 sourceIndex++;
1411
1412 while(destIndex<localDestEntries && localDest[destIndex]->global()<index.global())
1413 destIndex++;
1414
1415 // Add a remote index if we found the global index.
1416 if(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()==index.global())
1417 send.push_back(RemoteIndex(index.local().attribute(),
1418 localSource[sourceIndex]));
1419
1420 if(destIndex < localDestEntries && localDest[destIndex]->global() == index.global())
1421 receive.push_back(RemoteIndex(index.local().attribute(),
1422 localDest[destIndex]));
1423 }
1424
1425 }
1426
1427 template<typename T, typename A>
1428 inline void RemoteIndices<T,A>::free()
1429 {
1430 auto lend = remoteIndices_.end();
1431 for(auto lists=remoteIndices_.begin(); lists != lend; ++lists) {
1432 if(lists->second.first==lists->second.second) {
1433 // there is only one remote index list.
1434 delete lists->second.first;
1435 }else{
1436 delete lists->second.first;
1437 delete lists->second.second;
1438 }
1439 }
1440 remoteIndices_.clear();
1441 firstBuild=true;
1442 }
1443
1444 template<typename T, typename A>
1445 inline int RemoteIndices<T,A>::neighbours() const
1446 {
1447 return remoteIndices_.size();
1448 }
1449
1450 template<typename T, typename A>
1451 template<bool ignorePublic>
1452 void RemoteIndices<T,A>::rebuild()
1453 {
1454 // Test whether a rebuild is needed.
1455 if(firstBuild ||
1456 ignorePublic!=publicIgnored || !
1457 isSynced()) {
1458 free();
1459
1460 buildRemote<ignorePublic>(includeSelf);
1461
1462 sourceSeqNo_ = source_->seqNo();
1463 destSeqNo_ = target_->seqNo();
1464 firstBuild=false;
1465 publicIgnored=ignorePublic;
1466 }
1467
1468
1469 }
1470
1471 template<typename T, typename A>
1472 inline bool RemoteIndices<T,A>::isSynced() const
1473 {
1474 return sourceSeqNo_==source_->seqNo() && destSeqNo_ ==target_->seqNo();
1475 }
1476
1477 template<typename T, typename A>
1478 template<bool mode, bool send>
1479 inline RemoteIndexListModifier<T,A,mode> RemoteIndices<T,A>::getModifier(int process)
1480 {
1481
1482 // The users are on their own now!
1483 // We assume they know what they are doing and just set the
1484 // remote indices to synced status.
1485 sourceSeqNo_ = source_->seqNo();
1486 destSeqNo_ = target_->seqNo();
1487
1488 typename RemoteIndexMap::iterator found = remoteIndices_.find(process);
1489
1490 if(found == remoteIndices_.end())
1491 {
1492 if(source_ != target_)
1493 found = remoteIndices_.insert(found, std::make_pair(process,
1494 std::make_pair(new RemoteIndexList(),
1495 new RemoteIndexList())));
1496 else{
1497 RemoteIndexList* rlist = new RemoteIndexList();
1498 found = remoteIndices_.insert(found,
1499 std::make_pair(process,
1500 std::make_pair(rlist, rlist)));
1501 }
1502 }
1503
1504 firstBuild = false;
1505
1506 if(send)
1507 return RemoteIndexListModifier<T,A,mode>(*source_, *(found->second.first));
1508 else
1509 return RemoteIndexListModifier<T,A,mode>(*target_, *(found->second.second));
1510 }
1511
1512 template<typename T, typename A>
1513 inline typename RemoteIndices<T,A>::const_iterator
1514 RemoteIndices<T,A>::find(int proc) const
1515 {
1516 return remoteIndices_.find(proc);
1517 }
1518
1519 template<typename T, typename A>
1520 inline typename RemoteIndices<T,A>::const_iterator
1521 RemoteIndices<T,A>::begin() const
1522 {
1523 return remoteIndices_.begin();
1524 }
1525
1526 template<typename T, typename A>
1527 inline typename RemoteIndices<T,A>::const_iterator
1528 RemoteIndices<T,A>::end() const
1529 {
1530 return remoteIndices_.end();
1531 }
1532
1533
1534 template<typename T, typename A>
1535 bool RemoteIndices<T,A>::operator==(const RemoteIndices& ri) const
1536 {
1537 if(neighbours()!=ri.neighbours())
1538 return false;
1539
1540 const auto rend = remoteIndices_.end();
1541
1542 for(auto rindex = remoteIndices_.begin(), rindex1=ri.remoteIndices_.begin(); rindex!=rend; ++rindex, ++rindex1) {
1543 if(rindex->first != rindex1->first)
1544 return false;
1545 if(*(rindex->second.first) != *(rindex1->second.first))
1546 return false;
1547 if(*(rindex->second.second) != *(rindex1->second.second))
1548 return false;
1549 }
1550 return true;
1551 }
1552
1553 template<class T, class A, bool mode>
1554 RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const ParallelIndexSet& indexSet,
1555 RemoteIndexList& rList)
1556 : rList_(&rList), indexSet_(&indexSet), iter_(rList.beginModify()), end_(rList.end()), first_(true)
1557 {
1558 if(MODIFYINDEXSET) {
1559 assert(indexSet_);
1560 for(ConstIterator iter=iter_; iter != end_; ++iter)
1561 glist_.push_back(iter->localIndexPair().global());
1562 giter_ = glist_.beginModify();
1563 }
1564 }
1565
1566 template<typename T, typename A, bool mode>
1567 RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const RemoteIndexListModifier<T,A,mode>& other)
1568 : rList_(other.rList_), indexSet_(other.indexSet_),
1569 glist_(other.glist_), iter_(other.iter_), giter_(other.giter_), end_(other.end_),
1570 first_(other.first_), last_(other.last_)
1571 {}
1572
1573 template<typename T, typename A, bool mode>
1574 void RemoteIndexListModifier<T,A,mode>::repairLocalIndexPointers()
1575 {
1576 if(MODIFYINDEXSET) {
1577 // repair pointers to local index set.
1578#ifdef DUNE_ISTL_WITH_CHECKING
1579 if(indexSet_->state()!=GROUND)
1580 DUNE_THROW(InvalidIndexSetState, "Index has to be in ground mode for repairing pointers to indices");
1581#endif
1582 auto giter = glist_.begin();
1583 auto index = indexSet_->begin();
1584
1585 for(auto iter=rList_->begin(); iter != end_; ++iter) {
1586 while(index->global()<*giter) {
1587 ++index;
1588#ifdef DUNE_ISTL_WITH_CHECKING
1589 if(index == indexSet_->end())
1590 DUNE_THROW(InvalidPosition, "No such global index in set!");
1591#endif
1592 }
1593
1594#ifdef DUNE_ISTL_WITH_CHECKING
1595 if(index->global() != *giter)
1596 DUNE_THROW(InvalidPosition, "No such global index in set!");
1597#endif
1598 iter->localIndex_ = &(*index);
1599 }
1600 }
1601 }
1602
1603 template<typename T, typename A, bool mode>
1604 void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index)
1605 {
1606 static_assert(!mode,"Not allowed if the mode indicates that new indices "
1607 "might be added to the underlying index set. Use "
1608 "insert(const RemoteIndex&, const GlobalIndex&) instead");
1609
1610#ifdef DUNE_ISTL_WITH_CHECKING
1611 if(!first_ && index.localIndexPair().global()<last_)
1612 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1613#endif
1614 // Move to the correct position
1615 while(iter_ != end_ && iter_->localIndexPair().global() < index.localIndexPair().global()) {
1616 ++iter_;
1617 }
1618
1619 // No duplicate entries allowed
1620 assert(iter_==end_ || iter_->localIndexPair().global() != index.localIndexPair().global());
1621 iter_.insert(index);
1622 last_ = index.localIndexPair().global();
1623 first_ = false;
1624 }
1625
1626 template<typename T, typename A, bool mode>
1627 void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index, const GlobalIndex& global)
1628 {
1629 static_assert(mode,"Not allowed if the mode indicates that no new indices "
1630 "might be added to the underlying index set. Use "
1631 "insert(const RemoteIndex&) instead");
1632#ifdef DUNE_ISTL_WITH_CHECKING
1633 if(!first_ && global<last_)
1634 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1635#endif
1636 // Move to the correct position
1637 while(iter_ != end_ && *giter_ < global) {
1638 ++giter_;
1639 ++iter_;
1640 }
1641
1642 // No duplicate entries allowed
1643 assert(iter_->localIndexPair().global() != global);
1644 iter_.insert(index);
1645 giter_.insert(global);
1646
1647 last_ = global;
1648 first_ = false;
1649 }
1650
1651 template<typename T, typename A, bool mode>
1652 bool RemoteIndexListModifier<T,A,mode>::remove(const GlobalIndex& global)
1653 {
1654#ifdef DUNE_ISTL_WITH_CHECKING
1655 if(!first_ && global<last_)
1656 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1657#endif
1658
1659 bool found= false;
1660
1661 if(MODIFYINDEXSET) {
1662 // Move to the correct position
1663 while(iter_!=end_ && *giter_< global) {
1664 ++giter_;
1665 ++iter_;
1666 }
1667 if(*giter_ == global) {
1668 giter_.remove();
1669 iter_.remove();
1670 found=true;
1671 }
1672 }else{
1673 while(iter_!=end_ && iter_->localIndexPair().global() < global)
1674 ++iter_;
1675
1676 if(iter_->localIndexPair().global()==global) {
1677 iter_.remove();
1678 found = true;
1679 }
1680 }
1681
1682 last_ = global;
1683 first_ = false;
1684 return found;
1685 }
1686
1687 template<typename T, typename A>
1688 template<bool send>
1689 inline CollectiveIterator<T,A> RemoteIndices<T,A>::iterator() const
1690 {
1691 return CollectiveIterator<T,A>(remoteIndices_, send);
1692 }
1693
1694 template<typename T, typename A>
1695 inline MPI_Comm RemoteIndices<T,A>::communicator() const
1696 {
1697 return comm_;
1698
1699 }
1700
1701 template<typename T, typename A>
1702 inline CollectiveIterator<T,A>::CollectiveIterator(const RemoteIndexMap& pmap, bool send)
1703 {
1704
1705 const auto end = pmap.end();
1706 for(auto process = pmap.begin(); process != end; ++process) {
1707 const RemoteIndexList* list = send ? process->second.first : process->second.second;
1708 using ri_iterator = typename RemoteIndexList::const_iterator;
1709 map_.insert(std::make_pair(process->first,
1710 std::pair<ri_iterator, const ri_iterator>(list->begin(), list->end())));
1711 }
1712 }
1713
1714 template<typename T, typename A>
1715 inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index)
1716 {
1717 const auto end = map_.end();
1718
1719 for(auto iter = map_.begin(); iter != end;) {
1720 // Step the iterator until we are >= index
1721 typename RemoteIndexList::const_iterator current = iter->second.first;
1722 typename RemoteIndexList::const_iterator rend = iter->second.second;
1723 RemoteIndex remoteIndex;
1724 if(current != rend)
1725 remoteIndex = *current;
1726
1727 while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1728 ++(iter->second.first);
1729
1730 // erase from the map if there are no more entries.
1731 if(iter->second.first == iter->second.second)
1732 map_.erase(iter++);
1733 else{
1734 ++iter;
1735 }
1736 }
1737 index_=index;
1738 noattribute=true;
1739 }
1740
1741 template<typename T, typename A>
1742 inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index,
1743 const Attribute& attribute)
1744 {
1745 const auto end = map_.end();
1746
1747 for(auto iter = map_.begin(); iter != end;) {
1748 // Step the iterator until we are >= index
1749 typename RemoteIndexList::const_iterator current = iter->second.first;
1750 typename RemoteIndexList::const_iterator rend = iter->second.second;
1751 RemoteIndex remoteIndex;
1752 if(current != rend)
1753 remoteIndex = *current;
1754
1755 // Move to global index or bigger
1756 while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1757 ++(iter->second.first);
1758
1759 // move to attribute or bigger
1760 while(iter->second.first!=iter->second.second
1761 && iter->second.first->localIndexPair().global()==index
1762 && iter->second.first->localIndexPair().local().attribute()<attribute)
1763 ++(iter->second.first);
1764
1765 // erase from the map if there are no more entries.
1766 if(iter->second.first == iter->second.second)
1767 map_.erase(iter++);
1768 else{
1769 ++iter;
1770 }
1771 }
1772 index_=index;
1773 attribute_=attribute;
1774 noattribute=false;
1775 }
1776
1777 template<typename T, typename A>
1778 CollectiveIterator<T,A>& CollectiveIterator<T,A>::operator++()
1779 {
1780 const auto end = map_.end();
1781
1782 for(auto iter = map_.begin(); iter != end;) {
1783 // Step the iterator until we are >= index
1784 auto current = iter->second.first;
1785 auto rend = iter->second.second;
1786
1787 // move all iterators pointing to the current global index to next value
1788 if(iter->second.first->localIndexPair().global()==index_ &&
1789 (noattribute || iter->second.first->localIndexPair().local().attribute() == attribute_))
1790 ++(iter->second.first);
1791
1792 // erase from the map if there are no more entries.
1793 if(iter->second.first == iter->second.second)
1794 map_.erase(iter++);
1795 else{
1796 ++iter;
1797 }
1798 }
1799 return *this;
1800 }
1801
1802 template<typename T, typename A>
1803 inline bool CollectiveIterator<T,A>::empty() const
1804 {
1805 return map_.empty();
1806 }
1807
1808 template<typename T, typename A>
1809 inline typename CollectiveIterator<T,A>::iterator
1810 CollectiveIterator<T,A>::begin()
1811 {
1812 if(noattribute)
1813 return iterator(map_.begin(), map_.end(), index_);
1814 else
1815 return iterator(map_.begin(), map_.end(), index_,
1816 attribute_);
1817 }
1818
1819 template<typename T, typename A>
1820 inline typename CollectiveIterator<T,A>::iterator
1821 CollectiveIterator<T,A>::end()
1822 {
1823 return iterator(map_.end(), map_.end(), index_);
1824 }
1825
1826 template<typename TG, typename TA>
1827 inline std::ostream& operator<<(std::ostream& os, const RemoteIndex<TG,TA>& index)
1828 {
1829 os<<"[global="<<index.localIndexPair().global()<<", remote attribute="<<index.attribute()<<" local attribute="<<index.localIndexPair().local().attribute()<<"]";
1830 return os;
1831 }
1832
1833 template<typename T, typename A>
1834 inline std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices)
1835 {
1836 int rank;
1837 MPI_Comm_rank(indices.comm_, &rank);
1838 const auto rend = indices.remoteIndices_.end();
1839
1840 for(auto rindex = indices.remoteIndices_.begin(); rindex!=rend; ++rindex) {
1841 os<<rank<<": Process "<<rindex->first<<":";
1842
1843 if(!rindex->second.first->empty()) {
1844 os<<" send:";
1845
1846 const auto send= rindex->second.first->end();
1847
1848 for(auto index = rindex->second.first->begin();
1849 index != send; ++index)
1850 os<<*index<<" ";
1851 os<<std::endl;
1852 }
1853 if(!rindex->second.second->empty()) {
1854 os<<rank<<": Process "<<rindex->first<<": "<<"receive: ";
1855
1856 for(const auto& index : *(rindex->second.second))
1857 os << index << " ";
1858 }
1859 os<<std::endl<<std::flush;
1860 }
1861 return os;
1862 }
1864}
1865
1866#endif // HAVE_MPI
1867#endif // DUNE_COMMON_PARALLEL_REMOTEINDICES_HH
Iterator over the valid underlying iterators.
Definition: remoteindices.hh:791
iterator(const RealIterator &iter, const ConstRealIterator &end, GlobalIndex &index)
Definition: remoteindices.hh:798
iterator(const iterator &other)
Definition: remoteindices.hh:816
const RemoteIndex & operator*() const
Definition: remoteindices.hh:837
iterator & operator++()
Definition: remoteindices.hh:821
const RemoteIndex * operator->() const
Definition: remoteindices.hh:849
bool operator==(const iterator &other) const
Definition: remoteindices.hh:855
int process() const
Definition: remoteindices.hh:843
bool operator!=(const iterator &other) const
Definition: remoteindices.hh:861
A collective iterator for moving over the remote indices for all processes collectively.
Definition: remoteindices.hh:707
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:747
A pair consisting of a global and local index.
Definition: indexset.hh:85
Class for recomputing missing indices of a distributed index set.
Definition: indicessyncer.hh:45
Base class of all classes representing a communication interface.
Definition: interface.hh:44
Exception indicating that the index set is not in the expected state.
Definition: indexset.hh:205
A class setting up standard communication for a two-valued attribute set with owner/overlap/copy semantics.
Definition: owneroverlapcopy.hh:174
Manager class for the mapping between local indices and globally unique indices.
Definition: indexset.hh:218
An index present on the local process with an additional attribute flag.
Definition: plocalindex.hh:52
Default exception class for range errors.
Definition: exceptions.hh:254
Modifier for adding and/or deleting remote indices from the remote index list.
Definition: remoteindices.hh:550
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:601
A Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:597
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:577
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:582
RemoteIndexList::const_iterator ConstIterator
The type of the remote index list iterator.
Definition: remoteindices.hh:611
SLListModifyIterator< RemoteIndex, Allocator > ModifyIterator
The type of the modifying iterator of the remote index list.
Definition: remoteindices.hh:606
T ParallelIndexSet
Type of the index set we use.
Definition: remoteindices.hh:572
RemoteIndexListModifier()
Default constructor.
Definition: remoteindices.hh:675
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:587
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:592
static constexpr bool MODIFYINDEXSET
If true the index set corresponding to the remote indices might get modified.
Definition: remoteindices.hh:567
Information about an index residing on another processor.
Definition: remoteindices.hh:74
T1 GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: remoteindices.hh:91
T2 Attribute
The type of the attributes. Normally this will be an enumeration type.
Definition: remoteindices.hh:100
IndexPair< GlobalIndex, ParallelLocalIndex< Attribute > > PairType
The type of the index pair.
Definition: remoteindices.hh:106
The indices present on remote processes.
Definition: remoteindices.hh:190
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:232
friend void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition: repartition.hh:83
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:216
T ParallelIndexSet
Type of the index set we use, e.g. ParallelLocalIndexSet.
Definition: remoteindices.hh:207
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:227
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:246
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:242
CollectiveIterator< T, A > CollectiveIteratorT
The type of the collective iterator over all remote indices.
Definition: remoteindices.hh:211
typename std::allocator_traits< A >::template rebind_alloc< RemoteIndex > Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:238
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:222
A constant iterator for the SLList.
Definition: sllist.hh:371
A single linked list.
Definition: sllist.hh:44
A few common exception classes.
void repairLocalIndexPointers()
Repair the pointers to the local index pairs.
Definition: remoteindices.hh:1574
const Attribute attribute() const
Get the attribute of the index on the remote process.
Definition: remoteindices.hh:945
void setIndexSets(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >())
Set the index sets and communicator we work with.
Definition: remoteindices.hh:983
void free()
Free the index lists.
Definition: remoteindices.hh:1428
void insert(const RemoteIndex &index)
Insert an index to the list.
Definition: remoteindices.hh:1604
void rebuild()
Rebuilds the set of remote indices.
Definition: remoteindices.hh:1452
void repairLocalIndexPointers(std::map< int, SLList< std::pair< typename T::GlobalIndex, typename T::LocalIndex::Attribute >, A > > &globalMap, RemoteIndices< T, A1 > &remoteIndices, const T &indexSet)
Repair the pointers to the local indices in the remote indices.
Definition: indicessyncer.hh:495
MPI_Comm communicator() const
Get the mpi communicator used.
Definition: remoteindices.hh:1695
CollectiveIteratorT iterator() const
Get an iterator for collectively iterating over the remote indices of all remote processes.
Definition: remoteindices.hh:1689
void setIncludeSelf(bool includeSelf)
Tell whether sending from indices of the processor to other indices on the same processor is enabled even if the same index set is used on both the sending and receiving side.
Definition: remoteindices.hh:970
CollectiveIterator(const RemoteIndexMap &map_, bool send)
Constructor.
Definition: remoteindices.hh:1702
iterator begin()
Get an iterator over the indices positioned at the first index.
iterator end()
Get an iterator over the indices positioned after the last index.
const_iterator end() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1528
bool empty() const
Checks whether there are still iterators in the map.
Definition: remoteindices.hh:1803
RemoteIndexListModifier< T, A, mode > getModifier(int process)
Get a modifier for a remote index list.
Definition: remoteindices.hh:1479
void advance(const GlobalIndex &global)
Advances all underlying iterators.
Definition: remoteindices.hh:1715
const PairType & localIndexPair() const
Get the corresponding local index pair.
Definition: remoteindices.hh:951
const ParallelIndexSet & sourceIndexSet() const
Get the index set at the source.
Definition: remoteindices.hh:998
~RemoteIndices()
Destructor.
Definition: remoteindices.hh:1013
TL LocalIndex
The type of the local index, e.g. ParallelLocalIndex.
Definition: indexset.hh:239
bool remove(const GlobalIndex &global)
Remove a remote index.
Definition: remoteindices.hh:1652
const GlobalIndex & global() const
Get the global index.
RemoteIndex()
Parameterless Constructor.
Definition: remoteindices.hh:929
int neighbours() const
Get the number of processors we share indices with.
Definition: remoteindices.hh:1445
TG GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: indexset.hh:226
const ParallelIndexSet & destinationIndexSet() const
Get the index set at destination.
Definition: remoteindices.hh:1006
RemoteIndices(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >(), bool includeSelf=false)
Constructor.
Definition: remoteindices.hh:957
const_iterator find(int proc) const
Find an iterator over the remote index lists of a specific process.
Definition: remoteindices.hh:1514
bool isSynced() const
Checks whether the remote indices are synced with the indexsets.
Definition: remoteindices.hh:1472
const_iterator begin() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1521
@ GROUND
The default mode. Indicates that the index set is ready to be used.
Definition: indexset.hh:186
iterator end()
Get an iterator pointing to the end of the list.
Definition: sllist.hh:774
SLListConstIterator< RemoteIndex, Allocator > const_iterator
The constant iterator of the list.
Definition: sllist.hh:74
SLListModifyIterator< GlobalIndex, Allocator > ModifyIterator
The type of the iterator capable of deletion and insertion.
Definition: sllist.hh:103
iterator begin()
Get an iterator pointing to the first element in the list.
Definition: sllist.hh:762
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
EnableIfInterOperable< T1, T2, bool >::type operator==(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for equality.
Definition: iteratorfacades.hh:238
EnableIfInterOperable< T1, T2, bool >::type operator!=(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for inequality.
Definition: iteratorfacades.hh:260
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:96
Provides a map between global and local indices.
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignedallocator.hh:13
constexpr std::integral_constant< std::size_t, sizeof...(II)> size(std::integer_sequence< T, II... >)
Return the size of the sequence.
Definition: integersequence.hh:75
STL namespace.
Provides classes for use as the local index in ParallelIndexSet for distributed computing.
Implements a singly linked list together with the necessary iterators.
Standard Dune debug streams.
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:41