Dune Core Modules (2.5.0)

remoteindices.hh
1// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2// vi: set et ts=4 sw=2 sts=2:
3#ifndef DUNE_REMOTEINDICES_HH
4#define DUNE_REMOTEINDICES_HH
5
6#include "indexset.hh"
7#include "plocalindex.hh"
10#include <dune/common/sllist.hh>
12#include <map>
13#include <set>
14#include <utility>
15#include <iostream>
16#include <algorithm>
17#include <iterator>
18#if HAVE_MPI
19#include "mpitraits.hh"
20#include <mpi.h>
21
22namespace Dune {
34 template<typename TG, typename TA>
35 class MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >
36 {
37 public:
38 inline static MPI_Datatype getType();
39 private:
40 static MPI_Datatype type;
41 };
42
43
44 template<typename T, typename A>
45 class RemoteIndices;
46
47 template<typename T1, typename T2>
48 class RemoteIndex;
49
50 template<typename T>
51 class IndicesSyncer;
52
53 template<typename T1, typename T2>
54 std::ostream& operator<<(std::ostream& os, const RemoteIndex<T1,T2>& index);
55
56
57 template<typename T, typename A, bool mode>
58 class RemoteIndexListModifier;
59
60
64 template<typename T1, typename T2>
65 class RemoteIndex
66 {
67 template<typename T>
68 friend class IndicesSyncer;
69
70 template<typename T, typename A, typename A1>
71 friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> >&,
72 RemoteIndices<T,A1>&,
73 const T&);
74
75 template<typename T, typename A, bool mode>
76 friend class RemoteIndexListModifier;
77
78 public:
83 typedef T1 GlobalIndex;
92 typedef T2 Attribute;
93
98 typedef IndexPair<GlobalIndex,ParallelLocalIndex<Attribute> > PairType;
99
104 const Attribute attribute() const;
105
111 const PairType& localIndexPair() const;
112
116 RemoteIndex();
117
118
124 RemoteIndex(const T2& attribute,
125 const PairType* local);
126
127
133 RemoteIndex(const T2& attribute);
134
135 bool operator==(const RemoteIndex& ri) const;
136
137 bool operator!=(const RemoteIndex& ri) const;
138 private:
140 const PairType* localIndex_;
141
143 char attribute_;
144 };
145
146 template<class T, class A>
147 std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices);
148
149 class InterfaceBuilder;
150
151 template<class T, class A>
152 class CollectiveIterator;
153
154 template<class T>
155 class IndicesSyncer;
156
157 // forward declaration needed for friend declaration.
158 template<typename T1, typename T2>
159 class OwnerOverlapCopyCommunication;
160
161
178 template<class T, class A=std::allocator<RemoteIndex<typename T::GlobalIndex,
179 typename T::LocalIndex::Attribute> > >
180 class RemoteIndices
181 {
182 friend class InterfaceBuilder;
183 friend class IndicesSyncer<T>;
184 template<typename T1, typename A2, typename A1>
185 friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T1::GlobalIndex, typename T1::LocalIndex::Attribute>,A2> >&,
186 RemoteIndices<T1,A1>&,
187 const T1&);
188
189 template<class G, class T1, class T2>
190 friend void fillIndexSetHoles(const G& graph, Dune::OwnerOverlapCopyCommunication<T1,T2>& oocomm);
191 friend std::ostream& operator<<<>(std::ostream&, const RemoteIndices<T>&);
192
193 public:
194
198 typedef T ParallelIndexSet;
199
202 typedef CollectiveIterator<T,A> CollectiveIteratorT;
203
207 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
208
209
213 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
214
218 typedef typename LocalIndex::Attribute Attribute;
219
223 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
224
225
229 typedef typename A::template rebind<RemoteIndex>::other Allocator;
230
233 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
234
236 typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
237 RemoteIndexMap;
238
239 typedef typename RemoteIndexMap::const_iterator const_iterator;
240
258 inline RemoteIndices(const ParallelIndexSet& source, const ParallelIndexSet& destination,
259 const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>(), bool includeSelf=false);
260
262
270 void setIncludeSelf(bool includeSelf);
271
288 void setIndexSets(const ParallelIndexSet& source, const ParallelIndexSet& destination,
289 const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>());
290
291 template<typename C>
292 void setNeighbours(const C& neighbours)
293 {
294 neighbourIds.clear();
295 neighbourIds.insert(neighbours.begin(), neighbours.end());
296
297 }
298
299 const std::set<int>& getNeighbours() const
300 {
301 return neighbourIds;
302 }
303
308
318 template<bool ignorePublic>
319 void rebuild();
320
321 bool operator==(const RemoteIndices& ri);
322
330 inline bool isSynced() const;
331
335 inline MPI_Comm communicator() const;
336
351 template<bool mode, bool send>
352 inline RemoteIndexListModifier<T,A,mode> getModifier(int process);
353
360 inline const_iterator find(int proc) const;
361
366 inline const_iterator begin() const;
367
372 inline const_iterator end() const;
373
377 template<bool send>
378 inline CollectiveIteratorT iterator() const;
379
383 inline void free();
384
389 inline int neighbours() const;
390
392 inline const ParallelIndexSet& sourceIndexSet() const;
393
395 inline const ParallelIndexSet& destinationIndexSet() const;
396
397 private:
399 RemoteIndices(const RemoteIndices&)
400 {}
401
403 const ParallelIndexSet* source_;
404
406 const ParallelIndexSet* target_;
407
409 MPI_Comm comm_;
410
413 std::set<int> neighbourIds;
414
416 const static int commTag_=333;
417
422 int sourceSeqNo_;
423
428 int destSeqNo_;
429
433 bool publicIgnored;
434
438 bool firstBuild;
439
440 /*
441 * @brief If true, sending from indices of the processor to other
442 * indices on the same processor is enabled even if the same indexset is used
443 * on both the
444 * sending and receiving side.
445 */
446 bool includeSelf;
447
449 typedef IndexPair<GlobalIndex,LocalIndex>
450 PairType;
451
458 RemoteIndexMap remoteIndices_;
459
470 template<bool ignorePublic>
471 inline void buildRemote(bool includeSelf);
472
478 inline int noPublic(const ParallelIndexSet& indexSet);
479
491 template<bool ignorePublic>
492 inline void packEntries(PairType** myPairs, const ParallelIndexSet& indexSet,
493 char* p_out, MPI_Datatype type, int bufferSize,
494 int* position, int n);
495
509 inline void unpackIndices(RemoteIndexList& remote, int remoteEntries,
510 PairType** local, int localEntries, char* p_in,
511 MPI_Datatype type, int* position, int bufferSize,
512 bool fromOurself);
513
514 inline void unpackIndices(RemoteIndexList& send, RemoteIndexList& receive,
515 int remoteEntries, PairType** localSource,
516 int localSourceEntries, PairType** localDest,
517 int localDestEntries, char* p_in,
518 MPI_Datatype type, int* position, int bufferSize);
519
520 void unpackCreateRemote(char* p_in, PairType** sourcePairs, PairType** DestPairs,
521 int remoteProc, int sourcePublish, int destPublish,
522 int bufferSize, bool sendTwo, bool fromOurSelf=false);
523 };
524
542 template<class T, class A, bool mode>
543 class RemoteIndexListModifier
544 {
545
546 template<typename T1, typename A1>
547 friend class RemoteIndices;
548
549 public:
550 class InvalidPosition : public RangeError
551 {};
552
553 enum {
562 MODIFYINDEXSET=mode
563 };
564
568 typedef T ParallelIndexSet;
569
573 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
574
578 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
579
583 typedef typename LocalIndex::Attribute Attribute;
584
588 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
589
593 typedef A Allocator;
594
597 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
598
602 typedef SLListModifyIterator<RemoteIndex,Allocator> ModifyIterator;
603
607 typedef typename RemoteIndexList::const_iterator ConstIterator;
608
622 void insert(const RemoteIndex& index) throw(InvalidPosition);
623
624
639 void insert(const RemoteIndex& index, const GlobalIndex& global) throw(InvalidPosition);
640
648 bool remove(const GlobalIndex& global) throw(InvalidPosition);
649
662 void repairLocalIndexPointers() throw(InvalidIndexSetState);
663
664
666
671 RemoteIndexListModifier()
672 : glist_()
673 {}
674
675 private:
676
682 RemoteIndexListModifier(const ParallelIndexSet& indexSet,
683 RemoteIndexList& rList);
684
685 typedef SLList<GlobalIndex,Allocator> GlobalList;
686 typedef typename GlobalList::ModifyIterator GlobalModifyIterator;
687 RemoteIndexList* rList_;
688 const ParallelIndexSet* indexSet_;
689 GlobalList glist_;
690 ModifyIterator iter_;
691 GlobalModifyIterator giter_;
692 ConstIterator end_;
693 bool first_;
694 GlobalIndex last_;
695 };
696
701 template<class T, class A>
702 class CollectiveIterator
703 {
704
708 typedef T ParallelIndexSet;
709
713 typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
714
718 typedef typename ParallelIndexSet::LocalIndex LocalIndex;
719
723 typedef typename LocalIndex::Attribute Attribute;
724
726 typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
727
729 typedef typename A::template rebind<RemoteIndex>::other Allocator;
730
732 typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
733
735 typedef std::map<int,std::pair<typename RemoteIndexList::const_iterator,
736 const typename RemoteIndexList::const_iterator> >
737 Map;
738
739 public:
740
742 typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
743 RemoteIndexMap;
744
750 inline CollectiveIterator(const RemoteIndexMap& map_, bool send);
751
760 inline void advance(const GlobalIndex& global);
761
771 inline void advance(const GlobalIndex& global, const Attribute& attribute);
772
773 CollectiveIterator& operator++();
774
778 inline bool empty();
779
786 class iterator
787 {
788 public:
789 typedef typename Map::iterator RealIterator;
790 typedef typename Map::iterator ConstRealIterator;
791
792
794 iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex& index)
795 : iter_(iter), end_(end), index_(index), hasAttribute(false)
796 {
797 // Move to the first valid entry
798 while(iter_!=end_ && iter_->second.first->localIndexPair().global()!=index_)
799 ++iter_;
800 }
801
802 iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex index,
803 Attribute attribute)
804 : iter_(iter), end_(end), index_(index), attribute_(attribute), hasAttribute(true)
805 {
806 // Move to the first valid entry or the end
807 while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_
808 || iter_->second.first->localIndexPair().local().attribute()!=attribute))
809 ++iter_;
810 }
812 iterator(const iterator& other)
813 : iter_(other.iter_), end_(other.end_), index_(other.index_), attribute_(other.attribute_), hasAttribute(other.hasAttribute)
814 { }
815
817 iterator& operator++()
818 {
819 ++iter_;
820 // If entry is not valid move on
821 while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_ ||
822 (hasAttribute &&
823 iter_->second.first->localIndexPair().local().attribute()!=attribute_)))
824 ++iter_;
825 assert(iter_==end_ ||
826 (iter_->second.first->localIndexPair().global()==index_));
827 assert(iter_==end_ || !hasAttribute ||
828 (iter_->second.first->localIndexPair().local().attribute()==attribute_));
829 return *this;
830 }
831
833 const RemoteIndex& operator*() const
834 {
835 return *(iter_->second.first);
836 }
837
839 int process() const
840 {
841 return iter_->first;
842 }
843
845 const RemoteIndex* operator->() const
846 {
847 return iter_->second.first.operator->();
848 }
849
851 bool operator==(const iterator& other)
852 {
853 return other.iter_==iter_;
854 }
855
857 bool operator!=(const iterator& other)
858 {
859 return other.iter_!=iter_;
860 }
861
862 private:
863 iterator();
864
865 RealIterator iter_;
866 RealIterator end_;
867 GlobalIndex index_;
868 Attribute attribute_;
869 bool hasAttribute;
870 };
871
872 iterator begin();
873
874 iterator end();
875
876 private:
877
878 Map map_;
879 GlobalIndex index_;
880 Attribute attribute_;
881 bool noattribute;
882 };
883
884 template<typename TG, typename TA>
885 MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::getType()
886 {
887 if(type==MPI_DATATYPE_NULL) {
888 int length[2] = {1, 1};
889 MPI_Aint base;
890 MPI_Aint disp[2];
891 MPI_Datatype types[2] = {MPITraits<TG>::getType(),
892 MPITraits<ParallelLocalIndex<TA> >::getType()};
893 IndexPair<TG,ParallelLocalIndex<TA> > rep;
894 MPI_Get_address(&rep, &base); // lower bound of the datatype
895 MPI_Get_address(&(rep.global_), &disp[0]);
896 MPI_Get_address(&(rep.local_), &disp[1]);
897 for (MPI_Aint& d : disp)
898 d -= base;
899
900 MPI_Datatype tmp;
901 MPI_Type_create_struct(2, length, disp, types, &tmp);
902
903 MPI_Type_create_resized(tmp, 0, sizeof(IndexPair<TG,ParallelLocalIndex<TA> >), &type);
904 MPI_Type_commit(&type);
905
906 MPI_Type_free(&tmp);
907 }
908 return type;
909 }
910
911 template<typename TG, typename TA>
912 MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::type=MPI_DATATYPE_NULL;
913
914 template<typename T1, typename T2>
915 RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute, const PairType* local)
916 : localIndex_(local), attribute_(attribute)
917 {}
918
919 template<typename T1, typename T2>
920 RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute)
921 : localIndex_(0), attribute_(attribute)
922 {}
923
924 template<typename T1, typename T2>
925 RemoteIndex<T1,T2>::RemoteIndex()
926 : localIndex_(0), attribute_()
927 {}
928 template<typename T1, typename T2>
929 inline bool RemoteIndex<T1,T2>::operator==(const RemoteIndex& ri) const
930 {
931 return localIndex_==ri.localIndex_ && attribute_==ri.attribute_;
932 }
933
934 template<typename T1, typename T2>
935 inline bool RemoteIndex<T1,T2>::operator!=(const RemoteIndex& ri) const
936 {
937 return localIndex_!=ri.localIndex_ || attribute_!=ri.attribute_;
938 }
939
940 template<typename T1, typename T2>
941 inline const T2 RemoteIndex<T1,T2>::attribute() const
942 {
943 return T2(attribute_);
944 }
945
946 template<typename T1, typename T2>
947 inline const typename RemoteIndex<T1,T2>::PairType& RemoteIndex<T1,T2>::localIndexPair() const
948 {
949 return *localIndex_;
950 }
951
952 template<typename T, typename A>
953 inline RemoteIndices<T,A>::RemoteIndices(const ParallelIndexSet& source,
954 const ParallelIndexSet& destination,
955 const MPI_Comm& comm,
956 const std::vector<int>& neighbours,
957 bool includeSelf_)
958 : source_(&source), target_(&destination), comm_(comm),
959 sourceSeqNo_(-1), destSeqNo_(-1), publicIgnored(false), firstBuild(true),
960 includeSelf(includeSelf_)
961 {
962 setNeighbours(neighbours);
963 }
964
965 template<typename T, typename A>
966 void RemoteIndices<T,A>::setIncludeSelf(bool b)
967 {
968 includeSelf=b;
969 }
970
971 template<typename T, typename A>
972 RemoteIndices<T,A>::RemoteIndices()
973 : source_(0), target_(0), sourceSeqNo_(-1),
974 destSeqNo_(-1), publicIgnored(false), firstBuild(true),
975 includeSelf(false)
976 {}
977
978 template<class T, typename A>
979 void RemoteIndices<T,A>::setIndexSets(const ParallelIndexSet& source,
980 const ParallelIndexSet& destination,
981 const MPI_Comm& comm,
982 const std::vector<int>& neighbours)
983 {
984 free();
985 source_ = &source;
986 target_ = &destination;
987 comm_ = comm;
988 firstBuild = true;
989 setNeighbours(neighbours);
990 }
991
992 template<typename T, typename A>
993 inline const typename RemoteIndices<T,A>::ParallelIndexSet&
994 RemoteIndices<T,A>::sourceIndexSet() const
995 {
996 return *source_;
997 }
998
999
1000 template<typename T, typename A>
1001 inline const typename RemoteIndices<T,A>::ParallelIndexSet&
1002 RemoteIndices<T,A>::destinationIndexSet() const
1003 {
1004 return *target_;
1005 }
1006
1007
1008 template<typename T, typename A>
1009 RemoteIndices<T,A>::~RemoteIndices()
1010 {
1011 free();
1012 }
1013
1014 template<typename T, typename A>
1015 template<bool ignorePublic>
1016 inline void RemoteIndices<T,A>::packEntries(PairType** pairs,
1017 const ParallelIndexSet& indexSet,
1018 char* p_out, MPI_Datatype type,
1019 int bufferSize,
1020 int *position, int n)
1021 {
1023 // fill with own indices
1024 typedef typename ParallelIndexSet::const_iterator const_iterator;
1025 typedef IndexPair<GlobalIndex,LocalIndex> PairType;
1026 const const_iterator end = indexSet.end();
1027
1028 //Now pack the source indices
1029 int i=0;
1030 for(const_iterator index = indexSet.begin(); index != end; ++index)
1031 if(ignorePublic || index->local().isPublic()) {
1032
1033 MPI_Pack(const_cast<PairType*>(&(*index)), 1,
1034 type,
1035 p_out, bufferSize, position, comm_);
1036 pairs[i++] = const_cast<PairType*>(&(*index));
1037
1038 }
1039 assert(i==n);
1040 }
1041
1042 template<typename T, typename A>
1043 inline int RemoteIndices<T,A>::noPublic(const ParallelIndexSet& indexSet)
1044 {
1045 typedef typename ParallelIndexSet::const_iterator const_iterator;
1046
1047 int noPublic=0;
1048
1049 const const_iterator end=indexSet.end();
1050 for(const_iterator index=indexSet.begin(); index!=end; ++index)
1051 if(index->local().isPublic())
1052 noPublic++;
1053
1054 return noPublic;
1055
1056 }
1057
1058
1059 template<typename T, typename A>
1060 inline void RemoteIndices<T,A>::unpackCreateRemote(char* p_in, PairType** sourcePairs,
1061 PairType** destPairs, int remoteProc,
1062 int sourcePublish, int destPublish,
1063 int bufferSize, bool sendTwo,
1064 bool fromOurSelf)
1065 {
1066
1067 // unpack the number of indices we received
1068 int noRemoteSource=-1, noRemoteDest=-1;
1069 char twoIndexSets=0;
1070 int position=0;
1071 // Did we receive two index sets?
1072 MPI_Unpack(p_in, bufferSize, &position, &twoIndexSets, 1, MPI_CHAR, comm_);
1073 // The number of source indices received
1074 MPI_Unpack(p_in, bufferSize, &position, &noRemoteSource, 1, MPI_INT, comm_);
1075 // The number of destination indices received
1076 MPI_Unpack(p_in, bufferSize, &position, &noRemoteDest, 1, MPI_INT, comm_);
1077
1078
1079 // Indices for which we receive
1080 RemoteIndexList* receive= new RemoteIndexList();
1081 // Indices for which we send
1082 RemoteIndexList* send=0;
1083
1084 MPI_Datatype type= MPITraits<PairType>::getType();
1085
1086 if(!twoIndexSets) {
1087 if(sendTwo) {
1088 send = new RemoteIndexList();
1089 // Create both remote index sets simultaneously
1090 unpackIndices(*send, *receive, noRemoteSource, sourcePairs, sourcePublish,
1091 destPairs, destPublish, p_in, type, &position, bufferSize);
1092 }else{
1093 // we only need one list
1094 unpackIndices(*receive, noRemoteSource, sourcePairs, sourcePublish,
1095 p_in, type, &position, bufferSize, fromOurSelf);
1096 send=receive;
1097 }
1098 }else{
1099
1100 int oldPos=position;
1101 // Two index sets received
1102 unpackIndices(*receive, noRemoteSource, destPairs, destPublish,
1103 p_in, type, &position, bufferSize, fromOurSelf);
1104 if(!sendTwo)
1105 //unpack source entries again as destination entries
1106 position=oldPos;
1107
1108 send = new RemoteIndexList();
1109 unpackIndices(*send, noRemoteDest, sourcePairs, sourcePublish,
1110 p_in, type, &position, bufferSize, fromOurSelf);
1111 }
1112
1113 if(receive->empty() && send->empty()) {
1114 if(send==receive) {
1115 delete send;
1116 }else{
1117 delete send;
1118 delete receive;
1119 }
1120 }else{
1121 remoteIndices_.insert(std::make_pair(remoteProc,
1122 std::make_pair(send,receive)));
1123 }
1124 }
1125
1126
1127 template<typename T, typename A>
1128 template<bool ignorePublic>
1129 inline void RemoteIndices<T,A>::buildRemote(bool includeSelf_)
1130 {
1131 // Processor configuration
1132 int rank, procs;
1133 MPI_Comm_rank(comm_, &rank);
1134 MPI_Comm_size(comm_, &procs);
1135
1136 // number of local indices to publish
1137 // The indices of the destination will be sent.
1138 int sourcePublish, destPublish;
1139
1140 // Do we need to send two index sets?
1141 char sendTwo = (source_ != target_);
1142
1143 if(procs==1 && !(sendTwo || includeSelf_))
1144 // Nothing to communicate
1145 return;
1146
1147 sourcePublish = (ignorePublic) ? source_->size() : noPublic(*source_);
1148
1149 if(sendTwo)
1150 destPublish = (ignorePublic) ? target_->size() : noPublic(*target_);
1151 else
1152 // we only need to send one set of indices
1153 destPublish = 0;
1154
1155 int maxPublish, publish=sourcePublish+destPublish;
1156
1157 // Calculate the maximum number of indices sent
1158 MPI_Allreduce(&publish, &maxPublish, 1, MPI_INT, MPI_MAX, comm_);
1159
1160 // allocate buffers
1161 typedef IndexPair<GlobalIndex,LocalIndex> PairType;
1162
1163 PairType** destPairs;
1164 PairType** sourcePairs = new PairType*[sourcePublish>0 ? sourcePublish : 1];
1165
1166 if(sendTwo)
1167 destPairs = new PairType*[destPublish>0 ? destPublish : 1];
1168 else
1169 destPairs=sourcePairs;
1170
1171 char** buffer = new char*[2];
1172 int bufferSize;
1173 int position=0;
1174 int intSize;
1175 int charSize;
1176
1177 // calculate buffer size
1178 MPI_Datatype type = MPITraits<PairType>::getType();
1179
1180 MPI_Pack_size(maxPublish, type, comm_,
1181 &bufferSize);
1182 MPI_Pack_size(1, MPI_INT, comm_,
1183 &intSize);
1184 MPI_Pack_size(1, MPI_CHAR, comm_,
1185 &charSize);
1186 // Our message will contain the following:
1187 // a bool whether two index sets were sent
1188 // the size of the source and the dest indexset,
1189 // then the source and destination indices
1190 bufferSize += 2 * intSize + charSize;
1191
1192 if(bufferSize<=0) bufferSize=1;
1193
1194 buffer[0] = new char[bufferSize];
1195 buffer[1] = new char[bufferSize];
1196
1197
1198 // pack entries into buffer[0], p_out below!
1199 MPI_Pack(&sendTwo, 1, MPI_CHAR, buffer[0], bufferSize, &position,
1200 comm_);
1201
1202 // The number of indices we send for each index set
1203 MPI_Pack(&sourcePublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1204 comm_);
1205 MPI_Pack(&destPublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1206 comm_);
1207
1208 // Now pack the source indices and setup the destination pairs
1209 packEntries<ignorePublic>(sourcePairs, *source_, buffer[0], type,
1210 bufferSize, &position, sourcePublish);
1211 // If necessary send the dest indices and setup the source pairs
1212 if(sendTwo)
1213 packEntries<ignorePublic>(destPairs, *target_, buffer[0], type,
1214 bufferSize, &position, destPublish);
1215
1216
1217 // Update remote indices for ourself
1218 if(sendTwo|| includeSelf_)
1219 unpackCreateRemote(buffer[0], sourcePairs, destPairs, rank, sourcePublish,
1220 destPublish, bufferSize, sendTwo, includeSelf_);
1221
1222 neighbourIds.erase(rank);
1223
1224 if(neighbourIds.size()==0)
1225 {
1226 Dune::dvverb<<rank<<": Sending messages in a ring"<<std::endl;
1227 // send messages in ring
1228 for(int proc=1; proc<procs; proc++) {
1229 // pointers to the current input and output buffers
1230 char* p_out = buffer[1-(proc%2)];
1231 char* p_in = buffer[proc%2];
1232
1233 MPI_Status status;
1234 if(rank%2==0) {
1235 MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1236 commTag_, comm_);
1237 MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1238 commTag_, comm_, &status);
1239 }else{
1240 MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1241 commTag_, comm_, &status);
1242 MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1243 commTag_, comm_);
1244 }
1245
1246
1247 // The process these indices are from
1248 int remoteProc = (rank+procs-proc)%procs;
1249
1250 unpackCreateRemote(p_in, sourcePairs, destPairs, remoteProc, sourcePublish,
1251 destPublish, bufferSize, sendTwo);
1252
1253 }
1254
1255 }
1256 else
1257 {
1258 MPI_Request* requests=new MPI_Request[neighbourIds.size()];
1259 MPI_Request* req=requests;
1260
1261 typedef typename std::set<int>::size_type size_type;
1262 size_type noNeighbours=neighbourIds.size();
1263
1264 // setup sends
1265 for(std::set<int>::iterator neighbour=neighbourIds.begin();
1266 neighbour!= neighbourIds.end(); ++neighbour) {
1267 // Only send the information to the neighbouring processors
1268 MPI_Issend(buffer[0], position , MPI_PACKED, *neighbour, commTag_, comm_, req++);
1269 }
1270
1271 //Test for received messages
1272
1273 for(size_type received=0; received <noNeighbours; ++received)
1274 {
1275 MPI_Status status;
1276 // probe for next message
1277 MPI_Probe(MPI_ANY_SOURCE, commTag_, comm_, &status);
1278 int remoteProc=status.MPI_SOURCE;
1279 int size;
1280 MPI_Get_count(&status, MPI_PACKED, &size);
1281 // receive message
1282 MPI_Recv(buffer[1], size, MPI_PACKED, remoteProc,
1283 commTag_, comm_, &status);
1284
1285 unpackCreateRemote(buffer[1], sourcePairs, destPairs, remoteProc, sourcePublish,
1286 destPublish, bufferSize, sendTwo);
1287 }
1288 // wait for completion of pending requests
1289 MPI_Status* statuses = new MPI_Status[neighbourIds.size()];
1290
1291 if(MPI_ERR_IN_STATUS==MPI_Waitall(neighbourIds.size(), requests, statuses)) {
1292 for(size_type i=0; i < neighbourIds.size(); ++i)
1293 if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
1294 std::cerr<<rank<<": MPI_Error occurred while receiving message."<<std::endl;
1295 MPI_Abort(comm_, 999);
1296 }
1297 }
1298 delete[] requests;
1299 delete[] statuses;
1300 }
1301
1302
1303 // delete allocated memory
1304 if(destPairs!=sourcePairs)
1305 delete[] destPairs;
1306
1307 delete[] sourcePairs;
1308 delete[] buffer[0];
1309 delete[] buffer[1];
1310 delete[] buffer;
1311 }
1312
1313 template<typename T, typename A>
1314 inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& remote,
1315 int remoteEntries,
1316 PairType** local,
1317 int localEntries,
1318 char* p_in,
1319 MPI_Datatype type,
1320 int* position,
1321 int bufferSize,
1322 bool fromOurSelf)
1323 {
1324 if(remoteEntries==0)
1325 return;
1326
1327 PairType index(1);
1328 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1329 type, comm_);
1330 GlobalIndex oldGlobal=index.global();
1331 int n_in=0, localIndex=0;
1332
1333 //Check if we know the global index
1334 while(localIndex<localEntries) {
1335 if(local[localIndex]->global()==index.global()) {
1336 int oldLocalIndex=localIndex;
1337
1338 while(localIndex<localEntries &&
1339 local[localIndex]->global()==index.global()) {
1340 if(!fromOurSelf || index.local().attribute() !=
1341 local[localIndex]->local().attribute())
1342 // if index is from us it has to have a different attribute
1343 remote.push_back(RemoteIndex(index.local().attribute(),
1344 local[localIndex]));
1345 localIndex++;
1346 }
1347
1348 // unpack next remote index
1349 if((++n_in) < remoteEntries) {
1350 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1351 type, comm_);
1352 if(index.global()==oldGlobal)
1353 // Restart comparison for the same global indices
1354 localIndex=oldLocalIndex;
1355 else
1356 oldGlobal=index.global();
1357 }else{
1358 // No more received indices
1359 break;
1360 }
1361 continue;
1362 }
1363
1364 if (local[localIndex]->global()<index.global()) {
1365 // compare with next entry in our list
1366 ++localIndex;
1367 }else{
1368 // We do not know the index, unpack next
1369 if((++n_in) < remoteEntries) {
1370 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1371 type, comm_);
1372 oldGlobal=index.global();
1373 }else
1374 // No more received indices
1375 break;
1376 }
1377 }
1378
1379 // Unpack the other received indices without doing anything
1380 while(++n_in < remoteEntries)
1381 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1382 type, comm_);
1383 }
1384
1385
1386 template<typename T, typename A>
1387 inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& send,
1388 RemoteIndexList& receive,
1389 int remoteEntries,
1390 PairType** localSource,
1391 int localSourceEntries,
1392 PairType** localDest,
1393 int localDestEntries,
1394 char* p_in,
1395 MPI_Datatype type,
1396 int* position,
1397 int bufferSize)
1398 {
1399 int n_in=0, sourceIndex=0, destIndex=0;
1400
1401 //Check if we know the global index
1402 while(n_in<remoteEntries && (sourceIndex<localSourceEntries || destIndex<localDestEntries)) {
1403 // Unpack next index
1404 PairType index;
1405 MPI_Unpack(p_in, bufferSize, position, &index, 1,
1406 type, comm_);
1407 n_in++;
1408
1409 // Advance until the global indices in localSource and localDest are >= the one in the unpacked index
1410 while(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()<index.global())
1411 sourceIndex++;
1412
1413 while(destIndex<localDestEntries && localDest[destIndex]->global()<index.global())
1414 destIndex++;
1415
1416 // Add a remote index if we found the global index.
1417 if(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()==index.global())
1418 send.push_back(RemoteIndex(index.local().attribute(),
1419 localSource[sourceIndex]));
1420
1421 if(destIndex < localDestEntries && localDest[destIndex]->global() == index.global())
1422 receive.push_back(RemoteIndex(index.local().attribute(),
1423 localDest[destIndex]));
1424 }
1425
1426 }
1427
1428 template<typename T, typename A>
1429 inline void RemoteIndices<T,A>::free()
1430 {
1431 typedef typename RemoteIndexMap::iterator Iterator;
1432 Iterator lend = remoteIndices_.end();
1433 for(Iterator lists=remoteIndices_.begin(); lists != lend; ++lists) {
1434 if(lists->second.first==lists->second.second) {
1435 // there is only one remote index list.
1436 delete lists->second.first;
1437 }else{
1438 delete lists->second.first;
1439 delete lists->second.second;
1440 }
1441 }
1442 remoteIndices_.clear();
1443 firstBuild=true;
1444 }
1445
1446 template<typename T, typename A>
1447 inline int RemoteIndices<T,A>::neighbours() const
1448 {
1449 return remoteIndices_.size();
1450 }
1451
1452 template<typename T, typename A>
1453 template<bool ignorePublic>
1454 inline void RemoteIndices<T,A>::rebuild()
1455 {
1456 // Test whether a rebuild is needed.
1457 if(firstBuild ||
1458 ignorePublic!=publicIgnored || !
1459 isSynced()) {
1460 free();
1461
1462 buildRemote<ignorePublic>(includeSelf);
1463
1464 sourceSeqNo_ = source_->seqNo();
1465 destSeqNo_ = target_->seqNo();
1466 firstBuild=false;
1467 publicIgnored=ignorePublic;
1468 }
1469
1470
1471 }
1472
1473 template<typename T, typename A>
1474 inline bool RemoteIndices<T,A>::isSynced() const
1475 {
1476 return sourceSeqNo_==source_->seqNo() && destSeqNo_ ==target_->seqNo();
1477 }
1478
1479 template<typename T, typename A>
1480 template<bool mode, bool send>
1481 inline RemoteIndexListModifier<T,A,mode> RemoteIndices<T,A>::getModifier(int process)
1482 {
1483
1484 // The users are on their own now!
1485 // We assume they know what they are doing and just set the
1486 // remote indices to synced status.
1487 sourceSeqNo_ = source_->seqNo();
1488 destSeqNo_ = target_->seqNo();
1489
1490 typename RemoteIndexMap::iterator found = remoteIndices_.find(process);
1491
1492 if(found == remoteIndices_.end())
1493 {
1494 if(source_ != target_)
1495 found = remoteIndices_.insert(found, std::make_pair(process,
1496 std::make_pair(new RemoteIndexList(),
1497 new RemoteIndexList())));
1498 else{
1499 RemoteIndexList* rlist = new RemoteIndexList();
1500 found = remoteIndices_.insert(found,
1501 std::make_pair(process,
1502 std::make_pair(rlist, rlist)));
1503 }
1504 }
1505
1506 firstBuild = false;
1507
1508 if(send)
1509 return RemoteIndexListModifier<T,A,mode>(*source_, *(found->second.first));
1510 else
1511 return RemoteIndexListModifier<T,A,mode>(*target_, *(found->second.second));
1512 }
1513
1514 template<typename T, typename A>
1515 inline typename RemoteIndices<T,A>::const_iterator
1516 RemoteIndices<T,A>::find(int proc) const
1517 {
1518 return remoteIndices_.find(proc);
1519 }
1520
1521 template<typename T, typename A>
1522 inline typename RemoteIndices<T,A>::const_iterator
1523 RemoteIndices<T,A>::begin() const
1524 {
1525 return remoteIndices_.begin();
1526 }
1527
1528 template<typename T, typename A>
1529 inline typename RemoteIndices<T,A>::const_iterator
1530 RemoteIndices<T,A>::end() const
1531 {
1532 return remoteIndices_.end();
1533 }
1534
1535
1536 template<typename T, typename A>
1537 bool RemoteIndices<T,A>::operator==(const RemoteIndices& ri)
1538 {
1539 if(neighbours()!=ri.neighbours())
1540 return false;
1541
1542 typedef RemoteIndexList RList;
1543 typedef typename std::map<int,std::pair<RList*,RList*> >::const_iterator const_iterator;
1544
1545 const const_iterator rend = remoteIndices_.end();
1546
1547 for(const_iterator rindex = remoteIndices_.begin(), rindex1=ri.remoteIndices_.begin(); rindex!=rend; ++rindex, ++rindex1) {
1548 if(rindex->first != rindex1->first)
1549 return false;
1550 if(*(rindex->second.first) != *(rindex1->second.first))
1551 return false;
1552 if(*(rindex->second.second) != *(rindex1->second.second))
1553 return false;
1554 }
1555 return true;
1556 }
1557
1558 template<class T, class A, bool mode>
1559 RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const ParallelIndexSet& indexSet,
1560 RemoteIndexList& rList)
1561 : rList_(&rList), indexSet_(&indexSet), iter_(rList.beginModify()), end_(rList.end()), first_(true)
1562 {
1563 if(MODIFYINDEXSET) {
1564 assert(indexSet_);
1565 for(ConstIterator iter=iter_; iter != end_; ++iter)
1566 glist_.push_back(iter->localIndexPair().global());
1567 giter_ = glist_.beginModify();
1568 }
1569 }
1570
1571 template<typename T, typename A, bool mode>
1572 RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const RemoteIndexListModifier<T,A,mode>& other)
1573 : rList_(other.rList_), indexSet_(other.indexSet_),
1574 glist_(other.glist_), iter_(other.iter_), giter_(other.giter_), end_(other.end_),
1575 first_(other.first_), last_(other.last_)
1576 {}
1577
1578 template<typename T, typename A, bool mode>
1579 inline void RemoteIndexListModifier<T,A,mode>::repairLocalIndexPointers() throw(InvalidIndexSetState)
1580 {
1581 if(MODIFYINDEXSET) {
1582 // repair pointers to local index set.
1583#ifdef DUNE_ISTL_WITH_CHECKING
1584 if(indexSet_->state()!=GROUND)
1585 DUNE_THROW(InvalidIndexSetState, "Index has to be in ground mode for repairing pointers to indices");
1586#endif
1587 typedef typename ParallelIndexSet::const_iterator IndexIterator;
1588 typedef typename GlobalList::const_iterator GlobalIterator;
1589 typedef typename RemoteIndexList::iterator Iterator;
1590 GlobalIterator giter = glist_.begin();
1591 IndexIterator index = indexSet_->begin();
1592
1593 for(Iterator iter=rList_->begin(); iter != end_; ++iter) {
1594 while(index->global()<*giter) {
1595 ++index;
1596#ifdef DUNE_ISTL_WITH_CHECKING
1597 if(index == indexSet_->end())
1598 DUNE_THROW(InvalidPosition, "No such global index in set!");
1599#endif
1600 }
1601
1602#ifdef DUNE_ISTL_WITH_CHECKING
1603 if(index->global() != *giter)
1604 DUNE_THROW(InvalidPosition, "No such global index in set!");
1605#endif
1606 iter->localIndex_ = &(*index);
1607 }
1608 }
1609 }
1610
1611 template<typename T, typename A, bool mode>
1612 inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index) throw(InvalidPosition)
1613 {
1614 static_assert(!mode,"Not allowed if the mode indicates that new indices"
1615 "might be added to the underlying index set. Use "
1616 "insert(const RemoteIndex&, const GlobalIndex&) instead");
1617
1618#ifdef DUNE_ISTL_WITH_CHECKING
1619 if(!first_ && index.localIndexPair().global()<last_)
1620 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1621#endif
1622 // Move to the correct position
1623 while(iter_ != end_ && iter_->localIndexPair().global() < index.localIndexPair().global()) {
1624 ++iter_;
1625 }
1626
1627 // No duplicate entries allowed
1628 assert(iter_==end_ || iter_->localIndexPair().global() != index.localIndexPair().global());
1629 iter_.insert(index);
1630 last_ = index.localIndexPair().global();
1631 first_ = false;
1632 }
1633
1634 template<typename T, typename A, bool mode>
1635 inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index, const GlobalIndex& global) throw(InvalidPosition)
1636 {
1637 static_assert(mode,"Not allowed if the mode indicates that no new indices"
1638 "might be added to the underlying index set. Use "
1639 "insert(const RemoteIndex&) instead");
1640#ifdef DUNE_ISTL_WITH_CHECKING
1641 if(!first_ && global<last_)
1642 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1643#endif
1644 // Move to the correct position
1645 while(iter_ != end_ && *giter_ < global) {
1646 ++giter_;
1647 ++iter_;
1648 }
1649
1650 // No duplicate entries allowed
1651 assert(iter_->localIndexPair().global() != global);
1652 iter_.insert(index);
1653 giter_.insert(global);
1654
1655 last_ = global;
1656 first_ = false;
1657 }
1658
1659 template<typename T, typename A, bool mode>
1660 bool RemoteIndexListModifier<T,A,mode>::remove(const GlobalIndex& global) throw(InvalidPosition)
1661 {
1662#ifdef DUNE_ISTL_WITH_CHECKING
1663 if(!first_ && global<last_)
1664 DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1665#endif
1666
1667 bool found= false;
1668
1669 if(MODIFYINDEXSET) {
1670 // Move to the correct position
1671 while(iter_!=end_ && *giter_< global) {
1672 ++giter_;
1673 ++iter_;
1674 }
1675 if(*giter_ == global) {
1676 giter_.remove();
1677 iter_.remove();
1678 found=true;
1679 }
1680 }else{
1681 while(iter_!=end_ && iter_->localIndexPair().global() < global)
1682 ++iter_;
1683
1684 if(iter_->localIndexPair().global()==global) {
1685 iter_.remove();
1686 found = true;
1687 }
1688 }
1689
1690 last_ = global;
1691 first_ = false;
1692 return found;
1693 }
1694
1695 template<typename T, typename A>
1696 template<bool send>
1697 inline CollectiveIterator<T,A> RemoteIndices<T,A>::iterator() const
1698 {
1699 return CollectiveIterator<T,A>(remoteIndices_, send);
1700 }
1701
1702 template<typename T, typename A>
1703 inline MPI_Comm RemoteIndices<T,A>::communicator() const
1704 {
1705 return comm_;
1706
1707 }
1708
1709 template<typename T, typename A>
1710 inline CollectiveIterator<T,A>::CollectiveIterator(const RemoteIndexMap& pmap, bool send)
1711 {
1712 typedef typename RemoteIndexMap::const_iterator const_iterator;
1713
1714 const const_iterator end=pmap.end();
1715 for(const_iterator process=pmap.begin(); process != end; ++process) {
1716 const RemoteIndexList* list = send ? process->second.first : process->second.second;
1718 map_.insert(std::make_pair(process->first,
1719 std::pair<iterator, const iterator>(list->begin(), list->end())));
1720 }
1721 }
1722
1723 template<typename T, typename A>
1724 inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index)
1725 {
1726 typedef typename Map::iterator iterator;
1727 typedef typename Map::const_iterator const_iterator;
1728 const const_iterator end = map_.end();
1729
1730 for(iterator iter = map_.begin(); iter != end;) {
1731 // Step the iterator until we are >= index
1732 typename RemoteIndexList::const_iterator current = iter->second.first;
1733 typename RemoteIndexList::const_iterator rend = iter->second.second;
1734 RemoteIndex remoteIndex;
1735 if(current != rend)
1736 remoteIndex = *current;
1737
1738 while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1739 ++(iter->second.first);
1740
1741 // erase from the map if there are no more entries.
1742 if(iter->second.first == iter->second.second)
1743 map_.erase(iter++);
1744 else{
1745 ++iter;
1746 }
1747 }
1748 index_=index;
1749 noattribute=true;
1750 }
1751
1752 template<typename T, typename A>
1753 inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index,
1754 const Attribute& attribute)
1755 {
1756 typedef typename Map::iterator iterator;
1757 typedef typename Map::const_iterator const_iterator;
1758 const const_iterator end = map_.end();
1759
1760 for(iterator iter = map_.begin(); iter != end;) {
1761 // Step the iterator until we are >= index
1762 typename RemoteIndexList::const_iterator current = iter->second.first;
1763 typename RemoteIndexList::const_iterator rend = iter->second.second;
1764 RemoteIndex remoteIndex;
1765 if(current != rend)
1766 remoteIndex = *current;
1767
1768 // Move to global index or bigger
1769 while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1770 ++(iter->second.first);
1771
1772 // move to attribute or bigger
1773 while(iter->second.first!=iter->second.second
1774 && iter->second.first->localIndexPair().global()==index
1775 && iter->second.first->localIndexPair().local().attribute()<attribute)
1776 ++(iter->second.first);
1777
1778 // erase from the map if there are no more entries.
1779 if(iter->second.first == iter->second.second)
1780 map_.erase(iter++);
1781 else{
1782 ++iter;
1783 }
1784 }
1785 index_=index;
1786 attribute_=attribute;
1787 noattribute=false;
1788 }
1789
1790 template<typename T, typename A>
1791 inline CollectiveIterator<T,A>& CollectiveIterator<T,A>::operator++()
1792 {
1793 typedef typename Map::iterator iterator;
1794 typedef typename Map::const_iterator const_iterator;
1795 const const_iterator end = map_.end();
1796
1797 for(iterator iter = map_.begin(); iter != end;) {
1798 // Step the iterator until we are >= index
1799 typename RemoteIndexList::const_iterator current = iter->second.first;
1800 typename RemoteIndexList::const_iterator rend = iter->second.second;
1801
1802 // move all iterators pointing to the current global index to next value
1803 if(iter->second.first->localIndexPair().global()==index_ &&
1804 (noattribute || iter->second.first->localIndexPair().local().attribute() == attribute_))
1805 ++(iter->second.first);
1806
1807 // erase from the map if there are no more entries.
1808 if(iter->second.first == iter->second.second)
1809 map_.erase(iter++);
1810 else{
1811 ++iter;
1812 }
1813 }
1814 return *this;
1815 }
1816
1817 template<typename T, typename A>
1818 inline bool CollectiveIterator<T,A>::empty()
1819 {
1820 return map_.empty();
1821 }
1822
1823 template<typename T, typename A>
1824 inline typename CollectiveIterator<T,A>::iterator
1825 CollectiveIterator<T,A>::begin()
1826 {
1827 if(noattribute)
1828 return iterator(map_.begin(), map_.end(), index_);
1829 else
1830 return iterator(map_.begin(), map_.end(), index_,
1831 attribute_);
1832 }
1833
1834 template<typename T, typename A>
1835 inline typename CollectiveIterator<T,A>::iterator
1836 CollectiveIterator<T,A>::end()
1837 {
1838 return iterator(map_.end(), map_.end(), index_);
1839 }
1840
1841 template<typename TG, typename TA>
1842 inline std::ostream& operator<<(std::ostream& os, const RemoteIndex<TG,TA>& index)
1843 {
1844 os<<"[global="<<index.localIndexPair().global()<<", remote attribute="<<index.attribute()<<" local attribute="<<index.localIndexPair().local().attribute()<<"]";
1845 return os;
1846 }
1847
1848 template<typename T, typename A>
1849 inline std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices)
1850 {
1851 int rank;
1852 MPI_Comm_rank(indices.comm_, &rank);
1853
1854 typedef typename RemoteIndices<T,A>::RemoteIndexList RList;
1855 typedef typename std::map<int,std::pair<RList*,RList*> >::const_iterator const_iterator;
1856
1857 const const_iterator rend = indices.remoteIndices_.end();
1858
1859 for(const_iterator rindex = indices.remoteIndices_.begin(); rindex!=rend; ++rindex) {
1860 os<<rank<<": Process "<<rindex->first<<":";
1861
1862 if(!rindex->second.first->empty()) {
1863 os<<" send:";
1864
1865 const typename RList::const_iterator send= rindex->second.first->end();
1866
1867 for(typename RList::const_iterator index = rindex->second.first->begin();
1868 index != send; ++index)
1869 os<<*index<<" ";
1870 os<<std::endl;
1871 }
1872 if(!rindex->second.second->empty()) {
1873 os<<rank<<": Process "<<rindex->first<<": "<<"receive: ";
1874
1875 for(const auto& index : *(rindex->second.second))
1876 os << index << " ";
1877 }
1878 os<<std::endl<<std::flush;
1879 }
1880 return os;
1881 }
1883}
1884
1885#endif
1886#endif
Iterator over the valid underlying iterators.
Definition: remoteindices.hh:787
bool operator==(const iterator &other)
Definition: remoteindices.hh:851
iterator(const RealIterator &iter, const ConstRealIterator &end, GlobalIndex &index)
Definition: remoteindices.hh:794
bool operator!=(const iterator &other)
Definition: remoteindices.hh:857
iterator(const iterator &other)
Definition: remoteindices.hh:812
const RemoteIndex & operator*() const
Definition: remoteindices.hh:833
iterator & operator++()
Definition: remoteindices.hh:817
const RemoteIndex * operator->() const
Definition: remoteindices.hh:845
int process() const
Definition: remoteindices.hh:839
A collective iterator for moving over the remote indices for all processes collectively.
Definition: remoteindices.hh:703
CollectiveIterator(const RemoteIndexMap &map_, bool send)
Constructor.
Definition: remoteindices.hh:1710
bool empty()
Checks whether there are still iterators in the map.
Definition: remoteindices.hh:1818
void advance(const GlobalIndex &global)
Advances all underlying iterators.
Definition: remoteindices.hh:1724
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:743
A constant random access iterator for the Dune::ArrayList class.
Definition: arraylist.hh:379
A pair consisting of a global and local index.
Definition: indexset.hh:84
Class for recomputing missing indices of a distributed index set.
Definition: indicessyncer.hh:40
Base class of all classes representing a communication interface.
Definition: interface.hh:33
Exception indicating that the index set is not in the expected state.
Definition: indexset.hh:204
A class setting up standard communication for a two-valued attribute set with owner/overlap/copy semantics.
Definition: owneroverlapcopy.hh:172
Manager class for the mapping between local indices and globally unique indices.
Definition: indexset.hh:217
An index present on the local process with an additional attribute flag.
Definition: plocalindex.hh:47
Default exception class for range errors.
Definition: exceptions.hh:252
Modifier for adding and/or deleting remote indices from the remote index list.
Definition: remoteindices.hh:544
void repairLocalIndexPointers()
Repair the pointers to the local index pairs.
Definition: remoteindices.hh:1579
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:597
@ MODIFYINDEXSET
If true the index set corresponding to the remote indices might get modified.
Definition: remoteindices.hh:562
A Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:593
void insert(const RemoteIndex &index)
Insert an index to the list.
Definition: remoteindices.hh:1612
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:573
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:578
RemoteIndexList::const_iterator ConstIterator
The type of the remote index list iterator.
Definition: remoteindices.hh:607
SLListModifyIterator< RemoteIndex, Allocator > ModifyIterator
The type of the modifying iterator of the remote index list.
Definition: remoteindices.hh:602
bool remove(const GlobalIndex &global)
Remove a remote index.
Definition: remoteindices.hh:1660
T ParallelIndexSet
Type of the index set we use.
Definition: remoteindices.hh:568
RemoteIndexListModifier()
Default constructor.
Definition: remoteindices.hh:671
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:583
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:588
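A hedged sketch of editing a remote index list by hand via getModifier: the names remote, indexSet, LocalIndex and the attribute overlap are the hypothetical ones from the constructor example further down, and the lookup via ParallelIndexSet::operator[] with global index 42 is an assumption.

    typedef Dune::RemoteIndices<PIndexSet>::RemoteIndex RIndex;

    {
      // mode=false: the underlying index set is not modified while the modifier exists;
      // send=true: edit the list of indices we send to process 1.
      auto modifier = remote.getModifier<false, true>(1);

      // Announce that our index pair with global index 42 (hypothetical, must exist locally)
      // is also known on process 1 with attribute 'overlap'.
      const Dune::IndexPair<int, LocalIndex>& pair = indexSet[42];
      modifier.insert(RIndex(overlap, &pair));
    } // the edited list stays inside 'remote'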
Information about an index residing on another processor.
Definition: remoteindices.hh:66
const Attribute attribute() const
Get the attribute of the index on the remote process.
Definition: remoteindices.hh:941
T1 GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: remoteindices.hh:83
T2 Attribute
The type of the attributes. Normally this will be an enumeration type.
Definition: remoteindices.hh:92
IndexPair< GlobalIndex, ParallelLocalIndex< Attribute > > PairType
The type of the index pair.
Definition: remoteindices.hh:98
const PairType & localIndexPair() const
Get the corresponding local index pair.
Definition: remoteindices.hh:947
RemoteIndex()
Parameterless Constructor.
Definition: remoteindices.hh:925
The indices present on remote processes.
Definition: remoteindices.hh:181
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:223
friend void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition: repartition.hh:58
void setIndexSets(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >())
Set the index sets and communicator we work with.
Definition: remoteindices.hh:979
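A short sketch (same hypothetical names as in the constructor example further down): a default-constructed RemoteIndices object can be wired to its index sets later and rebuilt at any time.

    Dune::RemoteIndices<PIndexSet> remote2;                    // empty, not usable yet
    remote2.setIndexSets(sourceSet, destSet, MPI_COMM_WORLD);  // sourceSet/destSet: hypothetical ParallelIndexSets
    remote2.rebuild<false>();                                  // false: exchange only indices marked as public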
void free()
Free the index lists.
Definition: remoteindices.hh:1429
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:207
void rebuild()
Rebuilds the set of remote indices.
Definition: remoteindices.hh:1454
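A small sketch (same hypothetical setup as the constructor example further down): after the underlying index set has been resized, the stored remote index lists are stale and have to be rebuilt.

    indexSet.beginResize();
    indexSet.add(100, LocalIndex(2, owner, true));   // hypothetical new global index 100
    indexSet.endResize();

    if (!remote.isSynced())                          // sequence numbers differ after endResize()
      remote.rebuild<false>();                       // recompute all remote index lists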
T ParallelIndexSet
Type of the index set we use, e.g. ParallelLocalIndexSet.
Definition: remoteindices.hh:198
MPI_Comm communicator() const
Get the mpi communicator used.
Definition: remoteindices.hh:1703
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:218
CollectiveIteratorT iterator() const
Get an iterator for collectively iterating over the remote indices of all remote processes.
Definition: remoteindices.hh:1697
void setIncludeSelf(bool includeSelf)
Tell whether sending from indices of the processor to other indices on the same processor is enabled even if the same index set is used on both the sending and the receiving side.
Definition: remoteindices.hh:966
const_iterator end() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1530
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:237
RemoteIndexListModifier< T, A, mode > getModifier(int process)
Get a modifier for a remote index list.
Definition: remoteindices.hh:1481
const ParallelIndexSet & sourceIndexSet() const
Get the index set at the source.
Definition: remoteindices.hh:994
~RemoteIndices()
Destructor.
Definition: remoteindices.hh:1009
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:233
int neighbours() const
Get the number of processors we share indices with.
Definition: remoteindices.hh:1447
CollectiveIterator< T, A > CollectiveIteratorT
The type of the collective iterator over all remote indices.
Definition: remoteindices.hh:202
const ParallelIndexSet & destinationIndexSet() const
Get the index set at destination.
Definition: remoteindices.hh:1002
RemoteIndices(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >(), bool includeSelf=false)
Constructor.
Definition: remoteindices.hh:953
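A minimal, self-contained usage sketch (not part of the header): the attribute enum, the index layout, and the three-argument ParallelLocalIndex constructor (local index, attribute, isPublic) are assumptions; the header paths are those of dune-common 2.5.

    #include <iostream>
    #include <dune/common/parallel/indexset.hh>
    #include <dune/common/parallel/plocalindex.hh>
    #include <dune/common/parallel/remoteindices.hh>

    enum Flag { owner, overlap };                              // hypothetical attribute set
    typedef Dune::ParallelLocalIndex<Flag> LocalIndex;
    typedef Dune::ParallelIndexSet<int, LocalIndex> PIndexSet;

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, procs;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &procs);

      // Every rank owns global index 'rank' and keeps an overlap copy of its right neighbour's index.
      PIndexSet indexSet;
      indexSet.beginResize();
      indexSet.add(rank, LocalIndex(0, owner, true));
      if (procs > 1)
        indexSet.add((rank + 1) % procs, LocalIndex(1, overlap, true));
      indexSet.endResize();

      // Same index set on the sending and the receiving side, exchange with all processes.
      Dune::RemoteIndices<PIndexSet> remote(indexSet, indexSet, MPI_COMM_WORLD);
      remote.rebuild<true>();                                  // true: publish also indices not marked public

      for (auto it = remote.begin(); it != remote.end(); ++it)
        std::cout << rank << " shares indices with process " << it->first << std::endl;

      MPI_Finalize();
      return 0;
    }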
const_iterator find(int proc) const
Find an iterator over the remote index lists of a specific process.
Definition: remoteindices.hh:1516
bool isSynced() const
Checks whether the remote indices are synced with the indexsets.
Definition: remoteindices.hh:1474
const_iterator begin() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1523
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:213
A::template rebind< RemoteIndex >::other Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:229
A constant iterator for the SLList.
Definition: sllist.hh:369
A single linked list.
Definition: sllist.hh:42
A few common exception classes.
ArrayList< IndexPair, N >::const_iterator const_iterator
The constant iterator over the pairs.
Definition: indexset.hh:305
void repairLocalIndexPointers(std::map< int, SLList< std::pair< typename T::GlobalIndex, typename T::LocalIndex::Attribute >, A > > &globalMap, RemoteIndices< T, A1 > &remoteIndices, const T &indexSet)
Repair the pointers to the local indices in the remote indices.
Definition: indicessyncer.hh:490
iterator begin()
Get an iterator over the indices positioned at the first index.
iterator end()
Get an iterator over the indices positioned after the last index.
TL LocalIndex
The type of the local index, e.g. ParallelLocalIndex.
Definition: indexset.hh:238
TG GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: indexset.hh:225
@ GROUND
The default mode. Indicates that the index set is ready to be used.
Definition: indexset.hh:185
iterator end()
Get an iterator pointing to the end of the list.
Definition: sllist.hh:788
SLListConstIterator< RemoteIndex, Allocator > const_iterator
The constant iterator of the list.
Definition: sllist.hh:72
SLListModifyIterator< GlobalIndex, Allocator > ModifyIterator
The type of the iterator capable of deletion and insertion.
Definition: sllist.hh:101
iterator begin()
Get an iterator pointing to the first element in the list.
Definition: sllist.hh:776
#define DUNE_THROW(E, m)
Definition: exceptions.hh:216
EnableIfInterOperable< T1, T2, bool >::type operator==(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for equality.
Definition: iteratorfacades.hh:233
EnableIfInterOperable< T1, T2, bool >::type operator!=(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for inequality.
Definition: iteratorfacades.hh:255
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:93
Provides a map between global and local indices.
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignment.hh:11
Provides classes for use as the local index in ParallelIndexSet for distributed computing.
An STL-compliant pool allocator.
Implements a singly linked list together with the necessary iterators.
Standard Dune debug streams.
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:39
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters.
Definition: unused.hh:18