Dune Core Modules (2.9.0)

remoteindices.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 // SPDX-FileCopyrightText: Copyright (C) DUNE Project contributors, see file LICENSE.md in module root
4 // SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
5 #ifndef DUNE_REMOTEINDICES_HH
6 #define DUNE_REMOTEINDICES_HH
7 
8 #if HAVE_MPI
9 
10 #include <cassert>
11 #include <iostream>
12 #include <ostream>
13 #include <map>
14 #include <memory>
15 #include <set>
16 #include <utility>
17 #include <vector>
18 
19 #include <mpi.h>
20 
25 #include <dune/common/sllist.hh>
27 
28 namespace Dune {
40  template<typename TG, typename TA>
41  class MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >
42  {
43  public:
44  inline static MPI_Datatype getType();
45  private:
46  static MPI_Datatype type;
47  };
48 
49 
50  template<typename T, typename A>
51  class RemoteIndices;
52 
53  template<typename T1, typename T2>
54  class RemoteIndex;
55 
56  // forward declaration needed for friend declaration.
57  template<typename T>
58  class IndicesSyncer;
59 
60  template<typename T1, typename T2>
61  std::ostream& operator<<(std::ostream& os, const RemoteIndex<T1,T2>& index);
62 
63 
64  template<typename T, typename A, bool mode>
65  class RemoteIndexListModifier;
66 
67 
71  template<typename T1, typename T2>
72  class RemoteIndex
73  {
74  template<typename T>
75  friend class IndicesSyncer;
76 
77  template<typename T, typename A, typename A1>
78  friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> >&,
80  const T&);
81 
82  template<typename T, typename A, bool mode>
83  friend class RemoteIndexListModifier;
84 
85  public:
90  typedef T1 GlobalIndex;
99  typedef T2 Attribute;
100 
105  typedef IndexPair<GlobalIndex,ParallelLocalIndex<Attribute> > PairType;
106 
111  const Attribute attribute() const;
112 
118  const PairType& localIndexPair() const;
119 
123  RemoteIndex();
124 
125 
131  RemoteIndex(const T2& attribute,
132  const PairType* local);
133 
134 
140  RemoteIndex(const T2& attribute);
141 
142  bool operator==(const RemoteIndex& ri) const;
143 
144  bool operator!=(const RemoteIndex& ri) const;
145  private:
147  const PairType* localIndex_;
148 
150  char attribute_;
151  };
152 
153  template<class T, class A>
154  std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices);
155 
156  class InterfaceBuilder;
157 
158  template<class T, class A>
159  class CollectiveIterator;
160 
161  // forward declaration needed for friend declaration.
162  template<class T>
163  class IndicesSyncer;
164 
165  // forward declaration needed for friend declaration.
166  template<typename T1, typename T2>
167  class OwnerOverlapCopyCommunication;
168 
169 
186  template<class T, class A=std::allocator<RemoteIndex<typename T::GlobalIndex,
187  typename T::LocalIndex::Attribute> > >
188  class RemoteIndices
189  {
190  friend class InterfaceBuilder;
191  friend class IndicesSyncer<T>;
192  template<typename T1, typename A2, typename A1>
193  friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T1::GlobalIndex, typename T1::LocalIndex::Attribute>,A2> >&,
195  const T1&);
196 
197  template<class G, class T1, class T2>
198  friend void fillIndexSetHoles(const G& graph, Dune::OwnerOverlapCopyCommunication<T1,T2>& oocomm);
199  friend std::ostream& operator<<<>(std::ostream&, const RemoteIndices<T>&);
200 
201  public:
202 
206  typedef T ParallelIndexSet;
207 
210  typedef CollectiveIterator<T,A> CollectiveIteratorT;
211 
215  typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
216 
217 
221  typedef typename ParallelIndexSet::LocalIndex LocalIndex;
222 
226  typedef typename LocalIndex::Attribute Attribute;
227 
231  typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
232 
233 
237  using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
238 
241  typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
242 
244  typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
245  RemoteIndexMap;
246 
247  typedef typename RemoteIndexMap::const_iterator const_iterator;
248 
266  inline RemoteIndices(const ParallelIndexSet& source, const ParallelIndexSet& destination,
267  const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>(), bool includeSelf=false);
268 
269  RemoteIndices();
270 
278  void setIncludeSelf(bool includeSelf);
279 
296  void setIndexSets(const ParallelIndexSet& source, const ParallelIndexSet& destination,
297  const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>());
298 
299  template<typename C>
300  void setNeighbours(const C& neighbours)
301  {
302  neighbourIds.clear();
303  neighbourIds.insert(neighbours.begin(), neighbours.end());
304 
305  }
306 
307  const std::set<int>& getNeighbours() const
308  {
309  return neighbourIds;
310  }
311 
315  ~RemoteIndices();
316 
326  template<bool ignorePublic>
327  void rebuild();
328 
329  bool operator==(const RemoteIndices& ri) const;
330 
338  inline bool isSynced() const;
339 
343  inline MPI_Comm communicator() const;
344 
359  template<bool mode, bool send>
360  inline RemoteIndexListModifier<T,A,mode> getModifier(int process);
361 
368  inline const_iterator find(int proc) const;
369 
374  inline const_iterator begin() const;
375 
380  inline const_iterator end() const;
381 
385  template<bool send>
386  inline CollectiveIteratorT iterator() const;
387 
391  inline void free();
392 
397  inline int neighbours() const;
398 
400  inline const ParallelIndexSet& sourceIndexSet() const;
401 
403  inline const ParallelIndexSet& destinationIndexSet() const;
404 
405  private:
407  RemoteIndices(const RemoteIndices&) = delete;
408 
410  const ParallelIndexSet* source_;
411 
413  const ParallelIndexSet* target_;
414 
416  MPI_Comm comm_;
417 
420  std::set<int> neighbourIds;
421 
423  const static int commTag_=333;
424 
429  int sourceSeqNo_;
430 
435  int destSeqNo_;
436 
440  bool publicIgnored;
441 
445  bool firstBuild;
446 
447  /*
448  * @brief If true, sending from indices of the processor to other
449  * indices on the same processor is enabled even if the same indexset is used
450  * on both the
451  * sending and receiving side.
452  */
453  bool includeSelf;
454 
456  typedef IndexPair<GlobalIndex, ParallelLocalIndex<Attribute> >
457  PairType;
458 
465  RemoteIndexMap remoteIndices_;
466 
477  template<bool ignorePublic>
478  inline void buildRemote(bool includeSelf);
479 
485  inline int noPublic(const ParallelIndexSet& indexSet);
486 
498  template<bool ignorePublic>
499  inline void packEntries(PairType** myPairs, const ParallelIndexSet& indexSet,
500  char* p_out, MPI_Datatype type, int bufferSize,
501  int* position, int n);
502 
516  inline void unpackIndices(RemoteIndexList& remote, int remoteEntries,
517  PairType** local, int localEntries, char* p_in,
518  MPI_Datatype type, int* position, int bufferSize,
519  bool fromOurself);
520 
521  inline void unpackIndices(RemoteIndexList& send, RemoteIndexList& receive,
522  int remoteEntries, PairType** localSource,
523  int localSourceEntries, PairType** localDest,
524  int localDestEntries, char* p_in,
525  MPI_Datatype type, int* position, int bufferSize);
526 
527  void unpackCreateRemote(char* p_in, PairType** sourcePairs, PairType** DestPairs,
528  int remoteProc, int sourcePublish, int destPublish,
529  int bufferSize, bool sendTwo, bool fromOurSelf=false);
530  };
531 
549  template<class T, class A, bool mode>
550  class RemoteIndexListModifier
551  {
552 
553  template<typename T1, typename A1>
554  friend class RemoteIndices;
555 
556  public:
557  class InvalidPosition : public RangeError
558  {};
559 
568  constexpr static bool MODIFYINDEXSET = mode;
569 
573  typedef T ParallelIndexSet;
574 
578  typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
579 
583  typedef typename ParallelIndexSet::LocalIndex LocalIndex;
584 
588  typedef typename LocalIndex::Attribute Attribute;
589 
593  typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
594 
598  typedef A Allocator;
599 
602  typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
603 
607  typedef SLListModifyIterator<RemoteIndex,Allocator> ModifyIterator;
608 
612  typedef typename RemoteIndexList::const_iterator ConstIterator;
613 
627  void insert(const RemoteIndex& index);
628 
629 
644  void insert(const RemoteIndex& index, const GlobalIndex& global);
645 
653  bool remove(const GlobalIndex& global);
654 
667  void repairLocalIndexPointers();
668 
669 
671 
676  RemoteIndexListModifier()
677  : glist_()
678  {}
679 
680  private:
681 
687  RemoteIndexListModifier(const ParallelIndexSet& indexSet,
688  RemoteIndexList& rList);
689 
690  typedef SLList<GlobalIndex,Allocator> GlobalList;
691  typedef typename GlobalList::ModifyIterator GlobalModifyIterator;
692  RemoteIndexList* rList_;
693  const ParallelIndexSet* indexSet_;
694  GlobalList glist_;
695  ModifyIterator iter_;
696  GlobalModifyIterator giter_;
697  ConstIterator end_;
698  bool first_;
699  GlobalIndex last_;
700  };
701 
706  template<class T, class A>
707  class CollectiveIterator
708  {
709 
713  typedef T ParallelIndexSet;
714 
718  typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
719 
723  typedef typename ParallelIndexSet::LocalIndex LocalIndex;
724 
728  typedef typename LocalIndex::Attribute Attribute;
729 
731  typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
732 
734  using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
735 
737  typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
738 
740  typedef std::map<int,std::pair<typename RemoteIndexList::const_iterator,
741  const typename RemoteIndexList::const_iterator> >
742  Map;
743 
744  public:
745 
747  typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
748  RemoteIndexMap;
749 
755  inline CollectiveIterator(const RemoteIndexMap& map_, bool send);
756 
765  inline void advance(const GlobalIndex& global);
766 
776  inline void advance(const GlobalIndex& global, const Attribute& attribute);
777 
778  CollectiveIterator& operator++();
779 
783  inline bool empty() const;
784 
791  class iterator
792  {
793  public:
794  typedef typename Map::iterator RealIterator;
795  typedef typename Map::iterator ConstRealIterator;
796 
797 
799  iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex& index)
800  : iter_(iter), end_(end), index_(index), hasAttribute(false)
801  {
802  // Move to the first valid entry
803  while(iter_!=end_ && iter_->second.first->localIndexPair().global()!=index_)
804  ++iter_;
805  }
806 
807  iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex index,
808  Attribute attribute)
809  : iter_(iter), end_(end), index_(index), attribute_(attribute), hasAttribute(true)
810  {
811  // Move to the first valid entry or the end
812  while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_
813  || iter_->second.first->localIndexPair().local().attribute()!=attribute))
814  ++iter_;
815  }
817  iterator(const iterator& other)
818  : iter_(other.iter_), end_(other.end_), index_(other.index_)
819  { }
820 
822  iterator& operator++()
823  {
824  ++iter_;
825  // If entry is not valid move on
826  while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_ ||
827  (hasAttribute &&
828  iter_->second.first->localIndexPair().local().attribute()!=attribute_)))
829  ++iter_;
830  assert(iter_==end_ ||
831  (iter_->second.first->localIndexPair().global()==index_));
832  assert(iter_==end_ || !hasAttribute ||
833  (iter_->second.first->localIndexPair().local().attribute()==attribute_));
834  return *this;
835  }
836 
838  const RemoteIndex& operator*() const
839  {
840  return *(iter_->second.first);
841  }
842 
844  int process() const
845  {
846  return iter_->first;
847  }
848 
850  const RemoteIndex* operator->() const
851  {
852  return iter_->second.first.operator->();
853  }
854 
856  bool operator==(const iterator& other) const
857  {
858  return other.iter_==iter_;
859  }
860 
862  bool operator!=(const iterator& other) const
863  {
864  return other.iter_!=iter_;
865  }
866 
867  private:
868  iterator();
869 
870  RealIterator iter_;
871  RealIterator end_;
872  GlobalIndex index_;
873  Attribute attribute_;
874  bool hasAttribute;
875  };
876 
877  iterator begin();
878 
879  iterator end();
880 
881  private:
882 
883  Map map_;
884  GlobalIndex index_;
885  Attribute attribute_;
886  bool noattribute;
887  };
888 
889  template<typename TG, typename TA>
890  MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::getType()
891  {
892  if(type==MPI_DATATYPE_NULL) {
893  int length[2] = {1, 1};
894  MPI_Aint base;
895  MPI_Aint disp[2];
896  MPI_Datatype types[2] = {MPITraits<TG>::getType(),
897  MPITraits<ParallelLocalIndex<TA> >::getType()};
898  IndexPair<TG,ParallelLocalIndex<TA> > rep;
899  MPI_Get_address(&rep, &base); // lower bound of the datatype
900  MPI_Get_address(&(rep.global_), &disp[0]);
901  MPI_Get_address(&(rep.local_), &disp[1]);
902  for (MPI_Aint& d : disp)
903  d -= base;
904 
905  MPI_Datatype tmp;
906  MPI_Type_create_struct(2, length, disp, types, &tmp);
907 
908  MPI_Type_create_resized(tmp, 0, sizeof(IndexPair<TG,ParallelLocalIndex<TA> >), &type);
909  MPI_Type_commit(&type);
910 
911  MPI_Type_free(&tmp);
912  }
913  return type;
914  }
915 
916  template<typename TG, typename TA>
917  MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::type=MPI_DATATYPE_NULL;
918 
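The specialization above builds the MPI datatype for an index pair once, resizes it to the full extent of IndexPair and commits it, so index pairs can be sent or packed directly. A minimal usage sketch, not part of this header; the attribute enum Flag and the destination rank are illustrative assumptions:

    // Obtain the committed datatype and send a single index pair to rank 1.
    using Pair = Dune::IndexPair<int, Dune::ParallelLocalIndex<Flag> >;
    MPI_Datatype pairType = Dune::MPITraits<Pair>::getType();
    Pair pair;   // assume it was taken from a ParallelIndexSet elsewhere
    MPI_Send(&pair, 1, pairType, /* dest */ 1, /* tag */ 0, MPI_COMM_WORLD);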
919  template<typename T1, typename T2>
920  RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute, const PairType* local)
921  : localIndex_(local), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
922  {}
923 
924  template<typename T1, typename T2>
925  RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute)
926  : localIndex_(0), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
927  {}
928 
929  template<typename T1, typename T2>
930  RemoteIndex<T1,T2>::RemoteIndex()
931  : localIndex_(0), attribute_()
932  {}
933  template<typename T1, typename T2>
934  inline bool RemoteIndex<T1,T2>::operator==(const RemoteIndex& ri) const
935  {
936  return localIndex_==ri.localIndex_ && attribute_==ri.attribute_;
937  }
938 
939  template<typename T1, typename T2>
940  inline bool RemoteIndex<T1,T2>::operator!=(const RemoteIndex& ri) const
941  {
942  return localIndex_!=ri.localIndex_ || attribute_!=ri.attribute_;
943  }
944 
945  template<typename T1, typename T2>
946  inline const T2 RemoteIndex<T1,T2>::attribute() const
947  {
948  return T2(attribute_);
949  }
950 
951  template<typename T1, typename T2>
952  inline const typename RemoteIndex<T1,T2>::PairType& RemoteIndex<T1,T2>::localIndexPair() const
953  {
954  return *localIndex_;
955  }
956 
957  template<typename T, typename A>
958  inline RemoteIndices<T,A>::RemoteIndices(const ParallelIndexSet& source,
959  const ParallelIndexSet& destination,
960  const MPI_Comm& comm,
961  const std::vector<int>& neighbours,
962  bool includeSelf_)
963  : source_(&source), target_(&destination), comm_(comm),
964  sourceSeqNo_(-1), destSeqNo_(-1), publicIgnored(false), firstBuild(true),
965  includeSelf(includeSelf_)
966  {
967  setNeighbours(neighbours);
968  }
969 
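A minimal sketch of the typical setup, not part of this header; it assumes an already populated parallel index set `indexSet` of type `ParallelIndexSet` over the attribute enum from the sketch above, all other names are illustrative:

    // Use the same index set on the sending and the receiving side.
    using RIs = Dune::RemoteIndices<ParallelIndexSet>;
    RIs remote(indexSet, indexSet, MPI_COMM_WORLD);
    remote.rebuild<false>();   // false: exchange only indices marked public
    std::cout << "sharing indices with " << remote.neighbours()
              << " other processes" << std::endl;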
970  template<typename T, typename A>
971  void RemoteIndices<T,A>::setIncludeSelf(bool b)
972  {
973  includeSelf=b;
974  }
975 
976  template<typename T, typename A>
977  RemoteIndices<T,A>::RemoteIndices()
978  : source_(0), target_(0), sourceSeqNo_(-1),
979  destSeqNo_(-1), publicIgnored(false), firstBuild(true),
980  includeSelf(false)
981  {}
982 
983  template<class T, typename A>
984  void RemoteIndices<T,A>::setIndexSets(const ParallelIndexSet& source,
985  const ParallelIndexSet& destination,
986  const MPI_Comm& comm,
987  const std::vector<int>& neighbours)
988  {
989  free();
990  source_ = &source;
991  target_ = &destination;
992  comm_ = comm;
993  firstBuild = true;
994  setNeighbours(neighbours);
995  }
996 
997  template<typename T, typename A>
998  const typename RemoteIndices<T,A>::ParallelIndexSet&
999  RemoteIndices<T,A>::sourceIndexSet() const
1000  {
1001  return *source_;
1002  }
1003 
1004 
1005  template<typename T, typename A>
1006  const typename RemoteIndices<T,A>::ParallelIndexSet&
1007  RemoteIndices<T,A>::destinationIndexSet() const
1008  {
1009  return *target_;
1010  }
1011 
1012 
1013  template<typename T, typename A>
1014  RemoteIndices<T,A>::~RemoteIndices()
1015  {
1016  free();
1017  }
1018 
1019  template<typename T, typename A>
1020  template<bool ignorePublic>
1021  inline void RemoteIndices<T,A>::packEntries(PairType** pairs,
1022  const ParallelIndexSet& indexSet,
1023  char* p_out, MPI_Datatype type,
1024  int bufferSize,
1025  int *position,
1026  [[maybe_unused]] int n)
1027  {
1028  // fill with own indices
1029  const auto end = indexSet.end();
1030 
1031  //Now pack the source indices
1032  int i=0;
1033  for(auto index = indexSet.begin(); index != end; ++index)
1034  if(ignorePublic || index->local().isPublic()) {
1035 
1036  MPI_Pack(const_cast<PairType*>(&(*index)), 1,
1037  type,
1038  p_out, bufferSize, position, comm_);
1039  pairs[i++] = const_cast<PairType*>(&(*index));
1040 
1041  }
1042  assert(i==n);
1043  }
1044 
1045  template<typename T, typename A>
1046  inline int RemoteIndices<T,A>::noPublic(const ParallelIndexSet& indexSet)
1047  {
1048 
1049  int noPublic=0;
1050 
1051  const auto end=indexSet.end();
1052  for(auto index=indexSet.begin(); index!=end; ++index)
1053  if(index->local().isPublic())
1054  noPublic++;
1055 
1056  return noPublic;
1057 
1058  }
1059 
1060 
1061  template<typename T, typename A>
1062  inline void RemoteIndices<T,A>::unpackCreateRemote(char* p_in, PairType** sourcePairs,
1063  PairType** destPairs, int remoteProc,
1064  int sourcePublish, int destPublish,
1065  int bufferSize, bool sendTwo,
1066  bool fromOurSelf)
1067  {
1068 
1069  // unpack the number of indices we received
1070  int noRemoteSource=-1, noRemoteDest=-1;
1071  char twoIndexSets=0;
1072  int position=0;
1073  // Did we receive two index sets?
1074  MPI_Unpack(p_in, bufferSize, &position, &twoIndexSets, 1, MPI_CHAR, comm_);
1075  // The number of source indices received
1076  MPI_Unpack(p_in, bufferSize, &position, &noRemoteSource, 1, MPI_INT, comm_);
1077  // The number of destination indices received
1078  MPI_Unpack(p_in, bufferSize, &position, &noRemoteDest, 1, MPI_INT, comm_);
1079 
1080 
1081  // Indices for which we receive
1082  RemoteIndexList* receive= new RemoteIndexList();
1083  // Indices for which we send
1084  RemoteIndexList* send=0;
1085 
1086  MPI_Datatype type= MPITraits<PairType>::getType();
1087 
1088  if(!twoIndexSets) {
1089  if(sendTwo) {
1090  send = new RemoteIndexList();
1091  // Create both remote index sets simultaneously
1092  unpackIndices(*send, *receive, noRemoteSource, sourcePairs, sourcePublish,
1093  destPairs, destPublish, p_in, type, &position, bufferSize);
1094  }else{
1095  // we only need one list
1096  unpackIndices(*receive, noRemoteSource, sourcePairs, sourcePublish,
1097  p_in, type, &position, bufferSize, fromOurSelf);
1098  send=receive;
1099  }
1100  }else{
1101 
1102  int oldPos=position;
1103  // Two index sets received
1104  unpackIndices(*receive, noRemoteSource, destPairs, destPublish,
1105  p_in, type, &position, bufferSize, fromOurSelf);
1106  if(!sendTwo)
1107  //unpack source entries again as destination entries
1108  position=oldPos;
1109 
1110  send = new RemoteIndexList();
1111  unpackIndices(*send, noRemoteDest, sourcePairs, sourcePublish,
1112  p_in, type, &position, bufferSize, fromOurSelf);
1113  }
1114 
1115  if(receive->empty() && send->empty()) {
1116  if(send==receive) {
1117  delete send;
1118  }else{
1119  delete send;
1120  delete receive;
1121  }
1122  }else{
1123  remoteIndices_.insert(std::make_pair(remoteProc,
1124  std::make_pair(send,receive)));
1125  }
1126  }
1127 
1128 
1129  template<typename T, typename A>
1130  template<bool ignorePublic>
1131  inline void RemoteIndices<T,A>::buildRemote(bool includeSelf_)
1132  {
1133  // Processor configuration
1134  int rank, procs;
1135  MPI_Comm_rank(comm_, &rank);
1136  MPI_Comm_size(comm_, &procs);
1137 
1138  // number of local indices to publish
1139  // The indices of the destination will be sent.
1140  int sourcePublish, destPublish;
1141 
1142  // Do we need to send two index sets?
1143  char sendTwo = (source_ != target_);
1144 
1145  if(procs==1 && !(sendTwo || includeSelf_))
1146  // Nothing to communicate
1147  return;
1148 
1149  sourcePublish = (ignorePublic) ? source_->size() : noPublic(*source_);
1150 
1151  if(sendTwo)
1152  destPublish = (ignorePublic) ? target_->size() : noPublic(*target_);
1153  else
1154  // we only need to send one set of indices
1155  destPublish = 0;
1156 
1157  int maxPublish, publish=sourcePublish+destPublish;
1158 
1159  // Calculate the maximum number of indices sent
1160  MPI_Allreduce(&publish, &maxPublish, 1, MPI_INT, MPI_MAX, comm_);
1161 
1162  // allocate buffers
1163  PairType** destPairs;
1164  PairType** sourcePairs = new PairType*[sourcePublish>0 ? sourcePublish : 1];
1165 
1166  if(sendTwo)
1167  destPairs = new PairType*[destPublish>0 ? destPublish : 1];
1168  else
1169  destPairs=sourcePairs;
1170 
1171  char** buffer = new char*[2];
1172  int bufferSize;
1173  int position=0;
1174  int intSize;
1175  int charSize;
1176 
1177  // calculate buffer size
1178  MPI_Datatype type = MPITraits<PairType>::getType();
1179 
1180  MPI_Pack_size(maxPublish, type, comm_,
1181  &bufferSize);
1182  MPI_Pack_size(1, MPI_INT, comm_,
1183  &intSize);
1184  MPI_Pack_size(1, MPI_CHAR, comm_,
1185  &charSize);
1186  // Our message will contain the following:
1187  // a bool whether two index sets were sent
1188  // the size of the source and the dest indexset,
1189  // then the source and destination indices
1190  bufferSize += 2 * intSize + charSize;
1191 
1192  if(bufferSize<=0) bufferSize=1;
1193 
1194  buffer[0] = new char[bufferSize];
1195  buffer[1] = new char[bufferSize];
1196 
1197 
1198  // pack entries into buffer[0], p_out below!
1199  MPI_Pack(&sendTwo, 1, MPI_CHAR, buffer[0], bufferSize, &position,
1200  comm_);
1201 
1202  // The number of indices we send for each index set
1203  MPI_Pack(&sourcePublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1204  comm_);
1205  MPI_Pack(&destPublish, 1, MPI_INT, buffer[0], bufferSize, &position,
1206  comm_);
1207 
1208  // Now pack the source indices and setup the destination pairs
1209  packEntries<ignorePublic>(sourcePairs, *source_, buffer[0], type,
1210  bufferSize, &position, sourcePublish);
1211  // If necessary send the dest indices and setup the source pairs
1212  if(sendTwo)
1213  packEntries<ignorePublic>(destPairs, *target_, buffer[0], type,
1214  bufferSize, &position, destPublish);
1215 
1216 
1217  // Update remote indices for ourself
1218  if(sendTwo|| includeSelf_)
1219  unpackCreateRemote(buffer[0], sourcePairs, destPairs, rank, sourcePublish,
1220  destPublish, bufferSize, sendTwo, includeSelf_);
1221 
1222  neighbourIds.erase(rank);
1223 
1224  if(neighbourIds.size()==0)
1225  {
1226  Dune::dvverb<<rank<<": Sending messages in a ring"<<std::endl;
1227  // send messages in ring
1228  for(int proc=1; proc<procs; proc++) {
1229  // pointers to the current input and output buffers
1230  char* p_out = buffer[1-(proc%2)];
1231  char* p_in = buffer[proc%2];
1232 
1233  MPI_Status status;
1234  if(rank%2==0) {
1235  MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1236  commTag_, comm_);
1237  MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1238  commTag_, comm_, &status);
1239  }else{
1240  MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
1241  commTag_, comm_, &status);
1242  MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
1243  commTag_, comm_);
1244  }
1245 
1246 
1247  // The process these indices are from
1248  int remoteProc = (rank+procs-proc)%procs;
1249 
1250  unpackCreateRemote(p_in, sourcePairs, destPairs, remoteProc, sourcePublish,
1251  destPublish, bufferSize, sendTwo);
1252 
1253  }
1254 
1255  }
1256  else
1257  {
1258  MPI_Request* requests=new MPI_Request[neighbourIds.size()];
1259  MPI_Request* req=requests;
1260 
1261  typedef typename std::set<int>::size_type size_type;
1262  size_type noNeighbours=neighbourIds.size();
1263 
1264  // setup sends
1265  for(std::set<int>::iterator neighbour=neighbourIds.begin();
1266  neighbour!= neighbourIds.end(); ++neighbour) {
1267  // Only send the information to the neighbouring processors
1268  MPI_Issend(buffer[0], position , MPI_PACKED, *neighbour, commTag_, comm_, req++);
1269  }
1270 
1271  //Test for received messages
1272 
1273  for(size_type received=0; received <noNeighbours; ++received)
1274  {
1275  MPI_Status status;
1276  // probe for next message
1277  MPI_Probe(MPI_ANY_SOURCE, commTag_, comm_, &status);
1278  int remoteProc=status.MPI_SOURCE;
1279  int size;
1280  MPI_Get_count(&status, MPI_PACKED, &size);
1281  // receive message
1282  MPI_Recv(buffer[1], size, MPI_PACKED, remoteProc,
1283  commTag_, comm_, &status);
1284 
1285  unpackCreateRemote(buffer[1], sourcePairs, destPairs, remoteProc, sourcePublish,
1286  destPublish, bufferSize, sendTwo);
1287  }
1288  // wait for completion of pending requests
1289  MPI_Status* statuses = new MPI_Status[neighbourIds.size()];
1290 
1291  if(MPI_ERR_IN_STATUS==MPI_Waitall(neighbourIds.size(), requests, statuses)) {
1292  for(size_type i=0; i < neighbourIds.size(); ++i)
1293  if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
1294  std::cerr<<rank<<": MPI_Error occurred while receiving message."<<std::endl;
1295  MPI_Abort(comm_, 999);
1296  }
1297  }
1298  delete[] requests;
1299  delete[] statuses;
1300  }
1301 
1302 
1303  // delete allocated memory
1304  if(destPairs!=sourcePairs)
1305  delete[] destPairs;
1306 
1307  delete[] sourcePairs;
1308  delete[] buffer[0];
1309  delete[] buffer[1];
1310  delete[] buffer;
1311  }
1312 
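buildRemote() falls back to the ring exchange above when no neighbours are known, which is robust but needs procs-1 communication rounds. If the communication graph is known in advance it can be supplied via setNeighbours(), so that only point-to-point messages with those ranks are exchanged. A sketch, assuming `remote` is the RemoteIndices object from the earlier sketch and `left`/`right` are valid ranks of the communicator:

    // Restrict the exchange during rebuild() to the known neighbour ranks.
    std::vector<int> neighbourRanks = {left, right};
    remote.setNeighbours(neighbourRanks);
    remote.rebuild<false>();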
1313  template<typename T, typename A>
1314  inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& remote,
1315  int remoteEntries,
1316  PairType** local,
1317  int localEntries,
1318  char* p_in,
1319  MPI_Datatype type,
1320  int* position,
1321  int bufferSize,
1322  bool fromOurSelf)
1323  {
1324  if(remoteEntries==0)
1325  return;
1326 
1327  PairType index;
1328  MPI_Unpack(p_in, bufferSize, position, &index, 1,
1329  type, comm_);
1330  GlobalIndex oldGlobal=index.global();
1331  int n_in=0, localIndex=0;
1332 
1333  //Check if we know the global index
1334  while(localIndex<localEntries) {
1335  if(local[localIndex]->global()==index.global()) {
1336  int oldLocalIndex=localIndex;
1337 
1338  while(localIndex<localEntries &&
1339  local[localIndex]->global()==index.global()) {
1340  if(!fromOurSelf || index.local().attribute() !=
1341  local[localIndex]->local().attribute())
1342  // if index is from us it has to have a different attribute
1343  remote.push_back(RemoteIndex(index.local().attribute(),
1344  local[localIndex]));
1345  localIndex++;
1346  }
1347 
1348  // unpack next remote index
1349  if((++n_in) < remoteEntries) {
1350  MPI_Unpack(p_in, bufferSize, position, &index, 1,
1351  type, comm_);
1352  if(index.global()==oldGlobal)
1353  // Restart comparison for the same global indices
1354  localIndex=oldLocalIndex;
1355  else
1356  oldGlobal=index.global();
1357  }else{
1358  // No more received indices
1359  break;
1360  }
1361  continue;
1362  }
1363 
1364  if (local[localIndex]->global()<index.global()) {
1365  // compare with next entry in our list
1366  ++localIndex;
1367  }else{
1368  // We do not know the index, unpack next
1369  if((++n_in) < remoteEntries) {
1370  MPI_Unpack(p_in, bufferSize, position, &index, 1,
1371  type, comm_);
1372  oldGlobal=index.global();
1373  }else
1374  // No more received indices
1375  break;
1376  }
1377  }
1378 
1379  // Unpack the other received indices without doing anything
1380  while(++n_in < remoteEntries)
1381  MPI_Unpack(p_in, bufferSize, position, &index, 1,
1382  type, comm_);
1383  }
1384 
1385 
1386  template<typename T, typename A>
1387  inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& send,
1388  RemoteIndexList& receive,
1389  int remoteEntries,
1390  PairType** localSource,
1391  int localSourceEntries,
1392  PairType** localDest,
1393  int localDestEntries,
1394  char* p_in,
1395  MPI_Datatype type,
1396  int* position,
1397  int bufferSize)
1398  {
1399  int n_in=0, sourceIndex=0, destIndex=0;
1400 
1401  //Check if we know the global index
1402  while(n_in<remoteEntries && (sourceIndex<localSourceEntries || destIndex<localDestEntries)) {
1403  // Unpack next index
1404  PairType index;
1405  MPI_Unpack(p_in, bufferSize, position, &index, 1,
1406  type, comm_);
1407  n_in++;
1408 
1409  // Advance until the global indices in localSource and localDest are >= the one in the unpacked index
1410  while(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()<index.global())
1411  sourceIndex++;
1412 
1413  while(destIndex<localDestEntries && localDest[destIndex]->global()<index.global())
1414  destIndex++;
1415 
1416  // Add a remote index if we found the global index.
1417  if(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()==index.global())
1418  send.push_back(RemoteIndex(index.local().attribute(),
1419  localSource[sourceIndex]));
1420 
1421  if(destIndex < localDestEntries && localDest[destIndex]->global() == index.global())
1422  receive.push_back(RemoteIndex(index.local().attribute(),
1423  localDest[destIndex]));
1424  }
1425 
1426  }
1427 
1428  template<typename T, typename A>
1429  inline void RemoteIndices<T,A>::free()
1430  {
1431  auto lend = remoteIndices_.end();
1432  for(auto lists=remoteIndices_.begin(); lists != lend; ++lists) {
1433  if(lists->second.first==lists->second.second) {
1434  // there is only one remote index list.
1435  delete lists->second.first;
1436  }else{
1437  delete lists->second.first;
1438  delete lists->second.second;
1439  }
1440  }
1441  remoteIndices_.clear();
1442  firstBuild=true;
1443  }
1444 
1445  template<typename T, typename A>
1446  inline int RemoteIndices<T,A>::neighbours() const
1447  {
1448  return remoteIndices_.size();
1449  }
1450 
1451  template<typename T, typename A>
1452  template<bool ignorePublic>
1453  inline void RemoteIndices<T,A>::rebuild()
1454  {
1455  // Test whether a rebuild is needed.
1456  if(firstBuild ||
1457  ignorePublic!=publicIgnored || !
1458  isSynced()) {
1459  free();
1460 
1461  buildRemote<ignorePublic>(includeSelf);
1462 
1463  sourceSeqNo_ = source_->seqNo();
1464  destSeqNo_ = target_->seqNo();
1465  firstBuild=false;
1466  publicIgnored=ignorePublic;
1467  }
1468 
1469 
1470  }
1471 
1472  template<typename T, typename A>
1473  inline bool RemoteIndices<T,A>::isSynced() const
1474  {
1475  return sourceSeqNo_==source_->seqNo() && destSeqNo_ ==target_->seqNo();
1476  }
1477 
1478  template<typename T, typename A>
1479  template<bool mode, bool send>
1480  inline RemoteIndexListModifier<T,A,mode> RemoteIndices<T,A>::getModifier(int process)
1481  {
1482 
1483  // The user is on their own now!
1484  // We assume they know what they are doing and just set the
1485  // remote indices to synced status.
1486  sourceSeqNo_ = source_->seqNo();
1487  destSeqNo_ = target_->seqNo();
1488 
1489  typename RemoteIndexMap::iterator found = remoteIndices_.find(process);
1490 
1491  if(found == remoteIndices_.end())
1492  {
1493  if(source_ != target_)
1494  found = remoteIndices_.insert(found, std::make_pair(process,
1495  std::make_pair(new RemoteIndexList(),
1496  new RemoteIndexList())));
1497  else{
1498  RemoteIndexList* rlist = new RemoteIndexList();
1499  found = remoteIndices_.insert(found,
1500  std::make_pair(process,
1501  std::make_pair(rlist, rlist)));
1502  }
1503  }
1504 
1505  firstBuild = false;
1506 
1507  if(send)
1508  return RemoteIndexListModifier<T,A,mode>(*source_, *(found->second.first));
1509  else
1510  return RemoteIndexListModifier<T,A,mode>(*target_, *(found->second.second));
1511  }
1512 
1513  template<typename T, typename A>
1514  inline typename RemoteIndices<T,A>::const_iterator
1515  RemoteIndices<T,A>::find(int proc) const
1516  {
1517  return remoteIndices_.find(proc);
1518  }
1519 
1520  template<typename T, typename A>
1521  inline typename RemoteIndices<T,A>::const_iterator
1522  RemoteIndices<T,A>::begin() const
1523  {
1524  return remoteIndices_.begin();
1525  }
1526 
1527  template<typename T, typename A>
1528  inline typename RemoteIndices<T,A>::const_iterator
1529  RemoteIndices<T,A>::end() const
1530  {
1531  return remoteIndices_.end();
1532  }
1533 
1534 
1535  template<typename T, typename A>
1536  bool RemoteIndices<T,A>::operator==(const RemoteIndices& ri) const
1537  {
1538  if(neighbours()!=ri.neighbours())
1539  return false;
1540 
1541  const auto rend = remoteIndices_.end();
1542 
1543  for(auto rindex = remoteIndices_.begin(), rindex1=ri.remoteIndices_.begin(); rindex!=rend; ++rindex, ++rindex1) {
1544  if(rindex->first != rindex1->first)
1545  return false;
1546  if(*(rindex->second.first) != *(rindex1->second.first))
1547  return false;
1548  if(*(rindex->second.second) != *(rindex1->second.second))
1549  return false;
1550  }
1551  return true;
1552  }
1553 
1554  template<class T, class A, bool mode>
1555  RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const ParallelIndexSet& indexSet,
1556  RemoteIndexList& rList)
1557  : rList_(&rList), indexSet_(&indexSet), iter_(rList.beginModify()), end_(rList.end()), first_(true)
1558  {
1559  if(MODIFYINDEXSET) {
1560  assert(indexSet_);
1561  for(ConstIterator iter=iter_; iter != end_; ++iter)
1562  glist_.push_back(iter->localIndexPair().global());
1563  giter_ = glist_.beginModify();
1564  }
1565  }
1566 
1567  template<typename T, typename A, bool mode>
1568  RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const RemoteIndexListModifier<T,A,mode>& other)
1569  : rList_(other.rList_), indexSet_(other.indexSet_),
1570  glist_(other.glist_), iter_(other.iter_), giter_(other.giter_), end_(other.end_),
1571  first_(other.first_), last_(other.last_)
1572  {}
1573 
1574  template<typename T, typename A, bool mode>
1575  inline void RemoteIndexListModifier<T,A,mode>::repairLocalIndexPointers()
1576  {
1577  if(MODIFYINDEXSET) {
1578  // repair pointers to local index set.
1579 #ifdef DUNE_ISTL_WITH_CHECKING
1580  if(indexSet_->state()!=GROUND)
1581  DUNE_THROW(InvalidIndexSetState, "Index has to be in ground mode for repairing pointers to indices");
1582 #endif
1583  auto giter = glist_.begin();
1584  auto index = indexSet_->begin();
1585 
1586  for(auto iter=rList_->begin(); iter != end_; ++iter) {
1587  while(index->global()<*giter) {
1588  ++index;
1589 #ifdef DUNE_ISTL_WITH_CHECKING
1590  if(index == indexSet_->end())
1591  DUNE_THROW(InvalidPosition, "No such global index in set!");
1592 #endif
1593  }
1594 
1595 #ifdef DUNE_ISTL_WITH_CHECKING
1596  if(index->global() != *giter)
1597  DUNE_THROW(InvalidPosition, "No such global index in set!");
1598 #endif
1599  iter->localIndex_ = &(*index);
1600  }
1601  }
1602  }
1603 
1604  template<typename T, typename A, bool mode>
1605  inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index)
1606  {
1607  static_assert(!mode,"Not allowed if the mode indicates that new indices "
1608  "might be added to the underlying index set. Use "
1609  "insert(const RemoteIndex&, const GlobalIndex&) instead");
1610 
1611 #ifdef DUNE_ISTL_WITH_CHECKING
1612  if(!first_ && index.localIndexPair().global()<last_)
1613  DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1614 #endif
1615  // Move to the correct position
1616  while(iter_ != end_ && iter_->localIndexPair().global() < index.localIndexPair().global()) {
1617  ++iter_;
1618  }
1619 
1620  // No duplicate entries allowed
1621  assert(iter_==end_ || iter_->localIndexPair().global() != index.localIndexPair().global());
1622  iter_.insert(index);
1623  last_ = index.localIndexPair().global();
1624  first_ = false;
1625  }
1626 
1627  template<typename T, typename A, bool mode>
1628  inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index, const GlobalIndex& global)
1629  {
1630  static_assert(mode,"Not allowed if the mode indicates that no new indices "
1631  "might be added to the underlying index set. Use "
1632  "insert(const RemoteIndex&) instead");
1633 #ifdef DUNE_ISTL_WITH_CHECKING
1634  if(!first_ && global<last_)
1635  DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1636 #endif
1637  // Move to the correct position
1638  while(iter_ != end_ && *giter_ < global) {
1639  ++giter_;
1640  ++iter_;
1641  }
1642 
1643  // No duplicate entries allowed
1644  assert(iter_->localIndexPair().global() != global);
1645  iter_.insert(index);
1646  giter_.insert(global);
1647 
1648  last_ = global;
1649  first_ = false;
1650  }
1651 
1652  template<typename T, typename A, bool mode>
1653  inline bool RemoteIndexListModifier<T,A,mode>::remove(const GlobalIndex& global)
1654  {
1655 #ifdef DUNE_ISTL_WITH_CHECKING
1656  if(!first_ && global<last_)
1657  DUNE_THROW(InvalidPosition, "Modification of remote indices has to occur with ascending global index.");
1658 #endif
1659 
1660  bool found= false;
1661 
1662  if(MODIFYINDEXSET) {
1663  // Move to the correct position
1664  while(iter_!=end_ && *giter_< global) {
1665  ++giter_;
1666  ++iter_;
1667  }
1668  if(*giter_ == global) {
1669  giter_.remove();
1670  iter_.remove();
1671  found=true;
1672  }
1673  }else{
1674  while(iter_!=end_ && iter_->localIndexPair().global() < global)
1675  ++iter_;
1676 
1677  if(iter_->localIndexPair().global()==global) {
1678  iter_.remove();
1679  found = true;
1680  }
1681  }
1682 
1683  last_ = global;
1684  first_ = false;
1685  return found;
1686  }
1687 
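A sketch of how the modifier is obtained and used, reusing the alias RIs, the enum Flag and the object `remote` from the earlier sketches; `pair` is assumed to be a pointer to an index pair of the source index set whose global index is larger than anything already in the list:

    // Get a modifier for the send list of rank 1. mode == false promises that
    // the underlying index set stays unchanged, so the one-argument insert()
    // is the right overload; entries must be added with ascending global index.
    auto modifier = remote.getModifier<false, true>(1);
    modifier.insert(RIs::RemoteIndex(Flag::owner, pair));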
1688  template<typename T, typename A>
1689  template<bool send>
1690  inline CollectiveIterator<T,A> RemoteIndices<T,A>::iterator() const
1691  {
1692  return CollectiveIterator<T,A>(remoteIndices_, send);
1693  }
1694 
1695  template<typename T, typename A>
1696  inline MPI_Comm RemoteIndices<T,A>::communicator() const
1697  {
1698  return comm_;
1699 
1700  }
1701 
1702  template<typename T, typename A>
1703  inline CollectiveIterator<T,A>::CollectiveIterator(const RemoteIndexMap& pmap, bool send)
1704  {
1705 
1706  const auto end = pmap.end();
1707  for(auto process = pmap.begin(); process != end; ++process) {
1708  const RemoteIndexList* list = send ? process->second.first : process->second.second;
1709  using ri_iterator = typename RemoteIndexList::const_iterator;
1710  map_.insert(std::make_pair(process->first,
1711  std::pair<ri_iterator, const ri_iterator>(list->begin(), list->end())));
1712  }
1713  }
1714 
1715  template<typename T, typename A>
1716  inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index)
1717  {
1718  const auto end = map_.end();
1719 
1720  for(auto iter = map_.begin(); iter != end;) {
1721  // Step the iterator until we are >= index
1722  typename RemoteIndexList::const_iterator current = iter->second.first;
1723  typename RemoteIndexList::const_iterator rend = iter->second.second;
1724  RemoteIndex remoteIndex;
1725  if(current != rend)
1726  remoteIndex = *current;
1727 
1728  while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1729  ++(iter->second.first);
1730 
1731  // erase from the map if there are no more entries.
1732  if(iter->second.first == iter->second.second)
1733  map_.erase(iter++);
1734  else{
1735  ++iter;
1736  }
1737  }
1738  index_=index;
1739  noattribute=true;
1740  }
1741 
1742  template<typename T, typename A>
1743  inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index,
1744  const Attribute& attribute)
1745  {
1746  const auto end = map_.end();
1747 
1748  for(auto iter = map_.begin(); iter != end;) {
1749  // Step the iterator until we are >= index
1750  typename RemoteIndexList::const_iterator current = iter->second.first;
1751  typename RemoteIndexList::const_iterator rend = iter->second.second;
1752  RemoteIndex remoteIndex;
1753  if(current != rend)
1754  remoteIndex = *current;
1755 
1756  // Move to global index or bigger
1757  while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
1758  ++(iter->second.first);
1759 
1760  // move to attribute or bigger
1761  while(iter->second.first!=iter->second.second
1762  && iter->second.first->localIndexPair().global()==index
1763  && iter->second.first->localIndexPair().local().attribute()<attribute)
1764  ++(iter->second.first);
1765 
1766  // erase from the map if there are no more entries.
1767  if(iter->second.first == iter->second.second)
1768  map_.erase(iter++);
1769  else{
1770  ++iter;
1771  }
1772  }
1773  index_=index;
1774  attribute_=attribute;
1775  noattribute=false;
1776  }
1777 
1778  template<typename T, typename A>
1779  inline CollectiveIterator<T,A>& CollectiveIterator<T,A>::operator++()
1780  {
1781  const auto end = map_.end();
1782 
1783  for(auto iter = map_.begin(); iter != end;) {
1784  // Step the iterator until we are >= index
1785  auto current = iter->second.first;
1786  auto rend = iter->second.second;
1787 
1788  // move all iterators pointing to the current global index to next value
1789  if(iter->second.first->localIndexPair().global()==index_ &&
1790  (noattribute || iter->second.first->localIndexPair().local().attribute() == attribute_))
1791  ++(iter->second.first);
1792 
1793  // erase from the map if there are no more entries.
1794  if(iter->second.first == iter->second.second)
1795  map_.erase(iter++);
1796  else{
1797  ++iter;
1798  }
1799  }
1800  return *this;
1801  }
1802 
1803  template<typename T, typename A>
1804  inline bool CollectiveIterator<T,A>::empty() const
1805  {
1806  return map_.empty();
1807  }
1808 
1809  template<typename T, typename A>
1810  inline typename CollectiveIterator<T,A>::iterator
1811  CollectiveIterator<T,A>::begin()
1812  {
1813  if(noattribute)
1814  return iterator(map_.begin(), map_.end(), index_);
1815  else
1816  return iterator(map_.begin(), map_.end(), index_,
1817  attribute_);
1818  }
1819 
1820  template<typename T, typename A>
1821  inline typename CollectiveIterator<T,A>::iterator
1822  CollectiveIterator<T,A>::end()
1823  {
1824  return iterator(map_.end(), map_.end(), index_);
1825  }
1826 
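A sketch of collective iteration with the machinery above, assuming the `remote` object from the earlier sketches and a global index `g` that is present locally:

    // Find out which other ranks also know the global index g.
    auto collective = remote.iterator<true>();   // walk the send lists
    collective.advance(g);                       // position all per-rank iterators at g
    for (auto it = collective.begin(); it != collective.end(); ++it)
      std::cout << "global index " << g << " is also present on rank "
                << it.process() << std::endl;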
1827  template<typename TG, typename TA>
1828  inline std::ostream& operator<<(std::ostream& os, const RemoteIndex<TG,TA>& index)
1829  {
1830  os<<"[global="<<index.localIndexPair().global()<<", remote attribute="<<index.attribute()<<" local attribute="<<index.localIndexPair().local().attribute()<<"]";
1831  return os;
1832  }
1833 
1834  template<typename T, typename A>
1835  inline std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices)
1836  {
1837  int rank;
1838  MPI_Comm_rank(indices.comm_, &rank);
1839  const auto rend = indices.remoteIndices_.end();
1840 
1841  for(auto rindex = indices.remoteIndices_.begin(); rindex!=rend; ++rindex) {
1842  os<<rank<<": Process "<<rindex->first<<":";
1843 
1844  if(!rindex->second.first->empty()) {
1845  os<<" send:";
1846 
1847  const auto send= rindex->second.first->end();
1848 
1849  for(auto index = rindex->second.first->begin();
1850  index != send; ++index)
1851  os<<*index<<" ";
1852  os<<std::endl;
1853  }
1854  if(!rindex->second.second->empty()) {
1855  os<<rank<<": Process "<<rindex->first<<": "<<"receive: ";
1856 
1857  for(const auto& index : *(rindex->second.second))
1858  os << index << " ";
1859  }
1860  os<<std::endl<<std::flush;
1861  }
1862  return os;
1863  }
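The streaming operator above is mainly useful for debugging: every rank prints its send and receive lists prefixed with its rank, e.g. (again assuming the `remote` object from the earlier sketches):

    std::cout << remote << std::endl;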
1865 }
1866 
1867 #endif // HAVE_MPI
1868 
1869 #endif
Iterator over the valid underlying iterators.
Definition: remoteindices.hh:792
const RemoteIndex * operator->() const
Definition: remoteindices.hh:850
iterator & operator++()
Definition: remoteindices.hh:822
iterator(const RealIterator &iter, const ConstRealIterator &end, GlobalIndex &index)
Definition: remoteindices.hh:799
iterator(const iterator &other)
Definition: remoteindices.hh:817
const RemoteIndex & operator*() const
Definition: remoteindices.hh:838
bool operator==(const iterator &other) const
Definition: remoteindices.hh:856
int process() const
Definition: remoteindices.hh:844
bool operator!=(const iterator &other) const
Definition: remoteindices.hh:862
A collective iterator for moving over the remote indices for all processes collectively.
Definition: remoteindices.hh:708
CollectiveIterator(const RemoteIndexMap &map_, bool send)
Constructor.
Definition: remoteindices.hh:1703
bool empty() const
Checks whether there are still iterators in the map.
Definition: remoteindices.hh:1804
void advance(const GlobalIndex &global)
Advances all underlying iterators.
Definition: remoteindices.hh:1716
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:748
A pair consisting of a global and local index.
Definition: indexset.hh:85
Class for recomputing missing indices of a distributed index set.
Definition: indicessyncer.hh:41
Base class of all classes representing a communication interface.
Definition: interface.hh:35
Exception indicating that the index set is not in the expected state.
Definition: indexset.hh:205
A class setting up standard communication for a two-valued attribute set with owner/overlap/copy semantics.
Definition: owneroverlapcopy.hh:174
Manager class for the mapping between local indices and globally unique indices.
Definition: indexset.hh:218
An index present on the local process with an additional attribute flag.
Definition: plocalindex.hh:49
Default exception class for range errors.
Definition: exceptions.hh:254
Modifier for adding and/or deleting remote indices from the remote index list.
Definition: remoteindices.hh:551
void repairLocalIndexPointers()
Repair the pointers to the local index pairs.
Definition: remoteindices.hh:1575
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:602
constexpr static bool MODIFYINDEXSET
If true the index set corresponding to the remote indices might get modified.
Definition: remoteindices.hh:568
A Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:598
void insert(const RemoteIndex &index)
Insert an index to the list.
Definition: remoteindices.hh:1605
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:578
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:583
RemoteIndexList::const_iterator ConstIterator
The type of the remote index list iterator.
Definition: remoteindices.hh:612
SLListModifyIterator< RemoteIndex, Allocator > ModifyIterator
The type of the modifying iterator of the remote index list.
Definition: remoteindices.hh:607
bool remove(const GlobalIndex &global)
Remove a remote index.
Definition: remoteindices.hh:1653
T ParallelIndexSet
Type of the index set we use.
Definition: remoteindices.hh:573
RemoteIndexListModifier()
Default constructor.
Definition: remoteindices.hh:676
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:588
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:593
Information about an index residing on another processor.
Definition: remoteindices.hh:73
const Attribute attribute() const
Get the attribute of the index on the remote process.
Definition: remoteindices.hh:946
T1 GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: remoteindices.hh:90
T2 Attribute
The type of the attributes. Normally this will be an enumeration.
Definition: remoteindices.hh:99
IndexPair< GlobalIndex, ParallelLocalIndex< Attribute > > PairType
The type of the index pair.
Definition: remoteindices.hh:105
const PairType & localIndexPair() const
Get the corresponding local index pair.
Definition: remoteindices.hh:952
RemoteIndex()
Parameterless Constructor.
Definition: remoteindices.hh:930
The indices present on remote processes.
Definition: remoteindices.hh:189
Dune::RemoteIndex< GlobalIndex, Attribute > RemoteIndex
Type of the remote indices we manage.
Definition: remoteindices.hh:231
friend void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition: repartition.hh:83
void setIndexSets(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >())
Set the index sets and communicator we work with.
Definition: remoteindices.hh:984
void free()
Free the index lists.
Definition: remoteindices.hh:1429
ParallelIndexSet::GlobalIndex GlobalIndex
The type of the global index.
Definition: remoteindices.hh:215
void rebuild()
Rebuilds the set of remote indices.
Definition: remoteindices.hh:1453
T ParallelIndexSet
Type of the index set we use, e.g. ParallelLocalIndexSet.
Definition: remoteindices.hh:206
MPI_Comm communicator() const
Get the mpi communicator used.
Definition: remoteindices.hh:1696
LocalIndex::Attribute Attribute
The type of the attribute.
Definition: remoteindices.hh:226
CollectiveIteratorT iterator() const
Get an iterator for collectively iterating over the remote indices of all remote processes.
Definition: remoteindices.hh:1690
void setIncludeSelf(bool includeSelf)
Tell whether sending from indices of the processor to other indices on the same processor is enabled even if the same indexset is used on both the sending and receiving side.
Definition: remoteindices.hh:971
const_iterator end() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1529
std::map< int, std::pair< RemoteIndexList *, RemoteIndexList * > > RemoteIndexMap
The type of the map from rank to remote index list.
Definition: remoteindices.hh:245
RemoteIndexListModifier< T, A, mode > getModifier(int process)
Get a modifier for a remote index list.
Definition: remoteindices.hh:1480
const ParallelIndexSet & sourceIndexSet() const
Get the index set at the source.
Definition: remoteindices.hh:999
~RemoteIndices()
Destructor.
Definition: remoteindices.hh:1014
Dune::SLList< RemoteIndex, Allocator > RemoteIndexList
The type of the remote index list.
Definition: remoteindices.hh:241
int neighbours() const
Get the number of processors we share indices with.
Definition: remoteindices.hh:1446
CollectiveIterator< T, A > CollectiveIteratorT
The type of the collective iterator over all remote indices.
Definition: remoteindices.hh:210
const ParallelIndexSet & destinationIndexSet() const
Get the index set at destination.
Definition: remoteindices.hh:1007
RemoteIndices(const ParallelIndexSet &source, const ParallelIndexSet &destination, const MPI_Comm &comm, const std::vector< int > &neighbours=std::vector< int >(), bool includeSelf=false)
Constructor.
Definition: remoteindices.hh:958
typename std::allocator_traits< A >::template rebind_alloc< RemoteIndex > Allocator
The type of the allocator for the remote index list.
Definition: remoteindices.hh:237
const_iterator find(int proc) const
Find an iterator over the remote index lists of a specific process.
Definition: remoteindices.hh:1515
bool isSynced() const
Checks whether the remote indices are synced with the indexsets.
Definition: remoteindices.hh:1473
const_iterator begin() const
Get an iterator over all remote index lists.
Definition: remoteindices.hh:1522
ParallelIndexSet::LocalIndex LocalIndex
The type of the local index.
Definition: remoteindices.hh:221
A constant iterator for the SLList.
Definition: sllist.hh:371
A single linked list.
Definition: sllist.hh:44
A few common exception classes.
Provides a map between global and local indices.
void repairLocalIndexPointers(std::map< int, SLList< std::pair< typename T::GlobalIndex, typename T::LocalIndex::Attribute >, A > > &globalMap, RemoteIndices< T, A1 > &remoteIndices, const T &indexSet)
Repair the pointers to the local indices in the remote indices.
Definition: indicessyncer.hh:487
iterator begin()
Get an iterator over the indices positioned at the first index.
iterator end()
Get an iterator over the indices positioned after the last index.
const GlobalIndex & global() const
Get the global index.
TL LocalIndex
The type of the local index, e.g. ParallelLocalIndex.
Definition: indexset.hh:239
TG GlobalIndex
The type of the global index. This type has to provide at least an operator< for sorting.
Definition: indexset.hh:226
@ GROUND
The default mode. Indicates that the index set is ready to be used.
Definition: indexset.hh:186
iterator end()
Get an iterator pointing to the end of the list.
Definition: sllist.hh:774
SLListConstIterator< T, A > const_iterator
The constant iterator of the list.
Definition: sllist.hh:74
SLListModifyIterator< GlobalIndex, Allocator > ModifyIterator
The type of the iterator capable of deletion and insertion.
Definition: sllist.hh:103
iterator begin()
Get an iterator pointing to the first element in the list.
Definition: sllist.hh:762
#define DUNE_THROW(E, m)
Definition: exceptions.hh:218
EnableIfInterOperable< T1, T2, bool >::type operator!=(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for inequality.
Definition: iteratorfacades.hh:259
EnableIfInterOperable< T1, T2, bool >::type operator==(const ForwardIteratorFacade< T1, V1, R1, D > &lhs, const ForwardIteratorFacade< T2, V2, R2, D > &rhs)
Checks for equality.
Definition: iteratorfacades.hh:237
DVVerbType dvverb(std::cout)
stream for very verbose output.
Definition: stdstreams.hh:95
Traits classes for mapping types onto MPI_Datatype.
Dune namespace.
Definition: alignedallocator.hh:13
Provides classes for use as the local index in ParallelIndexSet for distributed computing.
Implements a singly linked list together with the necessary iterators.
Standard Dune debug streams.
A traits class describing the mapping of types onto MPI_Datatypes.
Definition: mpitraits.hh:41