DUNE PDELab (2.8)

parmetisgridpartitioner.hh
#ifndef DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH
#define DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH

#include <algorithm>
#include <vector>

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/exceptions.hh>

#include <dune/geometry/referenceelements.hh>

#include <dune/grid/utility/globalindexset.hh>
#include <dune/grid/common/mcmgmapper.hh>
#include <dune/grid/common/rangegenerators.hh>

#if HAVE_PARMETIS

#include <parmetis.h>

// Only enable the implementation for genuine ParMETIS: it uses functions that
// are not emulated by the Scotch compatibility layer.
#ifdef PARMETIS_MAJOR_VERSION

namespace Dune
{

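  /** \brief Compute decompositions of a Dune grid using ParMETIS
   *
   *  A minimal usage sketch, assuming a grid object `grid` that lives entirely on
   *  rank 0 and whose implementation accepts a target-rank vector in loadBalance()
   *  (e.g. UGGrid), plus a fully set-up MPIHelper `mpihelper`; the variable names
   *  are illustrative only:
   *
   *  \code
   *  auto gv = grid.leafGridView();
   *  std::vector<unsigned> part
   *      = Dune::ParMetisGridPartitioner<decltype(gv)>::partition(gv, mpihelper);
   *  grid.loadBalance(part, 0);  // redistribute the elements accordingly
   *  \endcode
   */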
  template<class GridView>
  struct ParMetisGridPartitioner {

    // define index type as provided by ParMETIS
#if PARMETIS_MAJOR_VERSION > 3
    typedef idx_t idx_type;
    typedef ::real_t real_type;
#else
    typedef int idx_type;
    typedef float real_type;
#endif // PARMETIS_MAJOR_VERSION > 3

    enum {
      dimension = GridView::dimension
    };

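    /** \brief Create an initial partitioning of a Dune grid, i.e., not taking into account an existing partitioning
     *
     *  \param gv The grid view to be partitioned
     *  \param mpihelper The MPIHelper object, needed to get the MPI communicator
     *
     *  \return std::vector with one entry per element seen by this process; each entry is the
     *     number of the rank that the element is assigned to
     */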
    static std::vector<unsigned> partition(const GridView& gv, const Dune::MPIHelper& mpihelper) {
      const unsigned numElements = gv.size(0);

      std::vector<unsigned> part(numElements);

      // Setup parameters for ParMETIS
      idx_type wgtflag = 0;                                  // we don't use weights
      idx_type numflag = 0;                                  // we are using C-style arrays
      idx_type ncon = 1;                                     // number of balance constraints
      idx_type ncommonnodes = 2;                             // number of nodes elements must have in common to be considered adjacent to each other
      idx_type options[4] = {0, 0, 0, 0};                    // use default values for random seed, output and coupling
      idx_type edgecut;                                      // will store the number of edges cut by the partition
      idx_type nparts = mpihelper.size();                    // number of parts equals number of processes
      std::vector<real_type> tpwgts(ncon*nparts, 1./nparts); // load per subdomain and weight (same load on every process)
      std::vector<real_type> ubvec(ncon, 1.05);              // weight tolerance (same weight tolerance for every weight there is)

      // The difference elmdist[i+1] - elmdist[i] is the number of elements on process i
      std::vector<idx_type> elmdist(nparts+1);
      elmdist[0] = 0;
      std::fill(elmdist.begin()+1, elmdist.end(), gv.size(0)); // all elements are on process zero
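      // With all N elements on rank 0 this yields elmdist = {0, N, N, ..., N}:
      // rank 0 owns the element range [0,N), every other rank owns an empty range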

      // Create and fill the arrays "eptr" and "eind" in CSR fashion: eptr[i] is the offset at which
      // the vertices of the i-th element start, so eind[eptr[i]] to eind[eptr[i+1]-1] are its vertex numbers
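      // Example: for a mesh of two triangles, eptr = {0, 3, 6} and eind holds the three
      // vertex indices of each triangle back to back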
      std::vector<idx_type> eptr, eind;
      int numVertices = 0;
      eptr.push_back(numVertices);

      for (const auto& element : elements(gv, Partitions::interior)) {
        const size_t curNumVertices = referenceElement<double, dimension>(element.type()).size(dimension);

        numVertices += curNumVertices;
        eptr.push_back(numVertices);

        for (size_t k = 0; k < curNumVertices; ++k)
          eind.push_back(gv.indexSet().subIndex(element, k, dimension));
      }

      // Partition mesh using ParMETIS
      if (0 == mpihelper.rank()) {
        MPI_Comm comm = Dune::MPIHelper::getLocalCommunicator();

#if PARMETIS_MAJOR_VERSION >= 4
        const int OK =
#endif
          ParMETIS_V3_PartMeshKway(elmdist.data(), eptr.data(), eind.data(), NULL, &wgtflag, &numflag,
                                   &ncon, &ncommonnodes, &nparts, tpwgts.data(), ubvec.data(),
                                   options, &edgecut, reinterpret_cast<idx_type*>(part.data()), &comm);

#if PARMETIS_MAJOR_VERSION >= 4
        if (OK != METIS_OK)
          DUNE_THROW(Dune::Exception, "ParMETIS returned an error code.");
#endif
      }

      return part;
    }

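    /** \brief Create a repartitioning of a distributed Dune grid
     *
     *  \param gv The grid view to be partitioned
     *  \param mpihelper The MPIHelper object, needed to get the MPI communicator
     *  \param itr ParMETIS parameter: the assumed ratio of inter-process communication time
     *     to data redistribution time; larger values favor a smaller edge cut over less data movement
     *
     *  \return std::vector with one entry per element seen by this process; each entry is the
     *     number of the rank that the element is assigned to
     */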
    static std::vector<unsigned> repartition(const GridView& gv, const Dune::MPIHelper& mpihelper, real_type itr = 1000) {

      // Create global index map
      GlobalIndexSet<GridView> globalIndex(gv,0);

      int numElements = std::distance(gv.template begin<0, Interior_Partition>(),
                                      gv.template end<0, Interior_Partition>());

      std::vector<unsigned> interiorPart(numElements);

      // Setup parameters for ParMETIS
      idx_type wgtflag = 0;                                  // we don't use weights
      idx_type numflag = 0;                                  // we are using C-style arrays
      idx_type ncon = 1;                                     // number of balance constraints
      idx_type options[4] = {0, 0, 0, 0};                    // use default values for random seed, output and coupling
      idx_type edgecut;                                      // will store the number of edges cut by the partition
      idx_type nparts = mpihelper.size();                    // number of parts equals number of processes
      std::vector<real_type> tpwgts(ncon*nparts, 1./nparts); // load per subdomain and weight (same load on every process)
      std::vector<real_type> ubvec(ncon, 1.05);              // weight tolerance (same weight tolerance for every weight there is)

      MPI_Comm comm = Dune::MPIHelper::getCommunicator();

      // Make the number of interior elements of each processor available to all processors
      std::vector<int> offset(gv.comm().size());
      std::fill(offset.begin(), offset.end(), 0);

      gv.comm().template allgather<int>(&numElements, 1, offset.data());

      // The difference vtxdist[i+1] - vtxdist[i] is the number of elements that are on process i
      std::vector<idx_type> vtxdist(gv.comm().size()+1);
      vtxdist[0] = 0;

      for (unsigned int i=1; i<vtxdist.size(); ++i)
        vtxdist[i] = vtxdist[i-1] + offset[i-1];
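      // e.g. interior-element counts {4, 3, 5} on three ranks give vtxdist = {0, 4, 7, 12}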

      // Set up element adjacency lists
      std::vector<idx_type> xadj, adjncy;
      xadj.push_back(0);

      for (const auto& element : elements(gv, Partitions::interior)) {
        size_t numNeighbors = 0;

        for (const auto& in : intersections(gv, element)) {
          if (in.neighbor()) {
            adjncy.push_back(globalIndex.index(in.outside()));

            ++numNeighbors;
          }
        }

        xadj.push_back(xadj.back() + numNeighbors);
      }
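
      // xadj and adjncy now describe the dual graph of the interior elements in CSR form:
      // the neighbors of element i are adjncy[xadj[i]] to adjncy[xadj[i+1]-1]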

#if PARMETIS_MAJOR_VERSION >= 4
      const int OK =
#endif
        ParMETIS_V3_AdaptiveRepart(vtxdist.data(), xadj.data(), adjncy.data(), NULL, NULL, NULL,
                                   &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec.data(),
                                   &itr, options, &edgecut, reinterpret_cast<idx_type*>(interiorPart.data()), &comm);

#if PARMETIS_MAJOR_VERSION >= 4
      if (OK != METIS_OK)
        DUNE_THROW(Dune::Exception, "ParMETIS returned error code " << OK);
#endif

      // At this point, interiorPart contains a target rank for each interior element, sorted in
      // the order in which the grid view traverses them. Now we need to do two things:
      // a) Add additional dummy entries for the ghost elements
      // b) Use the element index for the actual ordering. Since there may be different types of elements,
      //    we cannot use the index set directly, but have to go through a Mapper.

      typedef MultipleCodimMultipleGeomTypeMapper<GridView> ElementMapper;
      ElementMapper elementMapper(gv, mcmgElementLayout());

      std::vector<unsigned int> part(gv.size(0));
      std::fill(part.begin(), part.end(), 0);
      unsigned int c = 0;
      for (const auto& element : elements(gv, Partitions::interior))
        part[elementMapper.index(element)] = interiorPart[c++];

      return part;
    }
  };

} // namespace Dune

#else // PARMETIS_MAJOR_VERSION
#warning "You seem to be using the ParMETIS emulation layer of Scotch, which does not work with this file."
#endif

#else // HAVE_PARMETIS
#warning "ParMETIS was not found, please check your configuration"
#endif

#endif // DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH