DUNE PDELab (git)

recipe-communication.cc

See explanation at Communication in parallel programs

// -*- tab-width: 4; indent-tabs-mode: nil -*-
// always include the config file
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
// C++ includes
#include<math.h>
#include<iostream>
// dune-common includes
#include<dune/common/parallel/mpihelper.hh>
#include<dune/common/fieldvector.hh>
// dune-geometry includes
#include<dune/geometry/referenceelements.hh>
// dune-grid includes
#include<dune/grid/io/file/gmshreader.hh>
#if HAVE_DUNE_UGGRID
#include<dune/grid/uggrid.hh>
#endif
#if HAVE_DUNE_ALUGRID
#include<dune/alugrid/grid.hh>
#include<dune/alugrid/dgf.hh>
#include<dune/grid/io/file/dgfparser/dgfparser.hh>
#endif
#include <dune/pdelab.hh>
#ifndef COMMUNICATE_HH
#define COMMUNICATE_HH
template <typename GV>
void communicate(const GV& gv, int communicationType){
using RF = int;                                  // range field type of the stored data
using DF = typename GV::Grid::ctype;             // domain field type
using CON = Dune::PDELab::NoConstraints;
using VBE = Dune::PDELab::ISTL::VectorBackend<>;
using FEM = Dune::PDELab::QkLocalFiniteElementMap<GV,DF,RF,1>; // Q1 map, one DOF per vertex (matches the vertex data written below)
using GFS = Dune::PDELab::GridFunctionSpace<GV,FEM,CON,VBE>;
FEM fem(gv);
GFS gfs(gv,fem);
using Z = Dune::PDELab::Backend::Vector<GFS, RF>; // data type
Z z(gfs);
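// The coefficient vector holds one entry per degree of freedom of the grid
// function space on this rank, including the copies in the overlap region.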
// [Define DataHandle]
using DH = Dune::PDELab::AddDataHandle<GFS,Z>;
DH dh(gfs,z);
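// AddDataHandle sends the local value of every shared degree of freedom and
// adds each received value to the local entry, so entries that exist on
// several ranks accumulate the contributions of all of them.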
// Create collective communication object
// [Collective communication object]
auto comm = gv.comm();
// [Get rank]
int myrank = comm.rank();
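// gv.comm() returns the grid view's collective communication object: a wrapper
// around the MPI communicator in parallel runs, or a dummy single-process
// implementation otherwise. rank() gives this process's index, size() the
// total number of processes.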
// Store rank+1 (or alternatively 1000^rank) of the current processor as data for each entity.
using Dune::PDELab::Backend::native;
int size{0};
for(auto& v : native(z)){
v = 1;
++size;
for(int j=0 ; j<myrank; ++j)
  // v *= 1000; // makes it easy to see which ranks communicated this entity
  v += 1; // keeps the VTK output readable
}
// Different communication types for DataHandles:
//
// InteriorBorder_InteriorBorder_Interface: send/receive interior and border entities
// InteriorBorder_All_Interface: send interior and border, receive all entities
// Overlap_OverlapFront_Interface: send overlap, receive overlap and front entities
// Overlap_All_Interface: send overlap, receive all entities
// All_All_Interface: send all and receive all entities
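//
// The names refer to the parallel partition types of the entities: interior
// entities are owned by this rank, border entities lie on the boundary between
// ranks, overlap entities are copies inside the overlap region of an
// overlapping grid such as YaspGrid, and front entities form its outer rim.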
// [Communication type]
switch (communicationType){
case 1: gv.communicate(dh, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); break;
case 2: gv.communicate(dh, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication); break;
case 3: gv.communicate(dh, Dune::Overlap_OverlapFront_Interface, Dune::ForwardCommunication); break;
case 4: gv.communicate(dh, Dune::Overlap_All_Interface, Dune::ForwardCommunication); break;
default: gv.communicate(dh, Dune::All_All_Interface, Dune::ForwardCommunication);
}
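// Dune::ForwardCommunication sends in the direction stated by the interface;
// Dune::BackwardCommunication would swap the sender and receiver roles.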
// Calculate the sum of the data vector on this rank
int sum = z.one_norm();
// If we are on rank 0 print the results.
if (myrank==0){
std::cout << std::endl;
std::cout << "== Output for rank " << myrank << std::endl;
std::cout << std::endl;
std::cout << "Each process stores values equal to 1000 powered to its rank, or only rank." << std::endl;
std::cout << "The sum shows how many entities are communicated and from which ranks they are." << std::endl;
std::cout << "The size of the data vector is equal to the number of all entities of this processor." << std::endl;
std::cout << std::endl;
std::cout << "Sum of the data vector: " << sum << std::endl;
std::cout << "Size of the data vector: " << size << std::endl;
}
// Find the maximal and total sum on all ranks:
int globmax{0};
int globsum{0};
// [Collective communication]
globmax = comm.max(sum);
globsum = comm.sum(sum);
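// max() and sum() are global reductions over all ranks (all-reduce semantics),
// so every rank obtains the same result.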
if (myrank==0){
std::cout << "Maximal sum on all ranks is " << globmax << std::endl;
std::cout << "Total sum on all ranks is " << globsum << std::endl;
}
// Make a grid function out of z
using ZDGF = Dune::PDELab::DiscreteGridFunction<GFS,Z>;
ZDGF zdgf(gfs,z);
// prepare the VTK writer and write the file
int subsampling{1};
using VTKWRITER = Dune::SubsamplingVTKWriter<GV>;
VTKWRITER vtkwriter(gv,Dune::refinementIntervals(subsampling));
std::string filename="recipe-communication";
std::string outputname="commType_"+std::to_string(communicationType);
using VTKF = Dune::PDELab::VTKGridFunctionAdapter<ZDGF>;
vtkwriter.addVertexData(std::make_shared<VTKF>(zdgf,outputname));
vtkwriter.write(filename,Dune::VTK::appendedraw);
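// In a parallel run every rank writes its own piece of the output and a
// .pvtu file referencing all pieces is written alongside them.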
}
#endif
//===============================================================
// Main program with grid setup
//===============================================================
int main(int argc, char** argv)
{
// Maybe initialize MPI
Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
if(Dune::MPIHelper::isFake)
  std::cout << "This is a sequential program." << std::endl;
else
  std::cout << "Parallel code run on "
            << helper.size() << " process(es)" << std::endl;
// set parameters (hard-coded here; could be read from an ini file)
const int overlap = 2;
const int refinement = 0;
// Create 2D YaspGrid
constexpr int dim=2;
typedef Dune::YaspGrid<dim> Grid;
typedef Grid::ctype DF;
std::array<int,dim> N{16,16};      // number of cells per direction
std::bitset<dim> B(false);         // no periodic boundaries (one bit per direction)
Dune::FieldVector<DF,dim> L(1.0);  // upper right corner of the domain (unit square)
std::shared_ptr<Grid> gridp = std::make_shared<Grid>(L,N,B,overlap); // a communication object could be passed as a fifth argument
gridp->refineOptions(false); // keep overlap in cells
gridp->globalRefine(refinement);
typedef Grid::LeafGridView GV;
GV gv=gridp->leafGridView();
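// The leaf grid view of this overlapping YaspGrid contains the cells owned by
// this rank plus 'overlap' layers of cells owned by neighbouring ranks.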
int communicationType = 1;
communicate(gv,communicationType);
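// To see the effect of the different interfaces, run in parallel (binary name
// assumed), e.g.:  mpirun -np 4 ./recipe-communication
// and vary communicationType (1 to 4; any other value selects All_All_Interface).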
}