dune-pdelab  2.0.0
parallelhelper.hh
// -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=8 sw=2 sts=2:
#ifndef DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
#define DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH

#include <limits>
#include <numeric>
#include <set>
#include <vector>

#include <dune/common/deprecated.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/static_assert.hh>
#include <dune/common/stdstreams.hh>

#include <dune/istl/owneroverlapcopy.hh>
#include <dune/istl/solvercategory.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/scalarproducts.hh>
#include <dune/istl/paamg/amg.hh>
#include <dune/istl/paamg/pinfo.hh>
#include <dune/istl/io.hh>
#include <dune/istl/superlu.hh>

#include <dune/pdelab/backend/backendselector.hh>
#include <dune/pdelab/backend/istl/tags.hh>
#include <dune/pdelab/backend/istl/utility.hh>
#include <dune/pdelab/gridfunctionspace/genericdatahandle.hh>
#include <dune/pdelab/gridfunctionspace/tags.hh>

namespace Dune {
  namespace PDELab {
    namespace istl {

      //========================================================
      // A parallel helper class providing a nonoverlapping
      // decomposition of all degrees of freedom
      //========================================================

      template<typename GFS>
      class ParallelHelper
      {

        //! Type for storing rank values.
        typedef int RankIndex;

        //! Vector type for storing the owner rank of every DOF.
        typedef typename Dune::PDELab::BackendVectorSelector<GFS,RankIndex>::Type RankVector;
        //! Vector type for storing the ghost flag of every DOF.
        typedef typename Dune::PDELab::BackendVectorSelector<GFS,bool>::Type GhostVector;

        //! ContainerIndex type of the underlying GridFunctionSpace.
        typedef typename GFS::Ordering::Traits::ContainerIndex ContainerIndex;

      public:

        ParallelHelper (const GFS& gfs, int verbose = 1)
          : _gfs(gfs)
          , _rank(gfs.gridView().comm().rank())
          , _ranks(gfs,_rank)
          , _ghosts(gfs,false)
          , _verbose(verbose)
        {

          // Try to reduce the communication overhead by picking the smallest
          // possible communication interface for the overlap structure of the GFS.
          if (gfs.ordering().containedPartitions() == NonOverlappingPartitionSelector::partition_mask)
          {
            // The GFS only spans the interior and border partitions, so we can
            // skip sending or receiving anything else.
            _interiorBorder_all_interface = InteriorBorder_InteriorBorder_Interface;
            _all_all_interface = InteriorBorder_InteriorBorder_Interface;
          }
          else
          {
            // In general, we have to transmit more.
            _interiorBorder_all_interface = InteriorBorder_All_Interface;
            _all_all_interface = All_All_Interface;
          }

          if (_gfs.gridView().comm().size() > 1)
          {

            // find out about ghosts
            GhostDataHandle<GFS,GhostVector> gdh(_gfs,_ghosts,false);
            _gfs.gridView().communicate(gdh,_interiorBorder_all_interface,Dune::ForwardCommunication);

            // create disjoint DOF partitioning
            DisjointPartitioningDataHandle<GFS,RankVector> pdh(_gfs,_ranks);
            _gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);

          }

        }

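        // At this point, _ranks[i] holds the rank that owns DOF i in the
        // disjoint partitioning and _ghosts[i] is true for DOFs attached to
        // ghost entities; the query methods below work off these two vectors.
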
        //! Mask out all DOFs not owned by the current process with 0.
        template<typename X>
        void maskForeignDOFs(X& x) const
        {
          // dispatch to implementation.
          maskForeignDOFs(istl::container_tag(istl::raw(x)),istl::raw(x),istl::raw(_ranks));
        }

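        // The private overloads below use tag dispatch: istl::container_tag(x)
        // yields an empty tag object (block_vector, field_vector, ...) that
        // describes the container type, so overload resolution selects the
        // matching implementation at compile time and the recursion bottoms
        // out at the field_vector level.
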
      private:

        // Implementation for block vector; recursively masks blocks.
        template<typename X, typename Mask>
        void maskForeignDOFs(istl::tags::block_vector, X& x, const Mask& mask) const
        {
          typename Mask::const_iterator mask_it = mask.begin();
          for (typename X::iterator it = x.begin(),
                 end_it = x.end();
               it != end_it;
               ++it, ++mask_it)
            maskForeignDOFs(istl::container_tag(*it),*it,*mask_it);
        }

        // Implementation for field vector; iterates over entries and masks them individually.
        template<typename X, typename Mask>
        void maskForeignDOFs(istl::tags::field_vector, X& x, const Mask& mask) const
        {
          typename Mask::const_iterator mask_it = mask.begin();
          for (typename X::iterator it = x.begin(),
                 end_it = x.end();
               it != end_it;
               ++it, ++mask_it)
            *it = (*mask_it == _rank ? *it : typename X::field_type(0));
        }

      public:

        //! Tests whether the given index is owned by this process.
        bool owned(const ContainerIndex& i) const
        {
          return _ranks[i] == _rank;
        }

        //! Tests whether the given index belongs to a ghost DOF.
        bool isGhost(const ContainerIndex& i) const
        {
          return _ghosts[i];
        }

        //! Calculates the (rank-local) dot product of x and y on the disjoint
        //! partition defined by the helper.
        template<typename X, typename Y>
        typename PromotionTraits<
          typename X::field_type,
          typename Y::field_type
          >::PromotedType
        disjointDot(const X& x, const Y& y) const
        {
          return disjointDot(istl::container_tag(istl::raw(x)),
                             istl::raw(x),
                             istl::raw(y),
                             istl::raw(_ranks)
                             );
        }

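        // Because the partitioning is disjoint, each DOF contributes on exactly
        // one rank, so summing the rank-local results over all processes (e.g.
        // via the grid view's collective communication) yields a consistent
        // global dot product without double counting shared border DOFs.
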
      private:

        // Implementation for BlockVector, collects the result of recursively
        // invoking the algorithm on the vector blocks.
        template<typename X, typename Y, typename Mask>
        typename PromotionTraits<
          typename X::field_type,
          typename Y::field_type
          >::PromotedType
        disjointDot(istl::tags::block_vector, const X& x, const Y& y, const Mask& mask) const
        {
          typedef typename PromotionTraits<
            typename X::field_type,
            typename Y::field_type
            >::PromotedType result_type;

          result_type r(0);

          typename Y::const_iterator y_it = y.begin();
          typename Mask::const_iterator mask_it = mask.begin();
          for (typename X::const_iterator x_it = x.begin(),
                 end_it = x.end();
               x_it != end_it;
               ++x_it, ++y_it, ++mask_it)
            r += disjointDot(istl::container_tag(*x_it),*x_it,*y_it,*mask_it);

          return r;
        }

        // Implementation for FieldVector, iterates over the entries and calls Dune::dot() for DOFs
        // associated with the current rank.
        template<typename X, typename Y, typename Mask>
        typename PromotionTraits<
          typename X::field_type,
          typename Y::field_type
          >::PromotedType
        disjointDot(istl::tags::field_vector, const X& x, const Y& y, const Mask& mask) const
        {
          typedef typename PromotionTraits<
            typename X::field_type,
            typename Y::field_type
            >::PromotedType result_type;

          result_type r(0);

          typename Y::const_iterator y_it = y.begin();
          typename Mask::const_iterator mask_it = mask.begin();
          for (typename X::const_iterator x_it = x.begin(),
                 end_it = x.end();
               x_it != end_it;
               ++x_it, ++y_it, ++mask_it)
            r += (*mask_it == _rank ? Dune::dot(*x_it,*y_it) : result_type(0));

          return r;
        }

      public:

        //! Returns the MPI rank of this process.
        RankIndex rank() const
        {
          return _rank;
        }

#if HAVE_MPI

        //! Makes the matrix consistent and creates the parallel information for AMG.
        template<typename MatrixType, typename Comm>
        void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c);

      private:

        // Checks whether a matrix block is owned by the current process. Used for the AMG
        // construction and thus assumes a single level of blocking and blocks with ownership
        // restricted to a single DOF.
        bool owned_for_amg(std::size_t i) const
        {
          return _ranks.base()[i][0] == _rank;
        }

        // Checks whether a matrix block is associated with a ghost entity. Used for the AMG
        // construction and thus assumes a single level of blocking and blocks with ownership
        // restricted to a single DOF.
        bool is_ghost_for_amg(std::size_t i) const
        {
          return _ghosts.base()[i][0];
        }

#endif // HAVE_MPI

      private:

        const GFS& _gfs;
        const RankIndex _rank;
        RankVector _ranks;   // vector to identify the unique decomposition
        GhostVector _ghosts; // vector to identify ghost DOFs
        int _verbose;        // verbosity level

        //! Communication interface used where an InteriorBorder-to-All pattern is required.
        InterfaceType _interiorBorder_all_interface;

        //! Communication interface used where an All-to-All pattern is required.
        InterfaceType _all_all_interface;
      };

#if HAVE_MPI

      template<typename GFS>
      template<typename M, typename C>
      void ParallelHelper<GFS>::createIndexSetAndProjectForAMG(M& m, C& c)
      {

        const bool is_bcrs_matrix =
          is_same<
            typename istl::tags::container<
              typename istl::raw_type<M>::type
            >::type::base_tag,
            istl::tags::bcrs_matrix
          >::value;

        const bool block_type_is_field_matrix =
          is_same<
            typename istl::tags::container<
              typename istl::raw_type<M>::type::block_type
            >::type::base_tag,
            istl::tags::field_matrix
          >::value;

        // We assume M to be a BCRSMatrix in the following, so better check for that
        dune_static_assert(is_bcrs_matrix && block_type_is_field_matrix, "matrix structure not compatible with AMG");

        // ********************************************************************************
        // In the following, the code will always assume that all DOFs stored in a single
        // block of the BCRSMatrix are attached to the same entity and can be handled
        // identically. For that reason, the code often restricts itself to inspecting the
        // first entry of the blocks in the diverse BlockVectors.
        // ********************************************************************************

        typedef typename GFS::Traits::GridViewType GV;
        typedef typename RankVector::size_type size_type;
        const GV& gv = _gfs.gridView();

        // Do we need to communicate at all?
        const bool need_communication = _gfs.gridView().comm().size() > 1;

        // First find out which DOFs we share with other processors
        typedef typename BackendVectorSelector<GFS,bool>::Type BoolVector;
        BoolVector sharedDOF(_gfs, false);

        if (need_communication)
        {
          SharedDOFDataHandle<GFS,BoolVector> data_handle(_gfs,sharedDOF,false);
          _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
        }

        // Count the shared DOFs that we own
        typedef typename C::ParallelIndexSet::GlobalIndex GlobalIndex;
        GlobalIndex count = 0;

        for (size_type i = 0; i < sharedDOF.N(); ++i)
          if (owned_for_amg(i) && sharedDOF.base()[i][0])
            ++count;

        dverb << gv.comm().rank() << ": shared block count is " << count.touint() << std::endl;

        // Communicate the per-rank counts of owned and shared DOFs to all processes.
        std::vector<GlobalIndex> counts(_gfs.gridView().comm().size());
        _gfs.gridView().comm().allgather(&count, 1, &(counts[0]));

        // Compute the start index start_p = \sum_{i=0}^{i<p} counts_i
        GlobalIndex start = std::accumulate(counts.begin(),counts.begin() + _rank,GlobalIndex(0));
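        // Example: with per-rank counts (3,5,2), rank 0 starts numbering at 0,
        // rank 1 at 3 and rank 2 at 8, so every rank numbers its owned shared
        // DOFs in a globally consecutive, disjoint range.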

        // Assign a globally unique index to every DOF that is both owned and shared.
        typedef typename BackendVectorSelector<GFS,GlobalIndex>::Type GIVector;
        GIVector scalarIndices(_gfs, std::numeric_limits<GlobalIndex>::max());

        for (size_type i = 0; i < sharedDOF.N(); ++i)
          if (owned_for_amg(i) && sharedDOF.base()[i][0])
          {
            scalarIndices.base()[i][0] = start;
            ++start;
          }

        // Publish the global indices of the shared DOFs to the other processors.
        if (need_communication)
        {
          MinDataHandle<GFS,GIVector> data_handle(_gfs,scalarIndices);
          _gfs.gridView().communicate(data_handle,_interiorBorder_all_interface,Dune::ForwardCommunication);
        }

        // Set up the index set
        c.indexSet().beginResize();
        for (size_type i = 0; i < scalarIndices.N(); ++i)
        {
          Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr;
          if (scalarIndices.base()[i][0] != std::numeric_limits<GlobalIndex>::max())
          {
            // a global index exists in the index set
            if (owned_for_amg(i))
            {
              // This DOF is managed by us.
              attr = Dune::OwnerOverlapCopyAttributeSet::owner;
            }
            else if (is_ghost_for_amg(i) && c.getSolverCategory() == static_cast<int>(SolverCategory::nonoverlapping))
            {
              // use attribute overlap for ghosts in nonoverlapping grids
              attr = Dune::OwnerOverlapCopyAttributeSet::overlap;
            }
            else
            {
              attr = Dune::OwnerOverlapCopyAttributeSet::copy;
            }
            c.indexSet().add(scalarIndices.base()[i][0], typename C::ParallelIndexSet::LocalIndex(i,attr));
          }
        }
        c.indexSet().endResize();
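        // The index set now maps each shared DOF to a (global index, local
        // index, attribute) triple; the owner/overlap/copy attributes tell the
        // parallel AMG infrastructure which rank is responsible for each DOF
        // when building its communication patterns.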

        // Compute the set of neighboring ranks using communication
        std::set<int> neighbors;

        if (need_communication)
        {
          GFSNeighborDataHandle<GFS,int> data_handle(_gfs,_rank,neighbors);
          _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
        }

        c.remoteIndices().setNeighbours(neighbors);
        c.remoteIndices().template rebuild<false>();
      }

#endif // HAVE_MPI

      template<int s, bool isFakeMPIHelper>
      struct CommSelector
      {
        typedef Dune::Amg::SequentialInformation type;
      };


#if HAVE_MPI

      // Need MPI for OwnerOverlapCopyCommunication
      template<int s>
      struct CommSelector<s,false>
      {
        typedef OwnerOverlapCopyCommunication<bigunsignedint<s>,int> type;
      };

#endif // HAVE_MPI
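
      // CommSelector picks the parallel information type for AMG at compile
      // time: with real MPI it yields OwnerOverlapCopyCommunication, while a
      // fake (single-process) MPIHelper falls back to SequentialInformation.
      // Typical use (sketch): CommSelector<96,Dune::MPIHelper::isFake>::type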

    } // namespace istl
  } // namespace PDELab
} // namespace Dune

#endif // DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
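
A minimal usage sketch (not part of the header): `GFS` stands for a PDELab grid function space type, `gfs` for an instance of it, and `x` for a matching degree-of-freedom vector; all three are assumptions, not definitions from this file.

  Dune::PDELab::istl::ParallelHelper<GFS> helper(gfs);

  // Rank-local contribution to <x,x>, counting each DOF exactly once
  // across all ranks, reduced to the consistent global value.
  double local = helper.disjointDot(x,x);
  double norm2 = gfs.gridView().comm().sum(local);

  // Alternatively, zero out all DOFs owned by another rank.
  helper.maskForeignDOFs(x);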