dune-common  2.2.0
mpicollectivecommunication.hh
Go to the documentation of this file.
00001 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
00002 // vi: set et ts=4 sw=2 sts=2:
00003 #ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
00004 #define DUNE_MPICOLLECTIVECOMMUNICATION_HH
00005 
#include<iostream>
#include<complex>
#include<algorithm>
#include<functional>
#include<vector>

#include"exceptions.hh"
#include"collectivecommunication.hh"
#include"binaryfunctions.hh"
#include"shared_ptr.hh"
#include"mpitraits.hh"
00021 
00022 #if HAVE_MPI
00023 // MPI header
00024 #include<mpi.h>
00025 
00026 namespace Dune
00027 {
00028 
00029   //=======================================================
00030   // use singleton pattern and template specialization to 
00031   // generate MPI operations
00032   //=======================================================
00033 
00034   template<typename Type, typename BinaryFunction>
00035   class Generic_MPI_Op
00036   {
00037     
00038   public:
00039         static MPI_Op get ()
00040         {
00041           if (!op)
00042                 {
00043                   op = shared_ptr<MPI_Op>(new MPI_Op);
00044                   MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
00045                 }
00046           return *op;
00047         }
00048   private:
00049         static void operation (Type *in, Type *inout, int *len, MPI_Datatype *dptr)
00050         {
00051           BinaryFunction func;
00052           
00053           for (int i=0; i< *len; ++i, ++in, ++inout){
00054             Type temp;
00055             temp = func(*in, *inout);
00056             *inout = temp;
00057           }
00058         }
00059         Generic_MPI_Op () {}
00060         Generic_MPI_Op (const Generic_MPI_Op& ) {}
00061         static shared_ptr<MPI_Op> op;
00062   };
00063 
00064   
00065   template<typename Type, typename BinaryFunction>
00066   shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(static_cast<MPI_Op*>(0));
00067   
// Specialize Generic_MPI_Op so that for the listed (type, functor) pairs
// get() returns one of MPI's predefined reduction operations (MPI_SUM,
// MPI_PROD, MPI_MIN, MPI_MAX) instead of creating a user-defined MPI_Op.
// NOTE: comments must stay outside the macro body — a '//' before a
// line-continuation backslash would break the macro.
#define ComposeMPIOp(type,func,op) \
  template<> \
  class Generic_MPI_Op<type, func<type> >{ \
  public:\
    static MPI_Op get(){ \
      return op; \
    } \
  private:\
    Generic_MPI_Op () {}\
    Generic_MPI_Op (const Generic_MPI_Op& ) {}\
  }

  // std::plus -> MPI_SUM for all built-in arithmetic types
  ComposeMPIOp(char, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned char, std::plus, MPI_SUM);
  ComposeMPIOp(short, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned short, std::plus, MPI_SUM);
  ComposeMPIOp(int, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned int, std::plus, MPI_SUM);
  ComposeMPIOp(long, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned long, std::plus, MPI_SUM);
  ComposeMPIOp(float, std::plus, MPI_SUM);
  ComposeMPIOp(double, std::plus, MPI_SUM);
  ComposeMPIOp(long double, std::plus, MPI_SUM);

  // std::multiplies -> MPI_PROD
  ComposeMPIOp(char, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned char, std::multiplies, MPI_PROD);
  ComposeMPIOp(short, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
  ComposeMPIOp(int, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned int, std::multiplies, MPI_PROD);
  ComposeMPIOp(long, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned long, std::multiplies, MPI_PROD);
  ComposeMPIOp(float, std::multiplies, MPI_PROD);
  ComposeMPIOp(double, std::multiplies, MPI_PROD);
  ComposeMPIOp(long double, std::multiplies, MPI_PROD);

  // Dune::Min (from binaryfunctions.hh) -> MPI_MIN
  ComposeMPIOp(char, Min, MPI_MIN);
  ComposeMPIOp(unsigned char, Min, MPI_MIN);
  ComposeMPIOp(short, Min, MPI_MIN);
  ComposeMPIOp(unsigned short, Min, MPI_MIN);
  ComposeMPIOp(int, Min, MPI_MIN);
  ComposeMPIOp(unsigned int, Min, MPI_MIN);
  ComposeMPIOp(long, Min, MPI_MIN);
  ComposeMPIOp(unsigned long, Min, MPI_MIN);
  ComposeMPIOp(float, Min, MPI_MIN);
  ComposeMPIOp(double, Min, MPI_MIN);
  ComposeMPIOp(long double, Min, MPI_MIN);

  // Dune::Max (from binaryfunctions.hh) -> MPI_MAX
  ComposeMPIOp(char, Max, MPI_MAX);
  ComposeMPIOp(unsigned char, Max, MPI_MAX);
  ComposeMPIOp(short, Max, MPI_MAX);
  ComposeMPIOp(unsigned short, Max, MPI_MAX);
  ComposeMPIOp(int, Max, MPI_MAX);
  ComposeMPIOp(unsigned int, Max, MPI_MAX);
  ComposeMPIOp(long, Max, MPI_MAX);
  ComposeMPIOp(unsigned long, Max, MPI_MAX);
  ComposeMPIOp(float, Max, MPI_MAX);
  ComposeMPIOp(double, Max, MPI_MAX);
  ComposeMPIOp(long double, Max, MPI_MAX);

// Helper macro is for this header only; do not leak it to includers.
#undef ComposeMPIOp
00130 
00131 
00132   //=======================================================
00133   // use singleton pattern and template specialization to 
00134   // generate MPI operations
00135   //=======================================================
00136 
00140   template<>
00141   class CollectiveCommunication<MPI_Comm>
00142   {
00143   public:
00145         CollectiveCommunication (const MPI_Comm& c)
00146           : communicator(c)
00147         {
00148           if(communicator!=MPI_COMM_NULL){
00149             MPI_Comm_rank(communicator,&me);
00150             MPI_Comm_size(communicator,&procs);
00151           }else{
00152             procs=0;
00153             me=-1;
00154           }
00155         }
00156 
00158         int rank () const
00159         {
00160           return me;
00161         }
00162 
00164         int size () const
00165         {
00166           return procs;
00167         }
00168 
00170         template<typename T>
00171         T sum (T& in) const // MPI does not know about const :-(
00172         {
00173           T out;
00174           allreduce<std::plus<T> >(&in,&out,1);
00175           return out;
00176         }
00177 
00179         template<typename T>
00180         int sum (T* inout, int len) const
00181         {
00182           return allreduce<std::plus<T> >(inout,len);
00183         }
00184 
00186         template<typename T>
00187         T prod (T& in) const // MPI does not know about const :-(
00188         {
00189           T out;
00190           allreduce<std::multiplies<T> >(&in,&out,1);
00191           return out;
00192         }
00193 
00195         template<typename T>
00196         int prod (T* inout, int len) const
00197         {
00198           return allreduce<std::plus<T> >(inout,len);
00199         }
00200 
00202         template<typename T>
00203         T min (T& in) const // MPI does not know about const :-(
00204         {
00205           T out;
00206           allreduce<Min<T> >(&in,&out,1);
00207           return out;
00208         }
00209 
00211         template<typename T>
00212         int min (T* inout, int len) const
00213         {
00214           return allreduce<Min<T> >(inout,len);
00215         }
00216 
00217     
00219         template<typename T>
00220         T max (T& in) const // MPI does not know about const :-(
00221         {
00222           T out;
00223           allreduce<Max<T> >(&in,&out,1);
00224           return out;
00225         }
00226 
00228         template<typename T>
00229         int max (T* inout, int len) const
00230         {
00231           return allreduce<Max<T> >(inout,len);
00232         }
00233 
00235         int barrier () const
00236         {
00237           return MPI_Barrier(communicator);
00238         }
00239 
00241         template<typename T>
00242         int broadcast (T* inout, int len, int root) const
00243         {
00244           return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
00245         }
00246         
00248         template<typename T>
00249         int gather (T* in, T* out, int len, int root) const // note out must have space for P*len elements
00250         {
00251           return MPI_Gather(in,len,MPITraits<T>::getType(),
00252                                                 out,len,MPITraits<T>::getType(),
00253                                                 root,communicator);
00254         }
00255 
00257     template<typename T>
00258     int scatter (T* send, T* recv, int len, int root) const // note out must have space for P*len elements
00259     {
00260       return MPI_Scatter(send,len,MPITraits<T>::getType(),
00261                          recv,len,MPITraits<T>::getType(),
00262                          root,communicator);
00263     }
00264 
00265         operator MPI_Comm () const
00266         {
00267           return communicator;
00268         }
00269 
00271     template<typename T, typename T1>
00272     int allgather(T* sbuf, int count, T1* rbuf) const
00273     {
00274       return MPI_Allgather(sbuf, count, MPITraits<T>::getType(),
00275                            rbuf, count, MPITraits<T1>::getType(),
00276                            communicator);
00277     }
00278 
00280     template<typename BinaryFunction, typename Type>
00281     int allreduce(Type* inout, int len) const
00282     {
00283       Type* out = new Type[len];
00284       int ret = allreduce<BinaryFunction>(inout,out,len);
00285       std::copy(out, out+len, inout);
00286       delete[] out;
00287       return ret;
00288     }
00289     
00291     template<typename BinaryFunction, typename Type>
00292     int allreduce(Type* in, Type* out, int len) const
00293     {
00294       return MPI_Allreduce(in, out, len, MPITraits<Type>::getType(),
00295                     (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
00296     }
00297     
00298   private:
00299         MPI_Comm communicator;
00300         int me;
00301         int procs;
00302   };
00303 } // namespace dune
00304 
00305 #endif
00306 #endif