Bimetric 3+1 toolkit for spherical symmetry
sys/mpiWorld.h
/**
 *  @file      mpiWorld.h
 *  @brief     The MPI implementation that splits a grid among the ranks.
 *  @author    Mikica Kocic
 *  @copyright GNU General Public License (GPLv3).
 */

#include <cstdlib>  // for std::exit, used by quit()
#include <mpi.h>

/** Implements the message passing interface (MPI) for high-performance computing (HPC).
 *
 *  In the MPI environment, the calculations on the grid are split among the ranks,
 *  and the edges are exchanged between each pair of neighboring ranks in the following way:
 *  <pre>
 *        .---- left -----.-------.---- right ----.
 *        | ghost | edge  |       | edge  | ghost |
 *        +-------+-----------------------+-------+
 *   ...  |       |     grid chunk i      |       |
 *        +-------+-----------------------+-------+------------+-------+
 *                                |       |   grid chunk i+1   |       |  ...
 *                                +-------+--------------------+-------+
 *                                :     left      :
 *                                : ghost | edge  :
 *                                `-------v-------´
 *                         exchanged between two neighbors              </pre>
 *
 *  The overlapping edges of one rank become the neighbor's ghosts (of the same size).
 *  The ghost of the left-most rank is determined from the inner boundary conditions and
 *  the ghost of the right-most rank is determined from the outer boundary conditions.
 *
 *  When compiling, use `mpic++` with `-D_USEMPI`. To run, use `mpiexec` or `mpirun`.
 *
 *  @warning The maximal slicing is not compatible with MPI since the boundary value
 *  problem for the slicing differential equation requires access to the whole grid!
 *
 *  @todo Implement the collective I/O with MPI_File_open and MPI_File_write.
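 *
 *  @par Example
 *  A minimal usage sketch. The buffer pointers and the sizes nTotal, nGhost, and
 *  nSteps below are placeholders; the actual solver supplies its own grid layout:
 *  @code
 *      MPIWorld mpi;
 *      mpi.defineGhostChunk( GFCNT, nTotal, nGhost );  // describe one ghost region
 *
 *      for( Int step = 0; step < nSteps; ++step )
 *      {
 *          // The four pointers address this rank's ghost zones and edges
 *          if( ! mpi.exchangeBoundaries( leftGhost, leftEdge, rightEdge, rightGhost ) ) {
 *              break;  // a neighbor has signaled an abort
 *          }
 *          // ... evolve the interior of the chunk ...
 *
 *          if( ! mpi.waitExchange () ) {  // the edges may be overwritten only after this
 *              break;
 *          }
 *      }
 *      // The destructor (or quit) finalizes MPI
 *  @endcode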
 */
class MPIWorld
{
    int worldSize;           //!< The size of the MPI world
    int myRank;              //!< Our rank in the MPI world
    int leftRank;            //!< The rank of the left neighbor
    int rightRank;           //!< The rank of the right neighbor
    MPI_Request  waitLeft;   //!< Outstanding MPI_Isend request to the left rank
    MPI_Request  waitRight;  //!< Outstanding MPI_Isend request to the right rank
    MPI_Datatype ghostChunk; //!< Describes a ghost-size block at each slice

public:

    /** Initializes the MPI environment and finds our rank in the MPI world.
     */
    MPIWorld ()
    {
        // Initialize the MPI environment
        //
        MPI_Init( NULL, NULL );

        // Find out our rank and the size of the MPI world
        //
        MPI_Comm_rank( MPI_COMM_WORLD, &myRank );
        MPI_Comm_size( MPI_COMM_WORLD, &worldSize );

        leftRank  = isFirstInRank() ? -1 : myRank - 1;
        rightRank = isLastInRank()  ? -1 : myRank + 1;

        waitLeft = waitRight = MPI_REQUEST_NULL;
    }

    /** Cancels any outstanding MPI requests, waits for the other ranks at a barrier,
     *  then finalizes MPI.
     */
    void cleanup ()
    {
        if( waitLeft != MPI_REQUEST_NULL ) {
            MPI_Cancel( &waitLeft );
        }
        if( waitRight != MPI_REQUEST_NULL ) {
            MPI_Cancel( &waitRight );
        }
        MPI_Barrier( MPI_COMM_WORLD );
        MPI_Finalize ();
    }

    ~MPIWorld ()
    {
        cleanup ();
    }

    void quit( int rc )
    {
        cleanup ();
        std::exit( rc );
    }

    int size () const {
        return worldSize;
    }

    int rank () const {
        return myRank;
    }

    bool isFirstInRank () const {
        return myRank == 0;
    }

    bool isLastInRank () const {
        return myRank == worldSize - 1;
    }

    /** Defines an MPI datatype for the ghost chunk, which consists of nGFs blocks,
     *  where each block is nGhost Reals long and the blocks repeat with a stride
     *  of nTotal. Since
     *  `grid index (gf,m,n) = (m) * nGFs * nTotal + (gf) * nTotal + (n)`,
     *  the ghost cells of all the grid functions at one slice form exactly such a
     *  strided vector:
     *
     *  @code
     *  MPI_Type_vector( count, blocklen, stride, oldtype, newtype )
     *
     *     count    = nGFs,         number of blocks
     *     blocklen = nGhost,       number of elements in each block
     *     stride   = nTotal,       number of elements between start of each block
     *     oldtype  = MPI_DOUBLE,   old datatype (handle)
     *     newtype  = &ghostChunk,  new datatype (handle)
     *  @endcode
     *  <pre>
     *   |<- blocklen ->|     |<- blocklen ->|    ...    |<- blocklen ->|
     *   :                    :                                         :
     *   |<----- stride ----->|                                         :
     *   :                                                              :
     *    `------------------------------.-----------------------------´
     *                                 count
     *   Total size = count * blocklen                             </pre>
     *
     *  Note that nGFs does not need to be GFCNT (we can transfer a truncated list
     *  of grid functions).
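     *
     *  @par Example
     *  Conceptually, sending one ghostChunk from a base pointer `edge` picks out the
     *  same elements as this manual packing (illustration only; `sendBuffer` is a
     *  hypothetical contiguous buffer, and the actual transfer is done by MPI):
     *  @code
     *      for( Int gf = 0; gf < nGFs; ++gf ) {
     *          for( Int n = 0; n < nGhost; ++n ) {
     *              sendBuffer[ gf * nGhost + n ] = edge[ gf * nTotal + n ];
     *          }
     *      }
     *  @endcode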
     */
    void defineGhostChunk( Int nGFs, Int nTotal, Int nGhost )
    {
        MPI_Datatype data_type = sizeof(Real) == sizeof(double)
                               ? MPI_DOUBLE : MPI_LONG_DOUBLE;
        MPI_Type_vector( nGFs, nGhost, nTotal, data_type, &ghostChunk );
        MPI_Type_commit( &ghostChunk );
    }

    /** Exchanges the grid regions at the boundaries with the neighboring MPI ranks.
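     *
     *  @par Example
     *  A sketch of how the four pointers might be set up when each grid-function row
     *  of nTotal Reals keeps nGhost ghost points at both ends (the names base, nTotal
     *  and nGhost are placeholders for the solver's own layout):
     *  @code
     *      Real* leftGhost  = base;                        // incoming, from the left rank
     *      Real* leftEdge   = base + nGhost;               // outgoing, first nGhost interior points
     *      Real* rightEdge  = base + nTotal - 2 * nGhost;  // outgoing, last nGhost interior points
     *      Real* rightGhost = base + nTotal - nGhost;      // incoming, from the right rank
     *      mpi.exchangeBoundaries( leftGhost, leftEdge, rightEdge, rightGhost );
     *  @endcode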
     */
    bool exchangeBoundaries
    (
        Real* leftGhost,  //!< Where to put the right edge of the left neighbor
        Real* leftEdge,   //!< Pointer to the left edge of our data
        Real* rightEdge,  //!< Pointer to the right edge of our data
        Real* rightGhost, //!< Where to put the left edge of the right neighbor
        bool receiveEdges = true //!< Whether to also receive the neighbors' edges (or just send ours)
        )
    {
        waitLeft = waitRight = MPI_REQUEST_NULL;

        /// - Send our edges (they become the neighbors' ghosts)
        ///
        if( leftRank >= 0 ) {
            if( MPI_SUCCESS != MPI_Isend( leftEdge, 1, ghostChunk, leftRank, 0,
                                          MPI_COMM_WORLD, &waitLeft ) ) {
                return false;
            }
        }
        if( rightRank >= 0 ) {
            if( MPI_SUCCESS != MPI_Isend( rightEdge, 1, ghostChunk, rightRank, 0,
                                          MPI_COMM_WORLD, &waitRight ) ) {
                return false;
            }
        }

        /// - Optionally receive the neighbors' edges so they become our ghosts
        ///
        if( ! receiveEdges ) {
            return true;
        }

        if( leftRank >= 0 )
        {
            MPI_Status status;
            if( MPI_SUCCESS != MPI_Recv( leftGhost, 1, ghostChunk,
                                         leftRank, MPI_ANY_TAG,
                                         MPI_COMM_WORLD, &status )
                || status.MPI_TAG != 0 )
            {
                // std::cerr << "[MPI #" << rank() << " got abort from left]";
                abortExchange( leftGhost );
                return false;
            }
        }
        if( rightRank >= 0 )
        {
            MPI_Status status;
            if( MPI_SUCCESS != MPI_Recv( rightGhost, 1, ghostChunk,
                                         rightRank, MPI_ANY_TAG,
                                         MPI_COMM_WORLD, &status )
                || status.MPI_TAG != 0 )
            {
                // std::cerr << "[MPI #" << rank() << " got abort from right]";
                abortExchange( leftGhost );
                return false;
            }
        }

        return true;
    }

    /** Waits for the other ranks to have received our edges.
     */
    bool waitExchange ()
    {
        if( waitLeft != MPI_REQUEST_NULL ) {
            MPI_Status status;
            if( MPI_SUCCESS != MPI_Wait( &waitLeft, &status ) ) {
                return false;
            }
        }
        if( waitRight != MPI_REQUEST_NULL ) {
            MPI_Status status;
            if( MPI_SUCCESS != MPI_Wait( &waitRight, &status ) ) {
                return false;
            }
        }
        return true;
    }

    /** Sends a nonblocking signal (a message with tag 1) to the neighboring ranks
     *  that we are quitting the integration.
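     *
     *  @par Example
     *  A sketch of the abort path on a local failure (the failure test is hypothetical;
     *  the dummy buffer just has to be large enough for one ghost chunk):
     *  @code
     *      if( localStepFailed ) {
     *          mpi.abortExchange( leftGhost );
     *          mpi.quit( -1 );
     *      }
     *  @endcode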
     */
    void abortExchange( Real* dummyData )
    {
        if( leftRank >= 0 ) {
            MPI_Request request;
            MPI_Isend( dummyData, 1, ghostChunk, leftRank, 1, MPI_COMM_WORLD, &request );
            MPI_Request_free( &request );
        }
        if( rightRank >= 0 ) {
            MPI_Request request;
            MPI_Isend( dummyData, 1, ghostChunk, rightRank, 1, MPI_COMM_WORLD, &request );
            MPI_Request_free( &request );
        }
    }
};