Bimetric 3+1 toolkit for spherical symmetry
/**
 *  @file      gridDriver.h
 *  @brief     Implements a spatially cell-centered uniform grid.
 *  @author    Mikica Kocic
 *  @copyright GNU General Public License (GPLv3).
 *
 *  @page fdm Finite Difference Approximation
 *
 *  At the core of the finite difference approximation is a discretization of the
 *  spacetime, or a numerical grid (shown below). Then, a function `f(t,r)` is
 *  represented by values at a discrete set of points `(m,n)`.
 *
 *  <pre>
 *         p in M        <-->        (t,r)        <-->        (m,n)
 *    (point in a manifold)      (in a chart)          (discretized point)
 *                                     |                        |
 *                                     V                        V
 *                                   f(t,r)                   f(m,n)        </pre>
 *
 *  The points `(m,n)` are called the grid points or nodes. The grid points are chosen
 *  to reside at the center of the grid cells (the other approach would be to associate
 *  the grid points with the vertices of the grid cells).
 *
 *  The data structure @ref GridPoint is a wrapper for the set of fields `{f,...}` that
 *  are kept in the memory for the point `(m,n)`.
 *
 *  The grid consists of the *interior points*, and the *virtual points* at the boundary.
 *  The virtual points are used to impose the boundary conditions.
 *
 *  A cell-centered uniform grid with `nLen` grid points in each grid row is shown below
 *  (see also @cite Baumgarte:2010nr, p.192).
 *
 *  <pre>
 *                left virtual                grid cell       grid point      right virtual
 *                  point(s)                  .---^---.            |            point(s)
 *            .         |               n-1  '    n    '  n+1      |                |
 *            :    +----|----+-------+-------+---------+-------+---|---+-------+----|----+
 *       m+1       |    O    |       |       | (m+1,n) |       |   X   |       |    O    |
 *                 +---------+-------+-------.=========.-------+-------+-------+---------+
 *       m         |         |       |(m,n-1)|  (m,n)  |(m,n+1)|       |       |         |
 *                 +---------+-------+-------'========='-------+-------+-------+---------+
 *       m-1       |         |       |       |         |       |       |       |         |
 *            .    +---------'-------+-------+---------+-------+-------+-------'---------+
 *            :         0    |   1       2      . . .                    nLen  | nLen+1
 *                           ^                                                 ^
 *                         r_min                                             r_max   </pre>
 *
 *  The cell-centered uniform grid implies:
 *  @code
 *      r = r_min + ( n - 1/2 ) delta_r
 *      delta_r = ( r_max - r_min ) / nLen
 *  @endcode
 *
 *  @li if `nGhost = 1`, `n` ranges between 1 and `nLen`;
 *      the boundary virtual points are `grid[m][0]` and `grid[m][nLen+1]`
 *
 *  @li `m` ranges between 1 and `mLen`, and wraps around;
 *      the slices `grid[mLen+1]`... are used for intermediate integration steps.
 *
 *  In parallel simulations, the grid is split across processes using MPI,
 *  allowing storage of large grid functions and parallel work on each part.
 */
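
/*  A worked example of the cell-centered mapping above, assuming (hypothetically)
    r_min = 0, r_max = 1, nLen = 10 and nGhost = 1, so that delta_r = 0.1:

        n    :   0      1      2      3     ...    10     11
        r(n) : -0.05   0.05   0.15   0.25   ...   0.95   1.05

    The virtual points n = 0 and n = 11 lie half a cell outside [r_min, r_max], which is
    what makes parity (mirror) boundary conditions at r = 0 straightforward to impose.
    With nGhost ghost cells per side the same mapping generalizes to
    r = r_min + ( n - nGhost + 1/2 ) * delta_r.
*/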

/////////////////////////////////////////////////////////////////////////////////////////
/** @defgroup g4 Grid driver */
/////////////////////////////////////////////////////////////////////////////////////////
/*@{*/

/** Uniform grid of grid functions.
 */
class UniformGrid
    : public MPIWorld
{
    /** Allocates memory for the numerical grid.
     *
     *  We allocate `mExtra` extra m grid rows for the intermediate steps.
     *  We also allocate 2 * nGhost n-cells as the L/R spatial boundary.
     */
    void createGrid ()
    {
        size_t mDim = mLen + mExtra;
        grid = new Real[ mDim * nTotal * GFCNT ];
        std::fill_n( grid, mDim * nTotal * GFCNT, NAN );

        slog << " Allocated " << mDim << " * " << nTotal
             << " * " << GFCNT
             << " * Real" << 8 * sizeof(Real)
             << " = " << mDim * nTotal * GFCNT * sizeof(Real)
             << " bytes" << std::endl
             << " Tracking " << GFCNT << " grid functions at a point"
             << std::endl << std::endl;
    }
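
    /*  For illustration: the storage allocated above is addressed as
        grid[ ( m * GFCNT + gf ) * nTotal + n ]  (see GF() below), i.e. time slices are
        outermost, then grid functions, then the radial index, so each grid function
        occupies one contiguous run of nTotal Reals within a time slice:

            // Consecutive n of the same grid function are adjacent in memory,
            // which is what the MPI ghost-chunk datatype in the constructor relies on.
            assert( &GF( gf, m, n + 1 ) == &GF( gf, m, n ) + 1 );

        The index expression is the one used throughout this file; the assert line is
        only an illustrative sketch.
    */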

    /** Releases memory allocated for the numerical grid.
     */
    void deleteGrid ()
    {
        if ( grid != NULL ) {
            delete[] grid;
            grid = NULL;
        }
    }

protected:

    Real* grid;      //!< The grid storage

    // Spatial discretization:
    Int  nOffset;    //!< Offset in the MPI world
    Int  nWorldLen;  //!< Number of cells in the MPI world
    Int  nLen;       //!< Number of our cells in `r`-direction
    Int  nGhost;     //!< Number of virtual (ghost) cells on the `r`-boundary
    Int  nTotal;     //!< Total number of `r`-cells (`nTotal = nLen + 2 * nGhost`)
    Real delta_r;    //!< The grid spacing across `r`

    // Temporal discretization:
    Int  mLen;       //!< Number of cached cells in `t`-direction
    Int  mExtra;     //!< Number of extra cells (for integration substeps)

public:

    Real* get_grid () { return grid; }

    Int  get_nOffset   () const { return nOffset;   }
    Int  get_nWorldLen () const { return nWorldLen; }
    Int  get_nLen      () const { return nLen;      }
    Int  get_nGhost    () const { return nGhost;    }
    Int  get_nTotal    () const { return nTotal;    }
    Int  get_mLen      () const { return mLen;      }
    Real get_delta_r   () const { return delta_r;   }

    Int mpiSize () const { return size (); }
    Int mpiRank () const { return rank (); }

    /** Constructs the uniform grid as specified in the parameter file.
     */
    UniformGrid( Parameters& params )
    {
        if ( ! isFirstInRank () ) {
            slog.disabled = true; // Only the first in rank can access the output
        }

        params.get( "grid.nLen",    nWorldLen, 10  );
        params.get( "grid.nGhost",  nGhost,    10  );
        params.get( "grid.delta_r", delta_r,   0.1 );
        params.get( "grid.mLen",    mLen,      5   );
        params.get( "grid.mExtra",  mExtra,    9   );

        /// - Split the data equally among all the ranks in the MPI world
        ///
        nLen = nWorldLen / mpiSize();
        nOffset = mpiRank() * nLen; // Seek offset in the MPI world

        /// - The last in rank gets 'all the rest'
        ///
        if ( mpiRank() == mpiSize() - 1 && mpiSize() > 1 ) {
            nLen += ( nWorldLen % mpiSize() );
        }

        nTotal = nLen + 2 * nGhost;

        slog << "Uniform Grid Driver:" << std::endl << std::endl
             << " nLen = " << nLen << ", nGhost = " << nGhost
             << ", delta_r = " << delta_r
             << ", mLen = " << mLen << ", mExtra = " << mExtra << std::endl
             << " Compile-time: CFDS_ORDER = " << CFDS_ORDER << std::endl;

    #if _OPENMP
        if ( mpiSize() > 1 ) {
            omp_set_num_threads( 1 ); /// @todo make omp.nthreads configurable
        }
        slog << " Using OpenMP: #procs = "
             << omp_get_num_procs() << ", max #threads = "
             << omp_get_max_threads() << std::endl;
    #endif

        if ( mpiSize() > 1 ) {
            slog << " Using MPI: world size = " << mpiSize()
                 << ", sum of world's nLen = " << nWorldLen << ", nOffset = " << nOffset
                 << std::endl;
        }

        /// - Sanity check that nGhost >= CFDS_ORDER/2
        ///
        if( nGhost < CFDS_ORDER/2 ) {
            slog << "Error: Ghost region does not fit the FD order." << std::endl;
            quit( -1 );
        }

        /// - Allocate memory for the grid
        ///
        createGrid ();

        /// - Configure MPI ghost chunk datatype
        ///
        /// @warning Using fld::mpiBoundary to split GFs works only if the grid index
        ///          is organized as `(<m> * GFCNT + <gf> ) * nTotal + <n>`.
        ///          Replace fld::mpiBoundary by GFCNT to exchange all the GFs.
        ///
        defineGhostChunk( fld::mpiBoundary + 1, nTotal, nGhost );
    }

    /** Access the given grid function data.
     */
    inline Real& GF( Int gf, Int m, Int n ) {
        return grid[ ( m * GFCNT + gf ) * nTotal + n ];
    }

    /** Exchange the boundaries at the given time slice.
     */
    bool exchangeBoundaries( Int m, bool receiveEdges = true )
    {
        Real* timeSlice = &GF( fld::t, m, 0 );
        return MPIWorld::exchangeBoundaries(
            timeSlice,                           // Left ghost  (received)
            timeSlice + nGhost,                  // Left edge   (sent)
            timeSlice + nGhost + nLen - nGhost,  // Right edge  (sent)
            timeSlice + nGhost + nLen,           // Right ghost (received)
            receiveEdges  // True if to receive the edges (otherwise just send ghosts)
        );
    }

    /** Nonblocking signal to all the ranks that we are quitting integration.
     */
    void abortExchange ()
    {
        MPIWorld::abortExchange( /* dummy data */ &GF( fld::t, 0, 0 ) );
    }
};
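
/*  The pointer arithmetic in UniformGrid::exchangeBoundaries() splits the n-range of a
    time slice into four chunks.  A sketch, assuming (hypothetically) nGhost = 3 and
    nLen = 100, so nTotal = 106:

        Real* ts         = &GF( fld::t, m, 0 );
        Real* leftGhost  = ts;                  // n in [  0,   3)  -- received from left
        Real* leftEdge   = ts + nGhost;         // n in [  3,   6)  -- sent to the left
        Real* rightEdge  = ts + nLen;           // n in [100, 103)  -- sent to the right
        Real* rightGhost = ts + nGhost + nLen;  // n in [103, 106)  -- received from right

    That is, each rank sends its first and last nGhost interior cells to its neighbours
    and receives their edges into its own ghost regions.  The variable names above are
    only illustrative; the offsets match the call in exchangeBoundaries().
*/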

/////////////////////////////////////////////////////////////////////////////////////////

/** GridUser contains cached variables from the grid-driver.
 *  @note Finite differences are defined for the grid users.
 */
class GridUser
{
private:

    Real* grid;    //!< Private access to the grid storage (not shared with descendants)

protected:

    UniformGrid* gridDriver;   //!< The attached driver (available to descendants)

    Int nOffset;       //!< Offset in the MPI world
    Int nWorldLen;     //!< Number of cells in the MPI world
    Int nLen;          //!< Number of cells in `r`-direction
    Int nGhost;        //!< Number of virtual (ghost) cells on the `r`-boundary
    Int nTotal;        //!< Total number of cells in `r`-direction including ghosts
    Int mLen;          //!< Number of cells in `t`-direction

    Real delta_r;      //!< The grid spacing across `r`
    Real inv_delta_r;  //!< `1 / delta_r`
    Real inv_delta_rr; //!< `1 / delta_r^2`

public:
    /////////////////////////////////////////////////////////////////////////////////////

    Int mpiSize () const { return gridDriver->mpiSize (); }
    Int mpiRank () const { return gridDriver->mpiRank (); }

    GridUser( UniformGrid& ug )
        : gridDriver( &ug )
    {
        grid      = ug.get_grid      ();
        nOffset   = ug.get_nOffset   ();
        nWorldLen = ug.get_nWorldLen ();
        nLen      = ug.get_nLen      ();
        mLen      = ug.get_mLen      ();
        nGhost    = ug.get_nGhost    ();
        nTotal    = ug.get_nTotal    ();
        delta_r   = ug.get_delta_r   ();

        // Calculate various constants involving 1/delta_r
        //
        inv_delta_r  = 1 / ug.get_delta_r ();
        inv_delta_rr = 1 / ( ug.get_delta_r () * ug.get_delta_r () );
    }

    /** Access the given grid function data.
     */
    inline Real& GF( Int gf, Int m, Int n ) {
        return grid[ ( m * GFCNT + gf ) * nTotal + n ];
    }

    /** `emitField` is a macro that emits the method which encapsulates a grid function.
     *  Such a method can later be used to access data in a grid at a point given by (m,n).
     *  For example, `emitField(r)` defines `r(m,n)`.
     */
    #define emitField(gf) \
        inline Real& gf( Int m, Int n ) { return GF( fld::gf, m, n ); }

    #if 1
        #define emitDerivative_r(gf)   emitField(gf##_r)
        #define emitDerivative_rr(gf)  emitField(gf##_rr)
    #else
        #define emitDerivative_r(gf) \
            inline Real gf##_r( Int m, Int n )  { return GF_r( gf, m, n ); }
        #define emitDerivative_rr(gf) \
            inline Real gf##_rr( Int m, Int n ) { return GF_rr( gf, m, n ); }
    #endif

    // The system GFs are 'time' and 'space', which are defined in "gridFunctions.h"
    //
    emitField( t )
    emitField( r )
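
    /*  For example, `emitField( r )` above expands to

            inline Real& r( Int m, Int n ) { return GF( fld::r, m, n ); }

        so a descendant of GridUser can read `Real rc = r(m,n);` or assign `t(m,n) = 0;`
        directly, since the emitted method returns a reference into the grid storage.
        (The two statements are only usage illustrations, not code from this file.)
    */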

    /////////////////////////////////////////////////////////////////////////////////////

    /** Cubic spline smoother of a grid function.
     */
    void cubicSplineShmooth( Int m, Int gf, Int lin2n, Int cub2n )
    {
        // The accuracy is very low at low r. Assume that the first few cells are linear.
        //
        if( lin2n > 0 )
        {
            Real dydx = delta_r * GF(gf,m,nGhost+lin2n) / r(m,nGhost+lin2n);
            for ( Int i = 0; i < lin2n; ++i ) {
                GF(gf,m,nGhost+i) = ( i + 0.5 ) * dydx;
            }
        }

        // A six-point cubic spline to smooth the region around r = 0.
        //
        if( cub2n > 0 )
        {
            static CubicSpline spline( 6 );

            Real pts[6][2] = {
                { r(m,nGhost           ), GF(gf,m,nGhost           ) },
                { r(m,nGhost + 3       ), GF(gf,m,nGhost + 3       ) },
                { r(m,nGhost + cub2n/2 ), GF(gf,m,nGhost + cub2n/2 ) },
                { r(m,nGhost + cub2n-6 ), GF(gf,m,nGhost + cub2n-6 ) },
                { r(m,nGhost + cub2n-3 ), GF(gf,m,nGhost + cub2n-3 ) },
                { r(m,nGhost + cub2n   ), GF(gf,m,nGhost + cub2n   ) }
            };
            spline.initialize( pts );

            for( Int i = 0; i < cub2n; ++i ) {
                GF(gf,m,nGhost+i) = spline( r(m,nGhost+i) );
            }
        }
    }

    /////////////////////////////////////////////////////////////////////////////////////

    void applyBoundaryConditions( Int m, Int gf, Int parity );
    void smoothenGF ( Int m, Int copy2gf, Int outgf, Int ingf, Int parity );
    void smoothenGF2( Int m, Int copy2gf, Int outgf, Int ingf, Int parity );
};

/////////////////////////////////////////////////////////////////////////////////////////

/** A wrapper for a grid point so that all of the grid functions
 *  can be treated as a compound object when reading/writing.
 */
class GridPoint : GridUser
{
    Int m, n;

public:

    GridPoint( UniformGrid& ug, Int mPos, Int nPos )
        : GridUser( ug ), m( mPos ), n( nPos )
    {}

    /** Resets all the variables to a specific value (by default NAN).
     */
    void clear( Real value = NAN )
    {
        for( Int gf = 0; gf < GFCNT; ++gf ) {
            GF( gf, m, n ) = value;
        }
    }

    /** Reads the variable values from a file stream.
     */
    bool read( FILE* inf, bool isBinary, const std::vector<Int>& input )
    {
        auto len = input.size();
        Real* data = (Real*)alloca( len * sizeof(Real) );

        if( isBinary )
        {
            if( len != fread( data, sizeof(data[0]), len, inf ) ) {
                std::cerr << "Error: Premature end of grid." << std::endl;
                return false;
            }
        }
        else
        {
            char* line = (char*)alloca( input.size() * 80 * sizeof(char) );
            line[0] = '\0';
            do {
                if( ! fgets( line, input.size() * 80 - 1, inf ) ) {
                    std::cerr << "Error: Premature end of grid." << std::endl;
                    return false;
                }
            } while( line[0] == '*' ); // Ignore comments (starting with '*')

            size_t count = sscanf_Real( line, data, len );
            if( count != len ) {
                std::cerr << "Error: Wrong number of variables." << std::endl;
                return false;
            }
        }

        for( size_t gf = 0; gf < len; ++gf ) {
            GF( input[gf], m, n ) = data[gf];
        }

        return true;
    }
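
    /*  A minimal usage sketch of read(): filling one whole time slice, record by record.
        The stream `inf`, the UniformGrid instance `ug` and the field list below are
        assumptions, not part of this header:

            std::vector<Int> fields = { fld::t, fld::r };   // one record = these fields
            for( Int n = 0; n < ug.get_nTotal(); ++n ) {
                GridPoint p( ug, 0, n );                    // time slice m = 0
                if( ! p.read( inf, false, fields ) ) {      // false = text format
                    break;                                  // premature end of grid
                }
            }

        In text mode each record is a single line of real numbers; lines beginning
        with '*' are skipped as comments.
    */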

    /** Writes the variable values to a file stream.
     *  @warning The order of fields in data[] (the output data record)
     *           should match the Mathematica notebook!
     */
    void write( FILE* outf, bool isBinary, const std::vector<GF_Descriptor> output )
    {
        auto len = output.size(); /// @todo fld::output should be dynamic in ID class
        Real* data = (Real*)alloca( len * sizeof(Real) );

        for( size_t gf = 0; gf < len; ++gf ) {
            data[gf] = GF( output[gf].gf, m, n );
        }

        if( isBinary ) {
            fwrite( data, sizeof(data[0]), len, outf );
        }
        else {
            for( size_t i = 0; i < len; ++i ) {
                if ( i > 0 ) fputs( "\t", outf );
                fputReal( outf, data[i] );
            }
            fputs( "\n", outf );
        }
    }
};

/////////////////////////////////////////////////////////////////////////////////////////

void GridUser::applyBoundaryConditions( Int m, Int gf, Int parity )
{
    // Left ghost region
    for( Int i = 0; i < nGhost; ++i ) {
        GF( gf, m, nGhost - i - 1 ) = parity * GF( gf, m, nGhost + i );
    }

    // Right ghost region
    for( Int n = nGhost + nLen; n < nTotal; ++n ) {
        extrapolate_R( gf, m, n );
    }
}
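
/*  A worked example of the parity condition above, assuming (hypothetically) nGhost = 3.
    On the staggered grid r(m,nGhost-1) = -r(m,nGhost), so the left ghost cells mirror
    the first interior cells with a sign given by the parity of the grid function:

        i = 0 :   GF( gf, m, 2 ) = parity * GF( gf, m, 3 )
        i = 1 :   GF( gf, m, 1 ) = parity * GF( gf, m, 4 )
        i = 2 :   GF( gf, m, 0 ) = parity * GF( gf, m, 5 )

    With parity = -1 (an odd function) the field passes through zero at r = 0; with
    parity = +1 (an even function) its first derivative vanishes there.  The right
    boundary has no such symmetry, so it is filled by extrapolation (extrapolate_R).
*/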

void GridUser::smoothenGF2( Int m, Int copy2gf, Int outgf, Int ingf, Int parity )
{
    smoothenGF( m, -1,      fld::gdbg, ingf,      parity );
    smoothenGF( m, copy2gf, outgf,     fld::gdbg, parity );
}

/** Smooths data using a local polynomial regression. The convolution is done with
 *  respect to the Savitzky-Golay matrix that corresponds to a smoothing kernel of
 *  radius `sgRadius` and a polynomial regression of degree `polyDeg`.
 *  @pre     `outgf` must not be equal to `ingf`
 *  @post    `outgf` is copied to `copy2gf` if `copy2gf >= 0`
 *  @warning Does not work with MPI!
 *  @see Mathematica notebook `Savitzky-Golay smoothing.nb`, which tests the algorithm.
 */
void GridUser::smoothenGF( Int m, Int copy2gf, Int outgf, Int ingf, Int parity )
{
    // Smoothing parameters:
    Int sgRadius = 32;   // Default kernel radius
    Int order    = 2;    // The polynomial order is twice this value
    Int guard    = 3;    // Guard buffer between NaN and the convolution window

    // Find the first not-NaN from the left
    //
    Int nFrom = nGhost;
    while( nFrom < nTotal && std::isnan( GF( ingf, m, nFrom ) ) ) {
        ++nFrom;
    }

    // If there were no NaN's, move nFrom as far as possible to the left
    //
    if( nFrom == nGhost ) {
        nFrom = -nLen;
    }

    // Find the last not-NaN from the right (do not average with the extrapolated region)
    //
    Int nTo = nGhost + nLen;
    while( nTo >= nGhost && std::isnan( GF( ingf, m, nTo - 1 ) ) ) {
        --nTo;
    }

    OMP_parallel_for( Int n = nFrom < 0 ? 0 : nFrom; n < nTo; ++n )
    {
        // Find the minimum radius (between 0 and sgRadius)
        //
        Int left  = n - nFrom;
        Int right = nTo - guard - 1 - n;
        Int rad   = right < left ? right : left;
        if ( rad < 0 ) {
            rad = 0;
        }
        else if( rad > sgRadius ) {
            rad = sgRadius;
        }

        // Convolve with the coefficients.
        // The parity pair on the staggered grid, where r(m,nGhost-1) = -r(m,nGhost), reads:
        //
        //     gf( m, nGhost + k ) == gf( m, nGhost-1 - k ) * parity,  where k >= 0
        //
        // Let k = n + i - nGhost; then:
        //
        //     if( k >= 0 )
        //     then: use gf( m, nGhost + k )
        //     else: use gf( m, nGhost-1 - k ) * parity
        //           which is the same as gf( m, nGhost-1 - (n + i - nGhost) ) * parity
        //           or gf( m, 2*nGhost - n - i - 1 ) * parity
        //
        Real sum = 0;
        for( Int i = -rad; i <= rad; ++i ) {
            sum += SG_coeff[rad][order][ rad + i ]
                 * ( n + i >= nGhost ? GF( ingf, m, n + i )
                                     : parity * GF( ingf, m, 2 * nGhost - n - i - 1 ) );
        }
        GF( outgf, m, n ) = sum;
    }

    // Extrapolate to the right
    //
    for( Int n = nTo; n < nTotal; ++n )
    {
        extrapolate_lin( outgf, m, n );
    }

    // Optionally copy the output to a new location
    //
    if( copy2gf >= 0 )
    {
        OMP_parallel_for( Int n = 0; n < nTotal; ++n ) {
            GF( copy2gf, m, n ) = GF( outgf, m, n );
        }
    }
}
/*@}*/
/////////////////////////////////////////////////////////////////////////////////////////
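
/*  A minimal usage sketch of the grid driver.  The Parameters constructor and the file
    name are assumptions, not part of this header; everything else uses the classes above:

        Parameters  params( "input.ini" );      // hypothetical parameter file
        UniformGrid ug( params );               // reads grid.nLen, grid.nGhost, ...
        GridUser    user( ug );

        // Inspect the radial coordinate of every cell in time slice m = 0
        // (assuming the `r` grid function has already been initialized elsewhere):
        for( Int n = 0; n < ug.get_nTotal(); ++n ) {
            slog << user.r( 0, n ) << std::endl;
        }

    In the solver, evolution and initial-data modules would typically derive from
    GridUser instead, so that the cached grid sizes, the emitted field accessors, and
    helpers like applyBoundaryConditions() and smoothenGF() are directly available.
*/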