This article collects and summarizes typical usage examples of the C++ MultiFab class. If you have been wondering what the C++ MultiFab class is for, how to use it, or where to find MultiFab examples, the curated class examples here may help.
A total of 15 MultiFab code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: ARLIM
void
MultiGrid::interpolate (MultiFab& f,
const MultiFab& c)
{
BL_PROFILE("MultiGrid::interpolate()");
//
// Use a Fortran routine to interpolate (prolong) c up to f.
// Note: this returns f = f + P(c), i.e. it ADDS the interpolated c to f.
//
// OMP over boxes
#ifdef _OPENMP
#pragma omp parallel
#endif
for (MFIter mfi(c); mfi.isValid(); ++mfi)
{
const int k = mfi.index();
const Box& bx = c.boxArray()[k];
const int nc = f.nComp();
const FArrayBox& cfab = c[mfi];
FArrayBox& ffab = f[mfi];
FORT_INTERP(ffab.dataPtr(),
ARLIM(ffab.loVect()), ARLIM(ffab.hiVect()),
cfab.dataPtr(),
ARLIM(cfab.loVect()), ARLIM(cfab.hiVect()),
bx.loVect(), bx.hiVect(), &nc);
}
}
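For context, a minimal sketch of the data layout interpolate() expects: the coarse MultiFab lives on the fine BoxArray coarsened by the multigrid ratio of 2, and the call itself is made internally during the upward leg of the V-cycle. The names below are hypothetical and assume the usual BoxLib headers:

Box domain(IntVect::TheZeroVector(), IntVect(D_DECL(63,63,63)));
BoxArray fine_ba(domain);
fine_ba.maxSize(32);                 // chop into grids, as a solver setup would
BoxArray crse_ba = fine_ba;
crse_ba.coarsen(2);                  // same grids at half the resolution
MultiFab f(fine_ba, 1, 0);           // fine correction, updated in place
MultiFab c(crse_ba, 1, 0);           // coarse-grid correction
f.setVal(0.0);
c.setVal(1.0);
// After interpolate(f, c), f holds f + P(c): each fine cell has been
// incremented by the value of its parent coarse cell.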
Example 2: average_down
void average_down (MultiFab& S_fine, MultiFab& S_crse,
int scomp, int ncomp, const IntVect& ratio)
{
BL_ASSERT(S_crse.nComp() == S_fine.nComp());
//
// Coarsen() the fine stuff on processors owning the fine data.
//
BoxArray crse_S_fine_BA = S_fine.boxArray();
crse_S_fine_BA.coarsen(ratio);
MultiFab crse_S_fine(crse_S_fine_BA,ncomp,0);
#ifdef _OPENMP
#pragma omp parallel
#endif
for (MFIter mfi(crse_S_fine,true); mfi.isValid(); ++mfi)
{
// NOTE: The tilebox is defined at the coarse level.
const Box& tbx = mfi.tilebox();
// NOTE: We copy from component scomp of the fine fab into component 0 of the crse fab
// because the crse fab is a temporary that starts at component 0; it is
// not part of the actual coarse MultiFab passed in.
BL_FORT_PROC_CALL(BL_AVGDOWN,bl_avgdown)
(tbx.loVect(), tbx.hiVect(),
BL_TO_FORTRAN_N(S_fine[mfi],scomp),
BL_TO_FORTRAN_N(crse_S_fine[mfi],0),
ratio.getVect(),&ncomp);
}
S_crse.copy(crse_S_fine,0,scomp,ncomp);
}
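A hedged usage sketch: the fine BoxArray must be the coarse one refined by the ratio (crse_ba is an assumption, built as in the sketch above):

IntVect ratio(D_DECL(2,2,2));
BoxArray fine_ba = crse_ba;
fine_ba.refine(ratio);
MultiFab S_fine(fine_ba, 3, 0);
MultiFab S_crse(crse_ba, 3, 0);
// Arithmetically average components 0..2 of S_fine down onto S_crse.
average_down(S_fine, S_crse, 0, 3, ratio);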
Example 3: BL_PROFILE
void
Nyx::strang_second_step (Real time, Real dt, MultiFab& S_new, MultiFab& D_new)
{
BL_PROFILE("Nyx::strang_second_step()");
Real half_dt = 0.5*dt;
int min_iter = 100000;
int max_iter = 0;
int min_iter_grid;
int max_iter_grid;
// Evaluate the comoving scale factor a at the midpoint of this second Strang step.
const Real a = get_comoving_a(time-half_dt);
MultiFab reset_e_src(S_new.boxArray(), S_new.DistributionMap(), 1, NUM_GROW);
reset_e_src.setVal(0.0);
reset_internal_energy(S_new,D_new,reset_e_src);
compute_new_temp (S_new,D_new);
#ifndef FORCING
{
const Real z = 1.0/a - 1.0;
fort_interp_to_this_z(&z);
}
#endif
#ifdef _OPENMP
#pragma omp parallel private(min_iter_grid,max_iter_grid) reduction(min:min_iter) reduction(max:max_iter)
#endif
for (MFIter mfi(S_new,true); mfi.isValid(); ++mfi)
{
// Here bx is the tile region (a subset of the valid region, since tiling is on)
const Box& bx = mfi.tilebox();
min_iter_grid = 100000;
max_iter_grid = 0;
integrate_state
(bx.loVect(), bx.hiVect(),
BL_TO_FORTRAN(S_new[mfi]),
BL_TO_FORTRAN(D_new[mfi]),
&a, &half_dt, &min_iter_grid, &max_iter_grid);
if (S_new[mfi].contains_nan(bx,0,S_new.nComp()))
{
std::cout << "NANS IN THIS GRID " << bx << std::endl;
}
min_iter = std::min(min_iter,min_iter_grid);
max_iter = std::max(max_iter,max_iter_grid);
}
ParallelDescriptor::ReduceIntMax(max_iter);
ParallelDescriptor::ReduceIntMin(min_iter);
if (heat_cool_type == 1 && ParallelDescriptor::IOProcessor())
std::cout << "Min/Max Number of Iterations in Second Strang: " << min_iter << " " << max_iter << std::endl;
}
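The min/max bookkeeping above is the standard two-stage reduction: an OpenMP reduction over tiles within each MPI rank, followed by an MPI reduction across ranks. A stripped-down sketch of just that pattern (mf is assumed; reduction(min:...) and reduction(max:...) require OpenMP 3.1 or newer):

int max_iter = 0;
#ifdef _OPENMP
#pragma omp parallel reduction(max:max_iter)
#endif
for (MFIter mfi(mf,true); mfi.isValid(); ++mfi)
{
    int max_iter_grid = 0;
    // ... per-tile work that fills max_iter_grid ...
    max_iter = std::max(max_iter, max_iter_grid);
}
ParallelDescriptor::ReduceIntMax(max_iter);  // combine across MPI ranks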
Example 4: BL_ASSERT
void
ABec4::aCoefficients (const MultiFab& _a)
{
BL_ASSERT(_a.ok());
BL_ASSERT(_a.boxArray() == (acoefs[0])->boxArray());
invalidate_a_to_level(0);
MultiFab::Copy(*acoefs[0],_a,0,0,acoefs[0]->nComp(),acoefs[0]->nGrow());
}
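MultiFab::Copy(dst, src, srccomp, dstcomp, numcomp, nghost) requires the two MultiFabs to share a BoxArray and each to have at least nghost ghost cells, which is why the coefficient array's nGrow() is passed above. A small sketch (ba is an assumed BoxArray):

MultiFab a(ba, 1, 1);
MultiFab b(ba, 1, 1);
b.setVal(3.0);
MultiFab::Copy(a, b, 0, 0, 1, 1);  // copy component 0, including one ghost layer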
Example 5: advance
void advance (MultiFab& old_phi, MultiFab& new_phi, PArray<MultiFab>& flux,
Real time, Real dt, const Geometry& geom, PhysBCFunct& physbcf,
BCRec& bcr)
{
// Fill the ghost cells of each grid from the other grids
// includes periodic domain boundaries
old_phi.FillBoundary(geom.periodicity());
// Fill physical boundaries
physbcf.FillBoundary(old_phi, time);
int Ncomp = old_phi.nComp();
int ng_p = old_phi.nGrow();
int ng_f = flux[0].nGrow();
const Real* dx = geom.CellSize();
//
// Note that this simple example is not optimized.
// The two MFIter loops below could be merged, in which case
// the intermediate flux MultiFabs would not be needed.
//
// Compute fluxes one grid at a time
for ( MFIter mfi(old_phi); mfi.isValid(); ++mfi )
{
const Box& bx = mfi.validbox();
compute_flux(old_phi[mfi].dataPtr(),
&ng_p,
flux[0][mfi].dataPtr(),
flux[1][mfi].dataPtr(),
#if (BL_SPACEDIM == 3)
flux[2][mfi].dataPtr(),
#endif
&ng_f, bx.loVect(), bx.hiVect(),
(geom.Domain()).loVect(),
(geom.Domain()).hiVect(),
bcr.vect(),
&dx[0]);
}
// Advance the solution one grid at a time
for ( MFIter mfi(old_phi); mfi.isValid(); ++mfi )
{
const Box& bx = mfi.validbox();
update_phi(old_phi[mfi].dataPtr(),
new_phi[mfi].dataPtr(),
&ng_p,
flux[0][mfi].dataPtr(),
flux[1][mfi].dataPtr(),
#if (BL_SPACEDIM == 3)
flux[2][mfi].dataPtr(),
#endif
&ng_f, bx.loVect(), bx.hiVect(), &dx[0] , &dt);
}
}
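A hypothetical caller-side setup for the flux argument: one single-component MultiFab per direction, nodal in that direction only (the same idiom appears in Example 12 below):

PArray<MultiFab> flux(BL_SPACEDIM, PArrayManage);
for (int d = 0; d < BL_SPACEDIM; ++d)
{
    BoxArray face_ba = old_phi.boxArray();
    face_ba.surroundingNodes(d);  // face-centered in direction d
    flux.set(d, new MultiFab(face_ba, 1, 0));
}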
Example 6: MFNorm
//
// What's the slowest way I can think of to compute all the norms??
//
Real
MFNorm (const MultiFab& mfab,
const int exponent,
const int srcComp,
const int numComp,
const int numGrow)
{
BL_ASSERT (numGrow <= mfab.nGrow());
BoxArray boxes = mfab.boxArray();
boxes.grow(numGrow);
//
// Get a copy of the multifab
//
MultiFab mftmp(mfab.boxArray(), numComp, numGrow); // needs numGrow ghost cells for the Copy below
MultiFab::Copy(mftmp,mfab,srcComp,0,numComp,numGrow);
//
// Calculate the Norms
//
Real myNorm = 0;
if ( exponent == 0 )
{
for ( MFIter mftmpmfi(mftmp); mftmpmfi.isValid(); ++mftmpmfi)
{
mftmp[mftmpmfi].abs(boxes[mftmpmfi.index()], 0, numComp);
myNorm = std::max(myNorm, mftmp[mftmpmfi].norm(0, 0, numComp));
}
ParallelDescriptor::ReduceRealMax(myNorm);
} else if ( exponent == 1 )
{
for ( MFIter mftmpmfi(mftmp); mftmpmfi.isValid(); ++mftmpmfi)
{
mftmp[mftmpmfi].abs(boxes[mftmpmfi.index()], 0, numComp);
myNorm += mftmp[mftmpmfi].norm(1, 0, numComp);
}
ParallelDescriptor::ReduceRealSum(myNorm);
} else if ( exponent == 2 )
{
for ( MFIter mftmpmfi(mftmp); mftmpmfi.isValid(); ++mftmpmfi)
{
mftmp[mftmpmfi].abs(boxes[mftmpmfi.index()], 0, numComp);
myNorm += pow(mftmp[mftmpmfi].norm(2, 0, numComp), 2);
}
ParallelDescriptor::ReduceRealSum(myNorm);
myNorm = sqrt( myNorm );
} else {
BoxLib::Error("Invalid exponent to norm function");
}
return myNorm;
}
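Example invocations (mf is assumed): exponent 0 gives the max-norm, 1 and 2 the L1 and L2 norms, all over the valid region grown by numGrow cells:

Real linf = MFNorm(mf, 0, 0, mf.nComp(), 0);  // max-norm, valid cells only
Real l1   = MFNorm(mf, 1, 0, mf.nComp(), 0);
Real l2   = MFNorm(mf, 2, 0, mf.nComp(), 0);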
Example 7: fill_boundary
void fill_boundary(MultiFab& mf, int scomp, int ncomp, const Geometry& geom, bool cross)
{
if (mf.nGrow() <= 0) return;
bool local = false; // Don't think we ever want it to be true.
mf.FillBoundary(scomp, ncomp, local, cross);
bool do_corners = !cross;
geom.FillPeriodicBoundary(mf, scomp, ncomp, do_corners, local);
}
Example 8:
void
AuxBoundaryData::copyTo (MultiFab& mf,
int src_comp,
int dst_comp,
int num_comp) const
{
BL_ASSERT(m_initialized);
if (!m_empty && mf.size() > 0)
{
mf.copy(m_fabs,src_comp,dst_comp,num_comp);
}
}
Example 9: apply
void
MCLinOp::residual (MultiFab& residL,
const MultiFab& rhsL,
MultiFab& solnL,
int level,
MCBC_Mode bc_mode)
{
apply(residL, solnL, level, bc_mode);
for (MFIter solnLmfi(solnL); solnLmfi.isValid(); ++solnLmfi)
{
int nc = residL.nComp();
const Box& vbox = solnLmfi.validbox();
FArrayBox& resfab = residL[solnLmfi];
const FArrayBox& rhsfab = rhsL[solnLmfi];
FORT_RESIDL(
resfab.dataPtr(),
ARLIM(resfab.loVect()), ARLIM(resfab.hiVect()),
rhsfab.dataPtr(),
ARLIM(rhsfab.loVect()), ARLIM(rhsfab.hiVect()),
resfab.dataPtr(),
ARLIM(resfab.loVect()), ARLIM(resfab.hiVect()),
vbox.loVect(), vbox.hiVect(), &nc);
}
}
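apply() leaves L(soln) in residL, and the Fortran kernel then overwrites it in place with rhs - L(soln). As a sketch, the same update could be written with MultiFab arithmetic (assuming the usual negate/Add signatures):

residL.negate(0, residL.nComp(), 0);                   // residL = -L(soln)
MultiFab::Add(residL, rhsL, 0, 0, residL.nComp(), 0);  // residL = rhs - L(soln)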
Example 10: average_face_to_cellcenter
void average_face_to_cellcenter (MultiFab& cc, const PArray<MultiFab>& fc, const Geometry& geom)
{
BL_ASSERT(cc.nComp() >= BL_SPACEDIM);
BL_ASSERT(fc.size() == BL_SPACEDIM);
BL_ASSERT(fc[0].nComp() == 1); // We only expect fc to have the gradient perpendicular to the face
const Real* dx = geom.CellSize();
const Real* problo = geom.ProbLo();
int coord_type = Geometry::Coord();
#ifdef _OPENMP
#pragma omp parallel
#endif
for (MFIter mfi(cc,true); mfi.isValid(); ++mfi)
{
const Box& bx = mfi.tilebox();
BL_FORT_PROC_CALL(BL_AVG_FC_TO_CC,bl_avg_fc_to_cc)
(bx.loVect(), bx.hiVect(),
BL_TO_FORTRAN(cc[mfi]),
D_DECL(BL_TO_FORTRAN(fc[0][mfi]),
BL_TO_FORTRAN(fc[1][mfi]),
BL_TO_FORTRAN(fc[2][mfi])),
dx, problo, coord_type);
}
}
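A hedged call-site sketch, pairing this with face-centered data built as in Example 5 (grids, fc, and geom are assumed to exist):

MultiFab cc(grids, BL_SPACEDIM, 0);  // one cell-centered component per direction
BoxLib::average_face_to_cellcenter(cc, fc, geom);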
Example 11: solve
void solve(MultiFab& soln, const MultiFab& anaSoln,
Real a, Real b, MultiFab& alpha, MultiFab beta[],
MultiFab& rhs, const BoxArray& bs, const Geometry& geom,
solver_t solver)
{
BL_PROFILE("solve");
soln.setVal(0.0);
const Real run_strt = ParallelDescriptor::second();
BndryData bd(bs, 1, geom);
set_boundary(bd, rhs);
ABecLaplacian abec_operator(bd, dx);
abec_operator.setScalars(a, b);
abec_operator.setCoefficients(alpha, beta);
MultiGrid mg(abec_operator);
mg.setMaxIter(maxiter);
mg.setVerbose(verbose);
mg.setFixedIter(1);
mg.solve(soln, rhs, tolerance_rel, tolerance_abs);
Real run_time = ParallelDescriptor::second() - run_strt;
ParallelDescriptor::ReduceRealMax(run_time, ParallelDescriptor::IOProcessorNumber());
if (ParallelDescriptor::IOProcessor()) {
std::cout << "Run time : " << run_time << std::endl;
}
}
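For the operator a*alpha - b*div(beta*grad), alpha is cell-centered and beta is face-centered in each direction. A hypothetical coefficient setup matching this solve()'s signature:

MultiFab alpha(bs, 1, 0);
alpha.setVal(1.0);
MultiFab beta[BL_SPACEDIM];
for (int d = 0; d < BL_SPACEDIM; ++d)
{
    BoxArray face_ba = bs;
    face_ba.surroundingNodes(d);
    beta[d].define(face_ba, 1, 0, Fab_allocate);
    beta[d].setVal(1.0);
}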
Example 12: solve_with_Cpp
void solve_with_Cpp(MultiFab& soln, MultiFab& gphi, Real a, Real b, MultiFab& alpha,
PArray<MultiFab>& beta, MultiFab& rhs, const BoxArray& bs, const Geometry& geom)
{
BL_PROFILE("solve_with_Cpp()");
BndryData bd(bs, 1, geom);
set_boundary(bd, rhs, 0);
ABecLaplacian abec_operator(bd, dx);
abec_operator.setScalars(a, b);
abec_operator.setCoefficients(alpha, beta);
MultiGrid mg(abec_operator);
mg.setVerbose(verbose);
mg.solve(soln, rhs, tolerance_rel, tolerance_abs);
PArray<MultiFab> grad_phi(BL_SPACEDIM, PArrayManage);
for (int n = 0; n < BL_SPACEDIM; ++n)
grad_phi.set(n, new MultiFab(BoxArray(soln.boxArray()).surroundingNodes(n), 1, 0));
#if (BL_SPACEDIM == 2)
abec_operator.compFlux(grad_phi[0],grad_phi[1],soln);
#elif (BL_SPACEDIM == 3)
abec_operator.compFlux(grad_phi[0],grad_phi[1],grad_phi[2],soln);
#endif
// Average edge-centered gradients to cell centers.
BoxLib::average_face_to_cellcenter(gphi, grad_phi, geom);
}
Example 13:
void
MultiFab_C_to_F::share (MultiFab& cmf, const std::string& fmf_name)
{
const Box& bx = cmf.boxArray()[0];
int nodal[BL_SPACEDIM];
for ( int i = 0; i < BL_SPACEDIM; ++i ) {
nodal[i] = (bx.type(i) == IndexType::NODE) ? 1 : 0;
}
share_multifab_with_f (fmf_name.c_str(), cmf.nComp(), cmf.nGrow(), nodal);
for (MFIter mfi(cmf); mfi.isValid(); ++mfi)
{
int li = mfi.LocalIndex();
const FArrayBox& fab = cmf[mfi];
share_fab_with_f (li, fab.dataPtr());
}
}
Example 14: dotxy
//
// Do a one-component dot product of r & z using supplied components.
//
static
Real
dotxy (const MultiFab& r,
int rcomp,
const MultiFab& z,
int zcomp,
bool local)
{
BL_PROFILE("CGSolver::dotxy()");
BL_ASSERT(r.nComp() > rcomp);
BL_ASSERT(z.nComp() > zcomp);
BL_ASSERT(r.boxArray() == z.boxArray());
const int ncomp = 1;
const int nghost = 0;
return MultiFab::Dot(r,rcomp,z,zcomp,ncomp,nghost,local);
}
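The local flag lets a caller batch several rank-local dot products before paying for one global reduction, e.g. inside a CG iteration. Sketch:

Real rho = dotxy(r, 0, z, 0, true);      // rank-local partial sum only
ParallelDescriptor::ReduceRealSum(rho);  // finish the global dot product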
Example 15: Write_N_Read
static
void
Write_N_Read (const MultiFab& mf,
const std::string& mf_name)
{
if (ParallelDescriptor::IOProcessor())
{
std::cout << "Writing the MultiFab to disk ...\n";
}
double start, end;
ParallelDescriptor::Barrier();
if (ParallelDescriptor::IOProcessor())
{
start = BoxLib::wsecond();
}
VisMF::Write(mf, mf_name);   // the timed write itself
ParallelDescriptor::Barrier();
if (ParallelDescriptor::IOProcessor())
{
end = BoxLib::wsecond();
std::cout << "\nWallclock time for MF write: " << (end-start) << '\n';
std::cout << "Reading the MultiFab from disk ...\n";
}
VisMF vmf(mf_name);
BL_ASSERT(vmf.size() == mf.boxArray().size());
for (MFIter mfi(mf); mfi.isValid(); ++mfi)
{
//const FArrayBox& fab = vmf[mfi.index()];
const FArrayBox& fab = vmf.GetFab(mfi.index(), 0); // forces this FAB to be read from disk
std::cout << "\tCPU #"
<< ParallelDescriptor::MyProc()
<< " read FAB #"
<< mfi.index()
<< '\n';
}
ParallelDescriptor::Barrier();
if (ParallelDescriptor::IOProcessor())
{
std::cout << "Building new MultiFab from disk version ....\n\n";
}
MultiFab new_mf;
VisMF::Read(new_mf, mf_name);
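// A hedged sketch of a round-trip consistency check (assumes norm0, the
// parallel max-norm, should match for identical data):
BL_ASSERT(new_mf.boxArray() == mf.boxArray());
Real diff = std::abs(new_mf.norm0(0) - mf.norm0(0));
if (ParallelDescriptor::IOProcessor())
    std::cout << "max-norm difference after round trip: " << diff << '\n';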
}