This page collects typical usage examples of the C++ MultiGrid class, gathered from real-world code. If you are wondering what MultiGrid is good for, how to call it, or what it looks like in practice, the curated examples below may help.
In the following, 15 code examples of the MultiGrid class are shown, sorted by popularity by default.
Example 1: get_phi_at
Real MultiGrid::get_phi_at(Real x, Real y, Real z) {
    Real p, tmp;
    MultiGrid* g;
    p = 0.0;
    for (int l = 0; l < get_local_node_cnt(); l++) {
        g = dynamic_cast<MultiGrid*>(get_local_node(l));
        if (MPI_rank() == g->proc()) {
            for (int k = 1; k < PNX - 1; k++) {
                if (z >= g->MultiGrid::zf(k) && z < g->MultiGrid::zf(k + 1)) {
                    for (int j = 1; j < PNX - 1; j++) {
                        if (y >= g->MultiGrid::yf(j) && y < g->MultiGrid::yf(j + 1)) {
                            for (int i = 1; i < PNX - 1; i++) {
                                // printf("%e %e %e\n", g->MultiGrid::xf(i), x, g->MultiGrid::xf(i + 1));
                                if (x >= g->MultiGrid::xf(i) && x < g->MultiGrid::xf(i + 1)) {
                                    if (!g->poisson_zone_is_refined(i, j, k)) {
                                        p = g->phi(i, j, k);
                                        // printf("%e\n", p);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    tmp = p;
    MPI_Allreduce(&tmp, &p, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD);
    return p;
}
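Usage note (not part of the original example): because the routine finishes with an MPI_Allreduce over MPI_COMM_WORLD, it is a collective call, so every rank must invoke it and every rank receives the same value. A minimal, hypothetical sketch; the name root is an assumption standing for whatever MultiGrid node the application already holds.

// Hypothetical sketch: sample the potential at the origin on all ranks.
// 'root' is an assumed, already-initialized MultiGrid* from the surrounding setup.
Real phi0 = root->get_phi_at(0.0, 0.0, 0.0);  // identical result on every rank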
Example 2: CopyGridLevel
void CopyGridLevel(MultiGrid& srcMG, Grid& destGrid,
                   ISubsetHandler& srcSH, ISubsetHandler& destSH,
                   int lvl, TAPos aPos)
{
    Grid::VertexAttachmentAccessor<TAPos> aaPos(destGrid, aPos);
    Grid::VertexAttachmentAccessor<TAPos> aaSrcPos(srcMG, aPos);
    GridObjectCollection goc = srcMG.get_grid_objects();

    AVertex aNewVrt;
    srcMG.attach_to_vertices(aNewVrt);
    Grid::VertexAttachmentAccessor<AVertex> aaNewVrt(srcMG, aNewVrt);

    for(int si = destSH.num_subsets(); si < srcSH.num_subsets(); ++si)
    {
        destSH.subset_info(si) = srcSH.subset_info(si);
    }

    for(VertexIterator vrtIter = goc.begin<Vertex>(lvl); vrtIter != goc.end<Vertex>(lvl); ++vrtIter)
    {
        Vertex* srcVrt = *vrtIter;
        Vertex* destVrt = *destGrid.create_by_cloning(srcVrt);
        aaNewVrt[srcVrt] = destVrt;
        aaPos[destVrt] = aaSrcPos[srcVrt];
        destSH.assign_subset(destVrt, srcSH.get_subset_index(srcVrt));
    }

    CopyGridLevelElements<Edge>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);
    CopyGridLevelElements<Face>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);
    CopyGridLevelElements<Volume>(srcMG, destGrid, srcSH, destSH, lvl, aNewVrt);

    srcMG.detach_from_vertices(aNewVrt);
}
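Usage note (not part of the original example): CopyGridLevel is useful when a single level of a hierarchy is needed as a flat grid, for instance to write it to a file. A possible call sequence, sketched under the assumption that the multigrid mg already carries the standard aPosition attachment; mg, mgSH and the file name are placeholders.

// Hypothetical sketch: copy level 2 of 'mg' into a flat grid and save it.
Grid flatGrid;
SubsetHandler flatSH(flatGrid);
CopyGridLevel(mg, flatGrid, mgSH, flatSH, 2, aPosition);
SaveGridToFile(flatGrid, flatSH, "level2.ugx");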
Example 3: SaveGridHierarchyTransformed
bool SaveGridHierarchyTransformed(MultiGrid& mg, ISubsetHandler& sh,
                                  const char* filename, number offset)
{
    PROFILE_FUNC_GROUP("grid");

    APosition aPos;
    // uses auto-attach
    Grid::AttachmentAccessor<Vertex, APosition> aaPos(mg, aPos, true);

    // copy the existing position to aPos. We take care of dimension differences.
    // Note: if the method was implemented for domains, this could be implemented
    // in a nicer way.
    if(mg.has_vertex_attachment(aPosition))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition, aPos);
    else if(mg.has_vertex_attachment(aPosition2))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition2, aPos);
    else if(mg.has_vertex_attachment(aPosition1))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition1, aPos);

    // iterate through all vertices and apply an offset depending on their level.
    for(size_t lvl = 0; lvl < mg.num_levels(); ++lvl){
        for(VertexIterator iter = mg.begin<Vertex>(lvl);
            iter != mg.end<Vertex>(lvl); ++iter)
        {
            aaPos[*iter].z() += (number)lvl * offset;
        }
    }

    // finally save the grid
    bool writeSuccess = SaveGridToFile(mg, sh, filename, aPos);

    // clean up
    mg.detach_from_vertices(aPos);
    return writeSuccess;
}
Example 4: TestGridLayoutMap
bool TestGridLayoutMap(MultiGrid& mg, GridLayoutMap& glm)
{
    if(mg.has_vertex_attachment(aPosition))
        return TestGridLayoutMap(mg, glm, aPosition);
    else if(mg.has_vertex_attachment(aPosition2))
        return TestGridLayoutMap(mg, glm, aPosition2);
    else if(mg.has_vertex_attachment(aPosition1))
        return TestGridLayoutMap(mg, glm, aPosition1);
    else
        UG_LOG("ERROR in TestGridLayoutMap: A standard position attachment"
               " is required.\n");

    return false;
}
Example 5: SaveGridLevelToFile
bool SaveGridLevelToFile(MultiGrid& srcMG, ISubsetHandler& srcSH, int lvl, const char* filename)
{
    // check whether one of the standard attachments is attached and call
    // SaveGridLevel with that attachment
    if(srcMG.has_vertex_attachment(aPosition))
        return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition);
    if(srcMG.has_vertex_attachment(aPosition2))
        return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition2);
    if(srcMG.has_vertex_attachment(aPosition1))
        return SaveGridLevel(srcMG, srcSH, lvl, filename, aPosition1);

    return false;
}
Example 6: SaveSurfaceViewTransformed
bool SaveSurfaceViewTransformed(MultiGrid& mg, const SurfaceView& sv,
                                const char* filename, number offset)
{
    PROFILE_FUNC_GROUP("grid");

    APosition aPos;
    // uses auto-attach
    Grid::AttachmentAccessor<Vertex, APosition> aaPos(mg, aPos, true);

    // copy the existing position to aPos. We take care of dimension differences.
    // Note: if the method was implemented for domains, this could be implemented
    // in a nicer way.
    if(mg.has_vertex_attachment(aPosition))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition, aPos);
    else if(mg.has_vertex_attachment(aPosition2))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition2, aPos);
    else if(mg.has_vertex_attachment(aPosition1))
        ConvertMathVectorAttachmentValues<Vertex>(mg, aPosition1, aPos);

    // iterate through all vertices and apply an offset depending on their level.
    for(size_t lvl = 0; lvl < mg.num_levels(); ++lvl){
        for(VertexIterator iter = mg.begin<Vertex>(lvl);
            iter != mg.end<Vertex>(lvl); ++iter)
        {
            aaPos[*iter].z() += (number)lvl * offset;
        }
    }

    // create a subset handler which holds different subsets for the different interface types
    SubsetHandler sh(mg);
    AssignSubsetsBySurfaceViewState<Vertex>(sh, sv, mg);
    AssignSubsetsBySurfaceViewState<Edge>(sh, sv, mg);
    AssignSubsetsBySurfaceViewState<Face>(sh, sv, mg);
    AssignSubsetsBySurfaceViewState<Volume>(sh, sv, mg);
    AssignSubsetColors(sh);
    EraseEmptySubsets(sh);

    // finally save the grid
    bool writeSuccess = SaveGridToFile(mg, sh, filename, aPos);

    // clean up
    mg.detach_from_vertices(aPos);
    return writeSuccess;
}
Example 7: compute_err_est_M_elem
void ConvectionDiffusionFE<TDomain>::
compute_err_est_M_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
    // note: mass parts only enter volume term
    err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
    if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
    MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

    typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
    pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
    if (elem_list.size() != 1)
        UG_THROW("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

    // request geometry
    static const TFEGeom& geo = GeomProvider<TFEGeom>::get();

    // loop integration points
    try
    {
        for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
        {
            number total = 0.0;

            // mass scale //
            if (m_imMassScale.data_given())
            {
                number val = 0.0;
                for (size_t sh = 0; sh < geo.num_sh(); sh++)
                    val += u(_C_,sh) * m_shapeValues.shapeAtElemIP(sh,ip);
                total += m_imMassScale[ip] * val;
            }

            // mass //
            if (m_imMass.data_given())
            {
                total += m_imMass[ip];
            }

            (*err_est_data)(elem_list[0],ip) += scale * total;
        }
    }
    UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
                   << "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
Example 8: CreateSmoothHierarchy
bool CreateSmoothHierarchy(MultiGrid& mg, size_t numRefs)
{
    PROFILE_FUNC_GROUP("grid");
    IRefinementCallback* refCallback = NULL;

    // we're only checking for the main attachments here.
    //todo: improve this - add a domain-based hierarchy creator.
    if(mg.has_vertex_attachment(aPosition1))
        refCallback = new SubdivisionLoopProjector<APosition1>(mg, aPosition1, aPosition1);
    else if(mg.has_vertex_attachment(aPosition2))
        refCallback = new SubdivisionLoopProjector<APosition2>(mg, aPosition2, aPosition2);
    else if(mg.has_vertex_attachment(aPosition))
        refCallback = new SubdivisionLoopProjector<APosition>(mg, aPosition, aPosition);

    if(!refCallback){
        UG_LOG("No standard position attachment found. Aborting.\n");
        return false;
    }

    GlobalMultiGridRefiner ref(mg, refCallback);
    for(size_t lvl = 0; lvl < numRefs; ++lvl){
        ref.refine();
    }

    if(mg.has_vertex_attachment(aPosition1))
        ProjectToLimitPLoop(mg, aPosition1, aPosition1);
    else if(mg.has_vertex_attachment(aPosition2))
        ProjectToLimitPLoop(mg, aPosition2, aPosition2);
    else if(mg.has_vertex_attachment(aPosition))
        ProjectToLimitPLoop(mg, aPosition, aPosition);

    delete refCallback;
    return true;
}
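Usage note (not part of the original example): CreateSmoothHierarchy only performs the refinement; loading and saving the grid is left to the caller. A sketch of how it might be combined with the load/save helpers shown elsewhere on this page; the file names are placeholders.

// Hypothetical sketch: load a coarse mesh, build three smooth (Loop-subdivision)
// refinement levels, then store the whole hierarchy (see SaveGridHierarchy in Example 15).
MultiGrid mg;
SubsetHandler sh(mg);
if(LoadGridFromFile(mg, sh, "coarse.ugx") && CreateSmoothHierarchy(mg, 3))
    SaveGridHierarchy(mg, "coarse_hierarchy.ugx");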
Example 9: TestSubdivision
void TestSubdivision(const char* fileIn, const char* fileOut, int numRefs)
{
    PROFILE_FUNC_GROUP("grid");
    //todo: Callbacks have to make sure that their attachment is accessible in the grid,
    //      even if they were initialized before the attachment was attached to the grid.
    MultiGrid mg;
    SubsetHandler sh(mg);
    SubdivisionLoopProjector<APosition> refCallback(mg, aPosition, aPosition);
    GlobalMultiGridRefiner ref(mg, &refCallback);

    if(LoadGridFromFile(mg, sh, fileIn)){
        for(int lvl = 0; lvl < numRefs; ++lvl){
            ref.refine();
        }

        ProjectToLimitPLoop(mg, aPosition, aPosition);
        SaveGridToFile(mg, mg.get_hierarchy_handler(), fileOut);
    }
    else{
        UG_LOG("Load failed. aborting...\n");
    }
}
Example 10: AssignSubsetsByInterfaceType
static void AssignSubsetsByInterfaceType(SubsetHandler& sh, MultiGrid& mg)
{
    const int siNormal = 0;
    const int siHMaster = 1;
    const int siHSlave = 1 << 1;
    const int siVMaster = 1 << 2;
    const int siVSlave = 1 << 3;

    const char* subsetNames[] = {"normal", "hmaster", "hslave", "hslave+hmaster",
                                 "vmaster", "vmaster+hmaster", "vmaster+hslave",
                                 "vmaster+hslave+hmaster", "vslave", "vslave+hmaster",
                                 "vslave+hslave", "vslave+hslave+hmaster",
                                 "vslave+vmaster", "vslave+vmaster+hmaster",
                                 "vslave+vmaster+hslave", "vslave+vmaster+hmaster+hslave"};

    for(int i = 0; i < 16; ++i)
        sh.subset_info(i).name = subsetNames[i];

    typedef typename Grid::traits<TElem>::iterator TIter;
    for(TIter iter = mg.begin<TElem>(); iter != mg.end<TElem>(); ++iter){
        int status = ES_NONE;
#ifdef UG_PARALLEL
        DistributedGridManager* distGridMgr = mg.distributed_grid_manager();
        if(distGridMgr)
            status = distGridMgr->get_status(*iter);
#endif
        int index = siNormal;
        if(status & ES_H_MASTER)
            index |= siHMaster;
        if(status & ES_H_SLAVE)
            index |= siHSlave;
        if(status & ES_V_MASTER)
            index |= siVMaster;
        if(status & ES_V_SLAVE)
            index |= siVSlave;

        sh.assign_subset(*iter, index);
    }
}
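Usage note (not part of the original example): the function is templated on the element type TElem, and since it is declared static it is meant to be called from within the same source file. A hedged sketch of such a driver; the subsequent color and cleanup calls mirror those used in Example 6 and are assumptions here.

// Hypothetical sketch: tag all element types of 'mg' by their parallel interface state.
SubsetHandler sh(mg);
AssignSubsetsByInterfaceType<Vertex>(sh, mg);
AssignSubsetsByInterfaceType<Edge>(sh, mg);
AssignSubsetsByInterfaceType<Face>(sh, mg);
AssignSubsetsByInterfaceType<Volume>(sh, mg);
AssignSubsetColors(sh);
EraseEmptySubsets(sh);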
Example 11: CopyGridLevelElements
void CopyGridLevelElements(MultiGrid& srcMG, Grid& destGrid,
                           ISubsetHandler& srcSH, ISubsetHandler& destSH,
                           int lvl, AVertex& aNewVrt)
{
    Grid::VertexAttachmentAccessor<AVertex> aaNewVrt(srcMG, aNewVrt);
    GridObjectCollection goc = srcMG.get_grid_objects();
    CustomVertexGroup vrts;

    typedef typename Grid::traits<TElem>::iterator iter_t;
    for(iter_t eIter = goc.begin<TElem>(lvl); eIter != goc.end<TElem>(lvl); ++eIter)
    {
        TElem* e = *eIter;
        vrts.resize(e->num_vertices());
        for(size_t iv = 0; iv < e->num_vertices(); ++iv)
        {
            vrts.set_vertex(iv, aaNewVrt[e->vertex(iv)]);
        }

        TElem* ne = *destGrid.create_by_cloning(e, vrts);
        destSH.assign_subset(ne, srcSH.get_subset_index(e));
    }
}
Example 12: main
int main(int argc, char* argv[])
{
#ifdef CH_MPI
    MPI_Init(&argc, &argv);
#endif

    // test parameters
    const int nGrids = 3;
    const int nCells0 = 32;
    // xLo has to be zero in order for DiriBc to work.
    const RealVect xLo = RealVect::Zero;
    const Real xHi = 1.0;
    const Box box0(IntVect::Zero, (nCells0-1)*IntVect::Unit);
    const int nGhosts = 1;
    const int resNT = 2; // norm type
    const int errNT = 0;
    // A test is considered a failure
    // if its convergence rate is smaller than the target below.
    const Real targetConvergeRate = 1.75;

    // solver parameters
    // To converge within 10 V-cycles in 1D,
    // nRelax=3 is the minimum number of relaxations.
    const int nRelax = 3; // m_pre=m_post
    // cycle type, 1 : V-cycle; -1 : FMG-cycle
    const int cycleType[2] =
    {
        1, -1
    };
    const std::string cycleStr[2] =
    {
        " V", " FMG"
    };

    // test results holder
    const int nCycles[2] =
    {
        9, 5
    };
    const int maxCycles = 10; // > max(nCycles)
    // Real resNorm[nGrids][nCycles+1], errNorm[nGrids][nCycles+1];
    Real resNorm[nGrids][maxCycles], errNorm[nGrids][maxCycles];
    Real convergeRate[nGrids-1][2];
    const Real log2r = 1.0/log(2.0);
    // status records the number of errors detected.
    int status = 0;

    for (int j=0; j<2; j++)
    {
        pout() << "\n**************************************************\n"
               << "\nTesting MultiGrid::oneCycle(correction, residual)\n"
               << " cycle type = " << cycleStr[j]
               << "; m_pre = m_post = " << nRelax << "\n";

        for (int iGrid=0; iGrid<nGrids; iGrid++)
        {
            int ref = 1;
            for (int i=0; i<iGrid; i++)
                ref *= 2;
            const Real dx = xHi/nCells0/ref;
            const Box domain = refine(box0, ref);
            const Box ghostBox = grow(domain, nGhosts);

            pout() << "\n----------------------------------------------------\n";
            pout() << "nCells = " << nCells0*ref << " ; dx = " << dx << " \n";

            FArrayBox phi(ghostBox, 1);
            FArrayBox correction(ghostBox, 1);
            FArrayBox rhs(domain, 1);
            FArrayBox error(domain, 1);
            FArrayBox phiExact(domain, 1);
            FArrayBox residual(domain, 1);

            // set initial guess
            phi.setVal(0.0);
            // set RHS and the exact solution
            for (BoxIterator bit(domain); bit.ok(); ++bit)
            {
                const RealVect offset = bit()-domain.smallEnd();
                const RealVect x = xLo + dx*(0.5+offset);
                rhs(bit()) = rhsFunc(x);
                phiExact(bit()) = exactSolution(x);
            }

            // Initialize big objects
            NewPoissonOpFactory opFactory;
            opFactory.define(dx*RealVect(IntVect::Unit), constDiriBC);
            MultiGrid<FArrayBox> solver;
            BiCGStabSolver<FArrayBox> bottomSolver;
            bottomSolver.m_verbosity = 0;
            MGLevelOp<FArrayBox>* op = opFactory.MGnewOp(domain, 0);
            solver.m_numMG = 1;
            solver.m_bottom = 1;
            solver.m_pre = nRelax;
            solver.m_post = nRelax;
            solver.m_cycle = cycleType[j];
            solver.define(opFactory, &bottomSolver, domain);

            // put the data into residual-correction form
            //......... remainder of this example omitted .........
Example 13: compute_err_est_rhs_elem
void ConvectionDiffusionFE<TDomain>::
compute_err_est_rhs_elem(GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
    typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

    err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
    if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
    MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

    // SIDE TERMS //

    // get the sides of the element
    typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
    pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
    if (side_list.size() != (size_t) ref_elem_type::numSides)
        UG_THROW("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

    // loop sides
    size_t passedIPs = 0;
    for (size_t side = 0; side < (size_t) ref_elem_type::numSides; side++)
    {
        // normal on side
        MathVector<dim> normal;
        SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
        VecNormalize(normal, normal);

        try
        {
            for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
            {
                size_t ip = passedIPs + sip;

                // vector source //
                if (m_imVectorSource.data_given())
                    (*err_est_data)(side_list[side],sip) += scale * VecDot(m_imVectorSource[ip], normal);
            }

            passedIPs += err_est_data->num_side_ips(side_list[side]);
        }
        UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
                       << "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
    }

    // VOLUME TERMS //
    if (!m_imSource.data_given()) return;

    typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
    pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
    if (elem_list.size() != 1)
        UG_THROW("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

    // source //
    try
    {
        for (size_t ip = 0; ip < err_est_data->num_elem_ips(elem->reference_object_id()); ip++)
            (*err_est_data)(elem_list[0],ip) += scale * m_imSource[ip];
    }
    UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
                   << "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
}
Example 14: compute_err_est_A_elem
void ConvectionDiffusionFE<TDomain>::
compute_err_est_A_elem(const LocalVector& u, GridObject* elem, const MathVector<dim> vCornerCoords[], const number& scale)
{
    typedef typename reference_element_traits<TElem>::reference_element_type ref_elem_type;

    err_est_type* err_est_data = dynamic_cast<err_est_type*>(this->m_spErrEstData.get());
    if (err_est_data->surface_view().get() == NULL) {UG_THROW("Error estimator has NULL surface view.");}
    MultiGrid* pErrEstGrid = (MultiGrid*) (err_est_data->surface_view()->subset_handler()->multi_grid());

    // request geometry
    static const TFEGeom& geo = GeomProvider<TFEGeom>::get();

    // SIDE TERMS //

    // get the sides of the element
    // We have to cast elem to a pointer of type SideAndElemErrEstData::elem_type
    // for the SideAndElemErrEstData::operator() to work properly.
    // This cannot generally be achieved by casting to TElem*, since this method is also registered for
    // lower-dimensional types TElem, and must therefore be compilable, even if it is never EVER to be executed.
    // The way we achieve this here is by calling associated_elements_sorted(), which has an implementation for
    // all possible types. Whatever comes out of it is of course complete nonsense if (and only if)
    // SideAndElemErrEstData::elem_type != TElem. To be on the safe side, we throw an error if the number of
    // entries in the list is not as it should be.
    typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::side_type>::secure_container side_list;
    pErrEstGrid->associated_elements_sorted(side_list, (TElem*) elem);
    if (side_list.size() != (size_t) ref_elem_type::numSides)
        UG_THROW("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

    // some helper variables
    MathVector<dim> fluxDensity, gradC, normal;

    // FIXME: The computation of the gradient has to be reworked.
    // In the case of P1 shape functions, it is valid. For Q1 shape functions, however,
    // the gradient is not constant (but bilinear) on the element - and along the sides.
    // We cannot use the FVGeom here. Instead, we need to calculate the gradient in each IP!

    // calculate grad u as average (over scvf)
    VecSet(gradC, 0.0);
    for(size_t ii = 0; ii < geo.num_ip(); ++ii)
    {
        for (size_t j=0; j<m_shapeValues.num_sh(); j++)
            VecScaleAppend(gradC, u(_C_,j), geo.global_grad(ii, j));
    }
    VecScale(gradC, gradC, (1.0/geo.num_ip()));

    // calculate flux through the sides
    size_t passedIPs = 0;
    for (size_t side=0; side < (size_t) ref_elem_type::numSides; side++)
    {
        // normal on side
        SideNormal<ref_elem_type,dim>(normal, side, vCornerCoords);
        VecNormalize(normal, normal);

        try
        {
            for (size_t sip = 0; sip < err_est_data->num_side_ips(side_list[side]); sip++)
            {
                size_t ip = passedIPs + sip;

                VecSet(fluxDensity, 0.0);

                // diffusion //
                if (m_imDiffusion.data_given())
                    MatVecScaleMultAppend(fluxDensity, -1.0, m_imDiffusion[ip], gradC);

                // convection //
                if (m_imVelocity.data_given())
                {
                    number val = 0.0;
                    for (size_t sh = 0; sh < m_shapeValues.num_sh(); sh++)
                        val += u(_C_,sh) * m_shapeValues.shapeAtSideIP(sh,sip);
                    VecScaleAppend(fluxDensity, val, m_imVelocity[ip]);
                }

                // general flux //
                if (m_imFlux.data_given())
                    VecAppend(fluxDensity, m_imFlux[ip]);

                (*err_est_data)(side_list[side],sip) += scale * VecDot(fluxDensity, normal);
            }

            passedIPs += err_est_data->num_side_ips(side_list[side]);
        }
        UG_CATCH_THROW("Values for the error estimator could not be assembled at every IP." << std::endl
                       << "Maybe wrong type of ErrEstData object? This implementation needs: SideAndElemErrEstData.");
    }

    // VOLUME TERMS //
    typename MultiGrid::traits<typename SideAndElemErrEstData<TDomain>::elem_type>::secure_container elem_list;
    pErrEstGrid->associated_elements_sorted(elem_list, (TElem*) elem);
    if (elem_list.size() != 1)
        UG_THROW("Mismatch of numbers of sides in 'ConvectionDiffusionFE::compute_err_est_elem'");

    try
    {
        //......... remainder of this example omitted .........
Example 15: SaveGridHierarchy
bool SaveGridHierarchy(MultiGrid& mg, const char* filename)
{
    PROFILE_FUNC_GROUP("grid");
    return SaveGridToFile(mg, mg.get_hierarchy_handler(), filename);
}