This article collects typical usage examples of the C++ method MooseVariable::prepareAux. If you are wondering how MooseVariable::prepareAux is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of its enclosing class, MooseVariable.
A total of 13 code examples of MooseVariable::prepareAux are shown below, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better C++ code examples.
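Before the individual examples, note the pattern they all share: each threaded loop first calls prepareAux() on every auxiliary variable it owns (resetting the per-thread degree-of-freedom value storage), then reinitializes the current element or node and runs its AuxKernels, Indicators, or NodalKernels, and finally writes the results back into the auxiliary solution vector under a lock. The following is a minimal sketch of that flow, not actual MOOSE source; the function name computeElementalAux is hypothetical, the member names (_aux_sys, _elem_vars, _tid, _fe_problem) are assumed from the snippets below, and the real kernel bookkeeping is elided:

// Minimal sketch only; assumed members: _aux_sys, _elem_vars, _tid, _fe_problem.
void computeElementalAux(const Elem * elem)
{
  // 1. Reset each aux variable's per-thread dof-value storage.
  for (const auto & it : _aux_sys._elem_vars[_tid])
    it.second->prepareAux();

  // 2. Reinitialize the element and let the AuxKernels fill in values
  //    (elided here; see the examples below for the real calls).
  _fe_problem.prepare(elem, _tid);
  _fe_problem.reinitElem(elem, _tid);

  // 3. Write the computed values into the aux solution vector; the lock
  //    mirrors the Threads::spin_mtx usage in the threaded examples.
  Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
  for (const auto & it : _aux_sys._elem_vars[_tid])
    it.second->insert(_aux_sys.solution());
}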
Example 1:
void
ComputeElemAuxVarsThread::subdomainChanged()
{
  // prepare variables
  for (std::map<std::string, MooseVariable *>::iterator it = _aux_sys._elem_vars[_tid].begin(); it != _aux_sys._elem_vars[_tid].end(); ++it)
  {
    MooseVariable * var = it->second;
    var->prepareAux();
  }

  // block setup
  for (std::vector<AuxKernel *>::const_iterator aux_it = _auxs[_tid].activeBlockElementKernels(_subdomain).begin();
       aux_it != _auxs[_tid].activeBlockElementKernels(_subdomain).end();
       aux_it++)
    (*aux_it)->subdomainSetup();

  std::set<MooseVariable *> needed_moose_vars;
  for (std::vector<AuxKernel*>::const_iterator block_element_aux_it = _auxs[_tid].activeBlockElementKernels(_subdomain).begin();
       block_element_aux_it != _auxs[_tid].activeBlockElementKernels(_subdomain).end(); ++block_element_aux_it)
  {
    const std::set<MooseVariable *> & mv_deps = (*block_element_aux_it)->getMooseVariableDependencies();
    needed_moose_vars.insert(mv_deps.begin(), mv_deps.end());
  }

  _fe_problem.setActiveElementalMooseVariables(needed_moose_vars, _tid);
  _fe_problem.prepareMaterials(_subdomain, _tid);
}
Example 2: lock
void
ComputeNodalKernelsThread::onNode(ConstNodeRange::const_iterator & node_it)
{
  const Node * node = *node_it;

  // prepare variables
  for (const auto & it : _aux_sys._nodal_vars[_tid])
  {
    MooseVariable * var = it.second;
    var->prepareAux();
  }

  _fe_problem.reinitNode(node, _tid);

  const std::set<SubdomainID> & block_ids = _aux_sys.mesh().getNodeBlockIds(*node);
  for (const auto & block : block_ids)
    if (_nodal_kernels.hasActiveBlockObjects(block, _tid))
    {
      const std::vector<MooseSharedPointer<NodalKernel> > & objects = _nodal_kernels.getActiveBlockObjects(block, _tid);
      for (const auto & nodal_kernel : objects)
        nodal_kernel->computeResidual();
    }

  _num_cached++;

  if (_num_cached == 20) // Cache 20 nodes worth before adding into the residual
  {
    _num_cached = 0;
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    _fe_problem.addCachedResidual(_tid);
  }
}
Example 3: lock
void
ComputeNodalKernelBcsThread::onNode(ConstBndNodeRange::const_iterator & node_it)
{
  const BndNode * bnode = *node_it;

  BoundaryID boundary_id = bnode->_bnd_id;

  // prepare variables
  for (const auto & it : _aux_sys._nodal_vars[_tid])
  {
    MooseVariable * var = it.second;
    var->prepareAux();
  }

  if (_nodal_kernels.hasActiveBoundaryObjects(boundary_id, _tid))
  {
    Node * node = bnode->_node;
    if (node->processor_id() == _fe_problem.processor_id())
    {
      _fe_problem.reinitNodeFace(node, boundary_id, _tid);
      const std::vector<MooseSharedPointer<NodalKernel> > & objects = _nodal_kernels.getActiveBoundaryObjects(boundary_id, _tid);
      for (const auto & nodal_kernel : objects)
        nodal_kernel->computeResidual();

      _num_cached++;
    }
  }

  if (_num_cached == 20) // cache 20 nodes worth before adding into the residual
  {
    _num_cached = 0;
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    _fe_problem.addCachedResidual(_tid);
  }
}
Example 4: sentinel
void
ComputeIndicatorThread::onElement(const Elem * elem)
{
  for (const auto & it : _aux_sys._elem_vars[_tid])
  {
    MooseVariable * var = it.second;
    var->prepareAux();
  }

  _fe_problem.prepare(elem, _tid);
  _fe_problem.reinitElem(elem, _tid);

  // Set up Sentinel class so that, even if reinitMaterials() throws, we
  // still remember to swap back during stack unwinding.
  SwapBackSentinel sentinel(_fe_problem, &FEProblemBase::swapBackMaterials, _tid);
  _fe_problem.reinitMaterials(_subdomain, _tid);

  // Compute
  if (!_finalize)
  {
    if (_indicator_whs.hasActiveBlockObjects(_subdomain, _tid))
    {
      const std::vector<std::shared_ptr<Indicator>> & indicators =
          _indicator_whs.getActiveBlockObjects(_subdomain, _tid);
      for (const auto & indicator : indicators)
        indicator->computeIndicator();
    }
  }

  // Finalize
  else
  {
    if (_indicator_whs.hasActiveBlockObjects(_subdomain, _tid))
    {
      const std::vector<std::shared_ptr<Indicator>> & indicators =
          _indicator_whs.getActiveBlockObjects(_subdomain, _tid);
      for (const auto & indicator : indicators)
        indicator->finalize();
    }

    if (_internal_side_indicators.hasActiveBlockObjects(_subdomain, _tid))
    {
      const std::vector<std::shared_ptr<InternalSideIndicator>> & internal_indicators =
          _internal_side_indicators.getActiveBlockObjects(_subdomain, _tid);
      for (const auto & internal_indicator : internal_indicators)
        internal_indicator->finalize();
    }
  }

  if (!_finalize) // During finalize the Indicators should be setting values in the vectors manually
  {
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (const auto & it : _aux_sys._elem_vars[_tid])
    {
      MooseVariable * var = it.second;
      var->add(_aux_sys.solution());
    }
  }
}
Example 5:
void
ComputeElemAuxVarsThread::subdomainChanged()
{
  _fe_problem.subdomainSetup(_subdomain, _tid);

  // prepare variables
  for (const auto & it : _aux_sys._elem_vars[_tid])
  {
    MooseVariable * var = it.second;
    var->prepareAux();
  }

  std::set<MooseVariableFE *> needed_moose_vars;
  std::set<unsigned int> needed_mat_props;

  if (_aux_kernels.hasActiveBlockObjects(_subdomain, _tid))
  {
    const std::vector<std::shared_ptr<AuxKernel>> & kernels =
        _aux_kernels.getActiveBlockObjects(_subdomain, _tid);
    for (const auto & aux : kernels)
    {
      aux->subdomainSetup();
      const std::set<MooseVariableFE *> & mv_deps = aux->getMooseVariableDependencies();
      const std::set<unsigned int> & mp_deps = aux->getMatPropDependencies();
      needed_moose_vars.insert(mv_deps.begin(), mv_deps.end());
      needed_mat_props.insert(mp_deps.begin(), mp_deps.end());
    }
  }

  _fe_problem.setActiveElementalMooseVariables(needed_moose_vars, _tid);
  _fe_problem.setActiveMaterialProperties(needed_mat_props, _tid);
  _fe_problem.prepareMaterials(_subdomain, _tid);
}
Example 6: lock
void
ComputeElemAuxBcsThread::operator() (const ConstBndElemRange & range)
{
  ParallelUniqueId puid;
  _tid = puid.id;

  for (ConstBndElemRange::const_iterator elem_it = range.begin(); elem_it != range.end(); ++elem_it)
  {
    const BndElement * belem = *elem_it;

    const Elem * elem = belem->_elem;
    unsigned short int side = belem->_side;
    BoundaryID boundary_id = belem->_bnd_id;

    if (elem->processor_id() == _problem.processor_id())
    {
      // prepare variables
      for (std::map<std::string, MooseVariable *>::iterator it = _sys._elem_vars[_tid].begin(); it != _sys._elem_vars[_tid].end(); ++it)
      {
        MooseVariable * var = it->second;
        var->prepareAux();
      }

      if (_auxs[_tid].elementalBCs(boundary_id).size() > 0)
      {
        _problem.prepare(elem, _tid);
        _problem.reinitElemFace(elem, side, boundary_id, _tid);
        _problem.reinitMaterialsBoundary(boundary_id, _tid);

        const std::vector<AuxKernel*> & bcs = _auxs[_tid].elementalBCs(boundary_id);
        for (std::vector<AuxKernel*>::const_iterator element_bc_it = bcs.begin(); element_bc_it != bcs.end(); ++element_bc_it)
          (*element_bc_it)->compute();

        _problem.swapBackMaterialsFace(_tid);
      }

      // update the solution vector
      {
        Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
        for (std::map<std::string, MooseVariable *>::iterator it = _sys._elem_vars[_tid].begin(); it != _sys._elem_vars[_tid].end(); ++it)
        {
          MooseVariable * var = it->second;
          var->insert(_sys.solution());
        }
      }
    }
  }
}
Example 7: lock
void
ComputeIndicatorThread::onElement(const Elem * elem)
{
  for (std::map<std::string, MooseVariable *>::iterator it = _aux_sys._elem_vars[_tid].begin(); it != _aux_sys._elem_vars[_tid].end(); ++it)
  {
    MooseVariable * var = it->second;
    var->prepareAux();
  }

  _fe_problem.prepare(elem, _tid);
  _fe_problem.reinitElem(elem, _tid);
  _fe_problem.reinitMaterials(_subdomain, _tid);

  const std::vector<Indicator *> & indicators = _indicator_whs[_tid].active();

  if (!_finalize)
    for (std::vector<Indicator *>::const_iterator it = indicators.begin(); it != indicators.end(); ++it)
      (*it)->computeIndicator();
  else
  {
    for (std::vector<Indicator *>::const_iterator it = indicators.begin(); it != indicators.end(); ++it)
      (*it)->finalize();

    // Now finalize the side integral side_indicators as well
    {
      const std::vector<Indicator *> & side_indicators = _indicator_whs[_tid].activeInternalSideIndicators();
      for (std::vector<Indicator *>::const_iterator it = side_indicators.begin(); it != side_indicators.end(); ++it)
        (*it)->finalize();
    }
  }

  _fe_problem.swapBackMaterials(_tid);

  if (!_finalize) // During finalize the Indicators should be setting values in the vectors manually
  {
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    for (std::map<std::string, MooseVariable *>::iterator it = _aux_sys._elem_vars[_tid].begin(); it != _aux_sys._elem_vars[_tid].end(); ++it)
    {
      MooseVariable * var = it->second;
      var->add(_aux_sys.solution());
    }
  }
}
Example 8: face_sentinel
void
ComputeIndicatorThread::onInternalSide(const Elem * elem, unsigned int side)
{
  if (_finalize) // If finalizing we only do something on the elements
    return;

  // Pointer to the neighbor we are currently working on.
  const Elem * neighbor = elem->neighbor_ptr(side);

  // Get the global id of the element and the neighbor
  const dof_id_type elem_id = elem->id(), neighbor_id = neighbor->id();

  if ((neighbor->active() && (neighbor->level() == elem->level()) && (elem_id < neighbor_id)) ||
      (neighbor->level() < elem->level()))
  {
    for (const auto & it : _aux_sys._elem_vars[_tid])
    {
      MooseVariable * var = it.second;
      var->prepareAux();
    }

    SubdomainID block_id = elem->subdomain_id();
    if (_internal_side_indicators.hasActiveBlockObjects(block_id, _tid))
    {
      _fe_problem.reinitNeighbor(elem, side, _tid);

      // Set up Sentinels so that, even if one of the reinitMaterialsXXX() calls throws, we
      // still remember to swap back during stack unwinding.
      SwapBackSentinel face_sentinel(_fe_problem, &FEProblemBase::swapBackMaterialsFace, _tid);
      _fe_problem.reinitMaterialsFace(block_id, _tid);

      SwapBackSentinel neighbor_sentinel(
          _fe_problem, &FEProblemBase::swapBackMaterialsNeighbor, _tid);
      _fe_problem.reinitMaterialsNeighbor(neighbor->subdomain_id(), _tid);

      const std::vector<std::shared_ptr<InternalSideIndicator>> & indicators =
          _internal_side_indicators.getActiveBlockObjects(block_id, _tid);
      for (const auto & indicator : indicators)
        indicator->computeIndicator();
    }
  }
}
Example 9:
void
ComputeIndicatorThread::onInternalSide(const Elem * elem, unsigned int side)
{
  if (_finalize) // If finalizing we only do something on the elements
    return;

  // Pointer to the neighbor we are currently working on.
  const Elem * neighbor = elem->neighbor(side);

  // Get the global id of the element and the neighbor
  const dof_id_type
    elem_id = elem->id(),
    neighbor_id = neighbor->id();

  if ((neighbor->active() && (neighbor->level() == elem->level()) && (elem_id < neighbor_id)) || (neighbor->level() < elem->level()))
  {
    for (std::map<std::string, MooseVariable *>::iterator it = _aux_sys._elem_vars[_tid].begin(); it != _aux_sys._elem_vars[_tid].end(); ++it)
    {
      MooseVariable * var = it->second;
      var->prepareAux();
    }

    const std::vector<Indicator *> & indicators = _indicator_whs[_tid].activeInternalSideIndicators();
    if (indicators.size() > 0)
    {
      _fe_problem.reinitNeighbor(elem, side, _tid);

      _fe_problem.reinitMaterialsFace(elem->subdomain_id(), _tid);
      _fe_problem.reinitMaterialsNeighbor(neighbor->subdomain_id(), _tid);

      for (std::vector<Indicator *>::const_iterator it = indicators.begin(); it != indicators.end(); ++it)
        (*it)->computeIndicator();

      _fe_problem.swapBackMaterialsFace(_tid);
      _fe_problem.swapBackMaterialsNeighbor(_tid);
    }
  }
}
Example 10: lock
void
ComputeNodalKernelBcsThread::onNode(ConstBndNodeRange::const_iterator & node_it)
{
  const BndNode * bnode = *node_it;

  BoundaryID boundary_id = bnode->_bnd_id;

  // prepare variables
  for (std::map<std::string, MooseVariable *>::iterator it = _sys._nodal_vars[_tid].begin(); it != _sys._nodal_vars[_tid].end(); ++it)
  {
    MooseVariable * var = it->second;
    var->prepareAux();
  }

  if (_nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).size() > 0)
  {
    Node * node = bnode->_node;
    if (node->processor_id() == _fe_problem.processor_id())
    {
      _fe_problem.reinitNodeFace(node, boundary_id, _tid);

      for (std::vector<MooseSharedPointer<NodalKernel> >::const_iterator nodal_kernel_it = _nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).begin();
           nodal_kernel_it != _nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).end();
           ++nodal_kernel_it)
        (*nodal_kernel_it)->computeResidual();

      _num_cached++;
    }
  }

  if (_num_cached == 20) // cache 20 nodes worth before adding into the residual
  {
    _num_cached = 0;
    Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
    _fe_problem.addCachedResidual(_tid);
  }
}
Example 11: lock
void
ComputeNodalKernelBCJacobiansThread::onNode(ConstBndNodeRange::const_iterator & node_it)
{
  const BndNode * bnode = *node_it;

  BoundaryID boundary_id = bnode->_bnd_id;

  std::vector<std::pair<MooseVariable *, MooseVariable *> > & ce = _fe_problem.couplingEntries(_tid);
  for (std::vector<std::pair<MooseVariable *, MooseVariable *> >::iterator it = ce.begin(); it != ce.end(); ++it)
  {
    MooseVariable & ivariable = *(*it).first;
    MooseVariable & jvariable = *(*it).second;

    unsigned int ivar = ivariable.number();
    unsigned int jvar = jvariable.number();

    // The NodalKernels that are active and are coupled to the jvar in question
    std::vector<MooseSharedPointer<NodalKernel> > active_involved_kernels;

    if (_nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).size() > 0)
    {
      // Loop over each NodalKernel to see if it's involved with the jvar
      for (std::vector<MooseSharedPointer<NodalKernel> >::iterator nodal_kernel_it = _nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).begin();
           nodal_kernel_it != _nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).end();
           ++nodal_kernel_it)
      {
        MooseSharedPointer<NodalKernel> & nodal_kernel = *nodal_kernel_it;

        // If this NodalKernel isn't operating on this ivar... skip it
        if (nodal_kernel->variable().number() != ivar)
          break;

        // If this NodalKernel is acting on the jvar add it to the list and short-circuit the loop
        if (nodal_kernel->variable().number() == jvar)
        {
          active_involved_kernels.push_back(nodal_kernel);
          continue;
        }

        // See if this NodalKernel is coupled to the jvar
        const std::vector<MooseVariable *> & coupled_vars = (*nodal_kernel_it)->getCoupledMooseVars();
        for (std::vector<MooseVariable *>::const_iterator var_it = coupled_vars.begin(); var_it != coupled_vars.end(); ++var_it)
        {
          if ((*var_it)->number() == jvar)
          {
            active_involved_kernels.push_back(nodal_kernel);
            break; // It only takes one
          }
        }
      }
    }

    // Did we find any NodalKernels coupled to this jvar?
    if (!active_involved_kernels.empty())
    {
      // prepare variables
      for (std::map<std::string, MooseVariable *>::iterator it = _sys._nodal_vars[_tid].begin(); it != _sys._nodal_vars[_tid].end(); ++it)
      {
        MooseVariable * var = it->second;
        var->prepareAux();
      }

      if (_nodal_kernels[_tid].activeBoundaryNodalKernels(boundary_id).size() > 0)
      {
        Node * node = bnode->_node;
        if (node->processor_id() == _fe_problem.processor_id())
        {
          _fe_problem.reinitNodeFace(node, boundary_id, _tid);

          for (std::vector<MooseSharedPointer<NodalKernel> >::iterator nodal_kernel_it = active_involved_kernels.begin();
               nodal_kernel_it != active_involved_kernels.end();
               ++nodal_kernel_it)
            (*nodal_kernel_it)->computeOffDiagJacobian(jvar);

          _num_cached++;
        }
      }

      if (_num_cached == 20) // cache 20 nodes worth before adding into the jacobian
      {
        _num_cached = 0;
        Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
        _fe_problem.assembly(_tid).addCachedJacobianContributions(_jacobian);
      }
    }
  }
}
Example 12: lock
void
ComputeNodalKernelJacobiansThread::operator() (const ConstNodeRange & range)
{
  ParallelUniqueId puid;
  _tid = puid.id;

  unsigned int num_cached = 0;

  for (ConstNodeRange::const_iterator node_it = range.begin(); node_it != range.end(); ++node_it)
  {
    const Node * node = *node_it;

    std::vector<std::pair<MooseVariable *, MooseVariable *> > & ce = _fe_problem.couplingEntries(_tid);
    for (std::vector<std::pair<MooseVariable *, MooseVariable *> >::iterator it = ce.begin(); it != ce.end(); ++it)
    {
      MooseVariable & ivariable = *(*it).first;
      MooseVariable & jvariable = *(*it).second;

      unsigned int ivar = ivariable.number();
      unsigned int jvar = jvariable.number();

      // The NodalKernels that are active and are coupled to the jvar in question
      std::vector<MooseSharedPointer<NodalKernel> > active_involved_kernels;

      const std::set<SubdomainID> & block_ids = _sys.mesh().getNodeBlockIds(*node);
      for (std::set<SubdomainID>::const_iterator block_it = block_ids.begin(); block_it != block_ids.end(); ++block_it)
      {
        // Loop over each NodalKernel to see if it's involved with the jvar
        for (std::vector<MooseSharedPointer<NodalKernel> >::iterator nodal_kernel_it = _nodal_kernels[_tid].activeBlockNodalKernels(*block_it).begin();
             nodal_kernel_it != _nodal_kernels[_tid].activeBlockNodalKernels(*block_it).end();
             ++nodal_kernel_it)
        {
          MooseSharedPointer<NodalKernel> & nodal_kernel = *nodal_kernel_it;

          // If this NodalKernel isn't operating on this ivar... skip it
          if (nodal_kernel->variable().number() != ivar)
            break;

          // If this NodalKernel is acting on the jvar add it to the list and short-circuit the loop
          if (nodal_kernel->variable().number() == jvar)
          {
            active_involved_kernels.push_back(nodal_kernel);
            continue;
          }

          // See if this NodalKernel is coupled to the jvar
          const std::vector<MooseVariable *> & coupled_vars = (*nodal_kernel_it)->getCoupledMooseVars();
          for (std::vector<MooseVariable *>::const_iterator var_it = coupled_vars.begin(); var_it != coupled_vars.end(); ++var_it)
          {
            if ((*var_it)->number() == jvar)
            {
              active_involved_kernels.push_back(nodal_kernel);
              break; // It only takes one
            }
          }
        }
      }

      // Did we find any NodalKernels coupled to this jvar?
      if (!active_involved_kernels.empty())
      {
        // prepare variables
        for (std::map<std::string, MooseVariable *>::iterator it = _sys._nodal_vars[_tid].begin(); it != _sys._nodal_vars[_tid].end(); ++it)
        {
          MooseVariable * var = it->second;
          var->prepareAux();
        }

        _fe_problem.reinitNode(node, _tid);

        for (std::vector<MooseSharedPointer<NodalKernel> >::iterator nodal_kernel_it = active_involved_kernels.begin();
             nodal_kernel_it != active_involved_kernels.end();
             ++nodal_kernel_it)
          (*nodal_kernel_it)->computeOffDiagJacobian(jvar);

        num_cached++;

        if (num_cached % 20 == 0) // Cache 20 nodes worth before adding into the jacobian
        {
          num_cached = 0;
          Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
          _fe_problem.assembly(_tid).addCachedJacobianContributions(_jacobian);
        }
      }
    }
  }
}
Example 13: lock
void
ComputeNodalKernelJacobiansThread::onNode(ConstNodeRange::const_iterator & node_it)
{
  const Node * node = *node_it;

  std::vector<std::pair<MooseVariable *, MooseVariable *>> & ce = _fe_problem.couplingEntries(_tid);
  for (const auto & it : ce)
  {
    MooseVariable & ivariable = *(it.first);
    MooseVariable & jvariable = *(it.second);

    unsigned int ivar = ivariable.number();
    unsigned int jvar = jvariable.number();

    // The NodalKernels that are active and are coupled to the jvar in question
    std::vector<std::shared_ptr<NodalKernel>> active_involved_kernels;

    const std::set<SubdomainID> & block_ids = _aux_sys.mesh().getNodeBlockIds(*node);
    for (const auto & block : block_ids)
    {
      if (_nodal_kernels.hasActiveBlockObjects(block, _tid))
      {
        // Loop over each NodalKernel to see if it's involved with the jvar
        const auto & objects = _nodal_kernels.getActiveBlockObjects(block, _tid);
        for (const auto & nodal_kernel : objects)
        {
          // If this NodalKernel isn't operating on this ivar... skip it
          if (nodal_kernel->variable().number() != ivar)
            break;

          // If this NodalKernel is acting on the jvar add it to the list and short-circuit the loop
          if (nodal_kernel->variable().number() == jvar)
          {
            active_involved_kernels.push_back(nodal_kernel);
            continue;
          }

          // See if this NodalKernel is coupled to the jvar
          const std::vector<MooseVariable *> & coupled_vars = nodal_kernel->getCoupledMooseVars();
          for (const auto & var : coupled_vars)
            if (var->number() == jvar)
            {
              active_involved_kernels.push_back(nodal_kernel);
              break; // It only takes one
            }
        }
      }
    }

    // Did we find any NodalKernels coupled to this jvar?
    if (!active_involved_kernels.empty())
    {
      // prepare variables
      for (const auto & it : _aux_sys._nodal_vars[_tid])
      {
        MooseVariable * var = it.second;
        var->prepareAux();
      }

      _fe_problem.reinitNode(node, _tid);

      for (const auto & nodal_kernel : active_involved_kernels)
        nodal_kernel->computeOffDiagJacobian(jvar);

      _num_cached++;

      if (_num_cached == 20) // Cache 20 nodes worth before adding into the jacobian
      {
        _num_cached = 0;
        Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
        _fe_problem.assembly(_tid).addCachedJacobianContributions(_jacobian);
      }
    }
  }
}