This article collects typical usage examples of the C++ method MeshBase::active_element_ptr_range. If you are unsure exactly what MeshBase::active_element_ptr_range does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of its containing class, MeshBase.
Three code examples of the MeshBase::active_element_ptr_range method are shown below, ordered by popularity by default.
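Before the examples, a minimal sketch of the basic pattern may help: active_element_ptr_range() returns a range over pointers to the mesh's active (unrefined) elements, suitable for range-based for loops. The count_active helper below is hypothetical, written only to illustrate the call; it is not taken from the examples that follow.

#include "libmesh/mesh_base.h"
#include "libmesh/elem.h"
#include <map>

using namespace libMesh;

// Hypothetical helper: tally the active elements in each subdomain by
// iterating the range returned by active_element_ptr_range().
std::map<subdomain_id_type, dof_id_type> count_active (MeshBase & mesh)
{
  std::map<subdomain_id_type, dof_id_type> counts;
  for (auto & elem : mesh.active_element_ptr_range())
    counts[elem->subdomain_id()]++;   // elem is an Elem *
  return counts;
}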
Example 1: set_node_processor_ids
void Partitioner::set_node_processor_ids(MeshBase & mesh)
{
  LOG_SCOPE("set_node_processor_ids()", "Partitioner");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // If we have any unpartitioned elements at this
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                    mesh.unpartitioned_elements_end()) == 0);

  // const dof_id_type orig_n_local_nodes = mesh.n_local_nodes();
  // libMesh::err << "[" << mesh.processor_id() << "]: orig_n_local_nodes="
  //              << orig_n_local_nodes << std::endl;

  // Build up request sets.  Each node is currently owned by a processor
  // because it is connected to an element owned by that processor.  However,
  // during the repartitioning phase that element may have been assigned a new
  // processor id, but it is still resident on the original processor.  We need
  // to know where to look for new ids before assigning new ids, otherwise we
  // may be asking the wrong processors for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes.  Since
  // they are required to live on all processors we can simply rely on
  // ourselves to number them properly.
  std::vector<std::vector<dof_id_type>>
    requested_node_ids(mesh.n_processors());

  // Loop over all the nodes, count the ones on each processor.  We can
  // skip ourself.
  std::vector<dof_id_type> ghost_nodes_from_proc(mesh.n_processors(), 0);

  for (auto & node : mesh.node_ptr_range())
    {
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type pid=0; pid != mesh.n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node.  We can safely skip ourself.
  for (auto & node : mesh.node_ptr_range())
    {
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, requested_node_ids.size());
          libmesh_assert_less (requested_node_ids[current_pid].size(),
                               ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }

      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }

  // Loop over all the active elements
  for (auto & elem : mesh.active_element_ptr_range())
    {
      libmesh_assert(elem);
      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      //
      // TODO: we would probably get better parallel partitioning if
      // we did something like "min for even numbered nodes, max for
      // odd numbered".  We'd need to be careful about how that would
      // affect solution ordering for I/O, though.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->node_ptr(n)->processor_id() = std::min(elem->node_ptr(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator       sub_it  = mesh.subactive_elements_begin();
  const MeshBase::element_iterator sub_end = mesh.subactive_elements_end();

  for ( ; sub_it != sub_end; ++sub_it)
    {
      Elem * elem = *sub_it;
      libmesh_assert(elem);
      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);
      //.........part of the code omitted here.........
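The elided remainder of this example services the request sets built above: each processor asks a ghost node's *current* owner for the processor id just computed from that owner's elements, then applies the answers. The function below is a rough, hypothetical sketch of such an exchange using libMesh's Communicator::send_receive in a ring pattern (one partner per round); its name and structure are illustrative, not the library's exact code, and the real implementation also handles local renumbering and unpartitioned nodes.

#include "libmesh/mesh_base.h"
#include "libmesh/parallel.h"
#include <vector>

using namespace libMesh;

// Hypothetical sketch of the elided exchange of new node processor ids.
void exchange_new_node_pids (MeshBase & mesh,
                             std::vector<std::vector<dof_id_type>> & requested_node_ids)
{
  for (processor_id_type p = 1; p != mesh.n_processors(); ++p)
    {
      const processor_id_type procup =
        (mesh.processor_id() + p) % mesh.n_processors();
      const processor_id_type procdown =
        (mesh.n_processors() + mesh.processor_id() - p) % mesh.n_processors();

      // Trade request lists with this round's partners.
      std::vector<dof_id_type> request_to_fill;
      mesh.comm().send_receive(procup, requested_node_ids[procup],
                               procdown, request_to_fill);

      // Answer with the pid each requested node now carries locally ...
      std::vector<processor_id_type> new_pids(request_to_fill.size());
      for (std::size_t i = 0; i != request_to_fill.size(); ++i)
        new_pids[i] = mesh.node_ref(request_to_fill[i]).processor_id();

      // ... and trade the answers back.
      std::vector<processor_id_type> filled_request;
      mesh.comm().send_receive(procdown, new_pids,
                               procup, filled_request);

      // Apply the answers to our ghost nodes currently owned by procup.
      for (std::size_t i = 0; i != filled_request.size(); ++i)
        mesh.node_ref(requested_node_ids[procup][i]).processor_id() =
          filled_request[i];
    }
}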
Example 2:
void LocationMap<Elem>::fill(MeshBase & mesh)
{
  // Populate the elem map
  for (auto & elem : mesh.active_element_ptr_range())
    this->insert(*elem);
}
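Here the active-element range is the entire method: every active element is keyed by location so it can later be looked up by point. A hedged usage sketch follows, assuming (as in libMesh's location_maps.h) that the public init() method performs the fill and that find() looks an element up by Point; the find_element_at helper is hypothetical.

#include "libmesh/location_maps.h"
#include "libmesh/elem.h"
#include "libmesh/point.h"

using namespace libMesh;

// Hypothetical lookup built on the fill() shown above.
Elem * find_element_at (MeshBase & mesh, const Point & p)
{
  LocationMap<Elem> elem_map;
  elem_map.init(mesh);        // public entry point; performs the fill
  return elem_map.find(p);    // element keyed at (or near) p, if any
}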
Example 3: set_parent_processor_ids
void Partitioner::set_parent_processor_ids(MeshBase & mesh)
{
  // Ignore the parameter when !LIBMESH_ENABLE_AMR
  libmesh_ignore(mesh);

  LOG_SCOPE("set_parent_processor_ids()", "Partitioner");

#ifdef LIBMESH_ENABLE_AMR

  // If the mesh is serial we have access to all the elements,
  // in particular all the active ones.  We can therefore set
  // the parent processor ids indirectly through their children, and
  // set the subactive processor ids while examining their active
  // ancestors.
  // By convention a parent is assigned to the minimum processor
  // of all its children, and a subactive is assigned to the processor
  // of its active ancestor.
  if (mesh.is_serial())
    {
      for (auto & child : mesh.active_element_ptr_range())
        {
          // First set descendants
          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (std::size_t i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();

          // Then set ancestors
          Elem * parent = child->parent();

          while (parent)
            {
              // invalidate the parent id, otherwise the min below
              // will not work if the current parent id is less
              // than all the children!
              parent->invalidate_processor_id();

              for (auto & child : parent->child_ref_range())
                {
                  libmesh_assert(!child.is_remote());
                  libmesh_assert_not_equal_to (child.processor_id(), DofObject::invalid_processor_id);
                  parent->processor_id() = std::min(parent->processor_id(),
                                                    child.processor_id());
                }
              parent = parent->parent();
            }
        }
    }

  // When the mesh is parallel we cannot guarantee that parents have access to
  // all their children.
  else
    {
      // Setting subactive processor ids is easy: we can guarantee
      // that children have access to all their parents.

      // Loop over all the active elements in the mesh
      for (auto & child : mesh.active_element_ptr_range())
        {
          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (std::size_t i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
        }

      // When the mesh is parallel we cannot guarantee that parents have
      // access to all their children.
      // We will use a brute-force approach here.  Each processor finds its
      // parent elements and sets the parent pid to the minimum of its
      // semilocal descendants.
      // A global reduction is then performed to make sure the true minimum
      // is found.  As noted, this is required because we cannot guarantee
      // that a parent has access to all its children on any single processor.
      libmesh_parallel_only(mesh.comm());
      libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                       mesh.unpartitioned_elements_end()) == 0);

      const dof_id_type max_elem_id = mesh.max_elem_id();

      std::vector<processor_id_type>
        parent_processor_ids (std::min(communication_blocksize,
                                       max_elem_id));

      for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
        {
          last_elem_id =
            std::min(static_cast<dof_id_type>((blk+1)*communication_blocksize),
                     max_elem_id);

          const dof_id_type first_elem_id = blk*communication_blocksize;

          std::fill (parent_processor_ids.begin(),
                     parent_processor_ids.end(),
                     DofObject::invalid_processor_id);

          // first build up local contributions to parent_processor_ids
          MeshBase::element_iterator       not_it  = mesh.ancestor_elements_begin();
          const MeshBase::element_iterator not_end = mesh.ancestor_elements_end();

          bool have_parent_in_block = false;
          //.........part of the code omitted here.........
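The elided remainder of this example records, for each ancestor element in the current id block, the minimum processor id over this processor's semilocal descendants, and then completes the computation with the global reduction described in the comments above. A minimal sketch of just that final step, assuming parent_processor_ids is indexed by elem_id - first_elem_id within the block:

// Each local entry holds the minimum semilocal child pid, or
// DofObject::invalid_processor_id where this processor saw no child.
// Since invalid_processor_id is the largest processor_id_type value,
// a global min yields the true minimum over all processors.
mesh.comm().min(parent_processor_ids);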