This article collects and summarizes typical usage examples of the C++ method MeshBase::active_elements_end. If you are wondering what exactly MeshBase::active_elements_end does or how to use it, the curated code samples below may help. You can also explore further usage examples of MeshBase, the class this method belongs to.
The following presents 14 code examples of the MeshBase::active_elements_end method, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
Example 1: fill
void LocationMap<Elem>::fill(MeshBase& mesh)
{
  // Populate the elem map
  MeshBase::element_iterator it  = mesh.active_elements_begin(),
                             end = mesh.active_elements_end();

  for (; it != end; ++it)
    this->insert(**it);
}
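As a quick illustration, a hedged usage sketch (it assumes LocationMap's init() and find() interface as declared in libMesh's location map header; the query point is hypothetical):

LocationMap<Elem> elem_map;
elem_map.init(mesh);                  // init() populates the map via fill()
Point p(0.5, 0.5, 0.);                // hypothetical query location
Elem * elem_at_p = elem_map.find(p);  // null pointer if nothing maps there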
Example 2: _do_partition
void MetisPartitioner::_do_partition (MeshBase & mesh,
                                      const unsigned int n_pieces)
{
  this->partition_range(mesh,
                        mesh.active_elements_begin(),
                        mesh.active_elements_end(),
                        n_pieces);
}
Example 3: _do_partition
void SFCPartitioner::_do_partition (MeshBase & mesh,
                                    const unsigned int n)
{
  this->partition_range(mesh,
                        mesh.active_elements_begin(),
                        mesh.active_elements_end(),
                        n);
}
Example 4: n_active_levels_on_processor
unsigned int CheckpointIO::n_active_levels_on_processor(const MeshBase & mesh) const
{
  unsigned int max_level = 0;

  MeshBase::const_element_iterator       el     = mesh.active_elements_begin();
  const MeshBase::const_element_iterator end_el = mesh.active_elements_end();

  for ( ; el != end_el; ++el)
    max_level = std::max((*el)->level(), max_level);

  return max_level + 1;
}
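The count covers only the elements visible to the local processor, so on a distributed mesh a reduction is needed to obtain the global level count. A hedged sketch (Communicator::max() is standard libMesh parallel API):

// Hedged sketch, e.g. from inside another CheckpointIO member:
unsigned int n_levels = this->n_active_levels_on_processor(mesh);
mesh.comm().max(n_levels);  // global maximum across all processors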
Example 5: _do_partition
// ------------------------------------------------------------
// LinearPartitioner implementation
void LinearPartitioner::_do_partition (MeshBase& mesh,
                                       const unsigned int n)
{
  libmesh_assert_greater (n, 0);

  // Check for an easy return
  if (n == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // Create a simple linear partitioning
  {
    START_LOG ("partition()", "LinearPartitioner");

    const dof_id_type n_active_elem = mesh.n_active_elem();
    // Note: this assumes n <= n_active_elem, so that blksize > 0.
    const dof_id_type blksize = n_active_elem/n;

    dof_id_type e = 0;

    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    for ( ; elem_it != elem_end; ++elem_it)
      {
        Elem * elem = *elem_it;

        if ((e/blksize) < n)
          elem->processor_id() =
            libmesh_cast_int<processor_id_type>(e/blksize);
        else
          // Leftover elements (when n does not divide n_active_elem
          // evenly) are assigned to processor 0.
          elem->processor_id() = 0;

        e++;
      }

    STOP_LOG ("partition()", "LinearPartitioner");
  }
}
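To make the block arithmetic concrete: with 10 active elements and n = 3, blksize = 10/3 = 3, so elements 0-8 map to processors 0, 1 and 2 in blocks of three, while the leftover element 9 (9/3 == 3, which is not < 3) falls into the else branch and lands on processor 0. A standalone sketch of the same mapping (not libMesh code):

#include <iostream>

int main()
{
  const unsigned int n_active_elem = 10, n = 3;
  const unsigned int blksize = n_active_elem / n;  // == 3

  for (unsigned int e = 0; e < n_active_elem; ++e)
    {
      // Same rule as the partitioner above: block index, or 0 for leftovers.
      const unsigned int pid = ((e / blksize) < n) ? (e / blksize) : 0;
      std::cout << "element " << e << " -> processor " << pid << '\n';
    }
  return 0;
}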
Example 6: _do_partition
// ------------------------------------------------------------
// SFCPartitioner implementation
void SFCPartitioner::_do_partition (MeshBase & mesh,
                                    const unsigned int n)
{
  libmesh_assert_greater (n, 0);

  // Check for an easy return
  if (n == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // What to do if the sfcurves library IS NOT present
#ifndef LIBMESH_HAVE_SFCURVES

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Space Filling Curve support. Using a linear" << std::endl
               << "partitioner instead!" << std::endl;

  LinearPartitioner lp;
  lp.partition (mesh, n);

  // What to do if the sfcurves library IS present
#else

  LOG_SCOPE("sfc_partition()", "SFCPartitioner");

  const dof_id_type n_active_elem = mesh.n_active_elem();
  const dof_id_type n_elem        = mesh.n_elem();

  // the forward_map maps the active element id
  // into a contiguous block of indices
  std::vector<dof_id_type> forward_map (n_elem, DofObject::invalid_id);

  // the reverse_map maps the contiguous ids back
  // to active elements
  std::vector<Elem *> reverse_map (n_active_elem, libmesh_nullptr);

  int size = static_cast<int>(n_active_elem);
  std::vector<double> x (size);
  std::vector<double> y (size);
  std::vector<double> z (size);
  std::vector<int>    table (size);

  // We need to map the active element ids into a
  // contiguous range.
  {
    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    dof_id_type el_num = 0;

    for (; elem_it != elem_end; ++elem_it)
      {
        libmesh_assert_less ((*elem_it)->id(), forward_map.size());
        libmesh_assert_less (el_num, reverse_map.size());

        forward_map[(*elem_it)->id()] = el_num;
        reverse_map[el_num]           = *elem_it;
        el_num++;
      }
    libmesh_assert_equal_to (el_num, n_active_elem);
  }

  // Get the centroid for each active element
  {
    // const_active_elem_iterator       elem_it (mesh.const_elements_begin());
    // const const_active_elem_iterator elem_end(mesh.const_elements_end());

    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert_less (elem->id(), forward_map.size());

        const Point p = elem->centroid();

        x[forward_map[elem->id()]] = p(0);
        y[forward_map[elem->id()]] = p(1);
        z[forward_map[elem->id()]] = p(2);
      }
  }

  // build the space-filling curve
  if (_sfc_type == "Hilbert")
    Sfc::hilbert (&x[0], &y[0], &z[0], &size, &table[0]);

  else if (_sfc_type == "Morton")
    Sfc::morton  (&x[0], &y[0], &z[0], &size, &table[0]);
//......... remainder of this example omitted .........
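The omitted remainder consumes table, which the sfcurves routines fill with the traversal order of the curve. A hedged sketch of how that ordering could then be cut into per-processor blocks (the 1-based indexing of table and the ceiling block size are assumptions, not confirmed by the excerpt):

// Hedged sketch, not the verbatim elided code.
const dof_id_type blksize = (n_active_elem + n - 1) / n;  // ceiling division

for (dof_id_type i = 0; i < n_active_elem; i++)
  {
    Elem * elem = reverse_map[table[i] - 1];  // assumes Fortran-style 1-based table
    elem->processor_id() =
      static_cast<processor_id_type>(i / blksize);
  }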
Example 7: set_node_processor_ids
void Partitioner::set_node_processor_ids(MeshBase & mesh)
{
  LOG_SCOPE("set_node_processor_ids()","Partitioner");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // If we have any unpartitioned elements at this
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                    mesh.unpartitioned_elements_end()) == 0);

  // const dof_id_type orig_n_local_nodes = mesh.n_local_nodes();

  // libMesh::err << "[" << mesh.processor_id() << "]: orig_n_local_nodes="
  //              << orig_n_local_nodes << std::endl;

  // Build up request sets. Each node is currently owned by a processor because
  // it is connected to an element owned by that processor. However, during the
  // repartitioning phase that element may have been assigned a new processor id, but
  // it is still resident on the original processor. We need to know where to look
  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
  // for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes. Since they are required
  // to live on all processors we can simply rely on ourselves to number them properly.
  std::vector<std::vector<dof_id_type> >
    requested_node_ids(mesh.n_processors());

  // Loop over all the nodes, count the ones on each processor. We can skip ourself
  std::vector<dof_id_type> ghost_nodes_from_proc(mesh.n_processors(), 0);

  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();

  for (; node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type pid=0; pid != mesh.n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node. We can safely skip ourself
  for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, requested_node_ids.size());
          libmesh_assert_less (requested_node_ids[current_pid].size(),
                               ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }

      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }

  // Loop over all the active elements
  MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
  const MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for ( ; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      //
      // TODO: we would probably get better parallel partitioning if
      // we did something like "min for even numbered nodes, max for
      // odd numbered". We'd need to be careful about how that would
      // affect solution ordering for I/O, though.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->node_ptr(n)->processor_id() = std::min(elem->node_ptr(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator sub_it = mesh.subactive_elements_begin();
//......... remainder of this example omitted .........
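The omitted remainder performs the subactive loop announced above; a hedged sketch of the stated rule, assigning a pid only to nodes that no active element has already claimed:

// Hedged sketch, not the verbatim elided code.
const MeshBase::element_iterator sub_end = mesh.subactive_elements_end();

for ( ; sub_it != sub_end; ++sub_it)
  {
    Elem * elem = *sub_it;
    for (unsigned int n=0; n<elem->n_nodes(); ++n)
      if (elem->node_ptr(n)->processor_id() == DofObject::invalid_processor_id)
        elem->node_ptr(n)->processor_id() = elem->processor_id();
  }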
Example 8: set_parent_processor_ids
void Partitioner::set_parent_processor_ids(MeshBase & mesh)
{
  // Ignore the parameter when !LIBMESH_ENABLE_AMR
  libmesh_ignore(mesh);

  LOG_SCOPE("set_parent_processor_ids()", "Partitioner");

#ifdef LIBMESH_ENABLE_AMR

  // If the mesh is serial we have access to all the elements,
  // in particular all the active ones. We can therefore set
  // the parent processor ids indirectly through their children, and
  // set the subactive processor ids while examining their active
  // ancestors.
  // By convention a parent is assigned to the minimum processor
  // of all its children, and a subactive is assigned to the processor
  // of its active ancestor.
  if (mesh.is_serial())
    {
      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child = *it;

          // First set descendents
          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();

          // Then set ancestors
          Elem * parent = child->parent();

          while (parent)
            {
              // invalidate the parent id, otherwise the min below
              // will not work if the current parent id is less
              // than all the children!
              parent->invalidate_processor_id();

              for (unsigned int c=0; c<parent->n_children(); c++)
                {
                  child = parent->child_ptr(c);
                  libmesh_assert(child);
                  libmesh_assert(!child->is_remote());
                  libmesh_assert_not_equal_to (child->processor_id(), DofObject::invalid_processor_id);
                  parent->processor_id() = std::min(parent->processor_id(),
                                                    child->processor_id());
                }
              parent = parent->parent();
            }
        }
    }

  // When the mesh is parallel we cannot guarantee that parents have access to
  // all their children.
  else
    {
      // Setting subactive processor ids is easy: we can guarantee
      // that children have access to all their parents.

      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child = *it;

          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
        }

      // When the mesh is parallel we cannot guarantee that parents have access to
      // all their children.
      // We will use a brute-force approach here. Each processor finds its parent
      // elements and sets the parent pid to the minimum of its
      // semilocal descendants.
      // A global reduction is then performed to make sure the true minimum is found.
      // As noted, this is required because we cannot guarantee that a parent has
      // access to all its children on any single processor.
      libmesh_parallel_only(mesh.comm());
      libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                       mesh.unpartitioned_elements_end()) == 0);

      const dof_id_type max_elem_id = mesh.max_elem_id();

      std::vector<processor_id_type>
        parent_processor_ids (std::min(communication_blocksize,
                                       max_elem_id));

      for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
//......... remainder of this example omitted .........
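The omitted body of this loop carries out the blocked reduction described in the comments; a hedged sketch of the shape of one block (Communicator::min() over a vector is standard libMesh parallel API; the per-range bookkeeping is elided here as well):

// Hedged sketch of one reduction block, not the verbatim elided code.
std::fill (parent_processor_ids.begin(), parent_processor_ids.end(),
           DofObject::invalid_processor_id);

// ... each processor records the min pid over its semilocal parents
//     whose ids fall in the current block ...

mesh.comm().min(parent_processor_ids);  // true global minimum per parent id

// ... parents in the current block then adopt the reduced pids ...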
Example 9: move_mesh
void LinearElasticityWithContact::move_mesh (MeshBase & input_mesh,
                                             const NumericVector<Number> & input_solution)
{
  // Maintain a set of node ids that we've encountered.
  LIBMESH_BEST_UNORDERED_SET<dof_id_type> encountered_node_ids;

  // Localize input_solution so that we have the data to move all
  // elements (not just elements local to this processor).
  UniquePtr< NumericVector<Number> > localized_input_solution =
    NumericVector<Number>::build(input_solution.comm());
  localized_input_solution->init (input_solution.size(), false, SERIAL);
  input_solution.localize(*localized_input_solution);

  MeshBase::const_element_iterator       el     = input_mesh.active_elements_begin();
  const MeshBase::const_element_iterator end_el = input_mesh.active_elements_end();

  for ( ; el != end_el; ++el)
    {
      Elem * elem = *el;
      Elem * orig_elem = _sys.get_mesh().elem_ptr(elem->id());

      for (unsigned int node_id=0; node_id<elem->n_nodes(); node_id++)
        {
          Node & node = elem->node_ref(node_id);

          if (encountered_node_ids.find(node.id()) != encountered_node_ids.end())
            continue;

          encountered_node_ids.insert(node.id());

          std::vector<std::string> uvw_names(3);
          uvw_names[0] = "u";
          uvw_names[1] = "v";
          uvw_names[2] = "w";

          {
            const Point master_point = elem->master_point(node_id);

            Point uvw;
            for (unsigned int index=0; index<uvw_names.size(); index++)
              {
                const unsigned int var = _sys.variable_number(uvw_names[index]);
                const FEType & fe_type = _sys.get_dof_map().variable_type(var);

                FEComputeData data (_sys.get_equation_systems(), master_point);

                FEInterface::compute_data(elem->dim(),
                                          fe_type,
                                          elem,
                                          data);

                std::vector<dof_id_type> dof_indices_var;
                _sys.get_dof_map().dof_indices (orig_elem, dof_indices_var, var);

                for (unsigned int i=0; i<dof_indices_var.size(); i++)
                  {
                    Number value = (*localized_input_solution)(dof_indices_var[i]) * data.shape[i];

#ifdef LIBMESH_USE_COMPLEX_NUMBERS
                    // We explicitly store the real part in uvw
                    uvw(index) += value.real();
#else
                    uvw(index) += value;
#endif
                  }
              }

            // Update the node's location
            node += uvw;
          }
        }
    }
}
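A hedged usage sketch: the routine is typically handed a clone of the system's mesh together with a displacement-carrying solution vector, so the original mesh stays untouched (the object name le_contact is hypothetical; MeshBase::clone() and System::solution are standard libMesh API):

UniquePtr<MeshBase> mesh_clone = mesh.clone();
le_contact.move_mesh (*mesh_clone, *sys.solution);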
Example 10: assign_partitioning
void ParmetisPartitioner::assign_partitioning (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  const dof_id_type
    first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  std::vector<std::vector<dof_id_type> >
    requested_ids(mesh.n_processors()),
    requests_to_fill(mesh.n_processors());

  MeshBase::element_iterator elem_it  = mesh.active_elements_begin();
  MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      // we need to get the index from the owning processor
      // (note we cannot assign it now -- we are iterating
      // over elements again and this will be bad!)
      libmesh_assert_less (elem->processor_id(), requested_ids.size());
      requested_ids[elem->processor_id()].push_back(elem->id());
    }

  // Trade with all processors (including self) to get their indices
  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      // Trade my requests with processor procup and procdown
      const processor_id_type procup = (mesh.processor_id() + pid) % mesh.n_processors();
      const processor_id_type procdown = (mesh.n_processors() +
                                          mesh.processor_id() - pid) % mesh.n_processors();

      mesh.comm().send_receive (procup,   requested_ids[procup],
                                procdown, requests_to_fill[procdown]);

      // we can overwrite these requested ids in-place.
      for (std::size_t i=0; i<requests_to_fill[procdown].size(); i++)
        {
          const dof_id_type requested_elem_index =
            requests_to_fill[procdown][i];

          libmesh_assert(_global_index_by_pid_map.count(requested_elem_index));

          const dof_id_type global_index_by_pid =
            _global_index_by_pid_map[requested_elem_index];

          const dof_id_type local_index =
            global_index_by_pid - first_local_elem;

          libmesh_assert_less (local_index, _pmetis->part.size());
          libmesh_assert_less (local_index, mesh.n_active_local_elem());

          const unsigned int elem_procid =
            static_cast<unsigned int>(_pmetis->part[local_index]);

          libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));

          requests_to_fill[procdown][i] = elem_procid;
        }

      // Trade back
      mesh.comm().send_receive (procdown, requests_to_fill[procdown],
                                procup,   requested_ids[procup]);
    }

  // and finally assign the partitioning.
  // note we are iterating in exactly the same order
  // used to build up the request, so we can expect the
  // required entries to be in the proper sequence.
  elem_it  = mesh.active_elements_begin();
  elem_end = mesh.active_elements_end();

  for (std::vector<unsigned int> counters(mesh.n_processors(), 0);
       elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      const processor_id_type current_pid = elem->processor_id();

      libmesh_assert_less (counters[current_pid], requested_ids[current_pid].size());

      const processor_id_type elem_procid =
        requested_ids[current_pid][counters[current_pid]++];

      libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));
      elem->processor_id() = elem_procid;
    }
}
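The procup/procdown indices implement a classic ring exchange: over n_processors rounds, every ordered pair of processors trades exactly once, with sends and receives paired so the pattern cannot deadlock. A standalone trace of the index arithmetic (not libMesh code; the pid values are hypothetical):

#include <cstdio>

int main()
{
  const unsigned int n_procs = 4, my_pid = 1;  // hypothetical values

  for (unsigned int pid = 0; pid < n_procs; ++pid)
    {
      // Same arithmetic as assign_partitioning() above.
      const unsigned int procup   = (my_pid + pid) % n_procs;
      const unsigned int procdown = (n_procs + my_pid - pid) % n_procs;
      std::printf("round %u: send to %u, receive from %u\n", pid, procup, procdown);
    }
  return 0;
}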
Example 11: build_graph
void ParmetisPartitioner::build_graph (const MeshBase & mesh)
{
  // build the graph in distributed CSR format. Note that
  // the edges in the graph will correspond to
  // face neighbors
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements. We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef LIBMESH_BEST_UNORDERED_MULTIMAP<const Elem *, const Elem *> map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::const_element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        // If we don't have an interior_parent then there's nothing to look us
        // up.
        if ((elem->dim() >= LIBMESH_DIM) ||
            !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the Elem set methods
            // would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) ||         \
    defined(LIBMESH_HAVE_TR1_UNORDERED_MAP) ||          \
    defined(LIBMESH_HAVE_HASH_MAP) ||                   \
    defined(LIBMESH_HAVE_EXT_HASH_MAP)
            interior_to_boundary_map.insert
              (std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert
              (interior_to_boundary_map.begin(),
               std::make_pair(neighbor, elem));
#endif
          }
      }
  }

#ifdef LIBMESH_ENABLE_AMR
  std::vector<const Elem *> neighbors_offspring;
#endif

  std::vector<std::vector<dof_id_type> > graph(n_active_local_elem);
  dof_id_type graph_size=0;

  const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem * elem = *elem_it;

      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const dof_id_type global_index_by_pid =
        _global_index_by_pid_map[elem->id()];

      const dof_id_type local_index =
        global_index_by_pid - first_local_elem;
      libmesh_assert_less (local_index, n_active_local_elem);

      std::vector<dof_id_type> & graph_row = graph[local_index];

      // Loop over the element's neighbors. An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
        {
          const Elem * neighbor = elem->neighbor(ms);

          if (neighbor != libmesh_nullptr)
            {
              // If the neighbor is active treat it
              // as a connection
              if (neighbor->active())
                {
                  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
                  const dof_id_type neighbor_global_index_by_pid =
                    _global_index_by_pid_map[neighbor->id()];

                  graph_row.push_back(neighbor_global_index_by_pid);
                  graph_size++;
//......... remainder of this example omitted .........
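The omitted remainder flattens graph into the CSR arrays ParMETIS consumes: row offsets plus a concatenated adjacency list. A hedged sketch of that conversion (the array names and the Metis::idx_t type follow the conventions used elsewhere on this page, which is an assumption here):

// Hedged sketch, not the verbatim elided code.
std::vector<Metis::idx_t> xadj, adjncy;
xadj.reserve (n_active_local_elem + 1);
adjncy.reserve (graph_size);

for (std::size_t r=0; r<graph.size(); r++)
  {
    xadj.push_back (static_cast<Metis::idx_t>(adjncy.size()));  // row r starts here
    adjncy.insert (adjncy.end(), graph[r].begin(), graph[r].end());
  }
xadj.push_back (static_cast<Metis::idx_t>(adjncy.size()));      // closing offset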
Example 12: initialize
//......... beginning of this example omitted .........
{
  std::vector<dof_id_type> global_index;

  // create the mapping which is contiguous by processor
  dof_id_type pid_offset=0;
  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
      const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid);

      // note that we may not have all (or any!) the active elements which belong on this processor,
      // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid])
      // is constructed. Only the indices for the elements we pass in are returned in the array.
      MeshCommunication().find_global_indices (mesh.comm(),
                                               bbox, it, end,
                                               global_index);

      for (dof_id_type cnt=0; it != end; ++it)
        {
          const Elem * elem = *it;
          libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
          libmesh_assert_less (cnt, global_index.size());
          libmesh_assert_less (global_index[cnt], _n_active_elem_on_proc[pid]);

          _global_index_by_pid_map.insert(std::make_pair(elem->id(), global_index[cnt++] + pid_offset));
        }

      pid_offset += _n_active_elem_on_proc[pid];
    }

  // create the unique mapping for all active elements independent of partitioning
  {
    MeshBase::const_element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::const_element_iterator end = mesh.active_elements_end();

    // Calling this on all processors a unique range in [0,n_active_elem) is constructed.
    // Only the indices for the elements we pass in are returned in the array.
    MeshCommunication().find_global_indices (mesh.comm(),
                                             bbox, it, end,
                                             global_index);

    for (dof_id_type cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;
        libmesh_assert (!global_index_map.count(elem->id()));
        libmesh_assert_less (cnt, global_index.size());
        libmesh_assert_less (global_index[cnt], n_active_elem);

        global_index_map.insert(std::make_pair(elem->id(), global_index[cnt++]));
      }
  }
  // really, shouldn't be close!
  libmesh_assert_less_equal (global_index_map.size(), n_active_elem);
  libmesh_assert_less_equal (_global_index_by_pid_map.size(), n_active_elem);

  // At this point the two maps should be the same size. If they are not
  // then the number of active elements is not the same as the sum over all
  // processors of the number of active elements per processor, which means
  // there must be some unpartitioned objects out there.
  if (global_index_map.size() != _global_index_by_pid_map.size())
    libmesh_error_msg("ERROR: ParmetisPartitioner cannot handle unpartitioned objects!");
}

// Finally, we need to initialize the vertex (partition) weights and the initial subdomain
// mapping. The subdomain mapping will be independent of the processor mapping, and is
// defined by a simple mapping of the global indices we just found.
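A hedged sketch of what that initialization could look like (the weight heuristic and the names first_local_elem and n_subdomains are assumptions for illustration; libMesh's actual choices may differ):

// Hedged sketch, not the verbatim elided code.
MeshBase::const_element_iterator       it  = mesh.active_local_elements_begin();
const MeshBase::const_element_iterator end = mesh.active_local_elements_end();

for (; it != end; ++it)
  {
    const Elem * elem = *it;
    const dof_id_type local_index =
      _global_index_by_pid_map[elem->id()] - first_local_elem;   // hypothetical offset

    _pmetis->vwgt[local_index] = static_cast<Metis::idx_t>(elem->n_nodes());   // weight ~ element size
    _pmetis->part[local_index] =
      static_cast<Metis::idx_t>(global_index_map[elem->id()] % n_subdomains);  // hypothetical subdomain map
  }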
Example 13: _do_partition
// ------------------------------------------------------------
// MetisPartitioner implementation
void MetisPartitioner::_do_partition (MeshBase & mesh,
                                      const unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);
  libmesh_assert (mesh.is_serial());

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support. Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition (mesh, n_pieces);

  // What to do if the Metis library IS present
#else

  LOG_SCOPE("partition()", "MetisPartitioner");

  const dof_id_type n_active_elem = mesh.n_active_elem();

  // build the graph
  // std::vector<Metis::idx_t> options(5);
  std::vector<Metis::idx_t> vwgt(n_active_elem);
  std::vector<Metis::idx_t> part(n_active_elem);

  Metis::idx_t
    n = static_cast<Metis::idx_t>(n_active_elem),  // number of "nodes" (elements)
                                                   //   in the graph
    // wgtflag = 2,                                // weights on vertices only,
    //                                             //   none on edges
    // numflag = 0,                                // C-style 0-based numbering
    nparts  = static_cast<Metis::idx_t>(n_pieces), // number of subdomains to create
    edgecut = 0;                                   // the numbers of edges cut by the
                                                   //   resulting partition

  // Set the options
  // options[0] = 0; // use default options

  // Metis will only consider the active elements.
  // We need to map the active element ids into a
  // contiguous range. Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  vectormap<dof_id_type, dof_id_type> global_index_map;
  global_index_map.reserve (n_active_elem);

  {
    std::vector<dof_id_type> global_index;

    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    MeshCommunication().find_global_indices (mesh.comm(),
                                             MeshTools::bounding_box(mesh),
                                             it, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_active_elem);

    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;

        global_index_map.insert (std::make_pair(elem->id(), global_index[cnt++]));
      }
    libmesh_assert_equal_to (global_index_map.size(), n_active_elem);
  }

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements. We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef LIBMESH_BEST_UNORDERED_MULTIMAP<const Elem *, const Elem *> map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::const_element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        // If we don't have an interior_parent then there's nothing to look us
//......... remainder of this example omitted .........
Example 14: _do_partition
// ------------------------------------------------------------
// MetisPartitioner implementation (older variant of Example 13)
void MetisPartitioner::_do_partition (MeshBase& mesh,
                                      const unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);
  libmesh_assert (mesh.is_serial());

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support. Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition (mesh, n_pieces);

  // What to do if the Metis library IS present
#else

  START_LOG("partition()", "MetisPartitioner");

  const dof_id_type n_active_elem = mesh.n_active_elem();

  // build the graph
  // std::vector<int> options(5);
  std::vector<int> vwgt(n_active_elem);
  std::vector<int> part(n_active_elem);

  int
    n = static_cast<int>(n_active_elem),  // number of "nodes" (elements)
                                          //   in the graph
    // wgtflag = 2,                       // weights on vertices only,
    //                                    //   none on edges
    // numflag = 0,                       // C-style 0-based numbering
    nparts  = static_cast<int>(n_pieces), // number of subdomains to create
    edgecut = 0;                          // the numbers of edges cut by the
                                          //   resulting partition

  // Set the options
  // options[0] = 0; // use default options

  // Metis will only consider the active elements.
  // We need to map the active element ids into a
  // contiguous range. Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  std::map<const Elem*, dof_id_type> global_index_map;
  {
    std::vector<dof_id_type> global_index;

    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    MeshCommunication().find_global_indices (MeshTools::bounding_box(mesh),
                                             it, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_active_elem);

    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem *elem = *it;
        libmesh_assert (!global_index_map.count(elem));

        global_index_map[elem] = global_index[cnt++];
      }
    libmesh_assert_equal_to (global_index_map.size(), n_active_elem);
  }

  // build the graph in CSR format. Note that
  // the edges in the graph will correspond to
  // face neighbors
  std::vector<int> xadj, adjncy;
  {
    std::vector<const Elem*> neighbors_offspring;

    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    // This will be exact when there is no refinement and all the
    // elements are of the same type.
    std::size_t graph_size=0;
    std::vector<std::vector<dof_id_type> > graph(n_active_elem);

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem* elem = *elem_it;
//......... remainder of this example omitted .........