This article collects typical usage examples of the C++ method MeshBase::n_active_local_elem. If you are wondering what exactly MeshBase::n_active_local_elem does, or how to use it, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, MeshBase.
Below, 4 code examples of MeshBase::n_active_local_elem are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
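Before diving into the examples, a minimal sketch of the method itself may help: n_active_local_elem() returns the number of active elements assigned to the calling processor, so on a distributed run its value generally differs from rank to rank. The sketch below builds a trivially small mesh just to have something to query; the mesh setup is an assumption for illustration, not part of the examples on this page.

// Minimal sketch (assumed setup, not taken from the examples below).
#include <iostream>
#include "libmesh/libmesh.h"
#include "libmesh/mesh.h"
#include "libmesh/mesh_generation.h"

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init (argc, argv);

  // A small square mesh, just so there is something to count.
  libMesh::Mesh mesh(init.comm());
  libMesh::MeshTools::Generation::build_square (mesh, 10, 10);

  // Number of *active* elements assigned to this processor;
  // run with several MPI ranks to see the per-rank counts differ.
  const libMesh::dof_id_type n_local = mesh.n_active_local_elem();

  libMesh::out << "processor " << mesh.processor_id()
               << " owns " << n_local << " active local elements"
               << std::endl;
  return 0;
}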
Example 1: assign_partitioning
void ParmetisPartitioner::assign_partitioning (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  const dof_id_type
    first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  std::vector<std::vector<dof_id_type> >
    requested_ids(mesh.n_processors()),
    requests_to_fill(mesh.n_processors());

  MeshBase::element_iterator elem_it  = mesh.active_elements_begin();
  MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      // we need to get the index from the owning processor
      // (note we cannot assign it now -- we are iterating
      // over elements again and this will be bad!)
      libmesh_assert_less (elem->processor_id(), requested_ids.size());
      requested_ids[elem->processor_id()].push_back(elem->id());
    }

  // Trade with all processors (including self) to get their indices
  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      // Trade my requests with processor procup and procdown
      const processor_id_type procup = (mesh.processor_id() + pid) % mesh.n_processors();
      const processor_id_type procdown = (mesh.n_processors() +
                                          mesh.processor_id() - pid) % mesh.n_processors();

      mesh.comm().send_receive (procup,   requested_ids[procup],
                                procdown, requests_to_fill[procdown]);

      // we can overwrite these requested ids in-place.
      for (std::size_t i=0; i<requests_to_fill[procdown].size(); i++)
        {
          const dof_id_type requested_elem_index =
            requests_to_fill[procdown][i];

          libmesh_assert(_global_index_by_pid_map.count(requested_elem_index));

          const dof_id_type global_index_by_pid =
            _global_index_by_pid_map[requested_elem_index];

          const dof_id_type local_index =
            global_index_by_pid - first_local_elem;

          libmesh_assert_less (local_index, _pmetis->part.size());
          libmesh_assert_less (local_index, mesh.n_active_local_elem());

          const unsigned int elem_procid =
            static_cast<unsigned int>(_pmetis->part[local_index]);

          libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));

          requests_to_fill[procdown][i] = elem_procid;
        }

      // Trade back
      mesh.comm().send_receive (procdown, requests_to_fill[procdown],
                                procup,   requested_ids[procup]);
    }

  // and finally assign the partitioning.
  // note we are iterating in exactly the same order
  // used to build up the request, so we can expect the
  // required entries to be in the proper sequence.
  std::vector<unsigned int> counters(mesh.n_processors(), 0);

  elem_it  = mesh.active_elements_begin();
  elem_end = mesh.active_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      const processor_id_type current_pid = elem->processor_id();

      libmesh_assert_less (counters[current_pid], requested_ids[current_pid].size());
      const processor_id_type elem_procid =
        requested_ids[current_pid][counters[current_pid]++];

      libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));
      elem->processor_id() = elem_procid;
    }
}
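The procup/procdown pair in Example 1 implements a ring exchange: at step pid, each processor sends to the rank pid places ahead of it and receives from the rank pid places behind it, so after n_processors() steps every pair of ranks (including the self-pair at step 0) has traded exactly once. The standalone sketch below isolates just that index arithmetic; the rank and size values are made-up stand-ins for mesh.processor_id() and mesh.n_processors().

// Sketch of the ring-exchange index pattern used above.
#include <iostream>

int main ()
{
  const unsigned int size = 4; // assumed number of processors
  const unsigned int rank = 1; // assumed id of this processor

  for (unsigned int pid = 0; pid < size; pid++)
    {
      // Partner we send to, and partner we receive from, at step pid.
      const unsigned int procup   = (rank + pid) % size;
      const unsigned int procdown = (size + rank - pid) % size;

      std::cout << "step " << pid
                << ": send to " << procup
                << ", receive from " << procdown << std::endl;
    }
  return 0;
}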
Example 2: initialize
void ParmetisPartitioner::initialize (const MeshBase & mesh,
                                      const unsigned int n_sbdmns)
{
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // Set parameters.
  _pmetis->wgtflag = 2;                                      // weights on vertices only
  _pmetis->ncon    = 1;                                      // one weight per vertex
  _pmetis->numflag = 0;                                      // C-style 0-based numbering
  _pmetis->nparts  = static_cast<Parmetis::idx_t>(n_sbdmns); // number of subdomains to create
  _pmetis->edgecut = 0;                                      // the number of edges cut by the partition

  // Initialize data structures for ParMETIS
  _pmetis->vtxdist.resize (mesh.n_processors()+1); std::fill (_pmetis->vtxdist.begin(), _pmetis->vtxdist.end(), 0);
  _pmetis->tpwgts.resize  (_pmetis->nparts);       std::fill (_pmetis->tpwgts.begin(),  _pmetis->tpwgts.end(),  1./_pmetis->nparts);
  _pmetis->ubvec.resize   (_pmetis->ncon);         std::fill (_pmetis->ubvec.begin(),   _pmetis->ubvec.end(),   1.05);
  _pmetis->part.resize    (n_active_local_elem);   std::fill (_pmetis->part.begin(),    _pmetis->part.end(),    0);
  _pmetis->options.resize (5);
  _pmetis->vwgt.resize    (n_active_local_elem);

  // Set the options
  _pmetis->options[0] = 1;  // don't use default options
  _pmetis->options[1] = 0;  // default (level of timing)
  _pmetis->options[2] = 15; // random seed (default)
  _pmetis->options[3] = 2;  // processor distribution and subdomain distribution are decoupled

  // Find the number of active elements on each processor.  We cannot use
  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
  // elements assigned to pid which are currently stored on the calling
  // processor.  This will not in general be correct for parallel meshes
  // when (pid != mesh.processor_id()).
  _n_active_elem_on_proc.resize(mesh.n_processors());
  mesh.comm().allgather(n_active_local_elem, _n_active_elem_on_proc);

  // Count the total number of active elements in the mesh.  Note we cannot
  // use mesh.n_active_elem() in general since this only returns the number
  // of active elements which are stored on the calling processor.
  // We should not use n_active_elem for any allocation because that would
  // be inherently unscalable, but it can be useful for libmesh_assertions.
  dof_id_type n_active_elem=0;

  // Set up the vtxdist array.  This will be the same on each processor.
  // ***** Consult the Parmetis documentation. *****
  libmesh_assert_equal_to (_pmetis->vtxdist.size(),
                           cast_int<std::size_t>(mesh.n_processors()+1));
  libmesh_assert_equal_to (_pmetis->vtxdist[0], 0);

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      _pmetis->vtxdist[pid+1] = _pmetis->vtxdist[pid] + _n_active_elem_on_proc[pid];
      n_active_elem += _n_active_elem_on_proc[pid];
    }
  libmesh_assert_equal_to (_pmetis->vtxdist.back(), static_cast<Parmetis::idx_t>(n_active_elem));

  // ParMetis expects the elements to be numbered in contiguous blocks
  // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ...
  // Since we only partition active elements we should have no expectation
  // that we currently have such a distribution.  So we need to create it.
  // Also, at the same time we are going to map all the active elements into a
  // globally unique range [0,n_active_elem) which is *independent* of the
  // current partitioning.  This can be fed to ParMetis as the initial
  // partitioning of the subdomains (decoupled from the partitioning of the
  // objects themselves).  This allows us to get the same resultant
  // partitioning independent of the input partitioning.
  MeshTools::BoundingBox bbox =
    MeshTools::bounding_box(mesh);

  _global_index_by_pid_map.clear();

  // Maps active element ids into a contiguous range independent of partitioning.
  // (only needs local scope)
  vectormap<dof_id_type, dof_id_type> global_index_map;

  {
    std::vector<dof_id_type> global_index;

    // create the mapping which is contiguous by processor
    dof_id_type pid_offset=0;
    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
        const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid);

        // Note that we may not have all (or any!) of the active elements which
        // belong on this processor, but by calling this on all processors a
        // unique range in [0,_n_active_elem_on_proc[pid]) is constructed.
        // Only the indices for the elements we pass in are returned in the array.
        MeshCommunication().find_global_indices (mesh.comm(),
                                                 bbox, it, end,
                                                 global_index);

        for (dof_id_type cnt=0; it != end; ++it)
          {
            const Elem * elem = *it;
            libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
            libmesh_assert_less (cnt, global_index.size());
            libmesh_assert_less (global_index[cnt], _n_active_elem_on_proc[pid]);

            _global_index_by_pid_map.insert(std::make_pair(elem->id(), global_index[cnt++] + pid_offset));
          }
//......... the rest of the code is omitted here .........
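A side note on the vtxdist loop in Example 2: it is simply an exclusive prefix sum over the per-processor active-element counts gathered by allgather(). The standalone sketch below (the counts are made-up values) shows the contiguous ownership blocks that result, which is exactly the numbering layout ParMETIS expects.

// Sketch of the vtxdist construction: an exclusive prefix sum over
// per-processor active-element counts.  Counts here are assumptions.
#include <iostream>
#include <vector>

int main ()
{
  // Assumed result of the allgather of n_active_local_elem.
  const std::vector<unsigned int> n_elem_on_proc = {30, 25, 40, 5};

  std::vector<unsigned int> vtxdist(n_elem_on_proc.size() + 1, 0);
  for (std::size_t pid = 0; pid < n_elem_on_proc.size(); pid++)
    vtxdist[pid+1] = vtxdist[pid] + n_elem_on_proc[pid];

  // Processor pid owns the contiguous global index block
  // [vtxdist[pid], vtxdist[pid+1]).
  for (std::size_t pid = 0; pid + 1 < vtxdist.size(); pid++)
    std::cout << "proc " << pid << ": [" << vtxdist[pid]
              << ", " << vtxdist[pid+1] << ")" << std::endl;
  return 0;
}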
Example 3: build_graph
void ParmetisPartitioner::build_graph (const MeshBase & mesh)
{
  // build the graph in distributed CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements.  We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef LIBMESH_BEST_UNORDERED_MULTIMAP<const Elem *, const Elem *>
    map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::const_element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        // If we don't have an interior_parent then there's nothing
        // to look us up.
        if ((elem->dim() >= LIBMESH_DIM) ||
            !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the Elem set methods
            // would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) ||  \
    defined(LIBMESH_HAVE_TR1_UNORDERED_MAP) ||   \
    defined(LIBMESH_HAVE_HASH_MAP) ||            \
    defined(LIBMESH_HAVE_EXT_HASH_MAP)
            interior_to_boundary_map.insert
              (std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert
              (interior_to_boundary_map.begin(),
               std::make_pair(neighbor, elem));
#endif
          }
      }
  }

#ifdef LIBMESH_ENABLE_AMR
  std::vector<const Elem *> neighbors_offspring;
#endif

  std::vector<std::vector<dof_id_type> > graph(n_active_local_elem);
  dof_id_type graph_size=0;

  const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem * elem = *elem_it;

      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const dof_id_type global_index_by_pid =
        _global_index_by_pid_map[elem->id()];

      const dof_id_type local_index =
        global_index_by_pid - first_local_elem;
      libmesh_assert_less (local_index, n_active_local_elem);

      std::vector<dof_id_type> & graph_row = graph[local_index];

      // Loop over the element's neighbors.  An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
        {
          const Elem * neighbor = elem->neighbor(ms);

          if (neighbor != libmesh_nullptr)
            {
              // If the neighbor is active treat it
              // as a connection
              if (neighbor->active())
                {
                  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
                  const dof_id_type neighbor_global_index_by_pid =
                    _global_index_by_pid_map[neighbor->id()];

                  graph_row.push_back(neighbor_global_index_by_pid);
                  graph_size++;
//......... the rest of the code is omitted here .........
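Example 3 accumulates the adjacency as one row vector per local element plus a running graph_size; the elided remainder flattens this into the two flat arrays of the CSR format. The sketch below shows that flattening step in isolation; the xadj/adjncy names follow ParMETIS convention, and the sample rows are made-up values.

// Sketch: flattening a row-per-vertex adjacency list into CSR form
// (xadj offsets + adjncy values), as ParMETIS consumes it.
#include <iostream>
#include <vector>

int main ()
{
  // Assumed adjacency rows for 3 local elements.
  const std::vector<std::vector<unsigned int> > graph =
    {{1, 2}, {0, 2}, {0, 1}};

  std::vector<unsigned int> xadj, adjncy;
  xadj.reserve (graph.size() + 1);

  for (std::size_t r = 0; r < graph.size(); r++)
    {
      xadj.push_back (adjncy.size()); // row r starts here
      adjncy.insert (adjncy.end(), graph[r].begin(), graph[r].end());
    }
  xadj.push_back (adjncy.size());     // one-past-the-end sentinel

  // Row r's neighbors are adjncy[xadj[r]] .. adjncy[xadj[r+1]-1].
  for (std::size_t r = 0; r + 1 < xadj.size(); r++)
    {
      std::cout << "row " << r << ":";
      for (unsigned int j = xadj[r]; j < xadj[r+1]; j++)
        std::cout << ' ' << adjncy[j];
      std::cout << std::endl;
    }
  return 0;
}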
Example 4: build_graph
void ParmetisPartitioner::build_graph (const MeshBase& mesh)
{
  // build the graph in distributed CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  const unsigned int n_active_local_elem = mesh.n_active_local_elem();

  std::vector<const Elem*> neighbors_offspring;

  std::vector<std::vector<unsigned int> > graph(n_active_local_elem);
  unsigned int graph_size=0;

  const unsigned int first_local_elem = _vtxdist[libMesh::processor_id()];

  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem* elem = *elem_it;

      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const unsigned int global_index_by_pid =
        _global_index_by_pid_map[elem->id()];

      const unsigned int local_index =
        global_index_by_pid - first_local_elem;
      libmesh_assert_less (local_index, n_active_local_elem);

      std::vector<unsigned int> & graph_row = graph[local_index];

      // Loop over the element's neighbors.  An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
        {
          const Elem* neighbor = elem->neighbor(ms);

          if (neighbor != NULL)
            {
              // If the neighbor is active treat it
              // as a connection
              if (neighbor->active())
                {
                  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
                  const unsigned int neighbor_global_index_by_pid =
                    _global_index_by_pid_map[neighbor->id()];

                  graph_row.push_back(neighbor_global_index_by_pid);
                  graph_size++;
                }
#ifdef LIBMESH_ENABLE_AMR
              // Otherwise we need to find all of the
              // neighbor's children that are connected to
              // us and add them
              else
                {
                  // The side of the neighbor to which
                  // we are connected
                  const unsigned int ns =
                    neighbor->which_neighbor_am_i (elem);
                  libmesh_assert_less (ns, neighbor->n_neighbors());

                  // Get all the active children (& grandchildren, etc...)
                  // of the neighbor.
                  neighbor->active_family_tree (neighbors_offspring);

                  // Get all the neighbor's children that
                  // live on that side and are thus connected
                  // to us
                  for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
                    {
                      const Elem* child =
                        neighbors_offspring[nc];

                      // This does not assume a level-1 mesh.
                      // Note that since children have sides numbered
                      // coincident with the parent then this is a sufficient test.
                      if (child->neighbor(ns) == elem)
                        {
                          libmesh_assert (child->active());
                          libmesh_assert (_global_index_by_pid_map.count(child->id()));
                          const unsigned int child_global_index_by_pid =
                            _global_index_by_pid_map[child->id()];

                          graph_row.push_back(child_global_index_by_pid);
                          graph_size++;
                        }
                    }
                }
#endif /* ifdef LIBMESH_ENABLE_AMR */
            }
        }
    }

  // Reserve space in the adjacency array
//......... the rest of the code is omitted here .........
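For context, the code elided from Examples 3 and 4 ultimately hands the assembled arrays to ParMETIS. The sketch below shows roughly what that call looks like, assuming the standard ParMETIS_V3_PartKway entry point and the ParMETIS 4.x typedefs idx_t/real_t; the wrapper function, its name, and its parameter choices are illustrative assumptions, not libMesh's actual code.

// Hedged sketch: handing an assembled distributed CSR graph to ParMETIS.
// Requires MPI and ParMETIS to compile; array names mirror the members
// built up in the examples above.
#include <mpi.h>
#include <parmetis.h>
#include <vector>

void partition_with_parmetis (std::vector<idx_t> & vtxdist,
                              std::vector<idx_t> & xadj,
                              std::vector<idx_t> & adjncy,
                              std::vector<idx_t> & vwgt,
                              idx_t nparts,
                              std::vector<idx_t> & part, // output, presized to n_local vertices
                              MPI_Comm comm)
{
  idx_t wgtflag = 2; // weights on vertices only
  idx_t ncon    = 1; // one weight per vertex
  idx_t numflag = 0; // C-style 0-based numbering
  idx_t edgecut = 0; // output: number of edges cut

  std::vector<real_t> tpwgts(nparts, 1.0/nparts); // equal target subdomain sizes
  std::vector<real_t> ubvec(ncon, 1.05);          // 5% imbalance tolerance
  std::vector<idx_t>  options(5, 0);              // options[0]=0: ParMETIS defaults

  ParMETIS_V3_PartKway (&vtxdist[0], &xadj[0], &adjncy[0],
                        &vwgt[0], NULL, // no edge weights
                        &wgtflag, &numflag, &ncon, &nparts,
                        &tpwgts[0], &ubvec[0], &options[0],
                        &edgecut, &part[0], &comm);
}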