本文整理汇总了C++中MeshBase::is_serial方法的典型用法代码示例。如果您正苦于以下问题:C++ MeshBase::is_serial方法的具体用法?C++ MeshBase::is_serial怎么用?C++ MeshBase::is_serial使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MeshBase
的用法示例。
在下文中一共展示了MeshBase::is_serial方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1:
void LocationMap<T>::init(MeshBase& mesh)
{
  // A non-serial mesh requires every processor to enter this
  // routine simultaneously, since collective communication follows.
  if (!mesh.is_serial())
    parallel_only();

  START_LOG("init()", "LocationMap");

  // Discard any stale entries from a previous init().
  _map.clear();

  // Reset the cached bounding box to an "inverted" empty state:
  // lower bound at +max, upper bound at -max, so any real node
  // coordinate will replace them.
  _lower_bound.assign(LIBMESH_DIM, std::numeric_limits<Real>::max());
  _upper_bound.assign(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  // Grow the bounding box around every node this processor can see.
  const MeshBase::node_iterator nodes_end = mesh.nodes_end();
  for (MeshBase::node_iterator n_it = mesh.nodes_begin();
       n_it != nodes_end; ++n_it)
    {
      Node& node = **n_it;
      for (unsigned int d = 0; d != LIBMESH_DIM; ++d)
        {
          if (node(d) < _lower_bound[d])
            _lower_bound[d] = node(d);
          if (node(d) > _upper_bound[d])
            _upper_bound[d] = node(d);
        }
    }

  // On a distributed mesh the local nodes alone may not bound the
  // global mesh, so reduce the box across all processors.
  if (!mesh.is_serial())
    {
      CommWorld.min(_lower_bound);
      CommWorld.max(_upper_bound);
    }

  this->fill(mesh);

  STOP_LOG("init()", "LocationMap");
}
示例2:
void LocationMap<T>::init(MeshBase & mesh)
{
  // Every rank must call this together when the mesh is distributed,
  // because we perform collective reductions below.
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  LOG_SCOPE("init()", "LocationMap");

  // Drop whatever the map held before.
  _map.clear();

  // Start with an inverted ("empty") bounding box: lower bound at
  // +max, upper bound at -max, so the first node seen replaces both.
  _lower_bound.assign(LIBMESH_DIM, std::numeric_limits<Real>::max());
  _upper_bound.assign(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  // Expand the box around every node visible on this processor.
  for (auto & node_ptr : mesh.node_ptr_range())
    {
      const Node & node = *node_ptr;
      for (unsigned int d = 0; d != LIBMESH_DIM; ++d)
        {
          _lower_bound[d] = std::min(_lower_bound[d], node(d));
          _upper_bound[d] = std::max(_upper_bound[d], node(d));
        }
    }

  // On a parallel mesh the local nodes need not bound the whole
  // mesh, so take the global min/max across processors.
  if (!mesh.is_serial())
    {
      mesh.comm().min(_lower_bound);
      mesh.comm().max(_upper_bound);
    }

  this->fill(mesh);
}
示例3: init
//--------------------------------------------------------------------------
void TopologyMap::init(MeshBase& mesh)
{
  // A distributed mesh requires all processors to call this at once.
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  START_LOG("init()", "TopologyMap");

  // Forget the old contents, then rebuild the map from the mesh.
  _map.clear();
  this->fill(mesh);

  STOP_LOG("init()", "TopologyMap");
}
示例4: set_parent_processor_ids
void Partitioner::set_parent_processor_ids(MeshBase & mesh)
{
// Ignore the parameter when !LIBMESH_ENABLE_AMR
libmesh_ignore(mesh);
LOG_SCOPE("set_parent_processor_ids()", "Partitioner");
#ifdef LIBMESH_ENABLE_AMR
// If the mesh is serial we have access to all the elements,
// in particular all the active ones. We can therefore set
// the parent processor ids indirecly through their children, and
// set the subactive processor ids while examining their active
// ancestors.
// By convention a parent is assigned to the minimum processor
// of all its children, and a subactive is assigned to the processor
// of its active ancestor.
if (mesh.is_serial())
{
// Loop over all the active elements in the mesh
MeshBase::element_iterator it = mesh.active_elements_begin();
const MeshBase::element_iterator end = mesh.active_elements_end();
for ( ; it!=end; ++it)
{
Elem * child = *it;
// First set descendents
std::vector<const Elem *> subactive_family;
child->total_family_tree(subactive_family);
for (unsigned int i = 0; i != subactive_family.size(); ++i)
const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
// Then set ancestors
Elem * parent = child->parent();
while (parent)
{
// invalidate the parent id, otherwise the min below
// will not work if the current parent id is less
// than all the children!
parent->invalidate_processor_id();
for (unsigned int c=0; c<parent->n_children(); c++)
{
child = parent->child_ptr(c);
libmesh_assert(child);
libmesh_assert(!child->is_remote());
libmesh_assert_not_equal_to (child->processor_id(), DofObject::invalid_processor_id);
parent->processor_id() = std::min(parent->processor_id(),
child->processor_id());
}
parent = parent->parent();
}
}
}
// When the mesh is parallel we cannot guarantee that parents have access to
// all their children.
else
{
// Setting subactive processor ids is easy: we can guarantee
// that children have access to all their parents.
// Loop over all the active elements in the mesh
MeshBase::element_iterator it = mesh.active_elements_begin();
const MeshBase::element_iterator end = mesh.active_elements_end();
for ( ; it!=end; ++it)
{
Elem * child = *it;
std::vector<const Elem *> subactive_family;
child->total_family_tree(subactive_family);
for (unsigned int i = 0; i != subactive_family.size(); ++i)
const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
}
// When the mesh is parallel we cannot guarantee that parents have access to
// all their children.
// We will use a brute-force approach here. Each processor finds its parent
// elements and sets the parent pid to the minimum of its
// semilocal descendants.
// A global reduction is then performed to make sure the true minimum is found.
// As noted, this is required because we cannot guarantee that a parent has
// access to all its children on any single processor.
libmesh_parallel_only(mesh.comm());
libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
mesh.unpartitioned_elements_end()) == 0);
const dof_id_type max_elem_id = mesh.max_elem_id();
std::vector<processor_id_type>
parent_processor_ids (std::min(communication_blocksize,
max_elem_id));
for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
//.........这里部分代码省略.........
示例5: partition_range
void MetisPartitioner::partition_range(MeshBase & mesh,
MeshBase::element_iterator beg,
MeshBase::element_iterator end,
unsigned int n_pieces)
{
libmesh_assert_greater (n_pieces, 0);
// We don't yet support distributed meshes with this Partitioner
if (!mesh.is_serial())
libmesh_not_implemented();
// Check for an easy return
if (n_pieces == 1)
{
this->single_partition_range (beg, end);
return;
}
// What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS
libmesh_here();
libMesh::err << "ERROR: The library has been built without" << std::endl
<< "Metis support. Using a space-filling curve" << std::endl
<< "partitioner instead!" << std::endl;
SFCPartitioner sfcp;
sfcp.partition_range (mesh, beg, end, n_pieces);
// What to do if the Metis library IS present
#else
LOG_SCOPE("partition_range()", "MetisPartitioner");
const dof_id_type n_range_elem = std::distance(beg, end);
// Metis will only consider the elements in the range.
// We need to map the range element ids into a
// contiguous range. Further, we want the unique range indexing to be
// independent of the element ordering, otherwise a circular dependency
// can result in which the partitioning depends on the ordering which
// depends on the partitioning...
vectormap<dof_id_type, dof_id_type> global_index_map;
global_index_map.reserve (n_range_elem);
{
std::vector<dof_id_type> global_index;
MeshCommunication().find_global_indices (mesh.comm(),
MeshTools::create_bounding_box(mesh),
beg, end, global_index);
libmesh_assert_equal_to (global_index.size(), n_range_elem);
MeshBase::element_iterator it = beg;
for (std::size_t cnt=0; it != end; ++it)
{
const Elem * elem = *it;
global_index_map.insert (std::make_pair(elem->id(), global_index[cnt++]));
}
libmesh_assert_equal_to (global_index_map.size(), n_range_elem);
}
// If we have boundary elements in this mesh, we want to account for
// the connectivity between them and interior elements. We can find
// interior elements from boundary elements, but we need to build up
// a lookup map to do the reverse.
typedef std::unordered_multimap<const Elem *, const Elem *> map_type;
map_type interior_to_boundary_map;
{
MeshBase::element_iterator it = beg;
for (; it != end; ++it)
{
const Elem * elem = *it;
// If we don't have an interior_parent then there's nothing
// to look us up.
if ((elem->dim() >= LIBMESH_DIM) ||
!elem->interior_parent())
continue;
// get all relevant interior elements
std::set<const Elem *> neighbor_set;
elem->find_interior_neighbors(neighbor_set);
std::set<const Elem *>::iterator n_it = neighbor_set.begin();
for (; n_it != neighbor_set.end(); ++n_it)
{
// FIXME - non-const versions of the std::set<const Elem
// *> returning methods would be nice
Elem * neighbor = const_cast<Elem *>(*n_it);
#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) || \
defined(LIBMESH_HAVE_TR1_UNORDERED_MULTIMAP) || \
defined(LIBMESH_HAVE_HASH_MULTIMAP) || \
defined(LIBMESH_HAVE_EXT_HASH_MULTIMAP)
interior_to_boundary_map.insert(std::make_pair(neighbor, elem));
#else
//.........这里部分代码省略.........
示例6: partition_range
void CentroidPartitioner::partition_range(MeshBase & mesh,
                                          MeshBase::element_iterator it,
                                          MeshBase::element_iterator end,
                                          unsigned int n)
{
  // Make sure the user has not handed us an invalid number of
  // partitions.  (Checked up front, before n is used anywhere.)
  libmesh_assert_greater (n, 0);

  // Check for an easy return
  if (n == 1)
    {
      this->single_partition_range (it, end);
      return;
    }

  // We don't yet support distributed meshes with this Partitioner
  if (!mesh.is_serial())
    libmesh_not_implemented();

  // Compute the element centroids.  Note: we used to skip this step
  // if the number of elements was unchanged from the last call, but
  // that doesn't account for elements that have moved a lot since the
  // last time the Partitioner was called...
  this->compute_centroids (it, end);

  // Order the (centroid, element) pairs along the requested axis.
  switch (this->sort_method())
    {
    case X:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_x);
        break;
      }

    case Y:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_y);
        break;
      }

    case Z:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_z);
        break;
      }

    case RADIAL:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_radial);
        break;
      }

    default:
      libmesh_error_msg("Unknown sort method: " << this->sort_method());
    }

  // Compute target_size, the approximate number of elements on each
  // processor.  Clamp it to at least 1: if the range contains fewer
  // elements than n, the integer division would yield 0 and the
  // "i / target_size" below would be a divide-by-zero.
  const dof_id_type target_size =
    std::max (static_cast<dof_id_type>(1),
              static_cast<dof_id_type>(_elem_centroids.size() / n));

  // Assign contiguous runs of target_size sorted elements to each
  // processor id in turn.
  for (dof_id_type i=0; i<_elem_centroids.size(); i++)
    {
      Elem * elem = _elem_centroids[i].second;

      // FIXME: All "extra" elements go on the last processor... this
      // could probably be improved.
      elem->processor_id() =
        std::min (cast_int<processor_id_type>(i / target_size),
                  cast_int<processor_id_type>(n-1));
    }
}
示例7: _do_partition
// ------------------------------------------------------------
// MetisPartitioner implementation
void MetisPartitioner::_do_partition (MeshBase& mesh,
const unsigned int n_pieces)
{
libmesh_assert_greater (n_pieces, 0);
libmesh_assert (mesh.is_serial());
// Check for an easy return
if (n_pieces == 1)
{
this->single_partition (mesh);
return;
}
// What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS
libmesh_here();
libMesh::err << "ERROR: The library has been built without" << std::endl
<< "Metis support. Using a space-filling curve" << std::endl
<< "partitioner instead!" << std::endl;
SFCPartitioner sfcp;
sfcp.partition (mesh, n_pieces);
// What to do if the Metis library IS present
#else
START_LOG("partition()", "MetisPartitioner");
const dof_id_type n_active_elem = mesh.n_active_elem();
// build the graph
// std::vector<int> options(5);
std::vector<int> vwgt(n_active_elem);
std::vector<int> part(n_active_elem);
int
n = static_cast<int>(n_active_elem), // number of "nodes" (elements)
// in the graph
// wgtflag = 2, // weights on vertices only,
// // none on edges
// numflag = 0, // C-style 0-based numbering
nparts = static_cast<int>(n_pieces), // number of subdomains to create
edgecut = 0; // the numbers of edges cut by the
// resulting partition
// Set the options
// options[0] = 0; // use default options
// Metis will only consider the active elements.
// We need to map the active element ids into a
// contiguous range. Further, we want the unique range indexing to be
// independednt of the element ordering, otherwise a circular dependency
// can result in which the partitioning depends on the ordering which
// depends on the partitioning...
std::map<const Elem*, dof_id_type> global_index_map;
{
std::vector<dof_id_type> global_index;
MeshBase::element_iterator it = mesh.active_elements_begin();
const MeshBase::element_iterator end = mesh.active_elements_end();
MeshCommunication().find_global_indices (MeshTools::bounding_box(mesh),
it, end, global_index);
libmesh_assert_equal_to (global_index.size(), n_active_elem);
for (std::size_t cnt=0; it != end; ++it)
{
const Elem *elem = *it;
libmesh_assert (!global_index_map.count(elem));
global_index_map[elem] = global_index[cnt++];
}
libmesh_assert_equal_to (global_index_map.size(), n_active_elem);
}
// build the graph in CSR format. Note that
// the edges in the graph will correspond to
// face neighbors
std::vector<int> xadj, adjncy;
{
std::vector<const Elem*> neighbors_offspring;
MeshBase::element_iterator elem_it = mesh.active_elements_begin();
const MeshBase::element_iterator elem_end = mesh.active_elements_end();
// This will be exact when there is no refinement and all the
// elements are of the same type.
std::size_t graph_size=0;
std::vector<std::vector<dof_id_type> > graph(n_active_elem);
for (; elem_it != elem_end; ++elem_it)
{
const Elem* elem = *elem_it;
//.........这里部分代码省略.........