This article collects typical usage examples of the C++ method stk::mesh::BulkData::parallel. If you are wondering what BulkData::parallel does, how to call it, or where to find working examples, the hand-picked code samples below should help; you can also read further about the enclosing class stk::mesh::BulkData.
Six code examples of BulkData::parallel are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
Example 1: get_non_unique_key_procs
std::vector<stk::mesh::EntityKeyProc> get_non_unique_key_procs(const stk::mesh::BulkData& bulkData)
{
    stk::parallel::DistributedIndex distributedIndex(bulkData.parallel(),
        stk::mesh::impl::convert_entity_keys_to_spans(bulkData.mesh_meta_data()));
    add_keys_to_distributed_index(bulkData, distributedIndex);
    stk::parallel::DistributedIndex::KeyTypeVector localKeys = get_all_local_keys(bulkData);
    return get_non_unique_keys(bulkData, distributedIndex, localKeys);
}
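The helpers add_keys_to_distributed_index, get_all_local_keys, and get_non_unique_keys come from the same source file and are not shown here. A hypothetical call site might look like the following sketch (the variable names and the diagnostic are illustrative only, and <iostream> is assumed to be included):

std::vector<stk::mesh::EntityKeyProc> badKeyProcs = get_non_unique_key_procs(bulkData);
if (!badKeyProcs.empty())
{
    // Each entry pairs a duplicated EntityKey with the processor rank holding it.
    std::cerr << "P" << bulkData.parallel_rank() << ": found "
              << badKeyProcs.size() << " non-unique entity keys" << std::endl;
}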
Example 2: populate_part_ordinals_for_remote_edges
void populate_part_ordinals_for_remote_edges(const stk::mesh::BulkData& bulkData,
                                             const ElemElemGraph& graph,
                                             ParallelPartInfo &parallelPartInfo)
{
    parallelPartInfo.clear();
    stk::CommSparse comm(bulkData.parallel());
    pack_data_for_part_ordinals(comm, graph, bulkData);  // first pass: tally message sizes
    comm.allocate_buffers();
    pack_data_for_part_ordinals(comm, graph, bulkData);  // second pass: fill the sized buffers
    comm.communicate();
    unpack_and_update_part_ordinals(comm, bulkData, graph, parallelPartInfo);
}
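The deliberate double call to pack_data_for_part_ordinals is the standard stk::CommSparse two-phase idiom: the first pass only tallies how many bytes each destination needs, allocate_buffers() sizes the buffers accordingly, and the second pass writes the actual bytes. Below is a minimal self-contained sketch of the same idiom; the int-per-rank payload is made up for illustration, only the CommSparse calls mirror the example, and the header path is as in recent STK layouts:

#include <stk_util/parallel/CommSparse.hpp>

void send_rank_to_others(stk::ParallelMachine world)
{
    stk::CommSparse comm(world);
    const int myRank = comm.parallel_rank();

    for (int phase = 0; phase < 2; ++phase)
    {
        // Identical packing code runs twice: phase 0 only tallies sizes,
        // phase 1 writes bytes into the buffers sized in between.
        for (int p = 0; p < comm.parallel_size(); ++p)
            if (p != myRank)
                comm.send_buffer(p).pack<int>(myRank);

        if (phase == 0)
            comm.allocate_buffers();
        else
            comm.communicate();
    }

    for (int p = 0; p < comm.parallel_size(); ++p)
        while (comm.recv_buffer(p).remaining())
        {
            int senderRank = -1;
            comm.recv_buffer(p).unpack<int>(senderRank);  // senderRank == p here
        }
}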
Example 3: populate_selected_value_for_remote_elements
void populate_selected_value_for_remote_elements(const stk::mesh::BulkData& bulkData,
                                                 ElemElemGraph& graph,
                                                 stk::mesh::Selector selector,
                                                 ParallelSelectedInfo &selInfo)
{
    selInfo.clear();
    stk::CommSparse comm(bulkData.parallel());
    pack_and_communicate_selector(bulkData, comm, graph, selector);
    unpack_and_update_selector_value(comm, bulkData, graph, selInfo);
}
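Compared with Example 2, the sizing pass, allocation, fill pass, and exchange are presumably folded into pack_and_communicate_selector here. A generic wrapper for that pattern could look like the sketch below; it is written from scratch rather than copied from STK, and it assumes the packing functor is safe to run twice:

template <typename PackOp>
void pack_and_communicate_twice(stk::CommSparse &comm, const PackOp &packOp)
{
    packOp();                 // pass 1: tally per-destination message sizes
    comm.allocate_buffers();  // size the send/receive buffers
    packOp();                 // pass 2: write bytes into the sized buffers
    comm.communicate();       // exchange buffers across processors
}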
Example 4: use_case_5_generate_mesh
void use_case_5_generate_mesh(
  const std::string& mesh_options ,
  stk::mesh::BulkData & mesh ,
  const VectorFieldType & node_coord ,
  stk::mesh::Part & hex_block ,
  stk::mesh::Part & quad_shell_block )
{
  mesh.modification_begin();

  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  double t = 0 ;
  size_t num_hex = 0 ;
  size_t num_shell = 0 ;
  size_t num_nodes = 0 ;
  size_t num_block = 0 ;
  int error_flag = 0 ;

  try {
    Iogn::GeneratedMesh gmesh( mesh_options, parallel_size, parallel_rank );

    num_nodes = gmesh.node_count_proc();
    num_block = gmesh.block_count();

    t = stk::wall_time();

    std::vector<int> node_map( num_nodes , 0 );
    gmesh.node_map( node_map );

    {
      for ( size_t i = 1 ; i <= num_block ; ++i ) {
        const size_t num_elem = gmesh.element_count_proc(i);
        const std::pair<std::string,int> top_info = gmesh.topology_type(i);

        std::vector<int> elem_map( num_elem , 0 );
        std::vector<int> elem_conn( num_elem * top_info.second );

        gmesh.element_map( i, elem_map );
        gmesh.connectivity( i , elem_conn );

        if ( top_info.second == 8 ) {
          // Hexahedral elements: eight nodes per element.
          for ( size_t j = 0 ; j < num_elem ; ++j ) {
            const int * const local_node_id = & elem_conn[ j * 8 ] ;

            const stk::mesh::EntityId node_id[8] = {
              local_node_id[0] , local_node_id[1] ,
              local_node_id[2] , local_node_id[3] ,
              local_node_id[4] , local_node_id[5] ,
              local_node_id[6] , local_node_id[7]
            };

            const stk::mesh::EntityId elem_id = elem_map[ j ];

            stk::mesh::fem::declare_element( mesh , hex_block , elem_id , node_id );
            ++num_hex ;
          }
        }
        else if ( top_info.second == 4 ) {
          // Quadrilateral shell elements: four nodes per element.
          for ( size_t j = 0 ; j < num_elem ; ++j ) {
            const int * const local_node_id = & elem_conn[ j * 4 ] ;

            const stk::mesh::EntityId node_id[4] = {
              local_node_id[0] , local_node_id[1] ,
              local_node_id[2] , local_node_id[3]
            };

            const stk::mesh::EntityId elem_id = elem_map[ j ];

            stk::mesh::fem::declare_element( mesh , quad_shell_block , elem_id , node_id );
            ++num_shell ;
          }
        }
      }
    }

    std::vector<double> node_coordinates( 3 * node_map.size() );
    gmesh.coordinates( node_coordinates );

    if ( 3 * node_map.size() != node_coordinates.size() ) {
      std::ostringstream msg ;
      msg << " P" << mesh.parallel_rank()
          << ": ERROR, node_map.size() = "
          << node_map.size()
//......... part of the code is omitted here .........
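The snippet above is cut off inside an error-message branch and the remainder is not recoverable from this page, but the core idiom it demonstrates is declaring elements between modification_begin() and modification_end(). A stripped-down sketch of that idiom follows; the element and node ids are placeholders, not values from the example:

mesh.modification_begin();
const stk::mesh::EntityId elem_id = 1;                               // placeholder id
const stk::mesh::EntityId node_ids[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; // placeholder ids
stk::mesh::fem::declare_element( mesh , hex_block , elem_id , node_ids );
mesh.modification_end();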
Example 5: communicate_field_data
void communicate_field_data( const stk::mesh::BulkData & mesh ,
                             const std::vector< const stk::mesh::FieldBase * > & fields )
{
  if ( fields.empty() ) { return; }

  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );
  std::vector<unsigned> procs ;

  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;
    unsigned size = 0 ;

    for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
          fi = fields.begin() ; fi != fields.end() ; ++fi ) {
      const stk::mesh::FieldBase & f = **fi ;
      size += stk::mesh::field_data_size( f , e );
    }

    if ( size ) {
      if ( e.owner_rank() == parallel_rank ) {
        // owner sends
        stk::mesh::comm_procs( e , procs );
        for ( std::vector<unsigned>::iterator
              ip = procs.begin() ; ip != procs.end() ; ++ip ) {
          send_size[ *ip ] += size ;
        }
      }
      else {
        // non-owner receives
        recv_size[ e.owner_rank() ] += size ;
      }
    }
  }

  // Allocate send and receive buffers:

  stk::CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing:

  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;

    if ( e.owner_rank() == parallel_rank ) {
      stk::mesh::comm_procs( e , procs );

      for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
            fi = fields.begin() ; fi != fields.end() ; ++fi ) {
        const stk::mesh::FieldBase & f = **fi ;
        const unsigned size = stk::mesh::field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(stk::mesh::field_data( f , e ));

          for ( std::vector<unsigned>::iterator
                ip = procs.begin() ; ip != procs.end() ; ++ip ) {
            stk::CommBuffer & b = sparse.send_buffer( *ip );
            b.pack<unsigned char>( ptr , size );
          }
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv:

  for ( std::vector<stk::mesh::Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    stk::mesh::Entity & e = **i ;

    if ( e.owner_rank() != parallel_rank ) {
      for ( std::vector<const stk::mesh::FieldBase *>::const_iterator
            fi = fields.begin() ; fi != fields.end() ; ++fi ) {
        const stk::mesh::FieldBase & f = **fi ;
        const unsigned size = stk::mesh::field_data_size( f , e );
//......... part of the code is omitted here .........
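The unpack loop is truncated here. Judging from the symmetric pack loop above, the omitted body most plausibly copies size bytes from the owner's receive buffer into the local field storage, along the lines of the following guess (an assumption, not the original code):

        if ( size ) { // guessed completion of the truncated loop body
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>( stk::mesh::field_data( f , e ) );
          stk::CommBuffer & b = sparse.recv_buffer( e.owner_rank() );
          b.unpack<unsigned char>( ptr , size );
        }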
Example 6: fixup_ghosted_to_shared_nodes
void fixup_ghosted_to_shared_nodes(stk::mesh::BulkData & bulk)
{
    stk::mesh::EntityVector ghosted_nodes_that_are_now_shared;
    find_ghosted_nodes_that_need_to_be_shared(bulk, ghosted_nodes_that_are_now_shared);

    // Stage one: tell each node's owner which of its nodes this processor
    // has as ghosts but now needs to share (two passes: size, then fill).
    stk::CommSparse comm(bulk.parallel());
    for (int phase=0;phase<2;++phase)
    {
        for (size_t i = 0; i < ghosted_nodes_that_are_now_shared.size(); ++i)
        {
            stk::mesh::Entity node = ghosted_nodes_that_are_now_shared[i];
            int proc = bulk.parallel_owner_rank(node);
            comm.send_buffer(proc).pack<stk::mesh::EntityKey>(bulk.entity_key(node));
        }
        if (phase == 0 )
        {
            comm.allocate_buffers();
        }
        else
        {
            comm.communicate();
        }
    }

    // The owner records each requesting processor as a sharer of the node.
    stk::mesh::EntityVector sharedNodes;
    for (int process=0;process<bulk.parallel_size();++process)
    {
        while(comm.recv_buffer(process).remaining())
        {
            stk::mesh::EntityKey key;
            comm.recv_buffer(process).unpack<stk::mesh::EntityKey>(key);
            stk::mesh::Entity entity = bulk.get_entity(key);
            if ( bulk.state(entity) != stk::mesh::Deleted && bulk.is_valid(entity) )
            {
                bulk.add_node_sharing(entity, process);
                sharedNodes.push_back(entity);
            }
        }
    }

    /////////////////////////

    // Stage two: fan the owner's complete sharing list back out to every sharer.
    stk::CommSparse commSecondStage(bulk.parallel());
    for (int phase=0;phase<2;++phase)
    {
        for (size_t i=0;i<sharedNodes.size();++i)
        {
            std::vector<int> procs;
            stk::mesh::EntityKey key = bulk.entity_key(sharedNodes[i]);
            bulk.comm_shared_procs(key, procs);
            for (size_t j=0;j<procs.size();++j)
            {
                if ( procs[j] != bulk.parallel_rank() )
                {
                    commSecondStage.send_buffer(procs[j]).pack<int>(bulk.parallel_rank()).pack<stk::mesh::EntityKey>(key);
                    for (size_t k=0;k<procs.size();++k)
                    {
                        commSecondStage.send_buffer(procs[j]).pack<int>(procs[k]).pack<stk::mesh::EntityKey>(key);
                    }
                }
            }
        }
        if (phase == 0 )
        {
            commSecondStage.allocate_buffers();
        }
        else
        {
            commSecondStage.communicate();
        }
    }

    // Each sharer records every other sharer it learned about from the owner.
    for (int proc_that_sent_message=0;proc_that_sent_message<bulk.parallel_size();++proc_that_sent_message)
    {
        if ( proc_that_sent_message == bulk.parallel_rank() ) continue;
        while(commSecondStage.recv_buffer(proc_that_sent_message).remaining())
        {
            stk::mesh::EntityKey key;
            int sharingProc;
            commSecondStage.recv_buffer(proc_that_sent_message).unpack<int>(sharingProc).unpack<stk::mesh::EntityKey>(key);
            if ( sharingProc != bulk.parallel_rank() )
            {
                stk::mesh::Entity entity = bulk.get_entity(key);
                if ( bulk.state(entity) != stk::mesh::Deleted && bulk.is_valid(entity) && !bulk.in_shared(key, sharingProc) )
                {
                    bulk.add_node_sharing(entity, sharingProc);
                }
            }
        }
    }
}
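Both communication stages reuse the two-pass CommSparse pattern from Example 2: stage one tells each node's owner which ghosted copies must become shared, and stage two fans the owner's complete sharing list back out to every sharer. Since add_node_sharing may only be called during a modification cycle, a caller would typically wrap the fixup like this (an assumed usage sketch, not taken from STK's sources):

bulk.modification_begin();
fixup_ghosted_to_shared_nodes(bulk);  // promote ghosted node copies to shared
bulk.modification_end();              // sharing is resolved when the cycle closes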
}