

C++ MeshEditor::add_vertex_global Method Code Examples

This article collects typical usage examples of the C++ method MeshEditor::add_vertex_global. If you are unsure what MeshEditor::add_vertex_global does or how to call it, the selected code examples below may help. You can also explore further usage examples of the MeshEditor class, which provides this method.


Two code examples of MeshEditor::add_vertex_global are shown below, ordered by popularity.
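Before the full examples, here is a minimal sketch of the typical call sequence. It assumes the DOLFIN 1.x MeshEditor API that both examples below rely on; the concrete vertex/cell indices and coordinates are invented purely for illustration.

#include <dolfin.h>

using namespace dolfin;

int main()
{
  // Build a single-triangle mesh while assigning explicit global vertex
  // and cell indices, as a parallel mesh builder would.
  Mesh mesh;
  MeshEditor editor;
  editor.open(mesh, 2, 2);  // topological dimension 2, geometric dimension 2

  // Declare the number of local vertices, then add each one via
  // add_vertex_global(local_index, global_index, point).
  editor.init_vertices(3);
  editor.add_vertex_global(0, 10, Point(0.0, 0.0));
  editor.add_vertex_global(1, 11, Point(1.0, 0.0));
  editor.add_vertex_global(2, 12, Point(0.0, 1.0));

  // Declare the number of local cells, then add each one via
  // add_cell(local_index, global_index, local_vertex_indices).
  editor.init_cells(1);
  std::vector<std::size_t> cell(3);
  cell[0] = 0; cell[1] = 1; cell[2] = 2;
  editor.add_cell(0, 0, cell);

  // Finalize the mesh.
  editor.close();

  return 0;
}

The two examples below follow exactly this pattern; the difference is that the local and global indices are derived from a distributed (MPI-parallel) mesh rather than hard-coded.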

Example 1: compute_boundary


//......... (preceding code omitted) .........
      global_indices[global_mesh_index] = current_index++;
  }

  // Send and receive requests from other processes
  std::vector<std::vector<std::size_t>> global_index_requests(num_processes);
  MPI::all_to_all(mesh.mpi_comm(), request_global_indices,
                  global_index_requests);

  // Find response to requests of global indices
  std::vector<std::vector<std::size_t>> respond_global_indices(num_processes);
  for (std::size_t i = 0; i < num_processes; i++)
  {
    const std::size_t N = global_index_requests[i].size();
    respond_global_indices[i].resize(N);

    for (std::size_t j = 0; j < N; j++)
      respond_global_indices[i][j]
        = global_indices[global_index_requests[i][j]];
  }

  // Scatter responses back to requesting processes
  std::vector<std::vector<std::size_t>> global_index_responses(num_processes);
  MPI::all_to_all(mesh.mpi_comm(), respond_global_indices,
                  global_index_responses);

  // Update global_indices
  for (std::size_t i = 0; i < num_processes; i++)
  {
    const std::size_t N = global_index_responses[i].size();
    // Check that responses are the same size as the requests made
    dolfin_assert(global_index_responses[i].size()
                  == request_global_indices[i].size());
    for (std::size_t j = 0; j < N; j++)
    {
      const std::size_t global_mesh_index = request_global_indices[i][j];
      const std::size_t global_boundary_index = global_index_responses[i][j];
      global_indices[global_mesh_index] = global_boundary_index;
    }
  }

  // Create vertices
  for (std::size_t local_boundary_index = 0;
       local_boundary_index < num_boundary_vertices; local_boundary_index++)
  {
    const std::size_t local_mesh_index = vertex_map[local_boundary_index];
    const std::size_t global_mesh_index
      = mesh.topology().global_indices(0)[local_mesh_index];
    const std::size_t global_boundary_index = global_indices[global_mesh_index];

    Vertex v(mesh, local_mesh_index);

    editor.add_vertex_global(local_boundary_index, global_boundary_index,
                             v.point());
  }

  // Find global index to start cell numbering from for current process
  std::vector<std::size_t> cell_distribution(num_processes);
  MPI::all_gather(mesh.mpi_comm(), num_boundary_cells, cell_distribution);
  std::size_t start_cell_index = 0;
  for (std::size_t i = 0; i < my_rank; i++)
    start_cell_index += cell_distribution[i];

  // Create cells (facets) and the map between boundary mesh cells and
  // parent mesh facets
  MeshFunction<std::size_t>& cell_map = boundary.entity_map(D - 1);
  if (num_boundary_cells > 0)
  {
    cell_map.init(reference_to_no_delete_pointer(boundary), D - 1,
                  num_boundary_cells);
  }
  std::vector<std::size_t>
    cell(boundary.type().num_vertices(boundary.topology().dim()));
  std::size_t current_cell = 0;
  for (FacetIterator f(mesh); !f.end(); ++f)
  {
    if (boundary_facet[*f])
    {
      // Compute new vertex numbers for cell
      const unsigned int* vertices = f->entities(0);
      for (std::size_t i = 0; i < cell.size(); i++)
        cell[i] = boundary_vertices[vertices[i]];

      // Reorder vertices so facet is right-oriented w.r.t. facet
      // normal
      reorder(cell, *f);

      // Create mapping from boundary cell to mesh facet if requested
      if (!cell_map.empty())
        cell_map[current_cell] = f->index();

      // Add cell
      editor.add_cell(current_cell, start_cell_index+current_cell, cell);
      current_cell++;
    }
  }

  // Close mesh editor. Note the argument order=false to prevent
  // ordering from destroying the orientation of facets accomplished
  // by calling reorder() above.
  editor.close(false);
}
Developer ID: vincentqb, Project: dolfin, Lines of code: 101, Source: BoundaryComputation.cpp

Example 2: build_mesh

//-----------------------------------------------------------------------------
void MeshPartitioning::build_mesh(Mesh& mesh,
              const std::vector<std::size_t>& global_cell_indices,
              const boost::multi_array<std::size_t, 2>& cell_global_vertices,
              const std::vector<std::size_t>& vertex_indices,
              const boost::multi_array<double, 2>& vertex_coordinates,
              const std::map<std::size_t, std::size_t>& vertex_global_to_local,
              std::size_t tdim, std::size_t gdim, std::size_t num_global_cells,
              std::size_t num_global_vertices)
{
  Timer timer("PARALLEL 3: Build mesh (from local mesh data)");

  // Get number of processes and process number
  const std::size_t num_processes = MPI::num_processes();
  const std::size_t process_number = MPI::process_number();

  // Open mesh for editing
  mesh.clear();
  MeshEditor editor;
  editor.open(mesh, tdim, gdim);

  // Add vertices
  editor.init_vertices(vertex_coordinates.size());
  Point point(gdim);
  dolfin_assert(vertex_indices.size() == vertex_coordinates.size());
  for (std::size_t i = 0; i < vertex_coordinates.size(); ++i)
  {
    for (std::size_t j = 0; j < gdim; ++j)
      point[j] = vertex_coordinates[i][j];
    editor.add_vertex_global(i, vertex_indices[i], point);
  }

  // Add cells
  editor.init_cells(cell_global_vertices.size());
  const std::size_t num_cell_vertices = tdim + 1;
  std::vector<std::size_t> cell(num_cell_vertices);
  for (std::size_t i = 0; i < cell_global_vertices.size(); ++i)
  {
    for (std::size_t j = 0; j < num_cell_vertices; ++j)
    {
      // Get local cell vertex
      std::map<std::size_t, std::size_t>::const_iterator iter
          = vertex_global_to_local.find(cell_global_vertices[i][j]);
      dolfin_assert(iter != vertex_global_to_local.end());
      cell[j] = iter->second;
    }
    editor.add_cell(i, global_cell_indices[i], cell);
  }

  // Close mesh: Note that this must be done after creating the global
  // vertex map or otherwise the ordering in mesh.close() will be wrong
  // (based on local numbers).
  editor.close();

  // Set global number of cells and vertices
  mesh.topology().init_global(0, num_global_vertices);
  mesh.topology().init_global(tdim,  num_global_cells);

  // Construct boundary mesh
  BoundaryMesh bmesh(mesh, "exterior");

  const MeshFunction<std::size_t>& boundary_vertex_map = bmesh.entity_map(0);
  const std::size_t boundary_size = boundary_vertex_map.size();

  // Build sorted array of global boundary vertex indices (global
  // numbering)
  std::vector<std::size_t> global_vertex_send(boundary_size);
  for (std::size_t i = 0; i < boundary_size; ++i)
    global_vertex_send[i] = vertex_indices[boundary_vertex_map[i]];
  std::sort(global_vertex_send.begin(), global_vertex_send.end());

  // Receive buffer
  std::vector<std::size_t> global_vertex_recv;

  // Create shared_vertices data structure: mapping from shared vertices
  // to list of neighboring processes
  std::map<unsigned int, std::set<unsigned int> >& shared_vertices
        = mesh.topology().shared_entities(0);
  shared_vertices.clear();

  // FIXME: Remove computation from inside communication loop

  // Build shared vertex to sharing processes map
  for (std::size_t i = 1; i < num_processes; ++i)
  {
    // We send data to process p - i (i steps to the left)
    const int p = (process_number - i + num_processes) % num_processes;

    // We receive data from process p + i (i steps to the right)
    const int q = (process_number + i) % num_processes;

    // Send and receive
    MPI::send_recv(global_vertex_send, p, global_vertex_recv, q);

    // Compute intersection of global indices
    std::vector<std::size_t> intersection(std::min(global_vertex_send.size(),
                                                   global_vertex_recv.size()));
    std::vector<std::size_t>::iterator intersection_end
      = std::set_intersection(global_vertex_send.begin(),
                              global_vertex_send.end(),
//......... (remaining code omitted) .........
Developer ID: maciekswat, Project: dolfin_1.3.0, Lines of code: 101, Source: MeshPartitioning.cpp


Note: The MeshEditor::add_vertex_global examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow the corresponding project licenses. Please do not reproduce without permission.