This article collects typical usage examples of the C++ method GenericTensor::rank. If you have been wondering what GenericTensor::rank does in C++ and how to use it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, GenericTensor.
The following shows 10 code examples of the GenericTensor::rank method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
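As a quick orientation before the examples: rank() reports the arity of a tensor (0 for a scalar, 1 for a vector, 2 for a matrix), which is why the assemblers below use it to decide when matrix-only operations such as ident_zeros apply. A minimal sketch, assuming the DOLFIN C++ API used throughout these examples and an FFC-generated form header Poisson.h (the header name is an assumption, not part of the examples below):

#include <memory>
#include <dolfin.h>
#include "Poisson.h"  // assumed: generated by FFC from a Poisson.ufl file

using namespace dolfin;

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);

  // Assemble the bilinear form into a matrix-backed GenericTensor
  Poisson::BilinearForm a(V, V);
  Matrix A;
  assemble(A, a);

  // rank() gives the tensor arity: 0 = scalar, 1 = vector, 2 = matrix
  if (A.rank() == 2)
    info("Assembled a %d x %d matrix", (int) A.size(0), (int) A.size(1));

  return 0;
}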
Example 1: assemble
//-----------------------------------------------------------------------------
void MultiMeshAssembler::assemble(GenericTensor& A, const MultiMeshForm& a)
{
  // Developer note: This implementation does not yet handle
  // - subdomains
  // - interior facets
  // - exterior facets

  begin(PROGRESS, "Assembling tensor over multimesh function space.");

  // Initialize global tensor
  _init_global_tensor(A, a);

  // Assemble over uncut cells
  _assemble_uncut_cells(A, a);

  // Assemble over cut cells
  _assemble_cut_cells(A, a);

  // Assemble over interface
  _assemble_interface(A, a);

  // Assemble over overlap
  _assemble_overlap(A, a);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");

  // Lock any remaining inactive dofs
  if (A.rank() == 2)
    static_cast<GenericMatrix&>(A).ident_zeros();

  end();
}
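The example shows only the assembler side; the tensor A and the multimesh form a come from the caller. A rough driver sketch, assuming an FFC-generated multimesh form header MultiMeshPoisson.h and the MultiMesh/MultiMeshFunctionSpace/MultiMeshForm build sequence of the DOLFIN multimesh demos (all of these names are assumptions):

#include <memory>
#include <dolfin.h>
#include "MultiMeshPoisson.h"  // assumed: FFC-generated form header

using namespace dolfin;

int main()
{
  // Two overlapping meshes make up the multimesh
  auto mesh_0 = std::make_shared<UnitSquareMesh>(16, 16);
  auto mesh_1 = std::make_shared<RectangleMesh>(Point(0.25, 0.25),
                                                Point(0.75, 0.75), 8, 8);
  auto multimesh = std::make_shared<MultiMesh>();
  multimesh->add(mesh_0);
  multimesh->add(mesh_1);
  multimesh->build();

  // One standard space and part form per mesh part
  auto V0 = std::make_shared<MultiMeshPoisson::FunctionSpace>(mesh_0);
  auto V1 = std::make_shared<MultiMeshPoisson::FunctionSpace>(mesh_1);
  auto a0 = std::make_shared<MultiMeshPoisson::BilinearForm>(V0, V0);
  auto a1 = std::make_shared<MultiMeshPoisson::BilinearForm>(V1, V1);

  // Collect the parts into a multimesh space and form
  auto V = std::make_shared<MultiMeshFunctionSpace>(multimesh);
  V->add(V0);
  V->add(V1);
  V->build();

  MultiMeshForm a(V, V);
  a.add(a0);
  a.add(a1);
  a.build();

  // Assemble with the assembler from Example 1
  Matrix A;
  MultiMeshAssembler assembler;
  assembler.assemble(A, a);

  return 0;
}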
Example 2: init_global_tensor
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.empty())
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions and mapping across processes for each dimension
    std::vector<std::shared_ptr<const IndexMap>> index_maps;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      index_maps.push_back(dofmaps[i]->index_map());
    }

    // Initialise tensor layout
    // FIXME: somewhere need to check block sizes are same on both axes
    // NOTE: Jan: that will be done on the backend side; IndexMap will
    //       provide tabulate functions with arbitrary block size;
    //       moreover the functions will tabulate directly using a
    //       correct int type
    tensor_layout->init(a.mesh().mpi_comm(), index_maps,
                        TensorLayout::Ghosts::UNGHOSTED);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    a.ufc_form()->has_vertex_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be
    // prematurely optimised away by the linear algebra backend when
    // calling GenericMatrix::apply, e.g. PETSc does this and then
    // errors when a matrix has no diagonal entries.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _matA = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _matA.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Dim %d of tensor does not match form", i);
      }
    }
  }

  if (!add_values)
    A.zero();
}
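The branches above are steered by three public flags inherited from AssemblerBase: keep_diagonal (insert explicit diagonal zeros), add_values (skip the final A.zero()), and finalize_tensor (apply the tensor in the assembler, as seen in Example 1). A small sketch of how a caller toggles them, again assuming an FFC-generated Poisson.h:

#include <memory>
#include <dolfin.h>
#include "Poisson.h"  // assumed: FFC-generated form header

using namespace dolfin;

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  Matrix A;
  Assembler assembler;

  // Flags inherited from AssemblerBase and read by init_global_tensor
  assembler.keep_diagonal = true;   // seed the diagonal with explicit zeros
  assembler.add_values = false;     // start from a zeroed tensor
  assembler.assemble(A, a);

  // Assembling again into the now-initialized tensor accumulates,
  // so A ends up holding the form assembled twice
  assembler.add_values = true;
  assembler.assemble(A, a);

  return 0;
}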
Example 3: init_global_tensor
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.size(0) == 0)
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions
    std::vector<std::size_t> global_dimensions;
    std::vector<std::pair<std::size_t, std::size_t>> local_range;
    std::vector<std::size_t> block_sizes;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      global_dimensions.push_back(dofmaps[i]->global_dimension());
      local_range.push_back(dofmaps[i]->ownership_range());
      block_sizes.push_back(dofmaps[i]->block_size);
    }

    // Set block size for sparsity graphs
    std::size_t block_size = 1;
    if (a.rank() == 2)
    {
      const std::vector<std::size_t> _bs(a.rank(), dofmaps[0]->block_size);
      block_size = (block_sizes == _bs) ? dofmaps[0]->block_size : 1;
    }

    // Initialise tensor layout
    tensor_layout->init(a.mesh().mpi_comm(), global_dimensions, block_size,
                        local_range);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be
    // prematurely optimised away by the linear algebra backend when
    // calling GenericMatrix::apply, e.g. PETSc does this and then
    // errors when a matrix has no diagonal entries.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _A = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _A.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Reset of tensor in assembly not requested, but dim %d of tensor does not match form", i);
      }
    }
  }
//......... (part of the code is omitted here) .........
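The block-size selection above reduces to one rule: use the shared dofmap block size only when every axis of a rank-2 form reports the same value, otherwise fall back to 1. The same rule in isolation (plain C++, not DOLFIN API; the helper name is ours):

#include <cstddef>
#include <vector>

// Pick a common block size for the sparsity graph: the dofmaps' block
// size is used only if all axes agree on it, otherwise fall back to 1.
std::size_t common_block_size(const std::vector<std::size_t>& block_sizes)
{
  if (block_sizes.empty())
    return 1;
  const std::vector<std::size_t> uniform(block_sizes.size(), block_sizes[0]);
  return (block_sizes == uniform) ? block_sizes[0] : 1;
}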
Example 4: assemble_vertices
//-----------------------------------------------------------------------------
void Assembler::assemble_vertices(
  GenericTensor& A,
  const Form& a,
  UFC& ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> domains)
{
  // Skip assembly if there are no point integrals
  if (!ufc.form.has_vertex_integrals())
    return;

  // Set timer
  Timer timer("Assemble vertices");

  // Extract mesh
  const Mesh& mesh = a.mesh();

  // Compute cell and vertex - cell connectivity if not already
  // computed
  const std::size_t D = mesh.topology().dim();
  mesh.init(0);
  mesh.init(0, D);
  dolfin_assert(mesh.ordered());

  // Logic for shared vertices
  const bool has_shared_vertices = mesh.topology().have_shared_entities(0);
  const std::map<unsigned int, std::set<unsigned int>>&
    shared_vertices = mesh.topology().shared_entities(0);

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps(form_rank);

  // Create a vector for storing the local-to-local map for vertex
  // entity dofs
  std::vector<std::vector<std::size_t>> local_to_local_dofs(form_rank);

  // Create a values vector to be used to fan out local tabulated
  // values to the global tensor
  std::vector<double> local_values(1);

  // Vector to hold local dof map for a vertex
  std::vector<std::vector<dolfin::la_index>> global_dofs(form_rank);
  std::vector<ArrayView<const dolfin::la_index>> global_dofs_p(form_rank);
  std::vector<dolfin::la_index> local_dof_size(form_rank);
  for (std::size_t i = 0; i < form_rank; ++i)
  {
    dofmaps[i] = a.function_space(i)->dofmap().get();

    // Check that the test and trial spaces have dofs on the vertices
    if (dofmaps[i]->num_entity_dofs(0) == 0)
    {
      dolfin_error("Assembler.cpp",
                   "assemble form over vertices",
                   "Expecting test and trial spaces to have dofs on "\
                   "vertices for point integrals");
    }

    // Check that the test and trial spaces do not have dofs other
    // than on vertices
    for (std::size_t j = 1; j <= D; j++)
    {
      if (dofmaps[i]->num_entity_dofs(j) != 0)
      {
        dolfin_error("Assembler.cpp",
                     "assemble form over vertices",
                     "Expecting test and trial spaces to only have dofs on " \
                     "vertices for point integrals");
      }
    }

    // Resize local values so it can hold dofs on one vertex
    local_values.resize(local_values.size()*dofmaps[i]->num_entity_dofs(0));

    // Resize local-to-local map according to the number of vertex
    // entity dofs
    local_to_local_dofs[i].resize(dofmaps[i]->num_entity_dofs(0));

    // Resize local dof map vector
    global_dofs[i].resize(dofmaps[i]->num_entity_dofs(0));

    // Get size of local dofs
    local_dof_size[i] = dofmaps[i]->ownership_range().second
      - dofmaps[i]->ownership_range().first;

    // Get pointer to global dofs
    global_dofs_p[i].set(global_dofs[i]);
  }

  // Vector to hold dof map for a cell
  std::vector<ArrayView<const dolfin::la_index>> dofs(form_rank);

  // Exterior point integral
  const ufc::vertex_integral* integral
    = ufc.default_vertex_integral.get();

  // Check whether integral is domain-dependent
  bool use_domains = domains && !domains->empty();
//......... (part of the code is omitted here) .........
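The two checks inside the loop encode the requirement that point integrals place every dof on a vertex. A standalone restatement of that check, written against the GenericDofMap interface used above (the helper name is ours):

#include <dolfin.h>

// Point (vertex) integrals require the test and trial spaces to have
// dofs on vertices (dimension 0) and nowhere else (dimensions 1..tdim).
bool suitable_for_vertex_integrals(const dolfin::GenericDofMap& dofmap,
                                   std::size_t tdim)
{
  if (dofmap.num_entity_dofs(0) == 0)
    return false;  // no dofs on vertices at all
  for (std::size_t d = 1; d <= tdim; ++d)
  {
    if (dofmap.num_entity_dofs(d) != 0)
      return false;  // dofs on edges, facets or cells are not allowed
  }
  return true;
}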
Example 5: assemble_interior_facets
//-----------------------------------------------------------------------------
void Assembler::assemble_interior_facets(
  GenericTensor& A,
  const Form& a,
  UFC& ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> domains,
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains,
  std::vector<double>* values)
{
  // Skip assembly if there are no interior facet integrals
  if (!ufc.form.has_interior_facet_integrals())
    return;

  // Set timer
  Timer timer("Assemble interior facets");

  // Extract mesh and coefficients
  const Mesh& mesh = a.mesh();

  // MPI rank
  const int my_mpi_rank = MPI::rank(mesh.mpi_comm());

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < form_rank; ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  // Vector to hold dofs for cells, and a vector holding pointers to same
  std::vector<std::vector<dolfin::la_index>> macro_dofs(form_rank);
  std::vector<ArrayView<const dolfin::la_index>> macro_dof_ptrs(form_rank);

  // Interior facet integral
  const ufc::interior_facet_integral* integral
    = ufc.default_interior_facet_integral.get();

  // Check whether integral is domain-dependent
  bool use_domains = domains && !domains->empty();
  bool use_cell_domains = cell_domains && !cell_domains->empty();

  // Compute facets and facet - cell connectivity if not already computed
  const std::size_t D = mesh.topology().dim();
  mesh.init(D - 1);
  mesh.init(D - 1, D);
  dolfin_assert(mesh.ordered());

  // Assemble over interior facets (the facets of the mesh)
  ufc::cell ufc_cell[2];
  std::vector<double> coordinate_dofs[2];
  Progress p(AssemblerBase::progress_message(A.rank(), "interior facets"),
             mesh.num_facets());
  for (FacetIterator facet(mesh); !facet.end(); ++facet)
  {
    if (facet->num_entities(D) == 1)
      continue;

    // Check that facet is not a ghost
    dolfin_assert(!facet->is_ghost());

    // Get integral for sub domain (if any)
    if (use_domains)
      integral = ufc.get_interior_facet_integral((*domains)[*facet]);

    // Skip integral if zero
    if (!integral)
      continue;

    // Get cells incident with facet (the labelling 0 and 1 is arbitrary)
    dolfin_assert(facet->num_entities(D) == 2);
    std::size_t cell_index_plus = facet->entities(D)[0];
    std::size_t cell_index_minus = facet->entities(D)[1];
    if (use_cell_domains && (*cell_domains)[cell_index_plus]
        < (*cell_domains)[cell_index_minus])
    {
      std::swap(cell_index_plus, cell_index_minus);
    }

    // The convention '+' = 0, '-' = 1 is from FFC
    const Cell cell0(mesh, cell_index_plus);
    const Cell cell1(mesh, cell_index_minus);

    // Get local index of facet with respect to each cell
    std::size_t local_facet0 = cell0.index(*facet);
    std::size_t local_facet1 = cell1.index(*facet);

    // Update to current pair of cells
    cell0.get_cell_data(ufc_cell[0], local_facet0);
    cell0.get_coordinate_dofs(coordinate_dofs[0]);
    cell1.get_cell_data(ufc_cell[1], local_facet1);
    cell1.get_coordinate_dofs(coordinate_dofs[1]);
    ufc.update(cell0, coordinate_dofs[0], ufc_cell[0],
               cell1, coordinate_dofs[1], ufc_cell[1],
               integral->enabled_coefficients());

    // Tabulate dofs for each dimension on macro element
    for (std::size_t i = 0; i < form_rank; i++)
//......... (part of the code is omitted here) .........
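The swap near the top of the loop makes the '+'/'-' orientation deterministic: with cell domain markers present, the cell carrying the larger (or equal) marker becomes the '+' cell. The rule in isolation (plain C++; the function name is ours):

#include <cstddef>
#include <utility>

// FFC convention: '+' = cell 0, '-' = cell 1. Swap the pair whenever
// the '+' cell has the smaller domain marker, so the orientation of an
// interior facet no longer depends on mesh numbering.
void orient_facet_cells(std::size_t& cell_plus, std::size_t& cell_minus,
                        std::size_t marker_plus, std::size_t marker_minus)
{
  if (marker_plus < marker_minus)
    std::swap(cell_plus, cell_minus);
}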
Example 6: assemble_exterior_facets
//-----------------------------------------------------------------------------
void Assembler::assemble_exterior_facets(
  GenericTensor& A,
  const Form& a,
  UFC& ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> domains,
  std::vector<double>* values)
{
  // Skip assembly if there are no exterior facet integrals
  if (!ufc.form.has_exterior_facet_integrals())
    return;

  // Set timer
  Timer timer("Assemble exterior facets");

  // Extract mesh
  const Mesh& mesh = a.mesh();

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < form_rank; ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  // Vector to hold dof map for a cell
  std::vector<ArrayView<const dolfin::la_index>> dofs(form_rank);

  // Exterior facet integral
  const ufc::exterior_facet_integral* integral
    = ufc.default_exterior_facet_integral.get();

  // Check whether integral is domain-dependent
  bool use_domains = domains && !domains->empty();

  // Compute facets and facet - cell connectivity if not already computed
  const std::size_t D = mesh.topology().dim();
  mesh.init(D - 1);
  mesh.init(D - 1, D);
  dolfin_assert(mesh.ordered());

  // Assemble over exterior facets (the cells of the boundary)
  ufc::cell ufc_cell;
  std::vector<double> coordinate_dofs;
  Progress p(AssemblerBase::progress_message(A.rank(), "exterior facets"),
             mesh.num_facets());
  for (FacetIterator facet(mesh); !facet.end(); ++facet)
  {
    // Only consider exterior facets
    if (!facet->exterior())
    {
      p++;
      continue;
    }

    // Get integral for sub domain (if any)
    if (use_domains)
      integral = ufc.get_exterior_facet_integral((*domains)[*facet]);

    // Skip integral if zero
    if (!integral)
      continue;

    // Get mesh cell to which mesh facet belongs (pick first, there is
    // only one)
    dolfin_assert(facet->num_entities(D) == 1);
    Cell mesh_cell(mesh, facet->entities(D)[0]);

    // Check that cell is not a ghost
    dolfin_assert(!mesh_cell.is_ghost());

    // Get local index of facet with respect to the cell
    const std::size_t local_facet = mesh_cell.index(*facet);

    // Update UFC cell
    mesh_cell.get_cell_data(ufc_cell, local_facet);
    mesh_cell.get_coordinate_dofs(coordinate_dofs);

    // Update UFC object
    ufc.update(mesh_cell, coordinate_dofs, ufc_cell,
               integral->enabled_coefficients());

    // Get local-to-global dof maps for cell
    for (std::size_t i = 0; i < form_rank; ++i)
      dofs[i] = dofmaps[i]->cell_dofs(mesh_cell.index());

    // Tabulate exterior facet tensor
    integral->tabulate_tensor(ufc.A.data(),
                              ufc.w(),
                              coordinate_dofs.data(),
                              local_facet,
                              ufc_cell.orientation);

    // Add entries to global tensor
    A.add_local(ufc.A.data(), dofs);

    p++;
  }
}
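Domain-dependent exterior facet integrals are selected via the domains MeshFunction, which callers attach to the form. A sketch of that wiring, assuming an FFC-generated Poisson.h whose bilinear form carries a ds(1) boundary term (the header name and marker value are assumptions):

#include <memory>
#include <dolfin.h>
#include "Poisson.h"  // assumed: FFC-generated, with a term on ds(1)

using namespace dolfin;

// Sub domain for the left boundary x = 0
class Left : public SubDomain
{
  bool inside(const Array<double>& x, bool on_boundary) const
  { return on_boundary && near(x[0], 0.0); }
};

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  // Mark facets: 0 everywhere, 1 on the left boundary
  auto markers = std::make_shared<MeshFunction<std::size_t>>(
    mesh, mesh->topology().dim() - 1, 0);
  Left left;
  left.mark(*markers, 1);

  // The assembler's 'domains' argument comes from this attachment
  a.set_exterior_facet_domains(markers);

  Matrix A;
  assemble(A, a);

  return 0;
}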
Example 7: assemble_cells
//-----------------------------------------------------------------------------
void Assembler::assemble_cells(
  GenericTensor& A,
  const Form& a,
  UFC& ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> domains,
  std::vector<double>* values)
{
  // Skip assembly if there are no cell integrals
  if (!ufc.form.has_cell_integrals())
    return;

  // Set timer
  Timer timer("Assemble cells");

  // Extract mesh
  const Mesh& mesh = a.mesh();

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Check if form is a functional
  const bool is_cell_functional = (values && form_rank == 0);

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < form_rank; ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  // Vector to hold dof map for a cell
  std::vector<ArrayView<const dolfin::la_index>> dofs(form_rank);

  // Cell integral
  ufc::cell_integral* integral = ufc.default_cell_integral.get();

  // Check whether integral is domain-dependent
  bool use_domains = domains && !domains->empty();

  // Assemble over cells
  ufc::cell ufc_cell;
  std::vector<double> coordinate_dofs;
  Progress p(AssemblerBase::progress_message(A.rank(), "cells"),
             mesh.num_cells());
  for (CellIterator cell(mesh); !cell.end(); ++cell)
  {
    // Get integral for sub domain (if any)
    if (use_domains)
      integral = ufc.get_cell_integral((*domains)[*cell]);

    // Skip if no integral on current domain
    if (!integral)
      continue;

    // Check that cell is not a ghost
    dolfin_assert(!cell->is_ghost());

    // Update to current cell
    cell->get_cell_data(ufc_cell);
    cell->get_coordinate_dofs(coordinate_dofs);
    ufc.update(*cell, coordinate_dofs, ufc_cell,
               integral->enabled_coefficients());

    // Get local-to-global dof maps for cell
    bool empty_dofmap = false;
    for (std::size_t i = 0; i < form_rank; ++i)
    {
      dofs[i] = dofmaps[i]->cell_dofs(cell->index());
      empty_dofmap = empty_dofmap || dofs[i].size() == 0;
    }

    // Skip if at least one dofmap is empty
    if (empty_dofmap)
      continue;

    // Tabulate cell tensor
    integral->tabulate_tensor(ufc.A.data(), ufc.w(),
                              coordinate_dofs.data(),
                              ufc_cell.orientation);

    // Add entries to global tensor: either store values cell-by-cell
    // (currently only available for functionals) or add to the tensor
    if (is_cell_functional)
      (*values)[cell->index()] = ufc.A[0];
    else
      A.add_local(ufc.A.data(), dofs);

    p++;
  }
}
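The values branch stores one number per cell and only applies to rank-0 forms; the common entry point instead reduces everything to a single scalar via the scalar overload of assemble. A sketch, assuming an FFC-generated functional header Energy.h (an assumption, like Poisson.h above):

#include <memory>
#include <dolfin.h>
#include "Energy.h"  // assumed: FFC-generated header for a rank-0 form

using namespace dolfin;

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);

  // A functional (rank-0 form) over the mesh
  Energy::Functional M(mesh);

  // Rank-0 assembly reduces all cell contributions to one number
  const double value = assemble(M);
  info("Functional value: %g", value);

  return 0;
}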
Example 8: assemble_interior_facets
//-----------------------------------------------------------------------------
void OpenMpAssembler::assemble_interior_facets(
  GenericTensor& A,
  const Form& a, UFC& _ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> domains,
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains,
  std::vector<double>* values)
{
  warning("OpenMpAssembler::assemble_interior_facets is untested.");

  // Extract mesh
  const Mesh& mesh = a.mesh();

  // Topological dimension
  const std::size_t D = mesh.topology().dim();

  dolfin_assert(!values);

  // Skip assembly if there are no interior facet integrals
  if (!_ufc.form.has_interior_facet_integrals())
    return;

  Timer timer("Assemble interior facets");

  // Set number of OpenMP threads (from parameter systems)
  omp_set_num_threads(parameters["num_threads"]);

  // Get integral for sub domain (if any)
  bool use_domains = domains && !domains->empty();
  bool use_cell_domains = cell_domains && !cell_domains->empty();
  if (use_domains)
  {
    dolfin_error("OpenMPAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "Subdomains are not yet handled");
  }

  // Color mesh
  std::vector<std::size_t> coloring_type = a.coloring(D - 1);
  mesh.color(coloring_type);

  // Dummy UFC object since each thread needs to create its own UFC object
  UFC ufc(_ufc);

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < form_rank; ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  // Vector to hold dofs for cells
  std::vector<std::vector<dolfin::la_index>> macro_dofs(form_rank);

  // Interior facet integral
  const ufc::interior_facet_integral* integral
    = ufc.default_interior_facet_integral.get();

  // Compute facets and facet - cell connectivity if not already computed
  mesh.init(D - 1);
  mesh.init(D - 1, D);
  dolfin_assert(mesh.ordered());

  // Get coloring data
  std::map<const std::vector<std::size_t>,
           std::pair<std::vector<std::size_t>,
                     std::vector<std::vector<std::size_t>>>>::const_iterator
    mesh_coloring;
  mesh_coloring = mesh.topology().coloring.find(coloring_type);

  // Check that requested coloring has been computed
  if (mesh_coloring == mesh.topology().coloring.end())
  {
    dolfin_error("OpenMPAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "Requested mesh coloring has not been computed");
  }

  // Get coloring data
  const std::vector<std::vector<std::size_t>>& entities_of_color
    = mesh_coloring->second.second;

  // UFC cells and vertex coordinates
  ufc::cell ufc_cell0, ufc_cell1;
  std::vector<double> vertex_coordinates0, vertex_coordinates1;

  // Assemble over interior facets (loop over colors, then cells of same color)
  const std::size_t num_colors = entities_of_color.size();
  for (std::size_t color = 0; color < num_colors; ++color)
  {
    // Get the array of facet indices of current color
    const std::vector<std::size_t>& colored_facets = entities_of_color[color];

    // Number of facets of current color
    const int num_facets = colored_facets.size();

    // OpenMP test loop over cells of the same color
    Progress p(AssemblerBase::progress_message(A.rank(), "interior facets"),
//......... (part of the code is omitted here) .........
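The thread count above is read from the global parameter system. In the DOLFIN versions that ship OpenMpAssembler, setting it is all a caller needs to do; plain assemble then runs multithreaded (sketch, Poisson.h assumed as before):

#include <memory>
#include <dolfin.h>
#include "Poisson.h"  // assumed: FFC-generated form header

using namespace dolfin;

int main()
{
  // Request multithreaded assembly; 0 (the default) keeps it serial
  parameters["num_threads"] = 4;

  auto mesh = std::make_shared<UnitSquareMesh>(64, 64);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  Matrix A;
  assemble(A, a);  // routed to OpenMpAssembler when num_threads > 0

  return 0;
}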
Example 9: assemble_cells_and_exterior_facets
//-----------------------------------------------------------------------------
void OpenMpAssembler::assemble_cells_and_exterior_facets(
  GenericTensor& A,
  const Form& a, UFC& _ufc,
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains,
  std::shared_ptr<const MeshFunction<std::size_t>> exterior_facet_domains,
  std::vector<double>* values)
{
  Timer timer("Assemble cells and exterior facets");

  // Set number of OpenMP threads (from parameter systems)
  const int num_threads = parameters["num_threads"];
  omp_set_num_threads(num_threads);

  // Extract mesh
  const Mesh& mesh = a.mesh();

  // Compute facets and facet - cell connectivity if not already computed
  const std::size_t D = mesh.topology().dim();
  mesh.init(D - 1);
  mesh.init(D - 1, D);
  dolfin_assert(mesh.ordered());

  // Get connectivity
  const MeshConnectivity& connectivity = mesh.topology()(D, D - 1);
  dolfin_assert(!connectivity.empty());

  // Dummy UFC object since each thread needs to create its own UFC object
  UFC ufc(_ufc);

  // Form rank
  const std::size_t form_rank = ufc.form.rank();

  // Cell and facet integrals
  ufc::cell_integral* cell_integral = ufc.default_cell_integral.get();
  ufc::exterior_facet_integral* facet_integral
    = ufc.default_exterior_facet_integral.get();

  // Check whether integrals are domain-dependent
  bool use_cell_domains = cell_domains && !cell_domains->empty();
  bool use_exterior_facet_domains
    = exterior_facet_domains && !exterior_facet_domains->empty();

  // Collect pointers to dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < form_rank; ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  // Vector to hold dof maps for a cell
  std::vector<ArrayView<const dolfin::la_index>> dofs(form_rank);

  // FIXME: Pass or determine coloring type
  // Define graph type
  std::vector<std::size_t> coloring_type = a.coloring(mesh.topology().dim());
  mesh.color(coloring_type);

  // Get coloring data
  std::map<const std::vector<std::size_t>,
           std::pair<std::vector<std::size_t>,
                     std::vector<std::vector<std::size_t>>>>::const_iterator
    mesh_coloring;
  mesh_coloring = mesh.topology().coloring.find(coloring_type);
  if (mesh_coloring == mesh.topology().coloring.end())
  {
    dolfin_error("OpenMPAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "Requested mesh coloring has not been computed");
  }

  // Get coloring data
  const std::vector<std::vector<std::size_t>>& entities_of_color
    = mesh_coloring->second.second;

  // If assembling a scalar we need to ensure each thread assembles
  // its own scalar
  std::vector<double> scalars(num_threads, 0.0);

  // UFC cell and vertex coordinates
  ufc::cell ufc_cell;
  std::vector<double> vertex_coordinates;

  // Assemble over cells (loop over colors, then cells of same color)
  const std::size_t num_colors = entities_of_color.size();
  for (std::size_t color = 0; color < num_colors; ++color)
  {
    // Get the array of cell indices of current color
    const std::vector<std::size_t>& colored_cells = entities_of_color[color];

    // Number of cells of current color
    const int num_cell_in_color = colored_cells.size();

    // OpenMP test loop over cells of the same color
    Progress p(AssemblerBase::progress_message(A.rank(), "cells"), num_colors);
    #pragma omp parallel for schedule(guided, 20) firstprivate(ufc, ufc_cell, vertex_coordinates, dofs, cell_integral, facet_integral)
    for (int index = 0; index < num_cell_in_color; ++index)
    {
      // Cell index
      const std::size_t cell_index = colored_cells[index];

      // Create cell
//......... (part of the code is omitted here) .........
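The scalars vector seen above exists to avoid a data race when a functional is assembled in parallel: each thread accumulates into its own slot, and the slots are summed once at the end. The same pattern in miniature (plain C++/OpenMP; the function name is ours):

#include <omp.h>
#include <vector>

// One accumulator slot per thread; a single serial reduction at the
// end avoids any locking or atomics in the parallel loop.
double threaded_sum(const std::vector<double>& cell_values, int num_threads)
{
  omp_set_num_threads(num_threads);
  std::vector<double> scalars(num_threads, 0.0);

  #pragma omp parallel for
  for (int i = 0; i < (int) cell_values.size(); ++i)
    scalars[omp_get_thread_num()] += cell_values[i];

  double total = 0.0;
  for (const double s : scalars)
    total += s;
  return total;
}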
Example 10: _init_global_tensor
//-----------------------------------------------------------------------------
void MultiMeshAssembler::_init_global_tensor(GenericTensor& A,
                                             const MultiMeshForm& a)
{
  log(PROGRESS, "Initializing global tensor.");

  // This function initializes the big system matrix corresponding to
  // all dofs (including inactive dofs) on all parts of the MultiMesh
  // function space.

  // Create layout for initializing tensor
  std::shared_ptr<TensorLayout> tensor_layout;
  tensor_layout = A.factory().create_layout(a.rank());
  dolfin_assert(tensor_layout);

  // Get dimensions
  std::vector<std::shared_ptr<const IndexMap>> index_maps;
  for (std::size_t i = 0; i < a.rank(); i++)
  {
    std::shared_ptr<const MultiMeshFunctionSpace> V = a.function_space(i);
    dolfin_assert(V);
    index_maps.push_back(std::shared_ptr<const IndexMap>
                         (new IndexMap(MPI_COMM_WORLD, V->dim(), 1)));
  }

  // Initialise tensor layout
  tensor_layout->init(MPI_COMM_WORLD, index_maps,
                      TensorLayout::Ghosts::UNGHOSTED);

  // Build sparsity pattern if required
  if (tensor_layout->sparsity_pattern())
  {
    GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
    SparsityPatternBuilder::build_multimesh_sparsity_pattern(pattern, a);
  }

  // Initialize tensor
  A.init(*tensor_layout);

  // Insert zeros on the diagonal as diagonal entries may be
  // prematurely optimised away by the linear algebra backend when
  // calling GenericMatrix::apply, e.g. PETSc does this and then
  // errors when a matrix has no diagonal entries.
  if (A.rank() == 2)
  {
    // Down cast to GenericMatrix
    GenericMatrix& _matA = A.down_cast<GenericMatrix>();

    // Loop over rows and insert 0.0 on the diagonal
    const double block = 0.0;
    const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
    const std::size_t range = std::min(row_range.second, A.size(1));
    for (std::size_t i = row_range.first; i < range; i++)
    {
      dolfin::la_index _i = i;
      _matA.set(&block, 1, &_i, 1, &_i);
    }
    A.apply("flush");
  }

  // Set tensor to zero
  A.zero();
}
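The diagonal-seeding loop above also appears, nearly verbatim, in Examples 2 and 3. Extracted as a standalone helper against the GenericMatrix interface used throughout (the function name is ours):

#include <algorithm>
#include <utility>
#include <dolfin.h>

// Insert an explicit 0.0 at every locally owned diagonal position so
// the backend (e.g. PETSc) keeps the entry alive through apply().
void seed_diagonal(dolfin::GenericMatrix& A)
{
  const double block = 0.0;
  const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
  const std::size_t range = std::min(row_range.second, A.size(1));
  for (std::size_t i = row_range.first; i < range; i++)
  {
    dolfin::la_index _i = i;
    A.set(&block, 1, &_i, 1, &_i);
  }
  A.apply("flush");
}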