This page collects typical usage examples of the C++ method GenericTensor::apply. If you are unsure how GenericTensor::apply is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, GenericTensor.
Seven code examples of GenericTensor::apply are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
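Before the examples, a note on what apply() does: it finalizes pending insertions into the tensor (for distributed backends this includes communicating off-process entries), with the string argument selecting the mode. In the examples below, "add" is used once assembly is complete, while "flush" is used mid-assembly so that values just set become visible before assembly continues. The helper below is a minimal sketch of that finalization pattern, not DOLFIN library code; it assumes the tensor has already been initialized and filled elsewhere, and the function name finalize_assembled_tensor is our own.
#include <dolfin/la/GenericMatrix.h>
#include <dolfin/la/GenericTensor.h>

using namespace dolfin;

// Hypothetical helper illustrating the finalization pattern used by the
// assemblers in the examples below. Assumes A.init(...) has been called
// and local values have already been inserted.
void finalize_assembled_tensor(GenericTensor& A)
{
  // Accumulate any pending (possibly off-process) contributions and
  // leave the tensor in its final, usable state.
  A.apply("add");

  // For matrices, rows belonging to inactive dofs can then be locked by
  // placing ones on their diagonal (as Example 1 does).
  if (A.rank() == 2)
    static_cast<GenericMatrix&>(A).ident_zeros();
}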
Example 1: assemble
//-----------------------------------------------------------------------------
void MultiMeshAssembler::assemble(GenericTensor& A, const MultiMeshForm& a)
{
  // Developer note: This implementation does not yet handle
  // - subdomains
  // - interior facets
  // - exterior facets

  begin(PROGRESS, "Assembling tensor over multimesh function space.");

  // Initialize global tensor
  _init_global_tensor(A, a);

  // Assemble over uncut cells
  _assemble_uncut_cells(A, a);

  // Assemble over cut cells
  _assemble_cut_cells(A, a);

  // Assemble over interface
  _assemble_interface(A, a);

  // Assemble over overlap
  _assemble_overlap(A, a);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");

  // Lock any remaining inactive dofs
  if (A.rank() == 2)
    static_cast<GenericMatrix&>(A).ident_zeros();

  end();
}
Example 2: assemble
//-----------------------------------------------------------------------------
void MultiMeshAssembler::assemble(GenericTensor& A, const MultiMeshForm& a)
{
  // Developer note: This implementation does not yet handle
  // - subdomains
  // - interior facets
  // - exterior facets

  begin(PROGRESS, "Assembling tensor over multimesh function space.");

  // Initialize global tensor
  _init_global_tensor(A, a);

  // Assemble over uncut cells
  _assemble_uncut_cells(A, a);

  // Assemble over exterior facets
  _assemble_uncut_exterior_facets(A, a);

  // Assemble over cut cells
  _assemble_cut_cells(A, a);

  // Assemble over interface
  _assemble_interface(A, a);

  // Assemble over overlap
  _assemble_overlap(A, a);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");

  end();
}
Example 3: assemble
//-----------------------------------------------------------------------------
void OpenMpAssembler::assemble(GenericTensor& A, const Form& a)
{
  // Get mesh
  const Mesh& mesh = a.mesh();
  if (MPI::size(mesh.mpi_comm()) > 1)
  {
    dolfin_error("OpenMPAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "The OpenMp assembler has not been tested in combination with MPI");
  }

  dolfin_assert(a.ufc_form());

  // All assembler functions above end up calling this function, which
  // in turn calls the assembler functions below to assemble over
  // cells, exterior and interior facets. Note the importance of
  // treating empty mesh functions as null pointers for the PyDOLFIN
  // interface.

  // Get cell domains
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains
    = a.cell_domains();

  // Get exterior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> exterior_facet_domains
    = a.exterior_facet_domains();

  // Get interior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> interior_facet_domains
    = a.interior_facet_domains();

  // Check form
  AssemblerBase::check(a);

  // Create data structure for local assembly data
  UFC ufc(a);

  // Initialize global tensor
  init_global_tensor(A, a);

  // FIXME: The below selections should be made robust
  if (a.ufc_form()->has_interior_facet_integrals())
    assemble_interior_facets(A, a, ufc, interior_facet_domains, cell_domains, 0);

  if (a.ufc_form()->has_exterior_facet_integrals())
  {
    assemble_cells_and_exterior_facets(A, a, ufc, cell_domains,
                                       exterior_facet_domains, 0);
  }
  else
    assemble_cells(A, a, ufc, cell_domains, 0);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");
}
Example 4: init_global_tensor
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.empty())
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions and mapping across processes for each dimension
    std::vector<std::shared_ptr<const IndexMap>> index_maps;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      index_maps.push_back(dofmaps[i]->index_map());
    }

    // Initialise tensor layout
    // FIXME: somewhere need to check block sizes are same on both axes
    // NOTE: Jan: that will be done on the backend side; IndexMap will
    //       provide tabulate functions with arbitrary block size;
    //       moreover the functions will tabulate directly using a
    //       correct int type
    tensor_layout->init(a.mesh().mpi_comm(), index_maps,
                        TensorLayout::Ghosts::UNGHOSTED);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    a.ufc_form()->has_vertex_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be
    // prematurely optimised away by the linear algebra backend when
    // calling GenericMatrix::apply, e.g. PETSc does this then errors
    // when matrices have no diagonal entry inserted.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _matA = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _matA.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Dim %d of tensor does not match form", i);
      }
    }
  }

  if (!add_values)
    A.zero();
}
Example 5: init_global_tensor
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.size(0) == 0)
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions
    std::vector<std::size_t> global_dimensions;
    std::vector<std::pair<std::size_t, std::size_t>> local_range;
    std::vector<std::size_t> block_sizes;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      global_dimensions.push_back(dofmaps[i]->global_dimension());
      local_range.push_back(dofmaps[i]->ownership_range());
      block_sizes.push_back(dofmaps[i]->block_size);
    }

    // Set block size for sparsity graphs
    std::size_t block_size = 1;
    if (a.rank() == 2)
    {
      const std::vector<std::size_t> _bs(a.rank(), dofmaps[0]->block_size);
      block_size = (block_sizes == _bs) ? dofmaps[0]->block_size : 1;
    }

    // Initialise tensor layout
    tensor_layout->init(a.mesh().mpi_comm(), global_dimensions, block_size,
                        local_range);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be prematurely
    // optimised away by the linear algebra backend when calling
    // GenericMatrix::apply, e.g. PETSc does this then errors when matrices
    // have no diagonal entry inserted.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _A = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _A.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Reset of tensor in assembly not requested, but dim %d of tensor does not match form", i);
      }
    }
  }
//......... remainder of this example omitted .........
Example 6: assemble
//-----------------------------------------------------------------------------
void Assembler::assemble(GenericTensor& A, const Form& a)
{
  // All assembler functions above end up calling this function, which
  // in turn calls the assembler functions below to assemble over
  // cells, exterior and interior facets.

  // Check whether we should call the multi-core assembler
#ifdef HAS_OPENMP
  const std::size_t num_threads = parameters["num_threads"];
  if (num_threads > 0)
  {
    OpenMpAssembler assembler;
    assembler.add_values = add_values;
    assembler.finalize_tensor = finalize_tensor;
    assembler.keep_diagonal = keep_diagonal;
    assembler.assemble(A, a);
    return;
  }
#endif

  // Get cell domains
  std::shared_ptr<const MeshFunction<std::size_t>>
    cell_domains = a.cell_domains();

  // Get exterior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> exterior_facet_domains
    = a.exterior_facet_domains();

  // Get interior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> interior_facet_domains
    = a.interior_facet_domains();

  // Get vertex domains
  std::shared_ptr<const MeshFunction<std::size_t>> vertex_domains
    = a.vertex_domains();

  // Check form
  AssemblerBase::check(a);

  // Create data structure for local assembly data
  UFC ufc(a);

  // Update off-process coefficients
  const std::vector<std::shared_ptr<const GenericFunction>>
    coefficients = a.coefficients();

  // Initialize global tensor
  init_global_tensor(A, a);

  // Assemble over cells
  assemble_cells(A, a, ufc, cell_domains, NULL);

  // Assemble over exterior facets
  assemble_exterior_facets(A, a, ufc, exterior_facet_domains, NULL);

  // Assemble over interior facets
  assemble_interior_facets(A, a, ufc, interior_facet_domains,
                           cell_domains, NULL);

  // Assemble over vertices
  assemble_vertices(A, a, ufc, vertex_domains);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");
}
Example 7: _init_global_tensor
//-----------------------------------------------------------------------------
void MultiMeshAssembler::_init_global_tensor(GenericTensor& A,
                                             const MultiMeshForm& a)
{
  log(PROGRESS, "Initializing global tensor.");

  // This function initializes the big system matrix corresponding to
  // all dofs (including inactive dofs) on all parts of the MultiMesh
  // function space.

  // Create layout for initializing tensor
  std::shared_ptr<TensorLayout> tensor_layout;
  tensor_layout = A.factory().create_layout(a.rank());
  dolfin_assert(tensor_layout);

  // Get dimensions
  std::vector<std::shared_ptr<const IndexMap>> index_maps;
  for (std::size_t i = 0; i < a.rank(); i++)
  {
    std::shared_ptr<const MultiMeshFunctionSpace> V = a.function_space(i);
    dolfin_assert(V);
    index_maps.push_back(std::shared_ptr<const IndexMap>
                         (new IndexMap(MPI_COMM_WORLD, V->dim(), 1)));
  }

  // Initialise tensor layout
  tensor_layout->init(MPI_COMM_WORLD, index_maps,
                      TensorLayout::Ghosts::UNGHOSTED);

  // Build sparsity pattern if required
  if (tensor_layout->sparsity_pattern())
  {
    GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
    SparsityPatternBuilder::build_multimesh_sparsity_pattern(pattern, a);
  }

  // Initialize tensor
  A.init(*tensor_layout);

  // Insert zeros on the diagonal as diagonal entries may be prematurely
  // optimised away by the linear algebra backend when calling
  // GenericMatrix::apply, e.g. PETSc does this then errors when matrices
  // have no diagonal entry inserted.
  if (A.rank() == 2)
  {
    // Down cast to GenericMatrix
    GenericMatrix& _matA = A.down_cast<GenericMatrix>();

    // Loop over rows and insert 0.0 on the diagonal
    const double block = 0.0;
    const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
    const std::size_t range = std::min(row_range.second, A.size(1));
    for (std::size_t i = row_range.first; i < range; i++)
    {
      dolfin::la_index _i = i;
      _matA.set(&block, 1, &_i, 1, &_i);
    }
    A.apply("flush");
  }

  // Set tensor to zero
  A.zero();
}