This article collects typical usage examples of the C++ method Space::globalDofCount. If you are unsure what Space::globalDofCount does or how to call it, the curated examples below should help; you can also look further into usage examples of the enclosing class, Space.
Four code examples of Space::globalDofCount are shown below, sorted by popularity by default.
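Before the examples, a minimal sketch of the method itself may help: globalDofCount() returns the number of global degrees of freedom of a discretised function space, and the examples below use it to size matrices and vectors. The helper below is purely illustrative (the function name is made up and is not part of BEM++); any type exposing globalDofCount(), such as a concrete Space subclass, can be passed in.

#include <cstddef>
#include <iostream>

// Illustrative helper (not part of the library): report the size of the
// weak-form matrix that a pair of spaces would produce.
template <typename SpaceType>
void printWeakFormShape(const SpaceType &testSpace, const SpaceType &trialSpace) {
  const std::size_t rows = testSpace.globalDofCount();   // test (row) DOFs
  const std::size_t cols = trialSpace.globalDofCount();  // trial (column) DOFs
  std::cout << "Weak form matrix: " << rows << " x " << cols << std::endl;
}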
Example 1: runtime_error
template <typename BasisFunctionType, typename ResultType>
shared_ptr<const DiscreteBoundaryOperator<ResultType>>
assembleDenseBlock(int rowStart, int rowEnd, int colStart, int colEnd,
                   const Space<BasisFunctionType> &testSpace,
                   const Space<BasisFunctionType> &trialSpace,
                   Fiber::LocalAssemblerForIntegralOperators<ResultType> &assembler,
                   const ParameterList &parameterList) {
  int numberOfRows = rowEnd - rowStart;
  int numberOfColumns = colEnd - colStart;
  if (colEnd > trialSpace.globalDofCount() ||
      rowEnd > testSpace.globalDofCount() || colStart < 0 || rowStart < 0)
    throw std::runtime_error("DenseGlobalBlockAssembler::assembleWeakForm(): "
                             "Indices out of bounds");
  Context<BasisFunctionType, ResultType> context(parameterList);
  const AssemblyOptions &options = context.assemblyOptions();

  // Create the operator's matrix
  Matrix<ResultType> result(numberOfRows, numberOfColumns);
  result.setZero();

  // Per-element global DOF indices and weights restricted to the requested block
  std::unordered_map<int, std::vector<GlobalDofIndex>> trialIndexMap,
      testIndexMap;
  std::unordered_map<int, std::vector<BasisFunctionType>> trialDofWeights,
      testDofWeights;
  gatherElementInformation(colStart, colEnd, trialSpace, trialIndexMap,
                           trialDofWeights);
  gatherElementInformation(rowStart, rowEnd, testSpace, testIndexMap,
                           testDofWeights);

  std::vector<int> testIndices;
  testIndices.reserve(testIndexMap.size());
  for (const auto &p : testIndexMap)
    testIndices.push_back(p.first);

  typedef DenseWeakFormAssemblerLoopBody<BasisFunctionType, ResultType> Body;
  typename Body::MutexType mutex;
  {
    Fiber::SerialBlasRegion region;
    tbb::parallel_for_each(trialIndexMap.begin(), trialIndexMap.end(),
                           Body(rowStart, colStart, testIndices, testIndexMap,
                                trialIndexMap, testDofWeights, trialDofWeights,
                                assembler, result, mutex));
  }

  // Create and return a discrete operator represented by the matrix that
  // has just been calculated
  return shared_ptr<DiscreteBoundaryOperator<ResultType>>(
      new DiscreteDenseBoundaryOperator<ResultType>(result));
}
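A possible call site for the block assembler above, shown only as a sketch: the spaces, local assembler and parameter list are assumed to be created elsewhere, and assembleTopLeftQuarter is a made-up wrapper name rather than part of the library.

// Hypothetical wrapper (illustrative only): assemble the top-left quarter of
// the weak form. globalDofCount() bounds the admissible row/column ranges,
// exactly as the bounds check in assembleDenseBlock() above requires.
template <typename BasisFunctionType, typename ResultType>
shared_ptr<const DiscreteBoundaryOperator<ResultType>> assembleTopLeftQuarter(
    const Space<BasisFunctionType> &testSpace,
    const Space<BasisFunctionType> &trialSpace,
    Fiber::LocalAssemblerForIntegralOperators<ResultType> &assembler,
    const ParameterList &parameterList) {
  const int rowEnd = static_cast<int>(testSpace.globalDofCount()) / 2;
  const int colEnd = static_cast<int>(trialSpace.globalDofCount()) / 2;
  return assembleDenseBlock(0, rowEnd, 0, colEnd, testSpace, trialSpace,
                            assembler, parameterList);
}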
Example 2: reallyCalculateProjections
template <typename BasisFunctionType, typename ResultType>
Vector<ResultType> reallyCalculateProjections(
    const Space<BasisFunctionType> &dualSpace,
    Fiber::LocalAssemblerForGridFunctions<ResultType> &assembler,
    const AssemblyOptions &options) {
  // TODO: parallelise using TBB (the 'options' parameter will then start to
  // be used)

  // Get the grid's leaf view so that we can iterate over elements
  const GridView &view = dualSpace.gridView();
  const size_t elementCount = view.entityCount(0);

  // Global DOF indices corresponding to local DOFs on elements
  std::vector<std::vector<GlobalDofIndex>> testGlobalDofs(elementCount);
  std::vector<std::vector<BasisFunctionType>> testLocalDofWeights(elementCount);

  // Gather global DOF lists
  const Mapper &mapper = view.elementMapper();
  std::unique_ptr<EntityIterator<0>> it = view.entityIterator<0>();
  while (!it->finished()) {
    const Entity<0> &element = it->entity();
    const int elementIndex = mapper.entityIndex(element);
    dualSpace.getGlobalDofs(element, testGlobalDofs[elementIndex],
                            testLocalDofWeights[elementIndex]);
    it->next();
  }

  // Make a vector of all element indices
  std::vector<int> testIndices(elementCount);
  for (size_t i = 0; i < elementCount; ++i)
    testIndices[i] = i;

  // Create the weak form's column vector
  Vector<ResultType> result(dualSpace.globalDofCount());
  result.setZero();

  // Evaluate local weak forms
  std::vector<Vector<ResultType>> localResult;
  assembler.evaluateLocalWeakForms(testIndices, localResult);

  // Loop over test elements and add their integrals to the appropriate
  // entries of the global weak form
  for (size_t testIndex = 0; testIndex < elementCount; ++testIndex)
    for (size_t testDof = 0; testDof < testGlobalDofs[testIndex].size();
         ++testDof) {
      int testGlobalDof = testGlobalDofs[testIndex][testDof];
      // A negative global DOF index means that this local DOF is constrained
      // (not used)
      if (testGlobalDof >= 0)
        result(testGlobalDof) +=
            conj(testLocalDofWeights[testIndex][testDof]) *
            localResult[testIndex](testDof);
    }

  // Return the vector of projections <phi_i, f>
  return result;
}
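As a quick sanity check on the result of this example, the projection vector is indexed by the global DOFs of dualSpace, so its length equals dualSpace.globalDofCount(). The small helper below is illustrative only and assumes that Vector<ResultType> exposes an Eigen-style rows() member.

#include <cassert>
#include <cstddef>

// Illustrative check (not part of the library): the projections <phi_i, f>
// returned above are indexed by the global DOFs of the dual space.
template <typename BasisFunctionType, typename ResultType>
void checkProjectionSize(const Space<BasisFunctionType> &dualSpace,
                         const Vector<ResultType> &projections) {
  assert(static_cast<std::size_t>(projections.rows()) ==
         dualSpace.globalDofCount());
}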
Example 3: scheduler
template <typename BasisFunctionType, typename ResultType>
std::auto_ptr<DiscreteBoundaryOperator<ResultType> >
DenseGlobalAssembler<BasisFunctionType, ResultType>::assembleDetachedWeakForm(
    const Space<BasisFunctionType>& testSpace,
    const Space<BasisFunctionType>& trialSpace,
    LocalAssemblerForIntegralOperators& assembler,
    const Context<BasisFunctionType, ResultType>& context)
{
  const AssemblyOptions& options = context.assemblyOptions();

  // Global DOF indices corresponding to local DOFs on elements
  std::vector<std::vector<GlobalDofIndex> > testGlobalDofs, trialGlobalDofs;
  std::vector<std::vector<BasisFunctionType> > testLocalDofWeights,
      trialLocalDofWeights;
  gatherGlobalDofs(testSpace, testGlobalDofs, testLocalDofWeights);
  if (&testSpace == &trialSpace) {
    trialGlobalDofs = testGlobalDofs;
    trialLocalDofWeights = testLocalDofWeights;
  } else
    gatherGlobalDofs(trialSpace, trialGlobalDofs, trialLocalDofWeights);
  const size_t testElementCount = testGlobalDofs.size();
  const size_t trialElementCount = trialGlobalDofs.size();

  // Make a vector of all element indices
  std::vector<int> testIndices(testElementCount);
  for (size_t i = 0; i < testElementCount; ++i)
    testIndices[i] = i;

  // Create the operator's matrix
  arma::Mat<ResultType> result(testSpace.globalDofCount(),
                               trialSpace.globalDofCount());
  result.fill(0.);

  typedef DenseWeakFormAssemblerLoopBody<BasisFunctionType, ResultType> Body;
  typename Body::MutexType mutex;

  // Decide how many threads TBB may use
  const ParallelizationOptions& parallelOptions =
      options.parallelizationOptions();
  int maxThreadCount = 1;
  if (!parallelOptions.isOpenClEnabled()) {
    if (parallelOptions.maxThreadCount() == ParallelizationOptions::AUTO)
      maxThreadCount = tbb::task_scheduler_init::automatic;
    else
      maxThreadCount = parallelOptions.maxThreadCount();
  }
  tbb::task_scheduler_init scheduler(maxThreadCount);
  {
    Fiber::SerialBlasRegion region;
    tbb::parallel_for(tbb::blocked_range<size_t>(0, trialElementCount),
                      Body(testIndices, testGlobalDofs, trialGlobalDofs,
                           testLocalDofWeights, trialLocalDofWeights,
                           assembler, result, mutex));
  }

  //// Old serial code (TODO: decide whether to keep it behind e.g. #ifndef PARALLEL)
  //  std::vector<arma::Mat<ValueType> > localResult;
  //  // Loop over trial elements
  //  for (int trialIndex = 0; trialIndex < trialElementCount; ++trialIndex) {
  //    // Evaluate integrals over pairs of the current trial element and
  //    // all the test elements
  //    assembler.evaluateLocalWeakForms(TEST_TRIAL, testIndices, trialIndex,
  //                                     ALL_DOFS, localResult);
  //    // Loop over test indices
  //    for (int testIndex = 0; testIndex < testElementCount; ++testIndex)
  //      // Add the integrals to appropriate entries in the operator's matrix
  //      for (int trialDof = 0; trialDof < trialGlobalDofs[trialIndex].size(); ++trialDof)
  //        for (int testDof = 0; testDof < testGlobalDofs[testIndex].size(); ++testDof)
  //          result(testGlobalDofs[testIndex][testDof],
  //                 trialGlobalDofs[trialIndex][trialDof]) +=
  //              localResult[testIndex](testDof, trialDof);
  //  }

  // Create and return a discrete operator represented by the matrix that
  // has just been calculated
  return std::auto_ptr<DiscreteBoundaryOperator<ResultType> >(
      new DiscreteDenseBoundaryOperator<ResultType>(result));
}
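A sketch of how this dense assembler might be invoked. The wrapper name is made up, the objects are assumed to exist in the calling code, and the unqualified LocalAssemblerForIntegralOperators used in the example is assumed to be a typedef for Fiber::LocalAssemblerForIntegralOperators<ResultType>, as in example 1.

// Hypothetical call site (illustrative only). The returned operator maps
// vectors of length trialSpace.globalDofCount() to vectors of length
// testSpace.globalDofCount().
template <typename BasisFunctionType, typename ResultType>
std::auto_ptr<DiscreteBoundaryOperator<ResultType> > assembleDense(
    const Space<BasisFunctionType>& testSpace,
    const Space<BasisFunctionType>& trialSpace,
    Fiber::LocalAssemblerForIntegralOperators<ResultType>& assembler,
    const Context<BasisFunctionType, ResultType>& context) {
  return DenseGlobalAssembler<BasisFunctionType, ResultType>::
      assembleDetachedWeakForm(testSpace, trialSpace, assembler, context);
}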
Example 4: runtime_error
template <typename BasisFunctionType, typename ResultType>
std::auto_ptr<DiscreteBoundaryOperator<ResultType> >
AcaGlobalAssembler<BasisFunctionType, ResultType>::assembleDetachedWeakForm(
    const Space<BasisFunctionType>& testSpace,
    const Space<BasisFunctionType>& trialSpace,
    const std::vector<LocalAssembler*>& localAssemblers,
    const std::vector<const DiscreteBndOp*>& sparseTermsToAdd,
    const std::vector<ResultType>& denseTermsMultipliers,
    const std::vector<ResultType>& sparseTermsMultipliers,
    const AssemblyOptions& options,
    int symmetry)
{
#ifdef WITH_AHMED
  typedef AhmedDofWrapper<CoordinateType> AhmedDofType;
  typedef ExtendedBemCluster<AhmedDofType> AhmedBemCluster;
  typedef bemblcluster<AhmedDofType, AhmedDofType> AhmedBemBlcluster;
  typedef DiscreteAcaBoundaryOperator<ResultType> DiscreteAcaLinOp;

  const AcaOptions& acaOptions = options.acaOptions();
  const bool indexWithGlobalDofs = acaOptions.globalAssemblyBeforeCompression;
  const bool verbosityAtLeastDefault =
      (options.verbosityLevel() >= VerbosityLevel::DEFAULT);
  const bool verbosityAtLeastHigh =
      (options.verbosityLevel() >= VerbosityLevel::HIGH);

  // Currently we don't support Hermitian ACA operators. This is because we
  // don't have the means to really test them -- we would need complex-valued
  // basis functions for that. (Assembly of such a matrix would be very easy
  // -- just change complex_sym from true to false in the call to apprx_sym()
  // in AcaWeakFormAssemblerLoopBody::operator() -- but operations on
  // symmetric/Hermitian matrices are not always trivial and we do need to be
  // able to test them properly.)
  bool symmetric = symmetry & SYMMETRIC;
  if (symmetry & HERMITIAN && !(symmetry & SYMMETRIC) &&
      verbosityAtLeastDefault)
    std::cout << "Warning: assembly of non-symmetric Hermitian H-matrices "
                 "is not supported yet. A general H-matrix will be assembled"
              << std::endl;

#ifndef WITH_TRILINOS
  if (!indexWithGlobalDofs)
    throw std::runtime_error("AcaGlobalAssembler::assembleDetachedWeakForm(): "
                             "ACA assembly with globalAssemblyBeforeCompression "
                             "set to false requires BEM++ to be linked with "
                             "Trilinos");
#endif // WITH_TRILINOS

  const size_t testDofCount = indexWithGlobalDofs ?
      testSpace.globalDofCount() : testSpace.flatLocalDofCount();
  const size_t trialDofCount = indexWithGlobalDofs ?
      trialSpace.globalDofCount() : trialSpace.flatLocalDofCount();

  if (symmetric && testDofCount != trialDofCount)
    throw std::invalid_argument("AcaGlobalAssembler::assembleDetachedWeakForm(): "
                                "you cannot generate a symmetric weak form "
                                "using test and trial spaces with different "
                                "numbers of DOFs");

  // o2p: map of original indices to permuted indices
  // p2o: map of permuted indices to original indices
  typedef ClusterConstructionHelper<BasisFunctionType> CCH;
  shared_ptr<AhmedBemCluster> testClusterTree;
  shared_ptr<IndexPermutation> test_o2pPermutation, test_p2oPermutation;
  CCH::constructBemCluster(testSpace, indexWithGlobalDofs, acaOptions,
                           testClusterTree,
                           test_o2pPermutation, test_p2oPermutation);
  shared_ptr<AhmedBemCluster> trialClusterTree;
  shared_ptr<IndexPermutation> trial_o2pPermutation, trial_p2oPermutation;
  if (symmetric || &testSpace == &trialSpace) {
    trialClusterTree = testClusterTree;
    trial_o2pPermutation = test_o2pPermutation;
    trial_p2oPermutation = test_p2oPermutation;
  } else
    CCH::constructBemCluster(trialSpace, indexWithGlobalDofs, acaOptions,
                             trialClusterTree,
                             trial_o2pPermutation, trial_p2oPermutation);

  // // Export VTK plots showing the distribution of leaf cluster ids
  // std::vector<unsigned int> testClusterIds;
  // getClusterIds(*testClusterTree, test_p2oPermutation->permutedIndices(), testClusterIds);
  // testSpace.dumpClusterIds("testClusterIds", testClusterIds,
  //                          indexWithGlobalDofs ? GLOBAL_DOFS : FLAT_LOCAL_DOFS);
  // std::vector<unsigned int> trialClusterIds;
  // getClusterIds(*trialClusterTree, trial_p2oPermutation->permutedIndices(), trialClusterIds);
  // trialSpace.dumpClusterIds("trialClusterIds", trialClusterIds,
  //                           indexWithGlobalDofs ? GLOBAL_DOFS : FLAT_LOCAL_DOFS);

  if (verbosityAtLeastHigh)
    std::cout << "Test cluster count: " << testClusterTree->getncl()
              << "\nTrial cluster count: " << trialClusterTree->getncl()
              << std::endl;

  unsigned int blockCount = 0;
  shared_ptr<AhmedBemBlcluster> bemBlclusterTree(
      CCH::constructBemBlockCluster(acaOptions, symmetric,
                                    *testClusterTree, *trialClusterTree,
                                    blockCount).release());

  if (verbosityAtLeastHigh)
    std::cout << "Mblock count: " << blockCount << std::endl;

  // ... (remainder of this example omitted) ...
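Even though the rest of this example is omitted, the part that involves globalDofCount is already visible: when globalAssemblyBeforeCompression is enabled, the ACA operator is sized by globalDofCount(), otherwise by flatLocalDofCount(). The reduced sketch below isolates just that decision; the pair-returning helper is made up, and the real function goes on to build cluster trees and H-matrix blocks.

#include <cstddef>
#include <utility>

// Reduced, illustrative sketch of the DOF-count selection in the example above.
template <typename BasisFunctionType>
std::pair<std::size_t, std::size_t> acaWeakFormShape(
    const Space<BasisFunctionType>& testSpace,
    const Space<BasisFunctionType>& trialSpace,
    bool indexWithGlobalDofs) {
  const std::size_t testDofCount = indexWithGlobalDofs
      ? testSpace.globalDofCount() : testSpace.flatLocalDofCount();
  const std::size_t trialDofCount = indexWithGlobalDofs
      ? trialSpace.globalDofCount() : trialSpace.flatLocalDofCount();
  return std::make_pair(testDofCount, trialDofCount);
}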