本文整理汇总了C++中DataVector::setAll方法的典型用法代码示例。如果您正苦于以下问题：C++ DataVector::setAll方法的具体用法？C++ DataVector::setAll怎么用？C++ DataVector::setAll使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DataVector
的用法示例。
在下文中一共展示了DataVector::setAll方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mult_transpose
/**
 * Performs a transposed mass evaluation.
 *
 * Each thread accumulates its partial contributions into a private vector
 * and merges it into the shared result once, avoiding data races.
 *
 * @param storage GridStorage object that contains the grid's points information
 * @param basis a reference to a class that implements a specific basis
 * @param source the coefficients of the grid points
 * @param x the d-dimensional vector with data points (row-wise)
 * @param result the result vector of the matrix vector multiplication
 */
void mult_transpose(GridStorage* storage, BASIS& basis, DataVector& source,
DataMatrix& x, DataVector& result) {
  result.setAll(0.0);
  size_t source_size = source.getSize();
  #pragma omp parallel
  {
    // Thread-local accumulator; zeroed exactly once per thread.
    // (The original code redundantly called setAll(0.0) a second time.)
    DataVector privateResult(result.getSize());
    privateResult.setAll(0.0);
    DataVector line(x.getNcols());
    AlgorithmEvaluationTransposed<BASIS> AlgoEvalTrans(storage);

    // Iterations are independent: each reads one row of x and one
    // coefficient, scattering into the thread-private vector only.
    #pragma omp for schedule(static)
    for (size_t i = 0; i < source_size; i++) {
      x.getRow(i, line);
      AlgoEvalTrans(basis, line, source[i], privateResult);
    }

    // Merge the per-thread partial sums into the shared result.
    #pragma omp critical
    {
      result.add(privateResult);
    }
  }
}
示例2: mult
/**
 * Performs a mass evaluation
 *
 * Every iteration writes to a distinct entry of result, so the loop can be
 * parallelized without any reduction or critical section.
 *
 * @param storage GridStorage object that contains the grid's points information
 * @param basis a reference to a class that implements a specific basis
 * @param source the coefficients of the grid points
 * @param x the d-dimensional vector with data points (row-wise)
 * @param result the result vector of the matrix vector multiplication
 */
void mult(GridStorage* storage, BASIS& basis, DataVector& source, DataMatrix& x,
DataVector& result) {
  result.setAll(0.0);
  const size_t numRows = result.getSize();
  #pragma omp parallel
  {
    // Per-thread scratch row and evaluator instance.
    DataVector dataPoint(x.getNcols());
    AlgorithmEvaluation<BASIS> evaluator(storage);

    #pragma omp for schedule(static)
    for (size_t row = 0; row < numRows; row++) {
      x.getRow(row, dataPoint);
      result[row] = evaluator(basis, dataPoint, source);
    }
  }
}
示例3: evalGradient
/**
 * Evaluates the sparse-grid function and its gradient at a point.
 *
 * @param alpha         coefficient vector (one entry per grid point)
 * @param point         evaluation point (d-dimensional)
 * @param[out] gradient resized to d and filled with the gradient
 * @return              function value at the point
 */
float_t OperationNaiveEvalGradientWavelet::evalGradient(
const DataVector& alpha, const DataVector& point, DataVector& gradient) {
  const size_t n = storage->size();
  const size_t d = storage->dim();
  float_t result = 0.0;

  // Reuse the cached dimensionality instead of querying storage again.
  gradient.resize(d);
  gradient.setAll(0.0);

  DataVector curGradient(d);

  for (size_t i = 0; i < n; i++) {
    const GridIndex& gp = *(*storage)[i];
    float_t curValue = 1.0;

    // Seed every component with alpha[i]; the loops below multiply in the
    // 1-D factors (the derivative in exactly one dimension, the plain basis
    // value in all others) — the product rule for a tensor-product basis.
    curGradient.setAll(alpha[i]);

    for (size_t t = 0; t < d; t++) {
      const float_t val1d = base.eval(gp.getLevel(t), gp.getIndex(t), point[t]);
      const float_t dx1d = base.evalDx(gp.getLevel(t), gp.getIndex(t),
                                       point[t]);

      curValue *= val1d;

      for (size_t t2 = 0; t2 < d; t2++) {
        curGradient[t2] *= (t2 == t) ? dx1d : val1d;
      }
    }

    result += alpha[i] * curValue;
    gradient.add(curGradient);
  }

  return result;
}
示例4: evalHessian
/**
 * Evaluates the sparse-grid function, its gradient, and its Hessian
 * at a point.
 *
 * @param alpha         coefficient vector (one entry per grid point)
 * @param point         evaluation point (d-dimensional)
 * @param[out] gradient resized to d and filled with the gradient
 * @param[out] hessian  replaced by a d x d matrix holding the Hessian
 * @return              function value at the point
 */
float_t OperationNaiveEvalHessianModFundamentalSpline::evalHessian(
const DataVector& alpha, const DataVector& point,
DataVector& gradient, DataMatrix& hessian) {
  const size_t n = storage->size();
  const size_t d = storage->dim();
  float_t result = 0.0;

  // Reuse the cached dimensionality instead of querying storage again.
  gradient.resize(d);
  gradient.setAll(0.0);
  hessian = DataMatrix(d, d);
  hessian.setAll(0.0);

  DataVector curGradient(d);
  DataMatrix curHessian(d, d);

  for (size_t i = 0; i < n; i++) {
    const GridIndex& gp = *(*storage)[i];
    float_t curValue = 1.0;

    // Seed gradient/Hessian contributions with alpha[i]; the loops below
    // multiply in the 1-D factors per the product rule for tensor-product
    // bases: second derivative on the diagonal where both indices hit the
    // current dimension, first derivative where exactly one does, plain
    // value otherwise.
    curGradient.setAll(alpha[i]);
    curHessian.setAll(alpha[i]);

    for (size_t t = 0; t < d; t++) {
      const float_t val1d = base.eval(gp.getLevel(t), gp.getIndex(t), point[t]);
      const float_t dx1d = base.evalDx(gp.getLevel(t), gp.getIndex(t),
                                       point[t]);
      const float_t dxdx1d = base.evalDxDx(gp.getLevel(t), gp.getIndex(t),
                                           point[t]);

      curValue *= val1d;

      for (size_t t2 = 0; t2 < d; t2++) {
        if (t2 == t) {
          curGradient[t2] *= dx1d;

          for (size_t t3 = 0; t3 < d; t3++) {
            curHessian(t2, t3) *= (t3 == t) ? dxdx1d : dx1d;
          }
        } else {
          curGradient[t2] *= val1d;

          for (size_t t3 = 0; t3 < d; t3++) {
            curHessian(t2, t3) *= (t3 == t) ? dx1d : val1d;
          }
        }
      }
    }

    result += alpha[i] * curValue;
    gradient.add(curGradient);
    hessian.add(curHessian);
  }

  return result;
}