This page collects typical usage examples of the C++ method arma::mat::zeros. If you have been wondering what mat::zeros does in C++, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also browse further usage examples for arma::mat, the class that provides this method.
A total of 15 code examples of mat::zeros are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
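Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two forms of .zeros() that appear throughout: the no-argument form resets an existing matrix to all zeros while keeping its size, and the sized form .zeros(n_rows, n_cols) sets the size and zero-fills in one call.

#include <armadillo>

int main() {
  arma::mat A(4, 5, arma::fill::randu);   // 4x5 matrix with random entries
  A.zeros();                              // reset all elements to zero; size stays 4x5

  arma::mat B;                            // empty matrix
  B.zeros(3, 7);                          // resize to 3x7 and fill with zeros

  arma::mat C = arma::zeros<arma::mat>(2, 2);  // equivalent free-function form
  return 0;
}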
Example 1: solve_P
void uDIIS::solve_P(arma::mat & Pa, arma::mat & Pb) {
  arma::vec sol=get_w();
  // Form weighted density matrix
  Pa.zeros();
  Pb.zeros();
  for(size_t i=0;i<stack.size();i++) {
    Pa+=sol(i)*stack[i].Pa;
    Pb+=sol(i)*stack[i].Pb;
  }
}
Example 2: solve_F
void uDIIS::solve_F(arma::mat & Fa, arma::mat & Fb) {
  arma::vec sol=get_w();
  // Form weighted Fock matrix
  Fa.zeros();
  Fb.zeros();
  for(size_t i=0;i<stack.size();i++) {
    Fa+=sol(i)*stack[i].Fa;
    Fb+=sol(i)*stack[i].Fb;
  }
}
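Both solve_P and solve_F follow the same DIIS extrapolation pattern: the no-argument .zeros() clears the caller's output matrices in place (their dimensions are preserved), and the result is then accumulated as the weighted sum over the stored iterate stack, sum_i w_i * M_i, with the weights coming from get_w().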
Example 3: backwardProb
void HMM<Distribution>::Backward(const arma::mat& dataSeq,
                                 const arma::vec& scales,
                                 arma::mat& backwardProb) const
{
  // Our goal is to calculate the backward probabilities:
  //   P(X_k | o_{k + 1:T}) for all possible states X_k, for each time point k.
  backwardProb.zeros(transition.n_rows, dataSeq.n_cols);
  // The last element probability is 1.
  backwardProb.col(dataSeq.n_cols - 1).fill(1);
  // Now step backwards through all other observations.
  for (size_t t = dataSeq.n_cols - 2; t + 1 > 0; t--)
  {
    for (size_t j = 0; j < transition.n_rows; j++)
    {
      // The backward probability of state j at time t is the sum over all
      // states of the probability of the next state having been a transition
      // from the current state, multiplied by the probability of each of those
      // states emitting the given observation.
      for (size_t state = 0; state < transition.n_rows; state++)
        backwardProb(j, t) += transition(state, j) * backwardProb(state, t + 1)
            * emission[state].Probability(dataSeq.unsafe_col(t + 1));
      // Normalize by the weights from the forward algorithm.
      backwardProb(j, t) /= scales[t + 1];
    }
  }
}
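In equation form, the nested loops implement the scaled backward recursion of the forward-backward algorithm: backwardProb(j, t) = (1 / scales[t + 1]) * sum over states s of transition(s, j) * backwardProb(s, t + 1) * P(o_{t+1} | s). The sized overload backwardProb.zeros(transition.n_rows, dataSeq.n_cols) allocates and clears the whole table in a single call before the recursion starts.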
Example 4: Gradient
void SGDTestFunction::Gradient(const arma::mat& coordinates,
                               const size_t begin,
                               arma::mat& gradient,
                               const size_t batchSize) const
{
  gradient.zeros(3);
  for (size_t i = begin; i < begin + batchSize; ++i)
  {
    switch (visitationOrder(i))
    {
      case 0:
        if (coordinates[0] >= 0)
          gradient[0] += std::exp(-coordinates[0]);
        else
          gradient[0] += -std::exp(coordinates[0]);
        break;
      case 1:
        gradient[1] += 2 * coordinates[1];
        break;
      case 2:
        gradient[2] += 4 * std::pow(coordinates[2], 3) + 6 * coordinates[2];
        break;
    }
  }
  gradient /= batchSize;
}
Example 5: Initialize
inline static void Initialize(const MatType& V,
                              const size_t r,
                              arma::mat& W,
                              arma::mat& H)
{
  const size_t n = V.n_rows;
  const size_t m = V.n_cols;
  if (columnsToAverage > m)
  {
    Log::Warn << "Number of random columns (columnsToAverage) is more than "
        << "the number of columns available in the V matrix; weird results "
        << "may ensue!" << std::endl;
  }
  W.zeros(n, r);
  // Initialize W matrix with random columns.
  for (size_t col = 0; col < r; col++)
  {
    for (size_t randCol = 0; randCol < columnsToAverage; randCol++)
    {
      // .col() does not work in this case, as of Armadillo 3.920.
      W.unsafe_col(col) += V.col(math::RandInt(0, m));
    }
  }
  // Now divide by the number of columns that were averaged.
  W /= columnsToAverage;
  // Initialize H to random values.
  H.randu(r, m);
}
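In other words, each column of W ends up as the average of columnsToAverage randomly chosen columns of V, W.col(c) = (1 / columnsToAverage) * sum of V.col(rand), while H is filled with uniform random values; W.zeros(n, r) provides the zero accumulator that the inner loop adds into.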
Example 6: GradientConstraint
void LovaszThetaSDP::GradientConstraint(const size_t index,
                                        const arma::mat& coordinates,
                                        arma::mat& gradient)
{
  // Log::Debug << "Gradient of constraint " << index << " is " << std::endl;
  if (index == 0) // This is the constraint Tr(X) = 1.
  {
    gradient = 2 * coordinates; // d/dR (Tr(R R^T)) = 2 R.
    // std::cout << gradient;
    return;
  }
  // Log::Debug << "Evaluating gradient of constraint " << index << " with ";
  size_t i = edges(0, index - 1);
  size_t j = edges(1, index - 1);
  // Log::Debug << "i = " << i << " and j = " << j << "." << std::endl;
  // Since the constraint is (R^T R)_ij, the gradient for (x, y) will be (I
  // derived this for one of the MVU constraints):
  //   0     , y != i, y != j
  //   2 R_xj, y  = i, y != j
  //   2 R_xi, y != i, y  = j
  //   4 R_xy, y  = i, y  = j
  // This results in the gradient matrix having two nonzero columns; for column
  // i, the elements are R_nj, where n is the row; for column j, the elements
  // are R_ni.
  gradient.zeros(coordinates.n_rows, coordinates.n_cols);
  gradient.col(i) = coordinates.col(j);
  gradient.col(j) += coordinates.col(i); // In case j = i (shouldn't happen).
  // std::cout << gradient;
}
Example 7: Gradient
void RegularizedSVDFunction::Gradient(const arma::mat& parameters,
                                      arma::mat& gradient) const
{
  // For an example with rating corresponding to user 'i' and item 'j', the
  // gradients for the parameters are as follows:
  //   grad(u(i)) = lambda * u(i) - error * v(j)
  //   grad(v(j)) = lambda * v(j) - error * u(i)
  // 'error' is the prediction error for that example, which is:
  //   rating(i, j) - u(i).t() * v(j)
  // The full gradient is calculated by summing the contributions over all the
  // training examples.
  gradient.zeros(rank, numUsers + numItems);
  for (size_t i = 0; i < data.n_cols; i++)
  {
    // Indices for accessing the correct parameter columns.
    const size_t user = data(0, i);
    const size_t item = data(1, i) + numUsers;
    // Prediction error for the example.
    const double rating = data(2, i);
    double ratingError = rating - arma::dot(parameters.col(user),
                                            parameters.col(item));
    // Gradient is non-zero only for the parameter columns corresponding to the
    // example.
    gradient.col(user) += 2 * (lambda * parameters.col(user) -
                               ratingError * parameters.col(item));
    gradient.col(item) += 2 * (lambda * parameters.col(item) -
                               ratingError * parameters.col(user));
  }
}
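Written out per training example, the update is gradient.col(user) += 2 * (lambda * u_user - e * v_item) and gradient.col(item) += 2 * (lambda * v_item - e * u_user), with e = rating - dot(u_user, v_item). The call gradient.zeros(rank, numUsers + numItems) sizes the gradient so that the user and item factor columns sit side by side in a single matrix and untouched columns remain zero.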
Example 8: Gradient
//! Calculate the gradient of one of the individual functions.
void GeneralizedRosenbrockFunction::Gradient(const arma::mat& coordinates,
                                             const size_t i,
                                             arma::mat& gradient) const
{
  gradient.zeros(n);
  gradient[i] = 400 * (std::pow(coordinates[i], 3) - coordinates[i] *
      coordinates[i + 1]) + 2 * (coordinates[i] - 1);
  gradient[i + 1] = 200 * (coordinates[i + 1] - std::pow(coordinates[i], 2));
}
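These two assignments are the partial derivatives of the i-th term of the generalized Rosenbrock function, f_i(x) = 100 * (x_{i+1} - x_i^2)^2 + (1 - x_i)^2: differentiating gives df_i/dx_i = 400 * (x_i^3 - x_i * x_{i+1}) + 2 * (x_i - 1) and df_i/dx_{i+1} = 200 * (x_{i+1} - x_i^2). The call gradient.zeros(n) guarantees that all other entries of this sparse per-term gradient stay zero.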
Example 9: Estimate
void HMM<Distribution>::Smooth(const arma::mat& dataSeq,
                               arma::mat& smoothSeq) const
{
  // First run the forward algorithm.
  arma::mat stateProb;
  Estimate(dataSeq, stateProb);
  // Compute expected emissions.
  // Will not work for distributions without a Mean() function.
  smoothSeq.zeros(dimensionality, dataSeq.n_cols);
  for (size_t i = 0; i < emission.size(); i++)
    smoothSeq += emission[i].Mean() * stateProb.row(i);
}
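Each column of smoothSeq is thus the expectation of the emission under the state probabilities returned by Estimate(): smoothSeq.col(t) = sum over states i of stateProb(i, t) * emission[i].Mean(), which is why the comment warns that distributions without a Mean() member cannot be used here.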
Example 10: Evaluate
void RNN<
    LayerTypes, OutputLayerType, InitializationRuleType, PerformanceFunction
>::Gradient(const arma::mat& /* unused */,
            const size_t i,
            arma::mat& gradient)
{
  if (gradient.is_empty())
  {
    gradient = arma::zeros<arma::mat>(parameter.n_rows, parameter.n_cols);
  }
  else
  {
    gradient.zeros();
  }
  Evaluate(parameter, i, false);
  arma::mat currentGradient = arma::mat(gradient.n_rows, gradient.n_cols);
  NetworkGradients(currentGradient, network);
  const arma::mat input = arma::mat(predictors.colptr(i), predictors.n_rows,
      1, false, true);
  // Iterate through the input sequence and perform the feed backward pass.
  for (seqNum = seqLen - 1; seqNum >= 0; seqNum--)
  {
    // Load the network activation for the upcoming backward pass.
    LoadActivations(input.rows(seqNum * inputSize, (seqNum + 1) *
        inputSize - 1), network);
    // Perform the backward pass.
    if (seqOutput)
    {
      arma::mat seqError = error.unsafe_col(seqNum);
      Backward(seqError, network);
    }
    else
    {
      Backward(error, network);
    }
    // Link the parameters and update the gradients.
    LinkParameter(network);
    UpdateGradients<>(network);
    // Update the overall gradient.
    gradient += currentGradient;
    if (seqNum == 0) break;
  }
}
Example 11: Gradient
//! Calculate the gradient of one of the individual functions.
void GeneralizedRosenbrockFunction::Gradient(const arma::mat& coordinates,
                                             const size_t i,
                                             arma::mat& gradient,
                                             const size_t batchSize) const
{
  gradient.zeros(n);
  for (size_t j = i; j < i + batchSize; ++j)
  {
    const size_t p = visitationOrder[j];
    gradient[p] = 400 * (std::pow(coordinates[p], 3) - coordinates[p] *
        coordinates[p + 1]) + 2 * (coordinates[p] - 1);
    gradient[p + 1] = 200 * (coordinates[p + 1] - std::pow(coordinates[p], 2));
  }
}
Example 12: moveSize
void SA<FunctionType, CoolingScheduleType>::MoveControl(const size_t nMoves,
                                                        arma::mat& accept)
{
  arma::mat target;
  target.copy_size(accept);
  target.fill(0.44);
  moveSize = arma::log(moveSize);
  moveSize += gain * (accept / (double) nMoves - target);
  moveSize = arma::exp(moveSize);
  // To avoid the use of element-wise arma::min(), which is only available in
  // Armadillo after v3.930, we use a for loop here instead.
  for (size_t i = 0; i < accept.n_elem; ++i)
    moveSize(i) = (moveSize(i) > maxMove(i)) ? maxMove(i) : moveSize(i);
  accept.zeros();
}
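Element-wise, the three moveSize lines compute moveSize = exp(log(moveSize) + gain * (accept / nMoves - 0.44)), a multiplicative update that pushes each per-coordinate acceptance ratio toward the target value of 0.44; every entry is then clamped at maxMove, and accept.zeros() resets the acceptance counters for the next batch of moves.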
Example 13: find_P
double find_P(const arma::mat& X, const arma::mat& Y, double sigma2,
              float outliers, arma::vec& P1, arma::vec& Pt1, arma::mat& PX,
              bool use_fgt, const float epsilon) {
  P1.zeros();
  Pt1.zeros();
  PX.zeros();
  const arma::uword N = X.n_rows;
  const arma::uword M = Y.n_rows;
  const arma::uword D = Y.n_cols;
  const double h = std::sqrt(2 * sigma2);
  const double ndi = (outliers * M * std::pow(2 * M_PI * sigma2, 0.5 * D)) /
                     ((1 - outliers) * N);
  arma::vec q = arma::ones<arma::vec>(M);
  fgt::GaussTransformUnqPtr transformY;
  if (use_fgt) {
    transformY = fgt::choose_gauss_transform(Y, h, epsilon);
  } else {
    transformY.reset(new fgt::Direct(Y, h));
  }
  arma::vec denomP = transformY->compute(X, q);
  denomP = denomP + ndi;
  Pt1 = 1 - ndi / denomP;
  q = 1 / denomP;
  fgt::GaussTransformUnqPtr transformX;
  if (use_fgt) {
    transformX = fgt::choose_gauss_transform(X, h, epsilon);
  } else {
    transformX.reset(new fgt::Direct(X, h));
  }
  P1 = transformX->compute(Y, q);
  for (arma::uword i = 0; i < D; ++i) {
    q = X.col(i) / denomP;
    arma::vec c = PX.unsafe_col(i);
    PX.col(i) = transformX->compute(Y, q);
  }
  return -arma::sum(arma::log(denomP)) + D * N * std::log(sigma2) / 2;
}
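This appears to be the E-step of a Coherent Point Drift style registration: denomP collects, for every point of X, the sum of Gaussian affinities to all points of Y plus the uniform-outlier term ndi; Pt1 and P1 then correspond to the column and row sums of the resulting correspondence matrix, and PX is filled column by column with its product with X. The three .zeros() calls at the top simply clear the caller-provided outputs before they are overwritten.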
Example 14: Forward
void HMM<Distribution>::Filter(const arma::mat& dataSeq,
                               arma::mat& filterSeq,
                               size_t ahead) const
{
  // First run the forward algorithm.
  arma::mat forwardProb;
  arma::vec scales;
  Forward(dataSeq, scales, forwardProb);
  // Propagate state ahead.
  if (ahead != 0)
    forwardProb = pow(transition, ahead) * forwardProb;
  // Compute expected emissions.
  // Will not work for distributions without a Mean() function.
  filterSeq.zeros(dimensionality, dataSeq.n_cols);
  for (size_t i = 0; i < emission.size(); i++)
    filterSeq += emission[i].Mean() * forwardProb.row(i);
}
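Like Smooth (Example 9), Filter forms the expected emission at every time point, filterSeq.col(t) = sum over states i of forwardProb(i, t) * emission[i].Mean(), but it uses only the forward probabilities and, when ahead != 0, first propagates them with pow(transition, ahead) to look ahead; filterSeq.zeros(dimensionality, dataSeq.n_cols) again serves as allocation plus clearing in one call.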
Example 15: norm
double mlpack::cf::SVDWrapper<Factorizer>::Apply(const arma::mat& V,
                                                 arma::mat& W,
                                                 arma::mat& sigma,
                                                 arma::mat& H) const
{
  // get svd factorization
  arma::vec E;
  factorizer.Apply(W, E, H, V);
  // construct sigma matrix
  sigma.zeros(V.n_rows, V.n_cols);
  for (size_t i = 0; i < sigma.n_rows && i < sigma.n_cols; i++)
    sigma(i, i) = E(i, 0);
  arma::mat V_rec = W * sigma * arma::trans(H);
  // return normalized frobenius error
  return arma::norm(V - V_rec, "fro") / arma::norm(V, "fro");
}
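Here sigma.zeros(V.n_rows, V.n_cols) prepares a rectangular zero matrix whose leading diagonal is then filled with the singular values held in E, and the returned value is the normalized Frobenius reconstruction error, norm(V - W * sigma * H^T, "fro") / norm(V, "fro").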