C++ mat::col Method Code Examples

This article collects typical usage examples of the C++ arma::mat::col method. If you have been struggling with questions such as what mat::col does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of arma::mat, the class this method belongs to.


The following presents 15 code examples of the mat::col method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
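
Before diving into the examples, here is a minimal standalone sketch of what mat::col provides (the matrix contents are arbitrary): a column can be read into a vector, used directly in expressions, and written in place.

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat A(3, 4, arma::fill::randu);  // 3x4 matrix of uniform random values

  arma::vec second = A.col(1);           // copy column 1 into a vector
  double n = arma::norm(A.col(1), 2);    // columns work directly in expressions

  A.col(0) = arma::zeros<arma::vec>(3);  // columns are also writable views
  A.col(2) += 2.0 * A.col(3);            // in-place arithmetic on a column

  std::cout << "norm of column 1: " << n << std::endl;
  second.print("saved copy of column 1:");
  return 0;
}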

Example 1: Evaluate

double RegularizedSVDFunction::Evaluate(const arma::mat& parameters) const
{
  // The cost for the optimization is as follows:
  //          f(u, v) = sum((rating(i, j) - u(i).t() * v(j))^2)
  // The sum is over all the ratings in the rating matrix.
  // 'i' points to the user and 'j' points to the item being considered.
  // The regularization term is added to the above cost, where the vectors u(i)
  // and v(j) are regularized for each rating they contribute to.

  double cost = 0.0;

  for (size_t i = 0; i < data.n_cols; i++)
  {
    // Indices for accessing the correct parameter columns.
    const size_t user = data(0, i);
    const size_t item = data(1, i) + numUsers;

    // Calculate the squared error in the prediction.
    const double rating = data(2, i);
    double ratingError = rating - arma::dot(parameters.col(user),
                                            parameters.col(item));
    double ratingErrorSquared = ratingError * ratingError;

    // Calculate the regularization penalty corresponding to the parameters.
    double userVecNorm = arma::norm(parameters.col(user), 2);
    double itemVecNorm = arma::norm(parameters.col(item), 2);
    double regularizationError = lambda * (userVecNorm * userVecNorm +
                                           itemVecNorm * itemVecNorm);

    cost += (ratingErrorSquared + regularizationError);
  }

  return cost;
}
Developer: sumedhghaisas, Project: mlpack, Lines: 34, Source: regularized_svd_function.cpp
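
For reference, a stripped-down standalone sketch of the same cost on made-up toy data (the 3 x N layout of data — user row, item row, rating row — mirrors the function above; the parameter values are random):

#include <armadillo>
#include <iostream>

int main()
{
  const size_t rank = 2, numUsers = 2, numItems = 2;
  const double lambda = 0.01;

  // Each column of 'data' is one rating: (user index, item index, rating).
  arma::mat data = { { 0, 1 }, { 0, 1 }, { 4, 2 } };

  // One latent column per user, followed by one per item.
  arma::mat parameters(rank, numUsers + numItems, arma::fill::randu);

  double cost = 0.0;
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    const size_t user = data(0, i);
    const size_t item = data(1, i) + numUsers;
    const double err = data(2, i) -
        arma::dot(parameters.col(user), parameters.col(item));
    cost += err * err + lambda *
        (arma::dot(parameters.col(user), parameters.col(user)) +
         arma::dot(parameters.col(item), parameters.col(item)));
  }
  std::cout << "cost: " << cost << std::endl;
  return 0;
}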

Example 2: corrected

arma::mat Vespucci::Math::Transform::cwt_spdbc_mat(const arma::mat &X, std::string wavelet, arma::uword qscale,
                            double threshold, std::string threshold_method,
                            arma::uword window_size, arma::field<arma::umat> &peak_positions,
                            arma::mat &baselines)
{
    baselines.set_size(X.n_rows, X.n_cols);
    arma::vec baseline;
    arma::vec spectrum;
    arma::vec current_corrected;
    arma::mat corrected(X.n_rows, X.n_cols);
    arma::umat current_peakpos;
    peak_positions.set_size(X.n_cols);
    arma::uword i;
    try{
        for (i = 0; i < X.n_cols; ++i){
            spectrum = X.col(i);
            current_corrected = Vespucci::Math::Transform::cwt_spdbc(spectrum, wavelet,
                                                   qscale, threshold,
                                                   threshold_method, window_size,
                                                   current_peakpos, baseline);

            peak_positions(i) = current_peakpos;
            baselines.col(i) = baseline;
            corrected.col(i) = current_corrected;
        }
    } catch (std::exception& e) {  // catch by reference to avoid slicing
        std::cerr << std::endl << "exception! cwt_spdbc_mat" << std::endl;
        std::cerr << "i = " << i << std::endl;
        std::cerr << e.what();
        throw;  // rethrow the original exception rather than a copy
    }

    return corrected;
}
Developer: GABowers, Project: Vespucci, Lines: 34, Source: cwt.cpp

Example 3: data

void RegularizedSVDFunction<MatType>::Gradient(const arma::mat& parameters,
                                               const size_t start,
                                               GradType& gradient,
                                               const size_t batchSize) const
{
  gradient.zeros(rank, numUsers + numItems);

  // It's possible this could be SIMD-vectorized for additional speedup.
  for (size_t i = start; i < start + batchSize; ++i)
  {
    const size_t user = data(0, i);
    const size_t item = data(1, i) + numUsers;

    // Prediction error for the example.
    const double rating = data(2, i);
    double ratingError = rating - arma::dot(parameters.col(user),
                                            parameters.col(item));

    // Gradient is non-zero only for the parameter columns corresponding to the
    // example.
    gradient.col(user) += 2 * (lambda * parameters.col(user) -
                               ratingError * parameters.col(item));
    gradient.col(item) += 2 * (lambda * parameters.col(item) -
                               ratingError * parameters.col(user));
  }
}
Developer: dasayan05, Project: mlpack, Lines: 26, Source: regularized_svd_function_impl.hpp
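
The per-rating gradient follows from differentiating the Example 1 cost with respect to the user and item columns. A quick finite-difference sketch (toy values; the names here are invented) confirms the 2 * (lambda * u - ratingError * v) form for the user column:

#include <armadillo>
#include <iostream>

int main()
{
  const double lambda = 0.01, rating = 4.0, eps = 1e-6;
  arma::vec u(3, arma::fill::randu), v(3, arma::fill::randu);

  // Cost for a single rating, as a function of the user column u.
  auto cost = [&](const arma::vec& uu) {
    const double err = rating - arma::dot(uu, v);
    return err * err + lambda * (arma::dot(uu, uu) + arma::dot(v, v));
  };

  // Analytic gradient w.r.t. u, matching the expression in the code above.
  const double err = rating - arma::dot(u, v);
  arma::vec analytic = 2 * (lambda * u - err * v);

  // Central finite differences, one coordinate at a time.
  arma::vec numeric(3);
  for (arma::uword k = 0; k < 3; ++k)
  {
    arma::vec up = u, dn = u;
    up(k) += eps; dn(k) -= eps;
    numeric(k) = (cost(up) - cost(dn)) / (2 * eps);
  }

  arma::vec diff = arma::abs(analytic - numeric);
  std::cout << "max abs diff: " << diff.max() << std::endl;
  return 0;
}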

Example 4: Gradient

/**
 * Calculates and stores the gradient values given a set of parameters.
 */
void SoftmaxRegressionFunction::Gradient(const arma::mat& parameters,
                                         arma::mat& gradient) const
{
  // Calculate the class probabilities for each training example. The
  // probabilities for each of the classes are given by:
  // p_j = exp(theta_j' * x_i) / sum(exp(theta_k' * x_i))
  // The sum is calculated over all the classes.
  // x_i is the input vector for a particular training example.
  // theta_j is the parameter vector associated with a particular class.
  arma::mat probabilities;
  GetProbabilitiesMatrix(parameters, probabilities);

  // Calculate the parameter gradients.
  gradient.set_size(parameters.n_rows, parameters.n_cols);
  if (fitIntercept)
  {
    // Treat the intercept term parameters.col(0) separately to avoid
    // the cost of building the matrix [1; data].
    arma::mat inner = probabilities - groundTruth;
    gradient.col(0) =
      inner * arma::ones<arma::mat>(data.n_cols, 1) / data.n_cols +
      lambda * parameters.col(0);
    gradient.cols(1, parameters.n_cols - 1) =
      inner * data.t() / data.n_cols +
      lambda * parameters.cols(1, parameters.n_cols - 1);
  }
  else
  {
    gradient = (probabilities - groundTruth) * data.t() / data.n_cols +
               lambda * parameters;
  }
}
Developer: ml-ai-nlp-ir, Project: mlpack, Lines: 35, Source: softmax_regression_function.cpp
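
GetProbabilitiesMatrix is not shown on this page; a plausible standalone sketch of the computation the comment describes (this is an assumption about its behavior, with a standard max-shift added for numerical stability) might look like:

#include <armadillo>

// Hypothetical helper: p(j | x_i) = exp(theta_j' * x_i) / sum_k exp(theta_k' * x_i).
// Columns of 'data' are examples; row j of 'parameters' is theta_j.
void GetProbabilities(const arma::mat& parameters,
                      const arma::mat& data,
                      arma::mat& probabilities)
{
  // scores(j, i) = theta_j' * x_i for all classes and examples at once.
  arma::mat scores = parameters * data;

  // Shift each column by its max before exponentiating to avoid overflow.
  arma::rowvec colMax = arma::max(scores, 0);
  scores.each_row() -= colMax;

  probabilities = arma::exp(scores);
  arma::rowvec colSum = arma::sum(probabilities, 0);
  probabilities.each_row() /= colSum;  // each column now sums to 1
}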

Example 5: GenerateNoisySines

/**
 * Construct a 2-class dataset out of noisy sines.
 *
 * @param data Input data used to store the noisy sines.
 * @param labels Labels used to store the target class of the noisy sines.
 * @param points Number of points/features in a single sequence.
 * @param sequences Number of sequences for each class.
 * @param noise The noise factor that influences the sines.
 */
void GenerateNoisySines(arma::mat& data,
                        arma::mat& labels,
                        const size_t points,
                        const size_t sequences,
                        const double noise = 0.3)
{
  arma::colvec x = arma::linspace<arma::Col<double> >(0,
      points - 1, points) / points * 20.0;
  arma::colvec y1 = arma::sin(x + arma::as_scalar(arma::randu(1)) * 3.0);
  arma::colvec y2 = arma::sin(x / 2.0 + arma::as_scalar(arma::randu(1)) * 3.0);

  data = arma::zeros(points, sequences * 2);
  labels = arma::zeros(2, sequences * 2);

  for (size_t seq = 0; seq < sequences; seq++)
  {
    data.col(seq) = arma::randu(points) * noise + y1 +
        arma::as_scalar(arma::randu(1) - 0.5) * noise;
    labels(0, seq) = 1;

    data.col(sequences + seq) = arma::randu(points) * noise + y2 +
        arma::as_scalar(arma::randu(1) - 0.5) * noise;
    labels(1, sequences + seq) = 1;
  }
}
Developer: AmesianX, Project: mlpack, Lines: 34, Source: recurrent_network_test.cpp
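
A short usage sketch, assuming the GenerateNoisySines definition above is in scope (the dimensions are arbitrary):

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat data, labels;
  const size_t points = 10, sequences = 5;

  // Two classes of noisy sine sequences, stored one sequence per column.
  GenerateNoisySines(data, labels, points, sequences);

  std::cout << "data:   " << data.n_rows << " x " << data.n_cols << std::endl
            << "labels: " << labels.n_rows << " x " << labels.n_cols << std::endl;
  // Expected: data is 10 x 10, labels is 2 x 10 (one-hot class indicators).
  return 0;
}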

Example 6: gradient_logPosterior_nogammas

// [[Rcpp::export]]
arma::vec gradient_logPosterior_nogammas (double temp, arma::vec event, arma::uvec idGK_fast,
                                 arma::vec event_colSumsW1, arma::mat W1s, arma::vec Bs_gammas,
                                 arma::vec event_colSumsWlong, arma::mat Wlongs, arma::vec alphas,
                                 arma::vec Pw,
                                 arma::vec mean_Bs_gammas, arma::mat Tau_Bs_gammas,
                                 arma::vec mean_alphas, arma::mat Tau_alphas,
                                 double tauBs, double A_tauBs, double B_tauBs) {
    vec Int = Pw % exp(W1s * Bs_gammas + Wlongs * alphas);
    // gradient Bs_gammas
    unsigned int n_Bs_gammas = Bs_gammas.n_rows;
    vec score_Bs_gammas = vec(n_Bs_gammas, fill::zeros);
    for (uword i = 0; i < n_Bs_gammas; ++i) {
        vec H = W1s.col(i) % Int;
        score_Bs_gammas[i] = temp * (event_colSumsW1[i] - sum(H));
    }
    score_Bs_gammas = score_Bs_gammas - tauBs * (Tau_Bs_gammas * (Bs_gammas - mean_Bs_gammas));
    // gradient alphas
    unsigned int n_alphas = alphas.n_rows;
    vec score_alphas = vec(n_alphas, fill::zeros);
    for (uword i = 0; i < n_alphas; ++i) {
        vec H = Wlongs.col(i) % Int;
        score_alphas[i] = temp * (event_colSumsWlong[i] - sum(H));
    }
    score_alphas = score_alphas - (Tau_alphas * (alphas - mean_alphas));
    // gradient tauBs
    vec z = Bs_gammas - mean_Bs_gammas;
    vec score_tauBs = tauBs * (- 0.5 * (z.t() * Tau_Bs_gammas * z) + (A_tauBs - 1) / tauBs - B_tauBs);
    // export results
    vec out = join_cols(score_Bs_gammas, score_alphas);
    out = join_cols(out, score_tauBs);
    return(out);
}
Developer: cran, Project: JMbayes, Lines: 33, Source: fast_score.cpp
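
Each of the two loops above computes sum(M.col(i) % Int) column by column, which is exactly the matrix-vector product M.t() * Int. A small sketch (toy dimensions; the names here are invented) verifying the equivalence for the Bs_gammas block:

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat W1s(100, 5, arma::fill::randu);
  arma::vec Int(100, arma::fill::randu);
  arma::vec event_colSumsW1(5, arma::fill::randu);
  const double temp = 1.0;

  // Loop form, as in gradient_logPosterior_nogammas above.
  arma::vec loopScore(5);
  for (arma::uword i = 0; i < 5; ++i)
    loopScore(i) = temp * (event_colSumsW1(i) - arma::sum(W1s.col(i) % Int));

  // Vectorized form: sum(W1s.col(i) % Int) == (W1s.t() * Int)(i).
  arma::vec vecScore = temp * (event_colSumsW1 - W1s.t() * Int);

  arma::vec diff = arma::abs(loopScore - vecScore);
  std::cout << "max abs diff: " << diff.max() << std::endl;
  return 0;
}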

Example 7: RunGradientDescent

// The step size should be chosen carefully to guarantee convergence given a 
// reasonable number of computations.
int GradientDescent::RunGradientDescent(const Data &data) {
  assert(num_iters_ >= 1);
  const int kNumTrainEx = data.num_train_ex();
  assert(kNumTrainEx >= 1);
  const arma::mat kTrainingFeatures = data.training_features();
  const arma::vec kTrainingLabels = data.training_labels();

  double *j_theta_array = new double[num_iters_];

  // Recall that we are trying to minimize the following cost function:
  // ((training features) * (current weights) - (training labels))^2
  // Each iteration of this algorithm updates the weights as a scaled 
  // version of the gradient of this cost function.
  // This gradient is computed with respect to the weights.
  // Thus, each iteration of gradient descent performs the following:
  // (update) = (step size) * ((training features) * (current weights) - (training labels)) * (training features)
  // (new weights) = (current weights) - (update)
  for(int theta_index=0; theta_index<num_iters_; theta_index++)
  {
    const arma::vec kDiffVec = kTrainingFeatures*theta_-kTrainingLabels;
    const arma::mat kDiffVecTimesTrainFeat = \
      join_rows(kDiffVec % kTrainingFeatures.col(0),\
      kDiffVec % kTrainingFeatures.col(1));
    const arma::vec kThetaNew = theta_-alpha_*(1/(float)kNumTrainEx)*\
      (sum(kDiffVecTimesTrainFeat)).t();
    j_theta_array[theta_index] = ComputeCost(data);
    set_theta(kThetaNew);
  }

  delete [] j_theta_array;

  return 0;
}
Developer: cklcit03, Project: machine-learning, Lines: 35, Source: gradient_descent.cpp
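
The join_rows call above hard-codes two feature columns; for any number of features the same update collapses into a single matrix product. A minimal sketch on toy data (the names here are invented):

#include <armadillo>
#include <iostream>

int main()
{
  // Toy design matrix with an intercept column, and matching labels y = 1 + x.
  arma::mat X = { { 1.0, 1.0 }, { 1.0, 2.0 }, { 1.0, 3.0 } };
  arma::vec y = { 2.0, 3.0, 4.0 };

  arma::vec theta(X.n_cols, arma::fill::zeros);
  const double alpha = 0.1;
  const int numIters = 500;

  for (int i = 0; i < numIters; ++i)
  {
    // (new weights) = (weights) - alpha/m * X' * (X*theta - y),
    // the same update the loop above computes column by column.
    theta -= (alpha / X.n_rows) * (X.t() * (X * theta - y));
  }

  theta.print("theta (should approach [1, 1]):");
  return 0;
}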

Example 8: runtime_error

void
HMM::cacheProbabilities(const arma::mat & data) {
  for (unsigned int i = 0; i < N_; ++i) {
    arma::vec weights = BModels_[i].getWeights();
    for (unsigned int l = 0; l < weights.n_elem; ++l) {
      const GM & gm = BModels_[i].getGM(l);
      gammaLts_[i].col(l) = weights(l) * arma::trans(gm.getDataProb(data));
    }
    B_.col(i) = arma::sum(gammaLts_[i],1);
  }
  if (!B_.is_finite()) {
    //B_.print("B");
    for (unsigned int i = 0; i < N_; ++i) {
      arma::vec test = B_.col(i);
      if (!test.is_finite()) {
        arma::vec weights = BModels_[i].getWeights();
        for (unsigned l = 0; l < weights.n_elem; ++l) {
          arma::vec test2 = gammaLts_[i].col(l);
          if (!test2.is_finite()) {
            const GM & gm = BModels_[i].getGM(l);
            gm.print("gm");
            arma::vec test3 = arma::trans(gm.getDataProb(data));
            //test3.print("test3");
          }
        }
      }
    }
    throw std::runtime_error("probabilities not finite");
  }
}
Developer: buotex, Project: praktikum, Lines: 30, Source: hmm.hpp

Example 9: GradientConstraint

void LovaszThetaSDP::GradientConstraint(const size_t index,
                                        const arma::mat& coordinates,
                                        arma::mat& gradient)
{
//  Log::Debug << "Gradient of constraint " << index << " is " << std::endl;
  if (index == 0) // This is the constraint Tr(X) = 1.
  {
    gradient = 2 * coordinates; // d/dR (Tr(R R^T)) = 2 R.
//    std::cout << gradient;
    return;
  }

//  Log::Debug << "Evaluating gradient of constraint " << index << " with ";
  size_t i = edges(0, index - 1);
  size_t j = edges(1, index - 1);
//  Log::Debug << "i = " << i << " and j = " << j << "." << std::endl;

  // Since the constraint is (R^T R)_ij, the gradient for (x, y) will be (I
  // derived this for one of the MVU constraints):
  //   0     , y != i, y != j
  //   2 R_xj, y  = i, y != j
  //   2 R_xi, y != i, y  = j
  //   4 R_xy, y  = i, y  = j
  // This results in the gradient matrix having two nonzero rows; for row
  // i, the elements are R_nj, where n is the row; for column j, the elements
  // are R_ni.
  gradient.zeros(coordinates.n_rows, coordinates.n_cols);

  gradient.col(i) = coordinates.col(j);
  gradient.col(j) += coordinates.col(i); // In case j = i (shouldn't happen).

//  std::cout << gradient;
}
Developer: 0x0all, Project: mlpack, Lines: 33, Source: aug_lagrangian_test_functions.cpp
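
The case analysis in the comment can be sanity-checked numerically. A small sketch (toy R, hypothetical names) comparing the analytic rule against central finite differences of the constraint dot(R.col(i), R.col(j)):

#include <armadillo>
#include <iostream>

int main()
{
  const arma::uword i = 0, j = 2;
  const double eps = 1e-6;
  arma::mat R(3, 4, arma::fill::randu);  // toy coordinates

  // Analytic gradient of dot(R.col(i), R.col(j)), following the rule above.
  // The += on column j also handles the i == j case (factor of 2).
  arma::mat analytic(3, 4, arma::fill::zeros);
  analytic.col(i) = R.col(j);
  analytic.col(j) += R.col(i);

  // Central finite differences over every entry of R.
  arma::mat numeric(3, 4);
  for (arma::uword r = 0; r < R.n_rows; ++r)
  {
    for (arma::uword c = 0; c < R.n_cols; ++c)
    {
      arma::mat up = R, dn = R;
      up(r, c) += eps; dn(r, c) -= eps;
      numeric(r, c) = (arma::dot(up.col(i), up.col(j)) -
                       arma::dot(dn.col(i), dn.col(j))) / (2 * eps);
    }
  }

  arma::mat diff = arma::abs(analytic - numeric);
  std::cout << "max abs diff: " << diff.max() << std::endl;
  return 0;
}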

Example 10: ResetParameter

void FFN<
LayerTypes, OutputLayerType, InitializationRuleType, PerformanceFunction
>::Predict(arma::mat& predictors, arma::mat& responses)
{
  deterministic = true;

  arma::mat responsesTemp;
  ResetParameter(network);
  Forward(arma::mat(predictors.colptr(0), predictors.n_rows, 1, false, true),
      network);
  OutputPrediction(responsesTemp, network);

  responses = arma::mat(responsesTemp.n_elem, predictors.n_cols);
  responses.col(0) = responsesTemp.col(0);

  for (size_t i = 1; i < predictors.n_cols; i++)
  {
    Forward(arma::mat(predictors.colptr(i), predictors.n_rows, 1, false, true),
        network);

    responsesTemp = arma::mat(responses.colptr(i), responses.n_rows, 1, false,
        true);
    OutputPrediction(responsesTemp, network);
    responses.col(i) = responsesTemp.col(0);
  }
}
Developer: GYengera, Project: mlpack, Lines: 26, Source: ffn_impl.hpp

Example 11: HUpdate

  /**
   * The update rule for the encoding matrix H.
   * The function takes in all the matrices and only changes the
   * value of the H matrix.
   *
   * @param V Input matrix to be factorized.
   * @param W Basis matrix.
   * @param H Encoding matrix to be updated.
   */
  inline void HUpdate(const MatType& V,
                      const arma::mat& W,
                      arma::mat& H)
  {
    arma::mat deltaH;
    deltaH.zeros(H.n_rows, 1);

    const double val = V(currentItemIndex, currentUserIndex);

    // Update H matrix based on the non-zero entry found in WUpdate function.
    deltaH += (val - arma::dot(W.row(currentItemIndex),
        H.col(currentUserIndex))) * W.row(currentItemIndex).t();
    // Add regularization.
    if (kh != 0)
      deltaH -= kh * H.col(currentUserIndex);

    // Apply the update to the current user's column before advancing, so the
    // step lands on the column the delta was computed for.
    H.col(currentUserIndex) += u * deltaH;

    // Move on to the next entry (users index columns of V, items index rows).
    currentUserIndex = currentUserIndex + 1;
    if (currentUserIndex == V.n_cols)
    {
      currentUserIndex = 0;
      currentItemIndex = (currentItemIndex + 1) % V.n_rows;
    }
  }
Developer: AmesianX, Project: mlpack, Lines: 35, Source: svd_complete_incremental_learning.hpp

Example 12: Train

// Fits an alpha and beta parameter according to observation probabilities.
void GammaDistribution::Train(const arma::mat& rdata,
                              const arma::vec& probabilities,
                              const double tol)
{
  // If fittingSet is empty, nothing to do.
  if (arma::size(rdata) == arma::size(arma::mat()))
    return;

  arma::vec meanLogxVec(rdata.n_rows, arma::fill::zeros);
  arma::vec meanxVec(rdata.n_rows, arma::fill::zeros);
  arma::vec logMeanxVec(rdata.n_rows, arma::fill::zeros);

  for (size_t i = 0; i < rdata.n_cols; i++)
  {
    meanLogxVec += probabilities(i) * arma::log(rdata.col(i));
    meanxVec += probabilities(i) * rdata.col(i);
  }

  double totProbability = arma::accu(probabilities);

  meanLogxVec /= totProbability;
  meanxVec /= totProbability;
  logMeanxVec = arma::log(meanxVec);

  // Call the statistics-only GammaDistribution::Train() function to fit the
  // parameters. That function does all the work so we're done.
  Train(logMeanxVec, meanLogxVec, meanxVec, tol);
}
Developer: mlpack, Project: mlpack, Lines: 29, Source: gamma_distribution.cpp
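
The two accumulation loops above are weighted column statistics, and can equivalently be written as matrix-vector products. A sketch with toy positive data:

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat rdata = arma::randu<arma::mat>(2, 50) + 0.1;  // positive toy data
  arma::vec probabilities(50, arma::fill::randu);

  const double totProbability = arma::accu(probabilities);

  // Same statistics as the loops above, as matrix-vector products.
  arma::vec meanLogxVec = arma::log(rdata) * probabilities / totProbability;
  arma::vec meanxVec = rdata * probabilities / totProbability;
  arma::vec logMeanxVec = arma::log(meanxVec);

  meanLogxVec.print("weighted mean of log x per dimension:");
  meanxVec.print("weighted mean of x per dimension:");
  logMeanxVec.print("log of weighted mean x per dimension:");
  return 0;
}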

Example 13: WUpdate

  /**
   * The update rule for the basis matrix W.
   * The function takes in all the matrices and only changes the
   * value of the W matrix.
   *
   * @param V Input matrix to be factorized.
   * @param W Basis matrix to be updated.
   * @param H Encoding matrix.
   */
  inline void WUpdate(const arma::sp_mat& V,
                      arma::mat& W,
                      const arma::mat& H)
  {
    if(!isStart) (*it)++;
    else isStart = false;

    if(*it == V.end())
    {
        delete it;
        it = new arma::sp_mat::const_iterator(V.begin());
    }

    size_t currentUserIndex = it->col();
    size_t currentItemIndex = it->row();

    arma::mat deltaW(1, W.n_cols);
    deltaW.zeros();

    deltaW += (**it - arma::dot(W.row(currentItemIndex), H.col(currentUserIndex)))
                                      * arma::trans(H.col(currentUserIndex));
    if(kw != 0) deltaW -= kw * W.row(currentItemIndex);

    W.row(currentItemIndex) += u*deltaW;
  }
Developer: AmesianX, Project: mlpack, Lines: 34, Source: svd_complete_incremental_learning.hpp
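
Taken together, HUpdate (Example 11) and WUpdate perform one SGD step per rating. A compact, self-contained sketch of the same rank-r factorization loop over the nonzeros of a sparse matrix (toy data; u, kw and kh play the same roles as in the class above):

#include <armadillo>
#include <iostream>

int main()
{
  arma::sp_mat V(20, 30);
  V(3, 5) = 4.0; V(7, 2) = 2.5; V(11, 18) = 5.0;  // a few toy ratings

  const arma::uword rank = 2;
  const double u = 0.01, kw = 0.001, kh = 0.001;  // step size, regularization

  arma::mat W(V.n_rows, rank, arma::fill::randu);  // items x rank
  arma::mat H(rank, V.n_cols, arma::fill::randu);  // rank x users

  const arma::sp_mat& Vc = V;  // const view, for read-only iteration
  for (int epoch = 0; epoch < 500; ++epoch)
  {
    for (arma::sp_mat::const_iterator it = Vc.begin(); it != Vc.end(); ++it)
    {
      const arma::uword i = it.row(), j = it.col();
      const double err = *it - arma::dot(W.row(i), H.col(j));

      // The same deltas as WUpdate/HUpdate above, applied in one pass.
      arma::rowvec dW = u * (err * H.col(j).t() - kw * W.row(i));
      arma::vec dH = u * (err * W.row(i).t() - kh * H.col(j));
      W.row(i) += dW;
      H.col(j) += dH;
    }
  }

  std::cout << "V(3,5) = 4.0, reconstructed: "
            << arma::dot(W.row(3), H.col(5)) << std::endl;
  return 0;
}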

Example 14: Estimate

/**
 * Estimate the Gaussian distribution from the given observations, taking into
 * account the probability of each observation actually being from this
 * distribution.
 */
void GaussianDistribution::Estimate(const arma::mat& observations,
                                    const arma::vec& probabilities)
{
  if (observations.n_cols > 0)
  {
    mean.zeros(observations.n_rows);
    covariance.zeros(observations.n_rows, observations.n_rows);
  }
  else // This will end up just being empty.
  {
    mean.zeros(0);
    covariance.zeros(0);
    return;
  }

  double sumProb = 0;

  // First calculate the mean, and save the sum of all the probabilities for
  // later normalization.
  for (size_t i = 0; i < observations.n_cols; i++)
  {
    mean += probabilities[i] * observations.col(i);
    sumProb += probabilities[i];
  }

  if (sumProb == 0)
  {
    // Nothing in this Gaussian!  At least set the covariance so that it's
    // invertible.
    covariance.diag() += 1e-50;
    return;
  }

  // Normalize.
  mean /= sumProb;

  // Now find the covariance.
  for (size_t i = 0; i < observations.n_cols; i++)
  {
    arma::vec obsNoMean = observations.col(i) - mean;
    covariance += probabilities[i] * (obsNoMean * trans(obsNoMean));
  }

  // This is probably biased, but I don't know how to unbias it.
  covariance /= sumProb;

  // Ensure that the covariance is positive definite.
  if (det(covariance) <= 1e-50)
  {
    Log::Debug << "GaussianDistribution::Estimate(): Covariance matrix is not "
        << "positive definite. Adding perturbation." << std::endl;

    double perturbation = 1e-30;
    while (det(covariance) <= 1e-50)
    {
      covariance.diag() += perturbation;
      perturbation *= 10; // Slow, but we don't want to add too much.
    }
  }
}
Developer: dblalock, Project: mlpack-ios, Lines: 65, Source: gaussian_distribution.cpp
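
The loops above implement the standard weighted-moment formulas; the same mean and covariance can be computed without explicit loops. A sketch with toy observations:

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat observations(3, 100, arma::fill::randn);  // columns are samples
  arma::vec probabilities(100, arma::fill::randu);    // per-sample weights

  const double sumProb = arma::accu(probabilities);

  // Weighted mean: sum_i p_i * x_i / sum_i p_i, as in the first loop above.
  arma::vec mean = observations * probabilities / sumProb;

  // Weighted covariance: sum_i p_i (x_i - mean)(x_i - mean)' / sum_i p_i.
  arma::mat centered = observations;
  centered.each_col() -= mean;
  arma::mat covariance =
      centered * arma::diagmat(probabilities) * centered.t() / sumProb;

  mean.print("weighted mean:");
  covariance.print("weighted covariance:");
  return 0;
}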

Example 15: intervals

// [[Rcpp::depends(BH)]]
// [[Rcpp::depends(RcppArmadillo)]]
arma::mat intervals(arma::mat& means, const arma::mat& sds, const arma::vec& probs, unsigned int n_ahead) {
 
  boost::math::tools::eps_tolerance<double> tol;
  
  arma::mat intv(n_ahead, probs.n_elem);
  
  for (unsigned int i = 0; i < n_ahead; i++) {
    double lower = means.col(i).min() - 2 * sds.col(i).max();
    double upper = means.col(i).max() + 2 * sds.col(i).max();
    for (unsigned int j = 0; j < probs.n_elem; j++) {
      boost::uintmax_t max_iter = 1000;
      objective_gaussian f(means.col(i), sds.col(i), probs(j));
      std::pair<double, double> r =
        boost::math::tools::bisect(f, lower, upper, tol, max_iter);
      if (!tol(r.first, r.second) || (max_iter >= 1000)) {
        max_iter = 10000;
        r = boost::math::tools::bisect(f, 1000 * lower, 1000 * upper, tol, max_iter);
      }
      intv(i, j) = r.first + (r.second - r.first) / 2.0;
      lower = intv(i, j);
    }
    
  }
  return intv;
}
Developer: helske, Project: bssm, Lines: 27, Source: interval.cpp
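
objective_gaussian is defined elsewhere in the bssm sources; a plausible reading (an assumption, not the package's actual code) is that it evaluates the average normal CDF across posterior samples minus the target probability, so that bisection returns the requested quantile of an equally weighted Gaussian mixture:

#include <armadillo>
#include <boost/math/distributions/normal.hpp>
#include <boost/math/tools/roots.hpp>
#include <iostream>

// Hypothetical stand-in for objective_gaussian: the average CDF of a set of
// normals (one per posterior sample), minus the target probability. Its root
// is the 'prob' quantile of the equally weighted mixture.
struct objective_gaussian
{
  arma::vec means, sds;
  double prob;

  double operator()(double x) const
  {
    double cdf = 0.0;
    for (arma::uword k = 0; k < means.n_elem; ++k)
      cdf += boost::math::cdf(boost::math::normal(means(k), sds(k)), x);
    return cdf / means.n_elem - prob;
  }
};

int main()
{
  arma::vec means = { -1.0, 0.0, 1.0 };
  arma::vec sds = { 1.0, 0.5, 1.0 };

  boost::math::tools::eps_tolerance<double> tol;
  boost::uintmax_t max_iter = 1000;
  objective_gaussian f{ means, sds, 0.975 };

  std::pair<double, double> r =
      boost::math::tools::bisect(f, -10.0, 10.0, tol, max_iter);
  std::cout << "0.975 quantile: " << (r.first + r.second) / 2.0 << std::endl;
  return 0;
}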


Note: The arma::mat::col method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Before redistributing or using the code, please consult the License of the corresponding project; do not reproduce this page without permission.