This article collects typical usage examples of the C++ method arma::mat::submat. If you have been wondering what mat::submat does, how to use it, or where to find it used in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, arma::mat.
The following presents 15 code examples of mat::submat, ordered by popularity by default.
Example 1: calculate

double MultiKernel::calculate(arma::mat &x, int r1, arma::mat &x2, int r2){
  int dim1 = x.n_rows;
  int dim2 = x2.n_rows;
  int l = 0, h = 0;
  double result = 0;
  for (size_t i = 0; i < features.size(); i++) {
    // Each feature occupies a contiguous band of columns [l, h - 1].
    h += features[i]->calculateFeatureDimension();
    arma::mat x_1_part = x.submat(0, l, dim1 - 1, h - 1);
    arma::mat x_2_part = x2.submat(0, l, dim2 - 1, h - 1);
    result += this->kernels[i]->calculate(x_1_part, r1, x_2_part, r2);
    l = h;
  }
  return result;
}
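For reference, submat(first_row, first_col, last_row, last_col) takes inclusive bounds. Below is a minimal standalone sketch of the column-band slicing pattern used above; the feature widths (2 and 3) are invented for illustration:

#include <armadillo>

int main()
{
  // 6 samples (rows) x 5 feature columns; two feature groups of width 2 and 3.
  arma::mat x(6, 5, arma::fill::randu);

  // All bounds of submat() are inclusive.
  arma::mat group1 = x.submat(0, 0, x.n_rows - 1, 1);  // columns 0..1
  arma::mat group2 = x.submat(0, 2, x.n_rows - 1, 4);  // columns 2..4

  group1.print("group1:");
  group2.print("group2:");
  return 0;
}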
Example 2: CrossValidation

/**
 * CrossValidation runs a k-fold cross validation on the training data by
 * dividing the training data into k equal disjoint subsets. The model is
 * trained on k - 1 of these subsets and 1 subset is used as validation data.
 *
 * This process is repeated k times, so that each subset serves as the
 * validation data exactly once.
 *
 * @param trainData The data available for training.
 * @param trainLabels The labels corresponding to the training data.
 * @param k The parameter k in k-fold cross validation.
 * @param hiddenLayerSize Hidden layer size.
 * @param maxEpochs Maximum number of epochs.
 * @param trainError Error of predictions on training data.
 * @param validationError Validation error of predictions.
 */
void CrossValidation(arma::mat& trainData,
                     const arma::mat& trainLabels,
                     const size_t k,
                     const size_t hiddenLayerSize,
                     const size_t maxEpochs,
                     double& trainError,
                     double& validationError)
{
  // Number of data points in each subset in k-fold CV.
  size_t validationDataSize = trainData.n_cols / k;
  trainError = validationError = 0.0;

  for (size_t i = 0; i < trainData.n_cols; i += validationDataSize)
  {
    validationDataSize = trainData.n_cols / k;

    // The last fold may be smaller if k does not divide the data evenly.
    if (i + validationDataSize > trainData.n_cols)
      validationDataSize = trainData.n_cols - i;

    // The data subset used as validation data in this iteration: columns
    // [i, i + validationDataSize - 1].
    arma::mat validationTestData = trainData.submat(0, i,
        trainData.n_rows - 1, i + validationDataSize - 1);
    // The labels corresponding to the validation data.
    arma::mat validationTestLabels = trainLabels.submat(0, i,
        trainLabels.n_rows - 1, i + validationDataSize - 1);

    // The collection of the k - 1 subsets used for training in this
    // iteration: copy everything, then shed the validation columns.
    arma::mat validationTrainData = trainData;
    validationTrainData.shed_cols(i, i + validationDataSize - 1);
    // The labels corresponding to the training data.
    arma::mat validationTrainLabels = trainLabels;
    validationTrainLabels.shed_cols(i, i + validationDataSize - 1);

    double tError, vError;
    BuildVanillaNetwork(validationTrainData, validationTrainLabels,
        validationTestData, validationTestLabels, hiddenLayerSize, maxEpochs,
        validationTrainLabels.n_rows, tError, vError);

    trainError += tError;
    validationError += vError;
  }

  trainError /= k;
  validationError /= k;
}
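The splitting idiom above, submat() for the validation fold plus shed_cols() for the remainder, can be tried in isolation. A minimal sketch, assuming nothing beyond Armadillo itself:

#include <armadillo>

int main()
{
  arma::mat data(3, 10, arma::fill::randu);      // 10 points, one per column
  const arma::uword foldSize = data.n_cols / 5;  // k = 5
  const arma::uword i = 0;                       // first fold

  // Validation fold: columns [i, i + foldSize - 1].
  arma::mat validation = data.submat(0, i, data.n_rows - 1, i + foldSize - 1);

  // Training data: copy everything, then shed the fold's columns in place.
  arma::mat train = data;
  train.shed_cols(i, i + foldSize - 1);

  validation.print("validation fold:");
  train.print("remaining training data:");
  return 0;
}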
Example 3: EvalParams

void EvalParams(arma::mat const &parameters, size_t l1, size_t l2, size_t l3,
                std::true_type /* unused */)
{
  // w1, w2, b1 and b2 are not extracted separately; 'parameters' is used
  // directly in their place to avoid copying data. The following
  // representations are used:
  //   w1 <- parameters.submat(0, 0, l1-1, l2-1)
  //   w2 <- parameters.submat(l1, 0, l3-1, l2-1).t()
  //   b1 <- parameters.submat(0, l2, l1-1, l2)
  //   b2 <- parameters.submat(l3, 0, l3, l2-1).t()

  // Compute activations of the hidden and output layers.
  arma::mat tempInput = parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
      arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols);
  hiddenLayerFunc.Forward(tempInput, hiddenLayer);

  tempInput = parameters.submat(l1, 0, l3 - 1, l2 - 1).t() * hiddenLayer +
      arma::repmat(parameters.submat(l3, 0, l3, l2 - 1).t(), 1, data.n_cols);
  outputLayerFunc.Forward(tempInput, outputLayer);

  // Average activations of the hidden layer.
  rhoCap = arma::sum(hiddenLayer, 1) / static_cast<double>(data.n_cols);
  // Difference between the reconstructed data and the original data.
  diff = outputLayer - data;
}
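The packed-parameter layout is easy to miss on first read. Here is a self-contained sketch of the same submat views, with sizes invented for illustration (hidden layer l1 = 3, visible layer l2 = 4, l3 = 2 * l1), plus the hidden-layer forward pass:

#include <armadillo>

int main()
{
  const arma::uword l1 = 3, l2 = 4, l3 = 2 * l1;

  // One flat matrix holding w1, w2' and both bias blocks, laid out as in
  // the comment above: (l3 + 1) x (l2 + 1).
  arma::mat parameters(l3 + 1, l2 + 1, arma::fill::randu);

  arma::mat w1 = parameters.submat(0, 0, l1 - 1, l2 - 1);       // l1 x l2
  arma::mat w2 = parameters.submat(l1, 0, l3 - 1, l2 - 1).t();  // l2 x l1
  arma::vec b1 = parameters.submat(0, l2, l1 - 1, l2);          // l1 x 1
  arma::vec b2 = parameters.submat(l3, 0, l3, l2 - 1).t();      // l2 x 1

  // Hidden-layer pre-activations for a batch of 5 inputs.
  arma::mat data(l2, 5, arma::fill::randu);
  arma::mat hidden = w1 * data + arma::repmat(b1, 1, data.n_cols);
  hidden.print("hidden pre-activations:");
  return 0;
}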
Example 4: AvgCrossValidation

/**
 * AvgCrossValidation takes a dataset, runs CrossValidation "iter" times, and
 * returns the average training and validation errors. It shuffles the
 * dataset in every iteration.
 *
 * @param dataset The dataset, inclusive of the labels. The last "numLabels"
 *                rows are assumed to be the labels the model has to predict.
 * @param numLabels Number of rows that are the output labels in the dataset.
 * @param iter The number of times cross validation has to be run.
 * @param hiddenLayerSize The number of nodes in the hidden layer.
 * @param maxEpochs The maximum number of epochs for the training.
 * @param avgTrainError Average training error.
 * @param avgValidationError Average validation error.
 */
void AvgCrossValidation(arma::mat& dataset,
                        const size_t numLabels,
                        const size_t iter,
                        const size_t hiddenLayerSize,
                        const size_t maxEpochs,
                        double& avgTrainError,
                        double& avgValidationError)
{
  avgValidationError = avgTrainError = 0.0;

  for (size_t i = 0; i < iter; ++i)
  {
    // Shuffle the columns (data points) of the dataset.
    dataset = arma::shuffle(dataset, 1);

    // All rows except the last "numLabels" rows are the features.
    arma::mat trainData = dataset.submat(0, 0,
        dataset.n_rows - 1 - numLabels, dataset.n_cols - 1);
    // The last "numLabels" rows are the labels.
    arma::mat trainLabels = dataset.submat(dataset.n_rows - numLabels, 0,
        dataset.n_rows - 1, dataset.n_cols - 1);

    double trainError, validationError;
    CrossValidation(trainData, trainLabels, 10, hiddenLayerSize, maxEpochs,
        trainError, validationError);

    avgTrainError += trainError;
    avgValidationError += validationError;
  }

  avgTrainError /= iter;
  avgValidationError /= iter;
}
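A corresponding sketch of the row-wise split on its own, with a toy dataset of 4 feature rows and 2 label rows:

#include <armadillo>

int main()
{
  arma::mat dataset(6, 8, arma::fill::randu);
  const arma::uword numLabels = 2;

  // Features: every row except the last numLabels rows.
  arma::mat trainData = dataset.submat(0, 0,
      dataset.n_rows - 1 - numLabels, dataset.n_cols - 1);
  // Labels: the last numLabels rows.
  arma::mat trainLabels = dataset.submat(dataset.n_rows - numLabels, 0,
      dataset.n_rows - 1, dataset.n_cols - 1);

  trainData.print("features:");
  trainLabels.print("labels:");
  return 0;
}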
Example 5: buildRhsLhsMatrix
void subspaceIdMoor::buildRhsLhsMatrix(arma::mat const &gam_inv,
    arma::mat const &gamm_inv, arma::mat const &R_, arma::uword i,
    arma::uword n, arma::uword ny, arma::uword nu,
    arma::mat &RHS, arma::mat &LHS){
  mat RhsUpper = join_horiz(gam_inv * R_.submat((2 * nu + ny) * i, 0,
      2 * (nu + ny) * i - 1, (2 * nu + ny) * i - 1), zeros(n, ny));
  mat RhsLower = R_.submat(nu * i, 0, 2 * nu * i - 1,
      (2 * nu + ny) * i + ny - 1);
  RHS = join_vert(RhsUpper, RhsLower);

  mat LhsUpper = gamm_inv * R_.submat((2 * nu + ny) * i + ny, 0,
      2 * (nu + ny) * i - 1, (2 * nu + ny) * i + ny - 1);
  mat LhsLower = R_.submat((2 * nu + ny) * i, 0,
      (2 * nu + ny) * i + ny - 1, (2 * nu + ny) * i + ny - 1);
  LHS = join_vert(LhsUpper, LhsLower);
}
Example 6: buildNMatrix

void subspaceIdMoor::buildNMatrix(arma::uword k, arma::mat const &M,
    arma::mat const &L1, arma::mat const &L2, arma::mat const &X,
    arma::uword i, arma::uword n, arma::uword ny, arma::mat &N){
  mat Upper, Lower;
  Upper = join_horiz(M.cols((k - 1) * ny, ny * i - 1) -
      L1.cols((k - 1) * ny, ny * i - 1), zeros(n, (k - 1) * ny));
  Lower = join_horiz(-L2.cols((k - 1) * ny, ny * i - 1),
      zeros(ny, (k - 1) * ny));
  N = join_vert(Upper, Lower);

  // For the first block, add an identity to the ny x ny block starting at
  // row n; submat() works as an lvalue here.
  if (k == 1)
    N.submat(n, 0, n + ny - 1, ny - 1) =
        eye(ny, ny) + N.submat(n, 0, n + ny - 1, ny - 1);

  N = N * X;
}
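The k == 1 branch relies on submat() being writable. A minimal sketch of that behaviour:

#include <armadillo>

int main()
{
  arma::mat N(4, 4, arma::fill::zeros);

  // Writing through submat() modifies the block in place.
  N.submat(1, 1, 2, 2) = arma::eye(2, 2) + N.submat(1, 1, 2, 2);
  N.print("N with an identity added to its centre block:");
  return 0;
}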
Example 7: vmat

vmat::vmat(const arma::mat &x, const arma::uvec &rc1, const arma::uvec &rc2) {
  // Non-contiguous submatrix views: submat(uvec, uvec) gathers the rows in
  // the first index vector crossed with the columns in the second.
  arma::mat x11 = x.submat(rc1, rc1);
  arma::mat x12 = x.submat(rc1, rc2);
  arma::mat x21 = x.submat(rc2, rc1);
  arma::mat x22 = x.submat(rc2, rc2);

  proj = x12 * arma::inv(x22);
  vcov = x11 - proj * x21;  // Schur complement
  inv = arma::inv_sympd(vcov);
  loginvsqdet = log(1 / sqrt(arma::det(vcov)));
}
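Unlike the contiguous (row, col) overload used in the other examples, this constructor uses submat(uvec, uvec), which gathers arbitrary, possibly non-contiguous, row and column indices. A standalone sketch of the same Schur-complement computation on a made-up covariance matrix:

#include <armadillo>

int main()
{
  // Build a symmetric positive definite 4x4 "covariance" matrix.
  arma::mat x(4, 4, arma::fill::randu);
  x = x * x.t() + 4 * arma::eye(4, 4);

  arma::uvec rc1 = {0, 2};  // indices of the first block
  arma::uvec rc2 = {1, 3};  // indices of the second block

  arma::mat x11 = x.submat(rc1, rc1);
  arma::mat x12 = x.submat(rc1, rc2);
  arma::mat x22 = x.submat(rc2, rc2);

  // Covariance of block 1 conditional on block 2 (x21 == x12.t() here).
  arma::mat vcov = x11 - x12 * arma::solve(x22, x12.t());
  vcov.print("conditional covariance:");
  return 0;
}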
Example 8: Transform

void ColumnsToBlocks::Transform(const arma::mat& maximalInputs,
                                arma::mat& output)
{
  if (!IsPerfectSquare(maximalInputs.n_rows))
  {
    throw std::runtime_error("maximalInputs.n_rows should be a perfect square");
  }

  if (blockHeight == 0 || blockWidth == 0)
  {
    size_t const squareRows =
        static_cast<size_t>(std::sqrt(maximalInputs.n_rows));
    blockHeight = squareRows;
    blockWidth = squareRows;
  }

  if (blockHeight * blockWidth != maximalInputs.n_rows)
  {
    throw std::runtime_error("blockHeight * blockWidth should be equal "
        "to maximalInputs.n_rows");
  }

  const size_t rowOffset = blockHeight + bufSize;
  const size_t colOffset = blockWidth + bufSize;
  output.ones(bufSize + rows * rowOffset,
              bufSize + cols * colOffset);
  output *= bufValue;

  size_t k = 0;
  const size_t maxSize = std::min(rows * cols, (size_t) maximalInputs.n_cols);
  for (size_t i = 0; i != rows; ++i)
  {
    for (size_t j = 0; j != cols; ++j)
    {
      // Copy the next input column into the (i, j) block of the output.
      const size_t minRow = bufSize + i * rowOffset;
      const size_t minCol = bufSize + j * colOffset;
      const size_t maxRow = minRow + blockHeight - 1;
      const size_t maxCol = minCol + blockWidth - 1;

      output.submat(minRow, minCol, maxRow, maxCol) =
          arma::reshape(maximalInputs.col(k++), blockHeight, blockWidth);

      if (k >= maxSize)
        break;
    }
    // Also leave the outer loop once all columns are placed; otherwise
    // col(k) would be read out of range on the next pass.
    if (k >= maxSize)
      break;
  }

  if (scale)
  {
    const double max = output.max();
    const double min = output.min();
    if ((max - min) != 0)
    {
      output = (output - min) / (max - min) * (maxRange - minRange) + minRange;
    }
  }
}
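The core of Transform() is writing a reshaped column into each cell of a buffered grid. Stripped of the class state, the tiling loop looks like this (block and grid sizes invented for the sketch):

#include <armadillo>

int main()
{
  // Four columns of 9 elements, tiled as a 2x2 grid of 3x3 blocks with a
  // 1-element buffer between them.
  arma::mat inputs(9, 4, arma::fill::randu);
  const arma::uword bh = 3, bw = 3, buf = 1, rows = 2, cols = 2;

  arma::mat output(buf + rows * (bh + buf), buf + cols * (bw + buf));
  output.fill(-1.0);  // buffer value

  arma::uword k = 0;
  for (arma::uword i = 0; i < rows; ++i)
  {
    for (arma::uword j = 0; j < cols; ++j)
    {
      const arma::uword r0 = buf + i * (bh + buf);
      const arma::uword c0 = buf + j * (bw + buf);
      output.submat(r0, c0, r0 + bh - 1, c0 + bw - 1) =
          arma::reshape(inputs.col(k++), bh, bw);
    }
  }
  output.print("tiled output:");
  return 0;
}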
Example 9: MaximalInputs

void MaximalInputs(const arma::mat& parameters, arma::mat& output)
{
  // Extract the first (parameters.n_rows - 1) / 2 rows, dropping the last
  // column, and transpose.
  arma::mat paramTemp(parameters.submat(0, 0, (parameters.n_rows - 1) / 2 - 1,
                                        parameters.n_cols - 2).t());

  double const mean = arma::mean(arma::mean(paramTemp));
  paramTemp -= mean;

  NormalizeColByMax(paramTemp, output);
}
Example 10: optCoef

unsigned int optCoef(arma::mat& weights, const arma::icube& obs,
    const arma::cube& emission, const arma::mat& initk, const arma::cube& beta,
    const arma::mat& scales, arma::mat& coef, const arma::mat& X,
    const arma::ivec& cumsumstate, const arma::ivec& numberOfStates,
    int trace)
{
  int iter = 0;
  double change = 1.0;
  while ((change > 1e-10) && (iter < 100))
  {
    arma::vec tmpvec(X.n_cols * (weights.n_rows - 1));
    bool solve_ok = arma::solve(tmpvec, hCoef(weights, X),
        gCoef(obs, beta, scales, emission, initk, weights, X, cumsumstate,
            numberOfStates));
    if (solve_ok == false)
    {
      return 4;
    }

    // Update all but the first (reference) coefficient column.
    arma::mat coefnew(coef.n_rows, coef.n_cols - 1);
    for (unsigned int i = 0; i < (weights.n_rows - 1); i++)
    {
      coefnew.col(i) = coef.col(i + 1) -
          tmpvec.subvec(i * X.n_cols, (i + 1) * X.n_cols - 1);
    }

    change = arma::accu(arma::abs(
        coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1) - coefnew)) /
        coefnew.n_elem;
    coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1) = coefnew;
    iter++;

    if (trace == 3)
    {
      Rcout << "coefficient optimization iter: " << iter;
      Rcout << " new coefficients: " << std::endl << coefnew << std::endl;
      Rcout << " relative change: " << change << std::endl;
    }

    weights = exp(X * coef).t();
    if (!weights.is_finite())
    {
      return 5;
    }
    weights.each_row() /= sum(weights, 0);
  }
  return 0;
}
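A side note on the submat() calls above: a full-height column range such as coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1) can equivalently be written with cols(). A quick check:

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat coef(3, 4, arma::fill::randu);

  arma::mat a = coef.submat(0, 1, coef.n_rows - 1, coef.n_cols - 1);
  arma::mat b = coef.cols(1, coef.n_cols - 1);

  std::cout << "identical: "
            << (arma::approx_equal(a, b, "absdiff", 0.0) ? "yes" : "no")
            << std::endl;
  return 0;
}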
Example 11: Apply

double mlpack::cf::SVDWrapper<DummyClass>::Apply(const arma::mat& V,
                                                 size_t r,
                                                 arma::mat& W,
                                                 arma::mat& H) const
{
  // Check if the given rank is valid.
  if (r > V.n_rows || r > V.n_cols)
  {
    Log::Info << "Rank " << r << ", given for decomposition is invalid."
        << std::endl;
    r = (V.n_rows > V.n_cols) ? V.n_cols : V.n_rows;
    Log::Info << "Setting decomposition rank to " << r << std::endl;
  }

  // Get the SVD factorization.
  arma::vec sigma;
  arma::svd(W, sigma, H, V);

  // Trim W and H down to the requested rank.
  W = W.submat(0, 0, W.n_rows - 1, r - 1);
  H = H.submat(0, 0, H.n_rows - 1, r - 1);

  // Take only the required singular values.
  sigma = sigma.subvec(0, r - 1);

  // The singular-value matrix is multiplied into W here; it could equally
  // be multiplied into H.
  W = W * arma::diagmat(sigma);

  // Take the transpose of H, as required by the CF module.
  H = arma::trans(H);

  // Reconstruct the matrix.
  arma::mat V_rec = W * H;

  // Return the normalized Frobenius norm of the residual.
  return arma::norm(V - V_rec, "fro") / arma::norm(V, "fro");
}
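The truncation pattern is independent of mlpack and can be reproduced with plain Armadillo. A sketch of a rank-2 approximation and its relative reconstruction error:

#include <armadillo>
#include <iostream>

int main()
{
  arma::mat V(8, 6, arma::fill::randu);
  const arma::uword r = 2;  // target rank

  arma::mat U, W;
  arma::vec s;
  arma::svd(U, s, W, V);    // full SVD; s holds singular values, descending

  // Keep the leading r columns of each factor, as Apply() does above.
  arma::mat Ur = U.submat(0, 0, U.n_rows - 1, r - 1);
  arma::mat Wr = W.submat(0, 0, W.n_rows - 1, r - 1);
  arma::mat Vrec = Ur * arma::diagmat(s.subvec(0, r - 1)) * Wr.t();

  std::cout << "relative error: "
            << arma::norm(V - Vrec, "fro") / arma::norm(V, "fro") << std::endl;
  return 0;
}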
Example 12: loglikelihoodLogitCpp_t

// [[Rcpp::export]]
double loglikelihoodLogitCpp_t(const arma::vec& beta, const arma::mat& sigma,
    const arma::vec& sigmaType, const arma::vec& u, const arma::vec& df,
    const arma::vec& kKi, const arma::vec& kLh, const arma::vec& kLhi,
    const arma::vec& kY, const arma::mat& kX, const arma::mat& kZ)
{
  double value = 0;      // The value to be returned.
  int nObs = kY.n_elem;
  int kP = kX.n_cols;    // Dimension of beta.
  int kK = kZ.n_cols;    // Dimension of u.
  int kR = kKi.n_elem;   // Number of variance components.

  // Sum of y_ij * w_ij - log(1 + exp(w_ij)): the log-likelihood
  // contribution of the observations under the logit link.
  for (int i = 0; i < nObs; i++)
  {
    double wij = 0;
    for (int j = 0; j < kP; j++)
      wij += kX(i, j) * beta(j);
    for (int j = 0; j < kK; j++)
      wij += kZ(i, j) * u(j);
    value += kY(i) * wij - log(1 + exp(wij));
  }

  // Add the density of each block of random effects; submat() pulls the
  // matching diagonal block out of sigma.
  int from = 0;
  int to = -1;
  int counter = 0;
  for (int i = 0; i < kR; i++)
  {
    for (int j = 0; j < kLh(i); j++)
    {
      to += kLhi(counter);
      value += ldmt(u.subvec(from, to), df(counter),
          sigma.submat(from, from, to, to), sigmaType(i));
      from = to + 1;
      counter += 1;
    }
  }
  return value;
}
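The from/to bookkeeping walks the diagonal of sigma block by block. In isolation, with two invented blocks of sizes 2 and 3:

#include <armadillo>

int main()
{
  arma::mat sigma(5, 5, arma::fill::randu);
  arma::vec u(5, arma::fill::randu);
  arma::uvec blockSizes = {2, 3};

  arma::uword from = 0;
  for (arma::uword b = 0; b < blockSizes.n_elem; ++b)
  {
    const arma::uword to = from + blockSizes(b) - 1;
    arma::vec ub = u.subvec(from, to);                // matching subvector
    arma::mat Sb = sigma.submat(from, from, to, to);  // diagonal block
    ub.print("u block:");
    Sb.print("sigma block:");
    from = to + 1;
  }
  return 0;
}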
Example 13: getPowerSpec
// Get a power spectrum from the samples.
arma::mat getPowerSpec(arma::mat samples)
{
  int winpts = round(wintime * data.sampleRate);               // points in a window
  int steppts = round(steptime * data.sampleRate);             // points in a step
  int winnum = (data.totalFrames - winpts) / steppts + 1;      // how many windows
  int nfft = pow(2.0, ceil(log((double) winpts) / log(2.0)));  // fft size

  arma::mat powerSpec(nfft, winnum);
  arma::mat hamming = makeHamming(winpts);

  // For each window, apply the hamming window and get the power spectrum.
  for (int i = 0; i < winnum; i++)
  {
    int start = i * steppts;
    int end = start + winpts - 1;
    arma::mat winsamples = samples.submat(0, start, 0, end);
    winsamples = winsamples % hamming;  // element-wise multiplication
    powerSpec.col(i) = powerFFT(trans(winsamples), nfft);  // fft, then power spectrum
  }
  return powerSpec;
}
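The windowing arithmetic that feeds submat() is easy to test on its own. A sketch with a 1 x 12 signal, 4-point windows, and a 2-point step:

#include <armadillo>

int main()
{
  arma::mat samples(1, 12, arma::fill::randu);
  const int winpts = 4, steppts = 2;
  const int winnum = ((int) samples.n_cols - winpts) / steppts + 1;

  for (int i = 0; i < winnum; i++)
  {
    const int start = i * steppts;
    // One window: row 0, columns [start, start + winpts - 1].
    arma::mat win = samples.submat(0, start, 0, start + winpts - 1);
    win.print("window:");
  }
  return 0;
}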
Example 14: getJacobianForTasks

arma::mat InverseKinematicJacobian::getJacobianForTasks(
    KinematicTree const& tree, std::vector<const Task*> const& tasks,
    arma::mat& jacobianRAW, bool normalize) const
{
  const uint32_t numTasks = tasks.size();
  const uint32_t numCols = tree.getMotorCnt();
  arma::mat jacobian = arma::zeros(1, numCols);

  if ((0 < numTasks) &&
      (0 < numCols)) /* at least one task and at least one motor */
  {
    /* calculate the size of the jacobian and the task vector */
    uint32_t numRows = 0;
    for (const Task* const& task : tasks)
    {
      if (task->hasTarget())
        numRows += task->getDimensionCnt();
    }

    /* build the "big" jacobian by stacking each task's rows */
    jacobian = arma::zeros(numRows, numCols);
    jacobianRAW = arma::zeros(numRows, numCols);
    uint32_t beginRow = 0;
    for (const Task* const& task : tasks)
    {
      if (task->hasTarget())
      {
        const uint32_t endRow = beginRow + task->getDimensionCnt() - 1;
        arma::mat jacobianRawSub;
        jacobian.submat(beginRow, 0, endRow, numCols - 1) =
            task->getJacobianForTask(tree, jacobianRawSub, normalize);
        jacobianRAW.submat(beginRow, 0, endRow, numCols - 1) = jacobianRawSub;
        beginRow = endRow + 1;
      }
    }
  }
  return jacobian;
}
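The stacking idiom, writing each task's rows into a row band of the big matrix, works the same without the kinematics machinery. A sketch with two invented task Jacobians of 2 and 3 rows:

#include <armadillo>
#include <vector>

int main()
{
  const arma::uword numCols = 5;
  std::vector<arma::mat> taskJacobians;
  taskJacobians.emplace_back(2, numCols, arma::fill::randu);  // a 2-DOF task
  taskJacobians.emplace_back(3, numCols, arma::fill::randu);  // a 3-DOF task

  arma::uword numRows = 0;
  for (const arma::mat& j : taskJacobians)
    numRows += j.n_rows;

  arma::mat jacobian = arma::zeros(numRows, numCols);
  arma::uword beginRow = 0;
  for (const arma::mat& j : taskJacobians)
  {
    jacobian.submat(beginRow, 0, beginRow + j.n_rows - 1, numCols - 1) = j;
    beginRow += j.n_rows;
  }
  jacobian.print("stacked jacobian:");
  return 0;
}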
Example 15: predictAll

arma::rowvec MultiKernel::predictAll(arma::mat &newX,
    std::vector<supportData*>& S, int B){
  using namespace arma;

  // Preprocess first.
  preprocess(S, B);

  int n = newX.n_rows;
  mat y_hat(1, 1, fill::zeros);
  mat y(1, 1, fill::zeros);
  rowvec scores(n, fill::ones);

  // Split newX into one column band per feature, as in calculate().
  int dim1 = newX.n_rows;
  int l = 0, h = 0;
  std::vector<arma::mat> new_x;
  for (size_t i = 0; i < this->features.size(); i++) {
    h += features[i]->calculateFeatureDimension();
    arma::mat x_1_part = newX.submat(0, l, dim1 - 1, h - 1);
    new_x.push_back(x_1_part);
    l = h;
  }

  for (int k = 0; k < (int) newX.n_rows; ++k) {
    y(0) = k;
    double current = 0;
    for (size_t i = 0; i < S.size(); ++i) {
      // Split the support data the same way.
      int dim2 = S[i]->x->n_rows;
      l = 0;
      h = 0;
      std::vector<arma::mat> old_x;
      for (size_t j = 0; j < this->features.size(); j++) {
        h += features[j]->calculateFeatureDimension();
        arma::mat x_2_part = S[i]->x->submat(0, l, dim2 - 1, h - 1);
        old_x.push_back(x_2_part);
        l = h;
      }
      for (int yhat = 0; yhat < (int) S[i]->x->n_rows; ++yhat) {
        y_hat(0) = yhat;
        if ((*S[i]->beta)[yhat] != 0) {
          // Sum the per-feature kernel evaluations, weighted by beta.
          for (size_t j = 0; j < this->features.size(); j++) {
            current += (*S[i]->beta)[yhat] *
                this->kernels[j]->calculate(new_x[j], y(0), old_x[j], y_hat(0));
          }
        }
      }
    }
    scores[k] = current;
  }
  return scores;
}