This article collects typical usage examples of the C++ ArrayXXd class. If you are wondering how ArrayXXd is used in practice, or are looking for concrete ArrayXXd code, the selected class examples below may help.
The following shows 15 code examples of the ArrayXXd class, sorted by popularity by default.
Example 1: Exception
void CMT::WhiteningTransform::initialize(const ArrayXXd& input, int dimOut) {
    if(input.cols() < input.rows())
        throw Exception("Too few inputs to compute whitening transform.");

    mMeanIn = input.rowwise().mean();

    // compute covariances
    MatrixXd covXX = covariance(input);

    // input whitening
    SelfAdjointEigenSolver<MatrixXd> eigenSolver;
    eigenSolver.compute(covXX);

    Array<double, 1, Dynamic> eigenvalues = eigenSolver.eigenvalues();
    MatrixXd eigenvectors = eigenSolver.eigenvectors();

    // don't whiten directions with near-zero variance
    for(int i = 0; i < eigenvalues.size(); ++i)
        if(eigenvalues[i] < 1e-7)
            eigenvalues[i] = 1.;

    mPreIn = (eigenvectors.array().rowwise() * eigenvalues.sqrt().cwiseInverse()).matrix()
        * eigenvectors.transpose();
    mPreInInv = (eigenvectors.array().rowwise() * eigenvalues.sqrt()).matrix()
        * eigenvectors.transpose();

    mMeanOut = VectorXd::Zero(dimOut);
    mPreOut = MatrixXd::Identity(dimOut, dimOut);
    mPreOutInv = MatrixXd::Identity(dimOut, dimOut);
    mPredictor = MatrixXd::Zero(dimOut, input.rows());
    mGradTransform = MatrixXd::Zero(dimOut, input.rows());
    mLogJacobian = 1.;
}
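For context, a whitening matrix computed this way is typically applied as an affine map: subtract the input mean from each column of data, then multiply by mPreIn. A minimal standalone sketch in plain Eigen, with hypothetical names (not part of the CMT API):

#include <Eigen/Dense>
using namespace Eigen;

// Apply an affine whitening transform to column-wise data:
// subtract the input mean, then multiply by the whitening matrix.
MatrixXd whiten(const MatrixXd& data, const VectorXd& meanIn, const MatrixXd& preIn) {
    return preIn * (data.colwise() - meanIn);
}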
Example 2: setPosteriorSample
void NestedSampler::setPosteriorSample(ArrayXXd newPosteriorSample)
{
    Ndimensions = newPosteriorSample.rows();
    int Nsamples = newPosteriorSample.cols();
    posteriorSample.resize(Ndimensions, Nsamples);
    posteriorSample = newPosteriorSample;
}
Example 3: logLikelihood
/**
 * \brief Calculate the log-likelihood of a linear regression contained
 * in a linear_reg object.
 *
 * @param X The design matrix.
 */
void linear_reg::logLikelihood(const mematrix<double>& X) {
    /*
    loglik = 0.;
    double ss = 0;
    for (int i = 0; i < rdata.nids; i++) {
        double resid = rdata.Y[i] - beta.get(0, 0); // intercept
        for (int j = 1; j < beta.nrow; j++) resid -= beta.get(j, 0) * X.get(i, j);
        // residuals[i] = resid;
        ss += resid * resid;
    }
    sigma2 = ss / N;
    */
    //cout << "estimate " << rdata.nids << "\n";
    //(rdata.X).print();
    //for (int i = 0; i < rdata.nids; i++) cout << rdata.masked_data[i] << " ";
    //cout << endl;
    loglik = 0.;
    double halfrecsig2 = .5 / sigma2;
    //loglik -= halfrecsig2 * residuals[i] * residuals[i];
    double intercept = beta.get(0, 0);
    residuals.data = reg_data.Y.data.array() - intercept;

    ArrayXXd betacol =
        beta.data.block(1, 0, beta.data.rows() - 1, 1).array().transpose();
    ArrayXXd resid_sub = (X.data.block(0, 1, X.data.rows(), X.data.cols() - 1)
        * betacol.matrix().asDiagonal()).rowwise().sum();
    //std::cout << resid_sub << std::endl;
    residuals.data -= resid_sub.matrix();
    //residuals[i] -= resid_sub;
    loglik -= (residuals.data.array().square() * halfrecsig2).sum();
    loglik -= static_cast<double>(reg_data.nids) * log(sqrt(sigma2));
}
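The vectorized block above does the same work as the commented-out per-sample loop. As a standalone illustration of the terms being accumulated (hypothetical names, plain Eigen rather than the mematrix wrappers):

#include <Eigen/Dense>
#include <cmath>
using namespace Eigen;

// Gaussian log-likelihood of y ~ N(intercept + X*beta, sigma2), mirroring the
// two terms above (the constant -n/2 * log(2*pi) is omitted there as well).
double gaussianLogLik(const VectorXd& y, const MatrixXd& X, const VectorXd& beta,
                      double intercept, double sigma2) {
    VectorXd residuals = y - X * beta;
    residuals.array() -= intercept;
    return -0.5 * residuals.squaredNorm() / sigma2
           - y.size() * std::log(std::sqrt(sigma2));
}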
Example 4: Exception
Array<int, 1, Dynamic> CMT::MCBM::samplePrior(const MatrixXd& input) const {
    if(input.rows() != dimIn())
        throw Exception("Inputs have wrong dimensionality.");

    ArrayXXd featureEnergy = mWeights * (mFeatures.transpose() * input).array().square().matrix();
    ArrayXXd biasEnergy = mInputBias.transpose() * input;
    ArrayXXd predictorEnergy = mPredictors * input;

    ArrayXXd tmp0 = (featureEnergy + biasEnergy).colwise() + mPriors.array();
    ArrayXXd tmp1 = (tmp0 + predictorEnergy).colwise() + mOutputBias.array();

    ArrayXXd logPrior = tmp0 + tmp1;
    logPrior.rowwise() -= logSumExp(logPrior);
    ArrayXXd prior = logPrior.exp();

    Array<int, 1, Dynamic> labels(input.cols());

    #pragma omp parallel for
    for(int j = 0; j < input.cols(); ++j) {
        int i = 0;
        double urand = static_cast<double>(rand()) / (static_cast<long>(RAND_MAX) + 1l);
        double cdf;

        // compute index
        for(cdf = prior(0, j); cdf < urand; cdf += prior(i, j))
            ++i;

        labels[j] = i;
    }

    return labels;
}
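The inner loop is inverse-CDF sampling from the categorical distribution stored in each column of prior. The same step in isolation, sketched with <random> instead of rand() (assumes each column of prob already sums to one; names are made up):

#include <Eigen/Dense>
#include <random>
using namespace Eigen;

// Draw one index per column of a column-stochastic probability array.
ArrayXi sampleCategorical(const ArrayXXd& prob, std::mt19937& rng) {
    std::uniform_real_distribution<double> unif(0.0, 1.0);
    ArrayXi labels(prob.cols());

    for(int j = 0; j < prob.cols(); ++j) {
        double u = unif(rng);
        int i = 0;
        double cdf;

        // walk the cumulative distribution until it exceeds u
        for(cdf = prob(0, j); cdf < u && i + 1 < prob.rows(); cdf += prob(i, j))
            ++i;

        labels[j] = i;
    }

    return labels;
}

The extra bound on i guards against rounding error running past the last row; the original relies on urand being strictly less than one.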
Example 5: arrayMultiplierRowWise
/*
   Multiply each row of u by temp
*/
MatrixXd arrayMultiplierRowWise(MatrixXd u, ArrayXXd temp, int n) {
    ArrayXXd uArray = u.array();
    int i;
    for(i = 0; i < n; i++) {
        uArray.row(i) *= temp;
    }
    return uArray.matrix();
}
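Note that the element-wise *= above only works if temp is a single row with as many columns as u. In that case the explicit loop can also be written with Eigen's row-wise broadcasting, as already used in Example 1; a sketch (hypothetical name, applies the factors to every row of u):

// Broadcast one row of factors across every row of u.
MatrixXd arrayMultiplierRowWiseBroadcast(const MatrixXd& u, const ArrayXXd& temp) {
    return (u.array().rowwise() * temp.row(0)).matrix();
}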
Example 6: result
ArrayXXd CMT::tanh(const ArrayXXd& arr) {
    ArrayXXd result(arr.rows(), arr.cols());

    #pragma omp parallel for
    for(int i = 0; i < arr.size(); ++i)
        result(i) = std::tanh(arr(i));

    return result;
}
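Recent Eigen versions (3.3 and later, if I remember correctly) provide the same operation as a built-in coefficient-wise array method, which avoids the hand-written loop:

// Coefficient-wise tanh using Eigen's built-in array method.
ArrayXXd tanhBuiltin(const ArrayXXd& arr) {
    return arr.tanh();
}

The explicit loop above remains useful on older Eigen versions or when OpenMP parallelism is wanted explicitly.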
Example 7: outputs
ArrayXXd CMT::HistogramNonlinearity::operator()(const ArrayXXd& inputs) const {
    ArrayXXd outputs(inputs.rows(), inputs.cols());

    for(int i = 0; i < inputs.rows(); ++i)
        for(int j = 0; j < inputs.cols(); ++j)
            outputs(i, j) = mHistogram[bin(inputs(i, j))] + mEpsilon;

    return outputs;
}
Example 8: Exception
ArrayXXd CMT::HistogramNonlinearity::gradient(const ArrayXXd& inputs) const {
    if(inputs.rows() != 1)
        throw Exception("Data has to be stored in one row.");

    ArrayXXd gradient = ArrayXXd::Zero(mHistogram.size(), inputs.cols());

    for(int i = 0; i < inputs.rows(); ++i)
        for(int j = 0; j < inputs.cols(); ++j) // iterate over columns, not rows
            gradient(bin(inputs(i, j)), j) = 1;

    return gradient;
}
Example 9: Exception
MatrixXd CMT::MLR::predict(const MatrixXd& input) const {
    if(input.rows() != mDimIn)
        throw Exception("Inputs have wrong dimensionality.");

    MatrixXd output = MatrixXd::Zero(mDimOut, input.cols());

    // distribution over outputs
    ArrayXXd prob = (mWeights * input).colwise() + mBiases;
    prob.rowwise() -= logSumExp(prob);
    prob = prob.exp();

    return prob;
}
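Examples 4, 9 and 14 all rely on a logSumExp helper that is not shown here. A common column-wise formulation (a guess at the intent, not necessarily CMT's actual code) shifts by the column maximum so the subsequent exp() cannot overflow:

// Numerically stable log(sum(exp(x))) over each column of an array.
Array<double, 1, Dynamic> logSumExpColwise(const ArrayXXd& x) {
    Array<double, 1, Dynamic> colMax = x.colwise().maxCoeff();
    return colMax + (x.rowwise() - colMax).exp().colwise().sum().log();
}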
Example 10: sampleBinomial
ArrayXXi CMT::sampleBinomial(const ArrayXXi& n, const ArrayXXd& p) {
    if(n.rows() != p.rows() || n.cols() != p.cols())
        throw Exception("n and p must be of the same size.");

    ArrayXXi samples = ArrayXXi::Zero(n.rows(), n.cols());

    #pragma omp parallel for
    for(int i = 0; i < samples.size(); ++i) {
        // very naive algorithm for generating binomial samples
        for(int k = 0; k < n(i); ++k)
            if(rand() / static_cast<double>(RAND_MAX) < p(i))
                samples(i) += 1;
    }

    return samples;
}
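The loop above draws each entry from n(i) Bernoulli trials, which is O(n) per entry, and calls rand() inside an OpenMP region even though rand() is not guaranteed to be thread-safe. A hedged, single-threaded alternative sketch using the standard library's binomial distribution:

#include <Eigen/Dense>
#include <random>
using namespace Eigen;

// Binomial samples via <random>; one distribution object per entry keeps the sketch simple.
ArrayXXi sampleBinomialStd(const ArrayXXi& n, const ArrayXXd& p, std::mt19937& rng) {
    ArrayXXi samples(n.rows(), n.cols());
    for(int i = 0; i < samples.size(); ++i) {
        std::binomial_distribution<int> binom(n(i), p(i));
        samples(i) = binom(rng);
    }
    return samples;
}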
Example 11:
void CMT::HistogramNonlinearity::initialize(
    const ArrayXXd& inputs,
    const ArrayXXd& outputs,
    int numBins)
{
    double max = inputs.maxCoeff();
    double min = inputs.minCoeff();

    mBinEdges = vector<double>(numBins + 1);
    double binWidth = (max - min) / numBins;

    for(int k = 0; k < mBinEdges.size(); ++k)
        mBinEdges[k] = min + k * binWidth;

    initialize(inputs, outputs);
}
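The bin() helper used in Examples 7, 8 and 11 is not shown. With the equally spaced edges built above, one plausible implementation (purely hypothetical, clamped to the valid bin range) would be:

#include <algorithm>
#include <vector>

// Hypothetical bin lookup: map a value to a bin index given sorted bin edges.
int binIndex(double value, const std::vector<double>& binEdges) {
    int numBins = static_cast<int>(binEdges.size()) - 1;
    if(value <= binEdges.front())
        return 0;
    if(value >= binEdges.back())
        return numBins - 1;
    // index of the first edge strictly greater than value, minus one
    return static_cast<int>(
        std::upper_bound(binEdges.begin(), binEdges.end(), value) - binEdges.begin()) - 1;
}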
Example 12: samples
/**
 * Algorithm due to Knuth, 1969.
 */
ArrayXXi CMT::samplePoisson(const ArrayXXd& lambda) {
    ArrayXXi samples(lambda.rows(), lambda.cols());
    ArrayXXd threshold = (-lambda).exp();

    #pragma omp parallel for
    for(int i = 0; i < samples.size(); ++i) {
        double p = rand() / static_cast<double>(RAND_MAX);
        int k = 0;

        while(p > threshold(i)) {
            k += 1;
            p *= rand() / static_cast<double>(RAND_MAX);
        }

        samples(i) = k;
    }

    return samples;
}
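Knuth's multiplicative method multiplies uniform draws until the product drops below exp(-lambda), so its cost grows with lambda. The standard library offers an equivalent generator; a brief single-threaded sketch with a hypothetical name:

#include <Eigen/Dense>
#include <random>
using namespace Eigen;

// Poisson samples via std::poisson_distribution, one rate per entry.
ArrayXXi samplePoissonStd(const ArrayXXd& lambda, std::mt19937& rng) {
    ArrayXXi samples(lambda.rows(), lambda.cols());
    for(int i = 0; i < samples.size(); ++i) {
        std::poisson_distribution<int> pois(lambda(i));
        samples(i) = pois(rng);
    }
    return samples;
}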
Example 13: mexFunction
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    int N = mxGetScalar(prhs[0]);
    double d = mxGetScalar(prhs[1]);
    double h = mxGetScalar(prhs[2]);
    int Njacv = mxGetScalar(prhs[3]);
    double b = mxGetScalar(prhs[4]);
    double c = mxGetScalar(prhs[5]);
    double dr = mxGetScalar(prhs[6]);
    double di = mxGetScalar(prhs[7]);
    int threadNum = mxGetScalar(prhs[8]);
    double *a0 = mxGetPr(prhs[9]);
    double *v = mxGetPr(prhs[10]);
    double th = mxGetScalar(prhs[11]);
    double phi = mxGetScalar(prhs[12]);
    int nstp = mxGetScalar(prhs[13]);
    // mwSize isJ = mxGetScalar(prhs[14]);

    ArrayXXd av = gintgv(N, d, h, Njacv, b, c, dr, di, threadNum, a0, v, th, phi, nstp);

    plhs[0] = mxCreateDoubleMatrix(av.rows(), av.cols(), mxREAL);
    memcpy(mxGetPr(plhs[0]), av.data(), av.cols() * av.rows() * sizeof(double));
}
Example 14: weights
double CMT::MLR::parameterGradient(
    const MatrixXd& input,
    const MatrixXd& output,
    const lbfgsfloatval_t* x,
    lbfgsfloatval_t* g,
    const Trainable::Parameters& params_) const
{
    const Parameters& params = dynamic_cast<const Parameters&>(params_);

    MatrixXd weights = mWeights;
    VectorXd biases = mBiases;

    // copy parameters
    int k = 0;
    if(params.trainWeights)
        for(int i = 1; i < weights.rows(); ++i)
            for(int j = 0; j < weights.cols(); ++j, ++k)
                weights(i, j) = x[k];
    if(params.trainBiases)
        for(int i = 1; i < mBiases.rows(); ++i, ++k)
            biases[i] = x[k];

    // compute distribution over outputs
    ArrayXXd logProb = (weights * input).colwise() + biases;
    logProb.rowwise() -= logSumExp(logProb);

    // difference between prediction and actual output
    MatrixXd diff = (logProb.exp().matrix() - output);

    // compute gradients
    double normConst = output.cols() * log(2.);

    if(g) {
        int offset = 0;

        if(params.trainWeights) {
            Map<Matrix<double, Dynamic, Dynamic, RowMajor> > weightsGrad(g, mDimOut - 1, mDimIn);
            weightsGrad = (diff * input.transpose() / normConst).bottomRows(mDimOut - 1);
            offset += weightsGrad.size();

            weightsGrad += params.regularizeWeights.gradient(
                weights.bottomRows(mDimOut - 1).transpose()).transpose();
        }

        if(params.trainBiases) {
            VectorLBFGS biasesGrad(g + offset, mDimOut - 1);
            biasesGrad = diff.rowwise().sum().bottomRows(mDimOut - 1) / normConst;
            biasesGrad += params.regularizeBiases.gradient(biases);
        }
    }

    // return negative average log-likelihood in bits
    double value = -(logProb * output.array()).sum() / normConst;

    if(params.trainWeights)
        value += params.regularizeWeights.evaluate(weights.bottomRows(mDimOut - 1).transpose());
    if(params.trainBiases)
        value += params.regularizeBiases.evaluate(biases);

    return value;
}
Example 15: cube
MatrixXd cube(MatrixXd xin) {
    ArrayXXd x = xin.array(); // convert to Array
    x *= (x * x);             // element-wise cube
    return x.matrix();
}
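For comparison, Eigen's Array API also offers a coefficient-wise cube() method, so the same function can be a one-liner (assuming a reasonably recent Eigen version):

// Element-wise cube using Eigen's built-in array method.
MatrixXd cubeBuiltin(const MatrixXd& xin) {
    return xin.array().cube().matrix();
}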