本文整理汇总了C++中DiagonalMatrix::i方法的典型用法代码示例。如果您正苦于以下问题:C++ DiagonalMatrix::i方法的具体用法?C++ DiagonalMatrix::i怎么用?C++ DiagonalMatrix::i使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DiagonalMatrix
的用法示例。
在下文中一共展示了DiagonalMatrix::i方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: pseudoInverse
// Moore-Penrose pseudo-inverse via singular value decomposition:
// M = U * D * V.t()  =>  M+ = V * D^-1 * U.t().
// NOTE(review): D.i() inverts every singular value unconditionally; a
// rank-deficient M (zero singular value) will blow up here — callers
// presumably pass full-rank matrices; confirm.
Matrix BaseController::pseudoInverse(const Matrix M)
{
  // Factor M into its SVD components.
  Matrix leftVectors, rightVectors;
  DiagonalMatrix singularValues;
  NEWMAT::SVD(M, singularValues, leftVectors, rightVectors, true, true);
  // Invert the singular values and recombine the factors in reverse.
  Matrix pinv = rightVectors * singularValues.i() * leftVectors.t();
  return pinv;
}
示例2: getGeneralizedInverse
// Compute a generalized (pseudo-) inverse of G into Gi using SVD.
// Inverted singular values greater than 1000 — i.e. the reciprocals of
// near-zero singular values — are zeroed so a rank-deficient G still
// produces a finite result.
// NOTE(review): the recombination uses U * Di * V.t(); the textbook
// pseudo-inverse of G = U*D*V.t() is V * Di * U.t() — confirm this is
// intentional (e.g. G symmetric) for the intended callers.
void getGeneralizedInverse(Matrix& G, Matrix& Gi) {
#ifdef DEBUG
  cout << "\n\ngetGeneralizedInverse - Singular Value\n";
#endif
  // Factor G = U * D * V.t().
  Matrix U, V;
  DiagonalMatrix D;
  SVD(G, D, U, V);
#ifdef DEBUG
  cout << "D:\n";
  cout << setw(9) << setprecision(6) << (D);
  cout << "\n\n";
#endif
  // Reciprocal of each singular value.
  DiagonalMatrix invD;
  invD << D.i();
#ifdef DEBUG
  cout << "Di:\n";
  cout << setw(9) << setprecision(6) << (invD);
  cout << "\n\n";
#endif
  // Suppress entries that blew up from tiny singular values.
  for (int k = invD.Nrows(); k >= 1; --k) {
    if (invD(k) > 1000.0) {
      invD(k) = 0.0;
    }
  }
#ifdef DEBUG
  cout << "Di with biggies zeroed out:\n";
  cout << setw(9) << setprecision(6) << (invD);
  cout << "\n\n";
#endif
  // Recombine the factors into the generalized inverse.
  Gi << (U * (invD * V.t()));
}
示例3: test5
// Least-squares regression demo using singular value decomposition:
// fits y on [1, x1, x2], then prints coefficient estimates with
// standard errors plus per-observation fitted values, residuals, and
// hat-matrix diagonals.
//   y      - observed responses (length nobs)
//   x1, x2 - predictor columns (length nobs)
//   nobs   - number of observations
//   npred  - number of predictors excluding the intercept (2 here)
void test5(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
cout << "\n\nTest 5 - singular value\n";
// Singular value decomposition method
// load data - 1s into col 1 of matrix (intercept column)
int npred1 = npred+1;
Matrix X(nobs,npred1); ColumnVector Y(nobs);
X.Column(1) = 1.0; X.Column(2) << x1; X.Column(3) << x2; Y << y;
// do SVD
Matrix U, V; DiagonalMatrix D;
SVD(X,D,U,V); // X = U * D * V.t()
// Coefficients: A = V * D^-1 * U.t() * Y, computed right-to-left.
ColumnVector Fitted = U.t() * Y;
ColumnVector A = V * ( D.i() * Fitted );
Fitted = U * Fitted;        // Fitted = U * U.t() * Y = X * A
ColumnVector Residual = Y - Fitted;
// Residual variance with (nobs - npred1) degrees of freedom.
Real ResVar = Residual.SumSquare() / (nobs-npred1);
// get variances of estimates: D is overwritten with the diagonal of
// V * (D*D)^-1 * V.t(), i.e. (X.t()*X)^-1 — presumably; verify.
D << V * (D * D).i() * V.t();
// Get diagonals of Hat matrix (leverage of each observation)
DiagonalMatrix Hat; Hat << U * U.t();
// print out answers
cout << "\nEstimates and their standard errors\n\n";
ColumnVector SE(npred1);
for (int i=1; i<=npred1; i++) SE(i) = sqrt(D(i)*ResVar);
cout << setw(11) << setprecision(5) << (A | SE) << endl;
cout << "\nObservations, fitted value, residual value, hat value\n";
cout << setw(9) << setprecision(3) <<
(X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
cout << "\n\n";
}
示例4: covarMat
/*
* Fits a weighted cubic regression on predictor(s)
*
* @param contrast - want to predict this value per snp
* @param strength - covariate of choice
* @param weights - weight of data points for this genotype
* @param Predictor - output, prediction function coefficients
* @param Predicted - output, predicted contrast per snp
*/
void
FitWeightedCubic(const std::vector<double> &contrast,
const std::vector<double> &strength,
const std::vector<double> &weights,
std::vector<double> &Predictor,
std::vector<double> &Predicted) {
// Singular value decomposition method
unsigned int i;
unsigned int nobs;
unsigned int npred;
npred = 3+1;
nobs= contrast.size();
// convert double into doubles to match newmat
vector<Real> tmp_vec(nobs);
Real* tmp_ptr = &tmp_vec[0];
vector<Real> obs_vec(nobs);
Real *obs_ptr = &obs_vec[0];
vector<Real> weight_vec(nobs);
Matrix covarMat(nobs,npred);
ColumnVector observedVec(nobs);
// fill in the data
// modified by weights
for (i=0; i<nobs; i++)
weight_vec[i] = sqrt(weights[i]);
// load data - 1s into col 1 of matrix
for (i=0; i<nobs; i++)
tmp_vec[i] = weight_vec[i];
covarMat.Column(1) << tmp_ptr;
for (i=0; i<nobs; i++)
tmp_vec[i] *= strength[i];
covarMat.Column(2) << tmp_ptr;
for (i=0; i<nobs; i++)
tmp_vec[i] *= strength[i];
covarMat.Column(3) << tmp_ptr;
for (i=0; i<nobs; i++)
tmp_vec[i] *= strength[i];
covarMat.Column(4) << tmp_ptr;
for (i=0; i<nobs; i++)
obs_vec[i] = contrast[i]*weight_vec[i];
observedVec << obs_ptr;
// do SVD
Matrix U, V;
DiagonalMatrix D;
ColumnVector Fitted(nobs);
ColumnVector A(npred);
SVD(covarMat,D,U,V);
Fitted = U.t() * observedVec;
A = V * ( D.i() * Fitted );
// this predicts "0" for low weights
// because of weighted regression
Fitted = U * Fitted;
// this is the predictor
Predictor.resize(npred);
for (i=0; i<npred; i++)
Predictor[i] = A.element(i);
// export data back to doubles
// and therefore this predicts "0" for low-weighted points
// which is >not< the desired outcome!!!!
// instead we need to predict all points at once
// >unweighted< as output
vector<double> Goofy;
Predicted.resize(nobs);
for (i = 0; i < nobs; ++i) {
Goofy.resize(npred);
Goofy[0] = 1;
Goofy[1] = strength[i];
Goofy[2] = strength[i]*Goofy[1];
Goofy[3] = strength[i]*Goofy[2];
Predicted[i] = vprod(Goofy,Predictor);
}
}
示例5: trymat8
void trymat8()
{
// cout << "\nEighth test of Matrix package\n";
Tracer et("Eighth test of Matrix package");
Tracer::PrintTrace();
int i;
DiagonalMatrix D(6);
for (i=1;i<=6;i++) D(i,i)=i*i+i-10;
DiagonalMatrix D2=D;
Matrix MD=D;
DiagonalMatrix D1(6); for (i=1;i<=6;i++) D1(i,i)=-100+i*i*i;
Matrix MD1=D1;
Print(Matrix(D*D1-MD*MD1));
Print(Matrix((-D)*D1+MD*MD1));
Print(Matrix(D*(-D1)+MD*MD1));
DiagonalMatrix DX=D;
{
Tracer et1("Stage 1");
DX=(DX+D1)*DX; Print(Matrix(DX-(MD+MD1)*MD));
DX=D;
DX=-DX*DX+(DX-(-D1))*((-D1)+DX);
// Matrix MX = Matrix(MD1);
// MD1=DX+(MX.t())*(MX.t()); Print(MD1);
MD1=DX+(Matrix(MD1).t())*(Matrix(MD1).t()); Print(MD1);
DX=D; DX=DX; DX=D2-DX; Print(DiagonalMatrix(DX));
DX=D;
}
{
Tracer et1("Stage 2");
D.Release(2);
D1=D; D2=D;
Print(DiagonalMatrix(D1-DX));
Print(DiagonalMatrix(D2-DX));
MD1=1.0;
Print(Matrix(MD1-1.0));
}
{
Tracer et1("Stage 3");
//GenericMatrix
LowerTriangularMatrix LT(4);
LT << 1 << 2 << 3 << 4 << 5 << 6 << 7 << 8 << 9 << 10;
UpperTriangularMatrix UT = LT.t() * 2.0;
GenericMatrix GM1 = LT;
LowerTriangularMatrix LT1 = GM1-LT; Print(LT1);
GenericMatrix GM2 = GM1; LT1 = GM2; LT1 = LT1-LT; Print(LT1);
GM2 = GM1; LT1 = GM2; LT1 = LT1-LT; Print(LT1);
GM2 = GM1*2; LT1 = GM2; LT1 = LT1-LT*2; Print(LT1);
GM1.Release();
GM1=GM1; LT1=GM1-LT; Print(LT1); LT1=GM1-LT; Print(LT1);
GM1.Release();
GM1=GM1*4; LT1=GM1-LT*4; Print(LT1);
LT1=GM1-LT*4; Print(LT1); GM1.CleanUp();
GM1=LT; GM2=UT; GM1=GM1*GM2; Matrix M=GM1; M=M-LT*UT; Print(M);
Transposer(LT,GM2); LT1 = LT - GM2.t(); Print(LT1);
GM1=LT; Transposer(GM1,GM2); LT1 = LT - GM2.t(); Print(LT1);
GM1 = LT; GM1 = GM1 + GM1; LT1 = LT*2-GM1; Print(LT1);
DiagonalMatrix D; D << LT; GM1 = D; LT1 = GM1; LT1 -= D; Print(LT1);
UpperTriangularMatrix UT1 = GM1; UT1 -= D; Print(UT1);
}
{
Tracer et1("Stage 4");
// Another test of SVD
Matrix M(12,12); M = 0;
M(1,1) = M(2,2) = M(4,4) = M(6,6) =
M(7,7) = M(8,8) = M(10,10) = M(12,12) = -1;
M(1,6) = M(1,12) = -5.601594;
M(3,6) = M(3,12) = -0.000165;
M(7,6) = M(7,12) = -0.008294;
DiagonalMatrix D;
SVD(M,D);
SortDescending(D);
// answer given by matlab
DiagonalMatrix DX(12);
DX(1) = 8.0461;
DX(2) = DX(3) = DX(4) = DX(5) = DX(6) = DX(7) = 1;
DX(8) = 0.1243;
DX(9) = DX(10) = DX(11) = DX(12) = 0;
D -= DX; Clean(D,0.0001); Print(D);
}
#ifndef DONT_DO_NRIC
{
Tracer et1("Stage 5");
// test numerical recipes in C interface
DiagonalMatrix D(10);
D << 1 << 4 << 6 << 2 << 1 << 6 << 4 << 7 << 3 << 1;
ColumnVector C(10);
C << 3 << 7 << 5 << 1 << 4 << 2 << 3 << 9 << 1 << 3;
RowVector R(6);
R << 2 << 3 << 5 << 7 << 11 << 13;
nricMatrix M(10, 6);
DCR( D.nric(), C.nric(), 10, R.nric(), 6, M.nric() );
M -= D * C * R; Print(M);
D.ReSize(5);
D << 1.25 << 4.75 << 9.5 << 1.25 << 3.75;
C.ReSize(5);
//.........这里部分代码省略.........