This article collects typical usage examples of the RowVector class in C++. If you are wondering what exactly the C++ RowVector class does, how to use it, or what code that uses it looks like, the selected class code examples below may help.
The following shows 15 code examples of the RowVector class, sorted by popularity by default.
Example 1: test3
void test3(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
cout << "\n\nTest 3 - Cholesky\n";
// traditional sum of squares and products method of calculation
// with subtraction of means - using Cholesky decomposition
Matrix X(nobs,npred);
X.Column(1) << x1; X.Column(2) << x2;
ColumnVector Y(nobs); Y << y;
ColumnVector Ones(nobs); Ones = 1.0;
RowVector M = Ones.t() * X / nobs;
Matrix XC(nobs,npred);
XC = X - Ones * M;
ColumnVector YC(nobs);
Real m = Sum(Y) / nobs; YC = Y - Ones * m;
SymmetricMatrix SSQ; SSQ << XC.t() * XC;
// Cholesky decomposition of SSQ
LowerTriangularMatrix L = Cholesky(SSQ);
// calculate estimate
ColumnVector A = L.t().i() * (L.i() * (XC.t() * YC));
// calculate estimate of constant term
Real a = m - (M * A).AsScalar();
// Get variances of estimates from diagonal elements of inverse of SSQ
DiagonalMatrix D; D << L.t().i() * L.i();
ColumnVector V = D.AsColumn();
Real v = 1.0/nobs + (L.i() * M.t()).SumSquare();
// Calculate fitted values and residuals
int npred1 = npred+1;
ColumnVector Fitted = X * A + a;
ColumnVector Residual = Y - Fitted;
Real ResVar = Residual.SumSquare() / (nobs-npred1);
// Get diagonals of Hat matrix (an expensive way of doing this)
Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
DiagonalMatrix Hat; Hat << X1 * (X1.t() * X1).i() * X1.t();
// print out answers
cout << "\nEstimates and their standard errors\n\n";
cout.setf(ios::fixed, ios::floatfield);
cout << setw(11) << setprecision(5) << a << " ";
cout << setw(11) << setprecision(5) << sqrt(v*ResVar) << endl;
ColumnVector SE(npred);
for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
cout << setw(11) << setprecision(5) << (A | SE) << endl;
cout << "\nObservations, fitted value, residual value, hat value\n";
cout << setw(9) << setprecision(3) <<
(X | Y | Fitted | Residual | Hat.AsColumn());
cout << "\n\n";
}
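For reference, a minimal driver for this example might look like the sketch below. It assumes the newmat header newmat.h is available and that test3 (and the analogous test2, shown later as Example 12) is compiled in; the data arrays are invented purely for illustration.

#include "newmat.h"   // newmat library; defines Real (double by default), Matrix, ColumnVector, ...

void test3(Real* y, Real* x1, Real* x2, int nobs, int npred);   // from the example above

int main()
{
    // Six observations of two predictors and one response (made-up values).
    Real x1[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
    Real x2[] = { 0.5, 1.1, 2.3, 2.9, 4.2, 5.1 };
    Real y[]  = { 3.1, 4.9, 7.2, 8.8, 11.4, 13.0 };
    test3(y, x1, x2, 6, 2);   // nobs = 6, npred = 2
    return 0;
}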
Example 2: operator
SquareMatrix SqMRO_LUBackSubstitution::operator()(RowVector &indx, RowVector &b) const {
SquareMatrix a(*m_thisMatrix);
int n, i, j, ip, ii = -1;
double sum;
n = a.getRows();
// Forward substitution: undo the row permutation recorded in indx and solve L*y = P*b in place in b.
for (i = 0; i < n; i++) {
ip = (int)indx(i);
sum = b.element(ip);
b.element(ip) = b.element(i);
if (ii != -1) {
for (j = ii; j < i; j++) {
sum -= a.element(i,j) * b.element(j);
}
} else if (sum) {
ii = i;
}
b.element(i) = sum;
}
// Back substitution: solve U*x = y, leaving the solution in b.
for (i = (n - 1); i >= 0; i--) {
sum = b.element(i);
for (j = (i + 1); j < n; j++) {
sum -= a.element(i,j) * b.element(j);
}
b.element(i) = sum / a.element(i,i);
}
// The solution is returned in b; the matrix returned here is an unmodified copy of the stored LU factors.
return a;
}
Example 3: Norm
template <typename Type, unsigned int N>
Real Norm(RowVector<Type,N> const& Vec, unsigned int const p)
{
Trace("", "Norm(RowVector<Type,N>&, unsigned int)");
return Vec.Norm(p);
}
Example 4: Trace
template <typename Type, unsigned int N>
Type RowVector<Type,N>::operator*(RowVector<Type,N> const& Src) const
{
Trace("RowVector<Type,N>", "operator*(RowVector<Type,N>&)");
Type sum = Type();
for (unsigned int i = 1; i <= N; ++i) sum += (this->Value(i) * Src.Value(i));
return sum;
}
Example 5:
void SpinAdapted::Wavefunction::CollectFrom (const RowVector& C)
{
int flatIndex = 0;
for (int lQ = 0; lQ < nrows (); ++lQ)
for (int rQ = 0; rQ < ncols (); ++rQ)
if (allowedQuantaMatrix (lQ, rQ))
for (int lQState = 0; lQState < operator_element(lQ, rQ).Nrows (); ++lQState)
for (int rQState = 0; rQState < operator_element(lQ, rQ).Ncols (); ++rQState)
{
operator_element(lQ, rQ).element (lQState, rQState) = C.element (flatIndex);
++flatIndex;
}
}
Example 6: dotproduct
double SpinAdapted::dotproduct(const RowVector& a, const RowVector& b)
{
assert(a.Ncols() == b.Ncols());
#ifdef BLAS
return DDOT(a.Storage(), a.Store(), 1, b.Store(), 1);
#else
return a * b.t();
#endif
}
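A quick way to exercise dotproduct is sketched below. It assumes newmat's RowVector and the declaration above are available; the vector values are made up.

#include "newmat.h"   // newmat RowVector

namespace SpinAdapted { double dotproduct(const RowVector& a, const RowVector& b); }   // from the example above

int main()
{
    RowVector a(3), b(3);
    a(1) = 1.0; a(2) = 2.0; a(3) = 3.0;   // newmat indexing is 1-based
    b(1) = 4.0; b(2) = 5.0; b(3) = 6.0;
    double d = SpinAdapted::dotproduct(a, b);   // expected: 1*4 + 2*5 + 3*6 = 32
    return d == 32.0 ? 0 : 1;                   // exact for these small integer values
}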
Example 7: TestSort
static void TestSort(int n)
{
// make some data
RowVector X(n);
int i;
for (i = 1; i <= n; i++)
X(i) = sin((Real)i) + 0.3 * cos(i/5.0) - 0.6 * sin(i/7.0) + 0.2 * sin(2.0 * i);
RowVector X_Sorted = X; SimpleSortDescending(X_Sorted.Store(), n);
RowVector A = X + X.Reverse(); SimpleSortDescending(A.Store(), n);
// test descending sort
RowVector Y = X; SortDescending(Y); Y -= X_Sorted; Print(Y);
Y = X_Sorted; SortDescending(Y); Y -= X_Sorted; Print(Y);
Y = X_Sorted.Reverse(); SortDescending(Y); Y -= X_Sorted; Print(Y);
Y = X + X.Reverse(); SortDescending(Y); Y -= A; Print(Y);
// test ascending sort
Y = X; SortAscending(Y); Y -= X_Sorted.Reverse(); Print(Y);
Y = X_Sorted; SortAscending(Y); Y -= X_Sorted.Reverse(); Print(Y);
Y = X_Sorted.Reverse(); SortAscending(Y); Y -= X_Sorted.Reverse(); Print(Y);
Y = X + X.Reverse(); SortAscending(Y); Y -= A.Reverse(); Print(Y);
}
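SimpleSortDescending is a reference sort defined elsewhere in newmat's test suite; to run TestSort on its own, a stand-in with the same observed signature could be as simple as the sketch below (assuming Real is newmat's usual double typedef).

#include <algorithm>
#include <functional>

// Stand-in reference sort, used only to check SortDescending / SortAscending against.
static void SimpleSortDescending(Real* data, int n)
{
    std::sort(data, data + n, std::greater<Real>());
}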
Example 8: trymatb
void trymatb()
{
// cout << "\nEleventh test of Matrix package\n";
Tracer et("Eleventh test of Matrix package");
Tracer::PrintTrace();
int i; int j;
RowVector RV; RV.ReSize(10);
{
Tracer et1("Stage 1");
for (i=1;i<=10;i++) RV(i)=i*i-3;
Matrix X(1,1); X(1,1) = .25;
Print(RowVector(X.i() * RV - RV / .25));
// Print(RowVector(X.i() * Matrix(RV) - RV / .25)); // != zortech, AT&T
Print(RowVector(X.i() * RV - RV / .25));
}
LowerTriangularMatrix L(5); UpperTriangularMatrix U(5);
for (i=1; i<=5; i++) for (j=1; j<=i; j++)
{ L(i,j) = i*i + j -2.0; U(j,i) = i*i*j+3; }
DiagonalMatrix D(5);
for (i=1; i<=5; i++) D(i,i) = i*i + i + 2;
Matrix M1 = -L; Matrix M2 = L-U; Matrix M3 = U*3; Matrix M4 = U-L;
Matrix M5 = M1 - D; M1 = D * (-3) - M3;
{
Tracer et1("Stage 2");
Print(Matrix((M2-M4*2)+M5*3-M1));
M1 = L.t(); Print(Matrix(M1.t()-L));
M1 = U.t(); Print(Matrix(M1.t()-U));
}
{
Tracer et1("Stage 3");
SymmetricMatrix S(5);
for (i=1; i<=5; i++) for (j=1; j<=i; j++) S(i,j) = i*j+i-j+5;
M2 = S.i() * M4; M1 = S; M3=M1*M2-M4; Clean(M3,0.00000001); Print(M3);
SymmetricMatrix T(5);
for (i=1; i<=5; i++) for (j=1; j<=i; j++) T(i,j) = i*i*j*j+i-j+5-i*j;
M1 = S.i() * T; M1 = S * M1; M1 = M1-T; Clean(M1,0.00000001); Print(M1);
ColumnVector CV(5); for (i=1; i<=5; i++) CV(i) = i*i*i+10;
M1 = CV * RV;
}
{
Tracer et1("Stage 4");
M4.ReSize(5,10);
for (i=1; i<=5; i++) for (j=1; j<=10; j++) M4(i,j) = (i*i*i+10)*(j*j-3);
Print(Matrix(M1-M4));
M1 = L.t(); M2 = U.t(); M3 = L+U; Print(Matrix(M1-M3.t()+M2));
}
// UpperTriangularMatrix U2((const UpperTriangularMatrix&)U); // != zortech
UpperTriangularMatrix U2((UpperTriangularMatrix&)U);
{
Tracer et1("Stage 5");
Print(Matrix(U2-U));
M2.ReSize(10,10);
for (i=1; i<=10; i++) for (j=1; j<=10; j++) M2(i,j) = (i*i*i+10)*(j*j-3);
D << M2; L << M2; U << M2; // check copy into
Print( Matrix( (D+M2)-(L+U) ) );
}
{
Tracer et1("Stage 6");
M1.ReSize(6,10);
for (i=1; i<=6; i++) for (j=1; j<=10; j++) M1(i,j) = 100*i + j;
M2 = M1.SubMatrix(3,5,4,7); M3.ReSize(3,4);
for (i=3; i<=5; i++) for (j=4; j<=7; j++) M3(i-2,j-3) = 100*i + j;
Print(Matrix(M2-M3));
}
int a1,a2,a3,a4;
{
Tracer et1("Stage 7");
int a1,a2,a3,a4;
a1=4; a2=9; a3=4; a4=7;
U.ReSize(10);
for (i=1; i<=10; i++) for (j=i; j<=10; j++) U(i,j) = 100*i + j;
M2 = U.SubMatrix(a1,a2,a3,a4);
M3.ReSize(a2-a1+1,a4-a3+1); M3=0.0;
for (i=a1; i<=a2; i++) for (j=(i>a3) ? i : a3; j<=a4; j++)
M3(i-a1+1,j-a3+1) = 100*i + j;
Print(Matrix(M2-M3));
}
{
Tracer et1("Stage 8");
a1=3; a2=9; a3=2; a4=7;
U.ReSize(10);
for (i=1; i<=10; i++) for (j=i; j<=10; j++) U(i,j) = 100*i + j;
M2 = U.SubMatrix(a1,a2,a3,a4);
M3.ReSize(a2-a1+1,a4-a3+1); M3=0.0;
for (i=a1; i<=a2; i++) for (j=(i>a3) ? i : a3; j<=a4; j++)
M3(i-a1+1,j-a3+1) = 100*i + j;
Print(Matrix(M2-M3));
}
{
Tracer et1("Stage 9");
a1=4; a2=6; a3=2; a4=5;
U.ReSize(10);
for (i=1; i<=10; i++) for (j=i; j<=10; j++) U(i,j) = 100*i + j;
M2 = U.SubMatrix(a1,a2,a3,a4);
M3.ReSize(a2-a1+1,a4-a3+1); M3=0.0;
for (i=a1; i<=a2; i++) for (j=(i>a3) ? i : a3; j<=a4; j++)
M3(i-a1+1,j-a3+1) = 100*i + j;
Print(Matrix(M2-M3));
}
//......... the rest of this example's code is omitted .........
Example 9: trymatc
void trymatc()
{
// cout << "\nTwelfth test of Matrix package\n";
Tracer et("Twelfth test of Matrix package");
Tracer::PrintTrace();
DiagonalMatrix D(15); D=1.5;
Matrix A(15,15);
int i,j;
for (i=1;i<=15;i++) for (j=1;j<=15;j++) A(i,j)=i*i+j-150;
{ A = A + D; }
ColumnVector B(15);
for (i=1;i<=15;i++) B(i)=i+i*i-150.0;
{
Tracer et1("Stage 1");
ColumnVector B1=B;
B=(A*2.0).i() * B1;
Matrix X = A*B-B1/2.0;
Clean(X, 0.000000001); Print(X);
A.ReSize(3,5);
for (i=1; i<=3; i++) for (j=1; j<=5; j++) A(i,j) = i+100*j;
B = A.AsColumn()+10000;
RowVector R = (A+10000).AsColumn().t();
Print( RowVector(R-B.t()) );
}
{
Tracer et1("Stage 2");
B = A.AsColumn()+10000;
Matrix XR = (A+10000).AsMatrix(15,1).t();
Print( RowVector(XR-B.t()) );
}
{
Tracer et1("Stage 3");
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0+10000;
Matrix MR = (A+10000).AsColumn().t();
Print( RowVector(MR-B.t()) );
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0;
MR = A.AsColumn().t();
Print( RowVector(MR-B.t()) );
}
{
Tracer et1("Stage 4");
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0;
RowVector R = A.AsColumn().t();
Print( RowVector(R-B.t()) );
}
{
Tracer et1("Stage 5");
RowVector R = (A.AsColumn()-5000).t();
B = ((R.t()+10000) - A.AsColumn())-5000;
Print( RowVector(B.t()) );
}
{
Tracer et1("Stage 6");
B = A.AsColumn(); ColumnVector B1 = (A+10000).AsColumn() - 10000;
Print(ColumnVector(B1-B));
}
{
Tracer et1("Stage 7");
Matrix X = B.AsMatrix(3,5); Print(Matrix(X-A));
for (i=1; i<=3; i++) for (j=1; j<=5; j++) B(5*(i-1)+j) -= i+100*j;
Print(B);
}
{
Tracer et1("Stage 8");
A.ReSize(7,7); D.ReSize(7);
for (i=1; i<=7; i++) for (j=1; j<=7; j++) A(i,j) = i*j*j;
for (i=1; i<=7; i++) D(i,i) = i;
UpperTriangularMatrix U; U << A;
Matrix X = A; for (i=1; i<=7; i++) X(i,i) = i;
A.Inject(D); Print(Matrix(X-A));
X = U; U.Inject(D); A = U; for (i=1; i<=7; i++) X(i,i) = i;
Print(Matrix(X-A));
}
{
Tracer et1("Stage 9");
A.ReSize(7,5);
for (i=1; i<=7; i++) for (j=1; j<=5; j++) A(i,j) = i+100*j;
Matrix Y = A; Y = Y - ((const Matrix&)A); Print(Y);
Matrix X = A; // X.Release();
Y = A; Y = ((const Matrix&)X) - A; Print(Y); Y = 0.0;
Y = ((const Matrix&)X) - ((const Matrix&)A); Print(Y);
}
{
Tracer et1("Stage 10");
// some tests on submatrices
UpperTriangularMatrix U(20);
for (i=1; i<=20; i++) for (j=i; j<=20; j++) U(i,j)=100 * i + j;
UpperTriangularMatrix V = U.SymSubMatrix(1,5);
UpperTriangularMatrix U1 = U;
//......... the rest of this example's code is omitted .........
Example 10:
MySymmetricMatrix::SymmetricMatrix(int num_rows,const RowVector& v):EigenSymmetricMatrix(num_rows,v.columns()){
EigenSymmetricMatrix & m = *this;
const EigenRowVector & r = v;
for(unsigned int i=0;i<num_rows;i++)
m.row(i) = r;
}
Example 11: debug_assert
void SeparationTask::exportComponents() const
{
debug_assert(&phaseMatrix() &&
&magnitudeSpectraMatrix(0) &&
&gainsMatrix());
logger().debug(nameAndTaskID() + " exporting the components.");
Poco::Util::LayeredConfiguration& cfg =
BasicApplication::instance().config();
// Determine whether to export components as WAV and/or as matrix files.
bool exportAsMatrix = cfg.getBool("blissart.separation.export.matrix",
false);
if (exportAsMatrix)
logger().debug("Exporting components as matrix file(s).");
bool exportAsWave = cfg.getBool("blissart.separation.export.wave",
true);
if (exportAsWave)
logger().debug("Exporting components as waveform file(s).");
// Determine whether to use wiener reconstruction.
bool wienerRec = cfg.getBool("blissart.separation.export.wienerrec",
true);
// Compute the reconstructed matrix (WH) in case of wiener reconstruction.
Poco::SharedPtr<Matrix> reconst;
if (wienerRec) {
//reconst = new Matrix(phaseMatrix().rows(), phaseMatrix().cols());
reconst = new Matrix(magnitudeSpectraMatrix(0).rows(),
gainsMatrix().cols());
if (_nrOfSpectra > 1) {
reconst->zero();
Matrix hShifted = gainsMatrix();
for (unsigned int t = 0; t < _nrOfSpectra; ++t) {
reconst->add(magnitudeSpectraMatrix(t) * hShifted);
hShifted.shiftColumnsRight();
}
}
else {
magnitudeSpectraMatrix(0).multWithMatrix(gainsMatrix(), reconst);
}
// revert transform to reconst
reconst = revertTransforms(reconst);
}
// Retrieve desired component indices.
vector<vector<int> > compIndices = _exportComponentIndices;
if (compIndices.empty()) {
vector<int> compIndicesSource;
for (int i = 1; i <= _nrOfComponents; i++) {
compIndicesSource.push_back(i);
}
compIndices.push_back(compIndicesSource);
}
// Reconstruct components and mix, if desired.
int sourceIndex = 1;
for (vector<vector<int> >::const_iterator sourceIt = compIndices.begin();
sourceIt != compIndices.end(); ++sourceIt, ++sourceIndex)
{
// Holds the mixed spectrogram if mixing is desired.
Poco::SharedPtr<Matrix> mixedSpectrogram;
logger().debug("Exporting components for source #" +
Poco::NumberFormatter::format(sourceIndex));
for (vector<int>::const_iterator it = sourceIt->begin();
it != sourceIt->end(); ++it)
{
if (*it < 1 || *it > _nrOfComponents) {
logger().error(nameAndTaskID() + ": invalid component index: " +
Poco::NumberFormatter::format(*it));
continue;
}
int i = *it - 1;
logger().debug(nameAndTaskID() + " exporting component #" +
Poco::NumberFormatter::format(i));
// Compute the component's magnitude spectrum.
Poco::SharedPtr<Matrix> magnitudeSpectrum = new Matrix(
magnitudeSpectraMatrix(0).rows(),
gainsMatrix().cols());
// NMD case
if (_nrOfSpectra > 1) {
magnitudeSpectrum->zero();
RowVector componentGains = gainsMatrix().nthRow(i);
for (unsigned int t = 0; t < _nrOfSpectra; t++) {
ColVector componentSpectrum = magnitudeSpectraMatrix(t).nthColumn(i);
magnitudeSpectrum->add(componentSpectrum * componentGains);
componentGains.shiftRight();
}
}
// "NMF" case (separated for efficiency)
else {
ColVector componentSpectrum = magnitudeSpectraMatrix(0).nthColumn(i);
RowVector componentGains = gainsMatrix().nthRow(i);
*magnitudeSpectrum = componentSpectrum * componentGains;
}
// revert transformation to component spectrogram
magnitudeSpectrum = revertTransforms(magnitudeSpectrum);
//......... the rest of this example's code is omitted .........
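The NMD branch above reconstructs the spectrogram as a sum over the time-shifted spectra: each magnitude spectra matrix W_t is multiplied by a copy of the gains matrix H whose columns have been shifted t steps to the right. The self-contained sketch below illustrates that accumulation on plain std::vector matrices; it is only an illustration of the idea, not blissart's Matrix API, and it assumes shiftColumnsRight moves every column one position to the right and zero-fills the first column.

#include <vector>
#include <cstddef>

using Mat = std::vector<std::vector<double>>;   // row-major helper type for this sketch

// Shift all columns of h one position to the right, zero-filling column 0
// (assumed behaviour of blissart's Matrix::shiftColumnsRight()).
static void shiftColumnsRight(Mat& h)
{
    for (auto& row : h) {
        for (std::size_t c = row.size(); c-- > 1; ) row[c] = row[c - 1];
        if (!row.empty()) row[0] = 0.0;
    }
}

// reconst += w * h  (naive dense multiply, sufficient for the illustration)
static void addProduct(Mat& reconst, const Mat& w, const Mat& h)
{
    for (std::size_t r = 0; r < reconst.size(); ++r)
        for (std::size_t c = 0; c < reconst[r].size(); ++c)
            for (std::size_t k = 0; k < h.size(); ++k)
                reconst[r][c] += w[r][k] * h[k][c];
}

// V_hat = sum over t of W[t] * shift^t(H), mirroring the NMD branch above.
Mat reconstructNMD(const std::vector<Mat>& w, Mat h)
{
    Mat reconst(w[0].size(), std::vector<double>(h[0].size(), 0.0));
    for (std::size_t t = 0; t < w.size(); ++t) {
        addProduct(reconst, w[t], h);
        shiftColumnsRight(h);
    }
    return reconst;
}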
Example 12: test2
void test2(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
cout << "\n\nTest 2 - traditional, OK\n";
// traditional sum of squares and products method of calculation
// with subtraction of means - less subject to round-off error
// than test1
// make matrix of predictor values
Matrix X(nobs,npred);
// load x1 and x2 into X
// [use << rather than = when loading arrays]
X.Column(1) << x1; X.Column(2) << x2;
// vector of Y values
ColumnVector Y(nobs); Y << y;
// make vector of 1s
ColumnVector Ones(nobs); Ones = 1.0;
// calculate means (averages) of x1 and x2 [ .t() takes transpose]
RowVector M = Ones.t() * X / nobs;
// and subtract means from x1 and x2
Matrix XC(nobs,npred);
XC = X - Ones * M;
// do the same to Y [use Sum to get sum of elements]
ColumnVector YC(nobs);
Real m = Sum(Y) / nobs; YC = Y - Ones * m;
// form sum of squares and product matrix
// [use << rather than = for copying Matrix into SymmetricMatrix]
SymmetricMatrix SSQ; SSQ << XC.t() * XC;
// calculate estimate
// [bracket last two terms to force this multiplication first]
// [ .i() means inverse, but inverse is not explicitly calculated]
ColumnVector A = SSQ.i() * (XC.t() * YC);
// calculate estimate of constant term
// [AsScalar converts 1x1 matrix to Real]
Real a = m - (M * A).AsScalar();
// Get variances of estimates from diagonal elements of inverse of SSQ
// [ we are taking inverse of SSQ - we need it for finding D ]
Matrix ISSQ = SSQ.i(); DiagonalMatrix D; D << ISSQ;
ColumnVector V = D.AsColumn();
Real v = 1.0/nobs + (M * ISSQ * M.t()).AsScalar();
// for calc variance of const
// Calculate fitted values and residuals
int npred1 = npred+1;
ColumnVector Fitted = X * A + a;
ColumnVector Residual = Y - Fitted;
Real ResVar = Residual.SumSquare() / (nobs-npred1);
// Get diagonals of Hat matrix (an expensive way of doing this)
Matrix X1(nobs,npred1); X1.Column(1)<<Ones; X1.Columns(2,npred1)<<X;
DiagonalMatrix Hat; Hat << X1 * (X1.t() * X1).i() * X1.t();
// print out answers
cout << "\nEstimates and their standard errors\n\n";
cout.setf(ios::fixed, ios::floatfield);
cout << setw(11) << setprecision(5) << a << " ";
cout << setw(11) << setprecision(5) << sqrt(v*ResVar) << endl;
// make vector of standard errors
ColumnVector SE(npred);
for (int i=1; i<=npred; i++) SE(i) = sqrt(V(i)*ResVar);
// use concatenation function to form matrix and use matrix print
// to get two columns
cout << setw(11) << setprecision(5) << (A | SE) << endl;
cout << "\nObservations, fitted value, residual value, hat value\n";
cout << setw(9) << setprecision(3) <<
(X | Y | Fitted | Residual | Hat.AsColumn());
cout << "\n\n";
}
Example 13: trymatc
void trymatc()
{
// cout << "\nTwelfth test of Matrix package\n";
Tracer et("Twelfth test of Matrix package");
Tracer::PrintTrace();
DiagonalMatrix D(15); D=1.5;
Matrix A(15,15);
int i,j;
for (i=1;i<=15;i++) for (j=1;j<=15;j++) A(i,j)=i*i+j-150;
{ A = A + D; }
ColumnVector B(15);
for (i=1;i<=15;i++) B(i)=i+i*i-150.0;
{
Tracer et1("Stage 1");
ColumnVector B1=B;
B=(A*2.0).i() * B1;
Matrix X = A*B-B1/2.0;
Clean(X, 0.000000001); Print(X);
A.ReSize(3,5);
for (i=1; i<=3; i++) for (j=1; j<=5; j++) A(i,j) = i+100*j;
B = A.AsColumn()+10000;
RowVector R = (A+10000).AsColumn().t();
Print( RowVector(R-B.t()) );
}
{
Tracer et1("Stage 2");
B = A.AsColumn()+10000;
Matrix XR = (A+10000).AsMatrix(15,1).t();
Print( RowVector(XR-B.t()) );
}
{
Tracer et1("Stage 3");
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0+10000;
Matrix MR = (A+10000).AsColumn().t();
Print( RowVector(MR-B.t()) );
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0;
MR = A.AsColumn().t();
Print( RowVector(MR-B.t()) );
}
{
Tracer et1("Stage 4");
B = (A.AsMatrix(15,1)+A.AsColumn())/2.0;
RowVector R = A.AsColumn().t();
Print( RowVector(R-B.t()) );
}
{
Tracer et1("Stage 5");
RowVector R = (A.AsColumn()-5000).t();
B = ((R.t()+10000) - A.AsColumn())-5000;
Print( RowVector(B.t()) );
}
{
Tracer et1("Stage 6");
B = A.AsColumn(); ColumnVector B1 = (A+10000).AsColumn() - 10000;
Print(ColumnVector(B1-B));
}
{
Tracer et1("Stage 7");
Matrix X = B.AsMatrix(3,5); Print(Matrix(X-A));
for (i=1; i<=3; i++) for (j=1; j<=5; j++) B(5*(i-1)+j) -= i+100*j;
Print(B);
}
{
Tracer et1("Stage 8");
A.ReSize(7,7); D.ReSize(7);
for (i=1; i<=7; i++) for (j=1; j<=7; j++) A(i,j) = i*j*j;
for (i=1; i<=7; i++) D(i,i) = i;
UpperTriangularMatrix U; U << A;
Matrix X = A; for (i=1; i<=7; i++) X(i,i) = i;
A.Inject(D); Print(Matrix(X-A));
X = U; U.Inject(D); A = U; for (i=1; i<=7; i++) X(i,i) = i;
Print(Matrix(X-A));
}
{
Tracer et1("Stage 9");
A.ReSize(7,5);
for (i=1; i<=7; i++) for (j=1; j<=5; j++) A(i,j) = i+100*j;
Matrix Y = A; Y = Y - ((const Matrix&)A); Print(Y);
Matrix X = A;
Y = A; Y = ((const Matrix&)X) - A; Print(Y); Y = 0.0;
Y = ((const Matrix&)X) - ((const Matrix&)A); Print(Y);
}
{
Tracer et1("Stage 10");
// some tests on submatrices
UpperTriangularMatrix U(20);
for (i=1; i<=20; i++) for (j=i; j<=20; j++) U(i,j)=100 * i + j;
UpperTriangularMatrix V = U.SymSubMatrix(1,5);
UpperTriangularMatrix U1 = U;
//......... the rest of this example's code is omitted .........
Example 14: a
SquareMatrix SqMRO_LUDecomposition::operator()(RowVector &indx, double *d) const {
SquareMatrix a(*m_thisMatrix);
int n, i, imax, j, k;
double big, dum, sum, temp;
RowVector vv(a.getRows());
n = a.getRows();
// No row interchanges yet
*d = 1.0;
// Loop over rows to get implicit scaling information
for (i = 0; i < n; i++) {
big = 0.0;
for (j = 0; j < n; j++) {
temp = fabs(a.element(i,j));
if (temp > big) {
big = temp;
}
}
if (big == 0.0) {
error("SquareMatrixAlias::luDecomp : MatrixAlias is rank defficient\n\n");
return a;
}
vv(i) = 1 / big;
}
// Loop over columns (Crout's method)
for (j = 0; j < n; j++) {
for (i = 0; i < j; i++) {
sum = a.element(i,j);
for (k = 0; k < i; k++) {
sum -= a.element(i,k) * a.element(k,j);
}
a.element(i,j) = sum;
}
big = 0;
for (i = j; i < n; i++) {
sum = a.element(i,j);
for (k = 0; k < j; k++) {
sum -= a.element(i,k) * a.element(k,j);
}
a.element(i,j) = sum;
dum = vv(i) * fabs(sum);
if (dum >= big) {
big = dum;
imax = i;
}
}
if (j != imax) {
for (k = 0; k < n; k++) {
dum = a.element(imax,k);
a.element(imax,k) = a.element(j,k);
a.element(j,k) = dum;
}
*d = -(*d);
vv(imax) = vv(j);
}
indx.element(j) = imax;
// Singularity may arise as a result of rounding errors
// Substitute in small values for zeros
if (a.element(j,j) == 0) {
error("SquareMatrixAlias::luDecomp : MatrixAlias is rank defficient\n\n");
a.element(j,j) = 1e-100;
}
if (j != n) {
dum = 1 / a.element(j,j);
for (i = j+1; i < n; i++) {
a.element(i,j) *= dum;
}
}
}
return a;
}
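Examples 2 (SqMRO_LUBackSubstitution) and 14 are the two halves of a Numerical Recipes style LU solver: the decomposition records the row permutation in indx and the sign of that permutation in *d, and the back substitution then solves A x = b from those factors. The sketch below shows the same two-step pairing on plain arrays, including how d and the diagonal of the factored matrix give the determinant. It deliberately does not use the SqMRO classes (whose constructors are not shown here) and, for brevity, uses ordinary partial pivoting instead of the implicitly scaled Crout pivoting above.

#include <array>
#include <cmath>
#include <cstdio>
#include <utility>

// LU factorisation with partial pivoting on a small fixed-size matrix,
// followed by the permuted forward/back substitution.
constexpr int N = 3;
using Mat3 = std::array<std::array<double, N>, N>;

int main()
{
    Mat3 a = {{{  2.0,  1.0, 1.0 },
               {  4.0, -6.0, 0.0 },
               { -2.0,  7.0, 2.0 }}};
    std::array<double, N> b = { 5.0, -2.0, 9.0 };   // exact solution is x = (1, 1, 2)
    std::array<int, N> indx{};
    double d = 1.0;                                 // sign of the row permutation

    // Doolittle LU with partial pivoting, factors stored in place in a.
    for (int k = 0; k < N; ++k) {
        int piv = k;
        for (int i = k + 1; i < N; ++i)
            if (std::fabs(a[i][k]) > std::fabs(a[piv][k])) piv = i;
        if (piv != k) { std::swap(a[piv], a[k]); d = -d; }
        indx[k] = piv;
        for (int i = k + 1; i < N; ++i) {
            a[i][k] /= a[k][k];
            for (int j = k + 1; j < N; ++j) a[i][j] -= a[i][k] * a[k][j];
        }
    }

    // Apply the recorded permutation to b, then forward- and back-substitute.
    for (int k = 0; k < N; ++k) std::swap(b[k], b[indx[k]]);
    for (int i = 1; i < N; ++i)
        for (int j = 0; j < i; ++j) b[i] -= a[i][j] * b[j];
    for (int i = N - 1; i >= 0; --i) {
        for (int j = i + 1; j < N; ++j) b[i] -= a[i][j] * b[j];
        b[i] /= a[i][i];
    }

    // The determinant is the permutation sign times the product of U's diagonal.
    double det = d;
    for (int k = 0; k < N; ++k) det *= a[k][k];

    std::printf("x = (%g, %g, %g), det = %g\n", b[0], b[1], b[2], det);
    return 0;
}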
Example 15: gru_impl
inline tensor5s gru_impl(const tensor5& input,
const std::size_t n_units,
const bool use_bias,
const bool reset_after,
const bool return_sequences,
const float_vec& weights,
const float_vec& recurrent_weights,
const float_vec& bias,
const std::string& activation,
const std::string& recurrent_activation)
{
const std::size_t n_timesteps = input.shape().width_;
const std::size_t n_features = input.shape().depth_;
// weight matrices
const EigenIndex n = EigenIndex(n_units);
const RowMajorMatrix<Dynamic, Dynamic> W = eigen_row_major_mat_from_values(n_features, n_units * 3, weights);
const RowMajorMatrix<Dynamic, Dynamic> U = eigen_row_major_mat_from_values(n_units, n_units * 3, recurrent_weights);
// kernel bias
RowVector<Dynamic> b_x(n_units * 3);
if (use_bias && bias.size() >= 1 * n_units * 3)
std::copy_n(bias.cbegin(), n_units * 3, b_x.data());
else
b_x.setZero();
// recurrent kernel bias
RowVector<Dynamic> b_h(n_units * 3);
if (use_bias && bias.size() >= 2 * n_units * 3)
std::copy_n(bias.cbegin() + static_cast<float_vec::const_iterator::difference_type>(n_units * 3), n_units * 3, b_h.data());
else
b_h.setZero();
// initialize cell output states h
RowVector<Dynamic> h(1, n_units);
h.setZero();
// write input to eigen matrix of shape (timesteps, n_features)
RowMajorMatrix<Dynamic, Dynamic> x(n_timesteps, n_features);
for (std::size_t a_t = 0; a_t < n_timesteps; ++a_t)
for (std::size_t a_f = 0; a_f < n_features; ++a_f)
x(EigenIndex(a_t), EigenIndex(a_f)) = input.get(0, 0, 0, a_t, a_f);
// kernel applied to inputs (with bias), produces shape (timesteps, n_units * 3)
RowMajorMatrix<Dynamic, Dynamic> Wx = x * W;
Wx.rowwise() += b_x;
// get activation functions
auto act_func = get_activation_func(activation);
auto act_func_recurrent = get_activation_func(recurrent_activation);
// computing GRU output
tensor5s gru_result;
if (return_sequences)
gru_result = { tensor5(shape5(1, 1, 1, n_timesteps, n_units), float_type(0)) };
else
gru_result = { tensor5(shape5(1, 1, 1, 1, n_units), float_type(0)) };
for (EigenIndex k = 0; k < EigenIndex(n_timesteps); ++k)
{
RowVector<Dynamic> r;
RowVector<Dynamic> z;
RowVector<Dynamic> m;
// in the formulae below, the following notations are used:
// A b matrix product
// a o b Hadamard (element-wise) product
// x input vector
// h state vector
// W_{x,a} block of the kernel weight matrix corresponding to "a"
// W_{h,a} block of the recurrent kernel weight matrix corresponding to "a"
// b_{x,a} part of the kernel bias vector corresponding to "a"
// b_{h,a} part of the recurrent kernel bias corresponding to "a"
// z update gate vector
// r reset gate vector
if (reset_after)
{
// recurrent kernel applied to timestep (with bias), produces shape (1, n_units * 3)
RowMajorMatrix<1, Dynamic> Uh = h * U;
Uh += b_h;
// z = sigmoid(W_{x,z} x + b_{x,z} + W_{h,z} h + b_{h,z})
z = (Wx.block(k, 0 * n, 1, n) + Uh.block(0, 0 * n, 1, n)).unaryExpr(act_func_recurrent);
// r = sigmoid(W_{x,r} x + b_{x,r} + W_{h,r} h + b_{h,r})
r = (Wx.block(k, 1 * n, 1, n) + Uh.block(0, 1 * n, 1, n)).unaryExpr(act_func_recurrent);
// m = tanh(W_{x,m} x + b_{x,m} + r o (W_{h,m} h + b_{h,m}))
m = (Wx.block(k, 2 * n, 1, n) + (r.array() * Uh.block(0, 2 * n, 1, n).array()).matrix()).unaryExpr(act_func);
}
else
{
// z = sigmoid(W_{x,z} x + b_{x,z} + W_{h,z} h + b_{h,z})
z = (Wx.block(k, 0 * n, 1, n) + h * U.block(0, 0 * n, n, n) + b_h.block(0, 0 * n, 1, n)).unaryExpr(act_func_recurrent);
// r = sigmoid(W_{x,r} x + b_{x,r} + W_{h,r} h + b_{h,r})
r = (Wx.block(k, 1 * n, 1, n) + h * U.block(0, 1 * n, n, n) + b_h.block(0, 1 * n, 1, n)).unaryExpr(act_func_recurrent);
// m = tanh(W_{x,m} x + b_{x,m} + W_{h,m} (r o h) + b_{h,m})
m = (Wx.block(k, 2 * n, 1, n) + (r.array() * h.array()).matrix() * U.block(0, 2 * n, n, n) + b_h.block(0, 2 * n, 1, n)).unaryExpr(act_func);
}
//......... the rest of this example's code is omitted .........
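For the reset_after == false branch, the per-unit arithmetic can be written out with scalars. The sketch below is a minimal single-unit, single-feature GRU step mirroring the commented formulae; the final state update h = z o h_prev + (1 - z) o m follows the usual Keras convention and is an assumption here, since that part of the loop body is omitted above. All weight and input values are made up.

#include <cmath>
#include <cstdio>

// Minimal single-unit, single-feature GRU step (reset_after == false variant).
int main()
{
    auto sigmoid = [](double v) { return 1.0 / (1.0 + std::exp(-v)); };

    // Scalar stand-ins for the W, U and bias blocks (all values made up).
    double Wz = 0.5, Wr = -0.3, Wm =  0.8;   // kernel weights W_{x,z}, W_{x,r}, W_{x,m}
    double Uz = 0.4, Ur =  0.2, Um = -0.6;   // recurrent weights W_{h,z}, W_{h,r}, W_{h,m}
    double bz = 0.1, br =  0.0, bm = -0.1;   // combined biases b_{x,.} + b_{h,.}

    double h = 0.0;                           // initial state
    const double xs[] = { 1.0, 0.5, -0.25 };  // three timesteps of input

    for (double x : xs) {
        double z = sigmoid(Wz * x + Uz * h + bz);          // update gate
        double r = sigmoid(Wr * x + Ur * h + br);          // reset gate
        double m = std::tanh(Wm * x + Um * (r * h) + bm);  // candidate state
        h = z * h + (1.0 - z) * m;                         // new state (assumed Keras convention)
        std::printf("h = %f\n", h);
    }
    return 0;
}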