本文整理汇总了C++中dmatrix类的典型用法代码示例。如果您正苦于以下问题:C++ dmatrix类的具体用法?C++ dmatrix怎么用?C++ dmatrix使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了dmatrix类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: sin
/**
 * Elementwise sine of a matrix.
 * \param m input matrix (rows may have individual column bounds)
 * \return matrix with the same ragged shape where each element is sin(m(i,j))
 */
dmatrix sin(const dmatrix& m)
{
  const int rmin = m.rowmin();
  const int rmax = m.rowmax();
  // capture the per-row column bounds so the result matches m exactly
  ivector lo(rmin, rmax);
  ivector hi(rmin, rmax);
  for (int r = rmin; r <= rmax; ++r)
  {
    lo(r) = m(r).indexmin();
    hi(r) = m(r).indexmax();
  }
  dmatrix result(rmin, rmax, lo, hi);
  for (int r = rmin; r <= rmax; ++r)
  {
    result(r) = sin(m(r));   // vector overload handles one row at a time
  }
  return result;
}
示例2: elem_prod
/**
 * Elementwise (Hadamard) product of two matrices.
 * \param m first factor (rows may have individual column bounds)
 * \param m2 second factor; assumed to share m's row/column layout
 * \return matrix of the same ragged shape holding m(i,j)*m2(i,j)
 */
dmatrix elem_prod(const dmatrix& m, const dmatrix& m2)
{
  const int rmin = m.rowmin();
  const int rmax = m.rowmax();
  // capture the per-row column bounds so the result matches m exactly
  ivector lo(rmin, rmax);
  ivector hi(rmin, rmax);
  for (int r = rmin; r <= rmax; ++r)
  {
    lo(r) = m(r).indexmin();
    hi(r) = m(r).indexmax();
  }
  dmatrix result(rmin, rmax, lo, hi);
  for (int r = rmin; r <= rmax; ++r)
  {
    result(r) = elem_prod(m(r), m2(r));  // vector overload, row by row
  }
  return result;
}
示例3: trace
/**
 Return the sum of the diagonal of a square matrix mat.
 \param mat is a square scalar matrix; exits via ad_exit if not square
 \return sum of mat(i,i) over the (shared) index range
 */
double trace(const dmatrix& mat)
{
  const bool isSquare =
    (mat.colmin() == mat.rowmin()) && (mat.colmax() == mat.rowmax());
  if (!isSquare)
  {
    cerr << "Matrix not square in trace\n";
    ad_exit(1);
  }
  double total = 0.0;
  for (int d = mat.colmin(); d <= mat.colmax(); ++d)
  {
    total += mat.elem(d, d);
  }
  return total;
}
示例4: getAverageToCentroidDiameter
double clusteringValidity::getAverageToCentroidDiameter(const dmatrix& m1) const {
dvector a(m1.columns());
int i,j;
l2Distance<double> dist;
double distance=0.0;
for (i=0; i<m1.rows(); i++) {
a.add(m1.getRow(i));
}
a.divide(m1.rows());
for (j=0; j< m1.rows(); j++) {
distance+=dist.apply(a,m1.getRow(j));
}
if (m1.rows()>0) {
return (2*distance/(double)m1.rows());
} else {
return 2*distance;
}
}
示例5: distances
double clusteringValidity::getMaximumDistance(const dmatrix& m1,
const dmatrix& m2) const {
int i,j;
dmatrix distances(m1.rows(),m2.rows());
l2Distance<double> dist;
for (i=0; i<m1.rows(); i++) {
for (j=0; j<m2.rows(); j++) {
distances[i][j]=dist.apply(m1.getRow(i),m2.getRow(j));
}
}
return distances.maximum();
}
示例6: getAverageDistance
double clusteringValidity::getAverageDistance(const dmatrix& m1,
const dmatrix& m2) const {
double distance=0.0;
int i,j;
l2Distance<double> dist;
for (i=0; i<m1.rows(); i++) {
for (j=0; j<m2.rows(); j++) {
distance+=dist.apply(m1.getRow(i),m2.getRow(j));
}
}
distance=distance/((double)m1.rows()*(double)m2.rows());
return distance;
}
示例7: getAverageDiameter
double clusteringValidity::getAverageDiameter(const dmatrix& m1) const {
double distance=0.0;
int j,k;
l2Distance<double> dist;
for (j=0; j<m1.rows(); j++) {
for (k=0; k<m1.rows(); k++) {
distance+=dist.apply(m1.getRow(j),
m1.getRow(k));
}
}
if (m1.rows()>1) {
return (distance/((double)m1.rows()*
(double)(m1.rows()-1)));
} else {
return distance;
}
}
示例8: ghk
/**
 * Monte Carlo estimate of the multivariate normal rectangle probability
 * P(lower < X < upper), X ~ N(0, Sigma), via the recursive importance
 * sampler commonly known as GHK (Geweke-Hajivassiliou-Keane) — the
 * structure (Choleski factor, sequential truncated-normal draws) matches
 * that algorithm.
 * \param lower vector of lower integration limits (indices 1..n)
 * \param upper vector of upper integration limits (indices 1..n)
 * \param Sigma covariance matrix; must admit a Choleski factorization
 * \param eps m x n matrix of (0,1) draws, one row per Monte Carlo replicate
 * \return average importance weight over the m replicates
 */
dvariable ghk(const dvar_vector& lower,const dvar_vector& upper,
  const dvar_matrix& Sigma, const dmatrix& eps)
{
  RETURN_ARRAYS_INCREMENT();
  int n=lower.indexmax();
  int m=eps.indexmax();                    // number of replicates
  dvariable ssum=0.0;
  dvar_matrix ch=choleski_decomp(Sigma);   // Sigma = ch * ch'
  dvar_vector l(1,n);
  dvar_vector u(1,n);
  for (int k=1;k<=m;k++)
  {
    dvariable weight=1.0;
    // each replicate starts from the untransformed limits
    l=lower;
    u=upper;
    for (int j=1;j<=n;j++)
    {
      // standardize the current limits by the j-th Choleski diagonal
      l(j)/=ch(j,j);
      u(j)/=ch(j,j);
      dvariable Phiu=cumd_norm(u(j));
      dvariable Phil=cumd_norm(l(j));
      weight*=Phiu-Phil;                   // conditional mass of dimension j
      // inverse-CDF draw from the truncated standard normal;
      // 1.e-30 keeps the argument strictly inside (0,1)
      dvariable eta=inv_cumd_norm((Phiu-Phil)*eps(k,j)+Phil+1.e-30);
      // condition the remaining limits on the sampled eta
      for (int i=j+1;i<=n;i++)
      {
        dvariable tmp=ch(i,j)*eta;
        l(i)-=tmp;
        u(i)-=tmp;
      }
    }
    ssum+=weight;
  }
  RETURN_ARRAYS_DECREMENT();
  return ssum/m;
}
示例9: symmetrize
/**
 * Return the symmetric part (m + m') / 2 of a square matrix.
 * Exits via ad_exit if the matrix is not square.
 * \param m square matrix
 * \return symmetric matrix with the same index bounds
 */
dmatrix symmetrize(const dmatrix& m)
{
  const bool isSquare =
    (m.rowmin() == m.colmin()) && (m.rowmax() == m.colmax());
  if (!isSquare)
  {
    cerr << " Non square matrix passed to dmatrix symmetrize\n";
    ad_exit(1);
  }
  const int lb = m.rowmin();
  const int ub = m.rowmax();
  dmatrix result(lb, ub, lb, ub);
  // copy the diagonal, average each strictly-lower pair with its mirror
  for (int i = lb; i <= ub; ++i)
  {
    result(i, i) = m(i, i);
    for (int j = lb; j < i; ++j)
    {
      const double avg = (m(i, j) + m(j, i)) / 2.;
      result(i, j) = avg;
      result(j, i) = avg;
    }
  }
  return result;
}
示例10: mult_likelihood
/**
 * Negative log-likelihood of observed composition data under a
 * multinomial-type model (the gammln terms match a multinomial kernel)
 * with effective sample size exp(log_vn); also fills nu with
 * standardized residuals, one row per observation.
 * \param o matrix of observed compositions, one observation per row
 * \param p matrix of predicted compositions, same dimensions as o
 * \param nu output: standardized residuals (o-p)/sqrt(p(1-p)/vn), row-wise
 * \param log_vn log of the effective sample size
 * \return accumulated negative log-likelihood
 * NOTE(review): the error message below says "multivariate_t_likelihood";
 * it looks copied from a sibling function — confirm.
 */
dvariable mult_likelihood(const dmatrix &o, const dvar_matrix &p, dvar_matrix &nu,
  const dvariable &log_vn)
{
  // kludge to ensure observed and predicted matrixes are the same size
  if(o.colsize()!=p.colsize() || o.rowsize()!=p.rowsize())
  {
    cerr<<"Error in multivariate_t_likelihood, observed and predicted matrixes"
    " are not the same size\n";
    ad_exit(1);
  }
  dvariable vn = mfexp(log_vn);   // effective sample size on natural scale
  dvariable ff = 0.0;             // accumulated negative log-likelihood
  int r1 = o.rowmin();
  int r2 = o.rowmax();
  int c1 = o.colmin();
  int c2 = o.colmax();
  for(int i = r1; i <= r2; i++ )
  {
    dvar_vector sobs = vn * o(i)/sum(o(i)); //scale observed numbers by effective sample size.
    ff -= gammln(vn);
    for(int j = c1; j <= c2; j++ )
    {
      // skip empty bins; gammln is not evaluated at zero
      if( value(sobs(j)) > 0.0 )
        ff += gammln(sobs(j));
    }
    // TINY (external macro) guards against log(0) for zero predictions
    ff -= sobs * log(TINY + p(i));
    // standardized residuals on the proportion scale
    dvar_vector o1=o(i)/sum(o(i));
    dvar_vector p1=p(i)/sum(p(i));
    nu(i) = elem_div(o1-p1,sqrt(elem_prod(p1,1.-p1)/vn));
  }
  // exit(1);
  return ff;
}
示例11: SVD
/**
 calculate Pseudo-Inverse using SVD(Singular Value Decomposition)
 by lapack library DGESVD (_a can be non-square matrix)
 \param _a input matrix (m x n); left unmodified (a copy is factored)
 \param _a_pseu output: resized to n x m and filled with pinv(_a)
 \param _sv_ratio singular values below smax*_sv_ratio are treated as zero
 \return the LAPACK DGESVD info code (0 on success)
 NOTE(review): passing &_a copy's (0,0) element to DGESVD assumes the
 dmatrix stores its elements contiguously in column-major order with
 leading dimension m — confirm against the dmatrix implementation.
*/
int calcPseudoInverse(const dmatrix &_a, dmatrix &_a_pseu, double _sv_ratio)
{
  int i, j, k;
  char jobu = 'A';    // compute all m columns of U
  char jobvt = 'A';   // compute all n rows of V^T
  int m = (int)_a.rows();
  int n = (int)_a.cols();
  int max_mn = max(m,n);
  int min_mn = min(m,n);
  dmatrix a(m,n);
  a = _a;             // DGESVD destroys its input, so factor a copy
  int lda = m;
  double *s = new double[max_mn];   // singular values (zero-padded below)
  int ldu = m;
  double *u = new double[ldu*m];    // left singular vectors, m x m
  int ldvt = n;
  double *vt = new double[ldvt*n];  // right singular vectors transposed, n x n
  int lwork = max(3*min_mn+max_mn, 5*min_mn); // for CLAPACK ver.2 & ver.3
  double *work = new double[lwork];
  int info;
  for(i = 0; i < max_mn; i++) s[i] = 0.0;
  dgesvd_(&jobu, &jobvt, &m, &n, &(a(0,0)), &lda, s, u, &ldu, vt, &ldvt, work,
    &lwork, &info);
  // discard singular values below the relative threshold
  double smin, smax=0.0;
  for (j = 0; j < min_mn; j++) if (s[j] > smax) smax = s[j];
  smin = smax*_sv_ratio; // default _sv_ratio is 1.0e-3
  for (j = 0; j < min_mn; j++) if (s[j] < smin) s[j] = 0.0;
  //------------ calculate pseudo inverse pinv(A) = V*S^(-1)*U^(T)
  // S^(-1)*U^(T): scale column j of U by 1/s[j]; columns past min_mn see
  // the zero-initialized tail of s and are cleared
  for (j = 0; j < m; j++){
    if (s[j]){
      for (i = 0; i < m; i++) u[j*m+i] /= s[j];
    }
    else {
      for (i = 0; i < m; i++) u[j*m+i] = 0.0;
    }
  }
  // V * (S^(-1)*U^(T))
  _a_pseu.resize(n,m);
  for(j = 0; j < n; j++){
    for(i = 0; i < m; i++){
      _a_pseu(j,i) = 0.0;
      for(k = 0; k < min_mn; k++){
        // vt[j*n+k] is V(j,k); u[k*m+i] already holds (S^-1 U^T)(k,i)
        if(s[k]) _a_pseu(j,i) += vt[j*n+k] * u[k*m+i];
      }
    }
  }
  delete [] work;
  delete [] vt;
  delete [] s;
  delete [] u;
  return info;
}
示例12: orthogonalize
inline void orthogonalize(int N__,
int n__,
std::vector<wave_functions*> wfs__,
int idx_bra__,
int idx_ket__,
dmatrix<T>& o__,
wave_functions& tmp__)
{
PROFILE("sddk::wave_functions::orthogonalize");
auto pu = wfs__[0]->pu();
/* project out the old subspace:
* |\tilda phi_new> = |phi_new> - |phi_old><phi_old|phi_new> */
if (N__ > 0) {
inner(*wfs__[idx_bra__], 0, N__, *wfs__[idx_ket__], N__, n__, 0.0, o__, 0, 0);
transform(pu, -1.0, wfs__, 0, N__, o__, 0, 0, 1.0, wfs__, N__, n__);
}
/* orthogonalize new n__ x n__ block */
inner(*wfs__[idx_bra__], N__, n__, *wfs__[idx_ket__], N__, n__, 0.0, o__, 0, 0);
/* single MPI rank */
if (o__.blacs_grid().comm().size() == 1) {
bool use_magma{false};
#if defined(__GPU) && defined(__MAGMA)
if (pu == GPU) {
use_magma = true;
}
#endif
if (use_magma) {
#ifdef __GPU
/* Cholesky factorization */
if (int info = linalg<GPU>::potrf(n__, o__.template at<GPU>(), o__.ld())) {
std::stringstream s;
s << "error in GPU factorization, info = " << info;
TERMINATE(s);
}
/* inversion of triangular matrix */
if (linalg<GPU>::trtri(n__, o__.template at<GPU>(), o__.ld())) {
TERMINATE("error in inversion");
}
#endif
} else { /* CPU version */
//check_hermitian("OVLP", o__, n__);
//o__.serialize("overlap.dat", n__);
/* Cholesky factorization */
if (int info = linalg<CPU>::potrf(n__, &o__(0, 0), o__.ld())) {
std::stringstream s;
s << "error in factorization, info = " << info << std::endl
<< "number of existing states: " << N__ << std::endl
<< "number of new states: " << n__ << std::endl
<< "number of wave_functions: " << wfs__.size() << std::endl
<< "idx_bra: " << idx_bra__ << " " << "idx_ket:" << idx_ket__;
TERMINATE(s);
}
/* inversion of triangular matrix */
if (linalg<CPU>::trtri(n__, &o__(0, 0), o__.ld())) {
TERMINATE("error in inversion");
}
if (pu == GPU) {
#ifdef __GPU
acc::copyin(o__.template at<GPU>(), o__.ld(), o__.template at<CPU>(), o__.ld(), n__, n__);
#endif
}
}
/* CPU version */
if (pu == CPU) {
/* multiplication by triangular matrix */
for (auto& e: wfs__) {
/* wave functions are complex, transformation matrix is complex */
if (std::is_same<T, double_complex>::value) {
linalg<CPU>::trmm('R', 'U', 'N', e->pw_coeffs().num_rows_loc(), n__, double_complex(1, 0),
reinterpret_cast<double_complex*>(o__.template at<CPU>()), o__.ld(),
e->pw_coeffs().prime().at<CPU>(0, N__), e->pw_coeffs().prime().ld());
if (e->has_mt() && e->mt_coeffs().num_rows_loc()) {
linalg<CPU>::trmm('R', 'U', 'N', e->mt_coeffs().num_rows_loc(), n__, double_complex(1, 0),
reinterpret_cast<double_complex*>(o__.template at<CPU>()), o__.ld(),
e->mt_coeffs().prime().at<CPU>(0, N__), e->mt_coeffs().prime().ld());
}
}
/* wave functions are real (psi(G) = psi^{*}(-G)), transformation matrix is real */
if (std::is_same<T, double>::value) {
linalg<CPU>::trmm('R', 'U', 'N', 2 * e->pw_coeffs().num_rows_loc(), n__, 1.0,
reinterpret_cast<double*>(o__.template at<CPU>()), o__.ld(),
reinterpret_cast<double*>(e->pw_coeffs().prime().at<CPU>(0, N__)), 2 * e->pw_coeffs().prime().ld());
if (e->has_mt() && e->mt_coeffs().num_rows_loc()) {
linalg<CPU>::trmm('R', 'U', 'N', 2 * e->mt_coeffs().num_rows_loc(), n__, 1.0,
reinterpret_cast<double*>(o__.template at<CPU>()), o__.ld(),
reinterpret_cast<double*>(e->mt_coeffs().prime().at<CPU>(0, N__)), 2 * e->mt_coeffs().prime().ld());
}
}
}
}
#ifdef __GPU
if (pu == GPU) {
//.........这里部分代码省略.........
示例13: ad_exit
/**
 * Product of a variable matrix and a constant matrix, m1 * cm2.
 * Computes the value with plain doubles, then records operand positions
 * (and cm2's values) on the gradient stack so dmcm_prod can propagate
 * derivatives in the reverse sweep.
 * \param m1 variable matrix (left factor)
 * \param cm2 constant matrix (right factor); its row bounds must match
 *        m1's column bounds, otherwise ad_exit(21) is called
 * \return variable matrix holding the product
 */
dvar_matrix operator*(const dvar_matrix& m1, const dmatrix& cm2)
{
  if (m1.colmin() != cm2.rowmin() || m1.colmax() != cm2.rowmax())
  {
    cerr << " Incompatible array bounds in "
    "dmatrix operator*(const dvar_matrix& x, const dmatrix& m)\n";
    ad_exit(21);
  }
  dmatrix cm1=value(m1);   // constant snapshot of m1's values
  //dmatrix cm2=value(m2);
  dmatrix tmp(m1.rowmin(),m1.rowmax(), cm2.colmin(), cm2.colmax());
#ifdef OPT_LIB
  const size_t rowsize = (size_t)cm2.rowsize();
#else
  const int _rowsize = cm2.rowsize();
  assert(_rowsize > 0);
  const size_t rowsize = (size_t)_rowsize;
#endif
  try
  {
    // cache one column of cm2 at a time so the inner dot-product loop
    // reads contiguous memory
    double* temp_col = new double[rowsize];
    // offset the pointer so it can be indexed with cm2's row bounds
    temp_col-=cm2.rowmin();
    for (int j=cm2.colmin(); j<=cm2.colmax(); j++)
    {
      for (int k=cm2.rowmin(); k<=cm2.rowmax(); k++)
      {
        temp_col[k] = cm2.elem(k,j);
      }
      for (int i=cm1.rowmin(); i<=cm1.rowmax(); i++)
      {
        double sum=0.0;
        dvector& temp_row = cm1(i);
        for (int k=cm1.colmin(); k<=cm1.colmax(); k++)
        {
          sum+=temp_row(k) * (temp_col[k]);
          // sum+=temp_row(k) * cm2(k,j);
        }
        tmp(i,j)=sum;
      }
    }
    // undo the bound offset before freeing
    temp_col+=cm2.rowmin();
    delete [] temp_col;
    temp_col = 0;
  }
  catch (std::bad_alloc& e)
  {
    cerr << "Error[" << __FILE__ << ':' << __LINE__
      << "]: Unable to allocate array.\n";
    //ad_exit(21);
    throw e;
  }
  // record the operation for the reverse sweep: result values plus the
  // positions/values dmcm_prod needs to form the adjoint contributions
  dvar_matrix vtmp=nograd_assign(tmp);
  save_identifier_string("TEST1");
  //m1.save_dvar_matrix_value();
  m1.save_dvar_matrix_position();
  cm2.save_dmatrix_value();
  cm2.save_dmatrix_position();
  vtmp.save_dvar_matrix_position();
  save_identifier_string("TEST6");
  gradient_structure::GRAD_STACK1->
    set_gradient_stack(dmcm_prod);
  return vtmp;
}
示例14: getAverageInterpointDistance
double clusteringValidity::getAverageInterpointDistance(const dmatrix& m1,
const dmatrix& m2) const {
l2Distance<double> dist;
int i;
dvector a(m1.columns());
dvector b(m2.columns());
for (i=0; i<m1.rows();i++) {
a.add(m1.getRow(i));
}
a.divide(m1.rows()); // centroid 1
for (i=0; i<m2.rows();i++) {
b.add(m2.getRow(i));
}
b.divide(m2.rows()); // centroid 2
double distance=0.0;
for (i=0; i<m1.rows(); i++) {
distance+=dist.apply(m1.getRow(i),a);
}
for (i=0; i<m2.rows(); i++) {
distance+=dist.apply(m2.getRow(i),b);
}
return (distance/(m1.rows()+m2.rows()));
}
示例15: trainSteepestSequential
/**
 * Train the MLP by steepest descent with sequential (online) updates:
 * each epoch presents every training pattern once, in a freshly shuffled
 * order, updating the weights after each pattern.  Uses a momentum term
 * when param.momentum > 0, plain gradient steps otherwise.  Normalized
 * error is reported through the progress object, which can also request
 * an early abort; training also stops once the normalized error reaches
 * param.stopError.
 * \param data training patterns, one per row
 * \param internalIds internal class id for each row of data
 * \return true (this implementation always reports success)
 */
bool MLP::trainSteepestSequential(const dmatrix& data,
                                  const ivector& internalIds) {
  const parameters& param = getParameters();
  char buffer[256];
  bool abort = false;
  scramble<int> scrambler;
  int i,j,k;
  double tmpError;
  // idx holds the presentation order 0..rows-1, reshuffled every epoch
  ivector idx;
  idx.resize(data.rows(),0,false,false);
  for (i=0;i<idx.size();++i) {
    idx.at(i)=i;
  }
  if (param.momentum > 0) {
    // with momentum
    // delta carries the running update vector across pattern presentations
    dvector grad,delta(weights.size(),0.0);
    for (i=0; !abort && (i<param.maxNumberOfEpochs); ++i) {
      scrambler.apply(idx); // present the pattern in a random sequence
      totalError = 0;
      for (j=0;j<idx.size();++j) {
        k=idx.at(j);
        calcGradient(data.getRow(k),internalIds.at(k),grad);
        computeActualError(internalIds.at(k),tmpError);
        totalError+=tmpError;
        // fold the new gradient into delta with momentum weighting
        // (semantics per dvector::addScaled), then apply the step
        delta.addScaled(param.learnrate,grad,param.momentum,delta);
        weights.add(delta);
      }
      // update progress info object
      if (validProgressObject()) {
        sprintf(buffer,"Error=%f",totalError/errorNorm);
        getProgressObject().step(buffer);
        abort = abort || (totalError/errorNorm <= param.stopError);
        abort = abort || getProgressObject().breakRequested();
      }
    }
  } else {
    // without momentum
    // NOTE(review): these redeclarations shadow idx/i/j/k/tmpError from
    // the enclosing scope; harmless but redundant.
    ivector idx;
    idx.resize(data.rows(),0,false,false);
    dvector grad;
    int i,j,k;
    double tmpError;
    for (i=0;i<idx.size();++i) {
      idx.at(i)=i;
    }
    for (i=0; !abort && (i<param.maxNumberOfEpochs); ++i) {
      scrambler.apply(idx); // present the pattern in a random sequence
      totalError = 0;
      for (j=0;j<idx.size();++j) {
        k=idx.at(j);
        calcGradient(data.getRow(k),internalIds.at(k),grad);
        computeActualError(internalIds.at(k),tmpError);
        totalError+=tmpError;
        // plain gradient step, no momentum
        weights.addScaled(param.learnrate,grad);
      }
      // update progress info object
      if (validProgressObject()) {
        sprintf(buffer,"Error=%f",totalError/errorNorm);
        getProgressObject().step(buffer);
        abort = abort || (totalError/errorNorm <= param.stopError);
        abort = abort || getProgressObject().breakRequested();
      }
    }
  }
  return true;
}