This article collects typical usage examples of the C++ Mat::copy_size method. If you are wondering how Mat::copy_size is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the Mat class that this method belongs to.
The following shows 15 code examples of the Mat::copy_size method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
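All of the examples below come from the internals of the Armadillo C++ linear-algebra library. In Armadillo, out.copy_size(X) resizes out to the dimensions of X without initialising its elements, which is why the snippets typically fill out immediately afterwards. A minimal standalone sketch of the method itself (matrix size and values are illustrative only):
#include <armadillo>

int main()
  {
  arma::mat X = arma::randu<arma::mat>(3, 4);

  arma::mat out;
  out.copy_size(X);   // out is now 3x4; its elements are left uninitialised
  out.zeros();        // typical follow-up: give the elements a defined value

  out.print("out:");
  return 0;
  }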
Example 1: tmp
template<typename T1>
inline
void
op_cumsum_vec::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_cumsum_vec>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& X = tmp.M;
const uword n_elem = X.n_elem;
out.copy_size(X);
eT* out_mem = out.memptr();
const eT* X_mem = X.memptr();
eT acc = eT(0);
for(uword i=0; i<n_elem; ++i)
{
acc += X_mem[i];
out_mem[i] = acc;
}
}
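For context, op_cumsum_vec is the backend of the public cumsum() function when it is applied to a vector; copy_size() gives the output the same length as the input before the accumulation loop runs. A minimal usage sketch (input values are illustrative):
#include <armadillo>

int main()
  {
  arma::vec v = arma::linspace<arma::vec>(1.0, 4.0, 4);   // {1, 2, 3, 4}
  arma::vec c = arma::cumsum(v);                          // {1, 3, 6, 10}
  c.print("cumulative sum:");
  return 0;
  }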
Example 2: tmp
template<typename T1>
inline
void
op_fliplr::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_fliplr>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& X = tmp.M;  // reference, so the alias check below is meaningful
if(&out != &X)
{
out.copy_size(X);
for(uword i=0; i<X.n_cols; ++i)
{
out.col(i) = X.col(X.n_cols-1 - i);
}
}
else
{
const uword N = X.n_cols / 2;
for(uword i=0; i<N; ++i)
{
out.swap_cols(i, X.n_cols-1 - i);
}
}
}
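op_fliplr is the backend of the public fliplr() function, which reverses the column order; copy_size() is what gives the non-aliased output its shape. A minimal usage sketch:
#include <armadillo>

int main()
  {
  arma::mat A = arma::randu<arma::mat>(3, 4);
  arma::mat B = arma::fliplr(A);   // same size as A, columns in reverse order
  B.print("fliplr(A):");
  return 0;
  }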
Example 3:
template<typename eT>
inline
void
op_sort::apply_noalias(Mat<eT>& out, const Mat<eT>& X, const uword sort_type, const uword dim)
{
arma_extra_debug_sigprint();
if( (X.n_rows * X.n_cols) <= 1 )
{
out = X;
return;
}
if(dim == 0) // sort the contents of each column
{
arma_extra_debug_print("op_sort::apply(), dim = 0");
out = X;
const uword n_rows = out.n_rows;
const uword n_cols = out.n_cols;
for(uword col=0; col < n_cols; ++col)
{
op_sort::direct_sort( out.colptr(col), n_rows, sort_type );
}
}
else
if(dim == 1) // sort the contents of each row
{
if(X.n_rows == 1) // a row vector
{
arma_extra_debug_print("op_sort::apply(), dim = 1, vector specific");
out = X;
op_sort::direct_sort(out.memptr(), out.n_elem, sort_type);
}
else // not a row vector
{
arma_extra_debug_print("op_sort::apply(), dim = 1, generic");
out.copy_size(X);
const uword n_rows = out.n_rows;
const uword n_cols = out.n_cols;
podarray<eT> tmp_array(n_cols);
for(uword row=0; row < n_rows; ++row)
{
op_sort::copy_row(tmp_array.memptr(), X, row);
op_sort::direct_sort( tmp_array.memptr(), n_cols, sort_type );
op_sort::copy_row(out, tmp_array.memptr(), row);
}
}
}
}
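op_sort backs the public sort() function; note that copy_size() is only needed on the row-wise path, since the column-wise path sorts a copy in place. A minimal usage sketch (the string form of the sort direction is assumed to be available in the Armadillo release being used; older releases take a numeric sort type instead):
#include <armadillo>

int main()
  {
  arma::mat A = arma::randu<arma::mat>(4, 3);

  arma::mat B = arma::sort(A, "ascend",  0);   // sort the contents of each column
  arma::mat C = arma::sort(A, "descend", 1);   // sort the contents of each row

  B.print("columns sorted:");
  C.print("rows sorted:");
  return 0;
  }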
Example 4: Y
template<typename T1>
inline
bool
op_princomp::direct_princomp
(
Mat<typename T1::elem_type>& coeff_out,
Mat<typename T1::elem_type>& score_out,
const Base<typename T1::elem_type, T1>& X,
const typename arma_not_cx<typename T1::elem_type>::result* junk
)
{
arma_extra_debug_sigprint();
arma_ignore(junk);
typedef typename T1::elem_type eT;
const unwrap_check<T1> Y( X.get_ref(), score_out );
const Mat<eT>& in = Y.M;
const uword n_rows = in.n_rows;
const uword n_cols = in.n_cols;
if(n_rows > 1) // more than one sample
{
// subtract the mean - use score_out as temporary matrix
score_out = in; score_out.each_row() -= mean(in);
// singular value decomposition
Mat<eT> U;
Col<eT> s;
const bool svd_ok = svd(U, s, coeff_out, score_out);
if(svd_ok == false) { return false; }
// normalize the eigenvalues
s /= std::sqrt( double(n_rows - 1) );
// project the samples to the principals
score_out *= coeff_out;
if(n_rows <= n_cols) // number of samples is less than their dimensionality
{
score_out.cols(n_rows-1,n_cols-1).zeros();
Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
s = s_tmp;
}
}
else // 0 or 1 samples
{
coeff_out.eye(n_cols, n_cols);
score_out.copy_size(in);
score_out.zeros();
}
return true;
}
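direct_princomp is the workhorse behind the public princomp() function; in the degenerate 0-or-1-sample branch, copy_size() plus zeros() produces an all-zero score matrix of the right shape. A minimal usage sketch (matrix sizes are illustrative):
#include <armadillo>

int main()
  {
  arma::mat X = arma::randu<arma::mat>(10, 5);   // 10 samples, 5 variables

  arma::mat coeff;   // principal component coefficients
  arma::mat score;   // samples projected onto the principal components
  arma::princomp(coeff, score, X);

  score.print("scores:");
  return 0;
  }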
Example 5: eT
template<typename eT>
inline
void
interp1_helper_nearest(const Mat<eT>& XG, const Mat<eT>& YG, const Mat<eT>& XI, Mat<eT>& YI, const eT extrap_val)
{
arma_extra_debug_sigprint();
const eT XG_min = XG.min();
const eT XG_max = XG.max();
YI.copy_size(XI);
const eT* XG_mem = XG.memptr();
const eT* YG_mem = YG.memptr();
const eT* XI_mem = XI.memptr();
eT* YI_mem = YI.memptr();
const uword NG = XG.n_elem;
const uword NI = XI.n_elem;
uword best_j = 0;
for(uword i=0; i<NI; ++i)
{
eT best_err = Datum<eT>::inf;
const eT XI_val = XI_mem[i];
if((XI_val < XG_min) || (XI_val > XG_max))
{
YI_mem[i] = extrap_val;
}
else
{
// XG and XI are guaranteed to be sorted in ascending manner,
// so start searching XG from last known optimum position
for(uword j=best_j; j<NG; ++j)
{
const eT tmp = XG_mem[j] - XI_val;
const eT err = (tmp >= eT(0)) ? tmp : -tmp;
if(err >= best_err)
{
// error is going up, so we have found the optimum position
break;
}
else
{
best_err = err;
best_j = j; // remember the optimum position
}
}
YI_mem[i] = YG_mem[best_j];
}
}
}
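interp1_helper_nearest implements the "nearest" method of the public interp1() function (available in later Armadillo releases); YI.copy_size(XI) gives the output one value per query point. A minimal usage sketch:
#include <armadillo>

int main()
  {
  arma::vec x  = arma::linspace<arma::vec>(0.0, 10.0, 11);   // known locations, ascending
  arma::vec y  = arma::square(x);                            // known values
  arma::vec xi = arma::linspace<arma::vec>(0.0, 10.0, 21);   // query locations

  arma::vec yi;
  arma::interp1(x, y, xi, yi, "nearest");
  yi.print("yi:");
  return 0;
  }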
Example 6: repmat
template<typename T>
inline
bool
op_princomp::direct_princomp
(
Mat< std::complex<T> >& coeff_out,
Mat< std::complex<T> >& score_out,
const Mat< std::complex<T> >& in
)
{
arma_extra_debug_sigprint();
typedef std::complex<T> eT;
const u32 n_rows = in.n_rows;
const u32 n_cols = in.n_cols;
if(n_rows > 1) // more than one sample
{
// subtract the mean - use score_out as temporary matrix
score_out = in - repmat(mean(in), n_rows, 1);
// singular value decomposition
Mat<eT> U;
Col< T> s;
const bool svd_ok = svd(U,s,coeff_out,score_out);
if(svd_ok == false)
{
return false;
}
// U.reset();
// normalize the eigenvalues
s /= std::sqrt( double(n_rows - 1) );
// project the samples to the principals
score_out *= coeff_out;
if(n_rows <= n_cols) // number of samples is less than their dimensionality
{
score_out.cols(n_rows-1,n_cols-1).zeros();
}
}
else // 0 or 1 samples
{
coeff_out.eye(n_cols, n_cols);
score_out.copy_size(in);
score_out.zeros();
}
return true;
}
Example 7:
template<typename eT>
inline
void
op_trimat::apply_htrans
(
Mat<eT>& out,
const Mat<eT>& A,
const bool upper,
const typename arma_cx_only<eT>::result* junk
)
{
arma_extra_debug_sigprint();
arma_ignore(junk);
arma_debug_check( (A.is_square() == false), "trimatu()/trimatl(): given matrix must be square sized" );
const uword N = A.n_rows;
if(&out != &A)
{
out.copy_size(A);
}
if(upper)
{
// Upper triangular: but since we're transposing, we're taking the lower
// triangular and putting it in the upper half.
for(uword row = 0; row < N; ++row)
{
eT* out_colptr = out.colptr(row);
for(uword col = 0; col <= row; ++col)
{
//out.at(col, row) = std::conj( A.at(row, col) );
out_colptr[col] = std::conj( A.at(row, col) );
}
}
}
else
{
// Lower triangular: but since we're transposing, we're taking the upper
// triangular and putting it in the lower half.
for(uword row = 0; row < N; ++row)
{
for(uword col = row; col < N; ++col)
{
out.at(col, row) = std::conj( A.at(row, col) );
}
}
}
op_trimat::fill_zeros(out, upper);
}
Example 8: tmp
template<typename T1>
inline
void
op_trimat::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_trimat>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& A = tmp.M;
arma_debug_check( (A.is_square() == false), "trimatu()/trimatl(): given matrix must be square sized" );
const uword N = A.n_rows;
const bool upper = (in.aux_uword_a == 0);
if(&out != &A)
{
out.copy_size(A);
if(upper)
{
// upper triangular: copy the diagonal and the elements above the diagonal
for(uword i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( out_data, A_data, i+1 );
}
}
else
{
// lower triangular: copy the diagonal and the elements below the diagonal
for(uword i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( &out_data[i], &A_data[i], N-i );
}
}
}
op_trimat::fill_zeros(out, upper);
}
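op_trimat::apply backs the public trimatu() and trimatl() functions; when the output does not alias the input, copy_size() sets the shape and only the relevant triangle is copied before fill_zeros() clears the rest. A minimal usage sketch:
#include <armadillo>

int main()
  {
  arma::mat A = arma::randu<arma::mat>(4, 4);   // must be square

  arma::mat U = arma::trimatu(A);   // keep the diagonal and the upper triangle
  arma::mat L = arma::trimatl(A);   // keep the diagonal and the lower triangle

  U.print("trimatu(A):");
  L.print("trimatl(A):");
  return 0;
  }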
Example 9: tmp
template<typename T1>
inline
void
op_fliplr::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_fliplr>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& X = tmp.M;
const uword X_n_cols = X.n_cols;
if(&out != &X)
{
out.copy_size(X);
if(T1::is_row || X.is_rowvec())
{
for(uword i=0; i<X_n_cols; ++i) { out[i] = X[X_n_cols-1 - i]; }
}
else
{
for(uword i=0; i<X_n_cols; ++i) { out.col(i) = X.col(X_n_cols-1 - i); }
}
}
else
{
const uword N = X_n_cols / 2;
if(T1::is_row || X.is_rowvec())
{
for(uword i=0; i<N; ++i) { std::swap(out[i], out[X_n_cols-1 - i]); }
}
else
{
for(uword i=0; i<N; ++i) { out.swap_cols(i, X_n_cols-1 - i); }
}
}
}
Example 10: norm
template<typename eT>
inline
void
op_normalise_mat::apply(Mat<eT>& out, const Mat<eT>& A, const uword p, const uword dim)
{
arma_extra_debug_sigprint();
typedef typename get_pod_type<eT>::result T;
out.copy_size(A);
if(A.n_elem == 0) { return; }
if(dim == 0)
{
const uword n_cols = A.n_cols;
for(uword i=0; i<n_cols; ++i)
{
const T norm_val_a = norm(A.col(i), p);
const T norm_val_b = (norm_val_a != T(0)) ? norm_val_a : T(1);
out.col(i) = A.col(i) / norm_val_b;
}
}
else
{
// better-than-nothing implementation
const uword n_rows = A.n_rows;
for(uword i=0; i<n_rows; ++i)
{
const T norm_val_a = norm(A.row(i), p);
const T norm_val_b = (norm_val_a != T(0)) ? norm_val_a : T(1);
out.row(i) = A.row(i) / norm_val_b;
}
}
}
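op_normalise_mat backs the public normalise() function for matrices; copy_size() shapes the output before each column (dim = 0) or row (dim = 1) is scaled to unit p-norm, with zero-norm slices left untouched. A minimal usage sketch:
#include <armadillo>

int main()
  {
  arma::mat A = arma::randu<arma::mat>(3, 4);

  arma::mat B = arma::normalise(A);         // defaults: p = 2, dim = 0 (per column)
  arma::mat C = arma::normalise(A, 1, 1);   // p = 1, dim = 1 (per row)

  B.print("columns normalised:");
  C.print("rows normalised:");
  return 0;
  }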
Example 11: tmp
template<typename T1>
inline
void
op_symmat_cx::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_symmat_cx>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& A = tmp.M;
arma_debug_check( (A.is_square() == false), "symmatu()/symmatl(): given matrix must be square sized" );
const uword N = A.n_rows;
const bool upper = (in.aux_uword_a == 0);
const bool do_conj = (in.aux_uword_b == 1);
if(&out != &A)
{
out.copy_size(A);
if(upper)
{
// upper triangular: copy the diagonal and the elements above the diagonal
for(uword i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( out_data, A_data, i+1 );
}
}
else
{
// lower triangular: copy the diagonal and the elements below the diagonal
for(uword i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( &out_data[i], &A_data[i], N-i );
}
}
}
if(do_conj)
{
if(upper)
{
// reflect elements across the diagonal from upper triangle to lower triangle
for(uword col=1; col < N; ++col)
{
const eT* coldata = out.colptr(col);
for(uword row=0; row < col; ++row)
{
out.at(col,row) = std::conj(coldata[row]);
}
}
}
else
{
// reflect elements across the diagonal from lower triangle to upper triangle
for(uword col=0; col < N; ++col)
{
const eT* coldata = out.colptr(col);
for(uword row=(col+1); row < N; ++row)
{
out.at(col,row) = std::conj(coldata[row]);
}
}
}
}
else // don't do complex conjugation
{
if(upper)
{
// reflect elements across the diagonal from upper triangle to lower triangle
for(uword col=1; col < N; ++col)
{
const eT* coldata = out.colptr(col);
for(uword row=0; row < col; ++row)
{
out.at(col,row) = coldata[row];
}
}
}
else
{
// reflect elements across the diagonal from lower triangle to upper triangle
//......... part of the code is omitted here .........
Example 12: tmp
template<typename T1>
inline
void
op_symmat::apply
(
Mat<typename T1::elem_type>& out,
const Op<T1,op_symmat>& in,
const typename arma_not_cx<typename T1::elem_type>::result* junk
)
{
arma_extra_debug_sigprint();
arma_ignore(junk);
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& A = tmp.M;
arma_debug_check( (A.is_square() == false), "symmatu()/symmatl(): given matrix must be square" );
const u32 N = A.n_rows;
const bool upper = (in.aux_u32_a == 0);
if(&out != &A)
{
out.copy_size(A);
if(upper)
{
// upper triangular: copy the diagonal and the elements above the diagonal
for(u32 i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( out_data, A_data, i+1 );
}
}
else
{
// lower triangular: copy the diagonal and the elements below the diagonal
for(u32 i=0; i<N; ++i)
{
const eT* A_data = A.colptr(i);
eT* out_data = out.colptr(i);
arrayops::copy( &out_data[i], &A_data[i], N-i );
}
}
}
if(upper)
{
// reflect elements across the diagonal from upper triangle to lower triangle
for(u32 col=1; col < N; ++col)
{
const eT* coldata = out.colptr(col);
for(u32 row=0; row < col; ++row)
{
out.at(col,row) = coldata[row];
}
}
}
else
{
// reflect elements across the diagonal from lower triangle to upper triangle
for(u32 col=0; col < N; ++col)
{
const eT* coldata = out.colptr(col);
for(u32 row=(col+1); row < N; ++row)
{
out.at(col,row) = coldata[row];
}
}
}
}
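op_symmat backs the public symmatu() and symmatl() functions for real matrices; copy_size() shapes the output, one triangle is copied, and the loop at the end mirrors it across the diagonal. A minimal usage sketch:
#include <armadillo>

int main()
  {
  arma::mat A = arma::randu<arma::mat>(4, 4);   // must be square

  arma::mat S1 = arma::symmatu(A);   // reflect the upper triangle into the lower half
  arma::mat S2 = arma::symmatl(A);   // reflect the lower triangle into the upper half

  S1.print("symmatu(A):");
  S2.print("symmatl(A):");
  return 0;
  }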
Example 13: tmp
template<typename T1>
inline
void
op_shuffle::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_shuffle>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& X = tmp.M;
if(X.is_empty()) { out.copy_size(X); return; }
const uword dim = in.aux_uword_a;
const uword N = (dim == 0) ? X.n_rows : X.n_cols;
// see "fn_sort_index.hpp" for the definition of "arma_sort_index_packet"
// and the associated comparison functor
std::vector< arma_sort_index_packet<int,uword> > packet_vec(N);
for(uword i=0; i<N; ++i)
{
packet_vec[i].val = std::rand();
packet_vec[i].index = i;
}
arma_sort_index_helper_ascend comparator;
std::sort( packet_vec.begin(), packet_vec.end(), comparator );
const bool is_alias = (&out == &X);
if(X.is_vec() == false)
{
if(is_alias == false)
{
arma_extra_debug_print("op_shuffle::apply(): matrix");
out.copy_size(X);
if(dim == 0)
{
for(uword i=0; i<N; ++i) { out.row(i) = X.row(packet_vec[i].index); }
}
else
{
for(uword i=0; i<N; ++i) { out.col(i) = X.col(packet_vec[i].index); }
}
}
else // in-place shuffle
{
arma_extra_debug_print("op_shuffle::apply(): in-place matrix");
// reuse the val member variable of packet_vec
// to indicate whether a particular row or column
// has already been shuffled
for(uword i=0; i<N; ++i)
{
packet_vec[i].val = 0;
}
if(dim == 0)
{
for(uword i=0; i<N; ++i)
{
if(packet_vec[i].val == 0)
{
const uword j = packet_vec[i].index;
out.swap_rows(i, j);
packet_vec[j].val = 1;
}
}
}
else
{
for(uword i=0; i<N; ++i)
{
if(packet_vec[i].val == 0)
{
const uword j = packet_vec[i].index;
out.swap_cols(i, j);
packet_vec[j].val = 1;
}
}
}
}
}
else // we're dealing with a vector
{
if(is_alias == false)
{
arma_extra_debug_print("op_shuffle::apply(): vector");
out.copy_size(X);
//......... part of the code is omitted here .........
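op_shuffle backs the public shuffle() function; copy_size() shapes the non-aliased output before whole rows or columns are copied in random order. A minimal usage sketch (it seeds std::rand(), since the code above draws from it; newer Armadillo releases use their own RNG instead):
#include <armadillo>
#include <cstdlib>
#include <ctime>

int main()
  {
  std::srand( static_cast<unsigned>(std::time(0)) );

  arma::mat A = arma::randu<arma::mat>(5, 3);
  arma::mat B = arma::shuffle(A, 0);   // randomly permute the rows    (dim = 0)
  arma::mat C = arma::shuffle(A, 1);   // randomly permute the columns (dim = 1)

  B.print("rows shuffled:");
  C.print("columns shuffled:");
  return 0;
  }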
Example 14: tmp
template<typename T1>
inline
void
op_circshift::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_circshift>& in)
{
arma_extra_debug_sigprint();
typedef typename T1::elem_type eT;
const unwrap<T1> tmp(in.m);
const Mat<eT>& X = tmp.M;
if(X.is_empty()) { out.copy_size(X); return; }
const uword dim = in.aux_uword_a;
arma_debug_check( (dim > 1), "circshift(): dim must be 0 or 1" );
const uword shift = in.aux_uword_b;
const bool is_alias = (&out == &X);
uword xshift = 0;
uword yshift = 0;
uword xdim = X.n_cols;
uword ydim = X.n_rows;
if(is_alias == 0) {
out.copy_size(X);
if(dim == 0) {
xshift = shift;
for (uword i = 0; i < xdim; i++) {
uword ii = positive_modulo(i + xshift, xdim);
for (uword j = 0; j < ydim; j++) {
uword jj = positive_modulo(j + yshift, ydim);
out(jj,ii) = X(j,i);
}
}
} else {
yshift = shift;
for (uword i = 0; i < xdim; i++) {
uword ii = positive_modulo(i + xshift, xdim);
for (uword j = 0; j < ydim; j++) {
uword jj = positive_modulo(j + yshift, ydim);
out(jj,ii) = X(j,i);
}
}
}
} else { //X is an alias of out (same memory address)
Mat<eT> temp;
temp.copy_size(X);
temp = X;
if(dim == 0) {
xshift = shift;
for (uword i = 0; i < xdim; i++) {
uword ii = positive_modulo(i + xshift, xdim);
for (uword j = 0; j < ydim; j++) {
uword jj = positive_modulo(j + yshift, ydim);
out(jj,ii) = temp(j,i);
}
}
} else {
yshift = shift;
for (uword i = 0; i < xdim; i++) {
uword ii = positive_modulo(i + xshift, xdim);
for (uword j = 0; j < ydim; j++) {
uword jj = positive_modulo(j + yshift, ydim);
out(jj,ii) = temp(j,i);
}
}
}
}
}
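The positive_modulo() helper used above is not part of the excerpt. A plausible definition consistent with how it is called (an assumption, not code taken from the original source) is:
// Hypothetical helper: wraps an index into the range [0, n).
// With unsigned arguments a plain "i % n" would already suffice; the
// add-then-reduce form below also stays correct if signed offsets are passed in.
template<typename T>
inline T positive_modulo(const T i, const T n)
  {
  return ((i % n) + n) % n;
  }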
Example 15: repmat
template<typename eT>
inline
void
op_princomp::direct_princomp
(
Mat<eT>& coeff_out,
Mat<eT>& score_out,
Col<eT>& latent_out,
Col<eT>& tsquared_out,
const Mat<eT>& in
)
{
arma_extra_debug_sigprint();
const u32 n_rows = in.n_rows;
const u32 n_cols = in.n_cols;
if(n_rows > 1) // more than one sample
{
// subtract the mean - use score_out as temporary matrix
score_out = in - repmat(mean(in), n_rows, 1);
// singular value decomposition
Mat<eT> U;
Col<eT> s;
const bool svd_ok = svd(U,s,coeff_out,score_out);
if(svd_ok == false)
{
arma_print("princomp(): singular value decomposition failed");
coeff_out.reset();
score_out.reset();
latent_out.reset();
tsquared_out.reset();
return;
}
//U.reset(); // TODO: do we need this ? U will get automatically deleted anyway
// normalize the eigenvalues
s /= std::sqrt(n_rows - 1);
// project the samples to the principals
score_out *= coeff_out;
if(n_rows <= n_cols) // number of samples is less than their dimensionality
{
score_out.cols(n_rows-1,n_cols-1).zeros();
//Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
Col<eT> s_tmp(n_cols);
s_tmp.zeros();
s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
s = s_tmp;
// compute the Hotelling's T-squared
s_tmp.rows(0,n_rows-2) = eT(1) / s_tmp.rows(0,n_rows-2);
const Mat<eT> S = score_out * diagmat(Col<eT>(s_tmp));
tsquared_out = sum(S%S,1);
}
else
{
// compute the Hotelling's T-squared
const Mat<eT> S = score_out * diagmat(Col<eT>( eT(1) / s));
tsquared_out = sum(S%S,1);
}
// compute the eigenvalues of the principal vectors
latent_out = s%s;
}
else // single sample - row
{
if(n_rows == 1)
{
coeff_out = eye< Mat<eT> >(n_cols, n_cols);
score_out.copy_size(in);
score_out.zeros();
latent_out.set_size(n_cols);
latent_out.zeros();
tsquared_out.set_size(1);
tsquared_out.zeros();
}
else
{
coeff_out.reset();
score_out.reset();
latent_out.reset();
tsquared_out.reset();
}
}
}
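This overload backs the full form of the public princomp() function, which also returns the eigenvalues and the per-sample Hotelling's T-squared statistic; as in the earlier examples, copy_size() plus zeros() handles the single-sample branch. A minimal usage sketch (matrix sizes are illustrative):
#include <armadillo>

int main()
  {
  arma::mat X = arma::randu<arma::mat>(20, 6);   // 20 samples, 6 variables

  arma::mat coeff;      // principal component coefficients
  arma::mat score;      // projected samples
  arma::vec latent;     // eigenvalues of the covariance matrix
  arma::vec tsquared;   // Hotelling's T-squared statistic per sample

  arma::princomp(coeff, score, latent, tsquared, X);

  latent.print("latent:");
  tsquared.print("tsquared:");
  return 0;
  }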