This page collects typical usage examples of the C++ method mat::empty. If you are wondering what mat::empty does, how to use it, or want to see it in practice, the curated code examples below may help. You can also explore other methods of the mat class.
The following shows 5 code examples of the mat::empty method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
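Before the examples, here is a minimal standalone sketch of what mat::empty reports (it is Armadillo's STL-style check for a matrix holding zero elements); the matrix names and sizes are purely illustrative:

#include <armadillo>
#include <iostream>

int main()
{
    arma::mat A;                                              // default-constructed: 0 x 0, so empty() is true
    arma::mat B(3, 4, arma::fill::zeros);                     // 3 x 4, so empty() is false
    std::cout << A.empty() << " " << B.empty() << std::endl;  // prints "1 0"
    B.reset();                                                // frees the memory; size becomes 0 x 0
    std::cout << B.empty() << std::endl;                      // prints "1"
    return 0;
}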
Example 1: mse
double mse(const mat & A, const mat & W, const mat & H, const mat & W1, const mat & H2)
{
// compute the mean squared error between A and its reconstruction, including the fixed parts W1 and H2
const int k = W.n_cols - H2.n_cols;
mat Adiff = A;
Adiff -= W.cols(0, k-1) * H.cols(0, k-1).t();
if (!W1.empty())
Adiff -= W1*H.cols(k, H.n_cols-1).t();
if (!H2.empty())
Adiff -= W.cols(k, W.n_cols-1)*H2.t();
if (A.is_finite())
return mean(mean(square(Adiff)));
else
return mean(square(Adiff.elem(find_finite(Adiff))));
}
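A minimal driver for the mse helper above, showing that the fixed parts W1 and H2 may simply be passed as empty matrices; the sizes and random data here are made up for illustration:

// hypothetical usage: no fixed columns in W or rows in H, so W1 and H2 stay empty
using namespace arma;
mat A = randu<mat>(100, 50);
mat W = randu<mat>(100, 5);
mat H = randu<mat>(50, 5);
mat W1, H2;                          // 0 x 0, so W1.empty() and H2.empty() are true
double err = mse(A, W, H, W1, H2);   // k = W.n_cols - H2.n_cols = 5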
Example 2: update_WtW
inline void update_WtW(mat & WtW, const mat & W, const mat & W1, const mat & H2)
{
// compute WtW = (W[:, 0:k-1], W1)^T (W[:, 0:k-1], W1)
if (H2.empty())
update_WtW(WtW, W, W1);
else
{
int k = W.n_cols - H2.n_cols;
update_WtW(WtW, W.cols(0, k-1), W1);
}
}
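The two-argument overload update_WtW(WtW, W, W1) is not shown on this page; as an illustration only, the Gram matrix it is expected to produce can be formed directly with join_rows. This is a sketch of the underlying algebra, not the original implementation:

using namespace arma;
mat W0 = randu<mat>(100, 5);                  // columns of W paired with the free part of H
mat W1 = randu<mat>(100, 2);                  // fixed columns appended to W
mat WtW;
if (W1.empty())
    WtW = W0.t() * W0;                        // no fixed columns: plain Gram matrix
else
    WtW = join_rows(W0, W1).t() * join_rows(W0, W1);   // (W0, W1)^T (W0, W1), here 7 x 7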
Example 3: update_WtA
inline void update_WtA(mat & WtA, const mat & W, const mat & W1, const mat & H2, const mat & A)
{
// compute WtA = (W[:, 0:k-1], W1)^T (A - W[, k:end] H2^T)
if (H2.empty())
update_WtA(WtA, W, W1, A);
else
{
int k = W.n_cols - H2.n_cols;
//std::cout << "1.3" << std::endl;
//A.print("A = ");
//(W.cols(k, W.n_cols-1) * H2.t()).print("W[, k:] = ");
update_WtA(WtA, W.cols(0, k-1), W1, A - W.cols(k, W.n_cols-1) * H2.t());
}
}
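Similarly, the quantity that update_WtA fills can be written out directly; a sketch of the algebra stated in the comment above, assuming the same column split (the sizes are illustrative only):

using namespace arma;
mat A  = randu<mat>(100, 50);
mat W  = randu<mat>(100, 7);                   // the last 2 columns multiply the fixed H2
mat W1 = randu<mat>(100, 2);
mat H2 = randu<mat>(50, 2);
int k = W.n_cols - H2.n_cols;                  // = 5
mat Aresid = A - W.cols(k, W.n_cols - 1) * H2.t();        // subtract the fixed part W2 H2^T
mat WtA = join_rows(W.cols(0, k - 1), W1).t() * Aresid;   // (W[:, 0:k-1], W1)^T (A - W2 H2^T)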
Example 4: nnmf
//[[Rcpp::export]]
Rcpp::List nnmf(const mat & A, const unsigned int k, mat W, mat H, umat Wm, umat Hm,
const vec & alpha, const vec & beta, const unsigned int max_iter, const double rel_tol,
const int n_threads, const int verbose, const bool show_warning, const unsigned int inner_max_iter,
const double inner_rel_tol, const int method, unsigned int trace)
{
/******************************************************************************************************
* Non-negative Matrix Factorization (NNMF) using an alternating scheme
* ----------------------------------------------------------------
* Description:
* Decompose matrix A such that
* A = W H
* Arguments:
* A : Matrix to be decomposed
* W, H : Initial matrices of W and H, where ncol(W) = nrow(H) = k. # of rows/columns of W/H could be 0
* Wm, Hm : Masks of W and H, s.t. masked entries are not updated and stay fixed at their initial values
* alpha : [L2, angle, L1] regularization on W (non-masked entries)
* beta : [L2, angle, L1] regularization on H (non-masked entries)
* max_iter : Maximum number of iterations
* rel_tol : Relative tolerance between two successive iterations, = |e2-e1|/avg(e1, e2)
* n_threads : Number of threads (OpenMP)
* verbose : 0 = no tracking, 1 = progress bar, 2 = print iteration info
* show_warning : Whether to show a warning if the target `rel_tol` is not reached
* inner_max_iter : Maximum number of iterations passed to each inner W or H matrix updating loop
* inner_rel_tol : Relative tolerance passed to inner W or H matrix updating loop, = |e2-e1|/avg(e1, e2)
* method : Integer 1, 2, 3 or 4, encoding the method
* : 1 = sequential coordinate-wise minimization using square loss
* : 2 = Lee's multiplicative update with square loss, which is a re-scaled gradient descent
* : 3 = sequential coordinate-wise minimization using a quadratic approximation of the KL-divergence
* : 4 = Lee's multiplicative update with KL-divergence, which is a re-scaled gradient descent
* trace : A positive integer; the error is checked every 'trace' iterations. Computing WH can be very expensive,
* : so one may not want to check the error of A - WH on every single iteration
* Return:
* A list (Rcpp::List) of
* W, H : resulting W and H matrices
* mse_error : a vector of mean squared errors (divided by the number of non-missing entries)
* mkl_error : a vector (length = number of iterations) of mean KL-divergence
* target_error : a vector of loss (0.5*mse or mkl), plus constraints
* average_epoch : a vector of average epochs (one epoch = one complete sweep over W and H)
* Author:
* Eric Xihui Lin <[email protected]>
* Version:
* 2015-12-11
******************************************************************************************************/
unsigned int n = A.n_rows;
unsigned int m = A.n_cols;
//int k = H.n_rows; // decomposition rank k
unsigned int N_non_missing = n*m;
if (trace < 1) trace = 1;
unsigned int err_len = (unsigned int)std::ceil(double(max_iter)/double(trace)) + 1;
vec mse_err(err_len), mkl_err(err_len), terr(err_len), ave_epoch(err_len);
// check progression
bool show_progress = false;
if (verbose == 1) show_progress = true;
Progress prgrss(max_iter, show_progress);
double rel_err = rel_tol + 1;
double terr_last = 1e99;
uvec non_missing;
bool any_missing = !A.is_finite();
if (any_missing)
{
non_missing = find_finite(A);
N_non_missing = non_missing.n_elem;
mkl_err.fill(mean((A.elem(non_missing)+TINY_NUM) % log(A.elem(non_missing)+TINY_NUM) - A.elem(non_missing)));
}
else
mkl_err.fill(mean(mean((A+TINY_NUM) % log(A+TINY_NUM) - A))); // fixed part in KL-dist, mean(A log(A) - A)
if (Wm.empty())
Wm.resize(0, n);
else
inplace_trans(Wm);
if (Hm.empty())
Hm.resize(0, m);
if (W.empty())
{
W.randu(k, n);
W *= 0.01;
if (!Wm.empty())
W.elem(find(Wm > 0)).fill(0.0);
}
else
inplace_trans(W);
if (H.empty())
{
H.randu(k, m);
H *= 0.01;
if (!Hm.empty())
H.elem(find(Hm > 0)).fill(0.0);
}
if (verbose == 2)
{
Rprintf("\n%10s | %10s | %10s | %10s | %10s\n", "Iteration", "MSE", "MKL", "Target", "Rel. Err.");
//......... part of the code omitted here .........
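The stopping rule described in the docstring (rel_tol = |e2-e1|/avg(e1, e2) between two successive error checks) can be sketched on its own; the following is a simplified, self-contained illustration of that criterion, not the omitted remainder of nnmf:

#include <cmath>
#include <cstdio>

int main()
{
    // toy decreasing sequence standing in for the per-check target error
    const double errs[] = {10.0, 4.0, 2.5, 2.2, 2.199, 2.1989};
    const double rel_tol = 1e-3;
    double e_last = 1e99;                       // same sentinel as terr_last above
    for (double e : errs)
    {
        // rel_err = |e2 - e1| / avg(e1, e2) = 2 |e2 - e1| / (e1 + e2)
        double rel_err = 2.0 * std::abs(e_last - e) / (e_last + e + 1e-9);
        std::printf("err = %.4f  rel_err = %.6f\n", e, rel_err);
        if (rel_err < rel_tol) break;           // converged by the rel_tol criterion
        e_last = e;
    }
    return 0;
}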
Example 5: nnls_solver_with_missing
mat nnls_solver_with_missing(const mat & A, const mat & W, const mat & W1, const mat & H2, const umat & mask,
const double & eta, const double & beta, int max_iter, double rel_tol, int n_threads)
{
// A = [W, W1, W2] [H, H1, H2]^T, where A may have missing values.
// Note that in the input, W = [W, W2].
// Compute x = [H, H1]^T given W, W1, W2 (and the fixed H2).
// A0 = W2*H2 is empty when H2 is empty (no partial information in H).
// Return: x = [H, H1]
int n = A.n_rows, m = A.n_cols;
int k = W.n_cols - H2.n_cols;
int kW = W1.n_cols;
int nH = k+kW;
mat x(nH, m, fill::zeros);
if (n_threads < 0) n_threads = 0;
bool is_masked = !mask.empty();
#pragma omp parallel for num_threads(n_threads) schedule(dynamic)
for (int j = 0; j < m; j++)
{
// skip column j entirely if all of its entries are masked
if (is_masked && arma::all(mask.col(j)))
continue;
uvec non_missing = find_finite(A.col(j));
mat WtW(nH, nH); // WtW
update_WtW(WtW, W.rows(non_missing), W1.rows(non_missing), H2);
if (beta > 0) WtW += beta;
if (eta > 0) WtW.diag() += eta;
mat mu(nH, 1); // -WtA
uvec jv(1);
jv(0) = j;
//non_missing.t().print("non_missing = ");
//std::cout << "1.1" << std::endl;
if (H2.empty())
update_WtA(mu, W.rows(non_missing), W1.rows(non_missing), H2, A.submat(non_missing, jv));
else
update_WtA(mu, W.rows(non_missing), W1.rows(non_missing), H2.rows(j, j), A.submat(non_missing, jv));
//std::cout << "1.5" << std::endl;
vec x0(nH);
double tmp;
int i = 0;
double err1, err2 = 9999;
do {
x0 = x.col(j);
err1 = err2;
err2 = 0;
for (int l = 0; l < nH; l++)
{
if (is_masked && mask(l,j) > 0) continue;
tmp = x(l,j) - mu(l,0) / WtW(l,l);
if (tmp < 0) tmp = 0;
if (tmp != x(l,j))
{
mu.col(0) += (tmp - x(l,j)) * WtW.col(l);
}
x(l,j) = tmp;
tmp = std::abs(x(l,j) - x0(l));
if (tmp > err2) err2 = tmp;
}
} while(++i < max_iter && std::abs(err1 - err2) / (err1 + 1e-9) > rel_tol);
}
return x;
}
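The inner do/while above is a standard coordinate-descent NNLS update; below is a self-contained sketch of that same update rule on a small dense problem, without the masking and missing-value handling (the sizes and data are illustrative only):

#include <armadillo>

// minimal coordinate-descent NNLS: minimize ||a - W x||^2 subject to x >= 0,
// using the same per-coordinate update as the loop over l above
int main()
{
    using namespace arma;
    arma_rng::set_seed(1);
    mat W = randu<mat>(50, 5);
    vec a = W * randu<vec>(5);          // a consistent right-hand side
    mat WtW = W.t() * W;
    vec mu  = -W.t() * a;               // gradient at x = 0, i.e. -W^T a (the "-WtA" above)
    vec x(5, fill::zeros);
    for (int iter = 0; iter < 200; ++iter)
    {
        for (uword l = 0; l < x.n_elem; ++l)
        {
            double tmp = x(l) - mu(l) / WtW(l, l);      // unconstrained coordinate minimizer
            if (tmp < 0) tmp = 0;                       // project onto x >= 0
            if (tmp != x(l))
                mu += (tmp - x(l)) * WtW.col(l);        // keep mu = W^T (W x - a) up to date
            x(l) = tmp;
        }
    }
    x.print("x = ");
    return 0;
}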