This page collects typical usage examples of the C++ method CTime::cur_time_diff. If you are wondering what CTime::cur_time_diff does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore further usage examples of the enclosing class, CTime.
Twelve code examples of CTime::cur_time_diff are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
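Across all twelve examples the method follows the same pattern: construct a CTime, call start() to (re)set the reference point, run the work to be measured, then read the elapsed seconds with cur_time_diff(). The following minimal sketch distills that pattern; it assumes CTime lives in shogun/lib/Time.h and that cur_time_diff() returns the elapsed time in seconds as a float64_t (which matches how every example here uses it), and do_expensive_work() is a hypothetical placeholder for the code being timed.

#include <shogun/lib/Time.h>

using namespace shogun;

void timed_block()
{
    CTime time;
    time.start();                           // (re)set the timer's reference point

    do_expensive_work();                    // hypothetical workload to measure

    float64_t secs = time.cur_time_diff();  // elapsed seconds since start()
    SG_SPRINT("work took %fs\n", secs);
}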
Example 1: CTime
CSimpleFeatures<float64_t>* CKernelLocallyLinearEmbedding::embed_kernel(CKernel* kernel)
{
    CTime* time = new CTime();

    time->start();
    SGMatrix<float64_t> kernel_matrix = kernel->get_kernel_matrix();
    SG_DEBUG("Kernel matrix computation took %fs\n",time->cur_time_diff());

    time->start();
    SGMatrix<int32_t> neighborhood_matrix = get_neighborhood_matrix(kernel_matrix,m_k);
    SG_DEBUG("Neighbors finding took %fs\n",time->cur_time_diff());

    time->start();
    SGMatrix<float64_t> M_matrix = construct_weight_matrix(kernel_matrix,neighborhood_matrix);
    SG_DEBUG("Weights computation took %fs\n",time->cur_time_diff());

    kernel_matrix.destroy_matrix();
    neighborhood_matrix.destroy_matrix();

    time->start();
    SGMatrix<float64_t> nullspace = construct_embedding(M_matrix,m_target_dim);
    SG_DEBUG("Embedding construction took %fs\n",time->cur_time_diff());

    M_matrix.destroy_matrix();
    delete time;

    return new CSimpleFeatures<float64_t>(nullspace);
}
Example 2: svm_bmrm_solver
bmrm_return_value_T svm_bmrm_solver(
    bmrm_data_T* data,
    float64_t* W,
    float64_t TolRel,
    float64_t TolAbs,
    float64_t lambda,
    uint32_t _BufSize,
    bool cleanICP,
    uint32_t cleanAfter,
    float64_t K,
    uint32_t Tmax,
    CRiskFunction* risk_function)
{
    bmrm_return_value_T bmrm = {0, 0, 0, 0, 0, 0, 0};
    libqp_state_T qp_exitflag;
    float64_t *b, *beta, *diag_H, sq_norm_W;
    float64_t R, *subgrad, *A, QPSolverTolRel, rsum, C=1.0;
    uint32_t *I, *ICPcounter, *ICPs, cntICP=0;
    uint8_t S = 1;
    uint32_t nDim=data->w_dim;
    CTime ttime;
    float64_t tstart, tstop;

    float64_t *b2, *beta2, *diag_H2, *A2, *H2;
    uint32_t *I2, *ICPcounter2, nCP_new=0, idx=0, idx2=0, icp_iter=0, icp_iter2=0;
    int32_t idx_icp=0, idx_icp2=0;
    bool flag1=true, flag2=true;

    // record the start time
    tstart=ttime.cur_time_diff(false);

    BufSize=_BufSize;
    QPSolverTolRel=TolRel*0.5;

    H=NULL;
    b=NULL;
    beta=NULL;
    A=NULL;
    subgrad=NULL;
    diag_H=NULL;
    I=NULL;
    ICPcounter=NULL;
    ICPs=NULL;
    b2=NULL;
    beta2=NULL;
    H2=NULL;
    I2=NULL;
    ICPcounter2=NULL;
    A2=NULL;
    diag_H2=NULL;

    H=(float64_t*)LIBBMRM_CALLOC(BufSize*BufSize, sizeof(float64_t));
    if (H==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    A=(float64_t*)LIBBMRM_CALLOC(nDim*BufSize, sizeof(float64_t));
    if (A==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    b=(float64_t*)LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (b==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    beta=(float64_t*)LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (beta==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    subgrad=(float64_t*)LIBBMRM_CALLOC(nDim, sizeof(float64_t));
    if (subgrad==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    diag_H=(float64_t*)LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (diag_H==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    I=(uint32_t*)LIBBMRM_CALLOC(BufSize, sizeof(uint32_t));
    if (I==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }
//......... remaining code omitted .........
Example 3: p
//......... preceding code omitted .........
            m_relative_tolerence, m_absolute_tolerence);

    // start the timer
    CTime time;
    time.start();

    // set the residuals to zero
    if (m_store_residuals)
        m_residuals.set_const(0.0);

    // CG iteration begins
    for (it.begin(r); !it.end(r); ++it)
    {
        SG_DEBUG("CG iteration %d, residual norm %f\n",
            it.get_iter_info().iteration_count,
            it.get_iter_info().residual_norm);

        if (m_store_residuals)
        {
            m_residuals[it.get_iter_info().iteration_count]
                =it.get_iter_info().residual_norm;
        }

        // apply linear operator to the direction vector
        SGVector<float64_t> Ap_=A->apply(p_);
        Map<VectorXd> Ap(Ap_.vector, Ap_.vlen);

        // compute p^{T}Ap, if zero, failure
        float64_t p_dot_Ap=p.dot(Ap);
        if (p_dot_Ap==0.0)
            break;

        // compute the beta parameter of CG_M
        float64_t beta=-r_norm2/p_dot_Ap;

        // compute the zeta-shifted parameter of CG_M
        compute_zeta_sh_new(zeta_sh_old, zeta_sh_cur, shifts, beta_old, beta,
            alpha, zeta_sh_new);

        // compute beta-shifted parameter of CG_M
        compute_beta_sh(zeta_sh_new, zeta_sh_cur, beta, beta_sh);

        // update the solution vector and residual
        for (index_t i=0; i<shifts.vlen; ++i)
            x_sh.col(i)-=beta_sh[i]*p_sh.col(i);

        // r_{i}=r_{i-1}+\beta_{i}Ap
        r+=beta*Ap;

        // compute new ||r||_{2}, if zero, converged
        float64_t r_norm2_i=r.dot(r);
        if (r_norm2_i==0.0)
            break;

        // compute the alpha parameter of CG_M
        alpha=r_norm2_i/r_norm2;

        // update ||r||_{2}
        r_norm2=r_norm2_i;

        // update direction
        p=r+alpha*p;

        compute_alpha_sh(zeta_sh_new, zeta_sh_cur, beta_sh, beta, alpha, alpha_sh);
        for (index_t i=0; i<shifts.vlen; ++i)
        {
            p_sh.col(i)*=alpha_sh[i];
            p_sh.col(i)+=zeta_sh_new[i]*r;
        }

        // update parameters
        for (index_t i=0; i<shifts.vlen; ++i)
        {
            zeta_sh_old[i]=zeta_sh_cur[i];
            zeta_sh_cur[i]=zeta_sh_new[i];
        }
        beta_old=beta;
    }

    float64_t elapsed=time.cur_time_diff();

    if (!it.succeeded(r))
        SG_WARNING("Did not converge!\n");

    SG_INFO("Iteration took %d times, residual norm=%.20lf, time elapsed=%f\n",
        it.get_iter_info().iteration_count, it.get_iter_info().residual_norm, elapsed);

    // compute the final result vector multiplied by weights
    SGVector<complex128_t> result(b.vlen);
    result.set_const(0.0);
    Map<VectorXcd> x(result.vector, result.vlen);

    for (index_t i=0; i<x_sh.cols(); ++i)
        x+=x_sh.col(i)*weights[i];

    SG_DEBUG("Leaving\n");
    return result;
}
Example 4: train_machine
bool CShareBoost::train_machine(CFeatures* data)
{
    if (data)
        set_features(data);

    if (m_features == NULL)
        SG_ERROR("No features given for training\n")
    if (m_labels == NULL)
        SG_ERROR("No labels given for training\n")
    if (m_nonzero_feas <= 0)
        SG_ERROR("Set a valid (> 0) number of non-zero features to seek before training\n")
    if (m_nonzero_feas >= dynamic_cast<CDenseFeatures<float64_t>*>(m_features)->get_num_features())
        SG_ERROR("It doesn't make sense to use ShareBoost with num non-zero features >= num features in the data\n")

    m_fea = dynamic_cast<CDenseFeatures<float64_t> *>(m_features)->get_feature_matrix();
    m_rho = SGMatrix<float64_t>(m_multiclass_strategy->get_num_classes(), m_fea.num_cols);
    m_rho_norm = SGVector<float64_t>(m_fea.num_cols);
    m_pred = SGMatrix<float64_t>(m_fea.num_cols, m_multiclass_strategy->get_num_classes());
    m_pred.zero();

    m_activeset = SGVector<int32_t>(m_fea.num_rows);
    m_activeset.vlen = 0;

    m_machines->reset_array();
    for (int32_t i=0; i < m_multiclass_strategy->get_num_classes(); ++i)
        m_machines->push_back(new CLinearMachine());

    CTime *timer = new CTime();

    float64_t t_compute_pred = 0; // t of 1st round is 0, since no pred to compute
    for (int32_t t=0; t < m_nonzero_feas; ++t)
    {
        timer->start();
        compute_rho();
        int32_t i_fea = choose_feature();
        m_activeset.vector[m_activeset.vlen] = i_fea;
        m_activeset.vlen += 1;
        float64_t t_choose_feature = timer->cur_time_diff();
        timer->start();
        optimize_coefficients();
        float64_t t_optimize = timer->cur_time_diff();

        SG_SDEBUG(" SB[round %03d]: (%8.4f + %8.4f) sec.\n", t,
            t_compute_pred + t_choose_feature, t_optimize);

        timer->start();
        compute_pred();
        t_compute_pred = timer->cur_time_diff();
    }

    SG_UNREF(timer);

    // release memory
    m_fea = SGMatrix<float64_t>();
    m_rho = SGMatrix<float64_t>();
    m_rho_norm = SGVector<float64_t>();
    m_pred = SGMatrix<float64_t>();

    return true;
}
Example 5: train
bool CLPBoost::train(CFeatures* data)
{
    ASSERT(labels);
    ASSERT(features);
    int32_t num_train_labels=labels->get_num_labels();
    int32_t num_feat=features->get_dim_feature_space();
    int32_t num_vec=features->get_num_vectors();
    ASSERT(num_vec==num_train_labels);

    delete[] w;
    w=new float64_t[num_feat];
    memset(w,0,sizeof(float64_t)*num_feat);
    w_dim=num_feat;

    CCplex solver;
    solver.init(E_LINEAR);
    SG_PRINT("setting up lpboost\n");
    solver.setup_lpboost(C1, num_vec);
    SG_PRINT("finished setting up lpboost\n");

    float64_t result=init(num_vec);
    ASSERT(result);

    int32_t num_hypothesis=0;
    CTime time;
    CSignal::clear_cancel();

    while (!(CSignal::cancel_computations()))
    {
        int32_t max_dim=0;
        float64_t violator=find_max_violator(max_dim);
        SG_PRINT("iteration:%06d violator: %10.17f (>1.0) chosen: %d\n", num_hypothesis, violator, max_dim);
        if (violator <= 1.0+epsilon && num_hypothesis>1) //no constraint violated
        {
            SG_PRINT("converged after %d iterations!\n", num_hypothesis);
            break;
        }

        float64_t factor=+1.0;
        if (max_dim>=num_svec)
        {
            factor=-1.0;
            max_dim-=num_svec;
        }

        SGSparseVectorEntry<float64_t>* h=sfeat[max_dim].features;
        int32_t len=sfeat[max_dim].num_feat_entries;
        solver.add_lpboost_constraint(factor, h, len, num_vec, labels);
        solver.optimize(u);
        //CMath::display_vector(u, num_vec, "u");
        num_hypothesis++;

        if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
            break;
    }

    float64_t* lambda=new float64_t[num_hypothesis];
    solver.optimize(u, lambda);

    //CMath::display_vector(lambda, num_hypothesis, "lambda");
    for (int32_t i=0; i<num_hypothesis; i++)
    {
        int32_t d=dim->get_element(i);
        if (d>=num_svec)
            w[d-num_svec]+=lambda[i];
        else
            w[d]-=lambda[i];
    }
    //solver.write_problem("problem.lp");
    solver.cleanup();
    cleanup();

    return true;
}
Example 6: result
SGVector<float64_t> CConjugateGradientSolver::solve(
    CLinearOperator<float64_t>* A, SGVector<float64_t> b)
{
    SG_DEBUG("CConjugateGradientSolve::solve(): Entering..\n");

    // sanity check
    REQUIRE(A, "Operator is NULL!\n");
    REQUIRE(A->get_dimension()==b.vlen, "Dimension mismatch!\n");

    // the final solution vector, initial guess is 0
    SGVector<float64_t> result(b.vlen);
    result.set_const(0.0);

    // the rest of the part hinges on eigen3 for computing norms
    Map<VectorXd> x(result.vector, result.vlen);
    Map<VectorXd> b_map(b.vector, b.vlen);

    // direction vector
    SGVector<float64_t> p_(result.vlen);
    Map<VectorXd> p(p_.vector, p_.vlen);

    // residual r_i=b-Ax_i, here x_0=[0], so r_0=b
    VectorXd r=b_map;

    // initial direction is same as residual
    p=r;

    // the iterator for this iterative solver
    IterativeSolverIterator<float64_t> it(b_map, m_max_iteration_limit,
        m_relative_tolerence, m_absolute_tolerence);

    // CG iteration begins
    float64_t r_norm2=r.dot(r);

    // start the timer
    CTime time;
    time.start();

    // set the residuals to zero
    if (m_store_residuals)
        m_residuals.set_const(0.0);

    for (it.begin(r); !it.end(r); ++it)
    {
        SG_DEBUG("CG iteration %d, residual norm %f\n",
            it.get_iter_info().iteration_count,
            it.get_iter_info().residual_norm);

        if (m_store_residuals)
        {
            m_residuals[it.get_iter_info().iteration_count]
                =it.get_iter_info().residual_norm;
        }

        // apply linear operator to the direction vector
        SGVector<float64_t> Ap_=A->apply(p_);
        Map<VectorXd> Ap(Ap_.vector, Ap_.vlen);

        // compute p^{T}Ap, if zero, failure
        float64_t p_dot_Ap=p.dot(Ap);
        if (p_dot_Ap==0.0)
            break;

        // compute the alpha parameter of CG
        float64_t alpha=r_norm2/p_dot_Ap;

        // update the solution vector and residual
        // x_{i}=x_{i-1}+\alpha_{i}p
        x+=alpha*p;

        // r_{i}=r_{i-1}-\alpha_{i}Ap
        r-=alpha*Ap;

        // compute new ||r||_{2}, if zero, converged
        float64_t r_norm2_i=r.dot(r);
        if (r_norm2_i==0.0)
            break;

        // compute the beta parameter of CG
        float64_t beta=r_norm2_i/r_norm2;

        // update direction, and ||r||_{2}
        r_norm2=r_norm2_i;
        p=r+beta*p;
    }

    float64_t elapsed=time.cur_time_diff();

    if (!it.succeeded(r))
        SG_WARNING("Did not converge!\n");

    SG_INFO("Iteration took %ld times, residual norm=%.20lf, time elapsed=%lf\n",
        it.get_iter_info().iteration_count, it.get_iter_info().residual_norm, elapsed);

    SG_DEBUG("CConjugateGradientSolve::solve(): Leaving..\n");
    return result;
}
Example 7: train
//......... preceding code omitted .........
    float64_t obj=0;
    delta_active=num_vec;
    last_it_noimprovement=-1;

    work_epsilon=0.99;
    autoselected_epsilon=work_epsilon;

    compute_projection(num_feat, num_vec);

    CTime time;
    float64_t loop_time=0;
    while (!(CSignal::cancel_computations()))
    {
        CTime t;
        delta_active=find_active(num_feat, num_vec, num_active, num_bound);

        update_active(num_feat, num_vec);

#ifdef DEBUG_SUBGRADIENTLPM
        SG_PRINT("==================================================\niteration: %d ", num_iterations);
        obj=compute_objective(num_feat, num_vec);
        SG_PRINT("objective:%.10f alpha: %.10f dir_deriv: %f num_bound: %d num_active: %d work_eps: %10.10f eps: %10.10f auto_eps: %10.10f time:%f\n",
            obj, alpha, dir_deriv, num_bound, num_active, work_epsilon, epsilon, autoselected_epsilon, loop_time);
#else
        SG_ABS_PROGRESS(work_epsilon, -CMath::log10(work_epsilon), -CMath::log10(0.99999999), -CMath::log10(epsilon), 6);
#endif
        //CMath::display_vector(w, w_dim, "w");
        //SG_PRINT("bias: %f\n", bias);
        //CMath::display_vector(proj, num_vec, "proj");
        //CMath::display_vector(idx_active, num_active, "idx_active");
        //SG_PRINT("num_active: %d\n", num_active);
        //CMath::display_vector(idx_bound, num_bound, "idx_bound");
        //SG_PRINT("num_bound: %d\n", num_bound);
        //CMath::display_vector(sum_CXy_active, num_feat, "sum_CXy_active");
        //SG_PRINT("sum_Cy_active: %f\n", sum_Cy_active);
        //CMath::display_vector(grad_w, num_feat, "grad_w");
        //SG_PRINT("grad_b:%f\n", grad_b);

        dir_deriv=compute_min_subgradient(num_feat, num_vec, num_active, num_bound);

        alpha=line_search(num_feat, num_vec);

        if (num_it_noimprovement==10 || num_bound<qpsize_max)
        {
            float64_t norm_grad=CMath::dot(grad_w, grad_w, num_feat) +
                grad_b*grad_b;

            SG_PRINT("CHECKING OPTIMALITY CONDITIONS: "
                "work_epsilon: %10.10f delta_active:%d alpha: %10.10f norm_grad: %10.10f a*norm_grad:%10.16f\n",
                work_epsilon, delta_active, alpha, norm_grad, CMath::abs(alpha*norm_grad));

            if (work_epsilon<=epsilon && delta_active==0 && CMath::abs(alpha*norm_grad)<1e-6)
                break;
            else
                num_it_noimprovement=0;
        }

        //if (work_epsilon<=epsilon && delta_active==0 && num_it_noimprovement)
        if ((dir_deriv<0 || alpha==0) && (work_epsilon<=epsilon && delta_active==0))
        {
            if (last_it_noimprovement==num_iterations-1)
            {
                SG_PRINT("no improvement...\n");
                num_it_noimprovement++;
            }
            else
                num_it_noimprovement=0;

            last_it_noimprovement=num_iterations;
        }

        CMath::vec1_plus_scalar_times_vec2(w, -alpha, grad_w, num_feat);
        bias-=alpha*grad_b;

        update_projection(alpha, num_vec);

        t.stop();
        loop_time=t.time_diff_sec();
        num_iterations++;

        if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
            break;
    }

    SG_INFO("converged after %d iterations\n", num_iterations);

    obj=compute_objective(num_feat, num_vec);
    SG_INFO("objective: %f alpha: %f dir_deriv: %f num_bound: %d num_active: %d\n",
        obj, alpha, dir_deriv, num_bound, num_active);

#ifdef DEBUG_SUBGRADIENTLPM
    CMath::display_vector(w, w_dim, "w");
    SG_PRINT("bias: %f\n", bias);
#endif
    SG_PRINT("solver time:%f s\n", lpmtim);

    cleanup();

    return true;
}
Example 8: svm_bmrm_solver
bmrm_return_value_T svm_bmrm_solver(
    CStructuredModel* model,
    float64_t* W,
    float64_t TolRel,
    float64_t TolAbs,
    float64_t _lambda,
    uint32_t _BufSize,
    bool cleanICP,
    uint32_t cleanAfter,
    float64_t K,
    uint32_t Tmax,
    bool verbose)
{
    bmrm_return_value_T bmrm;
    libqp_state_T qp_exitflag={0, 0, 0, 0};
    float64_t *b, *beta, *diag_H, *prevW;
    float64_t R, *subgrad, *A, QPSolverTolRel, C=1.0, wdist=0.0;
    floatmax_t rsum, sq_norm_W, sq_norm_Wdiff=0.0;
    uint32_t *I;
    uint8_t S=1;
    uint32_t nDim=model->get_dim();
    CTime ttime;
    float64_t tstart, tstop;

    bmrm_ll *CPList_head, *CPList_tail, *cp_ptr, *cp_ptr2, *cp_list=NULL;
    float64_t *A_1=NULL, *A_2=NULL;
    bool *map=NULL;

    // record the start time
    tstart=ttime.cur_time_diff(false);

    BufSize=_BufSize;
    QPSolverTolRel=1e-9;

    H=NULL;
    b=NULL;
    beta=NULL;
    A=NULL;
    subgrad=NULL;
    diag_H=NULL;
    I=NULL;
    prevW=NULL;

    H= (float64_t*) LIBBMRM_CALLOC(BufSize*BufSize, sizeof(float64_t));
    if (H==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    A= (float64_t*) LIBBMRM_CALLOC(nDim*BufSize, sizeof(float64_t));
    if (A==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    b= (float64_t*) LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (b==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    beta= (float64_t*) LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (beta==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    subgrad= (float64_t*) LIBBMRM_CALLOC(nDim, sizeof(float64_t));
    if (subgrad==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    diag_H= (float64_t*) LIBBMRM_CALLOC(BufSize, sizeof(float64_t));
    if (diag_H==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }

    I= (uint32_t*) LIBBMRM_CALLOC(BufSize, sizeof(uint32_t));
    if (I==NULL)
    {
        bmrm.exitflag=-2;
        goto cleanup;
    }
//......... remaining code omitted .........
Example 9: main
int main()
{
    init_shogun(&print_message, &print_warning,
        &print_error);

    try
    {
        uint256_t* a;
        uint32_t* b;

        CTime t;
        t.io->set_loglevel(MSG_DEBUG);

        SG_SPRINT("gen data..");
        t.start();
        gen_ints(a,b, LEN);
        t.cur_time_diff(true);

        SG_SPRINT("qsort..");
        t.start();
        CMath::qsort_index(a, b, LEN);
        t.cur_time_diff(true);

        SG_SPRINT("\n\n");
        for (uint32_t i=0; i<10; i++)
        {
            SG_SPRINT("a[%d]=", i);
            a[i].print_hex();
            SG_SPRINT("\n");
        }

        SG_SPRINT("\n\n");
        a[0]=(uint64_t[4]) {1,2,3,4};
        uint64_t val[4]={5,6,7,8};
        a[1]=val;
        a[2]=a[0];
        CMath::swap(a[0],a[1]);

        printf("a[0]==a[1] %d\n", (int) (a[0] == a[1]));
        printf("a[0]<a[1] %d\n", (int) (a[0] < a[1]));
        printf("a[0]<=a[1] %d\n", (int) (a[0] <= a[1]));
        printf("a[0]>a[1] %d\n", (int) (a[0] > a[1]));
        printf("a[0]>=a[1] %d\n", (int) (a[0] >= a[1]));
        printf("a[0]==a[0] %d\n", (int) (a[0] == a[0]));
        printf("a[0]<a[0] %d\n", (int) (a[0] < a[0]));
        printf("a[0]<=a[0] %d\n", (int) (a[0] <= a[0]));
        printf("a[0]>a[0] %d\n", (int) (a[0] > a[0]));
        printf("a[0]>=a[0] %d\n", (int) (a[0] >= a[0]));

        SG_SPRINT("\n\n");
        for (uint32_t i=0; i<10 ; i++)
        {
            SG_SPRINT("a[%d]=", i);
            a[i].print_hex();
            printf("\n");
        }

        delete[] a;
        delete[] b;
    }
    catch(ShogunException & sh)
    {
        SG_SPRINT("%s",sh.get_exception_string());
    }

    exit_shogun();
}
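Example 9 is the only one that passes true to cur_time_diff: right after each SG_SPRINT label, the call both reports and returns the elapsed time, whereas examples 2, 8, 10, and 12 pass false to fetch the value silently. A minimal sketch of that contrast follows, assuming the boolean is a verbosity flag (the exact signature is not shown on this page) and with run_benchmark() as a hypothetical workload:

CTime t;
t.start();
run_benchmark();                          // hypothetical workload
t.cur_time_diff(true);                    // verbose: the call itself prints the elapsed time

t.start();
run_benchmark();
float64_t secs = t.cur_time_diff(false);  // silent: just returns the seconds
SG_SPRINT("measured %fs\n", secs);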
Example 10: main
int main(int argc, char ** argv)
{
    init_shogun_with_defaults();

    SGVector< float64_t > labs(NUM_CLASSES*NUM_SAMPLES);
    SGMatrix< float64_t > feats(DIMS, NUM_CLASSES*NUM_SAMPLES);

    gen_rand_data(labs, feats);
    //read_data(labs, feats);

    // Create train labels
    CMulticlassSOLabels* labels = new CMulticlassSOLabels(labs);
    CMulticlassLabels* mlabels = new CMulticlassLabels(labs);

    // Create train features
    CDenseFeatures< float64_t >* features = new CDenseFeatures< float64_t >(feats);

    // Create structured model
    CMulticlassModel* model = new CMulticlassModel(features, labels);

    // Create loss function
    CHingeLoss* loss = new CHingeLoss();

    // Create SO-SVM
    CPrimalMosekSOSVM* sosvm = new CPrimalMosekSOSVM(model, loss, labels);
    CDualLibQPBMSOSVM* bundle = new CDualLibQPBMSOSVM(model, loss, labels, 1000);
    bundle->set_verbose(false);
    SG_REF(sosvm);
    SG_REF(bundle);

    CTime start;
    float64_t t1;
    sosvm->train();
    SG_SPRINT(">>>> PrimalMosekSOSVM trained in %9.4f\n", (t1 = start.cur_time_diff(false)));
    bundle->train();
    SG_SPRINT(">>>> BMRM trained in %9.4f\n", start.cur_time_diff(false)-t1);

    CStructuredLabels* out = CStructuredLabels::obtain_from_generic(sosvm->apply());
    CStructuredLabels* bout = CStructuredLabels::obtain_from_generic(bundle->apply());

    // Create liblinear svm classifier with L2-regularized L2-loss
    CLibLinear* svm = new CLibLinear(L2R_L2LOSS_SVC);

    // Add some configuration to the svm
    svm->set_epsilon(EPSILON);
    svm->set_bias_enabled(false);

    // Create a multiclass svm classifier that consists of several of the previous one
    CLinearMulticlassMachine* mc_svm =
        new CLinearMulticlassMachine( new CMulticlassOneVsRestStrategy(),
            (CDotFeatures*) features, svm, mlabels);
    SG_REF(mc_svm);

    // Train the multiclass machine using the data passed in the constructor
    mc_svm->train();
    CMulticlassLabels* mout = CMulticlassLabels::obtain_from_generic(mc_svm->apply());

    SGVector< float64_t > w = sosvm->get_w();
    for ( int32_t i = 0 ; i < w.vlen ; ++i )
        SG_SPRINT("%10f ", w[i]);
    SG_SPRINT("\n\n");

    for ( int32_t i = 0 ; i < NUM_CLASSES ; ++i )
    {
        CLinearMachine* lm = (CLinearMachine*) mc_svm->get_machine(i);
        SGVector< float64_t > mw = lm->get_w();
        for ( int32_t j = 0 ; j < mw.vlen ; ++j )
            SG_SPRINT("%10f ", mw[j]);
        SG_UNREF(lm); // because of CLinearMulticlassMachine::get_machine()
    }
    SG_SPRINT("\n");

    CStructuredAccuracy* structured_evaluator = new CStructuredAccuracy();
    CMulticlassAccuracy* multiclass_evaluator = new CMulticlassAccuracy();
    SG_REF(structured_evaluator);
    SG_REF(multiclass_evaluator);

    SG_SPRINT("SO-SVM: %5.2f%%\n", 100.0*structured_evaluator->evaluate(out, labels));
    SG_SPRINT("BMRM: %5.2f%%\n", 100.0*structured_evaluator->evaluate(bout, labels));
    SG_SPRINT("MC: %5.2f%%\n", 100.0*multiclass_evaluator->evaluate(mout, mlabels));

    // Free memory
    SG_UNREF(multiclass_evaluator);
    SG_UNREF(structured_evaluator);
    SG_UNREF(mout);
    SG_UNREF(mc_svm);
    SG_UNREF(bundle);
    SG_UNREF(sosvm);
    SG_UNREF(bout);
    SG_UNREF(out);

    exit_shogun();

    return 0;
}
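Example 10 shows a second idiom worth noting: a single CTime measures two training phases back to back without restarting the clock, and the second duration is recovered by subtracting the first checkpoint. A minimal sketch of the same idea, assuming (as the example suggests, since start() is never called) that the clock begins running on construction, and with hypothetical train_a()/train_b() standing in for the two solvers:

CTime start;                                     // clock appears to begin at construction
train_a();                                       // hypothetical phase 1
float64_t t1 = start.cur_time_diff(false);       // checkpoint after phase 1
train_b();                                       // hypothetical phase 2
float64_t t2 = start.cur_time_diff(false) - t1;  // phase 2 alone, by subtraction
SG_SPRINT("phase 1: %9.4f s, phase 2: %9.4f s\n", t1, t2);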
Example 11: apply
CFeatures* CLocallyLinearEmbedding::apply(CFeatures* features)
{
    ASSERT(features);
    // check features
    if (!(features->get_feature_class()==C_DENSE &&
            features->get_feature_type()==F_DREAL))
    {
        SG_ERROR("Given features are not of SimpleRealFeatures type.\n");
    }
    // shorthand for simplefeatures
    CDenseFeatures<float64_t>* simple_features = (CDenseFeatures<float64_t>*) features;
    SG_REF(features);

    // get and check number of vectors
    int32_t N = simple_features->get_num_vectors();
    if (m_k>=N)
        SG_ERROR("Number of neighbors (%d) should be less than number of objects (%d).\n",
            m_k, N);

    // compute distance matrix
    SG_DEBUG("Computing distance matrix\n");
    ASSERT(m_distance);
    CTime* time = new CTime();

    time->start();
    m_distance->init(simple_features,simple_features);
    SGMatrix<float64_t> distance_matrix = m_distance->get_distance_matrix();
    m_distance->remove_lhs_and_rhs();
    SG_DEBUG("Distance matrix computation took %fs\n",time->cur_time_diff());

    SG_DEBUG("Calculating neighborhood matrix\n");
    SGMatrix<int32_t> neighborhood_matrix;

    time->start();
    if (m_auto_k)
    {
        neighborhood_matrix = get_neighborhood_matrix(distance_matrix,m_max_k);
        m_k = estimate_k(simple_features,neighborhood_matrix);
        SG_DEBUG("Estimated k with value of %d\n",m_k);
    }
    else
        neighborhood_matrix = get_neighborhood_matrix(distance_matrix,m_k);
    SG_DEBUG("Neighbors finding took %fs\n",time->cur_time_diff());

    // init W (weight) matrix
    float64_t* W_matrix = SG_CALLOC(float64_t, N*N);

    // construct weight matrix
    SG_DEBUG("Constructing weight matrix\n");
    time->start();
    SGMatrix<float64_t> weight_matrix = construct_weight_matrix(simple_features,W_matrix,neighborhood_matrix);
    SG_DEBUG("Weight matrix construction took %.5fs\n", time->cur_time_diff());

    // find null space of weight matrix
    SG_DEBUG("Finding nullspace\n");
    time->start();
    SGMatrix<float64_t> new_feature_matrix = construct_embedding(weight_matrix,m_target_dim);
    SG_DEBUG("Eigenproblem solving took %.5fs\n", time->cur_time_diff());

    delete time;

    SG_UNREF(features);
    return (CFeatures*)(new CDenseFeatures<float64_t>(new_feature_matrix));
}
Example 12: test
void test(MultilabelParameter param, SGMatrix<int32_t> labels_train, SGMatrix<float64_t> feats_train,
    SGMatrix<int32_t> labels_test, SGMatrix<float64_t> feats_test)
{
    int32_t num_sample_train = labels_train.num_cols;
    int32_t num_classes = labels_train.num_rows;
    int32_t dim = feats_train.num_rows;

    // Build factor graph
    SGMatrix< int32_t > mat_edges = get_edge_list(param.graph_type, num_classes);
    int32_t num_edges = mat_edges.num_rows;

    int32_t tid;
    // we have l = num_classes different weights: w_1, w_2, ..., w_l
    // so we create num_classes different unary factor types
    DynArray<CTableFactorType *> v_ftp_u;

    for (int32_t u = 0; u < num_classes; u++)
    {
        tid = u;
        SGVector<int32_t> card_u(1);
        card_u[0] = NUM_STATUS;
        SGVector<float64_t> w_u(dim * NUM_STATUS);
        w_u.zero();
        v_ftp_u.append_element(new CTableFactorType(tid, card_u, w_u));
    }

    // define factor type: tree edge factor
    // note that each edge is a new type
    DynArray<CTableFactorType *> v_ftp_t;

    for (int32_t t = 0; t < num_edges; t++)
    {
        tid = t + num_classes;
        SGVector<int32_t> card_t(2);
        card_t[0] = NUM_STATUS;
        card_t[1] = NUM_STATUS;
        SGVector<float64_t> w_t(NUM_STATUS * NUM_STATUS);
        w_t.zero();
        v_ftp_t.append_element(new CTableFactorType(tid, card_t, w_t));
    }

    // prepare features and labels in factor graph
    CFactorGraphFeatures * fg_feats_train = new CFactorGraphFeatures(num_sample_train);
    SG_REF(fg_feats_train);
    CFactorGraphLabels * fg_labels_train = new CFactorGraphLabels(num_sample_train);
    SG_REF(fg_labels_train);

    build_factor_graph(param, feats_train, labels_train, fg_feats_train, fg_labels_train, v_ftp_u, v_ftp_t);

    SG_SPRINT("----------------------------------------------------\n");

    CFactorGraphModel * model = new CFactorGraphModel(fg_feats_train, fg_labels_train, param.infer_type, false);
    SG_REF(model);

    // initialize model parameters
    for (int32_t u = 0; u < num_classes; u++)
        model->add_factor_type(v_ftp_u[u]);

    for (int32_t t = 0; t < num_edges; t++)
        model->add_factor_type(v_ftp_t[t]);

    // create SGD solver
    CStochasticSOSVM * sgd = new CStochasticSOSVM(model, fg_labels_train, true);
    sgd->set_num_iter(param.sgd_num_iter);
    sgd->set_lambda(param.sgd_lambda);
    SG_REF(sgd);

    // timer
    CTime start;

    // train SGD
    sgd->train();
    float64_t t2 = start.cur_time_diff(false);
    SG_SPRINT("SGD trained in %9.4f\n", t2);

    // Evaluation SGD
    CStructuredLabels * labels_sgd = CLabelsFactory::to_structured(sgd->apply());
    SG_REF(labels_sgd);
    float64_t ave_loss_sgd = 0.0;
    evaluate(model, num_sample_train, labels_sgd, fg_labels_train, ave_loss_sgd);
    SG_SPRINT("sgd solver: average training loss = %f\n", ave_loss_sgd);
    SG_UNREF(labels_sgd);

    if (labels_test.num_cols > 0)
    {
        // prepare features and labels in factor graph
        int32_t num_sample_test = labels_test.num_cols;
        CFactorGraphFeatures * fg_feats_test = new CFactorGraphFeatures(num_sample_test);
        SG_REF(fg_feats_test);
        CFactorGraphLabels * fg_labels_test = new CFactorGraphLabels(num_sample_test);
        SG_REF(fg_labels_test);
        build_factor_graph(param, feats_test, labels_test, fg_feats_test, fg_labels_test, v_ftp_u, v_ftp_t);

        sgd->set_features(fg_feats_test);
        sgd->set_labels(fg_labels_test);

        labels_sgd = CLabelsFactory::to_structured(sgd->apply());
//......... remaining code omitted .........