本文整理汇总了C++中CKernel::get_cache_size方法的典型用法代码示例。如果您正苦于以下问题：C++ CKernel::get_cache_size方法的具体用法？C++ CKernel::get_cache_size怎么用？C++ CKernel::get_cache_size使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CKernel的用法示例。
在下文中一共展示了CKernel::get_cache_size方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: train_machine
bool CSVMLightOneClass::train_machine(CFeatures* data)
{
//certain setup params
mkl_converged=false;
verbosity=1 ;
init_margin=0.15;
init_iter=500;
precision_violations=0;
opt_precision=DEF_PRECISION;
strcpy (learn_parm->predfile, "");
learn_parm->biased_hyperplane=0;
learn_parm->sharedslack=0;
learn_parm->remove_inconsistent=0;
learn_parm->skip_final_opt_check=0;
learn_parm->svm_maxqpsize=get_qpsize();
learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize-1;
learn_parm->maxiter=100000;
learn_parm->svm_iter_to_shrink=100;
learn_parm->svm_c=C1;
learn_parm->transduction_posratio=0.33;
learn_parm->svm_costratio=C2/C1;
learn_parm->svm_costratio_unlab=1.0;
learn_parm->svm_unlabbound=1E-5;
learn_parm->epsilon_crit=epsilon; // GU: better decrease it ... ??
learn_parm->epsilon_a=1E-15;
learn_parm->compute_loo=0;
learn_parm->rho=1.0;
learn_parm->xa_depth=0;
if (!kernel)
SG_ERROR( "SVM_light can not proceed without kernel!\n");
if (data)
kernel->init(data, data);
if (!kernel->has_features())
SG_ERROR( "SVM_light can not proceed without initialized kernel!\n");
int32_t num_vec=kernel->get_num_vec_lhs();
SG_INFO("num_vec=%d\n", num_vec);
SG_UNREF(labels);
labels=new CLabels(num_vec);
labels->set_to_one();
// in case of LINADD enabled kernels cleanup!
if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
kernel->clear_normal() ;
// output some info
SG_DEBUG( "threads = %i\n", parallel->get_num_threads()) ;
SG_DEBUG( "qpsize = %i\n", learn_parm->svm_maxqpsize) ;
SG_DEBUG( "epsilon = %1.1e\n", learn_parm->epsilon_crit) ;
SG_DEBUG( "kernel->has_property(KP_LINADD) = %i\n", kernel->has_property(KP_LINADD)) ;
SG_DEBUG( "kernel->has_property(KP_KERNCOMBINATION) = %i\n", kernel->has_property(KP_KERNCOMBINATION)) ;
SG_DEBUG( "kernel->has_property(KP_BATCHEVALUATION) = %i\n", kernel->has_property(KP_BATCHEVALUATION)) ;
SG_DEBUG( "kernel->get_optimization_type() = %s\n", kernel->get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" : "SLOWBUTMEMEFFICIENT" ) ;
SG_DEBUG( "get_solver_type() = %i\n", get_solver_type());
SG_DEBUG( "get_linadd_enabled() = %i\n", get_linadd_enabled()) ;
SG_DEBUG( "get_batch_computation_enabled() = %i\n", get_batch_computation_enabled()) ;
SG_DEBUG( "kernel->get_num_subkernels() = %i\n", kernel->get_num_subkernels()) ;
use_kernel_cache = !((kernel->get_kernel_type() == K_CUSTOM) ||
(get_linadd_enabled() && kernel->has_property(KP_LINADD)));
SG_DEBUG( "use_kernel_cache = %i\n", use_kernel_cache) ;
if (kernel->get_kernel_type() == K_COMBINED)
{
CKernel* kn = ((CCombinedKernel*)kernel)->get_first_kernel();
while (kn)
{
// allocate kernel cache but clean up beforehand
kn->resize_kernel_cache(kn->get_cache_size());
SG_UNREF(kn);
kn = ((CCombinedKernel*) kernel)->get_next_kernel();
}
}
kernel->resize_kernel_cache(kernel->get_cache_size());
// train the svm
svm_learn();
// brain damaged svm light work around
create_new_model(model->sv_num-1);
set_bias(-model->b);
for (int32_t i=0; i<model->sv_num-1; i++)
{
set_alpha(i, model->alpha[i+1]);
set_support_vector(i, model->supvec[i+1]);
}
// in case of LINADD enabled kernels cleanup!
if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
{
kernel->clear_normal() ;
kernel->delete_optimization() ;
//.........这里部分代码省略.........