This page collects typical usage examples of the C++ method CKernel::resize_kernel_cache. If you are unsure what CKernel::resize_kernel_cache does, how to call it, or what it looks like in practice, the hand-picked code samples below may help. You can also browse further usage examples of the containing class CKernel.
Below are 2 code examples of CKernel::resize_kernel_cache, ordered by popularity by default. You can vote for the examples you like or find useful; your ratings help surface better C++ samples.
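Both examples share one pattern: size the cache via get_cache_size() and call resize_kernel_cache on the main kernel and, for a CCombinedKernel, on every subkernel. The sketch below distills that pattern before the full examples; it is a minimal illustration assuming the Shogun 1.x API (CKernel, CCombinedKernel, SG_UNREF) and a kernel pointer created and initialized elsewhere.

#include <shogun/kernel/Kernel.h>
#include <shogun/kernel/CombinedKernel.h>

using namespace shogun;

// Minimal sketch: re-allocate the kernel cache(s) before training.
void resize_all_kernel_caches(CKernel* kernel)
{
    // Resize the top-level cache; the optional second argument
    // (used in Example 1 below) doubles the cache lines for regression.
    kernel->resize_kernel_cache(kernel->get_cache_size());

    // A combined kernel keeps a cache per subkernel, so each one
    // is resized separately via the get_first/get_next iteration.
    if (kernel->get_kernel_type() == K_COMBINED)
    {
        CCombinedKernel* combined = (CCombinedKernel*) kernel;
        CKernel* kn = combined->get_first_kernel();
        while (kn)
        {
            kn->resize_kernel_cache(kn->get_cache_size());
            SG_UNREF(kn); // get_*_kernel() returns a referenced object
            kn = combined->get_next_kernel();
        }
    }
}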
Example 1: svr_learn
void CSVRLight::svr_learn()
{
    int32_t *inconsistent, i, j;
    int32_t upsupvecnum;
    float64_t maxdiff, *lin, *c, *a;
    int32_t iterations;
    float64_t* xi_fullset; /* buffer for storing xi on full sample in loo */
    float64_t* a_fullset;  /* buffer for storing alpha on full sample in loo */
    TIMING timing_profile;
    SHRINK_STATE shrink_state;
    int32_t* label;
    int32_t* docs;

    ASSERT(m_labels);
    int32_t totdoc=m_labels->get_num_labels();
    num_vectors=totdoc;

    // set up regression problem in standard form: each example i appears
    // twice, once with label +1 and once (at mirror index 2*totdoc-1-i)
    // with label -1
    docs=SG_MALLOC(int32_t, 2*totdoc);
    label=SG_MALLOC(int32_t, 2*totdoc);
    c=SG_MALLOC(float64_t, 2*totdoc);

    for (i=0; i<totdoc; i++) {
        docs[i]=i;
        j=2*totdoc-1-i;
        label[i]=+1;
        c[i]=m_labels->get_label(i);
        docs[j]=j;
        label[j]=-1;
        c[j]=m_labels->get_label(i);
    }
    totdoc*=2;

    // prepare kernel cache for regression (i.e. cache lines are twice the current size)
    kernel->resize_kernel_cache(kernel->get_cache_size(), true);

    // a combined kernel keeps one cache per subkernel; resize them too
    if (kernel->get_kernel_type() == K_COMBINED)
    {
        CCombinedKernel* k = (CCombinedKernel*) kernel;
        CKernel* kn = k->get_first_kernel();

        while (kn)
        {
            kn->resize_kernel_cache(kernel->get_cache_size(), true);
            SG_UNREF(kn);
            kn = k->get_next_kernel();
        }
    }

    // reset timing statistics
    timing_profile.time_kernel=0;
    timing_profile.time_opti=0;
    timing_profile.time_shrink=0;
    timing_profile.time_update=0;
    timing_profile.time_model=0;
    timing_profile.time_check=0;
    timing_profile.time_select=0;

    // per-(example, subkernel) weights, only needed when an MKL callback is set
    SG_FREE(W);
    W=NULL;

    if (kernel->has_property(KP_KERNCOMBINATION) && callback)
    {
        W = SG_MALLOC(float64_t, totdoc*kernel->get_num_subkernels());
        for (i=0; i<totdoc*kernel->get_num_subkernels(); i++)
            W[i]=0;
    }

    /* make sure -n value is reasonable */
    if ((learn_parm->svm_newvarsinqp < 2)
            || (learn_parm->svm_newvarsinqp > learn_parm->svm_maxqpsize)) {
        learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize;
    }

    init_shrink_state(&shrink_state, totdoc, (int32_t)MAXSHRINK);

    inconsistent = SG_MALLOC(int32_t, totdoc);
    a = SG_MALLOC(float64_t, totdoc);
    a_fullset = SG_MALLOC(float64_t, totdoc);
    xi_fullset = SG_MALLOC(float64_t, totdoc);
    lin = SG_MALLOC(float64_t, totdoc);
    learn_parm->svm_cost = SG_MALLOC(float64_t, totdoc);

    if (m_linear_term.vlen>0)
        learn_parm->eps=get_linear_term_array();
    else
    {
        learn_parm->eps=SG_MALLOC(float64_t, totdoc); /* equivalent regression epsilon for classification */
        CMath::fill_vector(learn_parm->eps, totdoc, tube_epsilon);
    }

    SG_FREE(model->supvec);
    SG_FREE(model->alpha);
    SG_FREE(model->index);
    model->supvec = SG_MALLOC(int32_t, totdoc+2);
    model->alpha = SG_MALLOC(float64_t, totdoc+2);
    model->index = SG_MALLOC(int32_t, totdoc+2);

    model->at_upper_bound=0;
    model->b=0;
    model->supvec[0]=0; /* element 0 reserved and empty for now */
    model->alpha[0]=0;
//......... part of the code omitted here .........
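The `true` flag in the resize_kernel_cache calls above is the regression-specific detail: the setup loop turns a regression problem over totdoc examples into a classification problem over 2*totdoc examples, where index i and its mirror 2*totdoc-1-i share the same feature vector but carry opposite labels, so each cache line has to cover the doubled index range. SVRLight maps a doubled index back to the original feature vector with a helper along these lines (paraphrased from the Shogun sources; there it is a member function using the num_vectors field, so treat the exact signature as an assumption):

#include <stdint.h>

// Sketch of the mirror mapping for the doubled regression problem:
// indices [0, num_vectors) are the +1 copies, and indices
// [num_vectors, 2*num_vectors) mirror them in reverse order.
static int32_t regression_fix_index(int32_t i, int32_t num_vectors)
{
    if (i >= num_vectors)
        i = 2*num_vectors - 1 - i;
    return i;
}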
Example 2: train_machine
bool CSVMLightOneClass::train_machine(CFeatures* data)
{
    // certain setup params
    mkl_converged=false;
    verbosity=1;
    init_margin=0.15;
    init_iter=500;
    precision_violations=0;
    opt_precision=DEF_PRECISION;

    strcpy(learn_parm->predfile, "");
    learn_parm->biased_hyperplane=0;
    learn_parm->sharedslack=0;
    learn_parm->remove_inconsistent=0;
    learn_parm->skip_final_opt_check=0;
    learn_parm->svm_maxqpsize=get_qpsize();
    learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize-1;
    learn_parm->maxiter=100000;
    learn_parm->svm_iter_to_shrink=100;
    learn_parm->svm_c=C1;
    learn_parm->transduction_posratio=0.33;
    learn_parm->svm_costratio=C2/C1;
    learn_parm->svm_costratio_unlab=1.0;
    learn_parm->svm_unlabbound=1E-5;
    learn_parm->epsilon_crit=epsilon; // GU: better decrease it ... ??
    learn_parm->epsilon_a=1E-15;
    learn_parm->compute_loo=0;
    learn_parm->rho=1.0;
    learn_parm->xa_depth=0;

    if (!kernel)
        SG_ERROR("SVM_light can not proceed without kernel!\n");

    if (data)
        kernel->init(data, data);

    if (!kernel->has_features())
        SG_ERROR("SVM_light can not proceed without initialized kernel!\n");

    int32_t num_vec=kernel->get_num_vec_lhs();
    SG_INFO("num_vec=%d\n", num_vec);

    // one-class SVM: every training example gets label +1
    SG_UNREF(labels);
    labels=new CLabels(num_vec);
    labels->set_to_one();

    // in case of LINADD enabled kernels cleanup!
    if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
        kernel->clear_normal();

    // output some info
    SG_DEBUG("threads = %i\n", parallel->get_num_threads());
    SG_DEBUG("qpsize = %i\n", learn_parm->svm_maxqpsize);
    SG_DEBUG("epsilon = %1.1e\n", learn_parm->epsilon_crit);
    SG_DEBUG("kernel->has_property(KP_LINADD) = %i\n", kernel->has_property(KP_LINADD));
    SG_DEBUG("kernel->has_property(KP_KERNCOMBINATION) = %i\n", kernel->has_property(KP_KERNCOMBINATION));
    SG_DEBUG("kernel->has_property(KP_BATCHEVALUATION) = %i\n", kernel->has_property(KP_BATCHEVALUATION));
    SG_DEBUG("kernel->get_optimization_type() = %s\n", kernel->get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" : "SLOWBUTMEMEFFICIENT");
    SG_DEBUG("get_solver_type() = %i\n", get_solver_type());
    SG_DEBUG("get_linadd_enabled() = %i\n", get_linadd_enabled());
    SG_DEBUG("get_batch_computation_enabled() = %i\n", get_batch_computation_enabled());
    SG_DEBUG("kernel->get_num_subkernels() = %i\n", kernel->get_num_subkernels());

    // the cache is pointless for custom (precomputed) kernels and for
    // kernels evaluated via the LINADD optimization
    use_kernel_cache = !((kernel->get_kernel_type() == K_CUSTOM) ||
            (get_linadd_enabled() && kernel->has_property(KP_LINADD)));
    SG_DEBUG("use_kernel_cache = %i\n", use_kernel_cache);

    if (kernel->get_kernel_type() == K_COMBINED)
    {
        CKernel* kn = ((CCombinedKernel*) kernel)->get_first_kernel();

        while (kn)
        {
            // allocate kernel cache but clean up beforehand
            kn->resize_kernel_cache(kn->get_cache_size());
            SG_UNREF(kn);
            kn = ((CCombinedKernel*) kernel)->get_next_kernel();
        }
    }

    kernel->resize_kernel_cache(kernel->get_cache_size());

    // train the svm
    svm_learn();

    // brain damaged svm light work around
    create_new_model(model->sv_num-1);
    set_bias(-model->b);

    for (int32_t i=0; i<model->sv_num-1; i++)
    {
        set_alpha(i, model->alpha[i+1]);
        set_support_vector(i, model->supvec[i+1]);
    }

    // in case of LINADD enabled kernels cleanup!
    if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
    {
        kernel->clear_normal();
        kernel->delete_optimization();
//......... part of the code omitted here .........
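Taken together, the two examples show the usual call sites for resize_kernel_cache: once on every kernel (and on every subkernel of a CCombinedKernel) just before training, sized via get_cache_size(), and skipped only when the cache would be useless (custom kernels, LINADD). A minimal, hypothetical driver for the one-class case follows; it assumes a Shogun 1.x build with SVMlight support, and `feats` stands in for features prepared elsewhere.

#include <shogun/kernel/GaussianKernel.h>
#include <shogun/classifier/svm/SVMLightOneClass.h>

using namespace shogun;

void train_one_class(CFeatures* feats)
{
    // Gaussian kernel with a 10 MB cache; train_machine() above will
    // call resize_kernel_cache(get_cache_size()) before svm_learn().
    CGaussianKernel* kernel = new CGaussianKernel(10 /* cache size */, 2.0 /* width */);
    CSVMLightOneClass* svm = new CSVMLightOneClass(1.0 /* C */, kernel);

    svm->train(feats); // dispatches to train_machine() shown in Example 2

    SG_UNREF(svm); // also drops the kernel reference held by the SVM
}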