This page collects and summarizes typical usage examples of the C++ CDotFeatures class. If you have been wondering what exactly CDotFeatures does, how to use it, or what working code that uses it looks like, the curated class examples below may help.
The following shows 12 code examples of the CDotFeatures class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
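For orientation before the examples: CDotFeatures is Shogun's abstract base class for feature representations that support dot products with dense vectors; dense, sparse, and combined features all implement it. Below is a minimal sketch of obtaining a CDotFeatures pointer from raw data. It assumes the legacy C-prefixed Shogun API that all of the following examples use; header and class names may differ in newer Shogun versions, and the program is illustrative only.

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/io/SGIO.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // 2-dimensional features, 3 vectors, stored column-major
    SGMatrix<float64_t> mat(2, 3);
    for (int32_t i=0; i<6; i++)
        mat.matrix[i] = i;

    // CDenseFeatures implements the CDotFeatures interface,
    // so the upcast is always valid
    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(mat);
    CDotFeatures* dot = feats;

    SG_SPRINT("dim=%d num=%d\n", dot->get_dim_feature_space(),
        dot->get_num_vectors());

    SG_UNREF(feats);
    exit_shogun();
    return 0;
}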
Example 1: SG_ERROR
CLatentLabels* CLatentSVM::apply()
{
    if (!m_model)
        SG_ERROR("LatentModel is not set!\n");

    if (!features)
        return NULL;

    index_t num_examples = m_model->get_num_vectors();
    CLatentLabels* hs = new CLatentLabels(num_examples);
    CBinaryLabels* ys = new CBinaryLabels(num_examples);
    hs->set_labels(ys);
    m_model->set_labels(hs);

    for (index_t i = 0; i < num_examples; ++i)
    {
        /* find h for the example */
        CData* h = m_model->infer_latent_variable(w, i);
        hs->add_latent_label(h);
    }

    /* compute the y labels */
    CDotFeatures* x = m_model->get_psi_feature_vectors();
    x->dense_dot_range(ys->get_labels().vector, 0, num_examples,
        NULL, w.vector, w.vlen, 0.0);

    return hs;
}
Example 2: if
void CExactInferenceMethod::update_all()
{
    if (m_labels)
        m_label_vector =
            ((CRegressionLabels*) m_labels)->get_labels().clone();

    if (m_features && m_features->has_property(FP_DOT) && m_features->get_num_vectors())
        m_feature_matrix =
            ((CDotFeatures*)m_features)->get_computed_dot_feature_matrix();

    else if (m_features && m_features->get_feature_class() == C_COMBINED)
    {
        CDotFeatures* feat =
            (CDotFeatures*)((CCombinedFeatures*)m_features)->
                get_first_feature_obj();

        if (feat->get_num_vectors())
            m_feature_matrix = feat->get_computed_dot_feature_matrix();

        SG_UNREF(feat);
    }

    update_data_means();

    if (m_kernel)
        update_train_kernel();

    if (m_ktrtr.num_cols*m_ktrtr.num_rows)
    {
        update_chol();
        update_alpha();
    }
}
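The get_computed_dot_feature_matrix() call used here materializes the feature set as an explicit dense matrix, whatever the underlying representation is. A small sketch with a hypothetical helper name, assuming the same legacy API; note that this can be very memory-hungry for sparse or high-dimensional features:

// Returns the features as a dense num_dimensions x num_vectors matrix
SGMatrix<float64_t> to_dense_matrix(CDotFeatures* feats)
{
    SGMatrix<float64_t> m = feats->get_computed_dot_feature_matrix();
    SG_SPRINT("materialized a %dx%d matrix\n", m.num_rows, m.num_cols);
    return m;
}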
Example 3: SG_ERROR
void CExactInferenceMethod::check_members()
{
    if (!m_labels)
        SG_ERROR("No labels set\n")

    if (m_labels->get_label_type() != LT_REGRESSION)
        SG_ERROR("Expected RegressionLabels\n")

    if (!m_features)
        SG_ERROR("No features set!\n")

    if (m_labels->get_num_labels() != m_features->get_num_vectors())
        SG_ERROR("Number of training vectors does not match number of labels\n")

    if (m_features->get_feature_class() == C_COMBINED)
    {
        CDotFeatures* feat =
            (CDotFeatures*)((CCombinedFeatures*)m_features)->
                get_first_feature_obj();

        if (!feat->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n")

        if (feat->get_feature_class() != C_DENSE)
            SG_ERROR("Expected Simple Features\n")

        if (feat->get_feature_type() != F_DREAL)
            SG_ERROR("Expected Real Features\n")

        SG_UNREF(feat);
    }
}
Example 4: SG_REF
void CInferenceMethod::set_latent_features(CFeatures* feat)
{
    SG_REF(feat);
    SG_UNREF(m_latent_features);
    m_latent_features=feat;

    if (m_latent_features && m_latent_features->has_property(FP_DOT) &&
        m_latent_features->get_num_vectors())
        m_latent_matrix =
            ((CDotFeatures*)m_latent_features)->get_computed_dot_feature_matrix();

    else if (m_latent_features &&
        m_latent_features->get_feature_class() == C_COMBINED)
    {
        CDotFeatures* subfeat =
            (CDotFeatures*)((CCombinedFeatures*)m_latent_features)->
                get_first_feature_obj();

        if (m_latent_features->get_num_vectors())
            m_latent_matrix = subfeat->get_computed_dot_feature_matrix();

        SG_UNREF(subfeat);
    }

    update_data_means();
    update_train_kernel();
    update_chol();
    update_alpha();
}
Example 5: apply_regression
CRegressionLabels* CGaussianProcessRegression::apply_regression(CFeatures* data)
{
    if (data)
    {
        if (data->get_feature_class() == C_COMBINED)
        {
            CDotFeatures* feat =
                (CDotFeatures*)((CCombinedFeatures*)data)->
                    get_first_feature_obj();

            if (!feat->has_property(FP_DOT))
                SG_ERROR("Specified features are not of type CDotFeatures\n")

            if (feat->get_feature_class() != C_DENSE)
                SG_ERROR("Expected Simple Features\n")

            if (feat->get_feature_type() != F_DREAL)
                SG_ERROR("Expected Real Features\n")

            SG_UNREF(feat);
        }
        else
        {
            if (!data->has_property(FP_DOT))
//......... the rest of the code is omitted here .........
Example 6: argmax
CResultSet* CMulticlassModel::argmax(
    SGVector< float64_t > w,
    int32_t feat_idx,
    bool const training)
{
    CDotFeatures* df = (CDotFeatures*) m_features;
    int32_t feats_dim = df->get_dim_feature_space();

    if (training)
    {
        CMulticlassSOLabels* ml = (CMulticlassSOLabels*) m_labels;
        m_num_classes = ml->get_num_classes();
    }
    else
    {
        REQUIRE(m_num_classes > 0, "The model needs to be trained before "
            "using it for prediction\n");
    }

    int32_t dim = get_dim();
    ASSERT(dim == w.vlen)

    // Find the class that gives the maximum score
    float64_t score = 0, ypred = 0;
    float64_t max_score = -CMath::INFTY;

    for (int32_t c = 0; c < m_num_classes; ++c)
    {
        score = df->dense_dot(feat_idx, w.vector+c*feats_dim, feats_dim);
        if (training)
            score += delta_loss(feat_idx, c);

        if (score > max_score)
        {
            max_score = score;
            ypred = c;
        }
    }

    // Build the CResultSet object to return
    CResultSet* ret = new CResultSet();
    SG_REF(ret);
    CRealNumber* y = new CRealNumber(ypred);
    SG_REF(y);

    ret->psi_pred = get_joint_feature_vector(feat_idx, y);
    ret->score = max_score;
    ret->argmax = y;
    if (training)
    {
        ret->delta = CStructuredModel::delta_loss(feat_idx, y);
        ret->psi_truth = CStructuredModel::get_joint_feature_vector(
            feat_idx, feat_idx);
        ret->score -= SGVector< float64_t >::dot(w.vector,
            ret->psi_truth.vector, dim);
    }

    return ret;
}
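The scoring loop above relies on dense_dot(idx, vec, len), which returns the dot product between feature vector idx and a caller-supplied dense vector; each class gets its own slice of w. A stripped-down sketch of just that loop, with a hypothetical helper name and assuming the same legacy Shogun headers as the examples:

// Return the class whose weight slice scores highest on vector idx
int32_t predict_class(CDotFeatures* df, int32_t idx,
    SGVector<float64_t> w, int32_t num_classes)
{
    int32_t dim = df->get_dim_feature_space();
    int32_t best = 0;
    float64_t best_score = -CMath::INFTY;

    for (int32_t c = 0; c < num_classes; c++)
    {
        // dot product of vector idx with the c-th slice of w
        float64_t score = df->dense_dot(idx, w.vector + c*dim, dim);
        if (score > best_score)
        {
            best_score = score;
            best = c;
        }
    }
    return best;
}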
Example 7: get_first_feature_obj
void CCombinedDotFeatures::set_subfeature_weights(
    float64_t* weights, int32_t num_weights)
{
    int32_t i = 0;
    CListElement* current = NULL;
    CDotFeatures* f = get_first_feature_obj(current);

    ASSERT(num_weights==get_num_feature_obj());

    while (f)
    {
        f->set_combined_feature_weight(weights[i]);

        SG_UNREF(f);
        f = get_next_feature_obj(current);
        i++;
    }
}
Example 8: ASSERT
void CCombinedDotFeatures::get_subfeature_weights(float64_t** weights, int32_t* num_weights)
{
    *num_weights = get_num_feature_obj();
    ASSERT(*num_weights > 0);

    *weights=SG_MALLOC(float64_t, *num_weights);
    float64_t* w = *weights;

    CListElement* current = NULL;
    CDotFeatures* f = get_first_feature_obj(current);

    while (f)
    {
        *w++=f->get_combined_feature_weight();

        SG_UNREF(f);
        f = get_next_feature_obj(current);
    }
}
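A small usage sketch tying examples 7 and 8 together, assuming a CCombinedDotFeatures object that already holds some subfeatures; the function name and the uniform weighting are illustrative only:

// Assign uniform weights to all subfeatures, then read them back
void rescale_subfeatures(CCombinedDotFeatures* combined)
{
    int32_t n = combined->get_num_feature_obj();

    float64_t* new_weights = SG_MALLOC(float64_t, n);
    for (int32_t i = 0; i < n; i++)
        new_weights[i] = 1.0/n;
    combined->set_subfeature_weights(new_weights, n);
    SG_FREE(new_weights);

    // get_subfeature_weights() allocates the buffer with SG_MALLOC,
    // so the caller releases it with SG_FREE
    float64_t* w;
    int32_t num;
    combined->get_subfeature_weights(&w, &num);
    for (int32_t i = 0; i < num; i++)
        SG_SPRINT("subfeature %d: weight %f\n", i, w[i]);
    SG_FREE(w);
}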
Example 9: SG_ERROR
bool CGaussian::train(CFeatures* data)
{
    // init features with data if necessary and assure type is correct
    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n");
        set_features(data);
    }

    CDotFeatures* dotdata = (CDotFeatures*) data;

    delete[] m_mean;
    delete[] m_cov;

    dotdata->get_mean(&m_mean, &m_mean_length);
    dotdata->get_cov(&m_cov, &m_cov_rows, &m_cov_cols);

    init();

    return true;
}
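CGaussian::train() leans on the statistics helpers that CDotFeatures provides: get_mean() and get_cov() estimate the empirical mean and covariance over all vectors and hand back newly allocated buffers, which the example above releases with delete[]. A sketch of calling get_mean() directly, with a hypothetical helper name and the out-parameter signature used in this Shogun version:

// Print the empirical mean of a feature set; the buffer is allocated
// by get_mean() and owned by the caller afterwards
void print_mean(CDotFeatures* feats)
{
    float64_t* mean;
    int32_t mean_len;
    feats->get_mean(&mean, &mean_len);

    for (int32_t i = 0; i < mean_len; i++)
        SG_SPRINT("mean[%d] = %f\n", i, mean[i]);

    delete[] mean;
}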
Example 10: ASSERT
bool CGMM::train(CFeatures* data)
{
    ASSERT(m_n != 0);

    if (m_components)
        cleanup();

    /* init features with data if necessary and assure type is correct */
    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n");
        set_features(data);
    }

    CDotFeatures* dotdata = (CDotFeatures*) data;
    int32_t num_vectors = dotdata->get_num_vectors();
    int32_t num_dim = dotdata->get_dim_feature_space();

    CEuclidianDistance* dist = new CEuclidianDistance();
    CKMeans* init_k_means = new CKMeans(m_n, dist);
    init_k_means->train(dotdata);

    float64_t* init_means;
    int32_t init_mean_dim;
    int32_t init_mean_size;
    init_k_means->get_cluster_centers(&init_means, &init_mean_dim, &init_mean_size);

    float64_t* init_cov;
    int32_t init_cov_rows;
    int32_t init_cov_cols;
    dotdata->get_cov(&init_cov, &init_cov_rows, &init_cov_cols);

    m_coefficients = new float64_t[m_coef_size];
    m_components = new CGaussian*[m_n];
    for (int i=0; i<m_n; i++)
    {
        m_coefficients[i] = 1.0/m_coef_size;
        m_components[i] = new CGaussian(&(init_means[i*init_mean_dim]), init_mean_dim,
            init_cov, init_cov_rows, init_cov_cols);
    }

    /* trade-off: faster vs. lower memory usage */
    float64_t* pdfs = new float64_t[num_vectors*m_n];
    float64_t* T = new float64_t[num_vectors*m_n];

    int32_t iter = 0;
    float64_t e_log_likelihood_change = m_minimal_change + 1;
    float64_t e_log_likelihood_old = 0;
    float64_t e_log_likelihood_new = -FLT_MAX;

    while (iter<m_max_iter && e_log_likelihood_change>m_minimal_change)
    {
        e_log_likelihood_old = e_log_likelihood_new;
        e_log_likelihood_new = 0;

        /* precompute likelihoods */
        float64_t* point;
        int32_t point_len;
        for (int i=0; i<num_vectors; i++)
        {
            dotdata->get_feature_vector(&point, &point_len, i);
            for (int j=0; j<m_n; j++)
                pdfs[i*m_n+j] = m_components[j]->compute_PDF(point, point_len);
            delete[] point;
        }

        for (int i=0; i<num_vectors; i++)
        {
            float64_t sum = 0;
            for (int j=0; j<m_n; j++)
                sum += m_coefficients[j]*pdfs[i*m_n+j];

            for (int j=0; j<m_n; j++)
            {
                T[i*m_n+j] = (m_coefficients[j]*pdfs[i*m_n+j])/sum;
                e_log_likelihood_new += T[i*m_n+j]*CMath::log(m_coefficients[j]*pdfs[i*m_n+j]);
            }
        }

        /* not sure if taking the absolute value is a good idea */
        e_log_likelihood_change = CMath::abs(e_log_likelihood_new - e_log_likelihood_old);

        /* updates */
        float64_t T_sum;
        float64_t* mean_sum;
        float64_t* cov_sum;

        for (int i=0; i<m_n; i++)
        {
            T_sum = 0;
            mean_sum = new float64_t[num_dim];
            memset(mean_sum, 0, num_dim*sizeof(float64_t));

            for (int j=0; j<num_vectors; j++)
            {
                T_sum += T[j*m_n+i];
                dotdata->get_feature_vector(&point, &point_len, j);
                CMath::add<float64_t>(mean_sum, T[j*m_n+i], point, 1, mean_sum, point_len);
                delete[] point;
//......... the rest of the code is omitted here .........
Example 11: SG_MALLOC
void CLibLinear::solve_l1r_lr(
    const problem *prob_col, double eps,
    double Cp, double Cn)
{
    int l = prob_col->l;
    int w_size = prob_col->n;
    int j, s, iter = 0;
    int active_size = w_size;
    int max_num_linesearch = 20;

    double x_min = 0;
    double sigma = 0.01;
    double d, G, H;
    double Gmax_old = CMath::INFTY;
    double Gmax_new;
    double Gmax_init=0;
    double sum1, appxcond1;
    double sum2, appxcond2;
    double cond;

    int *index = SG_MALLOC(int, w_size);
    int32_t *y = SG_MALLOC(int32_t, l);
    double *exp_wTx = SG_MALLOC(double, l);
    double *exp_wTx_new = SG_MALLOC(double, l);
    double *xj_max = SG_MALLOC(double, w_size);
    double *C_sum = SG_MALLOC(double, w_size);
    double *xjneg_sum = SG_MALLOC(double, w_size);
    double *xjpos_sum = SG_MALLOC(double, w_size);

    CDotFeatures* x = prob_col->x;
    void* iterator;
    int ind;
    double val;

    double C[3] = {Cn,0,Cp};

    int n = prob_col->n;
    if (prob_col->use_bias)
        n--;

    for(j=0; j<l; j++)
    {
        exp_wTx[j] = 1;

        if(prob_col->y[j] > 0)
            y[j] = 1;
        else
            y[j] = -1;
    }

    for(j=0; j<w_size; j++)
    {
        w.vector[j] = 0;
        index[j] = j;
        xj_max[j] = 0;
        C_sum[j] = 0;
        xjneg_sum[j] = 0;
        xjpos_sum[j] = 0;

        if (use_bias && j==n)
        {
            for (ind=0; ind<l; ind++)
            {
                x_min = CMath::min(x_min, 1.0);
                xj_max[j] = CMath::max(xj_max[j], 1.0);
                C_sum[j] += C[GETI(ind)];

                if(y[ind] == -1)
                    xjneg_sum[j] += C[GETI(ind)];
                else
                    xjpos_sum[j] += C[GETI(ind)];
            }
        }
        else
        {
            iterator=x->get_feature_iterator(j);
            while (x->get_next_feature(ind, val, iterator))
            {
                x_min = CMath::min(x_min, val);
                xj_max[j] = CMath::max(xj_max[j], val);
                C_sum[j] += C[GETI(ind)];

                if(y[ind] == -1)
                    xjneg_sum[j] += C[GETI(ind)]*val;
                else
                    xjpos_sum[j] += C[GETI(ind)]*val;
            }
            x->free_feature_iterator(iterator);
        }
    }

    CTime start_time;
    while (iter < max_iterations && !CSignal::cancel_computations())
    {
        if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time)
            break;

        Gmax_new = 0;

        for(j=0; j<active_size; j++)
        {
            int i = j+rand()%(active_size-j);
            CMath::swap(index[i], index[j]);
        }
//......... the rest of the code is omitted here .........
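The column setup loop above shows the CDotFeatures iterator API: get_feature_iterator(idx) opens an opaque iterator over the non-zero (index, value) entries of one feature vector, get_next_feature() advances it, and free_feature_iterator() releases it. This lets the solver walk each column of the transposed problem without densifying it. A minimal sketch with a hypothetical helper name:

// Sum the non-zero entries of feature vector vec_idx
float64_t sum_vector(CDotFeatures* x, int32_t vec_idx)
{
    float64_t sum = 0;
    int32_t idx;
    float64_t val;

    void* it = x->get_feature_iterator(vec_idx);
    while (x->get_next_feature(idx, val, it))
        sum += val;
    x->free_feature_iterator(it);

    return sum;
}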
Example 12: sparse_add_new_cut
/*----------------------------------------------------------------------------------
  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:

    new_a = sum(data_X(:,find(new_cut ~=0 )),2);
    new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
    sparse_A(:,nSel+1) = new_a;
  ---------------------------------------------------------------------------------*/
int CSVMOcas::add_new_cut(
    float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
    uint32_t nSel, void* ptr)
{
    CSVMOcas* o = (CSVMOcas*) ptr;
    CDotFeatures* f = o->features;
    uint32_t nDim = (uint32_t) o->w_dim;
    float64_t* y = o->lab.vector;

    float64_t** c_val = o->cp_value;
    uint32_t** c_idx = o->cp_index;
    uint32_t* c_nzd = o->cp_nz_dims;
    float64_t* c_bias = o->cp_bias;

    float64_t sq_norm_a;
    uint32_t i, j, nz_dims;

    /* temporary vector */
    float64_t* new_a = o->tmp_a_buf;
    memset(new_a, 0, sizeof(float64_t)*nDim);

    for (i=0; i < cut_length; i++)
    {
        f->add_to_dense_vec(y[new_cut[i]], new_cut[i], new_a, nDim);

        if (o->use_bias)
            c_bias[nSel] += y[new_cut[i]];
    }

    /* compute new_a'*new_a and count number of non-zero dimensions */
    nz_dims = 0;
    sq_norm_a = CMath::sq(c_bias[nSel]);
    for (j=0; j < nDim; j++)
    {
        if (new_a[j] != 0)
        {
            nz_dims++;
            sq_norm_a += new_a[j]*new_a[j];
        }
    }

    /* sparsify new_a and insert it into the last column of sparse_A */
    c_nzd[nSel] = nz_dims;
    c_idx[nSel] = NULL;
    c_val[nSel] = NULL;

    if (nz_dims > 0)
    {
        c_idx[nSel] = SG_MALLOC(uint32_t, nz_dims);
        c_val[nSel] = SG_MALLOC(float64_t, nz_dims);

        uint32_t idx = 0;
        for (j=0; j < nDim; j++)
        {
            if (new_a[j] != 0)
            {
                c_idx[nSel][idx] = j;
                c_val[nSel][idx++] = new_a[j];
            }
        }
    }

    new_col_H[nSel] = sq_norm_a;

    for (i=0; i < nSel; i++)
    {
        float64_t tmp = c_bias[nSel]*c_bias[i];
        for (j=0; j < c_nzd[i]; j++)
            tmp += new_a[c_idx[i][j]]*c_val[i][j];

        new_col_H[i] = tmp;
    }

    //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
    //CMath::display_vector((int32_t*) c_idx[nSel], (int32_t) nz_dims, "c_idx");
    //CMath::display_vector((float64_t*) c_val[nSel], nz_dims, "c_val");

    return 0;
}
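The accumulation step here uses add_to_dense_vec(alpha, idx, vec, len), which performs vec += alpha * x_idx without ever materializing x_idx; add_new_cut() calls it with alpha = y[i] to sum the labeled examples of the cut. A toy sketch with a hypothetical helper name, assuming <cstring> for memset and the same legacy Shogun headers as the examples above:

// out = average of the feature vectors listed in idxs
void average_vectors(CDotFeatures* f, const int32_t* idxs, int32_t n,
    float64_t* out, int32_t dim)
{
    memset(out, 0, sizeof(float64_t)*dim);
    for (int32_t i = 0; i < n; i++)
        f->add_to_dense_vec(1.0/n, idxs[i], out, dim);
}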