This article compiles typical usage examples of the C++ CMulticlassLabels class (part of the Shogun machine learning toolbox): how the object is constructed, how it is filled with integer labels, and how classifiers produce and consume it.
The 12 code examples below are taken from real code that uses CMulticlassLabels.
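Before the examples, here is a minimal sketch that exercises only the CMulticlassLabels calls that recur on this page (count constructor, set_int_label(), get_int_label(), get_num_labels(), get_num_classes(), and SG_REF/SG_UNREF reference counting). It is not taken from the examples below; the label values are made up for illustration, and it assumes a Shogun version that provides these calls.

    // Hedged sketch: basic CMulticlassLabels usage, assembled from calls seen below.
    init_shogun_with_defaults();

    CMulticlassLabels* labels = new CMulticlassLabels(4);   // room for 4 labels
    SG_REF(labels);

    labels->set_int_label(0, 0);
    labels->set_int_label(1, 2);
    labels->set_int_label(2, 1);
    labels->set_int_label(3, 2);

    SG_SPRINT("%d labels, %d classes\n",
        labels->get_num_labels(), labels->get_num_classes());
    for (int32_t i = 0; i < labels->get_num_labels(); ++i)
        SG_SPRINT("label[%d] = %d\n", i, labels->get_int_label(i));

    SG_UNREF(labels);
    exit_shogun();

The SGVector<float64_t> constructor used in Examples 4, 5 and 8 is an alternative way to build the same object directly from an existing label vector.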
Example 1: REQUIRE
CMulticlassLabels* CRelaxedTree::apply_multiclass(CFeatures* data)
{
    if (data != NULL)
    {
        CDenseFeatures<float64_t> *feats = dynamic_cast<CDenseFeatures<float64_t>*>(data);
        REQUIRE(feats != NULL, ("Require non-NULL dense features of float64_t\n"))
        set_features(feats);
    }

    // init kernels for all sub-machines
    for (int32_t i=0; i<m_machines->get_num_elements(); i++)
    {
        CSVM *machine = (CSVM*)m_machines->get_element(i);
        CKernel *kernel = machine->get_kernel();
        CFeatures* lhs = kernel->get_lhs();
        kernel->init(lhs, m_feats);
        SG_UNREF(machine);
        SG_UNREF(kernel);
        SG_UNREF(lhs);
    }

    CMulticlassLabels *lab = new CMulticlassLabels(m_feats->get_num_vectors());
    SG_REF(lab);
    for (int32_t i=0; i < lab->get_num_labels(); ++i)
    {
        lab->set_int_label(i, int32_t(apply_one(i)));
    }

    return lab;
}
Example 2: l1norm
int32_t CShareBoost::choose_feature()
{
    SGVector<float64_t> l1norm(m_fea.num_rows);

    for (int32_t j=0; j < m_fea.num_rows; ++j)
    {
        if (std::find(&m_activeset[0], &m_activeset[m_activeset.vlen], j) !=
            &m_activeset[m_activeset.vlen])
        {
            // feature j is already in the active set, so never re-select it
            l1norm[j] = 0;
        }
        else
        {
            l1norm[j] = 0;
            CMulticlassLabels *lab = dynamic_cast<CMulticlassLabels *>(m_labels);
            for (int32_t k=0; k < m_multiclass_strategy->get_num_classes(); ++k)
            {
                float64_t abssum = 0;
                for (int32_t ii=0; ii < m_fea.num_cols; ++ii)
                {
                    // note: this indicator compares the feature index j with the sample's
                    // class label; the ShareBoost scoring criterion is usually written
                    // with the class index k in this place
                    abssum += m_fea(j, ii)*(m_rho(k, ii)/m_rho_norm[ii] -
                        (j == lab->get_int_label(ii)));
                }
                l1norm[j] += CMath::abs(abssum);
            }
            l1norm[j] /= m_fea.num_cols;
        }
    }

    return SGVector<float64_t>::arg_max(l1norm.vector, 1, l1norm.vlen);
}
Example 3: SG_ERROR
CMulticlassLabels* CConditionalProbabilityTree::apply_multiclass(CFeatures* data)
{
    if (data)
    {
        if (data->get_feature_class() != C_STREAMING_DENSE)
            SG_ERROR("Expected StreamingDenseFeatures\n")
        if (data->get_feature_type() != F_SHORTREAL)
            SG_ERROR("Expected float32_t feature type\n")

        set_features(dynamic_cast<CStreamingDenseFeatures<float32_t>* >(data));
    }

    vector<int32_t> predicts;

    m_feats->start_parser();
    while (m_feats->get_next_example())
    {
        predicts.push_back(apply_multiclass_example(m_feats->get_vector()));
        m_feats->release_example();
    }
    m_feats->end_parser();

    CMulticlassLabels *labels = new CMulticlassLabels(predicts.size());
    for (size_t i=0; i < predicts.size(); ++i)
        labels->set_int_label(i, predicts[i]);

    return labels;
}
Example 4: main
int main(int argc, char** argv)
{
    init_shogun_with_defaults();

    /* dense features from matrix */
    CCSVFile* feature_file = new CCSVFile(fname_feats);
    SGMatrix<float64_t> mat=SGMatrix<float64_t>();
    mat.load(feature_file);
    SG_UNREF(feature_file);

    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(mat);
    SG_REF(features);

    /* labels from vector */
    CCSVFile* label_file = new CCSVFile(fname_labels);
    SGVector<float64_t> label_vec;
    label_vec.load(label_file);
    SG_UNREF(label_file);

    CMulticlassLabels* labels=new CMulticlassLabels(label_vec);
    SG_REF(labels);

    SG_SPRINT("Performing ShareBoost on a %d-class problem\n", labels->get_num_classes());

    // Create ShareBoost Machine
    CShareBoost *machine = new CShareBoost(features, labels, 10);
    SG_REF(machine);

    machine->train();

    SGVector<int32_t> activeset = machine->get_activeset();
    SG_SPRINT("%d out of %d features are selected:\n", activeset.vlen, mat.num_rows);
    for (int32_t i=0; i < activeset.vlen; ++i)
        SG_SPRINT("activeset[%02d] = %d\n", i, activeset[i]);

    CDenseSubsetFeatures<float64_t> *subset_fea = new CDenseSubsetFeatures<float64_t>(features, machine->get_activeset());
    SG_REF(subset_fea);
    CMulticlassLabels* output = CLabelsFactory::to_multiclass(machine->apply(subset_fea));

    int32_t correct = 0;
    for (int32_t i=0; i < output->get_num_labels(); ++i)
        if (output->get_int_label(i) == labels->get_int_label(i))
            correct++;
    SG_SPRINT("Accuracy = %.4f\n", float64_t(correct)/labels->get_num_labels());

    // Free resources
    SG_UNREF(machine);
    SG_UNREF(output);
    SG_UNREF(subset_fea);
    SG_UNREF(features);
    SG_UNREF(labels);

    exit_shogun();
    return 0;
}
Example 5: test
void test()
{
    /* dense features from matrix */
    CAsciiFile* feature_file = new CAsciiFile(fname_feats);
    SGMatrix<float64_t> mat=SGMatrix<float64_t>();
    mat.load(feature_file);
    SG_UNREF(feature_file);

    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(mat);
    SG_REF(features);

    /* labels from vector */
    CAsciiFile* label_file = new CAsciiFile(fname_labels);
    SGVector<float64_t> label_vec;
    label_vec.load(label_file);
    SG_UNREF(label_file);

    CMulticlassLabels* labels=new CMulticlassLabels(label_vec);
    SG_REF(labels);

    // Create liblinear svm classifier with L2-regularized L2-loss
    CLibLinear* svm = new CLibLinear(L2R_L2LOSS_SVC);
    SG_REF(svm);

    // Add some configuration to the svm
    svm->set_epsilon(EPSILON);
    svm->set_bias_enabled(true);

    CECOCDiscriminantEncoder *encoder = new CECOCDiscriminantEncoder();
    encoder->set_features(features);
    encoder->set_labels(labels);

    // Create a multiclass svm classifier built from several instances of the previous one
    CLinearMulticlassMachine* mc_svm = new CLinearMulticlassMachine(
        new CECOCStrategy(encoder, new CECOCHDDecoder()), (CDotFeatures*) features, svm, labels);
    SG_REF(mc_svm);

    // Train the multiclass machine using the data passed in the constructor
    mc_svm->train();

    // Classify the training examples and show the results
    CMulticlassLabels* output = CLabelsFactory::to_multiclass(mc_svm->apply());
    SGVector< int32_t > out_labels = output->get_int_labels();
    SGVector< int32_t >::display_vector(out_labels.vector, out_labels.vlen);

    // Free resources
    SG_UNREF(mc_svm);
    SG_UNREF(svm);
    SG_UNREF(output);
    SG_UNREF(features);
    SG_UNREF(labels);
}
Example 6: compute_rho
void CShareBoost::compute_rho()
{
    CMulticlassLabels *lab = dynamic_cast<CMulticlassLabels *>(m_labels);

    for (int32_t i=0; i < m_rho.num_rows; ++i)
    { // i loops over classes
        for (int32_t j=0; j < m_rho.num_cols; ++j)
        { // j loops over samples
            int32_t label = lab->get_int_label(j);
            m_rho(i,j) = CMath::exp((label == i) - m_pred(j, label) + m_pred(j, i));
        }
    }

    // normalize
    for (int32_t j=0; j < m_rho.num_cols; ++j)
    {
        m_rho_norm[j] = 0;
        for (int32_t i=0; i < m_rho.num_rows; ++i)
            m_rho_norm[j] += m_rho(i,j);
    }
}
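Read as a formula (a paraphrase of the loop above, not text from the original source): for class i and sample j with true label y_j the code sets rho(i, j) = exp(1[y_j == i] - pred(j, y_j) + pred(j, i)), and then computes the per-sample normalizer rho_norm(j) = sum over i of rho(i, j). choose_feature() in Example 2 divides by exactly this normalizer when scoring candidate features.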
Example 7: set_features
CMulticlassLabels* CGaussianNaiveBayes::apply_multiclass(CFeatures* data)
{
    if (data)
        set_features(data);

    ASSERT(m_features)

    // init number of vectors
    int32_t num_vectors = m_features->get_num_vectors();

    // init result labels
    CMulticlassLabels* result = new CMulticlassLabels(num_vectors);

    // classify each example of data
    SG_PROGRESS(0, 0, num_vectors)
    for (int i = 0; i < num_vectors; i++)
    {
        result->set_label(i, apply_one(i));
        SG_PROGRESS(i + 1, 0, num_vectors)
    }
    SG_DONE()

    return result;
}
Example 8: main
int main(int argc, char** argv)
{
    int32_t num_vectors = 0;
    int32_t num_feats = 0;

    init_shogun_with_defaults();

    const char* fname_train = "../data/7class_example4_train.dense";
    CStreamingAsciiFile *train_file = new CStreamingAsciiFile(fname_train);
    SG_REF(train_file);

    CStreamingDenseFeatures<float64_t> *stream_features = new CStreamingDenseFeatures<float64_t>(train_file, true, 1024);
    SG_REF(stream_features);

    SGMatrix<float64_t> mat;
    SGVector<float64_t> labvec(1000);

    stream_features->start_parser();
    SGVector< float64_t > vec;
    while (stream_features->get_next_example())
    {
        vec = stream_features->get_vector();
        if (num_feats == 0)
        {
            num_feats = vec.vlen;
            mat = SGMatrix<float64_t>(num_feats, 1000);
        }
        std::copy(vec.vector, vec.vector+vec.vlen, mat.get_column_vector(num_vectors));
        labvec[num_vectors] = stream_features->get_label();
        num_vectors++;
        stream_features->release_example();
    }
    stream_features->end_parser();

    mat.num_cols = num_vectors;
    labvec.vlen = num_vectors;

    CMulticlassLabels* labels = new CMulticlassLabels(labvec);
    SG_REF(labels);

    // Create features with the useful values from mat
    CDenseFeatures< float64_t >* features = new CDenseFeatures<float64_t>(mat);
    SG_REF(features);

    SG_SPRINT("Performing ShareBoost on a %d-class problem\n", labels->get_num_classes());

    // Create ShareBoost Machine
    CShareBoost *machine = new CShareBoost(features, labels, 10);
    SG_REF(machine);

    machine->train();

    SGVector<int32_t> activeset = machine->get_activeset();
    SG_SPRINT("%d out of %d features are selected:\n", activeset.vlen, mat.num_rows);
    for (int32_t i=0; i < activeset.vlen; ++i)
        SG_SPRINT("activeset[%02d] = %d\n", i, activeset[i]);

    CDenseSubsetFeatures<float64_t> *subset_fea = new CDenseSubsetFeatures<float64_t>(features, machine->get_activeset());
    SG_REF(subset_fea);
    CMulticlassLabels* output = CMulticlassLabels::obtain_from_generic(machine->apply(subset_fea));

    int32_t correct = 0;
    for (int32_t i=0; i < output->get_num_labels(); ++i)
        if (output->get_int_label(i) == labels->get_int_label(i))
            correct++;
    SG_SPRINT("Accuracy = %.4f\n", float64_t(correct)/labels->get_num_labels());

    // Free resources
    SG_UNREF(machine);
    SG_UNREF(output);
    SG_UNREF(subset_fea);
    SG_UNREF(features);
    SG_UNREF(labels);
    SG_UNREF(train_file);
    SG_UNREF(stream_features);

    exit_shogun();
    return 0;
}
Example 9: mu
SGVector<int32_t> CRelaxedTree::color_label_space(CSVM *svm, SGVector<int32_t> classes)
{
    SGVector<int32_t> mu(classes.vlen);
    CMulticlassLabels *labels = dynamic_cast<CMulticlassLabels *>(m_labels);

    SGVector<float64_t> resp = eval_binary_model_K(svm);
    ASSERT(resp.vlen == labels->get_num_labels())

    SGVector<float64_t> xi_pos_class(classes.vlen), xi_neg_class(classes.vlen);
    SGVector<float64_t> delta_pos(classes.vlen), delta_neg(classes.vlen);

    for (int32_t i=0; i < classes.vlen; ++i)
    {
        // find number of instances from this class
        int32_t ni=0;
        for (int32_t j=0; j < labels->get_num_labels(); ++j)
        {
            if (labels->get_int_label(j) == classes[i])
            {
                ni++;
            }
        }

        xi_pos_class[i] = 0;
        xi_neg_class[i] = 0;
        for (int32_t j=0; j < resp.vlen; ++j)
        {
            if (labels->get_int_label(j) == classes[i])
            {
                xi_pos_class[i] += std::max(0.0, 1 - resp[j]);
                xi_neg_class[i] += std::max(0.0, 1 + resp[j]);
            }
        }

        delta_pos[i] = 1.0/ni * xi_pos_class[i] - float64_t(m_A)/m_svm_C;
        delta_neg[i] = 1.0/ni * xi_neg_class[i] - float64_t(m_A)/m_svm_C;

        if (delta_pos[i] > 0 && delta_neg[i] > 0)
        {
            mu[i] = 0;
        }
        else
        {
            if (delta_pos[i] < delta_neg[i])
                mu[i] = 1;
            else
                mu[i] = -1;
        }
    }

    // enforce balance constraints
    int32_t B_prime = 0;
    for (int32_t i=0; i < mu.vlen; ++i)
        B_prime += mu[i];

    if (B_prime > m_B)
    {
        enforce_balance_constraints_upper(mu, delta_neg, delta_pos, B_prime, xi_neg_class);
    }
    if (B_prime < -m_B)
    {
        enforce_balance_constraints_lower(mu, delta_neg, delta_pos, B_prime, xi_neg_class);
    }

    int32_t npos = 0;
    for (index_t i=0; i < mu.vlen; ++i)
    {
        if (mu[i] == 1)
            npos++;
    }

    if (npos == 0)
    {
        // no positive class
        index_t min_idx = SGVector<float64_t>::arg_min(xi_pos_class.vector, 1, xi_pos_class.vlen);
        mu[min_idx] = 1;
    }

    int32_t nneg = 0;
    for (index_t i=0; i < mu.vlen; ++i)
    {
        if (mu[i] == -1)
            nneg++;
    }

    if (nneg == 0)
    {
        // no negative class
        index_t min_idx = SGVector<float64_t>::arg_min(xi_neg_class.vector, 1, xi_neg_class.vlen);
        if (mu[min_idx] == 1 && (npos == 0 || npos == 1))
        {
            // avoid overwriting the only positive class
            float64_t min_val = 0;
            int32_t i, min_i;
            for (i=0; i < xi_neg_class.vlen; ++i)
            {
                if (mu[i] != 1)
                {
                    min_val = xi_neg_class[i];
//... the rest of this example is omitted here ...
Example 10: test
void test()
{
    // Prepare to read a file for the training data
    char fname_feats[] = "../data/fm_train_real.dat";
    char fname_labels[] = "../data/label_train_multiclass.dat";

    CStreamingAsciiFile* ffeats_train = new CStreamingAsciiFile(fname_feats);
    CStreamingAsciiFile* flabels_train = new CStreamingAsciiFile(fname_labels);
    SG_REF(ffeats_train);
    SG_REF(flabels_train);

    CStreamingDenseFeatures< float64_t >* stream_features =
        new CStreamingDenseFeatures< float64_t >(ffeats_train, false, 1024);
    CStreamingDenseFeatures< float64_t >* stream_labels =
        new CStreamingDenseFeatures< float64_t >(flabels_train, true, 1024);
    SG_REF(stream_features);
    SG_REF(stream_labels);

    stream_features->start_parser();

    // Read the values from the file and store them in features
    CDenseFeatures< float64_t >* features =
        (CDenseFeatures< float64_t >*)
            stream_features->get_streamed_features(1000);

    stream_features->end_parser();

    CMulticlassLabels* labels = new CMulticlassLabels(features->get_num_vectors());
    SG_REF(features);
    SG_REF(labels);

    // Read the labels from the file
    int32_t idx = 0;
    stream_labels->start_parser();
    while ( stream_labels->get_next_example() )
    {
        labels->set_int_label( idx++, (int32_t)stream_labels->get_label() );
        stream_labels->release_example();
    }
    stream_labels->end_parser();

    // Create liblinear svm classifier with L2-regularized L2-loss
    CLibLinear* svm = new CLibLinear(L2R_L2LOSS_SVC);
    SG_REF(svm);

    // Add some configuration to the svm
    svm->set_epsilon(EPSILON);
    svm->set_bias_enabled(true);

    CECOCDiscriminantEncoder *encoder = new CECOCDiscriminantEncoder();
    encoder->set_features(features);
    encoder->set_labels(labels);

    // Create a multiclass svm classifier built from several instances of the previous one
    CLinearMulticlassMachine* mc_svm = new CLinearMulticlassMachine(
        new CECOCStrategy(encoder, new CECOCHDDecoder()), (CDotFeatures*) features, svm, labels);
    SG_REF(mc_svm);

    // Train the multiclass machine using the data passed in the constructor
    mc_svm->train();

    // Classify the training examples and show the results
    CMulticlassLabels* output = CMulticlassLabels::obtain_from_generic(mc_svm->apply());
    SGVector< int32_t > out_labels = output->get_int_labels();
    SGVector< int32_t >::display_vector(out_labels.vector, out_labels.vlen);

    // Free resources
    SG_UNREF(mc_svm);
    SG_UNREF(svm);
    SG_UNREF(output);
    SG_UNREF(features);
    SG_UNREF(labels);
    SG_UNREF(ffeats_train);
    SG_UNREF(flabels_train);
    SG_UNREF(stream_features);
    SG_UNREF(stream_labels);
}
Example 11: main
int main()
{
    init_shogun_with_defaults();

    const char* train_file_name = "../data/7class_example4_train.dense";
    const char* test_file_name = "../data/7class_example4_test.dense";

    CStreamingAsciiFile* train_file = new CStreamingAsciiFile(train_file_name);
    SG_REF(train_file);

    CStreamingDenseFeatures<float32_t>* train_features = new CStreamingDenseFeatures<float32_t>(train_file, true, 1024);
    SG_REF(train_features);

    CRandomConditionalProbabilityTree *cpt = new CRandomConditionalProbabilityTree();
    cpt->set_num_passes(1);
    cpt->set_features(train_features);
    cpt->train();
    cpt->print_tree();

    CStreamingAsciiFile* test_file = new CStreamingAsciiFile(test_file_name);
    SG_REF(test_file);
    CStreamingDenseFeatures<float32_t>* test_features = new CStreamingDenseFeatures<float32_t>(test_file, true, 1024);
    SG_REF(test_features);

    CMulticlassLabels *pred = cpt->apply_multiclass(test_features);
    test_features->reset_stream();
    SG_SPRINT("num_labels = %d\n", pred->get_num_labels());

    SG_UNREF(test_features);
    SG_UNREF(test_file);

    // re-open the test file to read back the ground-truth labels
    test_file = new CStreamingAsciiFile(test_file_name);
    SG_REF(test_file);
    test_features = new CStreamingDenseFeatures<float32_t>(test_file, true, 1024);
    SG_REF(test_features);

    CMulticlassLabels *gnd = new CMulticlassLabels(pred->get_num_labels());
    test_features->start_parser();
    for (int32_t i=0; i < pred->get_num_labels(); ++i)
    {
        test_features->get_next_example();
        gnd->set_int_label(i, test_features->get_label());
        test_features->release_example();
    }
    test_features->end_parser();

    int32_t n_correct = 0;
    for (index_t i=0; i < pred->get_num_labels(); ++i)
    {
        if (pred->get_int_label(i) == gnd->get_int_label(i))
            n_correct++;
        //SG_SPRINT("%d-%d ", pred->get_int_label(i), gnd->get_int_label(i));
    }
    SG_SPRINT("\n");
    SG_SPRINT("Multiclass Accuracy = %.2f%%\n", 100.0*n_correct / gnd->get_num_labels());

    SG_UNREF(train_features);
    SG_UNREF(test_features);
    SG_UNREF(train_file);
    SG_UNREF(test_file);
    SG_UNREF(cpt);
    SG_UNREF(pred);

    exit_shogun();
    return 0;
}
Example 12: main
int main(int argc, char** argv)
{
    int32_t num_vectors = 0;
    int32_t num_feats = 2;

    init_shogun_with_defaults();

    // Prepare to read a file for the training data
    char fname_feats[] = "../data/fm_train_real.dat";
    char fname_labels[] = "../data/label_train_multiclass.dat";

    CStreamingAsciiFile* ffeats_train = new CStreamingAsciiFile(fname_feats);
    CStreamingAsciiFile* flabels_train = new CStreamingAsciiFile(fname_labels);
    SG_REF(ffeats_train);
    SG_REF(flabels_train);

    CStreamingDenseFeatures< float64_t >* stream_features =
        new CStreamingDenseFeatures< float64_t >(ffeats_train, false, 1024);
    CStreamingDenseFeatures< float64_t >* stream_labels =
        new CStreamingDenseFeatures< float64_t >(flabels_train, true, 1024);
    SG_REF(stream_features);
    SG_REF(stream_labels);

    // Create a matrix with enough space to read all the feature vectors
    SGMatrix< float64_t > mat = SGMatrix< float64_t >(num_feats, 1000);

    // Read the values from the file and store them in mat
    SGVector< float64_t > vec;
    stream_features->start_parser();
    while ( stream_features->get_next_example() )
    {
        vec = stream_features->get_vector();
        for ( int32_t i = 0 ; i < num_feats ; ++i )
            mat.matrix[num_vectors*num_feats + i] = vec[i];
        num_vectors++;
        stream_features->release_example();
    }
    stream_features->end_parser();
    mat.num_cols = num_vectors;

    // Create features with the useful values from mat
    CDenseFeatures< float64_t >* features = new CDenseFeatures<float64_t>(mat);

    CMulticlassLabels* labels = new CMulticlassLabels(num_vectors);
    SG_REF(features);
    SG_REF(labels);

    // Read the labels from the file
    int32_t idx = 0;
    stream_labels->start_parser();
    while ( stream_labels->get_next_example() )
    {
        labels->set_int_label( idx++, (int32_t)stream_labels->get_label() );
        stream_labels->release_example();
    }
    stream_labels->end_parser();

    // Create liblinear svm classifier with L2-regularized L2-loss
    CLibLinear* svm = new CLibLinear(L2R_L2LOSS_SVC);
    SG_REF(svm);

    // Add some configuration to the svm
    svm->set_epsilon(EPSILON);
    svm->set_bias_enabled(true);

    // Create a multiclass svm classifier built from several instances of the previous one
    CLinearMulticlassMachine* mc_svm = new CLinearMulticlassMachine(
        new CMulticlassOneVsOneStrategy(), (CDotFeatures*) features, svm, labels);
    SG_REF(mc_svm);

    // Train the multiclass machine using the data passed in the constructor
    mc_svm->train();

    // Classify the training examples and show the results
    CMulticlassLabels* output = CMulticlassLabels::obtain_from_generic(mc_svm->apply());
    SGVector< int32_t > out_labels = output->get_int_labels();
    SGVector<int32_t>::display_vector(out_labels.vector, out_labels.vlen);

    // Free resources
    SG_UNREF(mc_svm);
    SG_UNREF(svm);
    SG_UNREF(output);
    SG_UNREF(features);
    SG_UNREF(labels);
    SG_UNREF(ffeats_train);
    SG_UNREF(flabels_train);
    SG_UNREF(stream_features);
    SG_UNREF(stream_labels);

    exit_shogun();
    return 0;
}