This article collects typical usage examples of the C++ method DynArray::get_num_elements. If you are unsure how DynArray::get_num_elements is used in practice, or what it is for, the hand-picked code examples below may help. You can also explore further usage examples of the containing class DynArray.
Three code examples of the DynArray::get_num_elements method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
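As a quick orientation before the full examples: get_num_elements() returns the number of elements currently stored in a DynArray, so it typically serves as the loop bound when iterating. The snippet below is a minimal illustrative sketch only (not taken from the examples that follow); it assumes Shogun's DynArray header is available and that init_shogun()/exit_shogun() are set up as in Example 1.

	/* illustrative sketch: fill a DynArray and iterate over it,
	 * using get_num_elements() as the loop bound */
	DynArray<index_t> values;
	values.push_back(4);
	values.push_back(8);
	values.push_back(15);

	for (index_t i=0; i<values.get_num_elements(); ++i)
		SG_SPRINT("values[%d]=%d\n", i, values[i]);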
Example 1: main
int main(int argc, char **argv)
{
	init_shogun(&print_message, &print_message, &print_message);

	/* create example tree */
	CModelSelectionParameters* tree=create_param_tree();
	tree->print();
	SG_SPRINT("----------------------------------\n");

	/* build combinations of parameter trees */
	DynArray<CParameterCombination*> combinations;
	tree->get_combinations(combinations);
	apply_parameter_tree(combinations);

	/* delete all combinations (they were already printed in apply_parameter_tree) */
	for (index_t i=0; i<combinations.get_num_elements(); ++i)
		combinations[i]->destroy(true, true);

	/* delete example tree (after processing of combinations, because the
	 * CSGObject of the tree (namely the kernel) is SG_UNREF'ed here and not
	 * REF'ed anywhere else) */
	tree->destroy();

	exit_shogun();
	return 0;
}
Example 2: apply_parameter_tree
void apply_parameter_tree(DynArray<CParameterCombination*>& combinations)
{
	/* create some data: a 2x3 matrix, i.e. three 2-dimensional vectors */
	float64_t* matrix=new float64_t[6];
	for (index_t i=0; i<6; i++)
		matrix[i]=i;

	/* wrap the matrix in a feature object;
	 * to avoid it being deleted, REF now and UNREF when finished */
	CSimpleFeatures<float64_t>* features=new CSimpleFeatures<float64_t>();
	features->set_feature_matrix(matrix, 2, 3);
	SG_REF(features);

	/* create three labels, which are handed to the svm and automatically deleted */
	CLabels* labels=new CLabels(3);
	labels->set_label(0, -1);
	labels->set_label(1, +1);
	labels->set_label(2, -1);

	/* create libsvm (its parameters are set below by the combinations) */
	CLibSVM* svm=new CLibSVM();
	svm->set_labels(labels);

	/* apply each parameter combination, then train and classify */
	for (index_t i=0; i<combinations.get_num_elements(); ++i)
	{
		SG_SPRINT("applying:\n");
		combinations[i]->print();

		CParameterCombination* current_combination=combinations[i];
		Parameter* current_parameters=svm->m_parameters;
		current_combination->apply_to_parameter(current_parameters);

		/* get kernel to set features; note that get_kernel SG_REF's the kernel */
		CKernel* kernel=svm->get_kernel();
		kernel->init(features, features);

		svm->train();

		/* classify on training examples */
		for (index_t j=0; j<3; j++)
			SG_SPRINT("output[%d]=%f\n", j, svm->apply(j));

		/* unset features and SG_UNREF kernel */
		kernel->cleanup();
		SG_UNREF(kernel);

		SG_SPRINT("----------------\n\n");
	}

	/* free up memory */
	SG_UNREF(features);
	SG_UNREF(svm);
}
Example 3: get_oob_error
float64_t CBaggingMachine::get_oob_error(CEvaluation* eval) const
{
	REQUIRE(m_combination_rule != NULL, "Combination rule is not set!");
	REQUIRE(m_bags->get_num_elements() > 0, "BaggingMachine is not trained!");

	/* one row per feature vector, one column per bag */
	SGMatrix<float64_t> output(m_features->get_num_vectors(), m_bags->get_num_elements());
	if (m_labels->get_label_type() == LT_REGRESSION)
		output.zero();
	else
		output.set_const(NAN);

	/* TODO: add parallel support of applying the OOBs,
	   only possible when add_subset is thread-safe
	   #pragma omp parallel for num_threads(parallel->get_num_threads())
	*/
	for (index_t i = 0; i < m_bags->get_num_elements(); i++)
	{
		CMachine* m = dynamic_cast<CMachine*>(m_bags->get_element(i));
		CDynamicArray<index_t>* current_oob
			= dynamic_cast<CDynamicArray<index_t>*>(m_oob_indices->get_element(i));

		SGVector<index_t> oob(current_oob->get_array(), current_oob->get_num_elements(), false);
		oob.display_vector();

		/* restrict the features to this bag's out-of-bag indices and predict */
		m_features->add_subset(oob);
		CLabels* l = m->apply(m_features);
		SGVector<float64_t> lv = l->get_values();

		/* write the predictions into the output matrix; entries that are
		 * in-bag for this machine keep their initial value (NAN) */
		for (index_t j = 0; j < oob.vlen; j++)
			output(oob[j], i) = lv[j];

		m_features->remove_subset();
		SG_UNREF(current_oob);
		SG_UNREF(m);
		SG_UNREF(l);
	}
	output.display_matrix();

	/* collect the indices that are out-of-bag for at least one machine */
	DynArray<index_t> idx;
	for (index_t i = 0; i < m_features->get_num_vectors(); i++)
	{
		if (m_all_oob_idx[i])
			idx.push_back(i);
	}

	SGVector<float64_t> combined = m_combination_rule->combine(output);
	CLabels* predicted = NULL;
	switch (m_labels->get_label_type())
	{
		case LT_BINARY:
			predicted = new CBinaryLabels(combined);
			break;

		case LT_MULTICLASS:
			predicted = new CMulticlassLabels(combined);
			break;

		case LT_REGRESSION:
			predicted = new CRegressionLabels(combined);
			break;

		default:
			SG_ERROR("Unsupported label type\n");
	}

	/* evaluate only on the collected out-of-bag indices */
	m_labels->add_subset(SGVector<index_t>(idx.get_array(), idx.get_num_elements(), false));
	float64_t res = eval->evaluate(predicted, m_labels);
	m_labels->remove_subset();

	return res;
}