This article collects typical usage examples of the C++ method AtomList::Append. If you have been wondering what AtomList::Append does, how to call it, or where to find it used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the containing class AtomList.
The following shows 2 code examples of AtomList::Append, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
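Before the examples, here is a minimal, self-contained sketch of the pattern both of them share: build a flext AtomList by calling Append once per t_atom, then send the finished list to an outlet with ToOutList. Only AtomList, Append, SetFloat and ToOutList are taken from the examples below; the class append_demo, its constructor calls and the output_values method are illustrative assumptions about a flext-based external, not code from the original project.

// Hypothetical flext external, shown only to illustrate the Append pattern.
// Registration with FLEXT_NEW and message bindings are omitted from this sketch.
#include <flext.h>
#include <vector>

class append_demo : public flext_base
{
    FLEXT_HEADER(append_demo, flext_base)

public:
    append_demo()
    {
        AddInAnything();  // single inlet
        AddOutList();     // list outlet, index 0
    }

protected:
    // Convert a vector of doubles into a Pd/Max list and emit it.
    void output_values(const std::vector<double> &values)
    {
        AtomList result;

        for (size_t index = 0; index < values.size(); ++index)
        {
            t_atom value_a;
            SetFloat(value_a, static_cast<float>(values[index]));
            result.Append(value_a);   // one atom appended per value
        }

        ToOutList(0, result);         // outlet 0 receives the whole list
    }
};

Appending into a local AtomList and emitting it with a single ToOutList call means the receiver only ever sees the fully assembled list, which is the same approach both examples below take.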
Example 1: map
void regression::map(int argc, const t_atom *argv)
{
    GRT::UINT numSamples = regression_data.getNumSamples();
    GRT::Regressifier &regressifier = get_Regressifier_instance();

    if (numSamples == 0)
    {
        error("no observations added, use 'add' to add training data");
        return;
    }

    if (regressifier.getTrained() == false)
    {
        error("model has not been trained, use 'train' to train the model");
        return;
    }

    GRT::UINT numInputNeurons = regressifier.getNumInputFeatures();
    GRT::VectorDouble query(numInputNeurons);

    if (argc < 0 || (unsigned)argc != numInputNeurons)
    {
        error("invalid input length, expected " + std::to_string(numInputNeurons) + " got " + std::to_string(argc));
        return; // bail out before writing past the end of query
    }

    // Copy the incoming atoms into the query vector.
    for (uint32_t index = 0; index < (uint32_t)argc; ++index)
    {
        double value = GetAFloat(argv[index]);
        query[index] = value;
    }

    bool success = regressifier.predict(query);

    if (success == false)
    {
        error("unable to map input");
        return;
    }

    // Note: this local vector shadows the regression_data member used above.
    GRT::VectorDouble regression_data = regressifier.getRegressionData();
    GRT::VectorDouble::size_type numOutputDimensions = regression_data.size();

    if (numOutputDimensions != regressifier.getNumOutputDimensions())
    {
        error("invalid output dimensions: " + std::to_string(numOutputDimensions));
        return;
    }

    // Build the output list one atom at a time and send it to outlet 0.
    AtomList result;

    for (uint32_t index = 0; index < numOutputDimensions; ++index)
    {
        t_atom value_a;
        double value = regression_data[index];
        SetFloat(value_a, value);
        result.Append(value_a);
    }

    ToOutList(0, result);
}
Example 2: map
void ann::map(int argc, const t_atom *argv)
{
    const data_type data_type = get_data_type();
    GRT::UINT numSamples = data_type == LABELLED_CLASSIFICATION ? classification_data.getNumSamples() : regression_data.getNumSamples();

    if (numSamples == 0)
    {
        flext::error("no observations added, use 'add' to add training data");
        return;
    }

    if (grt_ann.getTrained() == false)
    {
        flext::error("model has not been trained, use 'train' to train the model");
        return;
    }

    GRT::UINT numInputNeurons = grt_ann.getNumInputNeurons();
    GRT::VectorDouble query(numInputNeurons);

    if (argc < 0 || (unsigned)argc != numInputNeurons)
    {
        flext::error("invalid input length, expected %d, got %d", numInputNeurons, argc);
        return; // bail out before writing past the end of query
    }

    // Copy the incoming atoms into the query vector.
    for (uint32_t index = 0; index < (uint32_t)argc; ++index)
    {
        double value = GetAFloat(argv[index]);
        query[index] = value;
    }

    bool success = grt_ann.predict(query);

    if (success == false)
    {
        flext::error("unable to map input");
        return;
    }

    if (grt_ann.getClassificationModeActive())
    {
        const GRT::VectorDouble likelihoods = grt_ann.getClassLikelihoods();
        const GRT::Vector<GRT::UINT> labels = classification_data.getClassLabels();
        const GRT::UINT predicted = grt_ann.getPredictedClassLabel();
        const GRT::UINT classification = predicted == 0 ? 0 : get_class_id_for_index(predicted);

        if (likelihoods.size() != labels.size())
        {
            flext::error("labels / likelihoods size mismatch");
        }
        else if (probs)
        {
            // Emit interleaved (label, likelihood) pairs on outlet 1.
            AtomList probs_list;

            for (unsigned count = 0; count < labels.size(); ++count)
            {
                t_atom label_a;
                t_atom likelihood_a;

                SetFloat(likelihood_a, static_cast<float>(likelihoods[count]));
                SetInt(label_a, get_class_id_for_index(labels[count]));
                probs_list.Append(label_a);
                probs_list.Append(likelihood_a);
            }
            ToOutAnything(1, get_s_probs(), probs_list);
        }
        ToOutInt(0, classification);
    }
    else if (grt_ann.getRegressionModeActive())
    {
        GRT::VectorDouble regression_data = grt_ann.getRegressionData();
        GRT::VectorDouble::size_type numOutputDimensions = regression_data.size();

        if (numOutputDimensions != grt_ann.getNumOutputNeurons())
        {
            flext::error("invalid output dimensions: %d", static_cast<int>(numOutputDimensions));
            return;
        }

        // Build the regression output list and send it to outlet 0.
        AtomList result;

        for (uint32_t index = 0; index < numOutputDimensions; ++index)
        {
            t_atom value_a;
            double value = regression_data[index];
            SetFloat(value_a, value);
            result.Append(value_a);
        }
        ToOutList(0, result);
    }
}