本文整理汇总了C++中NeuralNetwork::calcCost方法的典型用法代码示例。如果您正苦于以下问题:C++ NeuralNetwork::calcCost方法的具体用法?C++ NeuralNetwork::calcCost怎么用?C++ NeuralNetwork::calcCost使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类NeuralNetwork
的用法示例。
在下文中一共展示了NeuralNetwork::calcCost方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: costFunction
// Verifies that a user-supplied cost function is invoked once per output
// neuron and that calcCost() sums the per-neuron contributions.
TEST(TestNeuralNetwork, costFunction)
{
    int invocations = 0;
    CostFunction costFn([&invocations](float, float expected) {
        ++invocations;
        return expected * 64.0f;
    });

    NeuralNetwork nn {2, 2};
    nn.setCostFunction(costFn);

    // Expected per-neuron targets 1.0 and 0.5 => 1.0*64 + 0.5*64 = 96.
    vector<float> expected {1.0f, 0.5f};
    EXPECT_FLOAT_EQ(96.0f, nn.calcCost(expected));
    EXPECT_EQ(2, invocations);
}
示例2: zeroInputAndWeights
// With all weights forced to zero, the network must produce an all-zero
// output for any input, and the cost against an all-zero expectation
// must be zero.
TEST(TestNeuralNetwork, zeroInputAndWeights)
{
    NeuralNetwork nn {2, 2};
    vector<float> input {1.0f, 1.0f};
    vector<float> expected = {0.0f, 0.0f};
    vector<float> output;

    nn.setWeights([]() {
        return 0.0f;
    });
    // BUG FIX: `input` was declared but never fed to the network, so the
    // test never exercised the input path (cf. the setInput() call in the
    // convergence helper). With zero weights the output is zero either way,
    // but now the non-zero input is actually propagated.
    nn.setInput(input);
    nn.calc();

    nn.getOutput(output);
    ASSERT_EQ(expected.size(), output.size());
    // size_t index avoids the signed/unsigned comparison against size().
    for (size_t i = 0; i < expected.size(); ++i) {
        EXPECT_EQ(expected[i], output[i]);
    }
    EXPECT_EQ(0.0f, nn.calcCost(expected));
}
示例3: testConvergence
// Drives one training run of 128 samples through the network and asserts
// that the accumulated batch cost strictly decreases after every gradient
// application.
//
// @param nn              network under test (trained in place)
// @param algorithm       member function that accumulates the gradient for
//                        the current sample into its second argument
// @param sampleGenerator fills input/expected for the given sample id and
//                        returns true when the sample closes a batch
static void testConvergence(NeuralNetwork &nn, void (NeuralNetwork::*algorithm)(const std::vector<float> &, std::vector<float> &), bool (*sampleGenerator)(int, std::vector<float>&, std::vector<float>&))
{
    vector<float> input(2);     // sized construction instead of resize()
    vector<float> expected(2);
    vector<float> gradient;     // presumably sized by `algorithm` on first call — TODO confirm
    const float alpha = 0.2f;   // learning rate
    float cost = 0.0f;          // cost accumulated over the current batch
    float prevCost = 0.0f;      // previous batch's cost (0.0f = no baseline yet)
    int sampleId = 0;

    for (int i = 0; i < 128; ++i) {
        bool lastInBatch = sampleGenerator(sampleId++, input, expected);
        nn.setInput(input);
        nn.calc();
        (nn.*algorithm)(expected, gradient);
        ASSERT_EQ(nn.getWeights().size(), gradient.size());
        cost += nn.calcCost(expected);

        if (lastInBatch) {
            nn.applyGradient(gradient, alpha);
            // Reset the accumulated gradient for the next batch; range-for
            // replaces the old signed-index loop over an unsigned size().
            for (float &g : gradient)
                g = 0.0f;
            // The very first batch has no baseline to compare against.
            if (prevCost != 0.0f)
                ASSERT_LT(cost - prevCost, 0.0f);
            prevCost = cost;
            cost = 0.0f;
        }
    }
}