This article collects typical usage examples of the C++ method NeuralNetwork::count_parameters_number. If you have been wondering what NeuralNetwork::count_parameters_number does, how to call it, and what real uses look like, the curated code examples below should help; you can also look further into the enclosing NeuralNetwork class for context.
The following 14 code examples of NeuralNetwork::count_parameters_number are shown, sorted by popularity by default.
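Before the examples, a note on what the method returns: count_parameters_number gives the total number of free parameters in the network, i.e. the multilayer perceptron's weights and biases plus any independent parameters, and it matches the size of the vector returned by arrange_parameters. A minimal usage sketch follows; the opennn.h header name and OpenNN namespace are assumptions about a typical OpenNN setup, while the member calls are the ones used throughout the examples below.

#include <cstddef>
#include "opennn.h"  // assumed OpenNN convenience header

int main()
{
    OpenNN::NeuralNetwork nn;
    nn.set(1, 1, 1);  // 1 input, 1 hidden neuron, 1 output

    const std::size_t n = nn.count_parameters_number();  // 4: two weights and two biases

    OpenNN::Vector<double> parameters = nn.arrange_parameters();
    // parameters.size() == n, so the method is the natural size for
    // gradients, Jacobian columns and Hessian dimensions in the tests below.

    return 0;
}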
Example 1: test_count_parameters_number
void NeuralNetworkTest::test_count_parameters_number(void) {
    message += "test_count_parameters_number\n";

    NeuralNetwork nn;
    IndependentParameters* ip;

    // Test
    nn.set();

    assert_true(nn.count_parameters_number() == 0, LOG);

    // Test
    nn.set(1, 1, 1);

    assert_true(nn.count_parameters_number() == 4, LOG);

    // Test
    nn.set(1);

    assert_true(nn.count_parameters_number() == 1, LOG);

    // Test
    nn.set(1, 1, 1);

    ip = new IndependentParameters(1);
    nn.set_independent_parameters_pointer(ip);

    assert_true(nn.count_parameters_number() == 5, LOG);
}
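The counts asserted in Example 1 follow from standard perceptron bookkeeping: a layer with n_out neurons fed by n_in inputs contributes n_out * (n_in + 1) parameters (weights plus one bias per neuron), and each independent parameter adds one. For the 1-1-1 network that gives 1*(1+1) + 1*(1+1) = 4, and attaching one independent parameter brings it to 5. The standalone sketch below (no OpenNN dependency) reproduces this arithmetic; the counting rule is an inference from the asserted values, not a quote from the library.

#include <cstddef>
#include <iostream>
#include <vector>

// Assumed counting rule: sum over layers of n_out * (n_in + 1),
// plus one per independent parameter.
std::size_t count_parameters(const std::vector<std::size_t>& architecture,
                             std::size_t independent_parameters = 0)
{
    std::size_t count = independent_parameters;

    for(std::size_t i = 1; i < architecture.size(); i++)
    {
        count += architecture[i] * (architecture[i - 1] + 1);
    }

    return count;
}

int main()
{
    std::cout << count_parameters({}) << "\n";            // 0: empty network
    std::cout << count_parameters({1, 1, 1}) << "\n";     // 4: 2 weights + 2 biases
    std::cout << count_parameters({}, 1) << "\n";         // 1: a single independent parameter
    std::cout << count_parameters({1, 1, 1}, 1) << "\n";  // 5: 4 + 1 independent
    return 0;
}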
Example 2: test_calculate_gradient
void NormalizedSquaredErrorTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    Vector<double> network_parameters;
    DataSet ds;
    Matrix<double> data;
    NormalizedSquaredError nse(&nn, &ds);
    Vector<double> objective_gradient;
    Vector<double> numerical_objective_gradient;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 2);
    data.set(2, 2);
    data[0][0] = -1.0;
    data[0][1] = -1.0;
    data[1][0] = 1.0;
    data[1][1] = 1.0;
    ds.set_data(data);

    objective_gradient = nse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 5);
    nn.randomize_parameters_normal();
    network_parameters = nn.arrange_parameters();

    ds.set(3, 5, 2);
    ds.randomize_data_normal();

    objective_gradient = nse.calculate_gradient();
    numerical_objective_gradient = nd.calculate_gradient(nse, &NormalizedSquaredError::calculate_performance, network_parameters);

    assert_true((objective_gradient - numerical_objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
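Examples 2, 6, 8 and 12 all follow the same pattern: compute the analytic gradient, recompute it with NumericalDifferentiation, and assert that the element-wise absolute difference stays below 1.0e-3. The standalone sketch below shows the central-difference scheme such a check typically uses; it is a generic illustration, not OpenNN's actual implementation, and f stands in for a performance function evaluated at a parameter vector.

#include <cstddef>
#include <functional>
#include <vector>

// Central differences: g_i = (f(x + h*e_i) - f(x - h*e_i)) / (2h).
std::vector<double> numerical_gradient(
    const std::function<double(const std::vector<double>&)>& f,
    std::vector<double> x,
    const double h = 1.0e-6)
{
    std::vector<double> gradient(x.size());

    for(std::size_t i = 0; i < x.size(); i++)
    {
        const double original = x[i];

        x[i] = original + h;
        const double forward = f(x);

        x[i] = original - h;
        const double backward = f(x);

        x[i] = original;  // restore before perturbing the next component

        gradient[i] = (forward - backward) / (2.0 * h);
    }

    return gradient;
}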
Example 3: test_calculate_gradient
void PerformanceFunctionalTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NeuralNetwork nn;
    size_t parameters_number;
    Vector<double> parameters;

    PerformanceFunctional pf(&nn);
    pf.destruct_all_terms();
    pf.set_regularization_type(PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

    Vector<double> gradient;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    parameters = nn.arrange_parameters();
    gradient = pf.calculate_gradient(parameters);

    assert_true(gradient == 0.0, LOG);

    // Test
    parameters_number = nn.count_parameters_number();
    nn.initialize_parameters(0.0);

    MockPerformanceTerm* mptp = new MockPerformanceTerm(&nn);
    pf.set_user_objective_pointer(mptp);

    gradient = pf.calculate_gradient();

    assert_true(gradient.size() == parameters_number, LOG);
    assert_true(gradient == 0.0, LOG);
}
Example 4: test_set_parameters
void NeuralNetworkTest::test_set_parameters(void) {
    message += "test_set_parameters\n";

    Vector<unsigned> multilayer_perceptron_architecture;
    NeuralNetwork nn;
    unsigned parameters_number;
    Vector<double> parameters;

    // Test
    nn.set_parameters(parameters);
    parameters = nn.arrange_parameters();

    assert_true(parameters.size() == 0, LOG);

    // Test
    multilayer_perceptron_architecture.set(2, 2);
    nn.set(multilayer_perceptron_architecture);
    nn.construct_independent_parameters();
    nn.get_independent_parameters_pointer()->set_parameters_number(2);

    parameters_number = nn.count_parameters_number();
    parameters.set(0.0, 1.0, parameters_number - 1);
    nn.set_parameters(parameters);
    parameters = nn.arrange_parameters();

    assert_true(parameters.size() == parameters_number, LOG);
    assert_true(parameters[0] == 0.0, LOG);
    assert_true(parameters[parameters_number - 1] == parameters_number - 1.0, LOG);
}
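Two details make Example 4 work. First, parameters.set(0.0, 1.0, parameters_number - 1) builds the ramp 0, 1, ..., parameters_number - 1 (reading the arguments as start, step and final value, which is an inference from the asserted endpoint values). Second, set_parameters and arrange_parameters use the same ordering over perceptron weights, biases and independent parameters, so scattering a vector in and gathering it back is an identity; that round trip is what the last three assertions check. A tiny standalone sketch of the assumed ramp convention:

#include <cassert>
#include <cstddef>
#include <vector>

// Ramp from 0.0 in steps of 1.0 up to and including last: 0, 1, ..., last.
std::vector<double> ramp(const double last)
{
    std::vector<double> v;
    for(double x = 0.0; x <= last; x += 1.0) v.push_back(x);
    return v;
}

int main()
{
    const std::size_t parameters_number = 8;
    const std::vector<double> parameters = ramp(parameters_number - 1.0);

    assert(parameters.size() == parameters_number);
    assert(parameters.front() == 0.0);
    assert(parameters.back() == parameters_number - 1.0);  // mirrors the test's final assertion
    return 0;
}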
Example 5: test_calculate_terms_Jacobian
void SumSquaredErrorTest::test_calculate_terms_Jacobian(void)
{
    message += "test_calculate_terms_Jacobian\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    Vector<size_t> architecture;
    Vector<double> parameters;
    DataSet ds;
    SumSquaredError sse(&nn, &ds);
    Vector<double> gradient;
    Vector<double> terms;
    Matrix<double> terms_Jacobian;
    Matrix<double> numerical_Jacobian_terms;

    // Test
    nn.set(1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    terms_Jacobian = sse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().get_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    sse.set(&nn, &ds);
    ds.initialize_data(0.0);

    terms_Jacobian = sse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 5;
    architecture[1] = 1;
    architecture[2] = 2;

    nn.set(architecture);
    nn.initialize_parameters(0.0);

    ds.set(5, 2, 3);
    sse.set(&nn, &ds);
    ds.initialize_data(0.0);

    terms_Jacobian = sse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.randomize_parameters_normal();
    parameters = nn.arrange_parameters();

    ds.set(1, 1, 1);
    ds.randomize_data_normal();

    terms_Jacobian = sse.calculate_terms_Jacobian();
    numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters);

    assert_true((terms_Jacobian - numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

    // Test
    nn.set(2, 2, 2);
    nn.randomize_parameters_normal();
    parameters = nn.arrange_parameters();

    ds.set(2, 2, 2);
    ds.randomize_data_normal();

    terms_Jacobian = sse.calculate_terms_Jacobian();
    numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters);

    assert_true((terms_Jacobian - numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

    // Test
//......... part of the code omitted here .........
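The shape assertions in Example 5 encode the definition of the terms Jacobian: one row per (training) instance and one column per network parameter, so the matrix is instances x parameters. If the terms e_i are defined so that the sum squared error is E = sum_i e_i^2 (the natural reading of calculate_terms here, though that definition is an assumption), the gradient factors as grad E = 2 * J^T * e, which is why a correct Jacobian is also a strong check on the gradient. A standalone sketch of that factorization:

#include <cstddef>
#include <vector>

// grad_j = sum_i 2 * e_i * J[i][j], with J rows over instances and
// columns over parameters; requires e.size() == J.size().
// Assumes E = sum of squared terms.
std::vector<double> sse_gradient(const std::vector<std::vector<double>>& J,
                                 const std::vector<double>& e)
{
    const std::size_t parameters_number = J.empty() ? 0 : J.front().size();
    std::vector<double> gradient(parameters_number, 0.0);

    for(std::size_t i = 0; i < J.size(); i++)
    {
        for(std::size_t j = 0; j < parameters_number; j++)
        {
            gradient[j] += 2.0 * J[i][j] * e[i];
        }
    }

    return gradient;
}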
Example 6: test_calculate_gradient
void SumSquaredErrorTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NumericalDifferentiation nd;
    DataSet ds;
    NeuralNetwork nn;
    SumSquaredError sse(&nn, &ds);
    Vector<size_t> architecture;
    Vector<double> parameters;
    Vector<double> gradient;
    Vector<double> numerical_gradient;
    Vector<double> error;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    gradient = sse.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    sse.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient.clear();
    gradient = sse.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 5;
    architecture[1] = 1;
    architecture[2] = 2;

    nn.set(architecture);
    nn.initialize_parameters(0.0);

    ds.set(5, 5, 2);
    sse.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient.clear();
    gradient = sse.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    gradient.clear();
    gradient = sse.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 3, 2);
    sse.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient.clear();
    gradient = sse.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
//......... part of the code omitted here .........
Example 7: test_calculate_Jacobian_terms
void MeanSquaredErrorTest::test_calculate_Jacobian_terms(void)
{
    message += "test_calculate_Jacobian_terms\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    Vector<unsigned> multilayer_perceptron_architecture;
    Vector<double> parameters;
    DataSet ds;
    MeanSquaredError mse(&nn, &ds);
    Vector<double> objective_gradient;
    Vector<double> evaluation_terms;
    Matrix<double> terms_Jacobian;
    Matrix<double> numerical_Jacobian_terms;

    // Test
    nn.set(1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    terms_Jacobian = mse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    mse.set(&nn, &ds);
    ds.initialize_data(0.0);

    terms_Jacobian = mse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    multilayer_perceptron_architecture.set(3);
    multilayer_perceptron_architecture[0] = 2;
    multilayer_perceptron_architecture[1] = 1;
    multilayer_perceptron_architecture[2] = 2;

    nn.set(multilayer_perceptron_architecture);
    nn.initialize_parameters(0.0);

    ds.set(2, 2, 5);
    mse.set(&nn, &ds);
    ds.initialize_data(0.0);

    terms_Jacobian = mse.calculate_terms_Jacobian();

    assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
    assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(terms_Jacobian == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.randomize_parameters_normal();
    parameters = nn.arrange_parameters();

    ds.set(1, 1, 1);
    ds.randomize_data_normal();

    terms_Jacobian = mse.calculate_terms_Jacobian();
    numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters);

    assert_true((terms_Jacobian - numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

    // Test
    nn.set(2, 2, 2);
    nn.randomize_parameters_normal();
    parameters = nn.arrange_parameters();

    ds.set(2, 2, 2);
    ds.randomize_data_normal();

    terms_Jacobian = mse.calculate_terms_Jacobian();
    numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters);

    assert_true((terms_Jacobian - numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

    // Test
//......... part of the code omitted here .........
Example 8: test_calculate_gradient
void MeanSquaredErrorTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    Vector<unsigned> multilayer_perceptron_architecture;
    Vector<double> parameters;
    DataSet ds;
    MeanSquaredError mse(&nn, &ds);
    Vector<double> objective_gradient;
    Vector<double> numerical_objective_gradient;
    Vector<double> numerical_differentiation_error;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    objective_gradient = mse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    mse.set(&nn, &ds);
    ds.initialize_data(0.0);

    objective_gradient = mse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    multilayer_perceptron_architecture.set(3);
    multilayer_perceptron_architecture[0] = 2;
    multilayer_perceptron_architecture[1] = 1;
    multilayer_perceptron_architecture[2] = 3;

    nn.set(multilayer_perceptron_architecture);
    nn.initialize_parameters(0.0);

    ds.set(2, 3, 5);
    mse.set(&nn, &ds);
    ds.initialize_data(0.0);

    objective_gradient = mse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    objective_gradient = mse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    mse.set(&nn, &ds);
    ds.initialize_data(0.0);

    objective_gradient = mse.calculate_gradient();

    assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(objective_gradient == 0.0, LOG);

    // Test
    nn.set(1, 1);
    nn.initialize_parameters(1.0);
//......... part of the code omitted here .........
Example 9: test_calculate_outputs
void NeuralNetworkTest::test_calculate_outputs(void) {
    message += "test_calculate_outputs\n";

    NeuralNetwork nn;
    unsigned inputs_number;
    unsigned outputs_number;
    Vector<unsigned> architecture;
    Vector<double> inputs;
    Vector<double> outputs;
    unsigned parameters_number;
    Vector<double> parameters;

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    inputs.set(3, 0.0);
    outputs = nn.calculate_outputs(inputs);

    assert_true(outputs == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    inputs.set(1, 0.0);
    outputs = nn.calculate_outputs(inputs);

    assert_true(outputs == 0.0, LOG);

    // Test
    nn.set(1, 1);

    inputs.set(1);
    inputs.randomize_normal();

    parameters = nn.arrange_parameters();

    assert_true(nn.calculate_outputs(inputs) == nn.calculate_outputs(inputs, parameters), LOG);

    // Test
    nn.set(4, 3, 5);

    inputs.set(4, 0.0);

    parameters_number = nn.count_parameters_number();
    parameters.set(parameters_number, 0.0);

    outputs = nn.calculate_outputs(inputs, parameters);

    assert_true(outputs.size() == 5, LOG);
    assert_true(outputs == 0.0, LOG);

    // Test
    architecture.set(5);
    architecture.randomize_uniform(5, 10);

    nn.set(architecture);

    inputs_number = nn.get_inputs_pointer()->get_inputs_number();
    outputs_number = nn.get_outputs_pointer()->get_outputs_number();

    inputs.set(inputs_number, 0.0);

    parameters_number = nn.count_parameters_number();
    parameters.set(parameters_number, 0.0);

    outputs = nn.calculate_outputs(inputs, parameters);

    assert_true(outputs.size() == outputs_number, LOG);
    assert_true(outputs == 0.0, LOG);
}
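The zero-in/zero-out assertions of Example 9 follow from the forward pass: with every weight and bias at zero each layer's combination is zero, and an activation that maps zero to zero (tanh is assumed here; OpenNN's default hidden activation may differ) propagates the zeros to the outputs. A standalone sketch of one layer of that computation:

#include <cmath>
#include <cstddef>
#include <vector>

// One perceptron layer: outputs = activation(W * inputs + b).
std::vector<double> forward_layer(const std::vector<std::vector<double>>& W,
                                  const std::vector<double>& b,
                                  const std::vector<double>& inputs)
{
    std::vector<double> outputs(b.size());

    for(std::size_t i = 0; i < b.size(); i++)
    {
        double combination = b[i];

        for(std::size_t j = 0; j < inputs.size(); j++)
        {
            combination += W[i][j] * inputs[j];
        }

        outputs[i] = std::tanh(combination);  // tanh(0) == 0, hence the zero outputs
    }

    return outputs;
}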
Example 10: test_calculate_Hessian
void NeuralParametersNormTest::test_calculate_Hessian(void)
{
    message += "test_calculate_Hessian\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;

    NeuralParametersNorm npn(&nn);
    npn.set_neural_parameters_norm_weight(1.0);

    Vector<size_t> architecture;
    Vector<double> parameters;
    Matrix<double> Hessian;
    Matrix<double> numerical_Hessian;
    Matrix<double> error;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    Hessian = npn.calculate_Hessian();

    assert_true(Hessian.get_rows_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    Hessian = npn.calculate_Hessian();

    assert_true(Hessian.get_rows_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 5;
    architecture[1] = 1;
    architecture[2] = 2;

    nn.set(architecture);
    nn.initialize_parameters(0.0);

    npn.set_neural_network_pointer(&nn);

    Hessian = npn.calculate_Hessian();

    assert_true(Hessian.get_rows_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    npn.set_neural_network_pointer(&nn);

    Hessian = npn.calculate_Hessian();

    assert_true(Hessian.get_rows_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian.get_columns_number() == nn.count_parameters_number(), LOG);
    assert_true(Hessian == 0.0, LOG);

    // Test
    // for(size_t i = 0; i < 100; i++)
    // {
    //     nn.set(1, 1);
    //     nn.randomize_parameters_normal();
    //     parameters = nn.arrange_parameters();
    //
    //     Hessian = npn.calculate_Hessian();
    //     numerical_Hessian = nd.calculate_Hessian(npn, &NeuralParametersNorm::calculate_performance, parameters);
    //
    //     error = (Hessian - numerical_Hessian).calculate_absolute_value();
    //     std::cout << error << std::endl;
    //
    //     assert_true(error < 1.0e-3, LOG);
    // }
}
Example 11: test_calculate_gradient
void NeuralParametersNormTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    NeuralParametersNorm npn(&nn);

    Vector<size_t> architecture;
    Vector<double> parameters;
    Vector<double> gradient;
    Vector<double> numerical_gradient;
    Vector<double> error;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    gradient = npn.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    gradient = npn.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 5;
    architecture[1] = 1;
    architecture[2] = 2;

    nn.set(architecture);
    nn.initialize_parameters(0.0);

    npn.set_neural_network_pointer(&nn);

    gradient = npn.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    npn.set_neural_network_pointer(&nn);

    gradient = npn.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.initialize_parameters(1.0);
    parameters = nn.arrange_parameters();

    gradient = npn.calculate_gradient();
    numerical_gradient = nd.calculate_gradient(npn, &NeuralParametersNorm::calculate_regularization, parameters);
    error = (gradient - numerical_gradient).calculate_absolute_value();

    assert_true(error < 1.0e-3, LOG);
}
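The final block of Example 11 sets every parameter to 1.0 and checks the analytic gradient against numerical differentiation of calculate_regularization. If the regularization term is the weighted L2 norm r(theta) = w * ||theta|| (an inference from the class name, not a confirmed formula), its gradient is w * theta / ||theta||, so with n parameters all equal to 1.0 each component is w / sqrt(n). A standalone sketch under that assumption:

#include <cmath>
#include <cstddef>
#include <vector>

// Gradient of r(theta) = w * ||theta||_2, the assumed form of the
// NeuralParametersNorm term: grad = w * theta / ||theta||.
std::vector<double> norm_gradient(const std::vector<double>& theta, const double w)
{
    double norm = 0.0;
    for(const double t : theta) norm += t * t;
    norm = std::sqrt(norm);

    std::vector<double> gradient(theta.size(), 0.0);

    if(norm == 0.0) return gradient;  // zero by convention at theta == 0, matching the zero tests above

    for(std::size_t i = 0; i < theta.size(); i++)
    {
        gradient[i] = w * theta[i] / norm;
    }

    return gradient;
}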
Example 12: test_calculate_gradient
void MinkowskiErrorTest::test_calculate_gradient(void)
{
    message += "test_calculate_gradient\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    Vector<size_t> architecture;
    Vector<double> parameters;
    DataSet ds;
    MinkowskiError me(&nn, &ds);
    Vector<double> gradient;
    Vector<double> numerical_gradient;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    gradient = me.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    me.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient = me.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 2;
    architecture[1] = 1;
    architecture[2] = 3;

    nn.set(architecture);
    nn.initialize_parameters(0.0);

    ds.set(2, 3, 5);
    me.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient = me.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    ds.set(1, 1, 1);
    ds.initialize_data(0.0);

    gradient = me.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    nn.set(3, 4, 2);
    nn.initialize_parameters(0.0);

    ds.set(3, 2, 5);
    me.set(&nn, &ds);
    ds.initialize_data(0.0);

    gradient = me.calculate_gradient();

    assert_true(gradient.size() == nn.count_parameters_number(), LOG);
    assert_true(gradient == 0.0, LOG);

    // Test
    architecture.set(3);
    architecture[0] = 2;
    architecture[1] = 1;
//......... part of the code omitted here .........
Example 13: test_calculate_Hessian_approximation
void LevenbergMarquardtAlgorithmTest::test_calculate_Hessian_approximation(void)
{
    message += "test_calculate_Hessian_approximation\n";

    NumericalDifferentiation nd;
    NeuralNetwork nn;
    size_t parameters_number;
    Vector<double> parameters;
    DataSet ds;

    PerformanceFunctional pf(&nn, &ds);
    pf.set_error_type(PerformanceFunctional::SUM_SQUARED_ERROR);

    Matrix<double> terms_Jacobian;
    Matrix<double> Hessian;
    Matrix<double> numerical_Hessian;
    Matrix<double> Hessian_approximation;

    LevenbergMarquardtAlgorithm lma(&pf);

    // Test
    nn.set(1, 2);
    nn.initialize_parameters(0.0);
    parameters_number = nn.count_parameters_number();

    ds.set(1, 2, 2);
    ds.initialize_data(0.0);

    terms_Jacobian = pf.calculate_terms_Jacobian();
    Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);

    assert_true(Hessian_approximation.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian_approximation.get_columns_number() == parameters_number, LOG);
    assert_true(Hessian_approximation.is_symmetric(), LOG);

    // Test
    pf.set_error_type(PerformanceFunctional::NORMALIZED_SQUARED_ERROR);

    nn.set(1, 1, 2);
    nn.randomize_parameters_normal();
    parameters_number = nn.count_parameters_number();

    ds.set(1, 2, 3);
    ds.randomize_data_normal();

    terms_Jacobian = pf.calculate_terms_Jacobian();
    Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);

    assert_true(Hessian_approximation.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian_approximation.get_columns_number() == parameters_number, LOG);
    assert_true(Hessian_approximation.is_symmetric(), LOG);

    // Test
    nn.set(2);
    nn.randomize_parameters_normal();

    MockErrorTerm* mptp = new MockErrorTerm(&nn);
    pf.set_user_error_pointer(mptp);

    terms_Jacobian = pf.calculate_terms_Jacobian();
    Hessian = pf.calculate_Hessian();

    lma.set_damping_parameter(0.0);

    assert_true((lma.calculate_Hessian_approximation(terms_Jacobian) - Hessian).calculate_absolute_value() < 1.0e-3, LOG);

    // Test
    pf.set_error_type(PerformanceFunctional::SUM_SQUARED_ERROR);

    ds.set(1, 1, 1);
    ds.randomize_data_normal();

    nn.set(1, 1);
    nn.randomize_parameters_normal();
    parameters = nn.arrange_parameters();  // arrange after randomizing, so every quantity below is evaluated at the same parameters

    numerical_Hessian = nd.calculate_Hessian(pf, &PerformanceFunctional::calculate_performance, parameters);

    terms_Jacobian = pf.calculate_terms_Jacobian();
    Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);
//......... part of the code omitted here .........
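Example 13 exercises the Levenberg-Marquardt identity: for a sum-of-squares objective the Hessian is approximated from the terms Jacobian as H ≈ J^T * J + lambda * I, with lambda the damping parameter, so setting lambda to 0.0 (as the test does before comparing against the exact Hessian of a mock term) reduces it to the Gauss-Newton matrix J^T * J. Any constant factor coming from the error convention (e.g. a 2 from differentiating sum e_i^2 twice) is omitted in the standalone sketch below. H is symmetric by construction, which is what the is_symmetric() assertions verify.

#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<double>>;

// H = J^T * J + lambda * I, with J rows over instances and columns
// over parameters. Generic sketch, not OpenNN's implementation.
Matrix lm_hessian_approximation(const Matrix& J, const double lambda)
{
    const std::size_t n = J.empty() ? 0 : J.front().size();
    Matrix H(n, std::vector<double>(n, 0.0));

    for(std::size_t i = 0; i < J.size(); i++)
    {
        for(std::size_t r = 0; r < n; r++)
        {
            for(std::size_t c = 0; c < n; c++)
            {
                H[r][c] += J[i][r] * J[i][c];  // (J^T J)_{rc}
            }
        }
    }

    for(std::size_t d = 0; d < n; d++)
    {
        H[d][d] += lambda;  // damping on the diagonal
    }

    return H;
}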
Example 14: test_calculate_Hessian
void PerformanceFunctionalTest::test_calculate_Hessian(void) {
    message += "test_calculate_Hessian\n";

    NeuralNetwork nn;
    unsigned parameters_number;
    Vector<double> parameters;

    PerformanceFunctional pf(&nn);
    pf.destruct_all_terms();
    pf.set_regularization_type(PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

    Matrix<double> Hessian;

    // Test
    nn.set(1, 1, 1);
    nn.initialize_parameters(0.0);

    parameters_number = nn.count_parameters_number();
    parameters = nn.arrange_parameters();

    Hessian = pf.calculate_Hessian(parameters);

    assert_true(Hessian.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian.get_columns_number() == parameters_number, LOG);

    // Test
    nn.set();
    nn.initialize_parameters(0.0);

    parameters_number = nn.count_parameters_number();
    parameters = nn.arrange_parameters();

    Hessian = pf.calculate_Hessian(parameters);

    assert_true(Hessian.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian.get_columns_number() == parameters_number, LOG);

    // Test
    nn.set(1, 1);
    nn.initialize_parameters(0.0);

    parameters_number = nn.count_parameters_number();
    parameters = nn.arrange_parameters();

    Hessian = pf.calculate_Hessian(parameters);

    assert_true(Hessian.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian.get_columns_number() == parameters_number, LOG);

    // Test
    parameters_number = nn.count_parameters_number();
    nn.initialize_parameters(0.0);

    MockPerformanceTerm* mptp = new MockPerformanceTerm(&nn);
    pf.set_user_objective_pointer(mptp);

    Hessian = pf.calculate_Hessian();

    assert_true(Hessian.get_rows_number() == parameters_number, LOG);
    assert_true(Hessian.get_columns_number() == parameters_number, LOG);
}