本文整理汇总了C#中ActivationNetwork.Compute方法的典型用法代码示例。如果您正苦于以下问题:C# ActivationNetwork.Compute方法的具体用法?C# ActivationNetwork.Compute怎么用?C# ActivationNetwork.Compute使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ActivationNetwork的用法示例。
在下文中一共展示了ActivationNetwork.Compute方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: RunEpochTest1
public void RunEpochTest1()
{
    // Fix the framework's random generators so the run is reproducible.
    Accord.Math.Tools.SetupGenerator(0);

    // XOR problem encoded with bipolar (-1/+1) values.
    double[][] inputs =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] targets =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    // 2 inputs -> 2 hidden neurons -> 1 output, bipolar sigmoid activation.
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    var teacher = new ParallelResilientBackpropagationLearning(network);

    // Train until the epoch error falls below the convergence threshold.
    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(inputs, targets);

    // The trained network must reproduce every training sample.
    for (int sample = 0; sample < inputs.Length; sample++)
    {
        double predicted = network.Compute(inputs[sample])[0];
        double desired = targets[sample][0];
        Assert.AreEqual(desired, predicted, 0.01);
        Assert.IsFalse(Double.IsNaN(predicted));
    }
}
示例2: ZeroLambdaTest
// Regression test: while training with Bayesian regularization, the
// Levenberg-Marquardt teacher's learning rate must never collapse to zero
// on the ZeroLambda sample data set.
public void ZeroLambdaTest()
{
double[,] data = null;
// open selected file: load the sample table from the embedded CSV resource
using (TextReader stream = new StringReader(Properties.Resources.ZeroLambda))
using (CsvReader reader = new CsvReader(stream, false))
{
data = reader.ToTable().ToMatrix();
}
// number of learning samples
int samples = data.GetLength(0);
// per-column value ranges; the data set must have exactly two columns (x, y)
var ranges = data.Range(dimension: 0);
Assert.AreEqual(2, ranges.Length);
var rangeX = ranges[0];
var rangeY = ranges[1];
// data transformation factors: scale x into [-1, 1] and y into
// approximately [-0.85, 0.85] to suit the bipolar sigmoid's output range
double yFactor = 1.7 / rangeY.Length;
double yMin = rangeY.Min;
double xFactor = 2.0 / rangeX.Length;
double xMin = rangeX.Min;
// prepare learning data
double[][] input = new double[samples][];
double[][] output = new double[samples][];
for (int i = 0; i < samples; i++)
{
input[i] = new double[1];
output[i] = new double[1];
input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0; // set input
output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85; // set output
}
// create multi-layer neural network: 1 input -> 12 hidden -> 1 output
ActivationNetwork network = new ActivationNetwork(
new BipolarSigmoidFunction(5),
1, 12, 1);
// create teacher ('true' enables Bayesian regularization)
LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network, true);
teacher.LearningRate = 1;
// iterations
int iteration = 1;
int iterations = 2000;
// solution array
// NOTE(review): solution[j, 0] is never assigned before being read in the
// "calculate solution" loop below, so the network is always evaluated at
// x = 0 there; only learningError is actually asserted on. Confirm intent.
double[,] solution = new double[samples, 2];
double[] networkInput = new double[1];
bool needToStop = false;
double learningError = 0;
// loop
while (!needToStop)
{
// the regression under test: the learning rate must stay non-zero
Assert.AreNotEqual(0, teacher.LearningRate);
// run epoch of learning procedure (average error per sample)
double error = teacher.RunEpoch(input, output) / samples;
// calculate solution
for (int j = 0; j < samples; j++)
{
networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
}
// calculate error: summed absolute deviation in the original y scale
learningError = 0.0;
for (int j = 0; j < samples; j++)
{
networkInput[0] = input[j][0];
learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
}
// increase current iteration
iteration++;
// check if we need to stop
if ((iterations != 0) && (iteration > iterations))
break;
}
Assert.IsTrue(learningError < 0.13);
}
示例3: MulticlassTest1
public void MulticlassTest1()
{
    // Deterministic seeding for reproducibility.
    Accord.Math.Tools.SetupGenerator(0);
    Neuron.RandGenerator = new ThreadSafeRandom(0);

    int numberOfInputs = 3;
    int numberOfClasses = 4;
    int hiddenNeurons = 5;

    // Bipolar-encoded 3-bit patterns; the trailing comment is the class.
    double[][] patterns =
    {
        new double[] { -1, -1, -1 }, // 0
        new double[] { -1,  1, -1 }, // 1
        new double[] {  1, -1, -1 }, // 1
        new double[] {  1,  1, -1 }, // 0
        new double[] { -1, -1,  1 }, // 2
        new double[] { -1,  1,  1 }, // 3
        new double[] {  1, -1,  1 }, // 3
        new double[] {  1,  1,  1 }  // 2
    };

    int[] labels =
    {
        0,
        1,
        1,
        0,
        2,
        3,
        3,
        2,
    };

    // Expand class labels into one-of-n target vectors over [-1, 1].
    double[][] targets = Accord.Statistics.Tools
        .Expand(labels, numberOfClasses, -1, 1);

    // Bipolar sigmoid network: 3 inputs -> 5 hidden -> 4 outputs.
    var activation = new BipolarSigmoidFunction(2);
    var network = new ActivationNetwork(activation,
        numberOfInputs, hiddenNeurons, numberOfClasses);

    // Nguyen-Widrow weight initialization.
    new NguyenWidrow(network).Randomize();

    var teacher = new LevenbergMarquardtLearning(network);

    // Ten training epochs are enough for this small problem.
    double error = Double.PositiveInfinity;
    for (int epoch = 0; epoch < 10; epoch++)
        error = teacher.RunEpoch(patterns, targets);

    // The most active output neuron must match the expected class.
    for (int sample = 0; sample < patterns.Length; sample++)
    {
        int predicted;
        double[] response = network.Compute(patterns[sample]);
        response.Max(out predicted);
        Assert.AreEqual(labels[sample], predicted);
    }
}
示例4: RunEpochTest4
public void RunEpochTest4()
{
    // Degenerate data set: a single all-zero sample. Training must still
    // run without diverging or producing NaN outputs.
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { 0, 0 },
    };
    double[][] output =
    {
        new double[] { 0 },
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    // Minimal network: 2 inputs feeding a single output neuron.
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 1);

    // Levenberg-Marquardt with Bayesian regularization, Jacobian
    // computed by backpropagation.
    var teacher = new LevenbergMarquardtLearning(network,
        true, JacobianMethod.ByBackpropagation);

    double error = 1.0;
    for (int i = 0; i < 1000; i++)
        error = teacher.RunEpoch(input, output);

    // Fixed: Assert.AreEqual takes (expected, actual, delta); the original
    // passed the network output as "expected", which produces misleading
    // failure messages (cf. the expected-first convention used elsewhere).
    for (int i = 0; i < input.Length; i++)
        Assert.AreEqual(output[i][0], network.Compute(input[i])[0], 0.1);
}
示例5: RunEpochTest3
public void RunEpochTest3()
{
    // Yin-yang data: columns 0-1 are coordinates, column 2 the bipolar label.
    double[,] dataset = yinyang;
    double[][] inputs = dataset.GetColumns(0, 1).ToArray();
    double[][] targets = dataset.GetColumn(2).ToArray();

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    // 2 inputs -> 5 hidden neurons -> 1 output, bipolar sigmoid activation.
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 5, 1);

    // Levenberg-Marquardt with Bayesian regularization enabled.
    var teacher = new LevenbergMarquardtLearning(network,
        true, JacobianMethod.ByBackpropagation);
    Assert.IsTrue(teacher.UseRegularization);

    double error = 1.0;
    for (int epoch = 0; epoch < 500; epoch++)
        error = teacher.RunEpoch(inputs, targets);

    // Collect the network's responses for every sample.
    double[][] predictions = new double[targets.Length][];
    for (int i = 0; i < inputs.Length; i++)
        predictions[i] = network.Compute(inputs[i]);

    // After training, each sample must land on the correct side of zero.
    for (int i = 0; i < inputs.Length; i++)
        Assert.AreEqual(Math.Sign(targets[i][0]), Math.Sign(predictions[i][0]));
}
示例6: RunEpochTest1
public void RunEpochTest1()
{
    // XOR problem encoded with bipolar (-1/+1) values, trained via
    // Levenberg-Marquardt with a finite-difference Jacobian.
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };
    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);

    // 2 inputs -> 2 hidden neurons -> 1 output, bipolar sigmoid activation.
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    // No regularization; Jacobian estimated by finite differences.
    var teacher = new LevenbergMarquardtLearning(network,
        false, JacobianMethod.ByFiniteDifferences);

    // Train until the epoch error falls below the convergence threshold.
    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(input, output);

    // Fixed: Assert.AreEqual takes (expected, actual, delta); the original
    // passed the network output as "expected", which produces misleading
    // failure messages (cf. the expected-first convention used elsewhere).
    for (int i = 0; i < input.Length; i++)
        Assert.AreEqual(output[i][0], network.Compute(input[i])[0], 0.1);
}
示例7: computeError
// Returns the misclassification rate: the fraction of samples whose
// predicted sign disagrees with the target value.
private static double computeError(double[][] inputs, double[][] outputs, ActivationNetwork ann)
{
    int misclassified = 0;
    for (int index = 0; index < inputs.Length; index++)
    {
        // Classify by the sign of the network's first output.
        double predicted = System.Math.Sign(ann.Compute(inputs[index])[0]);
        double expected = outputs[index][0];
        if (predicted != expected)
            misclassified++;
    }
    return misclassified / (double)inputs.Length;
}
示例8: SearchSolution
// Worker thread: trains a 1-D function-approximation network and keeps
// redrawing the evolving solution curve on the chart until stopped.
void SearchSolution()
{
// number of learning samples
int samples = data.GetLength(0);
// data transformation factors: map x into [-1, 1] and y into
// approximately [-0.85, 0.85] to suit the bipolar sigmoid's output range
double yFactor = 1.7 / chart.RangeY.Length;
double yMin = chart.RangeY.Min;
double xFactor = 2.0 / chart.RangeX.Length;
double xMin = chart.RangeX.Min;
// prepare learning data
double[][] input = new double[samples][];
double[][] output = new double[samples][];
for (int i = 0; i < samples; i++)
{
input[i] = new double[1];
output[i] = new double[1];
// set input (normalized x)
input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
// set output (normalized y)
output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
}
// create multi-layer neural network: 1 input -> N hidden -> 1 output
ActivationNetwork network = new ActivationNetwork(
new BipolarSigmoidFunction(sigmoidAlphaValue),
1, neuronsInFirstLayer, 1);
// optional Nguyen-Widrow weight initialization
if (useNguyenWidrow)
{
NguyenWidrow initializer = new NguyenWidrow(network);
initializer.Randomize();
}
// create teacher
var teacher = new ParallelResilientBackpropagationLearning(network);
// iterations
int iteration = 1;
// solution array: 50 (x, y) points sampled evenly across the x range
double[,] solution = new double[50, 2];
double[] networkInput = new double[1];
// calculate X values to be used with solution function
for (int j = 0; j < 50; j++)
{
solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
}
// loop until the UI requests a stop or the iteration limit is hit
while (!needToStop)
{
// run epoch of learning procedure (average error per sample)
double error = teacher.RunEpoch(input, output) / samples;
// calculate solution: evaluate the network at the 50 sample points
// and de-normalize the outputs back into chart coordinates
for (int j = 0; j < 50; j++)
{
networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
}
chart.UpdateDataSeries("solution", solution);
// calculate error: summed absolute deviation in the original y scale
double learningError = 0.0;
for (int j = 0, k = data.GetLength(0); j < k; j++)
{
networkInput[0] = input[j][0];
learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
}
// set current iteration's info
SetText(currentIterationBox, iteration.ToString());
SetText(currentErrorBox, learningError.ToString("F3"));
// increase current iteration
iteration++;
// check if we need to stop (iterations == 0 means run until stopped)
if ((iterations != 0) && (iteration > iterations))
break;
}
// enable settings controls
EnableControls(true);
}
示例9: SearchSolution
// Worker thread: trains a 1-D regression network on data scaled to the
// unit range and continuously redraws the solution curve until stopped.
void SearchSolution()
{
// number of learning samples
int samples = data.GetLength(0);
// prepare learning data: scale both columns into the unit range [-1, 1]
DoubleRange unit = new DoubleRange(-1, 1);
double[][] input = Tools.Scale(from: xRange, to: unit, x: data.GetColumn(0)).ToArray();
double[][] output = Tools.Scale(from: yRange, to: unit, x: data.GetColumn(1)).ToArray();
// create multi-layer neural network: 1 input -> N hidden -> 1 output
ActivationNetwork network = new ActivationNetwork(
new BipolarSigmoidFunction(sigmoidAlphaValue),
1, neuronsInFirstLayer, 1);
// optional Nguyen-Widrow weight initialization
if (useNguyenWidrow)
{
new NguyenWidrow(network).Randomize();
}
// create teacher
var teacher = new ParallelResilientBackpropagationLearning(network);
// iterations
int iteration = 1;
// solution array holding (x, y) pairs in chart coordinates
double[,] solution = new double[samples, 2];
// loop until the UI requests a stop or the iteration limit is hit
while (!needToStop)
{
// run epoch of learning procedure (average error per sample)
double error = teacher.RunEpoch(input, output) / samples;
// calculate solution: evaluate the network at each training x and
// scale the (x, y) pair back to the original chart ranges
for (int j = 0; j < samples; j++)
{
double x = input[j][0];
double y = network.Compute(new[] { x })[0];
solution[j, 0] = Tools.Scale(from: unit, to: xRange, x: x);
solution[j, 1] = Tools.Scale(from: unit, to: yRange, x: y);
}
chart.UpdateDataSeries("solution", solution);
// calculate error
// NOTE(review): "expected" is the raw data value while "actual" is the
// network output still in unit scale, so this sum mixes scales — confirm
// whether the output should be rescaled (as the chart pass above does).
double learningError = 0.0;
for (int j = 0; j < samples; j++)
{
double x = input[j][0];
double expected = data[j, 1];
double actual = network.Compute(new[] { x })[0];
learningError += Math.Abs(expected - actual);
}
// set current iteration's info
SetText(currentIterationBox, iteration.ToString());
SetText(currentErrorBox, learningError.ToString("F3"));
// increase current iteration
iteration++;
// check if we need to stop (iterations == 0 means run until stopped)
if ((iterations != 0) && (iteration > iterations))
break;
}
// enable settings controls
EnableControls(true);
}
示例10: SearchSolution
// Worker thread: trains a time-series prediction network using a sliding
// window over the data and reports learning/prediction errors per epoch.
// NOTE(review): the tail of this method is omitted in this listing (the
// truncation marker follows the block), so the code below is partial.
void SearchSolution()
{
// number of learning samples: exclude the prediction tail and the
// first window, which has no complete history
int samples = data.Length - predictionSize - windowSize;
// data transformation factor: map values into ~[-0.85, 0.85]
double factor = 1.7 / chart.RangeY.Length;
double yMin = chart.RangeY.Min;
// prepare learning data
double[][] input = new double[samples][];
double[][] output = new double[samples][];
for (int i = 0; i < samples; i++)
{
input[i] = new double[windowSize];
output[i] = new double[1];
// set input: the window of values preceding the target point
for (int j = 0; j < windowSize; j++)
{
input[i][j] = (data[i + j] - yMin) * factor - 0.85;
}
// set output: the point immediately following the window
output[i][0] = (data[i + windowSize] - yMin) * factor - 0.85;
}
// create multi-layer neural network:
// windowSize inputs -> windowSize * 2 hidden -> 1 output
ActivationNetwork network = new ActivationNetwork(
new BipolarSigmoidFunction(sigmoidAlphaValue),
windowSize, windowSize * 2, 1);
// create teacher
var teacher = new ParallelResilientBackpropagationLearning(network);
teacher.Reset(initialStep);
// run at least one backpropagation epoch
//teacher2.RunEpoch(input, output);
// iterations
int iteration = 1;
// solution array: x is the sample index, y the network's prediction
int solutionSize = data.Length - windowSize;
double[,] solution = new double[solutionSize, 2];
double[] networkInput = new double[windowSize];
// calculate X values to be used with solution function
for (int j = 0; j < solutionSize; j++)
{
solution[j, 0] = j + windowSize;
}
// loop until the UI requests a stop or the iteration limit is hit
while (!needToStop)
{
// run epoch of learning procedure (average error per sample)
double error = teacher.RunEpoch(input, output) / samples;
// calculate solution and learning and prediction errors
double learningError = 0.0;
double predictionError = 0.0;
// go through all the data
for (int i = 0, n = data.Length - windowSize; i < n; i++)
{
// put values from current window as network's input
for (int j = 0; j < windowSize; j++)
{
networkInput[j] = (data[i + j] - yMin) * factor - 0.85;
}
// evaluate the function and de-normalize back to data units
solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;
// calculate prediction error on the held-out tail; otherwise
// accumulate the in-sample learning error
if (i >= n - predictionSize)
{
predictionError += Math.Abs(solution[i, 1] - data[windowSize + i]);
}
else
{
learningError += Math.Abs(solution[i, 1] - data[windowSize + i]);
}
}
// update solution on the chart
chart.UpdateDataSeries("solution", solution);
// set current iteration's info
SetText(currentIterationBox, iteration.ToString());
SetText(currentLearningErrorBox, learningError.ToString("F3"));
SetText(currentPredictionErrorBox, predictionError.ToString("F3"));
// increase current iteration
iteration++;
// check if we need to stop
if ((iterations != 0) && (iteration > iterations))
break;
}
//.........这里部分代码省略.........
示例11: Prever
/// <summary>
/// Predicts previously unseen points using the trained neural network.
/// </summary>
/// <param name="dadosBase">Data to be compared against the predicted values.</param>
/// <param name="dadosAuxiliares">Data preceding the base data. For validation: the training data. For test: the validation data.</param>
/// <param name="indiceID">Index where the base data starts relative to the full data set. For validation: size of the training data. For test: size of the training data plus the test data.</param>
/// <returns>The inverse-differenced prediction series.</returns>
private List<double> Prever(List<double> dadosBase, List<double> dadosAuxiliares, int indiceID)
{
    network = (ActivationNetwork)ActivationNetwork.Load(@"C:\Users\Paulo\Desktop\NetworkTest.bin");

    // Working list used while rolling the prediction window forward.
    List<double> dadosPrevisao = new List<double>();
    List<double> diferenca = new List<double>();

    // List containing all series ids from 1 to 52.
    List<int> ids = Serie.Ids;

    // Holds the 6-bit binary-encoded id.
    int[] id = new int[6];

    int tamanhoAux = (dadosAuxiliares.Count);

    // Seed the window so that the first predicted point lines up exactly
    // with the last point of the auxiliary data.
    int con = (dadosAuxiliares.Count) - windowSize - 1;
    for (int i = con; i < tamanhoAux; i++)
    {
        dadosPrevisao.Add(dadosAuxiliares[i]);
    }

    // The solution must be one element longer than the base data.
    int solutionSize = dadosBase.Count + 1;
    List<double> solution = new List<double>();

    // Network input: the sliding window plus 6 binary id bits per predicted point.
    double[] networkInput = new double[windowSize + predictionSize * 6];

    // Auxiliary index into the binary id section of the input vector.
    int contador = 0;
    con = indiceID - windowSize - 1;

    // Slide the window over the series, predicting predictionSize points at a time.
    for (int i = 0, n = dadosBase.Count + 1; i < n; i = i + predictionSize)
    {
        int a = windowSize;
        contador = 0;

        // Fill the network input with the current window and the binary ids.
        for (int j = 0; j < windowSize + predictionSize; j++)
        {
            if (j < windowSize)
            {
                // Window values must be normalized into the network's range.
                networkInput[j] = (dadosPrevisao[i + j] - Serie.Min) * fatorNormal - 1.0;
            }
            else
            {
                id = CUtil.ConversaoBinario(ids[con + i + a]);
                a++;
                for (int c = 0; c < 6; c++)
                {
                    networkInput[windowSize + contador] = id[c];
                    contador++;
                }
            }
        }

        // Fixed: compute the network once per window. The original called
        // network.Compute(networkInput) three times per output element
        // (loop bound plus both Add calls), re-running the whole forward
        // pass each time for identical results.
        double[] predicted = network.Compute(networkInput);
        for (int k = 0; k < predicted.Length; k++)
        {
            if ((i + k) < solutionSize)
            {
                // De-normalize back into the series' original scale.
                double valor = (predicted[k] + 1.0) / fatorNormal + Serie.Min;
                diferenca.Add(valor);
                dadosPrevisao.Add(valor);
            }
        }
    }

    // Undo the differencing transform and drop the seed point.
    solution = Serie.DiferencaInversa(diferenca, Serie.Dados[indiceID, 1]);
    solution.RemoveAt(0);
    return solution;
}
示例12: MulticlassTest1
public void MulticlassTest1()
{
    Accord.Math.Tools.SetupGenerator(0);

    // Suppose we would like to teach a network to recognize
    // the following input vectors into 3 possible classes;
    // the trailing comment on each row is its class.
    double[][] inputs =
    {
        new double[] { 0, 1, 1, 0 }, // 0
        new double[] { 0, 1, 0, 0 }, // 0
        new double[] { 0, 0, 1, 0 }, // 0
        new double[] { 0, 1, 1, 0 }, // 0
        new double[] { 0, 1, 0, 0 }, // 0
        new double[] { 1, 0, 0, 0 }, // 1
        new double[] { 1, 0, 0, 0 }, // 1
        new double[] { 1, 0, 0, 1 }, // 1
        new double[] { 0, 0, 0, 1 }, // 1
        new double[] { 0, 0, 0, 1 }, // 1
        new double[] { 1, 1, 1, 1 }, // 2
        new double[] { 1, 0, 1, 1 }, // 2
        new double[] { 1, 1, 0, 1 }, // 2
        new double[] { 0, 1, 1, 1 }, // 2
        new double[] { 1, 1, 1, 1 }, // 2
    };

    int[] classes =
    {
        0, 0, 0, 0, 0,
        1, 1, 1, 1, 1,
        2, 2, 2, 2, 2,
    };

    // The network cannot consume class labels directly, so expand them
    // into one-of-n indicator vectors where the position holding +1
    // marks the class the sample belongs to (-1 everywhere else).
    double[][] outputs = Accord.Statistics.Tools.Expand(classes, -1, +1);

    // Bipolar sigmoid network: 4 inputs -> 5 hidden neurons -> 3 outputs.
    var function = new BipolarSigmoidFunction();
    var network = new ActivationNetwork(function, 4, 5, 3);

    // Randomly initialize the weights with the Nguyen-Widrow method.
    new NguyenWidrow(network).Randomize();

    // Teach the network using parallel resilient backpropagation until
    // the epoch error falls below the convergence threshold.
    var teacher = new ParallelResilientBackpropagationLearning(network);
    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(inputs, outputs);

    // The most active output neuron must correspond to the true class.
    for (int i = 0; i < inputs.Length; i++)
    {
        double[] answer = network.Compute(inputs[i]);
        int expected = classes[i];
        int actual;
        answer.Max(out actual);
        Assert.AreEqual(expected, actual, 0.01);
    }
}