本文整理汇总了C#中Network.AddLayer方法的典型用法代码示例。如果您正苦于以下问题:C# Network.AddLayer方法的具体用法?C# Network.AddLayer怎么用?C# Network.AddLayer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Network
的用法示例。
在下文中一共展示了Network.AddLayer方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: NetTrainer
public NetTrainer(IModelService modelService)
{
    // Build a three-layer feed-forward network (input -> hidden -> output),
    // every layer using tanh activation; only the output layer has no bias.
    Network network = new Network();
    network.AddLayer(new BasicLayer(new ActivationTANH(), true, DefaultFingerprintSize));
    network.AddLayer(new BasicLayer(new ActivationTANH(), true, DefaultHiddenNeuronsCount));
    network.AddLayer(new BasicLayer(new ActivationTANH(), false, OutPutNeurons));
    network.Structure.FinalizeStructure();
    network.Reset(); // randomize the initial weights
    // NOTE(review): 'network' is not stored here — confirm the trainer is wired up elsewhere.

    this.modelService = modelService;

    // Named (system-wide) semaphore used to pause/resume training.
    pauseSem = new Semaphore(0, 1, "PauseSemaphore");
}
示例2: BuildNetwork
public void BuildNetwork()
{
    // Create the network manager on the shared node (the constructor
    // also creates the input layer), then wire it as a plain
    // feed-forward net.
    _network = new Network(_node);
    _network.AddLayer(4);                // hidden layer with 4 neurons
    _network.AddLayer(1);                // output layer with 1 neuron
    _network.BindInputLayer(_input);     // bind the input data array
    _network.BindTraining(_desired);     // bind the desired-output array
    _network.AutoLinkFeedforward();      // create feed-forward synapses between the layers
}
示例3: RunDemo
public void RunDemo()
{
    Console.WriteLine("### BASIC UNBOUND DEMO ###");

    // The Network constructor also creates the first layer (the input
    // layer); in an unbound scenario its size must be set explicitly.
    Network net = new Network();
    net.InitUnboundInputLayer(3);
    net.AddLayer(4); // hidden layer: 4 neurons
    net.AddLayer(2); // output layer: 2 neurons

    // Simplest possible wiring: feed-forward synapses between all layers.
    net.AutoLinkFeedforward();

    // Push one input pattern and its desired output...
    net.PushUnboundInput(new[] { false, true, false });
    net.PushUnboundTraining(new[] { false, true });

    // ...propagate (internally a two-round process, so feedbacks are
    // handled correctly) and print the raw output.
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());

    // One backpropagation step per call; the printed output should move
    // closer to the (-1, 1) target each time.
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());

    // A second pattern, this time supplied as doubles directly
    // (5/1 are the default magnitudes for input/training values).
    Console.WriteLine("# new pattern:");
    net.PushUnboundInput(new[] { 5d, -5d, -5d });
    net.PushUnboundTraining(new double[] { 1, 1 });
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());

    // Revisit the first pattern to see how much the second one disturbed it.
    Console.WriteLine("# the old pattern again:");
    net.PushUnboundInput(new[] { -5d, 5d, -5d });
    net.PushUnboundTraining(new double[] { -1, 1 });
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());

    Console.WriteLine("=== COMPLETE ===");
    Console.WriteLine();
}
示例4: Main
/// <summary>
/// Demo: trains a single linear neuron (plus bias) to fit f(x) = 3*x + 5.
/// The synapse weight should converge to the slope (3.0) and the bias
/// weight to the intercept (5.0).
/// </summary>
static void Main(string[] args)
{
    Controler ffc = new Controler(1);
    Network net = new Network(ffc.Node);

    // One linear input neuron connected feed-forward to one linear output neuron.
    net.InitUnboundInputLayer(1).BasicConfiguration.ActivationType.Value = EActivationType.Linear;
    net.AddLayer(1, EActivationType.Linear);
    net.AutoLinkFeedforward();

    // Keep direct references so we can report the learned parameters.
    Neuron neuron = net.LastLayer[0];
    Synapse synapse = neuron.SourceSynapses[0];

    ffc.ImportNetwork(net, false);

    // Plain backpropagation with a bias neuron; all optional training
    // modifiers are switched off so convergence is easy to reason about.
    BasicConfig config = ffc.NeuralNetwork.BasicConfiguration;
    config.BiasNeuronEnable.Value = true;
    config.BiasNeuronOutput.Value = 1.0;
    config.FlatspotEliminationEnable.Value = false;
    config.WeightDecayEnable.Value = false;
    config.SymmetryPreventionEnable.Value = false;
    config.ManhattanTrainingEnable.Value = false;
    config.LearningRate.Value = 0.005;

    // 100 random sample coordinates from [0, 10).
    StochasticCoordinateGenerator scg = new StochasticCoordinateGenerator(0, 10, 100);
    //RegularCoordinateGenerator rcg = new RegularCoordinateGenerator(-25, 25, 50);
    DynamicSampleProvider dsp = new DynamicSampleProvider(my_func, scg); //rcg);
    ffc.Provider = dsp; // new CachedSampleProvider(dsp);

    Console.WriteLine("TARGET FUNCTION: 3*x+5");
    Console.WriteLine("TARGET Synapse Weight = 3.0");
    Console.WriteLine("TARGET Bias Weight = 5.0");
    Console.WriteLine("TARGET Mean Squared Error <= 0.000000001");
    Console.WriteLine();

    PrintWeights(synapse, neuron);
    Console.WriteLine("Initial MSE: " + ffc.EstimateMeanSquaredError());
    Console.WriteLine();

    // Three single-epoch passes, reporting progress after each
    // (the original repeated this train-and-report sequence verbatim).
    for (int epoch = 0; epoch < 3; epoch++)
    {
        ffc.TrainAllSamplesOnce();
        PrintWeights(synapse, neuron);
        Console.WriteLine("Trained MSE: " + ffc.EstimateMeanSquaredError());
        Console.WriteLine();
    }

    Console.WriteLine("Auto Training, maximum 1000 Epochs");
    Console.WriteLine();
    if(ffc.TrainAllSamplesUntil(0.000000001, 1000))
    {
        PrintWeights(synapse, neuron);
        Console.ForegroundColor = ConsoleColor.Green;
        Console.WriteLine("SUCCEEDS auto training with MSE: " + ffc.EstimateMeanSquaredError());
        Console.ResetColor();
    }
    else
    {
        PrintWeights(synapse, neuron);
        Console.ForegroundColor = ConsoleColor.Red;
        Console.WriteLine("FAILS auto training with MSE: " + ffc.EstimateMeanSquaredError());
        Console.ResetColor();
    }
    Console.ReadKey();
}

/// <summary>
/// Prints the current synapse and bias weights (was duplicated five times in Main).
/// </summary>
private static void PrintWeights(Synapse synapse, Neuron neuron)
{
    Console.WriteLine("Synapse Weight: " + synapse.Weight + " - Bias Weight: " + neuron.BiasNeuronWeight);
}
示例5: RebuildCurrentNetworkStructure
private void RebuildCurrentNetworkStructure()
{
    // Start from a clean network and empty lookup tables.
    currentNetwork = new Network();
    currentNeuronMap = new Hashtable();
    currentSynapseMap = new Hashtable();

    // Rebuild layers and neurons from the persisted rows.
    NeuralDataSet.LayersRow[] layerRows = SelectLayersFromNetwork(currentNetworkRow);
    if (layerRows.Length == 0)
    {
        return; // nothing persisted for this network
    }

    // The first persisted layer becomes the network's input layer.
    NeuralDataSet.NeuronsRow[] neuronRows = SelectNeuronsFromLayer(layerRows[0]);
    currentNetwork.InitUnboundInputLayer(neuronRows.Length);

    Layer[] builtLayers = new Layer[layerRows.Length];
    builtLayers[0] = currentNetwork.FirstLayer;
    AppendNeuronsToNeuronMap(currentNeuronMap, neuronRows, builtLayers[0]);

    // Remaining rows become the hidden/output layers, in persisted order.
    for (int index = 1; index < layerRows.Length; index++)
    {
        neuronRows = SelectNeuronsFromLayer(layerRows[index]);
        builtLayers[index] = currentNetwork.AddLayer(neuronRows.Length);
        AppendNeuronsToNeuronMap(currentNeuronMap, neuronRows, builtLayers[index]);
    }

    // Recreate the synapses between the mapped neurons, keyed by row id.
    NeuralDataSet.SynapsesRow[] synapseRows = SelectSynapsesFromNetwork(currentNetworkRow);
    foreach (NeuralDataSet.SynapsesRow synapseRow in synapseRows)
    {
        Neuron source = (Neuron)currentNeuronMap[synapseRow.syFK_neuronSource];
        Neuron target = (Neuron)currentNeuronMap[synapseRow.syFK_neuronTarget];
        currentSynapseMap.Add(synapseRow.syID, source.ConnectToNeuron(target));
    }
}
示例6: RunDemo
public void RunDemo()
{
    Console.WriteLine("### BASIC BOUND DEMO ###");

    // Prepare your input and training arrays; the network keeps references
    // to these, so mutating them later changes the current pattern.
    double[] input = new double[] { -5d, 5d, -5d };
    double[] training = new double[] { -1, 1 };

    // The Network constructor also creates the first layer (the input
    // layer); bind the input array to it.
    Network net = new Network();
    net.BindInputLayer(input);
    net.AddLayer(4); // hidden layer: 4 neurons
    net.AddLayer(2); // output layer: 2 neurons

    // Bind the training array to the output layer —
    // always AFTER creating the layers.
    net.BindTraining(training);

    // Simplest possible wiring: feed-forward synapses between all layers.
    net.AutoLinkFeedforward();

    // Propagate using the bound input (internally a two-round process,
    // so feedbacks are handled correctly), then print the output.
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());

    // One backpropagation step per call; the printed output should move
    // closer to the (-1, 1) target each time.
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());

    // Switch to another pattern simply by mutating the bound arrays.
    Console.WriteLine("# new pattern:");
    input[0] = 5d;
    input[1] = -5d;
    training[0] = 1;
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());

    // Restore the first pattern to see how much the second one disturbed it.
    Console.WriteLine("# the old pattern again:");
    input[0] = -5d;
    input[1] = 5d;
    training[0] = -1;
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());

    Console.WriteLine("=== COMPLETE ===");
    Console.WriteLine();
}
示例7: RunDemo
public void RunDemo()
{
    Console.WriteLine("### NETWORK STRUCTURE DEMO ###");

    // The Network constructor also creates the first layer (the input
    // layer); in an unbound scenario its size must be set explicitly.
    Network net = new Network();
    net.InitUnboundInputLayer(3);
    net.AddLayer(4); // hidden layer: 4 neurons
    net.AddLayer(2); // output layer: 2 neurons

    // Instead of AutoLinkFeedforward(), this demo wires the
    // network together by hand.
    Layer inputLayer = net.FirstLayer;
    Layer hiddenLayer = inputLayer.TargetLayer;
    Layer outputLayer = net.LastLayer;

    // Fully connect input -> hidden (exactly what AutoLinkFeedforward
    // would do, but for this pair of layers only).
    inputLayer.CrossLinkForward();

    // Add lateral feedback inside the hidden layer
    // (AutoLinkFeedforward does NOT do this).
    hiddenLayer.CrossLinkLayer();

    // Connect hidden neurons 0/1 to the first output neuron and 2/3 to
    // the second; some synapses start with explicit weights.
    hiddenLayer[0].ConnectToNeuron(outputLayer[0]);
    hiddenLayer[1].ConnectToNeuron(outputLayer[0], 0.5);
    hiddenLayer[2].ConnectToNeuron(outputLayer[1]);
    hiddenLayer[3].ConnectToNeuron(outputLayer[1], -1.5);

    // From here on, use the network exactly as in the Basic Unbound Demo.
    net.PushUnboundInput(new[] { false, true, false });
    net.PushUnboundTraining(new[] { false, true });
    net.CalculateFeedforward();
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());
    net.TrainCurrentPattern(false, true);
    App.PrintArray(net.CollectOutput());

    // This demo is a useful starting point for custom
    // INetworkStructureFactory implementations (see also the
    // default implementation).
    Console.WriteLine("=== COMPLETE ===");
    Console.WriteLine();
}
示例8: NetTrainer
/// <summary>
/// Standard constructor of NetTrainer, should be used in most cases.
/// Builds a three-layer tanh feed-forward network and hands it, together
/// with the database gateway, to <c>Init</c>.
/// </summary>
/// <param name="dalManager">Database gateway</param>
public NetTrainer(DaoGateway dalManager)
{
    // input -> hidden -> output, all tanh; only the output layer has no bias.
    Network network = new Network();
    network.AddLayer(new BasicLayer(new ActivationTANH(), true, DEFAULT_FINGERPRINT_SIZE));
    network.AddLayer(new BasicLayer(new ActivationTANH(), true, DEFAULT_HIDDEN_NEURONS_COUNT));
    network.AddLayer(new BasicLayer(new ActivationTANH(), false, OUT_PUT_NEURONS));
    network.Structure.FinalizeStructure();
    network.Reset(); // randomize the initial weights
    Init(network, dalManager);
}