This article collects typical usage examples of the Java class org.neuroph.nnet.learning.BackPropagation. If you are wondering how to use the BackPropagation class in Java, or are looking for concrete usage examples, the selected code examples below may help.
The BackPropagation class belongs to the org.neuroph.nnet.learning package. Eight code examples of the BackPropagation class are shown below, sorted by popularity by default.
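Before turning to those examples, here is a minimal sketch (not taken from any of the repositories below) showing how a BackPropagation rule is typically configured on a Neuroph MultiLayerPerceptron; the layer sizes, error threshold, learning rate, and iteration limit are illustrative assumptions, not values from the examples.

// minimal sketch: configure a BackPropagation learning rule on a small network
// (layer sizes and all numeric settings below are illustrative)
MultiLayerPerceptron network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
BackPropagation backProp = new BackPropagation();
backProp.setMaxError(0.01);        // stop when total network error drops below this threshold
backProp.setLearningRate(0.2);     // step size for weight updates
backProp.setMaxIterations(10000);  // hard cap on training iterations
network.setLearningRule(backProp);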
Example 1: createMLPerceptron
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
/**
 * Creates and returns a new instance of Multi Layer Perceptron
 * @param layersStr space-separated numbers of neurons in layers
 * @param transferFunctionType transfer function type for neurons
 * @return instance of Multi Layer Perceptron
 */
public static MultiLayerPerceptron createMLPerceptron(String layersStr, TransferFunctionType transferFunctionType, Class learningRule, boolean useBias, boolean connectIO) {
    ArrayList<Integer> layerSizes = VectorParser.parseInteger(layersStr);
    NeuronProperties neuronProperties = new NeuronProperties(transferFunctionType, useBias);
    MultiLayerPerceptron nnet = new MultiLayerPerceptron(layerSizes, neuronProperties);
    // set learning rule - TODO: use reflection here
    if (learningRule.getName().equals(BackPropagation.class.getName())) {
        nnet.setLearningRule(new BackPropagation());
    } else if (learningRule.getName().equals(MomentumBackpropagation.class.getName())) {
        nnet.setLearningRule(new MomentumBackpropagation());
    } else if (learningRule.getName().equals(DynamicBackPropagation.class.getName())) {
        nnet.setLearningRule(new DynamicBackPropagation());
    } else if (learningRule.getName().equals(ResilientPropagation.class.getName())) {
        nnet.setLearningRule(new ResilientPropagation());
    }
    // connect inputs to outputs if requested
    if (connectIO) {
        nnet.connectInputsToOutputs();
    }
    return nnet;
}
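A hypothetical call to the factory method above might look like the following; the "4 8 3" topology string and the boolean flags are made-up values for illustration, and the cast assumes BackPropagation.class was passed as the learning rule.

// hypothetical usage of createMLPerceptron: a 4-8-3 network trained with backpropagation
MultiLayerPerceptron mlp = createMLPerceptron("4 8 3", TransferFunctionType.SIGMOID,
        BackPropagation.class, true, false);
((BackPropagation) mlp.getLearningRule()).setMaxIterations(5000); // optional: cap training iterations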
Example 2: main
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
public static void main(String[] args) {
    // create training set (logical XOR function)
    DataSet trainingSet = new DataSet(2, 1);
    trainingSet.addRow(new DataSetRow(new double[]{0, 0}, new double[]{0}));
    trainingSet.addRow(new DataSetRow(new double[]{0, 1}, new double[]{1}));
    trainingSet.addRow(new DataSetRow(new double[]{1, 0}, new double[]{1}));
    trainingSet.addRow(new DataSetRow(new double[]{1, 1}, new double[]{0}));

    // create multi layer perceptron
    MultiLayerPerceptron myMlPerceptron = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
    myMlPerceptron.setLearningRule(new BackPropagation());

    // learn the training set
    myMlPerceptron.learn(trainingSet);

    // test perceptron
    System.out.println("Testing trained neural network");
    testNeuralNetwork(myMlPerceptron, trainingSet);

    // save trained neural network
    myMlPerceptron.save("myMlPerceptron.nnet");

    // load saved neural network
    NeuralNetwork loadedMlPerceptron = NeuralNetwork.createFromFile("myMlPerceptron.nnet");

    // test loaded neural network
    System.out.println("Testing loaded neural network");
    testNeuralNetwork(loadedMlPerceptron, trainingSet);
}
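The testNeuralNetwork helper called above is not part of the excerpt; a plausible reconstruction, following the usual Neuroph pattern of feeding each data-set row through the network and printing the output, is sketched below (it assumes java.util.Arrays is imported).

// hypothetical reconstruction of the testNeuralNetwork helper referenced above
public static void testNeuralNetwork(NeuralNetwork neuralNet, DataSet testSet) {
    for (DataSetRow row : testSet.getRows()) {
        neuralNet.setInput(row.getInput());   // load the row's input values
        neuralNet.calculate();                // run a forward pass
        System.out.println("Input: " + Arrays.toString(row.getInput())
                + " Output: " + Arrays.toString(neuralNet.getOutput()));
    }
}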
Example 3: AnimalNetwork
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
/**
 * Instantiates a new animal network.
 *
 * @param input the input
 * @param hidden the hidden
 * @param output the output
 */
public AnimalNetwork(int input, int hidden, int output) {
    super();
    System.out.println("network is created");
    initializeNeurons();
    animal_network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, Data.INPUTUNITS, Data.HIDDENUNITS, Data.OUTPUTUNITS);
    animal_network.setNetworkType(NeuralNetworkType.MULTI_LAYER_PERCEPTRON);
    animal_network.randomizeWeights(); // randomize weights
    // create and configure the BackPropagation rule, then set it on the network;
    // configuring the default rule and replacing it afterwards would discard the settings
    BackPropagation backProp = new BackPropagation();
    backProp.setMaxError(MAXERROR);             // acceptable total network error, range 0-1
    backProp.setLearningRate(LEARNINGRATE);     // learning rate, range 0-1
    backProp.setMaxIterations(MAXITERATIONS);   // maximum number of training iterations
    animal_network.setLearningRule(backProp);
}
Example 4: handleLearningEvent
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
@Override
public void handleLearningEvent(LearningEvent event) {
    BackPropagation bp = (BackPropagation) event.getSource();
    System.out.println(bp.getCurrentIteration() + ". iteration | Total network error: " + bp.getTotalNetworkError());
    listener.batchImageTrainingUpdate(bp.getCurrentIteration(), bp.getTotalNetworkError());
}
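Examples 4 through 8 show only the callback itself; a listener like this is typically registered on the learning rule before training, roughly as sketched below. MyLearningListener stands for any class implementing handleLearningEvent as above, and trainingSet is assumed to be prepared as in Example 2.

// hypothetical registration of a LearningEventListener before training
MultiLayerPerceptron network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, 2, 3, 1);
BackPropagation backProp = new BackPropagation();
backProp.addListener(new MyLearningListener()); // callback fires after each learning iteration
network.setLearningRule(backProp);
network.learn(trainingSet);                     // trainingSet prepared as in Example 2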
Example 5: handleLearningEvent
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
@Override
public void handleLearningEvent(LearningEvent event) {
    BackPropagation bp = (BackPropagation) event.getSource();
    System.out.println(bp.getCurrentIteration() + ". iteration | Total network error: " + bp.getTotalNetworkError());
}
Example 6: handleLearningEvent
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
@Override
public void handleLearningEvent(LearningEvent event) {
    BackPropagation bp = (BackPropagation) event.getSource();
    System.out.println(bp.getCurrentIteration() + ". iteration : " + bp.getTotalNetworkError());
}
Example 7: handleLearningEvent
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
@Override
public void handleLearningEvent(LearningEvent event) {
    BackPropagation bp = (BackPropagation) event.getSource();
    System.out.println(bp.getCurrentIteration() + ". iteration : " + bp.getTotalNetworkError());
}
Example 8: handleLearningEvent
import org.neuroph.nnet.learning.BackPropagation; // import the required package/class
@Override
public void handleLearningEvent(LearningEvent event) {
    BackPropagation bp = (BackPropagation) event.getSource();
    System.out.println(bp.getCurrentIteration() + ". iteration : "
            + bp.getTotalNetworkError());
}