本文整理汇总了Java中org.neuroph.nnet.learning.MomentumBackpropagation类的典型用法代码示例。如果您正苦于以下问题:Java MomentumBackpropagation类的具体用法?Java MomentumBackpropagation怎么用?Java MomentumBackpropagation使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MomentumBackpropagation类属于org.neuroph.nnet.learning包,在下文中一共展示了MomentumBackpropagation类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doRun
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
@Override
public void doRun() {
    try {
        System.out.println("Starting training thread....." + sampleDimension.toString() + " and " + imageLabels.toString());
        // Label -> image map built from the source directory; each image is
        // resized to the configured sample dimension before training.
        HashMap<String, BufferedImage> imagesMap = new HashMap<>();
        // listFiles() returns null if srcDirectory does not exist or is not a
        // directory — guard against the NPE the original code would throw.
        File[] sourceFiles = srcDirectory.listFiles();
        if (sourceFiles == null) {
            System.out.println("Training aborted: cannot list files in " + srcDirectory);
            return;
        }
        for (File file : sourceFiles) {
            // File name without extension is used as the image's label.
            imageLabels.add(FilenameUtils.removeExtension(file.getName()));
            if (sampleDimension.getWidth() > 0 && sampleDimension.getHeight() > 0) {
                // Plain casts instead of boxing through Double; truncation
                // semantics are identical to Double.intValue().
                int w = (int) sampleDimension.getWidth();
                int h = (int) sampleDimension.getHeight();
                imagesMap.put(file.getName(), ImageUtilities.resizeImage(ImageUtilities.loadImage(file), w, h));
            }
        }
        Map<String, FractionRgbData> imageRgbData = ImageUtilities.getFractionRgbDataForImages(imagesMap);
        DataSet learningData = ImageRecognitionHelper.createRGBTrainingSet(imageLabels, imageRgbData);
        nnet = NeuralNetwork.load(new FileInputStream(nnFile)); //Load NNetwork
        // The loaded network is expected to carry a MomentumBackpropagation
        // rule; reconfigure it with this task's hyper-parameters.
        MomentumBackpropagation mBackpropagation = (MomentumBackpropagation) nnet.getLearningRule();
        mBackpropagation.setLearningRate(learningRate);
        mBackpropagation.setMaxError(maxError);
        mBackpropagation.setMomentum(momentum);
        System.out.println("Network Information\nLabel = " + nnet.getLabel()
                + "\n Input Neurons = " + nnet.getInputsCount()
                + "\n Number of layers = " + nnet.getLayersCount()
        );
        // This task listens for learning events (epoch progress etc.).
        mBackpropagation.addListener(this);
        System.out.println("Starting training......");
        nnet.learn(learningData, mBackpropagation);
        //Training Completed
        listener.batchImageTrainingCompleted();
    } catch (FileNotFoundException ex) {
        System.out.println(ex.getMessage() + "\n" + ex.getLocalizedMessage());
    }
}
示例2: createMLPerceptron
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Creates and returns a new instance of Multi Layer Perceptron.
 *
 * @param layersStr            space separated number of neurons in layers
 * @param transferFunctionType transfer function type for neurons
 * @param learningRule         learning rule class to attach to the network
 * @param useBias              whether neurons should use a bias input
 * @param connectIO            whether to directly connect inputs to outputs
 * @return instance of Multi Layer Perceptron
 */
public static MultiLayerPerceptron createMLPerceptron(String layersStr, TransferFunctionType transferFunctionType, Class<?> learningRule, boolean useBias, boolean connectIO) {
    ArrayList<Integer> layerSizes = VectorParser.parseInteger(layersStr);
    NeuronProperties neuronProperties = new NeuronProperties(transferFunctionType, useBias);
    MultiLayerPerceptron nnet = new MultiLayerPerceptron(layerSizes, neuronProperties);
    // set learning rule - TODO: use reflection here
    // Direct class comparison instead of comparing fully-qualified name
    // strings: equivalent under a single classloader and less fragile.
    if (learningRule == BackPropagation.class) {
        nnet.setLearningRule(new BackPropagation());
    } else if (learningRule == MomentumBackpropagation.class) {
        nnet.setLearningRule(new MomentumBackpropagation());
    } else if (learningRule == DynamicBackPropagation.class) {
        nnet.setLearningRule(new DynamicBackPropagation());
    } else if (learningRule == ResilientPropagation.class) {
        nnet.setLearningRule(new ResilientPropagation());
    }
    // NOTE(review): an unrecognized learningRule class silently leaves the
    // network's default rule in place — confirm this fall-through is intended.
    // connect io
    if (connectIO) {
        nnet.connectInputsToOutputs();
    }
    return nnet;
}
示例3: AnimalNetwork
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Instantiates a new animal network.
 *
 * Note: the input/hidden/output parameters are currently unused — the layer
 * sizes come from the Data.* constants; kept for interface compatibility.
 *
 * @param input the input
 * @param hidden the hidden
 * @param output the output
 */
public AnimalNetwork(int input, int hidden, int output) {
    super();
    System.out.println("network is created");
    initializeNeurons();
    initializeQuestions();
    animal_network = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, Data.INPUTUNITS, Data.HIDDENUNITS, Data.OUTPUTUNITS);
    animal_network.setNetworkType(NeuralNetworkType.MULTI_LAYER_PERCEPTRON);
    animal_network.randomizeWeights(); //randomize weights
    // BUG FIX: the original code set maxError/learningRate/maxIterations on
    // the network's default learning rule, then replaced that rule with a
    // fresh MomentumBackpropagation — silently discarding all three settings.
    // Configure the momentum rule directly before installing it.
    MomentumBackpropagation backpropagation = new MomentumBackpropagation();
    backpropagation.setMaxError(MAXERROR);          // 0-1
    backpropagation.setLearningRate(LEARNINGRATE);  // 0-1
    backpropagation.setMaxIterations(MAXITERATIONS);
    backpropagation.setMomentum(0.7);               // momentum factor
    animal_network.setLearningRule(backpropagation);
}
示例4: startLearning
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Loads the test and training sets, builds a sigmoid MLP with two 86-neuron
 * hidden layers, attaches a momentum backpropagation rule and starts
 * learning — all on a background thread so the caller is not blocked.
 */
public void startLearning() {
    // Lambdas replace the verbose anonymous Runnable/LearningEventListener
    // classes (the file already uses lambda syntax, so Java 8+ is available).
    Thread t1 = new Thread(() -> {
        console.addLog("Loading test set");
        testSet = loader.loadDataSet(testSetPath);
        console.addLog("Test set loaded");
        console.addLog("Loading training set");
        trainingSet = loader.loadDataSet(trainingSetPath);
        console.addLog("Training set loaded. Input size: " + trainingSet.getInputSize() +
                " Output size: " + trainingSet.getOutputSize());
        nnet = new MultiLayerPerceptron(TransferFunctionType.SIGMOID,
                trainingSet.getInputSize(), 86, 86, trainingSet.getOutputSize());
        MomentumBackpropagation backPropagation = new MomentumBackpropagation();
        backPropagation.setLearningRate(learningRate);
        backPropagation.setMomentum(momentum);
        // Evaluates the network against the test set as learning progresses.
        LearningTestSetEvaluator evaluator =
                new LearningTestSetEvaluator(nnetName, testSet, trainingSet, console);
        backPropagation.addListener(evaluator);
        // Forward the LEARNING_STOPPED event to this task's listeners.
        backPropagation.addListener(event -> {
            if (event.getEventType() == LearningEvent.Type.LEARNING_STOPPED) {
                listeners.forEach((listener) -> listener.learningStopped(LearningNetTask.this));
            }
        });
        nnet.setLearningRule(backPropagation);
        console.addLog("Started neural net learning with momentum: "
                + momentum + ", learning rate: " + learningRate);
        nnet.learnInNewThread(trainingSet);
    });
    t1.start();
}
示例5: learnNeuralNet
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Trains a sigmoid MLP (two 86-neuron hidden layers) with momentum
 * backpropagation, evaluating on the test set, then saves the result.
 */
private static void learnNeuralNet(DataSet trainingSet, DataSet testSet) {
    MultiLayerPerceptron net = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, INPUT_LAYER, 86, 86, OUTPUT_LAYER);
    MomentumBackpropagation rule = new MomentumBackpropagation();
    rule.setMomentum(MOMENTUM);
    rule.setLearningRate(LEARINING_RATE);
    // The evaluator runs the test set as a learning-event listener.
    rule.addListener(new TestSetEvaluator(NNET_NAME, testSet, trainingSet));
    net.setLearningRule(rule);
    net.learn(trainingSet);
    net.save(NNET_NAME + "last");
}
示例6: learnNeuralNet
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Trains a sigmoid MLP (single 140-neuron hidden layer) with momentum
 * backpropagation, evaluating on the test set, then saves the result.
 */
private static void learnNeuralNet(DataSet trainingSet, DataSet testSet) {
    MultiLayerPerceptron net = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, INPUT_LAYER, 140, OUTPUT_LAYER);
    MomentumBackpropagation rule = new MomentumBackpropagation();
    rule.setMomentum(MOMENTUM);
    rule.setLearningRate(LEARINING_RATE);
    // The evaluator runs the test set as a learning-event listener.
    rule.addListener(new TestSetEvaluator(NNET_NAME, testSet, trainingSet));
    net.setLearningRule(rule);
    net.learn(trainingSet);
    net.save(NNET_NAME + "last");
}
示例7: learnNeuralNet
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Trains a sigmoid MLP (two 76-neuron hidden layers) with momentum
 * backpropagation, evaluating on the test set, then saves the result.
 */
private static void learnNeuralNet(DataSet trainingSet, DataSet testSet) {
    MultiLayerPerceptron net = new MultiLayerPerceptron(TransferFunctionType.SIGMOID, INPUT_LAYER, 76, 76, OUTPUT_LAYER);
    MomentumBackpropagation rule = new MomentumBackpropagation();
    rule.setMomentum(MOMENTUM);
    rule.setLearningRate(LEARINING_RATE);
    // The evaluator runs the test set as a learning-event listener.
    rule.addListener(new TestSetEvaluator(NNET_NAME, testSet, trainingSet));
    net.setLearningRule(rule);
    net.learn(trainingSet);
    net.save(NNET_NAME + "last");
}
示例8: doRun
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
@Override
public void doRun() {
    // NOTE(review): the label mapping looks inverted (isReal == false picks
    // "real") — behavior preserved as-is; confirm the intended semantics of
    // isReal with the original author.
    String fileName = isReal ? "faked" : "real";
    System.out.println("Teaching as " + fileName);
    // Single-entry map: the one image to (re)train under the chosen label.
    HashMap<String, BufferedImage> imagesMap = new HashMap<>();
    imagesMap.put(fileName, image);
    Map<String, FractionRgbData> imageRgbData = ImageUtilities.getFractionRgbDataForImages(imagesMap);
    DataSet learningData = ImageRecognitionHelper.createRGBTrainingSet(labels, imageRgbData);
    // Reuse and reconfigure the network's existing momentum rule.
    MomentumBackpropagation rule = (MomentumBackpropagation) nnet.getLearningRule();
    rule.setLearningRate(learningRate);
    rule.setMaxError(maxError);
    rule.setMomentum(momentum);
    System.out.println("Network Information\nLabel = " + nnet.getLabel()
            + "\n Input Neurons = " + nnet.getInputsCount()
            + "\n Number of layers = " + nnet.getLayersCount()
    );
    rule.addListener(this);
    System.out.println("Starting training......");
    nnet.learn(learningData, rule);
    //Mark nnet as dirty. Write on close
    isDirty = true;
}
示例9: run
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Runs this sample: trains an MLP on the logical XOR function, then saves,
 * reloads and re-tests the network.
 */
public void run() {
    // Training data: the four rows of the XOR truth table (2 inputs, 1 output).
    DataSet trainingSet = new DataSet(2, 1);
    trainingSet.addRow(new DataSetRow(new double[]{0, 0}, new double[]{0}));
    trainingSet.addRow(new DataSetRow(new double[]{0, 1}, new double[]{1}));
    trainingSet.addRow(new DataSetRow(new double[]{1, 0}, new double[]{1}));
    trainingSet.addRow(new DataSetRow(new double[]{1, 1}, new double[]{0}));
    // 2-3-1 perceptron with tanh activations.
    MultiLayerPerceptron mlp = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);
    // When the default rule is momentum backpropagation, switch it to batch
    // mode and tighten the stopping error.
    LearningRule defaultRule = mlp.getLearningRule();
    if (defaultRule instanceof MomentumBackpropagation) {
        MomentumBackpropagation mbp = (MomentumBackpropagation) defaultRule;
        mbp.setBatchMode(true);
        mbp.setMaxError(0.00001);
    }
    // This sample listens to learning events (epoch/error reporting).
    mlp.getLearningRule().addListener(this);
    System.out.println("Training neural network...");
    mlp.learn(trainingSet);
    System.out.println("Testing trained neural network");
    testNeuralNetwork(mlp, trainingSet);
    // Round-trip through persistence and verify the loaded copy too.
    mlp.save("myMlPerceptron.nnet");
    NeuralNetwork loadedMlPerceptron = NeuralNetwork.load("myMlPerceptron.nnet");
    System.out.println("Testing loaded neural network");
    testNeuralNetwork(loadedMlPerceptron, trainingSet);
}
示例10: prepareTest
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Benchmark preparation: training-set and neural-network creation.
 * Generates a training set of 100 rows, each with 10 random inputs and 5
 * random outputs, and builds a network with two hidden layers (8 and 7
 * neurons) whose learning rule runs for 2000 iterations.
 */
@Override
public void prepareTest() {
    final int rows = 100;
    final int inputSize = 10;
    final int outputSize = 5;
    this.trainingSet = new DataSet(inputSize, outputSize);
    for (int row = 0; row < rows; row++) {
        // Random input and output vectors for this row.
        double[] inputs = new double[inputSize];
        double[] outputs = new double[outputSize];
        for (int k = 0; k < inputSize; k++) {
            inputs[k] = Math.random();
        }
        for (int k = 0; k < outputSize; k++) {
            outputs[k] = Math.random();
        }
        trainingSet.addRow(new DataSetRow(inputs, outputs));
    }
    network = new MultiLayerPerceptron(inputSize, 8, 7, outputSize);
    // Default MLP rule is momentum backpropagation; cap the iteration count.
    ((MomentumBackpropagation) network.getLearningRule()).setMaxIterations(2000);
}
示例11: createNetwork
import org.neuroph.nnet.learning.MomentumBackpropagation; //导入依赖的package包/类
/**
 * Creates MultiLayerPerceptron Network architecture - fully connected
 * feed forward with specified number of neurons in each layer.
 *
 * @param neuronsInLayers  collection of neuron numbers, one entry per layer
 * @param neuronProperties neuron properties for hidden/output layers
 */
private void createNetwork(List<Integer> neuronsInLayers, NeuronProperties neuronProperties) {
    this.setNetworkType(NeuralNetworkType.MULTI_LAYER_PERCEPTRON);
    // Input layer uses linear input neurons regardless of neuronProperties.
    NeuronProperties inputNeuronProperties = new NeuronProperties(InputNeuron.class, Linear.class);
    Layer inputLayer = LayerFactory.createLayer(neuronsInLayers.get(0), inputNeuronProperties);
    // Bias neurons are on by default; an explicit "useBias" property overrides.
    boolean useBias = true;
    if (neuronProperties.hasProperty("useBias")) {
        useBias = (Boolean) neuronProperties.getProperty("useBias");
    }
    if (useBias) {
        inputLayer.addNeuron(new BiasNeuron());
    }
    this.addLayer(inputLayer);
    // Build the remaining layers, fully connecting each to its predecessor.
    Layer previousLayer = inputLayer;
    for (int idx = 1; idx < neuronsInLayers.size(); idx++) {
        Layer currentLayer = LayerFactory.createLayer(neuronsInLayers.get(idx), neuronProperties);
        // Every layer except the output layer gets a bias neuron.
        boolean isOutputLayer = (idx == neuronsInLayers.size() - 1);
        if (useBias && !isOutputLayer) {
            currentLayer.addNeuron(new BiasNeuron());
        }
        this.addLayer(currentLayer);
        ConnectionFactory.fullConnect(previousLayer, currentLayer);
        previousLayer = currentLayer;
    }
    // Register first/last layers as network input/output.
    NeuralNetworkFactory.setDefaultIO(this);
    // Momentum backpropagation is the default learning rule for this network.
    this.setLearningRule(new MomentumBackpropagation());
    // Nguyen-Widrow initialization in [-0.7, 0.7].
    this.randomizeWeights(new NguyenWidrowRandomizer(-0.7, 0.7));
}