This article collects typical usage examples of the Java class org.encog.ml.train.MLTrain. If you are wondering what MLTrain is or how to use it, the hand-picked examples below should help.
The MLTrain class belongs to the org.encog.ml.train package. Five code examples of MLTrain are shown below, ordered by popularity.
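Before the examples, it helps to know the basic contract of MLTrain: call iteration() until the reported error reaches your target, then call finishTraining(). The sketch below only illustrates that loop; the network, trainingSet, error target, and epoch limit are assumptions, not taken from the examples that follow.

import org.encog.ml.data.MLDataSet;
import org.encog.ml.train.MLTrain;
import org.encog.neural.networks.BasicNetwork;
import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;

// Minimal sketch of the generic MLTrain loop, assuming an already-built network and data set.
private void basicTrainingLoop(BasicNetwork network, MLDataSet trainingSet) {
    final MLTrain train = new ResilientPropagation(network, trainingSet);
    int epoch = 0;
    do {
        train.iteration();                                // one pass of the training algorithm
        epoch++;
    } while (train.getError() > 0.01 && epoch < 100);     // assumed stopping criteria
    train.finishTraining();
}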
Example 1: withNEAT
import org.encog.ml.train.MLTrain; // import the required package/class
private MLRegression withNEAT() {
final NEATPopulation pop = new NEATPopulation(400, 10, 1000);
final CalculateScore score = new TrainingSetScore(this.training);
// train the neural network
final ActivationStep step = new ActivationStep();
step.setCenter(0.5);
pop.setOutputActivationFunction(step);
final MLTrain train = new NEATTraining(score, pop);
EncogUtility.trainToError(train, 0.01515);
return (MLRegression) train.getMethod();
}
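For context, the NEAT network returned above implements MLRegression, so it can be queried with compute(). The helper below is hypothetical; it assumes a 400-element feature array matching the NEATPopulation's input count.

import org.encog.ml.MLRegression;
import org.encog.ml.data.MLData;
import org.encog.ml.data.basic.BasicMLData;

// Hypothetical caller: run one 400-element input through the trained NEAT network.
private double[] predictWithNEAT(MLRegression network, double[] features) {
    final MLData input = new BasicMLData(features);   // features.length should be 400
    final MLData output = network.compute(input);     // 10 outputs, per the NEATPopulation above
    return output.getData();
}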
Example 2: withCPN
import org.encog.ml.train.MLTrain; // import the required package/class
private MLRegression withCPN() {
final CPN result = new CPN(400, 1000, 10, 1);
final MLTrain trainInstar = new TrainInstar(result, training, 0.1, false);
EncogUtility.trainToError(trainInstar, 0.01515);
final MLTrain trainOutstar = new TrainOutstar(result, training, 0.1);
EncogUtility.trainToError(trainOutstar, 0.01515);
return result;
}
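EncogUtility.trainToError drives each of the two CPN training phases until the 1.515% error target is met. A rough, hedged equivalent with explicit loops is sketched below; the method name is hypothetical and the console logging that trainToError performs is omitted.

import org.encog.ml.data.MLDataSet;
import org.encog.ml.train.MLTrain;
import org.encog.neural.cpn.CPN;
import org.encog.neural.cpn.training.TrainInstar;
import org.encog.neural.cpn.training.TrainOutstar;

// Hedged sketch: the same two-phase CPN training written as explicit loops.
private CPN trainCPNExplicitly(MLDataSet training) {
    final CPN result = new CPN(400, 1000, 10, 1);
    final MLTrain instar = new TrainInstar(result, training, 0.1, false);
    do {
        instar.iteration();                    // phase 1: train the instar (clustering) layer
    } while (instar.getError() > 0.01515);
    final MLTrain outstar = new TrainOutstar(result, training, 0.1);
    do {
        outstar.iteration();                   // phase 2: train the outstar (output) layer
    } while (outstar.getError() > 0.01515);
    return result;
}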
Example 3: withResilieant
import org.encog.ml.train.MLTrain; // import the required package/class
private MLRegression withResilieant() {
final MLTrain train = new ResilientPropagation(EncogUtility.simpleFeedForward(400, 100, 0, 10, false),
this.training);
EncogUtility.trainToError(train, 0.01515);
return (MLRegression) train.getMethod();
}
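The feedforward network returned here can also be checked against data it was not trained on. The helper below is a hypothetical addition: it assumes the MLRegression is the BasicNetwork built by simpleFeedForward and that a separate validation set exists.

import org.encog.ml.MLRegression;
import org.encog.ml.data.MLDataSet;
import org.encog.neural.networks.BasicNetwork;

// Hypothetical check of the trained network against a held-out data set.
private double validationError(MLRegression model, MLDataSet validationSet) {
    final BasicNetwork network = (BasicNetwork) model;     // simpleFeedForward builds a BasicNetwork
    return network.calculateError(validationSet);          // error over the set (MSE by default)
}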
Example 4: withSVM
import org.encog.ml.train.MLTrain; // import the required package/class
private MLRegression withSVM() {
final MLTrain train = new SVMTrain(new SVM(400, true), this.training);
EncogUtility.trainToError(train, 0.01515);
return (MLRegression) train.getMethod();
}
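SVMTrain above uses the SVM's current kernel parameters as-is. Encog also ships SVMSearchTrain, which searches over gamma and C; the variant below is a hedged sketch of using it in place of SVMTrain, keeping the same error target, and is not part of the original example.

import org.encog.ml.MLRegression;
import org.encog.ml.data.MLDataSet;
import org.encog.ml.svm.SVM;
import org.encog.ml.svm.training.SVMSearchTrain;
import org.encog.ml.train.MLTrain;
import org.encog.util.simple.EncogUtility;

// Hedged alternative: let SVMSearchTrain search the kernel parameters while training.
private MLRegression withSVMSearch(MLDataSet training) {
    final MLTrain train = new SVMSearchTrain(new SVM(400, true), training);
    EncogUtility.trainToError(train, 0.01515);
    return (MLRegression) train.getMethod();
}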
Example 5: train
import org.encog.ml.train.MLTrain; // import the required package/class
public void train(final ArrayList<DataPoint> dataHistory) {
if (isTraining()) {
throw new IllegalStateException();
}
setTrainerThread(new Thread() {
public void run() {
// Clean and normalize the data history
ArrayList<DataPoint> cleanedDataHistory = cleanDataHistory(dataHistory);
ArrayList<DataPoint> normalizedDataHistory = normalizeDataHistory(cleanedDataHistory);
// Create a new neural network and data set
BasicNetwork neuralNetwork = EncogUtility.simpleFeedForward(2, getHiddenLayerNeurons(0),
getHiddenLayerNeurons(1), 5, true);
MLDataSet dataSet = new BasicMLDataSet();
// Add all points of the data history to the data set
for (DataPoint dataPoint : normalizedDataHistory) {
MLData input = new BasicMLData(2);
input.setData(0, dataPoint.getX());
input.setData(1, dataPoint.getY());
// One-hot encode getButton() into a 5-element ideal vector:
// if getButton() is 0, the ideal output is 1, 0, 0, 0, 0;
// if getButton() is 4, the ideal output is 0, 0, 0, 0, 1.
MLData ideal = new BasicMLData(5);
for (int i = 0; i <= 4; i++) {
ideal.setData(i, (dataPoint.getButton() == i) ? 1 : 0);
}
MLDataPair pair = new BasicMLDataPair(input, ideal);
dataSet.add(pair);
}
// Create a training method
MLTrain trainingMethod = new ResilientPropagation((ContainsFlat) neuralNetwork, dataSet);
long startTime = System.currentTimeMillis();
int timeLeft = getMaxTrainingTime();
int iteration = 0;
// Train the network using multiple iterations on the training method
do {
trainingMethod.iteration();
timeLeft = (int) ((startTime + getMaxTrainingTime()) - System.currentTimeMillis());
iteration++;
sendNeuralNetworkIteration(iteration, trainingMethod.getError(), timeLeft);
} while (trainingMethod.getError() > getMaxTrainingError() && timeLeft > 0
&& !trainingMethod.isTrainingDone());
trainingMethod.finishTraining();
// Return the neural network to all listeners
sendNeuralNetworkTrainerResult(neuralNetwork);
}
});
getTrainerThread().start();
}
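Since the ideal vectors above one-hot encode the button index, a consumer of the trained network can decode a prediction by taking the index of the largest output. The decoder below is a hypothetical companion to this trainer, not part of the original class; it assumes x and y are normalized the same way as the training data.

import org.encog.ml.data.MLData;
import org.encog.ml.data.basic.BasicMLData;
import org.encog.neural.networks.BasicNetwork;

// Hypothetical decoder: map the network's 5 outputs back to a button index (0-4).
private int predictButton(BasicNetwork network, double x, double y) {
    final MLData input = new BasicMLData(new double[] { x, y });   // same 2 inputs as the training pairs
    final MLData output = network.compute(input);
    int bestButton = 0;
    for (int i = 1; i < output.size(); i++) {
        if (output.getData(i) > output.getData(bestButton)) {
            bestButton = i;
        }
    }
    return bestButton;
}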