This page collects typical usage examples of the Java method org.deeplearning4j.nn.graph.ComputationGraph.setListeners. If you are wondering what ComputationGraph.setListeners does, how to call it, or what real usages look like, the hand-picked method code examples below may help. You can also explore further usage examples of its containing class, org.deeplearning4j.nn.graph.ComputationGraph.
Eleven code examples of ComputationGraph.setListeners are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
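Before the examples, a minimal, self-contained sketch of the pattern they all share may be useful: build a ComputationGraphConfiguration, construct and init() the ComputationGraph, attach one or more listeners via setListeners, then train. The class name SetListenersExample and the Iris-based single-layer configuration below are illustrative assumptions, not taken from any of the examples; only the setListeners call pattern itself is the point.

import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class SetListenersExample {
    public static void main(String[] args) {
        // Minimal single-layer graph: 4 inputs -> 3-class softmax output
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("in")
                .addLayer("out", new OutputLayer.Builder()
                        .nIn(4).nOut(3)
                        .activation(Activation.SOFTMAX)
                        .lossFunction(LossFunctions.LossFunction.MCXENT)
                        .build(), "in")
                .setOutputs("out")
                .build();

        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        // Attach one or more listeners; ScoreIterationListener logs the score every iteration
        net.setListeners(new ScoreIterationListener(1));

        // Listeners fire as the network is fit
        DataSetIterator iris = new IrisDataSetIterator(150, 150);
        net.fit(iris);
    }
}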
Example 1: testMNISTConfig
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
@Ignore //Should be run manually
public void testMNISTConfig() throws Exception {
    int batchSize = 64; // Test batch size
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);
    ComputationGraph net = getCNNMnistConfig();
    net.init();
    net.setListeners(new ScoreIterationListener(1));
    for (int i = 0; i < 50; i++) {
        net.fit(mnistTrain.next());
        Thread.sleep(1000);
    }
    Thread.sleep(100000);
}
Example 2: testListenersViaModelGraph
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testListenersViaModelGraph() {
    TestListener.clearCounts();
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in").addLayer("0",
                    new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10)
                            .activation(Activation.TANH).build(),
                    "in")
            .setOutputs("0").build();
    ComputationGraph model = new ComputationGraph(conf);
    model.init();
    StatsStorage ss = new InMemoryStatsStorage();
    model.setListeners(new TestListener(), new StatsListener(ss));
    testListenersForModel(model, null);
    assertEquals(1, ss.listSessionIDs().size());
    assertEquals(2, ss.listWorkerIDsForSession(ss.listSessionIDs().get(0)).size());
}
Example 3: testBadTuning
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(5.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                    new MaxScoreIterationTerminationCondition(10)) //Initial score is ~2.5
            .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();
    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();
    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(10).toString();
    assertEquals(expDetails, result.getTerminationDetails());
    assertEquals(0, result.getBestModelEpoch());
    assertNotNull(result.getBestModel());
}
Example 4: testTimeTermination
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
            //.scoreCalculator(new DataSetLossCalculator(irisIter, true)) //No score calculator in this test (don't need score)
            .modelSaver(saver).build();
    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;
    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 9);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 5: testNoImprovementNEpochsTermination
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                    new ScoreImprovementEpochTerminationCondition(5))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
            .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();
    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter);
    EarlyStoppingResult result = trainer.fit();
    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertEquals(6, result.getTotalEpochs());
    assertEquals(0, result.getBestModelEpoch());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 6: testListeners
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
            .scoreCalculator(new DataSetLossCalculatorCG(irisIter, true)).modelSaver(saver).build();
    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();
    IEarlyStoppingTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, irisIter, listener);
    trainer.fit();
    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example 7: testUICompGraph
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
@Ignore
public void testUICompGraph() throws Exception {
    StatsStorage ss = new InMemoryStatsStorage();
    UIServer uiServer = UIServer.getInstance();
    uiServer.attach(ss);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in")
            .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(),
                    "in")
            .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(4).nOut(3).build(), "L0")
            .pretrain(false).backprop(true).setOutputs("L1").build();
    ComputationGraph net = new ComputationGraph(conf);
    net.init();
    net.setListeners(new StatsListener(ss), new ScoreIterationListener(1));
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    for (int i = 0; i < 100; i++) {
        net.fit(iter);
        Thread.sleep(100);
    }
    Thread.sleep(100000);
}
Example 8: testBadTuning
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testBadTuning() {
    //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(2.0)) //Intentionally huge LR
            .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(5000))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES),
                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
            .scoreCalculator(new SparkLossCalculatorComputationGraph(
                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
            .modelSaver(saver).build();
    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();
    assertTrue(result.getTotalEpochs() < 5);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxScoreIterationTerminationCondition(7.5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 9: testNoImprovementNEpochsTermination
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testNoImprovementNEpochsTermination() {
    //Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
    //Simulate this by setting LR = 0.0
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                    new ScoreImprovementEpochTerminationCondition(5))
            .iterationTerminationConditions(new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
            .scoreCalculator(new SparkLossCalculatorComputationGraph(
                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
            .modelSaver(saver).build();
    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    EarlyStoppingResult result = trainer.fit();
    //Expect no score change due to 0 LR -> terminate after 6 total epochs
    assertTrue(result.getTotalEpochs() < 12); //Normally expect 6 epochs exactly; get a little more than that here due to rounding + order of operations
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}
Example 10: testListeners
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testListeners() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
            .scoreCalculator(new SparkLossCalculatorComputationGraph(
                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
            .modelSaver(saver).build();
    LoggingEarlyStoppingListener listener = new LoggingEarlyStoppingListener();
    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    trainer.setListener(listener);
    trainer.fit();
    assertEquals(1, listener.onStartCallCount);
    assertEquals(5, listener.onEpochCallCount);
    assertEquals(1, listener.onCompletionCallCount);
}
Example 11: testTimeTermination
import org.deeplearning4j.nn.graph.ComputationGraph; //import the package/class that the method depends on
@Test
public void testTimeTermination() {
    //test termination after max time
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder()
            .addInputs("in")
            .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in")
            .setOutputs("0").pretrain(false).backprop(true).build();
    ComputationGraph net = new ComputationGraph(conf);
    net.setListeners(new ScoreIterationListener(1));
    JavaRDD<DataSet> irisData = getIris();
    EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<ComputationGraph> esConf = new EarlyStoppingConfiguration.Builder<ComputationGraph>()
            .epochTerminationConditions(new MaxEpochsTerminationCondition(10000))
            .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                    new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
            .scoreCalculator(new SparkLossCalculatorComputationGraph(
                    irisData.map(new DataSetToMultiDataSetFn()), true, sc.sc()))
            .modelSaver(saver).build();
    TrainingMaster tm = new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0);
    IEarlyStoppingTrainer<ComputationGraph> trainer = new SparkEarlyStoppingGraphTrainer(getContext().sc(), tm,
            esConf, net, irisData.map(new DataSetToMultiDataSetFn()));
    long startTime = System.currentTimeMillis();
    EarlyStoppingResult result = trainer.fit();
    long endTime = System.currentTimeMillis();
    int durationSeconds = (int) (endTime - startTime) / 1000;
    assertTrue(durationSeconds >= 3);
    assertTrue(durationSeconds <= 9);
    assertEquals(EarlyStoppingResult.TerminationReason.IterationTerminationCondition,
            result.getTerminationReason());
    String expDetails = new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS).toString();
    assertEquals(expDetails, result.getTerminationDetails());
}