This article collects typical usage examples of the Java class org.nd4j.linalg.learning.config.Sgd. If you have been wondering what the Sgd class is for and how to use it, the curated class examples here should help.
The Sgd class belongs to the org.nd4j.linalg.learning.config package. Fifteen code examples of the class are shown below, sorted by popularity by default.
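Before the examples, here is a minimal, self-contained sketch of the pattern they all share: Sgd takes the learning rate as its constructor argument and is attached to a network configuration through updater(...). The builder calls mirror the snippets below; the layer sizes are arbitrary placeholders.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class SgdQuickStart {
    public static void main(String[] args) {
        // Sgd(double learningRate): plain stochastic gradient descent,
        // i.e. params -= learningRate * gradient, with no momentum or adaptive state.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Sgd(0.1)) // learning rate 0.1
                .list()
                .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                        .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
    }
}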
Example 1: testNoImprovementNEpochsTermination
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testNoImprovementNEpochsTermination() {
//Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
//Simulate this by setting LR = 0.0
Nd4j.getRandom().setSeed(12345);
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list()
.layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
.lossFunction(LossFunctions.LossFunction.MCXENT).build())
.pretrain(false).backprop(true).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.setListeners(new ScoreIterationListener(1));
DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
.epochTerminationConditions(new MaxEpochsTerminationCondition(100),
new ScoreImprovementEpochTerminationCondition(5))
.iterationTerminationConditions(
new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
.scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
.build();
IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
EarlyStoppingResult result = trainer.fit();
//Expect no score change due to 0 LR -> terminate after 6 total epochs
assertEquals(6, result.getTotalEpochs());
assertEquals(0, result.getBestModelEpoch());
assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
assertEquals(expDetails, result.getTerminationDetails());
}
Example 2: getOriginalNet
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
public static MultiLayerNetwork getOriginalNet(int seed){
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.weightInit(WeightInit.XAVIER)
.activation(Activation.TANH)
.convolutionMode(ConvolutionMode.Same)
.updater(new Sgd(0.3))
.list()
.layer(new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build())
.layer(new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build())
.layer(new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build())
.layer(new DenseLayer.Builder().nOut(64).build())
.layer(new DenseLayer.Builder().nIn(64).nOut(64).build())
.layer(new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
.setInputType(InputType.convolutionalFlat(28,28,1))
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
return net;
}
Example 3: getOriginalGraph
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
public static ComputationGraph getOriginalGraph(int seed){
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.weightInit(WeightInit.XAVIER)
.activation(Activation.TANH)
.convolutionMode(ConvolutionMode.Same)
.updater(new Sgd(0.3))
.graphBuilder()
.addInputs("in")
.layer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build(), "in")
.layer("1", new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build(), "0")
.layer("2", new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build(), "1")
.layer("3", new DenseLayer.Builder().nOut(64).build(), "2")
.layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3")
.layer("5", new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build(), "4")
.setOutputs("5")
.setInputTypes(InputType.convolutionalFlat(28,28,1))
.build();
ComputationGraph net = new ComputationGraph(conf);
net.init();
return net;
}
Example 4: testSetParams
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testSetParams() {
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
.updater(new Sgd(1e-1))
.layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3)
.weightInit(WeightInit.ZERO).activation(Activation.SOFTMAX)
.lossFunction(LossFunctions.LossFunction.MCXENT).build())
.build();
int numParams = conf.getLayer().initializer().numParams(conf);
INDArray params = Nd4j.create(1, numParams);
OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf,
Collections.<IterationListener>singletonList(new ScoreIterationListener(1)), 0, params, true);
params = l.params();
l.setParams(params);
assertEquals(params, l.params());
}
Example 5: getDenseMLNConfig
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
private static MultiLayerNetwork getDenseMLNConfig(boolean backprop, boolean pretrain) {
int numInputs = 4;
int outputNum = 3;
long seed = 6;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed)
.updater(new Sgd(1e-3)).l1(0.3).l2(1e-3).list()
.layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(numInputs).nOut(3)
.activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(3).nOut(2)
.activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.weightInit(WeightInit.XAVIER).nIn(2).nOut(outputNum).build())
.backprop(backprop).pretrain(pretrain).build();
MultiLayerNetwork model = new MultiLayerNetwork(conf);
model.init();
return model;
}
Example 6: testAutoEncoder
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testAutoEncoder() throws Exception {
MnistDataFetcher fetcher = new MnistDataFetcher(true);
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).updater(new Sgd(0.1))
.layer(new org.deeplearning4j.nn.conf.layers.AutoEncoder.Builder().nIn(784).nOut(600)
.corruptionLevel(0.6)
.lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).build())
.build();
fetcher.fetch(100);
DataSet d2 = fetcher.next();
INDArray input = d2.getFeatureMatrix();
int numParams = conf.getLayer().initializer().numParams(conf);
INDArray params = Nd4j.create(1, numParams);
AutoEncoder da = (AutoEncoder) conf.getLayer().instantiate(conf,
Arrays.<IterationListener>asList(new ScoreIterationListener(1)), 0, params, true);
assertEquals(da.params(), da.params());
assertEquals(471784, da.params().length());
da.setParams(da.params());
da.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));
da.fit(input);
}
Example 7: testBackProp
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testBackProp() throws Exception {
MnistDataFetcher fetcher = new MnistDataFetcher(true);
// LayerFactory layerFactory = LayerFactories.getFactory(new org.deeplearning4j.nn.conf.layers.AutoEncoder());
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
.updater(new Sgd(0.1))
.layer(new org.deeplearning4j.nn.conf.layers.AutoEncoder.Builder().nIn(784).nOut(600)
.corruptionLevel(0.6)
.lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).build())
.build();
fetcher.fetch(100);
DataSet d2 = fetcher.next();
INDArray input = d2.getFeatureMatrix();
int numParams = conf.getLayer().initializer().numParams(conf);
INDArray params = Nd4j.create(1, numParams);
AutoEncoder da = (AutoEncoder) conf.getLayer().instantiate(conf, null, 0, params, true);
Gradient g = new DefaultGradient();
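// store the reconstruction error, decode(activate(x)) - x, under the weight key as a stand-in gradient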
g.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, da.decode(da.activate(input)).sub(input));
}
Example 8: getIrisMLPSimpleConfig
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
/** Very simple back-prop config set up for Iris.
* Learning Rate = 0.1
* No regularization, no Adagrad, no momentum etc. One iteration.
*/
private static MultiLayerConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes,
Activation activationFunction) {
NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1))
.seed(12345L).list();
for (int i = 0; i < hiddenLayerSizes.length; i++) {
int nIn = (i == 0 ? 4 : hiddenLayerSizes[i - 1]);
lb.layer(i, new DenseLayer.Builder().nIn(nIn).nOut(hiddenLayerSizes[i]).weightInit(WeightInit.XAVIER)
.activation(activationFunction).build());
}
lb.layer(hiddenLayerSizes.length,
new OutputLayer.Builder(LossFunction.MCXENT).nIn(hiddenLayerSizes[hiddenLayerSizes.length - 1])
.nOut(3).weightInit(WeightInit.XAVIER)
.activation(activationFunction.equals(Activation.IDENTITY) ? Activation.IDENTITY
: Activation.SOFTMAX)
.build());
lb.pretrain(false).backprop(true);
return lb.build();
}
Example 9: testCompGraphNullLayer
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testCompGraphNullLayer() {
ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.01))
.seed(42).miniBatch(false).l1(0.2).l2(0.2)
/* Graph Builder */
.updater(Updater.RMSPROP).graphBuilder().addInputs("in")
.addLayer("L" + 1,
new GravesLSTM.Builder().nIn(20).updater(Updater.RMSPROP).nOut(10)
.weightInit(WeightInit.XAVIER)
.dropOut(0.4).l1(0.3).activation(Activation.SIGMOID).build(),
"in")
.addLayer("output",
new RnnOutputLayer.Builder().nIn(20).nOut(10).activation(Activation.SOFTMAX)
.weightInit(WeightInit.RELU_UNIFORM).build(),
"L" + 1)
.setOutputs("output");
ComputationGraphConfiguration conf = gb.build();
ComputationGraph cg = new ComputationGraph(conf);
cg.init();
}
Example 10: getNetwork
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
private MultiLayerNetwork getNetwork() {
int nIn = 5;
int nOut = 6;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01).l2(0.01)
.updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list()
.layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build())
.layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder()
.lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build())
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
return net;
}
Example 11: testWriteMLNModel
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testWriteMLNModel() throws Exception {
int nIn = 5;
int nOut = 6;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01)
.l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list()
.layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build())
.layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder()
.lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build())
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
File tempFile = File.createTempFile("tsfs", "fdfsdf");
tempFile.deleteOnExit();
ModelSerializer.writeModel(net, tempFile, true);
MultiLayerNetwork network = ModelSerializer.restoreMultiLayerNetwork(tempFile);
assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson());
assertEquals(net.params(), network.params());
assertEquals(net.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
Example 12: testWriteCGModel
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testWriteCGModel() throws Exception {
ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1))
.graphBuilder().addInputs("in")
.addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out",
new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3)
.build(),
"dense")
.setOutputs("out").pretrain(false).backprop(true).build();
ComputationGraph cg = new ComputationGraph(config);
cg.init();
File tempFile = File.createTempFile("tsfs", "fdfsdf");
tempFile.deleteOnExit();
ModelSerializer.writeModel(cg, tempFile, true);
ComputationGraph network = ModelSerializer.restoreComputationGraph(tempFile);
assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson());
assertEquals(cg.params(), network.params());
assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
Example 13: testWriteCGModelInputStream
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testWriteCGModelInputStream() throws Exception {
ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1))
.graphBuilder().addInputs("in")
.addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out",
new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3)
.build(),
"dense")
.setOutputs("out").pretrain(false).backprop(true).build();
ComputationGraph cg = new ComputationGraph(config);
cg.init();
File tempFile = File.createTempFile("tsfs", "fdfsdf");
tempFile.deleteOnExit();
ModelSerializer.writeModel(cg, tempFile, true);
FileInputStream fis = new FileInputStream(tempFile);
ComputationGraph network = ModelSerializer.restoreComputationGraph(fis);
assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson());
assertEquals(cg.params(), network.params());
assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
Example 14: test
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void test() throws IOException {
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
.layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
.layer(1, new OutputLayer.Builder().nIn(10).nOut(10).build()).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
MultiLayerNetwork net2 =
new TransferLearning.Builder(net)
.fineTuneConfiguration(
new FineTuneConfiguration.Builder().updater(new Sgd(0.01)).build())
.setFeatureExtractor(0).build();
File f = Files.createTempFile("dl4jTestTransferStatsCollection", "bin").toFile();
f.delete();
net2.setListeners(new StatsListener(new FileStatsStorage(f)));
//Previously: this failed on frozen layers
net2.fit(new DataSet(Nd4j.rand(8, 10), Nd4j.rand(8, 10)));
f.deleteOnExit();
}
Example 15: testEarlyStoppingEveryNEpoch
import org.nd4j.linalg.learning.config.Sgd; // import the required package/class
@Test
public void testEarlyStoppingEveryNEpoch() {
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.updater(new Sgd(0.01)).weightInit(WeightInit.XAVIER).list()
.layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
.lossFunction(LossFunctions.LossFunction.MCXENT).build())
.pretrain(false).backprop(true).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.setListeners(new ScoreIterationListener(1));
DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
.epochTerminationConditions(new MaxEpochsTerminationCondition(5))
.scoreCalculator(new DataSetLossCalculator(irisIter, true))
.evaluateEveryNEpochs(2).modelSaver(saver).build();
IEarlyStoppingTrainer<MultiLayerNetwork> trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
System.out.println(result);
assertEquals(5, result.getTotalEpochs());
assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
}
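A closing note: all fifteen examples pass a fixed learning rate to the Sgd constructor. Newer ND4J versions (roughly 1.0.0-alpha onward) also provide a constructor that accepts an ISchedule, letting the learning rate decay during training. The sketch below assumes such a version and is not taken from the examples above.

import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.schedule.ExponentialSchedule;
import org.nd4j.linalg.schedule.ScheduleType;

// SGD whose learning rate starts at 0.1 and is multiplied by 0.99 each
// iteration: ExponentialSchedule(scheduleType, initialValue, gamma).
Sgd sgd = new Sgd(new ExponentialSchedule(ScheduleType.ITERATION, 0.1, 0.99));
// Attach it exactly as in the examples: new NeuralNetConfiguration.Builder().updater(sgd)...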