This page collects typical usage examples of the Java class org.deeplearning4j.nn.api.OptimizationAlgorithm. If you have been wondering what OptimizationAlgorithm does, how to use it, or want to see it in real code, the curated examples below should help.
OptimizationAlgorithm belongs to the org.deeplearning4j.nn.api package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
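Before the examples, here is a minimal orientation sketch. It is not taken from any of the projects below, and it is written against the pre-1.0 DL4J builder API that these examples use (method names vary across versions). It shows where OptimizationAlgorithm plugs into a network configuration: the constants used on this page are STOCHASTIC_GRADIENT_DESCENT, CONJUGATE_GRADIENT, and LINE_GRADIENT_DESCENT; the enum also provides LBFGS.

import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class OptimizationAlgorithmSketch {
    public static void main(String[] args) {
        // optimizationAlgo(...) selects the optimizer used when the network is fit.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(42)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(4).nOut(3).build())
                .build();
        System.out.println(conf.toJson()); // the chosen algorithm appears in the serialized config
    }
}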
Example 1: softMaxRegression
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
private static MultiLayerNetwork softMaxRegression(int seed,
        int iterations, int numRows, int numColumns, int outputNum) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .gradientNormalization(
                    GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(iterations)
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(1)
            .layer(0,
                    new OutputLayer.Builder(
                            LossFunction.NEGATIVELOGLIKELIHOOD)
                            .activation("softmax")
                            .nIn(numColumns * numRows).nOut(outputNum)
                            .build())
            .pretrain(true).backprop(false)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    return model;
}
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 25, Source: NeuralNetworks.java
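Note that softMaxRegression returns the network without calling init(), and that pretrain(true).backprop(false) selects layer-wise pretraining with no backprop fine-tuning under the pre-0.9 DL4J semantics. A caller would typically initialize and fit the model along these lines (a hypothetical usage, not from the book; trainData stands for a DataSetIterator prepared elsewhere):

MultiLayerNetwork model = softMaxRegression(123, 10, 28, 28, 10);
model.init();
model.fit(trainData);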
Example 2: getDeepDenseLayerNetworkConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
/** Returns the network configuration: 2 hidden DenseLayers of size 50. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 50;
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
}
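This method reads the fields seed, iterations, learningRate, numInputs, and numOutputs from the enclosing class (not shown here). A plausible way to consume the configuration, with the listener purely optional (a sketch, not from the original source; trainIterator is an assumed DataSetIterator):

MultiLayerConfiguration conf = getDeepDenseLayerNetworkConfiguration();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
net.setListeners(new ScoreIterationListener(1)); // log the score every iteration
net.fit(trainIterator);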
Example 3: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
protected MultiLayerConfiguration getConfiguration()
{
    int hiddenLayerNodes = parameters.getHiddeLayerNodes()[0];
    final RBM hiddenLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize()).nOut(hiddenLayerNodes).weightInit(WeightInit.XAVIER).k(1)
            .activation("relu").lossFunction(LossFunctions.LossFunction.RMSE_XENT).updater(Updater.ADAGRAD)
            .dropOut(0.5).build();
    final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(hiddenLayerNodes)
            .nOut(parameters.getOutputSize()).activation("softmax").build();
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate()).optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4).regularization(true).momentum(0.9).useDropConnect(true).list(2).layer(0, hiddenLayer)
            .layer(1, outputLayer).build();
}
Example 4: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(parameters.getLearningRate()).l2(0.001)
            .list(4)
            .layer(0,
                    new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(1,
                    new DenseLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(2,
                    new DenseLayer.Builder().nIn(10).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(3,
                    new OutputLayer.Builder().nIn(250).nOut(parameters.getInputSize()).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu")
                            .lossFunction(LossFunctions.LossFunction.MSE).build())
            .pretrain(false).backprop(true).build();
}
Example 5: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0).iterations(parameters.getIterations()).momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list(4)
            .layer(0,
                    new AutoEncoder.Builder().nIn(parameters.getInputSize()).nOut(500).weightInit(WeightInit.XAVIER)
                            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
            .layer(1,
                    new AutoEncoder.Builder().nIn(500).nOut(250).weightInit(WeightInit.XAVIER)
                            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
            .layer(2,
                    new AutoEncoder.Builder().nIn(250).nOut(200).weightInit(WeightInit.XAVIER)
                            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
            .layer(3,
                    new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD).activation("softmax").nIn(200)
                            .nOut(parameters.getOutputSize()).build())
            .pretrain(true).backprop(false).build();
}
Example 6: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(2)
            .layer(0,
                    new ConvolutionLayer.Builder(new int[] { 1, 1 }).nIn(parameters.getInputSize()).nOut(1000)
                            .activation("relu").weightInit(WeightInit.RELU).build())
            .layer(1,
                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nOut(parameters.getOutputSize())
                            .weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
Example 7: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(3)
            .layer(0,
                    new ConvolutionLayer.Builder(10, 10).stride(2, 2).nIn(parameters.getChannels()).nOut(6)
                            .weightInit(WeightInit.XAVIER).activation("relu").build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(parameters.getOutputSize()).weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
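In examples 6 and 7, ConvolutionLayerSetup mutates the builder to compute each layer's nIn and insert the input preprocessors from the image dimensions (rows, columns, channels). This helper belongs to the older DL4J API; later releases replaced it with setInputType(InputType.convolutional(...)) on the builder itself, as examples 12 to 14 below demonstrate.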
Example 8: testConjugateGradientDescent
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Test
public void testConjugateGradientDescent() throws Exception {
    DenseLayer dl1 = new DenseLayer();
    dl1.setNOut(16);
    OutputLayer ol = new OutputLayer();
    Layer[] ls = new Layer[] {dl1, ol};
    NeuralNetConfiguration nnc = new NeuralNetConfiguration();
    nnc.setOptimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT);
    nnc.setSeed(TestUtil.SEED);
    clf.setNeuralNetConfiguration(nnc);
    clf.setNumEpochs(3);
    clf.setLayers(ls);
    final EarlyStopping config = new EarlyStopping(0, 0);
    clf.setEarlyStopping(config);
    TestUtil.crossValidate(clf, DatasetLoader.loadGlass());
}
Example 9: getConfiguration
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .constrainGradientToUnitNorm(true)
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(rows * columns).nOut(600).build())
            .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(600).nOut(250).build())
            .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(250).nOut(200).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                    .weightInit(WeightInit.XAVIER)
                    .activation("softmax")
                    .nIn(200).nOut(AUTOMATIC).build())
            .pretrain(true).backprop(false)
            .build();
    return conf;
}
Example 10: testSmallAmountOfData
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Test
public void testSmallAmountOfData() {
    //Idea: Test spark training where some executors don't get any data
    //in this case: by having fewer examples (2 DataSets) than executors (local[*])
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3)
                    .activation(Activation.TANH).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MSE).nIn(3).nOut(nOut).activation(Activation.SOFTMAX)
                    .build())
            .build();
    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf,
            new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0));
    Nd4j.getRandom().setSeed(12345);
    DataSet d1 = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));
    DataSet d2 = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));
    JavaRDD<DataSet> rddData = sc.parallelize(Arrays.asList(d1, d2));
    sparkNet.fit(rddData);
}
Example 11: testWriteCGModel
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Test
public void testWriteCGModel() throws Exception {
    ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1))
            .graphBuilder().addInputs("in")
            .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in")
            .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3)
                    .build(), "dense")
            .setOutputs("out").pretrain(false).backprop(true).build();
    ComputationGraph cg = new ComputationGraph(config);
    cg.init();
    File tempFile = File.createTempFile("tsfs", "fdfsdf");
    tempFile.deleteOnExit();
    ModelSerializer.writeModel(cg, tempFile, true);
    ComputationGraph network = ModelSerializer.restoreComputationGraph(tempFile);
    assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson());
    assertEquals(cg.params(), network.params());
    assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
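The third argument to ModelSerializer.writeModel is the saveUpdater flag; passing true persists the updater state (momentum and RMS accumulators, and similar) alongside the parameters, which is why the final assertEquals on getUpdater().getStateViewArray() still holds after the save/restore round trip.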
Example 12: testMultiCNNLayer
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Test
public void testMultiCNNLayer() throws Exception {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list()
            .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER)
                    .activation(Activation.RELU).build())
            .layer(1, new LocalResponseNormalization.Builder().build())
            .layer(2, new DenseLayer.Builder().nOut(2).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(10)
                    .build())
            .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();
    network.fit(next);
}
Example 13: testCNNBNActivationCombo
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
@Test
public void testCNNBNActivationCombo() throws Exception {
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
            .list()
            .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER)
                    .activation(Activation.IDENTITY).build())
            .layer(1, new BatchNormalization.Builder().build())
            .layer(2, new ActivationLayer.Builder().activation(Activation.RELU).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build())
            .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.fit(next);
    assertNotEquals(null, network.getLayer(0).getParam("W"));
    assertNotEquals(null, network.getLayer(0).getParam("b"));
}
Example 14: getCNNMLNConfig
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
private static MultiLayerNetwork getCNNMLNConfig(boolean backprop, boolean pretrain) {
    int outputNum = 10;
    int seed = 123;
    MultiLayerConfiguration.Builder conf =
            new NeuralNetConfiguration.Builder().seed(seed)
                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list()
                    .layer(0, new ConvolutionLayer.Builder(new int[] {10, 10}).nOut(6).build())
                    .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX,
                            new int[] {2, 2}).stride(1, 1).build())
                    .layer(2, new OutputLayer.Builder(
                            LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .nOut(outputNum).weightInit(WeightInit.XAVIER)
                            .activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).backprop(backprop)
                    .pretrain(pretrain);
    MultiLayerNetwork model = new MultiLayerNetwork(conf.build());
    model.init();
    return model;
}
Example 15: incompleteLRN
import org.deeplearning4j.nn.api.OptimizationAlgorithm; // import the required package/class
public MultiLayerConfiguration.Builder incompleteLRN() {
    MultiLayerConfiguration.Builder builder =
            new NeuralNetConfiguration.Builder().seed(3)
                    .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                            new int[] {5, 5}).nOut(6).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                            new int[] {2, 2}).build())
                    .layer(2, new LocalResponseNormalization.Builder().build())
                    .layer(3, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                            new int[] {5, 5}).nOut(6).build())
                    .layer(4, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
                            new int[] {2, 2}).build())
                    .layer(5, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                            LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2)
                            .build());
    return builder;
}