本文整理汇总了Java中org.deeplearning4j.nn.conf.layers.OutputLayer类的典型用法代码示例。如果您正苦于以下问题:Java OutputLayer类的具体用法?Java OutputLayer怎么用?Java OutputLayer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
OutputLayer类属于org.deeplearning4j.nn.conf.layers包,在下文中一共展示了OutputLayer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: softMaxRegression
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a single-layer softmax regression network: one OutputLayer with
 * softmax activation over {@code numRows * numColumns} inputs.
 *
 * @param seed       RNG seed for reproducible weight initialization
 * @param iterations number of optimization iterations per fit call
 * @param numRows    input image height
 * @param numColumns input image width
 * @param outputNum  number of output classes
 * @return an initialized {@link MultiLayerNetwork} ready for training
 */
private static MultiLayerNetwork softMaxRegression(int seed,
        int iterations, int numRows, int numColumns, int outputNum) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            // Clip each gradient element to +/- threshold to avoid exploding gradients.
            .gradientNormalization(
                    GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(iterations)
            .momentum(0.5)
            // Raise momentum to 0.9 from epoch 3 onward.
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(1)
            .layer(0,
                    new OutputLayer.Builder(
                            LossFunction.NEGATIVELOGLIKELIHOOD)
                            .activation("softmax")
                            .nIn(numColumns * numRows).nOut(outputNum)
                            .build()).pretrain(true).backprop(false)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    // BUG FIX: the network must be initialized before use; the original code
    // returned an uninitialized model, which fails on fit()/output().
    model.init();
    return model;
}
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:25,代码来源:NeuralNetworks.java
示例2: getDeepDenseLayerNetworkConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Returns the network configuration: two hidden DenseLayers of size 50 with
 * tanh activation, followed by an identity-activation MSE output layer.
 */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 50;
    final DenseLayer firstHidden = new DenseLayer.Builder()
            .nIn(numInputs).nOut(numHiddenNodes)
            .activation(Activation.TANH).build();
    final DenseLayer secondHidden = new DenseLayer.Builder()
            .nIn(numHiddenNodes).nOut(numHiddenNodes)
            .activation(Activation.TANH).build();
    final OutputLayer regressionHead = new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
            .activation(Activation.IDENTITY)
            .nIn(numHiddenNodes).nOut(numOutputs).build();
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, firstHidden)
            .layer(1, secondHidden)
            .layer(2, regressionHead)
            .pretrain(false).backprop(true).build();
}
示例3: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a two-layer configuration: a Gaussian-visible / rectified-hidden RBM
 * with dropout, followed by a softmax output layer trained with MCXENT loss.
 */
protected MultiLayerConfiguration getConfiguration()
{
    final int firstHiddenSize = parameters.getHiddeLayerNodes()[0];
    final RBM rbmLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize())
            .nOut(firstHiddenSize)
            .weightInit(WeightInit.XAVIER)
            .k(1)
            .activation("relu")
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .updater(Updater.ADAGRAD)
            .dropOut(0.5)
            .build();
    final OutputLayer classifierHead = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
            .nIn(firstHiddenSize)
            .nOut(parameters.getOutputSize())
            .activation("softmax")
            .build();
    return new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate())
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4).regularization(true)
            .momentum(0.9)
            .useDropConnect(true)
            .list(2)
            .layer(0, rbmLayer)
            .layer(1, classifierHead)
            .build();
}
示例4: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a 4-layer autoencoder-shaped dense network
 * (inputSize -> 250 -> 10 -> 250 -> inputSize) trained with plain
 * backpropagation and MSE reconstruction loss.
 */
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final int inputSize = parameters.getInputSize();
    final DenseLayer encoderWide = new DenseLayer.Builder().nIn(inputSize).nOut(250)
            .weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
    final DenseLayer bottleneck = new DenseLayer.Builder().nIn(250).nOut(10)
            .weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
    final DenseLayer decoderWide = new DenseLayer.Builder().nIn(10).nOut(250)
            .weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
    final OutputLayer reconstructionHead = new OutputLayer.Builder().nIn(250).nOut(inputSize)
            .weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu")
            .lossFunction(LossFunctions.LossFunction.MSE).build();
    return new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(parameters.getLearningRate())
            .l2(0.001)
            .list(4)
            .layer(0, encoderWide)
            .layer(1, bottleneck)
            .layer(2, decoderWide)
            .layer(3, reconstructionHead)
            .pretrain(false).backprop(true).build();
}
示例5: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a stacked denoising-autoencoder configuration (500 -> 250 -> 200)
 * with a softmax classification head, trained by layer-wise pretraining only
 * (backprop fine-tuning disabled).
 */
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final AutoEncoder firstEncoder = new AutoEncoder.Builder()
            .nIn(parameters.getInputSize()).nOut(500).weightInit(WeightInit.XAVIER)
            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build();
    final AutoEncoder secondEncoder = new AutoEncoder.Builder()
            .nIn(500).nOut(250).weightInit(WeightInit.XAVIER)
            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build();
    final AutoEncoder thirdEncoder = new AutoEncoder.Builder()
            .nIn(250).nOut(200).weightInit(WeightInit.XAVIER)
            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build();
    final OutputLayer classifierHead = new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
            .activation("softmax").nIn(200).nOut(parameters.getOutputSize()).build();
    return new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            // Clip per-element gradients at 1.0 to stabilize pretraining.
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(parameters.getIterations())
            .momentum(0.5)
            // Raise momentum to 0.9 from epoch 3 onward.
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, firstEncoder)
            .layer(1, secondEncoder)
            .layer(2, thirdEncoder)
            .layer(3, classifierHead)
            .pretrain(true).backprop(false).build();
}
示例6: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; // import the dependency package/class
/**
 * Builds a minimal CNN: a single 1x1 convolution feeding a softmax output
 * layer; {@code ConvolutionLayerSetup} wires in the image pre-processors.
 */
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final ConvolutionLayer convLayer = new ConvolutionLayer.Builder(new int[] { 1, 1 })
            .nIn(parameters.getInputSize()).nOut(1000)
            .activation("relu").weightInit(WeightInit.RELU).build();
    final OutputLayer classifierHead = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
            .nOut(parameters.getOutputSize())
            .weightInit(WeightInit.XAVIER).activation("softmax").build();
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list(2)
            .layer(0, convLayer)
            .layer(1, classifierHead)
            .backprop(true).pretrain(false);
    // Mutates the builder, inserting CNN input pre-processors for the image geometry.
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
示例7: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; // import the dependency package/class
/**
 * Builds a small CNN configuration: 10x10 convolution with stride 2, 2x2 max
 * pooling, then a softmax output layer; {@code ConvolutionLayerSetup} wires in
 * the image pre-processors.
 */
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final ConvolutionLayer convLayer = new ConvolutionLayer.Builder(10, 10).stride(2, 2)
            .nIn(parameters.getChannels()).nOut(6)
            .weightInit(WeightInit.XAVIER).activation("relu").build();
    final SubsamplingLayer poolLayer =
            new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build();
    final OutputLayer classifierHead =
            new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(parameters.getOutputSize())
                    .weightInit(WeightInit.XAVIER).activation("softmax").build();
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
            .seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list(3)
            .layer(0, convLayer)
            .layer(1, poolLayer)
            .layer(2, classifierHead)
            .backprop(true).pretrain(false);
    // Mutates the builder, inserting CNN input pre-processors for the image geometry.
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
示例8: net
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a small 3-layer classifier configuration: two dense hidden layers of
 * width 3 (ReLU by the builder-level default) and a softmax output layer with
 * negative log-likelihood loss.
 *
 * @param nIn  number of input features
 * @param nOut number of output classes
 * @return the network configuration
 */
private static MultiLayerConfiguration net(int nIn, int nOut) {
    final DenseLayer firstHidden = new DenseLayer.Builder().nIn(nIn).nOut(3).build();
    final DenseLayer secondHidden = new DenseLayer.Builder().nIn(3).nOut(3).build();
    final OutputLayer classifierHead =
            new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation(Activation.SOFTMAX)
                    .nIn(3)
                    .nOut(nOut)
                    .build();
    return new NeuralNetConfiguration.Builder()
            .seed(42)
            .iterations(1)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .learningRate(0.1)
            .regularization(true).l2(1e-4)
            .list(firstHidden, secondHidden, classifierHead)
            .build();
}
示例9: getConfiguration
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a deep-belief-network configuration: three binary-binary RBM layers
 * (600 -> 250 -> 200) topped by a softmax output layer, trained by layer-wise
 * pretraining only (no backprop fine-tuning).
 *
 * @param dataset input data frame (NOTE(review): not read here — the output
 *                width comes from {@code AUTOMATIC}; confirm this is intended)
 */
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {
    final RBM firstRbm = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
            .weightInit(WeightInit.XAVIER)
            .nIn(rows * columns).nOut(600).build();
    final RBM secondRbm = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
            .weightInit(WeightInit.XAVIER)
            .nIn(600).nOut(250).build();
    final RBM thirdRbm = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
            .weightInit(WeightInit.XAVIER)
            .nIn(250).nOut(200).build();
    final OutputLayer classifierHead = new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
            .weightInit(WeightInit.XAVIER)
            .activation("softmax")
            .nIn(200).nOut(AUTOMATIC).build();
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .constrainGradientToUnitNorm(true)
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, firstRbm)
            .layer(1, secondRbm)
            .layer(2, thirdRbm)
            .layer(3, classifierHead)
            .pretrain(true).backprop(false)
            .build();
}
示例10: testSmallAmountOfData
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testSmallAmountOfData() {
    // Spark training must tolerate executors that receive no data: here we
    // supply only 2 DataSets while running with more executors (local[*]).
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new RmsProp())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                    .nIn(nIn).nOut(3).activation(Activation.TANH).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MSE)
                    .nIn(3).nOut(nOut).activation(Activation.SOFTMAX).build())
            .build();
    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf,
            new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0));
    Nd4j.getRandom().setSeed(12345);
    DataSet firstExample = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));
    DataSet secondExample = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));
    JavaRDD<DataSet> rddData = sc.parallelize(Arrays.asList(firstExample, secondExample));
    // Must complete without error even though some executors see no examples.
    sparkNet.fit(rddData);
}
示例11: testNoImprovementNEpochsTermination
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testNoImprovementNEpochsTermination() {
//Idea: terminate training if score (test set loss) does not improve for 5 consecutive epochs
//Simulate this by setting LR = 0.0
Nd4j.getRandom().setSeed(12345);
// Single softmax-default output layer on Iris (4 features -> 3 classes); SGD
// with learning rate 0.0 guarantees the score never changes between epochs.
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list()
.layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
.lossFunction(LossFunctions.LossFunction.MCXENT).build())
.pretrain(false).backprop(true).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.setListeners(new ScoreIterationListener(1));
// Full Iris dataset in one batch, reused as the scoring "test set" below.
DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
// Termination conditions: 100 max epochs, 5 epochs without improvement,
// 3-second wall-clock limit, or the score exceeding 7.5.
EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
.epochTerminationConditions(new MaxEpochsTerminationCondition(100),
new ScoreImprovementEpochTerminationCondition(5))
.iterationTerminationConditions(
new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
.scoreCalculator(new DataSetLossCalculator(irisIter, true)).modelSaver(saver)
.build();
IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, irisIter);
EarlyStoppingResult result = trainer.fit();
//Expect no score change due to 0 LR -> terminate after 6 total epochs
// (epoch 0 establishes the best score, then 5 epochs with no improvement).
assertEquals(6, result.getTotalEpochs());
assertEquals(0, result.getBestModelEpoch());
assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
// The termination details string must name the score-improvement condition.
String expDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
assertEquals(expDetails, result.getTerminationDetails());
}
示例12: getOriginalGraph
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds and initializes a small convolutional ComputationGraph
 * (conv -> subsampling -> conv -> dense -> dense -> MSE output) for
 * flattened 28x28 single-channel input.
 *
 * @param seed RNG seed for reproducible weight initialization
 * @return the initialized graph
 */
public static ComputationGraph getOriginalGraph(int seed){
    final OutputLayer regressionHead = new OutputLayer.Builder()
            .nIn(64).nOut(10)
            .lossFunction(LossFunctions.LossFunction.MSE).build();
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .convolutionMode(ConvolutionMode.Same)
            .updater(new Sgd(0.3))
            .graphBuilder()
            .addInputs("in")
            .layer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1).build(), "in")
            .layer("1", new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build(), "0")
            .layer("2", new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2, 2).stride(1, 1).build(), "1")
            .layer("3", new DenseLayer.Builder().nOut(64).build(), "2")
            .layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3")
            .layer("5", regressionHead, "4")
            .setOutputs("5")
            .setInputTypes(InputType.convolutionalFlat(28, 28, 1))
            .build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    return graph;
}
示例13: testSetGetUpdater2
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testSetGetUpdater2() {
    // Same scenario as the previous test, except setUpdater is applied to a
    // freshly created network with per-layer updater overrides.
    Nd4j.getRandom().setSeed(12345L);
    final double learningRate = 0.03;
    final int numIn = 4;
    final int numOut = 8;
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new Nesterovs(learningRate, 0.6)).list()
            // Each layer overrides the network-level Nesterovs updater.
            .layer(0, new DenseLayer.Builder().nIn(numIn).nOut(5)
                    .updater(org.deeplearning4j.nn.conf.Updater.SGD).build())
            .layer(1, new DenseLayer.Builder().nIn(5).nOut(6)
                    .updater(new NoOp()).build())
            .layer(2, new DenseLayer.Builder().nIn(6).nOut(7)
                    .updater(org.deeplearning4j.nn.conf.Updater.ADAGRAD).build())
            .layer(3, new OutputLayer.Builder().nIn(7).nOut(numOut)
                    .updater(org.deeplearning4j.nn.conf.Updater.NESTEROVS).build())
            .backprop(true).pretrain(false).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    Updater replacement = UpdaterCreator.getUpdater(net);
    net.setUpdater(replacement);
    // The network must hand back the exact same object, not a copy.
    assertTrue(replacement == net.getUpdater());
}
示例14: testJsonComputationGraph
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testJsonComputationGraph() {
    // A ComputationGraph containing a custom layer must round-trip through
    // both JSON and YAML serialization without losing configuration.
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new CustomLayer(3.14159), "0")
            .addLayer("2",
                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10).build(),
                    "1")
            .setOutputs("2").pretrain(false).backprop(true).build();
    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);
    ComputationGraphConfiguration fromJson = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, fromJson);
    ComputationGraphConfiguration fromYaml = ComputationGraphConfiguration.fromYaml(yaml);
    assertEquals(conf, fromYaml);
}
示例15: checkInitializationFF
import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void checkInitializationFF() {
    // Build a real network around the custom layer, then verify parameter
    // counts after init and smoke-test the forward pass and one fit call.
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
            .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).build())
            .layer(1, new CustomLayer(3.14159)) //hard-coded nIn/nOut of 10
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(11).build())
            .pretrain(false).backprop(true).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    // Expected parameter counts per layer: weights (nIn * nOut) plus biases (nOut).
    assertEquals(9 * 10 + 10, net.getLayer(0).numParams());
    assertEquals(10 * 10 + 10, net.getLayer(1).numParams());
    assertEquals(10 * 11 + 11, net.getLayer(2).numParams());
    //Check for exceptions...
    net.output(Nd4j.rand(1, 9));
    net.fit(new DataSet(Nd4j.rand(1, 9), Nd4j.rand(1, 11)));
}