当前位置: 首页>>代码示例>>Java>>正文


Java OutputLayer类代码示例

本文整理汇总了Java中org.deeplearning4j.nn.conf.layers.OutputLayer的典型用法代码示例。如果您正苦于以下问题:Java OutputLayer类的具体用法?Java OutputLayer怎么用?Java OutputLayer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


OutputLayer类属于org.deeplearning4j.nn.conf.layers包,在下文中一共展示了OutputLayer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: softMaxRegression

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a single-layer softmax-regression network: one OutputLayer with
 * negative log-likelihood loss over numRows * numColumns inputs and
 * outputNum classes. Note: the returned network is not initialised; the
 * caller is expected to invoke init() before use.
 */
private static MultiLayerNetwork softMaxRegression(int seed,
		int iterations, int numRows, int numColumns, int outputNum) {
	// The sole layer: softmax classification over the flattened input.
	OutputLayer softmaxLayer = new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
			.activation("softmax")
			.nIn(numColumns * numRows)
			.nOut(outputNum)
			.build();

	MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder()
			.seed(seed)
			.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
			.gradientNormalizationThreshold(1.0)
			.iterations(iterations)
			.momentum(0.5)
			// Raise momentum from 0.5 to 0.9 after epoch 3.
			.momentumAfter(Collections.singletonMap(3, 0.9))
			.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
			.list(1)
			.layer(0, softmaxLayer)
			.pretrain(true).backprop(false)
			.build();

	return new MultiLayerNetwork(configuration);
}
 
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:25,代码来源:NeuralNetworks.java

示例2: getDeepDenseLayerNetworkConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Returns the network configuration: two tanh hidden DenseLayers of size 50
 * followed by an identity-activation MSE output layer (regression setup).
 */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int hiddenWidth = 50;

    DenseLayer firstHidden = new DenseLayer.Builder()
            .nIn(numInputs).nOut(hiddenWidth)
            .activation(Activation.TANH).build();
    DenseLayer secondHidden = new DenseLayer.Builder()
            .nIn(hiddenWidth).nOut(hiddenWidth)
            .activation(Activation.TANH).build();
    // Identity activation + MSE loss: plain regression output.
    OutputLayer regressionOutput = new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
            .activation(Activation.IDENTITY)
            .nIn(hiddenWidth).nOut(numOutputs).build();

    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, firstHidden)
            .layer(1, secondHidden)
            .layer(2, regressionOutput)
            .pretrain(false).backprop(true).build();
}
 
开发者ID:IsaacChanghau,项目名称:NeuralNetworksLite,代码行数:22,代码来源:RegressionMathFunctions.java

示例3: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Deep-belief-network configuration: one RBM layer (Gaussian visible units,
 * rectified hidden units, CD-1) followed by a softmax classification output.
 */
protected MultiLayerConfiguration getConfiguration()
   {
final int rbmWidth = parameters.getHiddeLayerNodes()[0];

final RBM rbmLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
	.nIn(parameters.getInputSize())
	.nOut(rbmWidth)
	.weightInit(WeightInit.XAVIER)
	.k(1) // one step of contrastive divergence
	.activation("relu")
	.lossFunction(LossFunctions.LossFunction.RMSE_XENT)
	.updater(Updater.ADAGRAD)
	.dropOut(0.5)
	.build();

final OutputLayer softmaxOutput = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
	.nIn(rbmWidth)
	.nOut(parameters.getOutputSize())
	.activation("softmax")
	.build();

return new NeuralNetConfiguration.Builder()
	.seed(parameters.getSeed())
	.iterations(parameters.getIterations())
	.learningRate(parameters.getLearningRate())
	.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
	.l2(2e-4).regularization(true)
	.momentum(0.9)
	.useDropConnect(true)
	.list(2)
	.layer(0, rbmLayer)
	.layer(1, softmaxOutput)
	.build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:17,代码来源:DeepBeliefNetworkModel.java

示例4: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Autoencoder-shaped anomaly-detection network: dense 250 -> 10 -> 250
 * layers with an MSE reconstruction output mapping back to the input size.
 */
@Override
   protected MultiLayerConfiguration getConfiguration()
   {
final int wide = 250;       // encoder/decoder width
final int bottleneck = 10;  // compressed representation size

final DenseLayer encoder = new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(wide)
	.weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
final DenseLayer compress = new DenseLayer.Builder().nIn(wide).nOut(bottleneck)
	.weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
final DenseLayer decoder = new DenseLayer.Builder().nIn(bottleneck).nOut(wide)
	.weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu").build();
// Reconstruction output: MSE against the original input vector.
final OutputLayer reconstruction = new OutputLayer.Builder().nIn(wide).nOut(parameters.getInputSize())
	.weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD).activation("relu")
	.lossFunction(LossFunctions.LossFunction.MSE).build();

return new NeuralNetConfiguration.Builder()
	.seed(parameters.getSeed())
	.iterations(parameters.getIterations())
	.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
	.learningRate(parameters.getLearningRate())
	.l2(0.001)
	.list(4)
	.layer(0, encoder)
	.layer(1, compress)
	.layer(2, decoder)
	.layer(3, reconstruction)
	.pretrain(false).backprop(true).build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:22,代码来源:AnomalyDetectionModel.java

示例5: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Stacked-autoencoder configuration: three denoising autoencoder layers
 * (500 -> 250 -> 200 units, RMSE cross-entropy loss, 30% input corruption)
 * topped with a softmax classification output; layer-wise pretraining enabled.
 */
@Override
   protected MultiLayerConfiguration getConfiguration()
   {
final AutoEncoder encoder0 = new AutoEncoder.Builder().nIn(parameters.getInputSize()).nOut(500)
	.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
	.corruptionLevel(0.3).build();
final AutoEncoder encoder1 = new AutoEncoder.Builder().nIn(500).nOut(250)
	.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
	.corruptionLevel(0.3).build();
final AutoEncoder encoder2 = new AutoEncoder.Builder().nIn(250).nOut(200)
	.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
	.corruptionLevel(0.3).build();
final OutputLayer classifier = new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
	.activation("softmax").nIn(200).nOut(parameters.getOutputSize()).build();

return new NeuralNetConfiguration.Builder()
	.seed(parameters.getSeed())
	.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
	.gradientNormalizationThreshold(1.0)
	.iterations(parameters.getIterations())
	.momentum(0.5)
	// Raise momentum from 0.5 to 0.9 after epoch 3.
	.momentumAfter(Collections.singletonMap(3, 0.9))
	.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
	.list(4)
	.layer(0, encoder0)
	.layer(1, encoder1)
	.layer(2, encoder2)
	.layer(3, classifier)
	.pretrain(true).backprop(false).build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:23,代码来源:StackedAutoEncoderModel.java

示例6: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Minimal convolutional configuration: one 1x1 convolution (1000 feature
 * maps) followed by a softmax output; ConvolutionLayerSetup wires the
 * image-shape-dependent values into the builder before build().
 */
@Override
   protected MultiLayerConfiguration getConfiguration()
   {
final ConvulationalNetParameters convParams = (ConvulationalNetParameters) this.parameters;

final ConvolutionLayer convolution = new ConvolutionLayer.Builder(new int[] { 1, 1 })
	.nIn(convParams.getInputSize()).nOut(1000)
	.activation("relu").weightInit(WeightInit.RELU).build();
final OutputLayer softmaxOutput = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
	.nOut(convParams.getOutputSize())
	.weightInit(WeightInit.XAVIER).activation("softmax").build();

final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
	.seed(convParams.getSeed())
	.iterations(convParams.getIterations())
	.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
	.list(2)
	.layer(0, convolution)
	.layer(1, softmaxOutput)
	.backprop(true).pretrain(false);

// Mutates the builder: derives nIn values / preprocessors from the image shape.
new ConvolutionLayerSetup(builder, convParams.getRows(), convParams.getColumns(), convParams.getChannels());

return builder.build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:20,代码来源:ConvolutionalNetModel.java

示例7: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Convolutional configuration: a 10x10 stride-2 convolution (6 feature maps),
 * a 2x2 max-pooling layer, and a softmax output with negative log-likelihood
 * loss; ConvolutionLayerSetup wires image-shape-dependent values into the
 * builder before build().
 */
@Override
   protected MultiLayerConfiguration getConfiguration()
   {
final ConvulationalNetParameters convParams = (ConvulationalNetParameters) this.parameters;

final ConvolutionLayer convolution = new ConvolutionLayer.Builder(10, 10)
	.stride(2, 2)
	.nIn(convParams.getChannels()).nOut(6)
	.weightInit(WeightInit.XAVIER).activation("relu").build();
final SubsamplingLayer maxPool =
	new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build();
final OutputLayer softmaxOutput = new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
	.nOut(convParams.getOutputSize())
	.weightInit(WeightInit.XAVIER).activation("softmax").build();

final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
	.seed(convParams.getSeed())
	.iterations(convParams.getIterations())
	.gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
	.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
	.list(3)
	.layer(0, convolution)
	.layer(1, maxPool)
	.layer(2, softmaxOutput)
	.backprop(true).pretrain(false);

// Mutates the builder: derives nIn values / preprocessors from the image shape.
new ConvolutionLayerSetup(builder, convParams.getRows(), convParams.getColumns(), convParams.getChannels());

return builder.build();
   }
 
开发者ID:amrabed,项目名称:DL4J,代码行数:21,代码来源:ConvolutionalNetModel.java

示例8: net

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds a 3-layer classifier configuration: two width-3 dense layers using
 * the network default ReLU activation, then a softmax output trained with
 * negative log-likelihood.
 */
private static MultiLayerConfiguration net(int nIn, int nOut) {
    DenseLayer firstHidden = new DenseLayer.Builder().nIn(nIn).nOut(3).build();
    DenseLayer secondHidden = new DenseLayer.Builder().nIn(3).nOut(3).build();
    OutputLayer classifier = new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
            .activation(Activation.SOFTMAX)
            .nIn(3)
            .nOut(nOut)
            .build();

    return new NeuralNetConfiguration.Builder()
            .seed(42)
            .iterations(1)
            .activation(Activation.RELU) // default for layers without an explicit activation
            .weightInit(WeightInit.XAVIER)
            .learningRate(0.1)
            .regularization(true).l2(1e-4)
            .list(firstHidden, secondHidden, classifier)
            .build();
}
 
开发者ID:wmeddie,项目名称:dl4j-trainer-archetype,代码行数:20,代码来源:Train.java

示例9: getConfiguration

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Deep-belief-network configuration for LFW classification: three stacked
 * binary-binary RBMs (600 -> 250 -> 200 units) with a softmax output whose
 * class count is resolved automatically; layer-wise pretraining enabled.
 * The dataset argument is currently unused by the configuration itself.
 */
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {

        RBM rbm0 = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                .weightInit(WeightInit.XAVIER)
                .nIn(rows * columns).nOut(600).build();
        RBM rbm1 = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                .weightInit(WeightInit.XAVIER)
                .nIn(600).nOut(250).build();
        RBM rbm2 = new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                .weightInit(WeightInit.XAVIER)
                .nIn(250).nOut(200).build();
        OutputLayer classifier = new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                .weightInit(WeightInit.XAVIER)
                .activation("softmax")
                .nIn(200).nOut(AUTOMATIC).build();

        return new NeuralNetConfiguration.Builder()
                .seed(seed)
                .constrainGradientToUnitNorm(true)
                .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
                .list(4)
                .layer(0, rbm0)
                .layer(1, rbm1)
                .layer(2, rbm2)
                .layer(3, classifier)
                .pretrain(true).backprop(false)
                .build();
    }
 
开发者ID:javadba,项目名称:dl4j-spark-ml-examples,代码行数:26,代码来源:JavaLfwClassification.java

示例10: testSmallAmountOfData

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testSmallAmountOfData() {
    // Spark training where some executors receive no data: only 2 DataSets
    // are parallelized while running with more executors than examples (local[*]).

    org.deeplearning4j.nn.conf.layers.DenseLayer hiddenLayer =
            new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                    .nIn(nIn).nOut(3).activation(Activation.TANH).build();
    org.deeplearning4j.nn.conf.layers.OutputLayer outputLayer =
            new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .nIn(3).nOut(nOut).activation(Activation.SOFTMAX).build();

    MultiLayerConfiguration networkConf = new NeuralNetConfiguration.Builder()
            .updater(new RmsProp())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list()
            .layer(0, hiddenLayer)
            .layer(1, outputLayer)
            .build();

    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, networkConf,
                    new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 10, 1, 0));

    Nd4j.getRandom().setSeed(12345);
    DataSet firstExample = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));
    DataSet secondExample = new DataSet(Nd4j.rand(1, nIn), Nd4j.rand(1, nOut));

    JavaRDD<DataSet> trainingData = sc.parallelize(Arrays.asList(firstExample, secondExample));

    // Passes if fitting completes without error despite empty executors.
    sparkNet.fit(trainingData);

}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:27,代码来源:TestSparkMultiLayerParameterAveraging.java

示例11: testNoImprovementNEpochsTermination

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testNoImprovementNEpochsTermination() {
    // Terminate training when the test-set score fails to improve for 5
    // consecutive epochs; simulated by training with a learning rate of 0.0
    // so the score can never change.

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration networkConf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Sgd(0.0))
            .weightInit(WeightInit.XAVIER)
            .list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .pretrain(false).backprop(true).build();

    MultiLayerNetwork network = new MultiLayerNetwork(networkConf);
    network.setListeners(new ScoreIterationListener(1));

    // Full Iris dataset in a single batch of 150 examples.
    DataSetIterator irisIter = new IrisDataSetIterator(150, 150);

    EarlyStoppingModelSaver<MultiLayerNetwork> modelSaver = new InMemoryModelSaver<>();
    EarlyStoppingConfiguration<MultiLayerNetwork> stoppingConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(new MaxEpochsTerminationCondition(100),
                            new ScoreImprovementEpochTerminationCondition(5))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(3, TimeUnit.SECONDS),
                            new MaxScoreIterationTerminationCondition(7.5)) //Initial score is ~2.5
                    .scoreCalculator(new DataSetLossCalculator(irisIter, true))
                    .modelSaver(modelSaver)
                    .build();

    IEarlyStoppingTrainer trainer = new EarlyStoppingTrainer(stoppingConf, network, irisIter);
    EarlyStoppingResult result = trainer.fit();

    // Expect no score change due to 0 LR -> terminate after 6 total epochs,
    // with the best model being the one from epoch 0.
    assertEquals(6, result.getTotalEpochs());
    assertEquals(0, result.getBestModelEpoch());
    assertEquals(EarlyStoppingResult.TerminationReason.EpochTerminationCondition, result.getTerminationReason());
    String expectedDetails = new ScoreImprovementEpochTerminationCondition(5).toString();
    assertEquals(expectedDetails, result.getTerminationDetails());
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:39,代码来源:TestEarlyStopping.java

示例12: getOriginalGraph

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
/**
 * Builds and initialises a small computation graph ("in" -> layers "0".."5"):
 * conv -> subsampling -> conv -> two dense layers -> MSE output, configured
 * for flattened 28x28x1 convolutional input with 'Same' convolution mode.
 */
public static ComputationGraph getOriginalGraph(int seed){
    ConvolutionLayer conv0 = new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build();
    SubsamplingLayer pool1 = new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build();
    ConvolutionLayer conv2 = new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build();
    DenseLayer dense3 = new DenseLayer.Builder().nOut(64).build();
    DenseLayer dense4 = new DenseLayer.Builder().nIn(64).nOut(64).build();
    OutputLayer output5 = new OutputLayer.Builder().nIn(64).nOut(10)
            .lossFunction(LossFunctions.LossFunction.MSE).build();

    ComputationGraphConfiguration graphConf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .convolutionMode(ConvolutionMode.Same)
            .updater(new Sgd(0.3))
            .graphBuilder()
            .addInputs("in")
            .layer("0", conv0, "in")
            .layer("1", pool1, "0")
            .layer("2", conv2, "1")
            .layer("3", dense3, "2")
            .layer("4", dense4, "3")
            .layer("5", output5, "4")
            .setOutputs("5")
            .setInputTypes(InputType.convolutionalFlat(28,28,1))
            .build();


    ComputationGraph graph = new ComputationGraph(graphConf);
    graph.init();
    return graph;
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:25,代码来源:TestFrozenLayers.java

示例13: testSetGetUpdater2

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testSetGetUpdater2() {
    //Same as above test, except that we are doing setUpdater on a new network
    Nd4j.getRandom().setSeed(12345L);
    double learningRate = 0.03;
    int inputCount = 4;
    int outputCount = 8;

    // Each layer overrides the network-level Nesterovs updater with its own.
    MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder()
            .updater(new Nesterovs(learningRate,0.6))
            .list()
            .layer(0, new DenseLayer.Builder().nIn(inputCount).nOut(5)
                    .updater(org.deeplearning4j.nn.conf.Updater.SGD).build())
            .layer(1, new DenseLayer.Builder().nIn(5).nOut(6)
                    .updater(new NoOp()).build())
            .layer(2, new DenseLayer.Builder().nIn(6).nOut(7)
                    .updater(org.deeplearning4j.nn.conf.Updater.ADAGRAD).build())
            .layer(3, new OutputLayer.Builder().nIn(7).nOut(outputCount)
                    .updater(org.deeplearning4j.nn.conf.Updater.NESTEROVS).build())
            .backprop(true).pretrain(false).build();

    MultiLayerNetwork network = new MultiLayerNetwork(configuration);
    network.init();

    Updater replacement = UpdaterCreator.getUpdater(network);
    network.setUpdater(replacement);
    assertTrue(replacement == network.getUpdater()); //Should be identical object
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:27,代码来源:TestUpdaters.java

示例14: testJsonComputationGraph

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void testJsonComputationGraph() {
    //ComputationGraph with a custom layer; check JSON and YAML config actually works...

    ComputationGraphConfiguration original = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new CustomLayer(3.14159), "0")
            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(10).nOut(10).build(), "1")
            .setOutputs("2")
            .pretrain(false).backprop(true).build();

    String asJson = original.toJson();
    String asYaml = original.toYaml();

    System.out.println(asJson);

    // Both serialized forms must deserialize back to an equal configuration.
    ComputationGraphConfiguration fromJson = ComputationGraphConfiguration.fromJson(asJson);
    assertEquals(original, fromJson);

    ComputationGraphConfiguration fromYaml = ComputationGraphConfiguration.fromYaml(asYaml);
    assertEquals(original, fromYaml);
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:24,代码来源:TestCustomLayers.java

示例15: checkInitializationFF

import org.deeplearning4j.nn.conf.layers.OutputLayer; //导入依赖的package包/类
@Test
public void checkInitializationFF() {
    //Actually create a network with a custom layer; check initialization and forward pass

    MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder().list()
            .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).build())
            .layer(1, new CustomLayer(3.14159)) //hard-coded nIn/nOut of 10
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(11).build())
            .pretrain(false).backprop(true).build();

    MultiLayerNetwork network = new MultiLayerNetwork(configuration);
    network.init();

    // Parameter counts: weights (nIn * nOut) plus biases (nOut) per layer.
    assertEquals(9 * 10 + 10, network.getLayer(0).numParams());
    assertEquals(10 * 10 + 10, network.getLayer(1).numParams());
    assertEquals(10 * 11 + 11, network.getLayer(2).numParams());

    //Check for exceptions...
    network.output(Nd4j.rand(1, 9));
    network.fit(new DataSet(Nd4j.rand(1, 9), Nd4j.rand(1, 11)));
}
 
开发者ID:deeplearning4j,项目名称:deeplearning4j,代码行数:21,代码来源:TestCustomLayers.java


注:本文中的org.deeplearning4j.nn.conf.layers.OutputLayer类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。