

Java Updater Class Code Examples

This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.Updater. If you are wondering what exactly the Updater class does, how to use it, or what real-world usage looks like, the curated class code examples here may help.


The Updater class belongs to the org.deeplearning4j.nn.conf package. The following shows 15 code examples of the Updater class, sorted by popularity by default.
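Before diving into the examples, here is a minimal, self-contained sketch of where the Updater enum fits in a network configuration. It uses the legacy (pre-0.9) DL4J builder API that all of the examples below share; the seed, layer sizes, and learning rate are arbitrary placeholders:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class UpdaterQuickStart {
    public static void main(String[] args) {
        // Updater selects the gradient update rule (SGD, NESTEROVS, ADAGRAD, RMSPROP, ADAM, ...)
        // for the whole network; individual layer builders can override it per layer.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(123)
                .learningRate(0.01)
                .updater(Updater.NESTEROVS).momentum(0.9) // momentum is only used by NESTEROVS
                .weightInit(WeightInit.XAVIER)
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(4).nOut(3)
                        .activation(Activation.SOFTMAX)
                        .build())
                .pretrain(false).backprop(true)
                .build();
        System.out.println(conf.toJson());
    }
}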

Example 1: testUpdaters

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Test
public void testUpdaters() {
    SparkDl4jMultiLayer sparkNet = getBasicNetwork();
    MultiLayerNetwork netCopy = sparkNet.getNetwork().clone();

    netCopy.fit(data);
    Updater expectedUpdater = netCopy.conf().getLayer().getUpdater();
    double expectedLR = netCopy.conf().getLayer().getLearningRate();
    double expectedMomentum = netCopy.conf().getLayer().getMomentum();

    Updater actualUpdater = sparkNet.getNetwork().conf().getLayer().getUpdater();
    sparkNet.fit(sparkData);
    double actualLR = sparkNet.getNetwork().conf().getLayer().getLearningRate();
    double actualMomentum = sparkNet.getNetwork().conf().getLayer().getMomentum();

    assertEquals(expectedUpdater, actualUpdater);
    assertEquals(expectedLR, actualLR, 0.01);
    assertEquals(expectedMomentum, actualMomentum, 0.01);

}
 
Developer: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines: 21, Source: TestSparkMultiLayerParameterAveraging.java

Example 2: getDeepDenseLayerNetworkConfiguration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
/** Returns the network configuration: two hidden DenseLayers of 50 nodes each. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 50;
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
}
 
Developer: IsaacChanghau, Project: NeuralNetworksLite, Lines: 22, Source: RegressionMathFunctions.java

Example 3: getConfiguration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
protected MultiLayerConfiguration getConfiguration() {
    int hiddenLayerNodes = parameters.getHiddeLayerNodes()[0];
    final RBM hiddenLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize()).nOut(hiddenLayerNodes).weightInit(WeightInit.XAVIER).k(1)
            .activation("relu").lossFunction(LossFunctions.LossFunction.RMSE_XENT).updater(Updater.ADAGRAD)
            .dropOut(0.5).build();

    final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(hiddenLayerNodes)
            .nOut(parameters.getOutputSize()).activation("softmax").build();

    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate()).optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4).regularization(true).momentum(0.9).useDropConnect(true).list(2).layer(0, hiddenLayer)
            .layer(1, outputLayer).build();
}
 
Developer: amrabed, Project: DL4J, Lines: 17, Source: DeepBeliefNetworkModel.java

Example 4: getConfiguration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration() {
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(parameters.getLearningRate()).l2(0.001)
            .list(4)
            .layer(0, new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(250).weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(1, new DenseLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(2, new DenseLayer.Builder().nIn(10).nOut(250).weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(3, new OutputLayer.Builder().nIn(250).nOut(parameters.getInputSize()).weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD).activation("relu")
                    .lossFunction(LossFunctions.LossFunction.MSE).build())
            .pretrain(false).backprop(true).build();
}
 
Developer: amrabed, Project: DL4J, Lines: 22, Source: AnomalyDetectionModel.java

Example 5: testCompGraphNullLayer

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Test
public void testCompGraphNullLayer() {
    ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.01))
                    .seed(42).miniBatch(false).l1(0.2).l2(0.2)
                    /* Graph Builder */
                    .updater(Updater.RMSPROP).graphBuilder().addInputs("in")
                    .addLayer("L" + 1,
                                    new GravesLSTM.Builder().nIn(20).updater(Updater.RMSPROP).nOut(10)
                                                    .weightInit(WeightInit.XAVIER)
                                                    .dropOut(0.4).l1(0.3).activation(Activation.SIGMOID).build(),
                                    "in")
                    .addLayer("output",
                                    new RnnOutputLayer.Builder().nIn(20).nOut(10).activation(Activation.SOFTMAX)
                                                    .weightInit(WeightInit.RELU_UNIFORM).build(),
                                    "L" + 1)
                    .setOutputs("output");
    ComputationGraphConfiguration conf = gb.build();
    ComputationGraph cg = new ComputationGraph(conf);
    cg.init();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 22, Source: LayerConfigValidationTest.java
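Note that this test mixes the legacy Updater enum with the newer IUpdater style (new Sgd(0.01)); the later .updater(Updater.RMSPROP) call replaces the earlier setting for the global configuration. In DL4J 0.9 and later the enum is deprecated in favor of the org.nd4j.linalg.learning.config classes. A minimal sketch of the newer style follows; it assumes a recent DL4J version, and the layer sizes are placeholders:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.RmsProp;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class NewStyleUpdaterSketch {
    public static void main(String[] args) {
        // An IUpdater instance carries its own hyperparameters, replacing the
        // .updater(Updater.RMSPROP) + .learningRate(...) pair of the legacy API.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new RmsProp(0.01))
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(10).nOut(3)
                        .activation(Activation.SOFTMAX)
                        .build())
                .build();
        System.out.println(conf.toJson());
    }
}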

Example 6: reconf

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
public void reconf() {

        int seed = 123;
        double learningRate = 0.01;
        int numInputs = 2;
        int numOutputs = 2;
        int numHiddenNodes = 5;

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        model = new MultiLayerNetwork(conf);

        System.out.println("Ready :-)");

        if (dirty != null) {
            dirty.run();
        }

    }
 
Developer: datathings, Project: greycat, Lines: 35, Source: NeuralNetAttribute.java

Example 7: buildModel

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
public void buildModel() {
    //Create the network
    int numInput = 2;
    int numOutputs = 1;
    int nHidden = 10;
    mNetwork = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
            .seed(mSeed)
            .iterations(ITERATIONS)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(LEARNING_RATE)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInput).nOut(nHidden)
                    .activation(Activation.TANH)
                    .name("input")
                    .build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .name("output")
                    .nIn(nHidden).nOut(numOutputs).build())
            .pretrain(false)
            .backprop(true)
            .build()
    );
    mNetwork.init();
    mNetwork.setListeners(mIterationListener);
}
 
Developer: mccorby, Project: FederatedAndroidTrainer, Lines: 29, Source: LinearModel.java

Example 8: configuration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
public static MultiLayerConfiguration configuration() {
	/*
	 * Regarding the .setInputType(InputType.convolutionalFlat(28,28,1)) line: This does a few things. (a) It adds
	 * preprocessors, which handle things like the transition between the convolutional/subsampling layers and the dense
	 * layer (b) Does some additional configuration validation (c) Where necessary, sets the nIn (number of input
	 * neurons, or input depth in the case of CNNs) values for each layer based on the size of the previous layer (but
	 * it won't override values manually set by the user) InputTypes can be used with other layer types too (RNNs, MLPs
	 * etc) not just CNNs. For normal images (when using ImageRecordReader) use
	 * InputType.convolutional(height,width,depth). MNIST record reader is a special case, that outputs 28x28 pixel
	 * grayscale (nChannels=1) images, in a "flattened" row vector format (i.e., 1x784 vectors), hence the
	 * "convolutionalFlat" input type used here.
	 */
	return new NeuralNetConfiguration.Builder().seed(SEED).iterations(NUM_ITERATIONS).regularization(true).l2(0.0005)
			.learningRate(.01)
			// Uncomment the following for a bias learning rate and learning-rate decay:
			// .biasLearningRate(0.02)
			// .learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(0.001).lrPolicyPower(0.75)
			.weightInit(WeightInit.XAVIER).optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
			.updater(Updater.NESTEROVS).momentum(0.9).list()
			.layer(0, new ConvolutionLayer.Builder(5, 5).nIn(NUM_CHANNELS).stride(1, 1).nOut(20)
					.activation(Activation.IDENTITY).build())
			.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2).stride(2, 2).build())
			.layer(2, new ConvolutionLayer.Builder(5, 5).stride(1, 1).nOut(50).activation(Activation.IDENTITY).build())
			.layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2).stride(2, 2).build())
			.layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build())
			.layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(NUM_OUTPUTS)
					.activation(Activation.SOFTMAX).build())
			.setInputType(InputType.convolutionalFlat(28, 28, 1)).backprop(true).pretrain(false).build();
}
 
Developer: braeunlich, Project: anagnostes, Lines: 33, Source: ConfigurationFactory.java
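The long comment in this example distinguishes InputType.convolutionalFlat (MNIST's flattened 1x784 row vectors) from InputType.convolutional (standard image input, e.g. from an ImageRecordReader). As a small illustration of the latter, here is a sketch assuming 64x64 RGB images; the image size and layer widths are placeholders, and the nIn values are deliberately left to be inferred from the input type:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ConvInputTypeSketch {
    public static void main(String[] args) {
        // InputType.convolutional(height, width, depth) adds the CNN-to-dense
        // preprocessors and fills in nIn for each layer automatically.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new ConvolutionLayer.Builder(3, 3).nOut(16)
                        .activation(Activation.RELU).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nOut(10).activation(Activation.SOFTMAX).build())
                .setInputType(InputType.convolutional(64, 64, 3))
                .build();
        System.out.println(conf.toJson());
    }
}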

Example 9: create

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Override
public MultiLayerConfiguration create() {
    int width = imageTransformConfigurationResource.getScaledWidth();
    int height = imageTransformConfigurationResource.getScaledHeight();
    int channels = imageTransformConfigurationResource.getChannels();
    int outputs = networkConfigurationResource.getOutputs();
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(1)
            .learningRate(0.0001)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .regularization(true).l2(1e-3)
            .list()
            .layer(0, new DenseLayer.Builder()
                    .nIn(width * height * channels)
                    .nOut(1200)
                    .build())
            .layer(1, new DenseLayer.Builder()
                    .nIn(1200)
                    .nOut(600)
                    .build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(600)
                    .activation(Activation.SOFTMAX)
                    .nOut(outputs)
                    .build())
            .pretrain(false).backprop(true)
            .setInputType(InputType.convolutional(height, width, channels))
            .build();
}
 
Developer: scaliby, Project: ceidg-captcha, Lines: 34, Source: MultiLayerConfigurationFactoryImpl.java

Example 10: testSmallAmountOfData

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Test
public void testSmallAmountOfData(){
    

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(Updater.RMSPROP)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
            .list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                    .nIn(nIn).nOut(3)
                    .activation("tanh").build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .nIn(3).nOut(nOut)
                    .activation("softmax")
                    .build())
            .build();

    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc,conf,new ParameterAveragingTrainingMaster(true,numExecutors(),1,10,1,0));

    Nd4j.getRandom().setSeed(12345);
    DataSet d1 = new DataSet(Nd4j.rand(1,nIn),Nd4j.rand(1,nOut));
    DataSet d2 = new DataSet(Nd4j.rand(1,nIn),Nd4j.rand(1,nOut));

    JavaRDD<DataSet> rddData = sc.parallelize(Arrays.asList(d1,d2));

    sparkNet.fit(rddData);

}
 
Developer: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines: 29, Source: TestSparkMultiLayerParameterAveraging.java

Example 11: main

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
public static void main(String[] args){

        //Generate the training data
        DataSetIterator iterator = getTrainingData(batchSize,rng);

        //Create the network
        int numInput = 2;
        int numOutputs = 1;
        int nHidden = 10;
        MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(iterations)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .weightInit(WeightInit.XAVIER)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(numInput).nOut(nHidden)
                        .activation(Activation.TANH)
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY)
                        .nIn(nHidden).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build()
        );
        net.init();
        net.setListeners(new ScoreIterationListener(1));


        //Train the network on the full data set, and evaluate it periodically
        for( int i=0; i<nEpochs; i++ ){
            iterator.reset();
            net.fit(iterator);
        }
        // Test the addition of 2 numbers (Try different numbers here)
        final INDArray input = Nd4j.create(new double[] { 0.111111, 0.3333333333333 }, new int[] { 1, 2 });
        INDArray out = net.output(input, false);
        System.out.println(out);

    }
 
Developer: IsaacChanghau, Project: NeuralNetworksLite, Lines: 41, Source: RegressionSum.java

Example 12: createNet

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
/**
 * Sets up the network: 6 input and output dimensions (the mammography data has 6 feature dimensions).
 * 6 -> intermediate-dim -> core-dim -> intermediate-dim -> 6
 */
private static MultiLayerNetwork createNet(int intermediateDimensions, int coreDimensions) {
	int inputDimensions = 6;
	int outputDimensions = inputDimensions;

	MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
			.seed(12345)
			.iterations(1)
			.weightInit(WeightInit.XAVIER)
			.updater(Updater.ADAGRAD)
			.activation(Activation.RELU)
			.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
			.learningRate(0.05)
			.regularization(true).l2(0.0001)
			.list()
			.layer(0, new DenseLayer.Builder()
					.nIn(inputDimensions)
					.nOut(intermediateDimensions)
					.build())
			.layer(1, new DenseLayer.Builder()
					.nIn(intermediateDimensions)
					.nOut(coreDimensions)
					.build())
			.layer(2, new DenseLayer.Builder()
					.nIn(coreDimensions)
					.nOut(intermediateDimensions)
					.build())
			.layer(3, new OutputLayer.Builder()
					.nIn(intermediateDimensions)
					.nOut(outputDimensions)
					.lossFunction(LossFunctions.LossFunction.MSE)
					.build())
			.pretrain(false)
			.backprop(true)
			.build();

	return new MultiLayerNetwork(conf);
}
 
Developer: matthiaszimmermann, Project: ml_demo, Lines: 43, Source: MammographyAutoencoder.java

Example 13: getConfiguration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration() {
    final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes();
    final int nLayers = hiddenLayerNodes.length + 1;

    final ListBuilder list = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(parameters.getIterations()).learningRate(parameters.getLearningRate()).rmsDecay(0.95)
            .seed(parameters.getSeed()).regularization(true).l2(0.001).list(nLayers).pretrain(false).backprop(true);

    for (int i = 0; i < nLayers; i++)
    {
        // The first layer reads the raw input; later layers read the previous hidden layer
        final int nIn = (i == 0) ? parameters.getInputSize() : hiddenLayerNodes[i - 1];
        if (i < nLayers - 1)
        {
            final GravesLSTM layer = new GravesLSTM.Builder().nIn(nIn).nOut(hiddenLayerNodes[i])
                    .updater(Updater.RMSPROP).activation("tanh").weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, layer);
        }
        else
        {
            // The output layer's nIn is the size of the last hidden layer
            final RnnOutputLayer outputLayer = new RnnOutputLayer.Builder(LossFunction.MCXENT).activation("softmax")
                    .updater(Updater.RMSPROP).nIn(hiddenLayerNodes[hiddenLayerNodes.length - 1])
                    .nOut(parameters.getOutputSize()).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, outputLayer);
        }
    }
    return list.build();
}
 
Developer: amrabed, Project: DL4J, Lines: 40, Source: LstmModel.java

Example 14: getConfiguration

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
private static MultiLayerConfiguration getConfiguration() {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(11L) // Seed to lock in weight initialization for tuning
                .iterations(100) // # training iterations predict/classify & backprop
                .learningRate(1e-3f) // Optimization step size
                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) // Backprop method (calculate the gradients)
                .momentum(0.9)
                .constrainGradientToUnitNorm(true)
                .useDropConnect(true)
                .list(2) // # NN layers (does not count input layer)
                .layer(0, new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
                                .nIn(4) // # input nodes
                                .nOut(3) // # fully connected hidden layer nodes. Add list if multiple layers.
                                .weightInit(WeightInit.XAVIER)
                                .activation("relu")
                                .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                                .updater(Updater.ADAGRAD)
                                .k(1) // # contrastive divergence iterations
                                .dropOut(0.5)
                                .build()
                ) // NN layer type
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .nIn(3) // # input nodes
                                .nOut(3) // # output nodes
                                .activation("softmax")
                                .weightInit(WeightInit.XAVIER)
                                .updater(Updater.ADAGRAD)
                                .dropOut(0.5)
                                .build()
                ) // NN layer type
                .build();

        return conf;
    }
 
Developer: javadba, Project: dl4j-spark-ml-examples, Lines: 36, Source: JavaIrisClassification.java

Example 15: lenetModel

import org.deeplearning4j.nn.conf.Updater; // import the required package/class
public MultiLayerNetwork lenetModel(int numLabels) {
  /**
   * Revised LeNet model approach developed by ramgo2; achieves slightly above random accuracy.
   * Reference: https://gist.github.com/ramgo2/833f12e92359a2da9e5c2fb6333351c5
   **/
  MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
      .seed(seed)
      .iterations(iterations)
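      // NOTE: with regularization(false), older DL4J versions do not apply the l2 value below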
      .regularization(false).l2(0.005) // tried 0.0001, 0.0005
      .activation(Activation.RELU)
      .learningRate(0.0001) // tried 0.00001, 0.00005, 0.000001
      .weightInit(WeightInit.XAVIER)
      .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
      .updater(Updater.RMSPROP).momentum(0.9)
      .list()
      .layer(0, convInit("cnn1", channels, 50 ,  new int[]{5, 5}, new int[]{1, 1}, new int[]{0, 0}, 0))
      .layer(1, maxPool("maxpool1", new int[]{2,2}))
      .layer(2, conv5x5("cnn2", 100, new int[]{5, 5}, new int[]{1, 1}, 0))
      .layer(3, maxPool("maxool2", new int[]{2,2}))
      .layer(4, new DenseLayer.Builder().nOut(500).build())
      .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
          .nOut(numLabels)
          .activation(Activation.SOFTMAX)
          .build())
      .backprop(true).pretrain(false)
      .setInputType(InputType.convolutional(height, width, channels))
      .build();

  return new MultiLayerNetwork(conf);
}
 
Developer: MyRobotLab, Project: myrobotlab, Lines: 31, Source: Deeplearning4j.java


Note: The org.deeplearning4j.nn.conf.Updater class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.