

Java LocalResponseNormalization Class Code Examples

This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.LocalResponseNormalization. If you are asking what exactly the LocalResponseNormalization class does, how to use it, or where to find examples of it in use, the curated code examples below should help.


The LocalResponseNormalization class belongs to the org.deeplearning4j.nn.conf.layers package. Seven code examples of the class are shown below, sorted by popularity.
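
Before the examples, a quick orientation: local response normalization (LRN), introduced in the AlexNet paper, normalizes each activation using the summed squared activations of the n adjacent channels at the same spatial position, scaled as (k + alpha * sum)^beta. The minimal sketch below builds a standalone LRN layer configuration against the pre-1.0 DL4J builder API used throughout this page; the hyperparameter values (k = 2, n = 5, alpha = 1e-4, beta = 0.75) are taken from Examples 4 and 5 further down, and the enclosing class is hypothetical.

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization;

public class LrnSketch {
    public static void main(String[] args) {
        // Build a standalone LRN layer configuration; the values follow
        // Examples 4 and 5 below (the classic AlexNet settings).
        LocalResponseNormalization lrn = new LocalResponseNormalization.Builder()
                .k(2)         // additive constant in the denominator
                .n(5)         // number of adjacent channels to sum over
                .alpha(1e-4)  // scaling coefficient
                .beta(0.75)   // exponent
                .build();
        System.out.println(lrn);
    }
}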

Example 1: testMultiCNNLayer

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
@Test
public void testMultiCNNLayer() throws Exception {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list()
                    .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER)
                                    .activation(Activation.RELU).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new DenseLayer.Builder().nOut(2).build())
                    .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(10)
                                    .build())
                    .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();

    network.fit(next);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 22, Source: LocalResponseTest.java

Example 2: KerasLRN

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
/**
 * Constructor from parsed Keras layer configuration dictionary.
 *
 * @param layerConfig               dictionary containing Keras layer configuration
 * @param enforceTrainingConfig     whether to enforce training-related configuration options
 * @throws InvalidKerasConfigurationException if the Keras layer configuration is invalid
 * @throws UnsupportedKerasConfigurationException if the Keras layer configuration is unsupported
 */
public KerasLRN(Map<String, Object> layerConfig, boolean enforceTrainingConfig)
                throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    super(layerConfig, enforceTrainingConfig);
    Map<String, Object> lrnParams = KerasLayerUtils.getInnerLayerConfigFromConfig(layerConfig, conf);

    LocalResponseNormalization.Builder builder = new LocalResponseNormalization.Builder().name(this.layerName)
                    .dropOut(this.dropout).alpha((double) lrnParams.get("alpha"))
                    .beta((double) lrnParams.get("beta")).k((int) lrnParams.get("k")).n((int) lrnParams.get("n"));
    this.layer = builder.build();
    this.vertex = null;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 20, Source: KerasLRN.java
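
For orientation, below is a hypothetical inner-config map of the kind the constructor above consumes after KerasLayerUtils.getInnerLayerConfigFromConfig has unwrapped the parsed Keras JSON. The key names (alpha, beta, k, n) come directly from the lrnParams.get(...) calls above; the concrete values and the helper method itself are illustrative assumptions, not part of the DL4J source.

import java.util.HashMap;
import java.util.Map;

// Hypothetical inner-config map; real maps are produced by parsing Keras JSON.
static Map<String, Object> sampleLrnParams() {
    Map<String, Object> lrnParams = new HashMap<>();
    lrnParams.put("alpha", 1e-4); // read above as (double)
    lrnParams.put("beta", 0.75);  // read above as (double)
    lrnParams.put("k", 2);        // read above as (int)
    lrnParams.put("n", 5);        // read above as (int)
    return lrnParams;
}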

Example 3: testGradientLRNSimple

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
@Test
public void testGradientLRNSimple() {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int depth = 6;
    int hw = 5;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, depth, hw, hw});
    INDArray labels = Nd4j.zeros(minibatch, nOut);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        labels.putScalar(i, r.nextInt(nOut), 1.0);
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .seed(12345L).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(6).kernelSize(2, 2).stride(1, 1)
                                    .activation(Activation.TANH).build())
                    .layer(1, new LocalResponseNormalization.Builder().build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(nOut).build())
                    .setInputType(InputType.convolutional(hw, hw, depth)).pretrain(false).backprop(true);

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

    assertTrue(gradOK);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 38, Source: LRNGradientCheckTests.java

Example 4: doBefore

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
@Before
public void doBefore() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
                    .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build())
                    .build();

    layer = new LocalResponseNormalization().instantiate(conf, null, 0, null, false);
    activationsActual = layer.activate(x);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 11, Source: LocalResponseTest.java
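
Note that layer, x and activationsActual in doBefore are fields of the enclosing test class. As a hypothetical stand-in for x: the LRN layer operates on 4-D CNN activations of shape [minibatch, channels, height, width], so any array of that rank works, for example:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

// Hypothetical input for the test above: random 4-D activations with shape
// [minibatch, channels, height, width]; the concrete sizes are arbitrary.
INDArray x = Nd4j.rand(new int[] {2, 7, 3, 2});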

Example 5: testRegularization

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
@Test
public void testRegularization() {
    // Confirm that building a configuration with L1/L2 regularization enabled does not throw an error

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).l1(0.2)
                    .l2(0.1).seed(123)
                    .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build())
                    .build();
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 11, Source: LocalResponseTest.java

Example 6: alexnetModel

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
public MultiLayerNetwork alexnetModel(int numLabels) {
  /*
   * AlexNet model interpretation based on the original paper, "ImageNet Classification
   * with Deep Convolutional Neural Networks", and the referenced imagenetExample code:
   * http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
   */
  double nonZeroBias = 1;
  double dropOut = 0.5;
  MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
      .seed(seed)
      .weightInit(WeightInit.DISTRIBUTION)
      .dist(new NormalDistribution(0.0, 0.01))
      .activation(Activation.RELU)
      .updater(Updater.NESTEROVS)
      .iterations(iterations)
      .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) // normalize to prevent vanishing or exploding gradients
      .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
      .learningRate(1e-2)
      .biasLearningRate(1e-2*2)
      .learningRateDecayPolicy(LearningRatePolicy.Step)
      .lrPolicyDecayRate(0.1)
      .lrPolicySteps(100000)
      .regularization(true)
      .l2(5 * 1e-4)
      .momentum(0.9)
      .miniBatch(false)
      .list()
      .layer(0, convInit("cnn1", channels, 96, new int[]{11, 11}, new int[]{4, 4}, new int[]{3, 3}, 0))
      .layer(1, new LocalResponseNormalization.Builder().name("lrn1").build())
      .layer(2, maxPool("maxpool1", new int[]{3,3}))
      .layer(3, conv5x5("cnn2", 256, new int[] {1,1}, new int[] {2,2}, nonZeroBias))
      .layer(4, new LocalResponseNormalization.Builder().name("lrn2").build())
      .layer(5, maxPool("maxpool2", new int[]{3,3}))
      .layer(6, conv3x3("cnn3", 384, 0))
      .layer(7, conv3x3("cnn4", 384, nonZeroBias))
      .layer(8, conv3x3("cnn5", 256, nonZeroBias))
      .layer(9, maxPool("maxpool3", new int[]{3,3}))
      .layer(10, fullyConnected("ffn1", 4096, nonZeroBias, dropOut, new GaussianDistribution(0, 0.005)))
      .layer(11, fullyConnected("ffn2", 4096, nonZeroBias, dropOut, new GaussianDistribution(0, 0.005)))
      .layer(12, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
          .name("output")
          .nOut(numLabels)
          .activation(Activation.SOFTMAX)
          .build())
      .backprop(true)
      .pretrain(false)
      .setInputType(InputType.convolutional(height, width, channels))
      .build();
  return new MultiLayerNetwork(conf);
}
 
Developer: MyRobotLab, Project: myrobotlab, Lines: 51, Source: Deeplearning4j.java
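
A hypothetical usage sketch for the factory method above. The seed, iterations, channels, height and width values referenced inside alexnetModel are fields of the surrounding class; numLabels and trainIter below are assumed to be supplied by the caller, with trainIter being any DataSetIterator producing images of that shape.

// Hypothetical usage; trainIter is any DataSetIterator over (channels, height, width) images.
MultiLayerNetwork net = alexnetModel(numLabels);
net.init();
net.setListeners(new ScoreIterationListener(10)); // print the score every 10 iterations
net.fit(trainIter);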

Example 7: getLocalResponseNormalization

import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; // import the required package/class
/**
 * Get the underlying DL4J LocalResponseNormalization layer.
 *
 * @return  LocalResponseNormalization
 */
public LocalResponseNormalization getLocalResponseNormalization() {
    return (LocalResponseNormalization) this.layer;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 9, Source: KerasLRN.java


Note: The org.deeplearning4j.nn.conf.layers.LocalResponseNormalization examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.