

Java NeuralNetConfiguration.getLayer Method Code Examples

This article collects typical usages of the Java method org.deeplearning4j.nn.conf.NeuralNetConfiguration.getLayer, gathered from open-source projects. If you are wondering what NeuralNetConfiguration.getLayer does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.deeplearning4j.nn.conf.NeuralNetConfiguration.


The following shows 15 code examples of the NeuralNetConfiguration.getLayer method, drawn from open-source projects and ordered by popularity by default.
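Before the examples, here is a minimal sketch of the pattern they all share: getLayer() returns the stored layer configuration as the abstract Layer type, and callers cast it to the concrete layer class to reach layer-specific getters. The single-layer DenseLayer setup below is illustrative only (assuming the same pre-1.0 deeplearning4j API as the examples that follow), not taken from any of them.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;

public class GetLayerSketch {
    public static void main(String[] args) {
        // Build a single-layer configuration; the builder stores the Layer object.
        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                        .layer(new DenseLayer.Builder().nIn(10).nOut(5).build())
                        .build();

        // getLayer() returns the abstract Layer; cast it to the concrete type
        // to access layer-specific configuration such as nIn/nOut.
        DenseLayer dense = (DenseLayer) conf.getLayer();
        System.out.println("nIn = " + dense.getNIn() + ", nOut = " + dense.getNOut());
    }
}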

Example 1: getGradientsFromFlattened

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) {
    BatchNormalization layer = (BatchNormalization) conf.getLayer();
    int nOut = layer.getNOut();

    Map<String, INDArray> out = new LinkedHashMap<>();
    int meanOffset = 0;
    if (!layer.isLockGammaBeta()) {
        INDArray gammaView = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nOut));
        INDArray betaView = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(nOut, 2 * nOut));
        out.put(GAMMA, gammaView);
        out.put(BETA, betaView);
        meanOffset = 2 * nOut;
    }

    out.put(GLOBAL_MEAN,
                    gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(meanOffset, meanOffset + nOut)));
    out.put(GLOBAL_VAR, gradientView.get(NDArrayIndex.point(0),
                    NDArrayIndex.interval(meanOffset + nOut, meanOffset + 2 * nOut)));

    return out;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 23, Source: BatchNormalizationParamInitializer.java
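Note: as the code above shows, the flattened gradient view is laid out as [gamma | beta | mean | var] when gamma and beta are learned, and shrinks to [mean | var] when they are locked, which is why meanOffset is either 2 * nOut or 0.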

Example 2: testForwardPass

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Test
public void testForwardPass() {

    int[][] encLayerSizes = new int[][] {{12}, {12, 13}, {12, 13, 14}};
    for (int i = 0; i < encLayerSizes.length; i++) {

        MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list().layer(0,
                        new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder().nIn(10)
                                        .nOut(5).encoderLayerSizes(encLayerSizes[i]).decoderLayerSizes(13).build())
                        .build();

        NeuralNetConfiguration c = mlc.getConf(0);
        org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae =
                        (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer();

        MultiLayerNetwork net = new MultiLayerNetwork(mlc);
        net.init();

        INDArray in = Nd4j.rand(1, 10);

        //        net.output(in);
        List<INDArray> out = net.feedForward(in);
        assertArrayEquals(new int[] {1, 10}, out.get(0).shape());
        assertArrayEquals(new int[] {1, 5}, out.get(1).shape());
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 27, Source: TestVAE.java

Example 3: mlpToCG

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
/**
 * Convert a MultiLayerConfiguration into a ComputationGraph.
 *
 * @param mlc Layer-wise configuration
 * @param shape Input shape
 * @return ComputationGraph based on the configuration in the MLC
 */
default ComputationGraph mlpToCG(MultiLayerConfiguration mlc, int[][] shape) {
  ComputationGraphConfiguration.GraphBuilder builder =
      new NeuralNetConfiguration.Builder()
          .trainingWorkspaceMode(WorkspaceMode.SEPARATE)
          .inferenceWorkspaceMode(WorkspaceMode.SEPARATE)
          .graphBuilder();
  List<NeuralNetConfiguration> confs = mlc.getConfs();

  // Start with input
  String currentInput = "input";
  builder.addInputs(currentInput);

  // Iterate MLN configurations layer-wise
  for (NeuralNetConfiguration conf : confs) {
    Layer l = conf.getLayer();
    String lName = l.getLayerName();

    // Connect current layer with last layer
    builder.addLayer(lName, l, currentInput);
    currentInput = lName;
  }
  builder.setOutputs(currentInput);

  // Configure inputs
  builder.setInputTypes(InputType.convolutional(shape[0][1], shape[0][2], shape[0][0]));

  // Build
  ComputationGraphConfiguration cgc = builder.build();
  return new ComputationGraph(cgc);
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines of code: 38, Source: ZooModel.java
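A hypothetical call site for this conversion (the zooModel instance and shape values are illustrative, not part of the source above): shape[0] holds {channels, height, width}, matching the InputType.convolutional(height, width, channels) call in the method.

// zooModel is any instance implementing this interface; the values are illustrative.
ComputationGraph cg = zooModel.mlpToCG(mlc, new int[][] {{3, 224, 224}});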

Example 4: getGradientsFromFlattened

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) {
    org.deeplearning4j.nn.conf.layers.GravesLSTM layerConf =
                    (org.deeplearning4j.nn.conf.layers.GravesLSTM) conf.getLayer();

    int nL = layerConf.getNOut(); //i.e., n neurons in this layer
    int nLast = layerConf.getNIn(); //i.e., n neurons in previous layer

    int length = numParams(conf);
    if (gradientView.length() != length)
        throw new IllegalStateException(
                        "Expected gradient view of length " + length + ", got length " + gradientView.length());

    int nParamsIn = nLast * (4 * nL);
    int nParamsRecurrent = nL * (4 * nL + 3);
    int nBias = 4 * nL;
    INDArray inputWeightGradView = gradientView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nParamsIn))
                    .reshape('f', nLast, 4 * nL);
    INDArray recurrentWeightGradView = gradientView
                    .get(NDArrayIndex.point(0), NDArrayIndex.interval(nParamsIn, nParamsIn + nParamsRecurrent))
                    .reshape('f', nL, 4 * nL + 3);
    INDArray biasGradView = gradientView.get(NDArrayIndex.point(0),
                    NDArrayIndex.interval(nParamsIn + nParamsRecurrent, nParamsIn + nParamsRecurrent + nBias)); //already a row vector

    Map<String, INDArray> out = new LinkedHashMap<>();
    out.put(INPUT_WEIGHT_KEY, inputWeightGradView);
    out.put(RECURRENT_WEIGHT_KEY, recurrentWeightGradView);
    out.put(BIAS_KEY, biasGradView);

    return out;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 32, Source: GravesLSTMParamInitializer.java
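The recurrent weight view has shape [nL, 4 * nL + 3]: 4 * nL columns for the recurrent weights of the input, forget, output, and cell-input blocks, plus 3 extra columns for the peephole connections that GravesLSTM adds to the input, forget, and output gates.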

Example 5: testPretrainSimple

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Test
public void testPretrainSimple() {

    int inputSize = 3;

    MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .nIn(inputSize).nOut(4).encoderLayerSizes(5).decoderLayerSizes(6).build())
                    .pretrain(true).backprop(false).build();

    NeuralNetConfiguration c = mlc.getConf(0);
    org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae =
                    (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer();

    int allParams = vae.initializer().numParams(c);

    MultiLayerNetwork net = new MultiLayerNetwork(mlc);
    net.init();
    net.initGradientsView(); //TODO this should happen automatically

    Map<String, INDArray> paramTable = net.getLayer(0).paramTable();
    Map<String, INDArray> gradTable =
                    ((org.deeplearning4j.nn.layers.variational.VariationalAutoencoder) net.getLayer(0))
                                    .getGradientViews();

    assertEquals(paramTable.keySet(), gradTable.keySet());
    for (String s : paramTable.keySet()) {
        assertEquals(paramTable.get(s).length(), gradTable.get(s).length());
        assertArrayEquals(paramTable.get(s).shape(), gradTable.get(s).shape());
    }

    System.out.println("Num params: " + net.numParams());

    INDArray data = Nd4j.rand(1, inputSize);


    net.fit(data);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 39, Source: TestVAE.java

Example 6: init

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    SeparableConvolution2D layerConf = (SeparableConvolution2D) conf.getLayer();
    if (layerConf.getKernelSize().length != 2)
        throw new IllegalArgumentException("Kernel size must have length 2");

    Map<String, INDArray> params = Collections.synchronizedMap(new LinkedHashMap<String, INDArray>());

    int depthWiseParams = numDepthWiseParams(layerConf);
    int biasParams = numBiasParams(layerConf);

    INDArray depthWiseWeightView = paramsView.get(
            NDArrayIndex.point(0), NDArrayIndex.interval(biasParams, biasParams + depthWiseParams));
    INDArray pointWiseWeightView = paramsView.get(
            NDArrayIndex.point(0), NDArrayIndex.interval(biasParams + depthWiseParams, numParams(conf)));

    params.put(DEPTH_WISE_WEIGHT_KEY, createDepthWiseWeightMatrix(conf, depthWiseWeightView, initializeParams));
    conf.addVariable(DEPTH_WISE_WEIGHT_KEY);
    params.put(POINT_WISE_WEIGHT_KEY, createPointWiseWeightMatrix(conf, pointWiseWeightView, initializeParams));
    conf.addVariable(POINT_WISE_WEIGHT_KEY);

    if (layerConf.hasBias()) {
        INDArray biasView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, biasParams));
        params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
        conf.addVariable(BIAS_KEY);
    }

    return params;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 30, Source: SeparableConvolutionParamInitializer.java

Example 7: init

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
/**
 * Initialize the parameters
 *
 * @param conf             the configuration
 * @param paramsView       a view of the full network (backprop) parameters
 * @param initializeParams if true: initialize the parameters according to the configuration. If false: don't modify the
 *                         values in the paramsView array (but do select out the appropriate subset, reshape etc as required)
 * @return Map of parameters keyed by type (view of the 'paramsView' array)
 */
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    if (!(conf.getLayer() instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer))
        throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName());

    Map<String, INDArray> params = Collections.synchronizedMap(new LinkedHashMap<String, INDArray>());

    int length = numParams(conf);
    if (paramsView.length() != length)
        throw new IllegalStateException(
                "Expected params view of length " + length + ", got length " + paramsView.length());

    org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf =
            (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer();
    int nIn = layerConf.getNIn();

    int nWeightParams = nIn; // one weight per input for the element-wise layer (nIn == nOut)
    INDArray weightView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, nWeightParams));
    INDArray biasView = paramsView.get(NDArrayIndex.point(0),
            NDArrayIndex.interval(nWeightParams, nWeightParams + nIn));


    params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams));
    params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
    conf.addVariable(WEIGHT_KEY);
    conf.addVariable(BIAS_KEY);

    return params;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 39, Source: ElementWiseParamInitializer.java

Example 8: createCenterLossMatrix

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
protected INDArray createCenterLossMatrix(NeuralNetConfiguration conf, INDArray centerLossView,
                boolean initializeParameters) {
    org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer layerConf =
                    (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf.getLayer();

    if (initializeParameters) {
        centerLossView.assign(0.0);
    }
    return centerLossView;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 11, Source: CenterLossParamInitializer.java

Example 9: createPointWiseWeightMatrix

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
protected INDArray createPointWiseWeightMatrix(NeuralNetConfiguration conf, INDArray weightView,
                                               boolean initializeParams) {
    /*
     Create a 4d weight matrix of: (num output channels, depth multiplier * num input channels,
     kernel height, kernel width)
     */
    SeparableConvolution2D layerConf =
            (SeparableConvolution2D) conf.getLayer();
    int depthMultiplier = layerConf.getDepthMultiplier();

    if (initializeParams) {
        Distribution dist = Distributions.createDistribution(layerConf.getDist());

        int inputDepth = layerConf.getNIn();
        int outputDepth = layerConf.getNOut();

        double fanIn = inputDepth * depthMultiplier;
        double fanOut = fanIn;

        int[] weightsShape = new int[] {outputDepth, depthMultiplier * inputDepth, 1, 1};

        return WeightInitUtil.initWeights(fanIn, fanOut, weightsShape, layerConf.getWeightInit(), dist, 'c',
                weightView);
    } else {
        return WeightInitUtil.reshapeWeights(
                new int[] {layerConf.getNOut(), depthMultiplier * layerConf.getNIn(), 1, 1}, weightView, 'c');
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 29, Source: SeparableConvolutionParamInitializer.java

Example 10: init

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    SimpleRnn c = (SimpleRnn)conf.getLayer();
    int nIn = c.getNIn();
    int nOut = c.getNOut();

    Map<String,INDArray> m;

    if (initializeParams) {
        Distribution dist = Distributions.createDistribution(c.getDist());

        m = getSubsets(paramsView, nIn, nOut, false);
        INDArray w = WeightInitUtil.initWeights(nIn, nOut, new int[]{nIn, nOut}, c.getWeightInit(), dist, 'f', m.get(WEIGHT_KEY));
        m.put(WEIGHT_KEY, w);

        WeightInit rwInit;
        Distribution rwDist;
        if (c.getWeightInitRecurrent() != null) {
            rwInit = c.getWeightInitRecurrent();
            rwDist = Distributions.createDistribution(c.getDistRecurrent());
        } else {
            rwInit = c.getWeightInit();
            rwDist = dist;
        }

        INDArray rw = WeightInitUtil.initWeights(nOut, nOut, new int[]{nOut, nOut}, rwInit, rwDist, 'f', m.get(RECURRENT_WEIGHT_KEY));
        m.put(RECURRENT_WEIGHT_KEY, rw);
    } else {
        m = getSubsets(paramsView, nIn, nOut, true);
    }

    conf.addVariable(WEIGHT_KEY);
    conf.addVariable(RECURRENT_WEIGHT_KEY);
    conf.addVariable(BIAS_KEY);

    return m;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 38, Source: SimpleRnnParamInitializer.java

Example 11: getGradientsFromFlattened

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) {
    SimpleRnn c = (SimpleRnn)conf.getLayer();
    int nIn = c.getNIn();
    int nOut = c.getNOut();

    return getSubsets(gradientView, nIn, nOut, true);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 9, Source: SimpleRnnParamInitializer.java

Example 12: init

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    Layer orig = conf.getLayer();
    Layer l = underlying(conf.getLayer());
    conf.setLayer(l);
    Map<String,INDArray> m = l.initializer().init(conf, paramsView, initializeParams);
    conf.setLayer(orig);
    return m;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 10, Source: WrapperLayerParamInitializer.java

Example 13: init

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public Map<String, INDArray> init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) {
    Map<String, INDArray> params = Collections.synchronizedMap(new LinkedHashMap<String, INDArray>());

    org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer layerConf =
                    (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf.getLayer();

    int nIn = layerConf.getNIn();
    int nOut = layerConf.getNOut(); // also equal to numClasses

    int wEndOffset = nIn * nOut;
    int bEndOffset = wEndOffset + nOut;
    int cEndOffset = bEndOffset + nIn * nOut;

    INDArray weightView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, wEndOffset));
    INDArray biasView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(wEndOffset, bEndOffset));
    INDArray centerLossView = paramsView.get(NDArrayIndex.point(0), NDArrayIndex.interval(bEndOffset, cEndOffset))
                    .reshape('c', nOut, nIn);

    params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams));
    params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
    params.put(CENTER_KEY, createCenterLossMatrix(conf, centerLossView, initializeParams));
    conf.addVariable(WEIGHT_KEY);
    conf.addVariable(BIAS_KEY);
    conf.addVariable(CENTER_KEY);

    return params;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 29, Source: CenterLossParamInitializer.java

Example 14: numParams

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
@Override
public int numParams(NeuralNetConfiguration conf) {
    org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf =
                    (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer();
    int nIn = layerConf.getNIn();
    int nOut = layerConf.getNOut(); // also equal to numClasses
    return nIn * nOut + nOut + nIn * nOut; //weights + bias + embeddings
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 9, Source: CenterLossParamInitializer.java
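For example, with nIn = 4 inputs and nOut = 3 classes this gives 4 * 3 + 3 + 4 * 3 = 27 parameters: 12 output weights, 3 biases, and 12 center coordinates (one nIn-dimensional center per class, stored in the nOut x nIn center matrix initialized in Example 13).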

Example 15: buildGraphInfo

import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the method depends on
public static GraphInfo buildGraphInfo(MultiLayerConfiguration config) {
    List<String> vertexNames = new ArrayList<>();
    List<String> originalVertexName = new ArrayList<>();
    List<String> layerTypes = new ArrayList<>();
    List<List<Integer>> layerInputs = new ArrayList<>();
    List<Map<String, String>> layerInfo = new ArrayList<>();
    vertexNames.add("Input");
    originalVertexName.add(null);
    layerTypes.add("Input");
    layerInputs.add(Collections.emptyList());
    layerInfo.add(Collections.emptyMap());


    List<NeuralNetConfiguration> list = config.getConfs();
    int layerIdx = 1;
    for (NeuralNetConfiguration c : list) {
        Layer layer = c.getLayer();
        String layerName = layer.getLayerName();
        if (layerName == null)
            layerName = "layer" + layerIdx;
        vertexNames.add(layerName);
        originalVertexName.add(String.valueOf(layerIdx - 1));

        String layerType = c.getLayer().getClass().getSimpleName().replaceAll("Layer$", "");
        layerTypes.add(layerType);

        layerInputs.add(Collections.singletonList(layerIdx - 1));
        layerIdx++;

        //Extract layer info
        Map<String, String> map = getLayerInfo(c, layer);
        layerInfo.add(map);
    }

    return new GraphInfo(vertexNames, layerTypes, layerInputs, layerInfo, originalVertexName);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines of code: 37, Source: TrainModuleUtils.java


Note: the org.deeplearning4j.nn.conf.NeuralNetConfiguration.getLayer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.