Java PoolingType Class Code Examples

This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.PoolingType. If you are wondering what PoolingType does and how it is used in practice, the curated examples below should help.


The PoolingType class belongs to the org.deeplearning4j.nn.conf.layers package. Fifteen code examples are shown below, sorted by popularity by default.
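
Before diving into the examples, here is a minimal standalone sketch (my addition, not taken from any of the projects below) showing how a PoolingType is typically handed to a DL4J layer builder; the builder calls are the standard deeplearning4j configuration API, and the enum constants are MAX, AVG, SUM and PNORM:

import org.deeplearning4j.nn.conf.layers.PoolingType;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;

public class PoolingTypeSketch {
    public static void main(String[] args) {
        // PoolingType selects the reduction applied over each kernel window.
        SubsamplingLayer pool = new SubsamplingLayer.Builder()
                .poolingType(PoolingType.MAX) // max over each 2x2 window
                .kernelSize(2, 2)
                .stride(2, 2)
                .build();
        System.out.println(pool);
    }
}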

Example 1: mapPoolingType

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
/**
 * Map Keras pooling layers to DL4J pooling types.
 *
 * @param className name of the Keras pooling layer class
 * @param conf      Keras layer configuration holding the version-specific layer class names
 * @return the corresponding DL4J PoolingType
 * @throws UnsupportedKerasConfigurationException if the class name is not a supported pooling layer
 */
public static PoolingType mapPoolingType(String className, KerasLayerConfiguration conf)
        throws UnsupportedKerasConfigurationException {
    PoolingType poolingType;
    if (className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_2D())) {
        poolingType = PoolingType.MAX;
    } else if (className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_2D())) {
        poolingType = PoolingType.AVG;
    } else {
        throw new UnsupportedKerasConfigurationException("Unsupported Keras pooling layer " + className);
    }
    return poolingType;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 26, Source: KerasPoolingUtils.java
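
A quick usage sketch for the mapper above (my addition, not from the project). It assumes Keras2LayerConfiguration, the Keras-2 subclass of KerasLayerConfiguration in DL4J's model-import module, can be instantiated directly; feeding the configuration's own layer-class-name constant back into the mapper avoids hard-coding the Keras string:

KerasLayerConfiguration conf = new Keras2LayerConfiguration();
// The max-pooling class name maps to PoolingType.MAX; an unrecognized
// class name would throw UnsupportedKerasConfigurationException.
PoolingType type = KerasPoolingUtils.mapPoolingType(conf.getLAYER_CLASS_NAME_MAX_POOLING_2D(), conf);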

Example 2: getPoolingType

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
@OptionMetadata(
  displayName = "pooling type",
  description = "The type of pooling to use (default = MAX; options: MAX, AVG, SUM, NONE).",
  commandLineParamName = "poolingType",
  commandLineParamSynopsis = "-poolingType <string>",
  displayOrder = 10
)
@Override
public PoolingType getPoolingType() {
  return super.getPoolingType();
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 12, Source: GlobalPoolingLayer.java

Example 3: testSubsamplingLayer

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
/**
 * Test subsampling layer.
 *
 * @throws Exception if the classifier cannot be built or evaluated
 */
@Test
public void testSubsamplingLayer() throws Exception {
  // CLF
  Dl4jMlpClassifier clf = new Dl4jMlpClassifier();
  clf.setSeed(1);

  // Data
  Instances data = DatasetLoader.loadMiniMnistMeta();
  data.setClassIndex(data.numAttributes() - 1);
  final ImageInstanceIterator imgIter = DatasetLoader.loadMiniMnistImageIterator();
  clf.setInstanceIterator(imgIter);

  SubsamplingLayer pool = new SubsamplingLayer();
  pool.setKernelSizeX(2);
  pool.setKernelSizeY(2);
  pool.setPoolingType(PoolingType.MAX);

  OutputLayer outputLayer = new OutputLayer();
  outputLayer.setActivationFn(Activation.SOFTMAX.getActivationFunction());
  outputLayer.setWeightInit(WeightInit.XAVIER);

  NeuralNetConfiguration nnc = new NeuralNetConfiguration();
  nnc.setOptimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT);

  clf.setNeuralNetConfiguration(nnc);
  clf.setLayers(pool, outputLayer);

  clf.setNumEpochs(1);
  clf.buildClassifier(data);
  final double[][] res = clf.distributionsForInstances(data);
  Assert.assertEquals(DatasetLoader.NUM_INSTANCES_MNIST, res.length);
  Assert.assertEquals(DatasetLoader.NUM_CLASSES_MNIST, res[0].length);
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 39, Source: LayerTest.java

Example 4: buildCNNGraph

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static ComputationGraph buildCNNGraph(int vectorSize, int cnnLayerFeatureMaps, PoolingType globalPoolingType) {
    // Set up the network configuration. Note that we have multiple convolution layers, each with filter
    // widths of 3, 4 and 5, as per the Kim (2014) paper.
    ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
            .weightInit(WeightInit.RELU)
            .activation(Activation.LEAKYRELU)
            .updater(Updater.ADAM)
            .convolutionMode(ConvolutionMode.Same)      //This is important so we can 'stack' the results later
            .regularization(true).l2(0.0001)
            .learningRate(0.01)
            .graphBuilder()
            .addInputs("input")
            .addLayer("cnn3", new ConvolutionLayer.Builder()
                    .kernelSize(3, vectorSize)
                    .stride(1, vectorSize)
                    .nIn(1)
                    .nOut(cnnLayerFeatureMaps)
                    .build(), "input")
            .addLayer("cnn4", new ConvolutionLayer.Builder()
                    .kernelSize(4, vectorSize)
                    .stride(1, vectorSize)
                    .nIn(1)
                    .nOut(cnnLayerFeatureMaps)
                    .build(), "input")
            .addLayer("cnn5", new ConvolutionLayer.Builder()
                    .kernelSize(5, vectorSize)
                    .stride(1, vectorSize)
                    .nIn(1)
                    .nOut(cnnLayerFeatureMaps)
                    .build(), "input")
            //Perform depth concatenation
            .addVertex("merge", new MergeVertex(), "cnn3", "cnn4", "cnn5")
            .addLayer("globalPool", new GlobalPoolingLayer.Builder()
                    .poolingType(globalPoolingType)
                    .build(), "merge")
            .addLayer("out", new OutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX)
                    .nIn(3 * cnnLayerFeatureMaps)
                    .nOut(2)    //2 classes: positive or negative
                    .build(), "globalPool")
            .setOutputs("out")
            .build();

    ComputationGraph net = new ComputationGraph(config);
    net.init();
    return net;
}
 
Developer: IsaacChanghau, Project: Word2VecfJava, Lines: 49, Source: CNNSentenceClassification.java
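
A brief usage sketch for buildCNNGraph, using the same hyperparameters as Example 5 below (300-dimensional word vectors, 100 feature maps per filter width, max pooling):

ComputationGraph net = buildCNNGraph(300, 100, PoolingType.MAX);
System.out.println("Total parameters: " + net.numParams());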

Example 5: main

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static void main (String[] args) throws IOException {
    log.info("download and extract data...");
    CNNSentenceClassification.aclImdbDownloader(DATA_URL, DATA_PATH);

    // basic configuration
    int batchSize = 32;
    int vectorSize = 300;               //Size of the word vectors. 300 in the Google News model
    int nEpochs = 1;                    //Number of epochs (full passes of training data) to train on
    int truncateReviewsToLength = 256;  //Truncate reviews with length (# words) greater than this
    int cnnLayerFeatureMaps = 100;      //Number of feature maps / channels / depth for each CNN layer
    PoolingType globalPoolingType = PoolingType.MAX;
    Random rng = new Random(12345); //For shuffling repeatability

    log.info("construct cnn model...");
    ComputationGraph net = CNNSentenceClassification.buildCNNGraph(vectorSize, cnnLayerFeatureMaps, globalPoolingType);
    log.info("number of parameters by layer:");
    for (Layer l : net.getLayers()) {
        log.info("\t" + l.conf().getLayer().getLayerName() + "\t" + l.numParams());
    }

    // Load word vectors and get the DataSetIterators for training and testing
    log.info("loading word vectors and creating DataSetIterators...");
    WordVectors wordVectors = WordVectorSerializer.loadStaticModel(new File(WORD_VECTORS_PATH));
    DataSetIterator trainIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, true, wordVectors, batchSize,
            truncateReviewsToLength, rng);
    DataSetIterator testIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, false, wordVectors, batchSize,
            truncateReviewsToLength, rng);

    log.info("starting training...");
    for (int i = 0; i < nEpochs; i++) {
        net.fit(trainIter);
        log.info("Epoch " + i + " complete. Starting evaluation:");
        //Run evaluation. This is on 25k reviews, so can take some time
        Evaluation evaluation = net.evaluate(testIter);
        log.info(evaluation.stats());
    }

    // after training: load a single sentence and generate a prediction
    String pathFirstNegativeFile = FilenameUtils.concat(DATA_PATH, "aclImdb/test/neg/0_2.txt");
    String contentsFirstNegative = FileUtils.readFileToString(new File(pathFirstNegativeFile));
    INDArray featuresFirstNegative = ((CnnSentenceDataSetIterator)testIter).loadSingleSentence(contentsFirstNegative);
    INDArray predictionsFirstNegative = net.outputSingle(featuresFirstNegative);
    List<String> labels = testIter.getLabels();
    log.info("\n\nPredictions for first negative review:");
    for (int i = 0; i < labels.size(); i++) {
        log.info("P(" + labels.get(i) + ") = " + predictionsFirstNegative.getDouble(i));
    }
}
 
Developer: IsaacChanghau, Project: Word2VecfJava, Lines: 49, Source: DL4JCNNSentClassifyExample.java

Example 6: GlobalPoolingLayer

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
/** Constructor for setting some defaults. */
public GlobalPoolingLayer() {
  setLayerName("GlobalPooling layer");
  setPoolingType(PoolingType.MAX);
  setPnorm(2);
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 7, Source: GlobalPoolingLayer.java

Example 7: setPoolingType

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
@Override
public void setPoolingType(PoolingType poolingType) {
  super.setPoolingType(poolingType);
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 5, Source: GlobalPoolingLayer.java

Example 8: testMinimalMnistConvNet

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
/**
 * Test a minimal MNIST conv net.
 *
 * @throws Exception IO error.
 */
@Test
public void testMinimalMnistConvNet() throws Exception {
  clf.setInstanceIterator(idiMnist);

  int[] threeByThree = {3, 3};
  int[] twoByTwo = {2, 2};
  int[] oneByOne = {1, 1};
  List<Layer> layers = new ArrayList<>();

  ConvolutionLayer convLayer1 = new ConvolutionLayer();
  convLayer1.setKernelSize(threeByThree);
  convLayer1.setStride(oneByOne);
  convLayer1.setNOut(8);
  convLayer1.setLayerName("Conv-layer 1");
  layers.add(convLayer1);

  SubsamplingLayer poolLayer1 = new SubsamplingLayer();
  poolLayer1.setPoolingType(PoolingType.MAX);
  poolLayer1.setKernelSize(twoByTwo);
  poolLayer1.setLayerName("Pool1");
  layers.add(poolLayer1);

  ConvolutionLayer convLayer3 = new ConvolutionLayer();
  convLayer3.setNOut(8);
  convLayer3.setKernelSize(threeByThree);
  layers.add(convLayer3);

  BatchNormalization bn4 = new BatchNormalization();
  bn4.setActivationFunction(new ActivationReLU());
  layers.add(bn4);

  SubsamplingLayer poolLayer2 = new SubsamplingLayer();
  poolLayer2.setPoolingType(PoolingType.MAX);
  poolLayer2.setKernelSize(twoByTwo);
  layers.add(poolLayer2);

  OutputLayer outputLayer = new OutputLayer();
  outputLayer.setActivationFn(new ActivationSoftmax());
  outputLayer.setLossFn(new LossMCXENT());
  layers.add(outputLayer);

  NeuralNetConfiguration nnc = new NeuralNetConfiguration();
  nnc.setOptimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT);
  nnc.setUseRegularization(true);

  clf.setNeuralNetConfiguration(nnc);
  Layer[] ls = new Layer[layers.size()];
  layers.toArray(ls);
  clf.setLayers(ls);

  TestUtil.holdout(clf, dataMnist);
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 58, Source: Dl4jMlpTest.java

Example 9: backpropGradient

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
Pair<Gradient, INDArray> backpropGradient(INDArray input, INDArray epsilon, int[] kernel, int[] strides, int[] pad,
        PoolingType poolingType, ConvolutionMode convolutionMode, int[] dilation);
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 3, Source: SubsamplingHelper.java

Example 10: activate

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
INDArray activate(INDArray input, boolean training, int[] kernel, int[] strides, int[] pad, PoolingType poolingType,
        ConvolutionMode convolutionMode, int[] dilation);
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 3, Source: SubsamplingHelper.java

Example 11: maskedPoolingTimeSeries

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static INDArray maskedPoolingTimeSeries(PoolingType poolingType, INDArray toReduce, INDArray mask,
                int pnorm) {
    if (toReduce.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 array: got " + toReduce.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }

    //Sum pooling: easy. Multiply by mask, then sum as normal
    //Average pooling: as above, but do a broadcast element-wise divi by mask.sum(1)
    //Max pooling: set to -inf if mask is 0, then do max as normal

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, 0, 2));
            INDArray summed = masked.sum(2);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }

            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 51, Source: MaskedReductionUtil.java
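
A hedged sketch (my addition) of calling the utility above on a toy batch; the shapes follow the method's own rank checks, i.e. [minibatch, vectorSize, tsLength] activations and a [minibatch, tsLength] mask:

INDArray toReduce = Nd4j.rand(new int[] {2, 4, 5});            // 2 series, 4 features, 5 time steps
INDArray mask = Nd4j.create(new double[][] {{1, 1, 1, 0, 0},   // first series: 3 valid steps
                                            {1, 1, 1, 1, 1}}); // second series: all steps valid
// Average over the unmasked time steps only; the pnorm argument is ignored for AVG.
INDArray pooled = MaskedReductionUtil.maskedPoolingTimeSeries(PoolingType.AVG, toReduce, mask, 2);
// pooled has shape [2, 4]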

Example 12: maskedPoolingEpsilonTimeSeries

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static INDArray maskedPoolingEpsilonTimeSeries(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm) {

    if (input.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 input activation array: got " + input.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }
    if (epsilon2d.rank() != 2) {
        throw new IllegalArgumentException("Expected rank 2 array for errors: got " + epsilon2d.rank());
    }

    //Mask: [minibatch, tsLength]
    //Epsilon: [minibatch, vectorSize]

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.getExecutioner().execAndReturn(new IsMax(withInf, 2));

            return Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, 0, 2));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, 0, 2)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 78, Source: MaskedReductionUtil.java

Example 13: maskedPoolingConvolution

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static INDArray maskedPoolingConvolution(PoolingType poolingType, INDArray toReduce, INDArray mask,
                boolean alongHeight, int pnorm) {
    // [minibatch, depth, h=1, w=X] or [minibatch, depth, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    int[] dimensions = (alongHeight ? CNN_DIM_MASK_H : CNN_DIM_MASK_W);

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2, 3);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, dimensions));

            INDArray summed = masked.sum(2, 3);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }
            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2, 3);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 49, Source: MaskedReductionUtil.java
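
A similar hedged sketch for the convolutional variant; per the shape comments above, the data is [minibatch, depth, 1, w] with the mask running along the width, so alongHeight is false here:

INDArray act = Nd4j.rand(new int[] {2, 3, 1, 5});              // [minibatch=2, depth=3, h=1, w=5]
INDArray mask = Nd4j.create(new double[][] {{1, 1, 1, 0, 0},
                                            {1, 1, 1, 1, 1}});
INDArray pooled = MaskedReductionUtil.maskedPoolingConvolution(PoolingType.MAX, act, mask, false, 2);
// pooled has shape [2, 3]: the max over the unmasked width positions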

Example 14: maskedPoolingEpsilonCnn

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, boolean alongHeight, int pnorm) {

    // [minibatch, depth, h=1, w=X] or [minibatch, depth, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    int[] dimensions = (alongHeight ? CNN_DIM_MASK_H : CNN_DIM_MASK_W);

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.getExecutioner().execAndReturn(new IsMax(withInf, 2, 3));

            return Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, current design is restricted to [minibatch, depth, 1, W] or [minibatch, depth, H, 1]
            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);

    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 75, Source: MaskedReductionUtil.java

Example 15: testMaskingRnn

import org.deeplearning4j.nn.conf.layers.PoolingType; // import the required package/class
@Test
public void testMaskingRnn() {


    int timeSeriesLength = 5;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;
    int[] minibatchSizes = new int[] {1, 3};

    for (int miniBatchSize : minibatchSizes) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                        .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                        .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                                        .build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder()
                                        .poolingType(PoolingType.AVG).build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength}).subi(0.5);

        INDArray mask;
        if (miniBatchSize == 1) {
            mask = Nd4j.create(new double[] {1, 1, 1, 1, 0});
        } else {
            mask = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
        for (int i = 0; i < miniBatchSize; i++) {
            int idx = r.nextInt(nOut);
            labels.putScalar(i, idx, 1.0);
        }

        net.setLayerMaskArrays(mask, null);
        INDArray outputMasked = net.output(input);

        net.clearLayerMaskArrays();

        for (int i = 0; i < miniBatchSize; i++) {
            INDArray maskRow = mask.getRow(i);
            int tsLength = maskRow.sumNumber().intValue();
            INDArray inputSubset = input.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.interval(0, tsLength));

            INDArray outSubset = net.output(inputSubset);
            INDArray outputMaskedSubset = outputMasked.getRow(i);

            assertEquals(outSubset, outputMaskedSubset);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 61, Source: GlobalPoolingMaskingTests.java


Note: the org.deeplearning4j.nn.conf.layers.PoolingType class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code fragments were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or using the code. Do not reproduce without permission.