

Java PoolingType.MAX Field Code Examples

This article collects typical usage examples of the Java field org.deeplearning4j.nn.conf.layers.PoolingType.MAX. If you are wondering what PoolingType.MAX does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore the other members of org.deeplearning4j.nn.conf.layers.PoolingType.


Six code examples using the PoolingType.MAX field are shown below, ordered by popularity.
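
Before diving into the examples, here is a minimal, hedged sketch of the most common use of PoolingType.MAX: selecting max pooling on a global pooling layer. The wrapper class is hypothetical; the builder calls match the test configurations in Examples 3 through 6 below.

import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer;
import org.deeplearning4j.nn.conf.layers.PoolingType;

public class PoolingTypeMaxSketch {

    // Build a global max-pooling layer. PoolingType.MAX selects the max
    // operation; AVG, SUM and PNORM are the alternatives exercised in the
    // masking tests below.
    static GlobalPoolingLayer globalMaxPooling() {
        return new GlobalPoolingLayer.Builder()
                .poolingType(PoolingType.MAX)
                .build();
    }
}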

Example 1: mapPoolingType

/**
 * Map Keras pooling layers to DL4J pooling types.
 *
 * @param className name of the Keras pooling layer class (e.g. "MaxPooling2D")
 * @param conf      Keras layer configuration providing the expected layer class name strings
 * @return the corresponding DL4J PoolingType (MAX or AVG)
 * @throws UnsupportedKerasConfigurationException if the class name is not a recognized pooling layer
 */
public static PoolingType mapPoolingType(String className, KerasLayerConfiguration conf)
        throws UnsupportedKerasConfigurationException {
    PoolingType poolingType;
    if (className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_2D())) {
        poolingType = PoolingType.MAX;
    } else if (className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_2D())) {
        poolingType = PoolingType.AVG;
    } else {
        throw new UnsupportedKerasConfigurationException("Unsupported Keras pooling layer " + className);
    }
    return poolingType;
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 25, Source: KerasPoolingUtils.java
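
As a hedged usage sketch of the mapper above: the import paths and the Keras2LayerConfiguration class name are assumptions based on the DL4J Keras model-import module, and "MaxPooling2D" is the class-name string that getLAYER_CLASS_NAME_MAX_POOLING_2D() is expected to return.

import org.deeplearning4j.nn.conf.layers.PoolingType;
// Assumed imports from the DL4J Keras model-import module:
import org.deeplearning4j.nn.modelimport.keras.config.Keras2LayerConfiguration;
import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException;
import org.deeplearning4j.nn.modelimport.keras.utils.KerasPoolingUtils;

public class MapPoolingTypeSketch {
    public static void main(String[] args) throws UnsupportedKerasConfigurationException {
        // "MaxPooling2D" should match getLAYER_CLASS_NAME_MAX_POOLING_2D(),
        // so the mapper is expected to return PoolingType.MAX.
        PoolingType pt = KerasPoolingUtils.mapPoolingType("MaxPooling2D",
                new Keras2LayerConfiguration());
        System.out.println(pt); // expected output: MAX
    }
}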

Example 2: main

public static void main (String[] args) throws IOException {
    log.info("download and extract data...");
    CNNSentenceClassification.aclImdbDownloader(DATA_URL, DATA_PATH);

    // basic configuration
    int batchSize = 32;
    int vectorSize = 300;               //Size of the word vectors. 300 in the Google News model
    int nEpochs = 1;                    //Number of epochs (full passes of training data) to train on
    int truncateReviewsToLength = 256;  //Truncate reviews with length (# words) greater than this
    int cnnLayerFeatureMaps = 100;      //Number of feature maps / channels / depth for each CNN layer
    PoolingType globalPoolingType = PoolingType.MAX;
    Random rng = new Random(12345); //For shuffling repeatability

    log.info("construct cnn model...");
    ComputationGraph net = CNNSentenceClassification.buildCNNGraph(vectorSize, cnnLayerFeatureMaps, globalPoolingType);
    log.info("number of parameters by layer:");
    for (Layer l : net.getLayers()) {
        log.info("\t" + l.conf().getLayer().getLayerName() + "\t" + l.numParams());
    }

    // Load word vectors and get the DataSetIterators for training and testing
    log.info("loading word vectors and creating DataSetIterators...");
    WordVectors wordVectors = WordVectorSerializer.loadStaticModel(new File(WORD_VECTORS_PATH));
    DataSetIterator trainIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, true, wordVectors, batchSize,
            truncateReviewsToLength, rng);
    DataSetIterator testIter = CNNSentenceClassification.getDataSetIterator(DATA_PATH, false, wordVectors, batchSize,
            truncateReviewsToLength, rng);

    log.info("starting training...");
    for (int i = 0; i < nEpochs; i++) {
        net.fit(trainIter);
        log.info("Epoch " + i + " complete. Starting evaluation:");
        //Run evaluation. This is on 25k reviews, so can take some time
        Evaluation evaluation = net.evaluate(testIter);
        log.info(evaluation.stats());
    }

    // after training: load a single sentence and generate a prediction
    String pathFirstNegativeFile = FilenameUtils.concat(DATA_PATH, "aclImdb/test/neg/0_2.txt");
    String contentsFirstNegative = FileUtils.readFileToString(new File(pathFirstNegativeFile));
    INDArray featuresFirstNegative = ((CnnSentenceDataSetIterator)testIter).loadSingleSentence(contentsFirstNegative);
    INDArray predictionsFirstNegative = net.outputSingle(featuresFirstNegative);
    List<String> labels = testIter.getLabels();
    log.info("\n\nPredictions for first negative review:");
    for( int i=0; i<labels.size(); i++ ){
        log.info("P(" + labels.get(i) + ") = " + predictionsFirstNegative.getDouble(i));
    }
}
 
Developer: IsaacChanghau, Project: Word2VecfJava, Lines: 48, Source: DL4JCNNSentClassifyExample.java

Example 3: testMaskingCnnDim3_SingleExample

@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new int[] {1, depthIn, height, 5}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 62, Source: GlobalPoolingMaskingTests.java
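
Examples 3 through 6 share one masking idiom: broadcast-multiplying a 2D mask over a 4D activation tensor before global pooling, so that padded positions contribute zeros. The short sketch below isolates that step; the class name is hypothetical, while the BroadcastMulOp call mirrors the one in the test above (dimensions 0 and 3 map the mask onto minibatch and width).

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.broadcast.BroadcastMulOp;
import org.nd4j.linalg.factory.Nd4j;

public class MaskBroadcastSketch {
    public static void main(String[] args) {
        // [minibatch, depth, height, width] activations and a [minibatch, width] mask
        INDArray input = Nd4j.rand(new int[] {1, 2, 3, 6});
        INDArray mask = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        // Broadcast-multiply the mask over dimensions 0 (minibatch) and 3 (width),
        // zeroing the padded width steps in place.
        Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, input, 0, 3));

        System.out.println(input); // the last width column is now all zeros
    }
}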

Example 4: testMaskingCnnDim2_SingleExample

@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new int[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 62, Source: GlobalPoolingMaskingTests.java

Example 5: testMaskingCnnDim3

@Test
public void testMaskingCnnDim3() {
    //Test masking, where mask is along dimension 3

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray =
                        Nd4j.create(new double[][] {{1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 0}, {1, 1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = width - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
            assertArrayEquals(new int[] {1, depthIn, height, width - i}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 58, Source: GlobalPoolingMaskingTests.java

Example 6: testMaskingCnnDim2

@Test
public void testMaskingCnnDim2() {
    //Test masking, where mask is along dimension 2

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 5;
    int width = 4;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = height - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
            assertArrayEquals(new int[] {1, depthIn, height - i, width}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 57, Source: GlobalPoolingMaskingTests.java


Note: the org.deeplearning4j.nn.conf.layers.PoolingType.MAX examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community open-source projects; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.