

Java Activation.IDENTITY Code Examples

This article collects typical usage examples of the Java Activation.IDENTITY enum constant from org.nd4j.linalg.activations. If you are wondering what Activation.IDENTITY does, how to use it, or where to find real-world examples, the curated snippets below should help. You can also explore further usage examples of the enclosing org.nd4j.linalg.activations.Activation enum.


Five code examples of Activation.IDENTITY are shown below, sorted by popularity by default.
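Before the examples, a quick orientation: Activation.IDENTITY selects the identity function f(x) = x, i.e. no nonlinearity, and is typically chosen for regression outputs or wherever pre-activations should pass through unchanged. The minimal sketch below shows this directly; it assumes the IActivation API of the ND4J versions these examples target (Activation.getActivationFunction() and IActivation.getActivation(INDArray, boolean)), so treat it as illustrative rather than authoritative:

import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.activations.IActivation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class IdentityActivationSketch {
    public static void main(String[] args) {
        // Activation.IDENTITY resolves to the identity function: f(x) = x
        IActivation identity = Activation.IDENTITY.getActivationFunction();

        INDArray in = Nd4j.linspace(-2, 2, 5);
        // getActivation may modify its argument in place, so pass a copy
        INDArray out = identity.getActivation(in.dup(), true);

        System.out.println(out); // values are unchanged: [-2.0, -1.0, 0.0, 1.0, 2.0]
    }
}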

Example 1: GaussianReconstructionDistribution

/**
 * Create a GaussianReconstructionDistribution with the default identity activation function.
 */
public GaussianReconstructionDistribution() {
    this(Activation.IDENTITY);
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 6 | Source: GaussianReconstructionDistribution.java
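Usage note (a hedged sketch, not from the source above): GaussianReconstructionDistribution is normally passed to a VariationalAutoencoder layer via reconstructionDistribution(...), as in Example 4 below; the no-arg constructor shown here is equivalent to passing Activation.IDENTITY explicitly:

import org.deeplearning4j.nn.conf.layers.variational.GaussianReconstructionDistribution;
import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder;

// A VAE layer whose decoder models p(x|z) as a Gaussian; the identity activation
// (the default constructor above) is applied to the distribution's mean parameters.
VariationalAutoencoder vaeLayer = new VariationalAutoencoder.Builder()
        .nIn(4).nOut(3)
        .encoderLayerSizes(5)
        .decoderLayerSizes(6)
        .reconstructionDistribution(new GaussianReconstructionDistribution()) // same as new GaussianReconstructionDistribution(Activation.IDENTITY)
        .build();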

Example 2: elementWiseMultiplicationLayerTest

@Test
public void elementWiseMultiplicationLayerTest() {

    for (Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) {

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                .seed(12345L)
                .weightInit(new UniformDistribution(0, 1))
                .graphBuilder()
                .addInputs("features")
                .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(4)
                        .activation(Activation.TANH)
                        .build(), "features")
                .addLayer("elementWiseMul", new ElementWiseMultiplicationLayer.Builder().nIn(4).nOut(4)
                        .activation(a)
                        .build(), "dense")
                .addLayer("loss", new LossLayer.Builder(LossFunctions.LossFunction.COSINE_PROXIMITY)
                        .activation(Activation.IDENTITY).build(), "elementWiseMul")
                .setOutputs("loss")
                .pretrain(false).backprop(true).build();

        ComputationGraph netGraph = new ComputationGraph(conf);
        netGraph.init();

        log.info("params before learning: " + netGraph.getLayer(1).paramTable());

        //Run a number of learning iterations manually on some pseudo data.
        //The idea is simple: the element-wise multiplication layer is just a per-element scaling,
        //so the cosine similarity is driven mainly by the fourth value; if learning works,
        //we should end up with a large weight on the fourth element.

        INDArray features = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}, {1, 2, 3, 0}});
        INDArray labels = Nd4j.create(new double[][]{{1, 1, 1, 8}, {1, 1, 1, 2}, {1, 1, 1, 1}});

        netGraph.setInputs(features);
        netGraph.setLabels(labels);
        netGraph.computeGradientAndScore();
        double scoreBefore = netGraph.score();

        String msg;
        for (int epoch = 0; epoch < 5; epoch++)
            netGraph.fit(new INDArray[]{features}, new INDArray[]{labels});
        netGraph.computeGradientAndScore();
        double scoreAfter = netGraph.score();
        //Can't test in 'characteristic mode of operation' if not learning
        msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn="
                + "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id"
                + ", doLearningFirst=" + "true" + " (before=" + scoreBefore
                + ", scoreAfter=" + scoreAfter + ")";
        assertTrue(msg, scoreAfter < 0.8 * scoreBefore);

        //Expectation for the linear-regression-like case (only an element-wise multiplication layer):
        //a large weight on the fourth element.
        log.info("params after learning: " + netGraph.getLayer(1).paramTable());

        boolean gradOK = checkGradients(netGraph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[]{features}, new INDArray[]{labels});

        msg = "elementWiseMultiplicationLayerTest() - activationFn=" + "ID" + ", lossFn=" + "Cos-sim"
                + ", outputActivation=" + "Id" + ", doLearningFirst=" + "true";
        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(netGraph);
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 64 | Source: GradientCheckTests.java
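What makes this test meaningful is what the layer computes: deeplearning4j's ElementWiseMultiplicationLayer produces out = activationFn(input .* w + b), an element-wise product with a learned weight vector, so with four inputs the fourth weight alone can stretch the fourth component. Below is a minimal ND4J sketch of that forward pass; the class and method names are illustrative, not from the test:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class ElementWiseMulSketch {
    // Forward pass with identity activation: out = x .* w + b,
    // where w and b are row vectors broadcast over the rows of x.
    static INDArray forward(INDArray x, INDArray w, INDArray b) {
        return x.mulRowVector(w).addRowVector(b);
    }

    public static void main(String[] args) {
        INDArray x = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}});
        INDArray w = Nd4j.create(new double[]{0.1, 0.1, 0.1, 2.0}); // large fourth weight, as the test expects to learn
        INDArray b = Nd4j.zeros(1, 4);
        System.out.println(forward(x, w, b));
    }
}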

Example 3: gradientCheckMaskingOutputSimple

@Test
public void gradientCheckMaskingOutputSimple() {

    int timeSeriesLength = 5;
    boolean[][] mask = new boolean[5][];
    mask[0] = new boolean[] {true, true, true, true, true}; //No masking
    mask[1] = new boolean[] {false, true, true, true, true}; //mask first output time step
    mask[2] = new boolean[] {false, false, false, false, true}; //time series classification: mask all but last
    mask[3] = new boolean[] {false, false, true, false, true}; //time series classification w/ variable length TS
    mask[4] = new boolean[] {true, true, true, false, true}; //variable length TS

    int nIn = 4;
    int layerSize = 3;

    GradientCheckSimpleScenario[] scenarios = new GradientCheckSimpleScenario[] {
                    new GradientCheckSimpleScenario(LossFunctions.LossFunction.MCXENT.getILossFunction(),
                                    Activation.SOFTMAX, 2, 2),
                    new GradientCheckSimpleScenario(LossMixtureDensity.builder().gaussians(2).labelWidth(3).build(),
                                    Activation.TANH, 10, 3),
                    new GradientCheckSimpleScenario(LossMixtureDensity.builder().gaussians(2).labelWidth(4).build(),
                                    Activation.IDENTITY, 12, 4),
                    new GradientCheckSimpleScenario(LossFunctions.LossFunction.L2.getILossFunction(),
                                    Activation.SOFTMAX, 2, 2)};

    for (GradientCheckSimpleScenario s : scenarios) {

        Random r = new Random(12345L);
        INDArray input = Nd4j.zeros(1, nIn, timeSeriesLength);
        for (int m = 0; m < 1; m++) {
            for (int j = 0; j < nIn; j++) {
                for (int k = 0; k < timeSeriesLength; k++) {
                    input.putScalar(new int[] {m, j, k}, r.nextDouble() - 0.5);
                }
            }
        }

        INDArray labels = Nd4j.zeros(1, s.labelWidth, timeSeriesLength);
        for (int m = 0; m < 1; m++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                int idx = r.nextInt(s.labelWidth);
                labels.putScalar(new int[] {m, idx, j}, 1.0f);
            }
        }

        for (int i = 0; i < mask.length; i++) {

            //Create mask array:
            INDArray maskArr = Nd4j.create(1, timeSeriesLength);
            for (int j = 0; j < mask[i].length; j++) {
                maskArr.putScalar(new int[] {0, j}, mask[i][j] ? 1.0 : 0.0);
            }

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L)
                            .list()
                            .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize)
                                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                                            .updater(new NoOp()).build())
                            .layer(1, new RnnOutputLayer.Builder(s.lf).activation(s.act).nIn(layerSize).nOut(s.nOut)
                                            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1))
                                            .updater(new NoOp()).build())
                            .pretrain(false).backprop(true).build();
            MultiLayerNetwork mln = new MultiLayerNetwork(conf);
            mln.init();

            boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels, null, maskArr);

            String msg = "gradientCheckMaskingOutputSimple() - timeSeriesLength=" + timeSeriesLength
                            + ", miniBatchSize=" + 1;
            assertTrue(msg, gradOK);
        }
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 73 | Source: GradientCheckTestsMasking.java
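Beyond gradient checking, the same per-time-step label mask would be supplied when training or scoring. A hedged fragment, continuing with the mln, input, and labels built in the test above, and assuming MultiLayerNetwork's fit overload that takes feature and label masks (present in the deeplearning4j versions this test targets):

// Mask shape is [miniBatchSize, timeSeriesLength]: 1.0 = include the time step in the loss, 0.0 = ignore it.
INDArray labelsMask = Nd4j.create(new double[][]{{0, 0, 0, 0, 1}}); // e.g. classification from the last step only

// featuresMask is null because this test masks only the outputs, not the inputs.
mln.fit(input, labels, null, labelsMask);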

Example 4: testVaeAsMLP

@Test
public void testVaeAsMLP() {
    //Post pre-training: a VAE can be used as an MLP, by taking the mean value from p(z|x) as the output
    //This gradient check tests this part

    Activation[] activFns = {Activation.IDENTITY, Activation.TANH, Activation.IDENTITY, Activation.TANH, Activation.IDENTITY, Activation.TANH};

    LossFunction[] lossFunctions = {LossFunction.MCXENT, LossFunction.MCXENT, LossFunction.MSE, LossFunction.MSE, LossFunction.MCXENT, LossFunction.MSE};
    Activation[] outputActivations = {Activation.SOFTMAX, Activation.SOFTMAX, Activation.TANH, Activation.TANH, Activation.SOFTMAX, Activation.TANH};

    //use l2vals[i] with l1vals[i]
    double[] l2vals = {0.4, 0.0, 0.4, 0.4, 0.0, 0.0};
    double[] l1vals = {0.0, 0.0, 0.5, 0.0, 0.0, 0.5};
    double[] biasL2 = {0.0, 0.0, 0.0, 0.2, 0.0, 0.4};
    double[] biasL1 = {0.0, 0.0, 0.6, 0.0, 0.0, 0.0};

    int[][] encoderLayerSizes = new int[][] {{5}, {5}, {5, 6}, {5, 6}, {5}, {5, 6}};
    int[][] decoderLayerSizes = new int[][] {{6}, {7, 8}, {6}, {7, 8}, {6}, {7, 8}};

    int[] minibatches = new int[]{1,5,4,3,1,4};

    Nd4j.getRandom().setSeed(12345);
    for( int i=0; i<activFns.length; i++ ){
        LossFunction lf = lossFunctions[i];
        Activation outputActivation = outputActivations[i];
        double l2 = l2vals[i];
        double l1 = l1vals[i];
        int[] encoderSizes = encoderLayerSizes[i];
        int[] decoderSizes = decoderLayerSizes[i];
        int minibatch = minibatches[i];
        INDArray input = Nd4j.rand(minibatch, 4);
        INDArray labels = Nd4j.create(minibatch, 3);
        for (int j = 0; j < minibatch; j++) {
            labels.putScalar(j, j % 3, 1.0);
        }
        Activation afn = activFns[i];

        MultiLayerConfiguration conf =
                new NeuralNetConfiguration.Builder().l2(l2).l1(l1)
                        .l2Bias(biasL2[i]).l1Bias(biasL1[i])
                        .updater(new NoOp()).seed(12345L).list()
                        .layer(0, new VariationalAutoencoder.Builder().nIn(4)
                                .nOut(3).encoderLayerSizes(encoderSizes)
                                .decoderLayerSizes(decoderSizes)
                                .weightInit(WeightInit.DISTRIBUTION)
                                .dist(new NormalDistribution(0, 1))
                                .activation(afn)
                                .build())
                        .layer(1, new OutputLayer.Builder(lf)
                                .activation(outputActivation).nIn(3).nOut(3)
                                .weightInit(WeightInit.DISTRIBUTION)
                                .dist(new NormalDistribution(0, 1))
                                .build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        String msg = "testVaeAsMLP() - activationFn=" + afn + ", lossFn=" + lf
                + ", outputActivation=" + outputActivation + ", encLayerSizes = "
                + Arrays.toString(encoderSizes) + ", decLayerSizes = "
                + Arrays.toString(decoderSizes) + ", l2=" + l2 + ", l1=" + l1;
        if (PRINT_RESULTS) {
            System.out.println(msg);
            for (int j = 0; j < mln.getnLayers(); j++)
                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input,
                labels);
        assertTrue(msg, gradOK);
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 75 | Source: VaeGradientCheckTests.java
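To make the opening comment of this test concrete: after (optional) pretraining, the network is used like any feed-forward net, with the VAE layer emitting the mean of p(z|x) as its activation. A minimal fragment, continuing with the mln and input from the test above (output(...) is standard MultiLayerNetwork API):

// Standard inference pass; layer 0 (the VAE) contributes the mean of p(z|x),
// which the output layer consumes like an ordinary hidden activation.
INDArray prediction = mln.output(input, false); // false = inference mode (no dropout etc.)
System.out.println(prediction);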

Example 5: testGaussianLogProb

@Test
public void testGaussianLogProb() {
    Nd4j.getRandom().setSeed(12345);

    int inputSize = 4;
    int[] mbs = new int[] {1, 2, 5};

    for (boolean average : new boolean[] {true, false}) {
        for (int minibatch : mbs) {

            INDArray x = Nd4j.rand(minibatch, inputSize);
            INDArray mean = Nd4j.randn(minibatch, inputSize);
            INDArray logStdevSquared = Nd4j.rand(minibatch, inputSize).subi(0.5);

            INDArray distributionParams = Nd4j.createUninitialized(new int[] {minibatch, 2 * inputSize});
            distributionParams.get(NDArrayIndex.all(), NDArrayIndex.interval(0, inputSize)).assign(mean);
            distributionParams.get(NDArrayIndex.all(), NDArrayIndex.interval(inputSize, 2 * inputSize))
                            .assign(logStdevSquared);

            ReconstructionDistribution dist = new GaussianReconstructionDistribution(Activation.IDENTITY);

            double negLogProb = dist.negLogProbability(x, distributionParams, average);

            INDArray exampleNegLogProb = dist.exampleNegLogProbability(x, distributionParams);
            assertArrayEquals(new int[] {minibatch, 1}, exampleNegLogProb.shape());

            //Calculate the same thing, but using Apache Commons math

            double logProbSum = 0.0;
            for (int i = 0; i < minibatch; i++) {
                double exampleSum = 0.0;
                for (int j = 0; j < inputSize; j++) {
                    double mu = mean.getDouble(i, j);
                    double logSigma2 = logStdevSquared.getDouble(i, j);
                    double sigma = Math.sqrt(Math.exp(logSigma2));
                    NormalDistribution nd = new NormalDistribution(mu, sigma);

                    double xVal = x.getDouble(i, j);
                    double thisLogProb = nd.logDensity(xVal);
                    logProbSum += thisLogProb;
                    exampleSum += thisLogProb;
                }
                assertEquals(-exampleNegLogProb.getDouble(i), exampleSum, 1e-6);
            }

            double expNegLogProb;
            if (average) {
                expNegLogProb = -logProbSum / minibatch;
            } else {
                expNegLogProb = -logProbSum;
            }


            //System.out.println(expNegLogProb + "\t" + negLogProb + "\t" + (negLogProb / expNegLogProb));
            assertEquals(expNegLogProb, negLogProb, 1e-6);


            //Also: check random sampling...
            int count = minibatch * inputSize;
            INDArray arr = Nd4j.linspace(-3, 3, count).reshape(minibatch, inputSize);
            INDArray sampleMean = dist.generateAtMean(arr);
            INDArray sampleRandom = dist.generateRandom(arr);
        }
    }
}
 
Developer: deeplearning4j | Project: deeplearning4j | Lines: 65 | Source: TestReconstructionDistributions.java
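For reference, the quantity the Apache Commons cross-check accumulates element by element is the closed-form Gaussian log-density, with the second half of distributionParams interpreted as log(sigma^2) exactly as in the loop above:

\log p(x \mid \mu, \sigma^2) = -\tfrac{1}{2}\log\!\left(2\pi\sigma^2\right) - \frac{(x-\mu)^2}{2\sigma^2}, \qquad \sigma^2 = e^{\text{logStdevSquared}}

negLogProbability then negates the sum over all elements, dividing by the minibatch size when average is true, which is exactly what the expNegLogProb branch of the test reproduces.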


Note: The org.nd4j.linalg.activations.Activation.IDENTITY examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.