This article collects typical usage examples of PoolingType.SUM, a constant of the Java enum org.deeplearning4j.nn.conf.layers.PoolingType. If you are wondering what PoolingType.SUM is, how it is used, or want to see it in real code, the curated examples below may help. You can also explore further usage of its enclosing class, org.deeplearning4j.nn.conf.layers.PoolingType.
The following presents 8 code examples that use PoolingType.SUM, sorted by popularity by default.
Example 1: maskedPoolingTimeSeries
public static INDArray maskedPoolingTimeSeries(PoolingType poolingType, INDArray toReduce, INDArray mask,
                int pnorm) {
    if (toReduce.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 array: got " + toReduce.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }

    //Sum pooling: easy. Multiply by mask, then sum as normal
    //Average pooling: as above, but do a broadcast element-wise divi by mask.sum(1)
    //Max pooling: set to -inf if mask is 0, then do max as normal

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, 0, 2));
            INDArray summed = masked.sum(2);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }

            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
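To make the method's contract concrete, here is a minimal usage sketch (not part of the original examples). It assumes the helper above is in scope as a static method; in DL4J these helpers live in MaskedReductionUtil, but any enclosing class works for the sketch. The shapes are the ones the method validates: rank-3 activations [minibatch, vectorSize, tsLength] and a rank-2 mask [minibatch, tsLength].

INDArray act = Nd4j.rand(new int[] {2, 4, 5});      //[minibatch=2, vectorSize=4, tsLength=5]
INDArray mask = Nd4j.create(new double[][] {
                {1, 1, 1, 1, 1},                    //First series: all 5 steps valid
                {1, 1, 1, 0, 0}});                  //Second series: only 3 valid steps
//AVG divides row 0 by 5 and row 1 by 3; the pnorm argument is only used for PNORM pooling
INDArray avg = maskedPoolingTimeSeries(PoolingType.AVG, act, mask, 2);  //Shape: [2, 4]
INDArray sum = maskedPoolingTimeSeries(PoolingType.SUM, act, mask, 2);  //Shape: [2, 4]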
Example 2: maskedPoolingEpsilonTimeSeries
public static INDArray maskedPoolingEpsilonTimeSeries(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm) {
    if (input.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 input activation array: got " + input.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }
    if (epsilon2d.rank() != 2) {
        throw new IllegalArgumentException("Expected rank 2 array for errors: got " + epsilon2d.rank());
    }

    //Mask: [minibatch, tsLength]
    //Epsilon: [minibatch, vectorSize]

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.getExecutioner().execAndReturn(new IsMax(withInf, 2));

            return Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, 0, 2));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, 0, 2)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
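This backward helper pairs with the forward one from Example 1: epsilon2d is dL/dOut with the pooled shape [minibatch, vectorSize], and the result has the pre-pooling shape [minibatch, vectorSize, tsLength], with zero gradient at masked-out steps. A hedged sketch, under the same assumptions as the sketch above:

INDArray input = Nd4j.rand(new int[] {2, 4, 5});    //[minibatch, vectorSize, tsLength]
INDArray mask = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 0, 0}});
//Gradient w.r.t. the pooled output: one row per example, one column per vector element
INDArray epsilon2d = Nd4j.rand(new int[] {2, 4});
INDArray epsilon3d = maskedPoolingEpsilonTimeSeries(PoolingType.AVG, input, mask, epsilon2d, 2);
//epsilon3d has shape [2, 4, 5]; steps 3 and 4 of the second example are all zero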
Example 3: maskedPoolingConvolution
public static INDArray maskedPoolingConvolution(PoolingType poolingType, INDArray toReduce, INDArray mask,
                boolean alongHeight, int pnorm) {
    // [minibatch, depth, h=1, w=X] or [minibatch, depth, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]
    int[] dimensions = (alongHeight ? CNN_DIM_MASK_H : CNN_DIM_MASK_W);

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2, 3);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, dimensions));

            INDArray summed = masked.sum(2, 3);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }

            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2, 3);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
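The convolutional variant applies the same reductions to 4d activations where one spatial dimension is degenerate. A minimal sketch, assuming masking along width (alongHeight == false, so broadcast dimensions [0, 3]) and that the constant CNN_DIM_MASK_W is defined as {0, 3} in the enclosing class, as the comments above suggest:

INDArray act = Nd4j.rand(new int[] {2, 3, 1, 6});   //[minibatch, depth, h=1, w=6]
INDArray mask = Nd4j.create(new double[][] {
                {1, 1, 1, 1, 1, 1},                 //First example: all 6 positions valid
                {1, 1, 1, 1, 0, 0}});               //[minibatch, w]
INDArray pooled = maskedPoolingConvolution(PoolingType.MAX, act, mask, false, 2);   //[2, 3]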
Example 4: maskedPoolingEpsilonCnn
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, boolean alongHeight, int pnorm) {
    // [minibatch, depth, h=1, w=X] or [minibatch, depth, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]
    int[] dimensions = (alongHeight ? CNN_DIM_MASK_H : CNN_DIM_MASK_W);

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask = Transforms.not(mask);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.getExecutioner().execAndReturn(new IsMax(withInf, 2, 3));

            return Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, current design is restricted to [minibatch, depth, 1, W] or [minibatch, depth, H, 1]
            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
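And the CNN backward pass, mirroring the time-series case in Example 2: epsilon2d has the pooled shape [minibatch, depth] and the result has the input's 4d shape. A sketch under the same assumptions as the previous one:

INDArray input = Nd4j.rand(new int[] {2, 3, 1, 6});
INDArray mask = Nd4j.create(new double[][] {{1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 0, 0}});
INDArray epsilon2d = Nd4j.rand(new int[] {2, 3});   //dL/dOut: [minibatch, depth]
INDArray eps4d = maskedPoolingEpsilonCnn(PoolingType.PNORM, input, mask, epsilon2d, false, 2);
//eps4d has shape [2, 3, 1, 6]; the two masked-out width positions receive zero gradient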
Example 5: testMaskingCnnDim3_SingleExample
@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new int[] {1, depthIn, height, 5}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
Example 6: testMaskingCnnDim2_SingleExample
@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new int[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
Example 7: testMaskingCnnDim3
@Test
public void testMaskingCnnDim3() {
    //Test masking, where mask is along dimension 3

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray =
                        Nd4j.create(new double[][] {{1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 0}, {1, 1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = width - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
            assertArrayEquals(new int[] {1, depthIn, height, width - i}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}
Example 8: testMaskingCnnDim2
@Test
public void testMaskingCnnDim2() {
    //Test masking, where mask is along dimension 2

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 5;
    int width = 4;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = height - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
            assertArrayEquals(new int[] {1, depthIn, height - i, width}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}