This article collects typical usage examples of the Java method org.deeplearning4j.nn.multilayer.MultiLayerNetwork.setLayerMaskArrays. If you are wondering what MultiLayerNetwork.setLayerMaskArrays does, how to call it, or where to find worked examples, the curated code samples below should help. You can also look further into the enclosing class, org.deeplearning4j.nn.multilayer.MultiLayerNetwork, for related usage.
Six code examples of MultiLayerNetwork.setLayerMaskArrays are shown below, sorted by popularity by default.
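Before the examples, here is a minimal sketch of the typical call pattern (the class and method names SetLayerMaskArraysSketch/maskedOutput are illustrative only, and it assumes net is an already-initialized recurrent MultiLayerNetwork): set the feature mask, run the forward pass, then clear the masks, because they remain set on the network until explicitly cleared.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class SetLayerMaskArraysSketch {
    //Hypothetical helper: inference on one zero-padded time series of shape [1, nIn, tsLength]
    public static INDArray maskedOutput(MultiLayerNetwork net, int nIn, int tsLength) {
        INDArray input = Nd4j.rand(new int[] {1, nIn, tsLength});

        //Feature mask, shape [miniBatch, tsLength]: 1.0 marks a real time step, 0.0 marks padding.
        //Here the last two time steps are treated as padding (assumes tsLength >= 2).
        INDArray featuresMask = Nd4j.ones(1, tsLength);
        featuresMask.putScalar(0, tsLength - 1, 0.0);
        featuresMask.putScalar(0, tsLength - 2, 0.0);

        net.setLayerMaskArrays(featuresMask, null); //second argument: labels mask (not needed for inference)
        INDArray out = net.output(input);
        net.clearLayerMaskArrays(); //always clear, or the mask silently applies to later calls
        return out;
    }
}

Every example below follows this same pattern: setLayerMaskArrays before output() or gradient computation, clearLayerMaskArrays afterwards.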
Example 1: testLSTMWithMasking
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testLSTMWithMasking() {
    //Basic test of GravesLSTM layer with masking and global pooling
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 10;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;
    int miniBatchSize = 3;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.AVG, PoolingType.SUM, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                        .build())
                .layer(1, new GlobalPoolingLayer.Builder().poolingType(pt).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.zeros(miniBatchSize, nIn, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            for (int j = 0; j < nIn; j++) {
                for (int k = 0; k < timeSeriesLength; k++) {
                    input.putScalar(new int[] {i, j, k}, r.nextDouble() - 0.5);
                }
            }
        }

        //Example i has (timeSeriesLength - i) real time steps; the rest are masked out
        INDArray featuresMask = Nd4j.create(miniBatchSize, timeSeriesLength);
        for (int i = 0; i < miniBatchSize; i++) {
            int to = timeSeriesLength - i;
            for (int j = 0; j < to; j++) {
                featuresMask.putScalar(i, j, 1.0);
            }
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
        for (int i = 0; i < miniBatchSize; i++) {
            int idx = r.nextInt(nOut);
            labels.putScalar(i, idx, 1.0);
        }

        mln.setLayerMaskArrays(featuresMask, null);

        if (PRINT_RESULTS) {
            System.out.println("testLSTMWithMasking() - " + pt + ", minibatch = " + miniBatchSize);
            for (int j = 0; j < mln.getnLayers(); j++)
                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels, featuresMask, null);

        assertTrue(gradOK);
    }
}
Example 2: testMaskingRnn
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testMaskingRnn() {
    int timeSeriesLength = 5;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;
    int[] minibatchSizes = new int[] {1, 3};

    for (int miniBatchSize : minibatchSizes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                        .build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder()
                        .poolingType(PoolingType.AVG).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength}).subi(0.5);

        INDArray mask;
        if (miniBatchSize == 1) {
            mask = Nd4j.create(new double[] {1, 1, 1, 1, 0});
        } else {
            //Zero-padded at the end: effective time series lengths are 5, 4 and 3
            mask = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
        for (int i = 0; i < miniBatchSize; i++) {
            int idx = r.nextInt(nOut);
            labels.putScalar(i, idx, 1.0);
        }

        net.setLayerMaskArrays(mask, null);
        INDArray outputMasked = net.output(input);

        net.clearLayerMaskArrays();

        //The masked batch output should match the output for each unpadded example, taken one at a time
        for (int i = 0; i < miniBatchSize; i++) {
            INDArray maskRow = mask.getRow(i);
            int tsLength = maskRow.sumNumber().intValue();

            INDArray inputSubset = input.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                    NDArrayIndex.interval(0, tsLength));
            INDArray outSubset = net.output(inputSubset);
            INDArray outputMaskedSubset = outputMasked.getRow(i);

            assertEquals(outSubset, outputMaskedSubset);
        }
    }
}
Example 3: testMaskingCnnDim3_SingleExample
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3
    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                        .stride(height, 1).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new int[] {1, depthIn, height, 5}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);
        net.computeGradientAndScore();
    }
}
Example 4: testMaskingCnnDim2_SingleExample
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2
    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                        .stride(1, width).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new int[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1});
        net.setLabels(labels);
        net.computeGradientAndScore();
    }
}
Example 5: testMaskingCnnDim3
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testMaskingCnnDim3() {
    //Test masking, where mask is along dimension 3
    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                        .stride(height, 1).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, width]
        INDArray maskArray =
                Nd4j.create(new double[][] {{1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 0}, {1, 1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = width - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                    NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
            assertArrayEquals(new int[] {1, depthIn, height, width - i}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}
Example 6: testMaskingCnnDim2
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; //import the package/class required by the method
@Test
public void testMaskingCnnDim2() {
    //Test masking, where mask is along dimension 2
    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 5;
    int width = 4;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                        .stride(1, width).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, height]
        INDArray maskArray = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            System.out.println(i);
            int numSteps = height - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                    NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
            assertArrayEquals(new int[] {1, depthIn, height - i, width}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i);

            assertEquals(outSubset, outMaskedSubset);
        }
    }
}