This article collects typical usage examples of the Java method org.deeplearning4j.nn.conf.MultiLayerConfiguration.getInputPreProcess. If you are wondering what MultiLayerConfiguration.getInputPreProcess does, how to use it, or where to find examples of it in practice, the curated code samples here should help. You can also explore the containing class, org.deeplearning4j.nn.conf.MultiLayerConfiguration, for further usage examples.
Seven code examples of MultiLayerConfiguration.getInputPreProcess are shown below, ordered by popularity.
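Before the examples, a minimal sketch (not taken from the samples below) of what the method does: getInputPreProcess(i) returns the InputPreProcessor that was set, or automatically added, for layer i, and null when the previous layer's activations already match. The class and method names in this sketch (PreProcessorInspector, printPreProcessors) are hypothetical.

import org.deeplearning4j.nn.conf.InputPreProcessor;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;

public class PreProcessorInspector {
    // Hypothetical helper: report which layers received an input preprocessor.
    public static void printPreProcessors(MultiLayerConfiguration conf) {
        for (int i = 0; i < conf.getConfs().size(); i++) {
            InputPreProcessor p = conf.getInputPreProcess(i);
            System.out.println("layer " + i + ": "
                    + (p == null ? "no preprocessor" : p.getClass().getSimpleName()));
        }
    }
}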
Example 1: testDeconvolution

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on

@Test
public void testDeconvolution() {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            // Deconvolution: out = stride*(in-1) + kernel - 2*pad -> 2*(28-1) + 2 - 0 = 56 -> 56x56x3
            .layer(0, new Deconvolution2D.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build())
            // (56-2+2)/2+1 = 29 -> 29x29x3
            .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build())
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(29, proc.getInputHeight());
    assertEquals(29, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(29 * 29 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
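A quick sanity check of the shape arithmetic in the comments above, as a hypothetical standalone snippet (not part of the test): deconvolution grows the spatial dimensions, subsampling then shrinks them.

// Deconvolution: out = stride * (in - 1) + kernel - 2 * padding
int deconvOut = 2 * (28 - 1) + 2 - 2 * 0;       // 56
// Subsampling:  out = (in - kernel + 2 * padding) / stride + 1
int poolOut = (deconvOut - 2 + 2 * 1) / 2 + 1;  // 29, matching the 29x29x3 assertions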
Example 2: testSubSamplingWithPadding

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
@Test
public void testSubSamplingWithPadding() {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(0, new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) // (28-2+0)/2+1 = 14
            .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) // (14-2+2)/2+1 = 8 -> 8x8x3
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
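The same kind of check for this test, assuming the standard output-size formula out = (in - kernel + 2*padding) / stride + 1 for both convolution and subsampling (hypothetical snippet):

int convOut = (28 - 2 + 2 * 0) / 2 + 1;       // 14
int poolOut = (convOut - 2 + 2 * 1) / 2 + 1;  // 8, matching the 8x8x3 assertions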
Example 3: testUpsampling

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
@Test
public void testUpsampling() {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) // (28-2+0)/2+1 = 14
            .layer(new Upsampling2D.Builder().size(3).build()) // 14 * 3 = 42
            .layer(new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(42, proc.getInputHeight());
    assertEquals(42, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(42 * 42 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
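Upsampling2D simply multiplies both spatial dimensions by its size factor, which is where the dense layer's input size comes from (hypothetical snippet):

int convOut = (28 - 2 + 2 * 0) / 2 + 1;  // 14
int upOut = convOut * 3;                 // 42
int denseNIn = upOut * upOut * 3;        // 42 * 42 * 3 = 5292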
Example 4: testSeparableConv2D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
@Test
public void testSeparableConv2D() {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new SeparableConvolution2D.Builder(2, 2)
                    .depthMultiplier(2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build()) // (28-2+0)/2+1 = 14
            .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) // (14-2+2)/2+1 = 8 -> 8x8x3
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
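A separable convolution is a depthwise convolution (whose output channel count is scaled by depthMultiplier) followed by a 1x1 pointwise convolution, so depthMultiplier changes the parameter count but not the spatial output size. A hypothetical weight-count breakdown for the layer above, biases omitted:

int depthwiseWeights = 2 * 2 * 1 * 2;  // kernelH * kernelW * nIn * depthMultiplier = 8
int pointwiseWeights = (1 * 2) * 3;    // 1x1 conv: (nIn * depthMultiplier) * nOut = 6
// The spatial size is unaffected: (28 - 2 + 0) / 2 + 1 = 14, as in the plain conv tests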
Example 5: testDeconv2D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
@Test
public void testDeconv2D() {
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            // Deconvolution: out = stride*(in-1) + kernel - 2*pad -> 2*(28-1) + 2 - 0 = 56 -> 56x56x3
            .layer(new Deconvolution2D.Builder(2, 2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build())
            // (56-2+2)/2+1 = 29 -> 29x29x3
            .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build())
            .layer(2, new OutputLayer.Builder().nOut(3).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(29, proc.getInputHeight());
    assertEquals(29, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(29 * 29 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
Example 6: testCnnToDense

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
@Test
public void testCnnToDense() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(4, 4) // 28*28*1 => 15*15*10
                    .nIn(1).nOut(10).padding(2, 2)
                    .stride(2, 2)
                    .weightInit(WeightInit.RELU)
                    .activation(Activation.RELU)
                    .build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                    .activation(Activation.RELU).nOut(200).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(200)
                    .nOut(5).weightInit(WeightInit.RELU)
                    .activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutionalFlat(28, 28, 1))
            .backprop(true).pretrain(false)
            .build();

    assertNotNull(conf.getInputPreProcess(0));
    assertNotNull(conf.getInputPreProcess(1));
    assertTrue(conf.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor);
    assertTrue(conf.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);
    FeedForwardToCnnPreProcessor ffcnn = (FeedForwardToCnnPreProcessor) conf.getInputPreProcess(0);
    CnnToFeedForwardPreProcessor cnnff = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(1);
    assertEquals(28, ffcnn.getInputHeight());
    assertEquals(28, ffcnn.getInputWidth());
    assertEquals(1, ffcnn.getNumChannels());
    assertEquals(15, cnnff.getInputHeight());
    assertEquals(15, cnnff.getInputWidth());
    assertEquals(10, cnnff.getNumChannels());

    assertEquals(15 * 15 * 10, ((FeedForwardLayer) conf.getConf(1).getLayer()).getNIn());
}
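Because the input type is convolutionalFlat, two preprocessors are inserted: FeedForwardToCnnPreProcessor reshapes the flat 784-element input rows into [minibatch, 1, 28, 28] before layer 0, and CnnToFeedForwardPreProcessor flattens the 15x15x10 convolutional activations into [minibatch, 2250] before the dense layer. The arithmetic, as a hypothetical snippet:

int flatIn = 28 * 28 * 1;                // 784 values per input row
int convOut = (28 - 4 + 2 * 2) / 2 + 1;  // 15
int denseNIn = convOut * convOut * 10;   // 2250, the dense layer's nIn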
Example 7: toComputationGraph

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; // import the package/class the method depends on
/**
 * Convert a MultiLayerNetwork to an equivalent ComputationGraph.
 *
 * @param net the MultiLayerNetwork to convert
 * @return a ComputationGraph equivalent to the given network (including parameters and updater state)
 */
public static ComputationGraph toComputationGraph(MultiLayerNetwork net) {
    // We rely heavily here on the fact that the topological sort order - and hence the layout
    // of parameters - is by definition identical for a MLN and a "single stack" computation
    // graph. This also has to hold for the updater state...
    ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
            .graphBuilder();
    MultiLayerConfiguration origConf = net.getLayerWiseConfigurations().clone();

    int layerIdx = 0;
    String lastLayer = "in";
    b.addInputs("in");
    for (NeuralNetConfiguration c : origConf.getConfs()) {
        String currLayer = String.valueOf(layerIdx);
        InputPreProcessor preproc = origConf.getInputPreProcess(layerIdx);
        b.addLayer(currLayer, c.getLayer(), preproc, lastLayer);
        lastLayer = currLayer;
        layerIdx++;
    }
    b.setOutputs(lastLayer);

    ComputationGraphConfiguration conf = b.build();
    ComputationGraph cg = new ComputationGraph(conf);
    cg.init();

    cg.setParams(net.params());

    // Also copy across updater state:
    INDArray updaterState = net.getUpdater().getStateViewArray();
    if (updaterState != null) {
        cg.getUpdater().getUpdaterStateViewArray().assign(updaterState);
    }

    return cg;
}
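A hypothetical usage sketch for the conversion above (the method name convertAndCheck and both parameters are placeholders): convert a fitted network, then confirm the graph produces the same output as the original.

// Hypothetical caller: verify the converted graph matches the original network.
public static void convertAndCheck(MultiLayerNetwork net, INDArray features) {
    ComputationGraph cg = toComputationGraph(net);
    INDArray mlnOut = net.output(features, false);      // inference-mode output
    INDArray cgOut = cg.outputSingle(false, features);  // same input through the graph
    System.out.println("outputs equal: " + mlnOut.equals(cgOut));
}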