本文整理汇总了Java中org.deeplearning4j.nn.graph.ComputationGraph.output方法的典型用法代码示例。如果您正苦于以下问题:Java ComputationGraph.output方法的具体用法?Java ComputationGraph.output怎么用?Java ComputationGraph.output使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.deeplearning4j.nn.graph.ComputationGraph
的用法示例。
在下文中一共展示了ComputationGraph.output方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getOutput
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Loads a serialized DL4J model from the given stream and runs one forward pass on {@code image}.
 *
 * @param isModel stream containing the serialized model (MultiLayerNetwork or ComputationGraph)
 * @param image   input activations to feed forward
 * @return the model's (first) output activations
 * @throws IllegalArgumentException if the stream cannot be deserialized into a model
 */
private INDArray getOutput(InputStream isModel, INDArray image) {
org.deeplearning4j.nn.api.Model loaded;
try {
// The generic ModelGuesser is deliberately skipped here — it attempts to interpret
// the stream as a Keras model.
// loaded = ModelGuesser.loadModelGuess(isModel);
loaded = loadModel(isModel);
} catch (Exception e) {
throw new IllegalArgumentException("Not able to load model.", e);
}
// Dispatch on the concrete network type; both branches initialize before inference.
if (!(loaded instanceof MultiLayerNetwork)) {
ComputationGraph graph = (ComputationGraph) loaded;
graph.init();
// ComputationGraph may have several outputs; this model exposes one.
return graph.output(image)[0];
}
MultiLayerNetwork network = (MultiLayerNetwork) loaded;
network.init();
return network.output(image);
}
示例2: testImageNetLabels
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Smoke test: a pretrained VGG19 must classify the bundled golden-retriever image
 * such that the decoded ImageNet labels contain "golden_retriever".
 */
@Test
public void testImageNetLabels() throws IOException {
// Fetch pretrained ImageNet weights; the label-count constructor argument is irrelevant here.
ZooModel model = new VGG19(1, 123);
ComputationGraph initializedModel = (ComputationGraph) model.initPretrained();

// Read the test resource as a 224x224 RGB tensor and apply VGG mean subtraction.
ClassLoader classloader = Thread.currentThread().getContextClassLoader();
NativeImageLoader loader = new NativeImageLoader(224, 224, 3);
INDArray image = loader.asMatrix(classloader.getResourceAsStream("goldenretriever.jpg"));
DataNormalization scaler = new VGG16ImagePreProcessor();
scaler.transform(image);

// Inference-mode forward pass; decode the top predictions into human-readable labels.
INDArray[] output = initializedModel.output(false, image);
String decodedLabels = new ImageNetLabels().decodePredictions(output[0]);
log.info(decodedLabels);
assertTrue(decodedLabels.contains("golden_retriever"));

// Release native workspace memory held by this model before the next test runs.
Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread();
System.gc();
}
示例3: testElementWiseVertexForwardAdd
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Verifies that an ElementWiseVertex with Op.Add produces the element-wise sum of
 * its three inputs on the forward pass (error must be ~0).
 */
@Test
public void testElementWiseVertexForwardAdd() {
int batchsz = 24;
int featuresz = 17;
ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder()
.addInputs("input1", "input2", "input3")
.addLayer("denselayer",
new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY)
.build(),
"input1")
/* denselayer is not actually used, but it seems that you _need_ to have trainable parameters, otherwise, you get
 * Invalid shape: Requested INDArray shape [1, 0] contains dimension size values < 1 (all dimensions must be 1 or more)
 * at org.nd4j.linalg.factory.Nd4j.checkShapeValues(Nd4j.java:4877)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4867)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4820)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:3948)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:409)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:341)
 */
.addVertex("elementwiseAdd", new ElementWiseVertex(ElementWiseVertex.Op.Add), "input1",
"input2", "input3")
.addLayer("Add", new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
"elementwiseAdd")
.setOutputs("Add", "denselayer").build();
ComputationGraph cg = new ComputationGraph(cgc);
cg.init();
INDArray input1 = Nd4j.rand(batchsz, featuresz);
INDArray input2 = Nd4j.rand(batchsz, featuresz);
INDArray input3 = Nd4j.rand(batchsz, featuresz);
// Expected forward result: the element-wise sum of the three inputs.
INDArray target = input1.dup().addi(input2).addi(input3);
INDArray output = cg.output(input1, input2, input3)[0];
// Fix: previously "rms" was only the SUM of squared errors (no sqrt), inconsistent
// with the subtract test. Compute a true root of the summed squared error.
INDArray diff = output.sub(target);
double rms = Math.sqrt(diff.mul(diff).sumNumber().doubleValue());
Assert.assertEquals(0.0, rms, this.epsilon);
}
示例4: testElementWiseVertexForwardProduct
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Verifies that an ElementWiseVertex with Op.Product produces the element-wise product
 * of its three inputs on the forward pass (error must be ~0).
 */
@Test
public void testElementWiseVertexForwardProduct() {
int batchsz = 24;
int featuresz = 17;
ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder()
.addInputs("input1", "input2", "input3")
.addLayer("denselayer",
new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY)
.build(),
"input1")
/* denselayer is not actually used, but it seems that you _need_ to have trainable parameters, otherwise, you get
 * Invalid shape: Requested INDArray shape [1, 0] contains dimension size values < 1 (all dimensions must be 1 or more)
 * at org.nd4j.linalg.factory.Nd4j.checkShapeValues(Nd4j.java:4877)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4867)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4820)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:3948)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:409)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:341)
 */
.addVertex("elementwiseProduct", new ElementWiseVertex(ElementWiseVertex.Op.Product), "input1",
"input2", "input3")
.addLayer("Product", new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
"elementwiseProduct")
.setOutputs("Product", "denselayer").build();
ComputationGraph cg = new ComputationGraph(cgc);
cg.init();
INDArray input1 = Nd4j.rand(batchsz, featuresz);
INDArray input2 = Nd4j.rand(batchsz, featuresz);
INDArray input3 = Nd4j.rand(batchsz, featuresz);
// Expected forward result: the element-wise product of the three inputs.
INDArray target = input1.dup().muli(input2).muli(input3);
INDArray output = cg.output(input1, input2, input3)[0];
// Fix: previously "rms" was only the SUM of squared errors (no sqrt), inconsistent
// with the subtract test. Compute a true root of the summed squared error.
INDArray diff = output.sub(target);
double rms = Math.sqrt(diff.mul(diff).sumNumber().doubleValue());
Assert.assertEquals(0.0, rms, this.epsilon);
}
示例5: testElementWiseVertexForwardSubtract
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Verifies that an ElementWiseVertex with Op.Subtract produces the element-wise
 * difference of its two inputs on the forward pass (error must be ~0).
 */
@Test
public void testElementWiseVertexForwardSubtract() {
int batchsz = 24;
int featuresz = 17;
// Graph: two inputs feed the subtract vertex; an identity layer exposes its result.
// The dense layer exists only so the graph has trainable parameters — without any,
// ComputationGraph.init() fails with
// "Invalid shape: Requested INDArray shape [1, 0] contains dimension size values < 1".
ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder()
.addInputs("input1", "input2")
.addLayer("denselayer",
new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY)
.build(),
"input1")
.addVertex("elementwiseSubtract", new ElementWiseVertex(ElementWiseVertex.Op.Subtract),
"input1", "input2")
.addLayer("Subtract", new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
"elementwiseSubtract")
.setOutputs("Subtract", "denselayer").build();
ComputationGraph cg = new ComputationGraph(cgc);
cg.init();
INDArray input1 = Nd4j.rand(batchsz, featuresz);
INDArray input2 = Nd4j.rand(batchsz, featuresz);
// Expected forward result: input1 - input2, element-wise.
INDArray expected = input1.dup().subi(input2);
INDArray actual = cg.output(input1, input2)[0];
// Root of the summed squared error between actual and expected; must be ~0.
INDArray diff = actual.sub(expected);
double rms = Math.sqrt(diff.mul(diff).sumNumber().doubleValue());
Assert.assertEquals(0.0, rms, this.epsilon);
}
示例6: testSimple
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Verifies that ShiftVertex adds its scalar shift value to every element of its input
 * on the forward pass (error must be ~0).
 */
@Test
public void testSimple() {
/*
 * This function _simply_ tests whether ShiftVertex is _in fact_ adding the shift value to it's inputs.
 */
// Just first n primes / 10.
INDArray input = Nd4j
.create(new double[][] {{0.2, 0.3, 0.5}, {0.7, 1.1, 1.3}, {1.7, 1.9, 2.3}, {2.9, 3.1, 3.7}});
double sf = 4.1;
ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input")
.addLayer("denselayer",
new DenseLayer.Builder().nIn(input.columns()).nOut(1)
.activation(Activation.IDENTITY).build(),
"input")
/* denselayer is not actually used, but it seems that you _need_ to have trainable parameters, otherwise, you get
 * Invalid shape: Requested INDArray shape [1, 0] contains dimension size values < 1 (all dimensions must be 1 or more)
 * at org.nd4j.linalg.factory.Nd4j.checkShapeValues(Nd4j.java:4877)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4867)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4820)
 * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:3948)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:409)
 * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:341)
 */
.addLayer("identityinputactivation",
new ActivationLayer.Builder().activation(Activation.IDENTITY).build(), "input")
.addVertex("shiftvertex", new ShiftVertex(sf), "identityinputactivation")
.addLayer("identityshiftvertex",
new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
"shiftvertex")
.setOutputs("identityshiftvertex", "denselayer").build();
ComputationGraph cg = new ComputationGraph(cgc);
cg.init();
// We can call outputSingle, because we only have a single output layer. It has nothing to do with minibatches.
INDArray output = cg.output(true, input)[0];
// Simplified: build the expected result directly (input + sf) instead of zeros + two addi calls.
INDArray target = input.add(sf);
// Fix: previously "rms" was only the SUM of squared errors (no sqrt), inconsistent
// with the subtract test. Compute a true root of the summed squared error.
INDArray diff = output.sub(target);
double rms = Math.sqrt(diff.mul(diff).sumNumber().doubleValue());
Assert.assertEquals(0.0, rms, this.epsilon);
}
示例7: testLastTimeStepWithTransfer
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
/**
 * Verifies that adding a LastTimeStepVertex via transfer learning collapses the
 * recurrent output [batch, classes, timeSteps] to the last step [batch, classes],
 * both through output() and through feedForward().
 */
@Test
public void testLastTimeStepWithTransfer(){
int lstmLayerSize = 16;
int numLabelClasses = 10;
int numInputs = 5;
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.trainingWorkspaceMode(WorkspaceMode.NONE)
.inferenceWorkspaceMode(WorkspaceMode.NONE)
.seed(123) //Random number generator seed for improved repeatability. Optional.
.updater(new AdaDelta())
.weightInit(WeightInit.XAVIER)
.graphBuilder()
.addInputs("rr")
.setInputTypes(InputType.recurrent(30))
.addLayer("1", new GravesLSTM.Builder().activation(Activation.TANH).nIn(numInputs).nOut(lstmLayerSize).dropOut(0.9).build(), "rr")
.addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.activation(Activation.SOFTMAX).nOut(numLabelClasses).build(), "1")
.pretrain(false).backprop(true)
.setOutputs("2")
.build();
ComputationGraph net = new ComputationGraph(conf);
net.init();
// Graft a LastTimeStepVertex onto the trained graph and make it the sole output.
ComputationGraph updatedModel = new TransferLearning.GraphBuilder(net)
.addVertex("laststepoutput", new LastTimeStepVertex("rr"), "2")
.setOutputs("laststepoutput")
.build();
// Input: minibatch of 10, numInputs features, 16 time steps.
INDArray input = Nd4j.rand(new int[]{10, numInputs, 16});
INDArray[] out = updatedModel.output(input);
assertNotNull(out);
assertEquals(1, out.length);
assertNotNull(out[0]);
// Time dimension must be gone: [10, numLabelClasses] rather than [10, numLabelClasses, 16].
assertArrayEquals(new int[]{10, numLabelClasses}, out[0].shape());
Map<String,INDArray> acts = updatedModel.feedForward(input, false);
assertEquals(4, acts.size()); //2 layers + input + vertex output
assertNotNull(acts.get("laststepoutput"));
assertArrayEquals(new int[]{10, numLabelClasses}, acts.get("laststepoutput").shape());
// Fix: dropped the unused local "String toString = ..."; keep the call itself as a
// smoke check that toString() does not throw on the workspace-detached result.
out[0].toString();
}