本文整理汇总了Java中org.deeplearning4j.nn.graph.ComputationGraph.computeGradientAndScore方法的典型用法代码示例。如果您正苦于以下问题:Java ComputationGraph.computeGradientAndScore方法的具体用法?Java ComputationGraph.computeGradientAndScore怎么用?Java ComputationGraph.computeGradientAndScore使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.deeplearning4j.nn.graph.ComputationGraph
的用法示例。
在下文中一共展示了ComputationGraph.computeGradientAndScore方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkScopesTestCGAS
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void checkScopesTestCGAS() throws Exception {
    // Verify gradient computation runs cleanly under both SEPARATE and SINGLE
    // workspace modes, switching modes on a single shared network instance.
    final ComputationGraph net = createNet();
    final WorkspaceMode[] modes = {WorkspaceMode.SEPARATE, WorkspaceMode.SINGLE};

    for (WorkspaceMode mode : modes) {
        log.info("Starting test: {}", mode);
        net.getConfiguration().setTrainingWorkspaceMode(mode);
        net.getConfiguration().setInferenceWorkspaceMode(mode);

        // 8 examples of single-channel 28x28 input, 10-class labels
        INDArray features = Nd4j.rand(new int[]{8, 1, 28, 28});
        INDArray labels = Nd4j.rand(8, 10);
        net.setInputs(features);
        net.setLabels(labels);
        net.computeGradientAndScore();
    }
}
示例2: testLambdaConf
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testLambdaConf() {
    // Two different lambda values must produce different scores on identical
    // data, proving the lambda hyper-parameter actually affects the loss.
    final int numClasses = 2;
    final int numExamples = 150;

    INDArray features = Nd4j.rand(numExamples, 4);
    INDArray oneHotLabels = Nd4j.zeros(numExamples, numClasses);
    Random rng = new Random(12345);
    for (int row = 0; row < numExamples; row++) {
        oneHotLabels.putScalar(row, rng.nextInt(numClasses), 1.0);
    }

    double[] lambdas = {0.1, 0.01};
    double[] scores = new double[2];
    for (int i = 0; i < lambdas.length; i++) {
        ComputationGraph graph = getGraph(numClasses, lambdas[i]);
        graph.setInput(0, features);
        graph.setLabel(0, oneHotLabels);
        graph.computeGradientAndScore();
        scores[i] = graph.score();
    }
    assertNotEquals(scores[0], scores[1]);
}
示例3: testWithPreprocessorsCG
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testWithPreprocessorsCG(){
    //https://github.com/deeplearning4j/deeplearning4j/issues/4347
    //Regression test: layerVertex.setInput() used to apply the preprocessor with the
    // result not being detached properly from the workspace.
    for (WorkspaceMode mode : WorkspaceMode.values()) {
        System.out.println(mode);

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .trainingWorkspaceMode(mode)
                .inferenceWorkspaceMode(mode)
                .graphBuilder()
                .addInputs("in")
                .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), new DupPreProcessor(), "in")
                // .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), "in") //Note that no preprocessor is OK
                .addLayer("rnn", new GravesLSTM.Builder().nIn(5).nOut(8).build(), "e")
                .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.SIGMOID).nOut(3).build(), "rnn")
                .setInputTypes(InputType.recurrent(10))
                .setOutputs("out")
                .build();

        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        INDArray[] inputs = new INDArray[]{Nd4j.zeros(1, 10, 5)};

        // Feed-forward in both inference and training mode before the gradient pass
        for (boolean train : new boolean[]{false, true}) {
            net.clear();
            net.feedForward(inputs, train);
        }

        net.setInputs(inputs);
        net.setLabels(Nd4j.rand(1, 3, 5));
        net.computeGradientAndScore();
    }
}
示例4: elementWiseMultiplicationLayerTest
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void elementWiseMultiplicationLayerTest(){
// Verifies an ElementWiseMultiplicationLayer (per-element scaling) can be trained:
// the score under a cosine-proximity loss must drop during learning, numerical
// gradient checks must pass, and the trained model must survive serialization.
for(Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) {
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
.seed(12345L)
.weightInit(new UniformDistribution(0, 1))
.graphBuilder()
.addInputs("features")
.addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(4)
.activation(Activation.TANH)
.build(), "features")
.addLayer("elementWiseMul", new ElementWiseMultiplicationLayer.Builder().nIn(4).nOut(4)
.activation(a)
.build(), "dense")
.addLayer("loss", new LossLayer.Builder(LossFunctions.LossFunction.COSINE_PROXIMITY)
.activation(Activation.IDENTITY).build(), "elementWiseMul")
.setOutputs("loss")
.pretrain(false).backprop(true).build();
ComputationGraph netGraph = new ComputationGraph(conf);
netGraph.init();
log.info("params before learning: " + netGraph.getLayer(1).paramTable());
//Run a number of iterations of learning, manually making some pseudo data.
//The idea is simple: since an element-wise multiplication layer is just a scaling, and the cos-sim
// is mainly decided by the fourth value, if everything runs well we will get a large weight for the fourth value.
INDArray features = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}, {1, 2, 3, 0}});
INDArray labels = Nd4j.create(new double[][]{{1, 1, 1, 8}, {1, 1, 1, 2}, {1, 1, 1, 1}});
netGraph.setInputs(features);
netGraph.setLabels(labels);
// Baseline score before any fitting
netGraph.computeGradientAndScore();
double scoreBefore = netGraph.score();
String msg;
// Five epochs of training on the fixed pseudo data
for (int epoch = 0; epoch < 5; epoch++)
netGraph.fit(new INDArray[]{features}, new INDArray[]{labels});
netGraph.computeGradientAndScore();
double scoreAfter = netGraph.score();
//Can't test in 'characteristic mode of operation' if not learning
msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn="
+ "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id"
+ ", doLearningFirst=" + "true" + " (before=" + scoreBefore
+ ", scoreAfter=" + scoreAfter + ")";
// Require at least a 20% score reduction to count as learning
assertTrue(msg, scoreAfter < 0.8 * scoreBefore);
// expectation in case linear regression(with only element wise multiplication layer): large weight for the fourth weight
log.info("params after learning: " + netGraph.getLayer(1).paramTable());
// Numerical gradient check over the same features/labels
boolean gradOK = checkGradients(netGraph, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, new INDArray[]{features}, new INDArray[]{labels});
msg = "elementWiseMultiplicationLayerTest() - activationFn=" + "ID" + ", lossFn=" + "Cos-sim"
+ ", outputActivation=" + "Id" + ", doLearningFirst=" + "true";
assertTrue(msg, gradOK);
// Round-trip serialization check of the trained model
TestUtils.testModelSerialization(netGraph);
}
}
示例5: testMlnToCompGraph
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testMlnToCompGraph() {
    // Converting a MultiLayerNetwork to a ComputationGraph must preserve outputs,
    // score, gradients, and parameter values after an identical fit step.
    Nd4j.getRandom().setSeed(12345);

    for (int netIdx = 0; netIdx < 3; netIdx++) {
        final MultiLayerNetwork mln;
        if (netIdx == 0) {
            mln = getNet1(false);
        } else if (netIdx == 1) {
            mln = getNet1(true);
        } else {
            mln = getNet2();
        }

        // First two nets take 4d (CNN-style) input; the third takes 3d (RNN-style)
        boolean fourDimInput = netIdx <= 1;
        INDArray features = fourDimInput ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10});
        INDArray labels = fourDimInput ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10});

        ComputationGraph graph = mln.toComputationGraph();

        // Forward-pass outputs must match exactly
        INDArray mlnOut = mln.output(features);
        INDArray graphOut = graph.outputSingle(features);
        assertEquals(mlnOut, graphOut);

        // Score and gradients must agree
        mln.setInput(features);
        mln.setLabels(labels);
        graph.setInputs(features);
        graph.setLabels(labels);
        mln.computeGradientAndScore();
        graph.computeGradientAndScore();
        assertEquals(mln.score(), graph.score(), 1e-6);
        assertEquals(mln.gradient().gradient(), graph.gradient().gradient());

        // One fit step must leave both networks with identical parameters
        mln.fit(features, labels);
        graph.fit(new INDArray[]{features}, new INDArray[]{labels});
        assertEquals(mln.params(), graph.params());
    }
}
示例6: testSerializationCompGraph
import org.deeplearning4j.nn.graph.ComputationGraph; //导入方法依赖的package包/类
@Test
public void testSerializationCompGraph() throws Exception {
    // A bidirectional-LSTM graph must produce identical outputs, score and
    // gradients after an in-memory write/restore round trip, in every workspace mode.
    for (WorkspaceMode mode : WorkspaceMode.values()) {
        log.info("*** Starting workspace mode: " + mode);
        Nd4j.getRandom().setSeed(12345);

        ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .trainingWorkspaceMode(mode)
                .inferenceWorkspaceMode(mode)
                .updater(new Adam())
                .graphBuilder()
                .addInputs("in")
                .layer("0", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "in")
                .layer("1", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "0")
                .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                        .nIn(10).nOut(10).build(), "1")
                .setOutputs("2")
                .build();

        ComputationGraph original = new ComputationGraph(conf1);
        original.init();

        INDArray in = Nd4j.rand(new int[]{3, 10, 5});
        INDArray labels = Nd4j.rand(new int[]{3, 10, 5});
        original.fit(new DataSet(in, labels));

        // Serialize to memory, then restore (saveUpdater = true)
        byte[] serialized;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            ModelSerializer.writeModel(original, baos, true);
            serialized = baos.toByteArray();
        }
        ComputationGraph restored =
                ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(serialized), true);

        // Fresh data: forward-pass outputs must match exactly
        in = Nd4j.rand(new int[]{3, 10, 5});
        labels = Nd4j.rand(new int[]{3, 10, 5});
        INDArray outOriginal = original.outputSingle(in);
        INDArray outRestored = restored.outputSingle(in);
        assertEquals(outOriginal, outRestored);

        // Score and gradients must also agree
        original.setInput(0, in);
        restored.setInput(0, in);
        original.setLabels(labels);
        restored.setLabels(labels);
        original.computeGradientAndScore();
        restored.computeGradientAndScore();
        assertEquals(original.score(), restored.score(), 1e-6);
        assertEquals(original.gradient().gradient(), restored.gradient().gradient());
    }
}