Java RnnOutputLayer Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.RnnOutputLayer. If you have been wondering what RnnOutputLayer is for, or how to use it in practice, the curated class examples below should help.


The RnnOutputLayer class belongs to the org.deeplearning4j.nn.conf.layers package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
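
Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: an RnnOutputLayer stacked on top of a recurrent layer inside a MultiLayerConfiguration. This is an illustrative sketch only, assuming a DL4J version of the same vintage as the examples below; the layer sizes (10 inputs, 20 LSTM units, 3 output classes) are hypothetical and not taken from any of the examples.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.GravesLSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class RnnOutputLayerSketch {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .list()
                // Recurrent layer producing 3D activations of shape [batch, 20, timeSteps]
                .layer(0, new GravesLSTM.Builder().nIn(10).nOut(20)
                        .activation(Activation.TANH).build())
                // RnnOutputLayer emits one prediction per time step; MCXENT loss with
                // softmax activation is the usual pairing for sequence classification
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(20).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
    }
}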

Example 1: makeLayer

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
public static FeedForwardLayer makeLayer(Config layerConfig) {

    Type layerType = Type.valueOf(layerConfig.getString("type"));
    switch (layerType) {
        case GravesLSTM:
            return new GravesLSTM.Builder()
                    .activation(layerConfig.getString("activation"))
                    .nIn(layerConfig.getInt("nIn"))
                    .nOut(layerConfig.getInt("nOut")).build();

        case RnnOutputLayer:
            return new RnnOutputLayer.Builder()
                    .activation(layerConfig.getString("activation"))
                    .lossFunction(LossFunctions.LossFunction.valueOf(layerConfig.getString("lossFunction")))
                    .nIn(layerConfig.getInt("nIn"))
                    .nOut(layerConfig.getInt("nOut")).build();

        default:
            throw new RuntimeException("Unsupported layer type: " + layerType);
    }
}
 
Developer: claytantor, Project: blueweave, Lines: 25, Source: NetworkTypeFactory.java

Example 2: testRnnTimeStepWithPreprocessor

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testRnnTimeStepWithPreprocessor() {

    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder()
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .list()
                                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                                    .inputPreProcessor(0, new FeedForwardToRnnPreProcessor()).pretrain(false)
                                    .backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 23, Source: MultiLayerTestRNN.java

Example 3: testRnnTimeStepWithPreprocessorGraph

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testRnnTimeStepWithPreprocessorGraph() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .graphBuilder().addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "0")
                    .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
                    .setOutputs("2").inputPreProcessor("0", new FeedForwardToRnnPreProcessor()).pretrain(false)
                    .backprop(true).build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 22, Source: MultiLayerTestRNN.java

Example 4: testTbpttMasking

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testTbpttMasking() {
    //Simple "does it throw an exception" type test...
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .graphBuilder().addInputs("in")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                                    .activation(Activation.IDENTITY).nIn(1).nOut(1).build(), "in")
                    .setOutputs("out").backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(8)
                    .tBPTTBackwardLength(8).build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    MultiDataSet data = new MultiDataSet(new INDArray[] {Nd4j.linspace(1, 10, 10).reshape(1, 1, 10)},
                    new INDArray[] {Nd4j.linspace(2, 20, 10).reshape(1, 1, 10)}, null,
                    new INDArray[] {Nd4j.ones(10)});

    net.fit(data);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 20, Source: ComputationGraphTestRNN.java

Example 5: getConfiguration

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration() {
    final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes();
    final int nLayers = hiddenLayerNodes.length + 1;

    final ListBuilder list = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(parameters.getIterations()).learningRate(parameters.getLearningRate()).rmsDecay(0.95)
            .seed(parameters.getSeed()).regularization(true).l2(0.001).list(nLayers).pretrain(false).backprop(true);

    for (int i = 0; i < nLayers; i++) {
        final int nIn = (i == 0) ? parameters.getInputSize() : hiddenLayerNodes[i - 1];
        if (i < nLayers - 1) {
            final GravesLSTM layer = new GravesLSTM.Builder().nIn(nIn).nOut(hiddenLayerNodes[i])
                    .updater(Updater.RMSPROP).activation("tanh").weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, layer);
        } else {
            // Use the size of the last hidden layer as nIn; the original hardcoded
            // hiddenLayerNodes[1], which only works with exactly two hidden layers
            final RnnOutputLayer outputLayer = new RnnOutputLayer.Builder(LossFunction.MCXENT).activation("softmax")
                    .updater(Updater.RMSPROP).nIn(hiddenLayerNodes[hiddenLayerNodes.length - 1])
                    .nOut(parameters.getOutputSize())
                    .weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(-0.08, 0.08)).build();
            list.layer(i, outputLayer);
        }
    }
    return list.build();
}
 
Developer: amrabed, Project: DL4J, Lines: 40, Source: LstmModel.java

Example 6: initializeClassifier

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
/**
 * The method used to initialize the classifier.
 *
 * @param data set of instances serving as training data
 * @throws Exception if something goes wrong in the training process
 */
@Override
public void initializeClassifier(Instances data) throws Exception {

  // Can classifier handle the data?
  getCapabilities().testWithFail(data);

  // Check basic network structure
  if (layers.length == 0) {
    throw new MissingOutputLayerException("No layers have been added!");
  }

  final Layer lastLayer = layers[layers.length - 1];
  if (!(lastLayer instanceof RnnOutputLayer)) {
    throw new MissingOutputLayerException("Last layer in network must be an output layer!");
  }

  ClassLoader origLoader = Thread.currentThread().getContextClassLoader();
  try {
    Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());

    data = initEarlyStopping(data);
    this.trainData = data;

    instanceIterator.initialize();

    createModel();

    // Setup the datasetiterators (needs to be done after the model initialization)
    trainIterator = getDataSetIterator(this.trainData);

    // Set the iteration listener
    model.setListeners(getListener());

    numEpochsPerformed = 0;
  } finally {
    Thread.currentThread().setContextClassLoader(origLoader);
  }
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 45, Source: RnnSequenceClassifier.java

Example 7: createModel

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Override
protected void createModel() throws Exception {
  final INDArray features = getFirstBatchFeatures(trainData);
  log.info("Feature shape: {}", features.shape());
  ComputationGraphConfiguration.GraphBuilder gb =
      netConfig
          .builder()
          .seed(getSeed())
          .inferenceWorkspaceMode(WorkspaceMode.SEPARATE)
          .trainingWorkspaceMode(WorkspaceMode.SEPARATE)
          .graphBuilder()
          .backpropType(BackpropType.TruncatedBPTT)
          .tBPTTBackwardLength(tBPTTbackwardLength)
          .tBPTTForwardLength(tBPTTforwardLength);

  // Set output size
  final Layer lastLayer = layers[layers.length - 1];
  final int nOut = trainData.numClasses();
  if (lastLayer instanceof RnnOutputLayer) {
    ((RnnOutputLayer) lastLayer).setNOut(nOut);
  }

  String currentInput = "input";
  gb.addInputs(currentInput);
  // Collect layers
  for (Layer layer : layers) {
    String lName = layer.getLayerName();
    gb.addLayer(lName, layer, currentInput);
    currentInput = lName;
  }
  gb.setOutputs(currentInput);
  gb.setInputTypes(InputType.inferInputType(features));

  ComputationGraphConfiguration conf = gb.pretrain(false).backprop(true).build();
  ComputationGraph model = new ComputationGraph(conf);
  model.init();
  this.model = model;
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 39, Source: RnnSequenceClassifier.java

Example 8: isSequenceCompatibleLayer

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
/**
 * Check if the given layer is compatible with sequence data (only embedding, RNN, and global pooling layers are allowed for now)
 *
 * @param layer Layer to check
 * @return True if compatible
 */
protected boolean isSequenceCompatibleLayer(Layer layer) {
  return layer instanceof EmbeddingLayer
      || layer instanceof AbstractLSTM
      || layer instanceof RnnOutputLayer
      || layer instanceof GlobalPoolingLayer;
}
 
Developer: Waikato, Project: wekaDeeplearning4j, Lines: 13, Source: RnnSequenceClassifier.java
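
A predicate like the one above is straightforward to use as a guard when assembling a network. Below is a hypothetical sketch of such a validation loop; the validateLayers helper is an assumption for illustration and is not part of the wekaDeeplearning4j source.

// Hypothetical helper: reject any layer that is not sequence-compatible
protected void validateLayers(Layer[] layers) {
    for (Layer layer : layers) {
        if (!isSequenceCompatibleLayer(layer)) {
            throw new IllegalArgumentException(
                    "Layer " + layer.getLayerName() + " is not compatible with sequence data");
        }
    }
}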

Example 9: testTruncatedBPTTSimple

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testTruncatedBPTTSimple() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 12;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    int nTimeSlices = 20;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                                    .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 0.5))
                                    .build())
                    .pretrain(false).backprop(true).backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, nTimeSlices * timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, nTimeSlices * timeSeriesLength});

    mln.fit(inputLong, labelsLong);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 38, Source: MultiLayerTestRNN.java

Example 10: testTBPTTLongerThanTS

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testTBPTTLongerThanTS() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 20;
    int tbpttLength = 1000;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MSE).nIn(8).nOut(nOut)
                                    .activation(Activation.IDENTITY).build())
                    .pretrain(false).backprop(true).backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray features = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength});
    INDArray labels = Nd4j.rand(new int[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray maskArrayInput = Nd4j.ones(miniBatchSize, timeSeriesLength);
    INDArray maskArrayOutput = Nd4j.ones(miniBatchSize, timeSeriesLength);

    DataSet ds = new DataSet(features, labels, maskArrayInput, maskArrayOutput);

    INDArray initialParams = mln.params().dup();
    mln.fit(ds);
    INDArray afterParams = mln.params();
    assertNotEquals(initialParams, afterParams);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 39, Source: MultiLayerTestRNN.java

Example 11: testTruncatedBPTTSimple

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testTruncatedBPTTSimple() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 12;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    int nTimeSlices = 20;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "0")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.DISTRIBUTION).nIn(8).nOut(nOut)
                                    .activation(Activation.SOFTMAX).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("out").pretrain(false).backprop(true).backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build();

    Nd4j.getRandom().setSeed(12345);
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, nTimeSlices * timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, nTimeSlices * timeSeriesLength});

    graph.fit(new INDArray[] {inputLong}, new INDArray[] {labelsLong});
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 38, Source: ComputationGraphTestRNN.java

Example 12: testTBPTTLongerThanTS

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testTBPTTLongerThanTS() {
    int tbpttLength = 100;
    int timeSeriesLength = 20;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "0")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .weightInit(WeightInit.DISTRIBUTION).nIn(8).nOut(nOut)
                                    .activation(Activation.SOFTMAX).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("out").pretrain(false).backprop(true).backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build();

    Nd4j.getRandom().setSeed(12345);
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray initialParams = graph.params().dup();
    graph.fit(new INDArray[] {inputLong}, new INDArray[] {labelsLong});
    INDArray afterParams = graph.params();

    assertNotEquals(initialParams, afterParams);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 40, Source: ComputationGraphTestRNN.java

Example 13: testDuplicateToTimeSeriesVertex

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
@Test
public void testDuplicateToTimeSeriesVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("in2d", "in3d")
                    .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d")
                    .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).build(), "duplicateTS")
                    .addLayer("out3d", new RnnOutputLayer.Builder().nIn(1).nOut(1).build(), "in3d")
                    .setOutputs("out", "out3d").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray in2d = Nd4j.rand(3, 5);
    INDArray in3d = Nd4j.rand(new int[] {3, 2, 7});

    graph.setInputs(in2d, in3d);

    INDArray expOut = Nd4j.zeros(3, 5, 7);
    for (int i = 0; i < 7; i++) {
        expOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, in2d);
    }

    GraphVertex gv = graph.getVertex("duplicateTS");
    gv.setInputs(in2d);
    INDArray outFwd = gv.doForward(true);
    assertEquals(expOut, outFwd);

    INDArray expOutBackward = expOut.sum(2);
    gv.setEpsilon(expOut);
    INDArray outBwd = gv.doBackward(false).getSecond()[0];
    assertEquals(expOutBackward, outBwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
 
Developer: deeplearning4j, Project: deeplearning4j, Lines: 38, Source: TestGraphNodes.java

Example 14: main

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
public static void main(String[] args) throws Exception {

        getModelData();
        
        System.out.println("Total memory = " + Runtime.getRuntime().totalMemory());

        int batchSize = 50;
        int vectorSize = 300;
        int nEpochs = 5;
        int truncateReviewsToLength = 300;

        MultiLayerConfiguration sentimentNN = new NeuralNetConfiguration.Builder()
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
                .updater(Updater.RMSPROP)
                .regularization(true).l2(1e-5)
                .weightInit(WeightInit.XAVIER)
                .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue).gradientNormalizationThreshold(1.0)
                .learningRate(0.0018)
                .list()
                .layer(0, new GravesLSTM.Builder().nIn(vectorSize).nOut(200)
                        .activation("softsign").build())
                .layer(1, new RnnOutputLayer.Builder().activation("softmax")
                        .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(200).nOut(2).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(sentimentNN);
        net.init();
        net.setListeners(new ScoreIterationListener(1));

        WordVectors wordVectors = WordVectorSerializer.loadGoogleModel(new File(GNEWS_VECTORS_PATH), true, false);
        DataSetIterator trainData = new AsyncDataSetIterator(new SentimentExampleIterator(EXTRACT_DATA_PATH, wordVectors, batchSize, truncateReviewsToLength, true), 1);
        DataSetIterator testData = new AsyncDataSetIterator(new SentimentExampleIterator(EXTRACT_DATA_PATH, wordVectors, 100, truncateReviewsToLength, false), 1);

        for (int i = 0; i < nEpochs; i++) {
            net.fit(trainData);
            trainData.reset();

            Evaluation evaluation = new Evaluation();
            while (testData.hasNext()) {
                DataSet t = testData.next();
                INDArray dataFeatures = t.getFeatureMatrix();
                INDArray dataLabels = t.getLabels();
                INDArray inMask = t.getFeaturesMaskArray();
                INDArray outMask = t.getLabelsMaskArray();
                INDArray predicted = net.output(dataFeatures, false, inMask, outMask);

                evaluation.evalTimeSeries(dataLabels, predicted, outMask);
            }
            testData.reset();

            System.out.println(evaluation.stats());
        }
}
 
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 54, Source: DL4JSentimentAnalysisExample.java

Example 15: buildLstmNetworks

import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; // import the required package/class
public static MultiLayerNetwork buildLstmNetworks(int nIn, int nOut) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .learningRate(learningRate)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.RMSPROP)
            .regularization(true)
            .l2(1e-4)
            .list()
            .layer(0, new GravesLSTM.Builder()
                    .nIn(nIn)
                    .nOut(lstmLayer1Size)
                    .activation(Activation.TANH)
                    .gateActivationFunction(Activation.HARDSIGMOID)
                    .dropOut(dropoutRatio)
                    .build())
            .layer(1, new GravesLSTM.Builder()
                    .nIn(lstmLayer1Size)
                    .nOut(lstmLayer2Size)
                    .activation(Activation.TANH)
                    .gateActivationFunction(Activation.HARDSIGMOID)
                    .dropOut(dropoutRatio)
                    .build())
            .layer(2, new DenseLayer.Builder()
            		.nIn(lstmLayer2Size)
            		.nOut(denseLayerSize)
            		.activation(Activation.RELU)
            		.build())
            .layer(3, new RnnOutputLayer.Builder()
                    .nIn(denseLayerSize)
                    .nOut(nOut)
                    .activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE)
                    .build())
            .backpropType(BackpropType.TruncatedBPTT)
            .tBPTTForwardLength(truncatedBPTTLength)
            .tBPTTBackwardLength(truncatedBPTTLength)
            .pretrain(false)
            .backprop(true)
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    net.setListeners(new ScoreIterationListener(100));
    return net;
}
 
Developer: IsaacChanghau, Project: StockPrediction, Lines: 49, Source: RecurrentNets.java
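
For completeness, here is an illustrative sketch of how a network like the one built in Example 15 might be used; the sizes and the call site are assumptions, not part of the StockPrediction source. Networks ending in an RnnOutputLayer consume 3D features of shape [miniBatchSize, nIn, timeSeriesLength] and emit one prediction per time step.

// Hypothetical usage of buildLstmNetworks (sizes are illustrative assumptions)
MultiLayerNetwork net = RecurrentNets.buildLstmNetworks(5, 1);
INDArray features = Nd4j.rand(new int[] {32, 5, 50}); // [batch, nIn, timeSteps]
INDArray labels = Nd4j.rand(new int[] {32, 1, 50});   // one regression target per step
net.fit(features, labels);
INDArray output = net.rnnTimeStep(features);          // inference that preserves LSTM state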


Note: The org.deeplearning4j.nn.conf.layers.RnnOutputLayer class examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not repost without permission.