This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.AutoEncoder. If you are unsure what the AutoEncoder class is for or how to use it, the curated examples below should help.
The AutoEncoder class belongs to the org.deeplearning4j.nn.conf.layers package. Five code examples of the class are shown below, sorted by popularity by default.
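Before the examples, here is a minimal sketch of the pattern they all share (the layer sizes and corruption level are illustrative, not taken from any example below): an AutoEncoder layer is configured through its Builder, and corruptionLevel makes it a denoising autoencoder by randomly masking that fraction of the inputs during layer-wise pretraining.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.AutoEncoder;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction;

// A single denoising autoencoder layer with illustrative sizes
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .list()
        .layer(0, new AutoEncoder.Builder()
                .nIn(784).nOut(100)
                .weightInit(WeightInit.XAVIER)
                .lossFunction(LossFunction.RMSE_XENT)
                .corruptionLevel(0.3) // mask 30% of inputs during pretraining
                .build())
        .pretrain(true).backprop(false) // layer-wise pretraining only
        .build();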
Example 1: getConfiguration
import org.deeplearning4j.nn.conf.layers.AutoEncoder; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
return new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
.gradientNormalizationThreshold(1.0).iterations(parameters.getIterations()).momentum(0.5)
.momentumAfter(Collections.singletonMap(3, 0.9))
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list(4)
.layer(0,
new AutoEncoder.Builder().nIn(parameters.getInputSize()).nOut(500).weightInit(WeightInit.XAVIER)
.lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
.layer(1, new AutoEncoder.Builder().nIn(500).nOut(250).weightInit(WeightInit.XAVIER)
.lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3)
.build())
.layer(2,
new AutoEncoder.Builder().nIn(250).nOut(200).weightInit(WeightInit.XAVIER)
.lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
.layer(3, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD).activation("softmax").nIn(200)
.nOut(parameters.getOutputSize()).build())
.pretrain(true).backprop(false).build();
}
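A brief, hypothetical usage note (not part of the source): the configuration above is typically consumed as shown below, where trainIter stands in for whatever DataSetIterator the surrounding project provides and the `parameters` field is assumed from the snippet.

MultiLayerNetwork net = new MultiLayerNetwork(getConfiguration());
net.init();
net.setListeners(new ScoreIterationListener(10)); // log the score every 10 iterations
net.fit(trainIter); // with pretrain(true)/backprop(false), fit() performs layer-wise pretraining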
Example 2: main
import org.deeplearning4j.nn.conf.layers.AutoEncoder; // import the required package/class
public static void main(String[] args) throws Exception {
final int numRows = 28;
final int numColumns = 28;
int outputNum = 10;
int inputNum = numRows * numColumns; // nIn of the first layer must match MNIST's 28x28 = 784 features
int numSamples = 60000;
int batchSize = 1024;
int iterations = 10;
int seed = 123;
int listenerFreq = batchSize / 5;
log.info("Load data....");
DataSetIterator iter = new MnistDataSetIterator(batchSize,numSamples,true);
log.info("Build model....");
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
.gradientNormalizationThreshold(1.0)
.iterations(iterations)
.momentum(0.5)
.momentumAfter(Collections.singletonMap(3, 0.9))
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
.list()
.layer(0, new AutoEncoder.Builder()
.nIn(inputNum)
.nOut(500)
.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
.corruptionLevel(0.3)
.build())
.layer(1, new AutoEncoder.Builder()
.nIn(500)
.nOut(250)
.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
.corruptionLevel(0.3)
.build())
.layer(2, new AutoEncoder.Builder()
.nIn(250)
.nOut(125)
.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
.corruptionLevel(0.3)
.build())
.layer(3, new AutoEncoder.Builder()
.nIn(125)
.nOut(50)
.weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
.corruptionLevel(0.3)
.build())
.layer(4, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
.activation("softmax")
.nIn(50) // must match layer 3's nOut
.nOut(outputNum)
.build())
.pretrain(true)
.backprop(false)
.build();
MultiLayerNetwork model = new MultiLayerNetwork(conf);
model.init();
model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));
log.info("Train model....");
model.fit(iter);
log.info("Evaluate model....");
Evaluation eval = new Evaluation(outputNum);
DataSetIterator testIter = new MnistDataSetIterator(100,10000);
while(testIter.hasNext()) {
DataSet testMnist = testIter.next();
INDArray predict2 = model.output(testMnist.getFeatureMatrix());
eval.eval(testMnist.getLabels(), predict2);
}
log.info(eval.stats());
}
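A hedged follow-up, not in the original example: once pretraining finishes, the stacked autoencoder can be persisted with DL4J's ModelSerializer so it does not have to be retrained (the file name here is illustrative).

import org.deeplearning4j.util.ModelSerializer;
import java.io.File;

File saveTo = new File("stacked-ae-mnist.zip");
ModelSerializer.writeModel(model, saveTo, true); // true = also save the updater state
MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(saveTo);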
Example 3: testAEScoreFunctionSimple
import org.deeplearning4j.nn.conf.layers.AutoEncoder; // import the required package/class
@Test
public void testAEScoreFunctionSimple() throws Exception {
for(RegressionEvaluation.Metric metric : new RegressionEvaluation.Metric[]{RegressionEvaluation.Metric.MSE,
RegressionEvaluation.Metric.MAE}) {
log.info("Metric: " + metric);
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.list()
.layer(new AutoEncoder.Builder().nIn(784).nOut(32).build())
.pretrain(true).backprop(false)
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
DataSetIterator iter = new MnistDataSetIterator(32, false, 12345);
List<DataSet> l = new ArrayList<>();
for( int i=0; i<10; i++ ){
DataSet ds = iter.next();
l.add(new DataSet(ds.getFeatures(), ds.getFeatures()));
}
iter = new ExistingDataSetIterator(l);
EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
.epochTerminationConditions(new MaxEpochsTerminationCondition(5))
.iterationTerminationConditions(
new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
.scoreCalculator(new AutoencoderScoreCalculator(metric, iter)).modelSaver(saver)
.build();
EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, iter);
EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();
assertNotNull(result.getBestModel());
assertTrue(result.getBestModelScore() > 0.0);
}
}
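A small sketch of what one might do next (not part of the test): EarlyStoppingResult also records why training stopped and which epoch produced the best score, and the InMemoryModelSaver keeps the best network available in memory.

System.out.println("Termination reason: " + result.getTerminationReason());
System.out.println("Termination details: " + result.getTerminationDetails());
System.out.println("Best epoch: " + result.getBestModelEpoch() + ", score: " + result.getBestModelScore());
MultiLayerNetwork best = result.getBestModel(); // the saver's in-memory copy of the best network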
Example 4: testAEScoreFunctionSimple (ComputationGraph)
import org.deeplearning4j.nn.conf.layers.AutoEncoder; // import the required package/class
@Test
public void testAEScoreFunctionSimple() throws Exception {
for(RegressionEvaluation.Metric metric : new RegressionEvaluation.Metric[]{RegressionEvaluation.Metric.MSE,
RegressionEvaluation.Metric.MAE}) {
log.info("Metric: " + metric);
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.graphBuilder()
.addInputs("in")
.layer("0", new AutoEncoder.Builder().nIn(784).nOut(32).build(), "in")
.setOutputs("0")
.pretrain(true).backprop(false)
.build();
ComputationGraph net = new ComputationGraph(conf);
net.init();
DataSetIterator iter = new MnistDataSetIterator(32, false, 12345);
List<DataSet> l = new ArrayList<>();
for( int i=0; i<10; i++ ){
DataSet ds = iter.next();
l.add(new DataSet(ds.getFeatures(), ds.getFeatures()));
}
iter = new ExistingDataSetIterator(l);
EarlyStoppingModelSaver<ComputationGraph> saver = new InMemoryModelSaver<>();
EarlyStoppingConfiguration<ComputationGraph> esConf =
new EarlyStoppingConfiguration.Builder<ComputationGraph>()
.epochTerminationConditions(new MaxEpochsTerminationCondition(5))
.iterationTerminationConditions(
new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
.scoreCalculator(new AutoencoderScoreCalculator(metric, iter)).modelSaver(saver)
.build();
EarlyStoppingGraphTrainer trainer = new EarlyStoppingGraphTrainer(esConf, net, iter);
EarlyStoppingResult<ComputationGraph> result = trainer.fit();
assertNotNull(result.getBestModel());
assertTrue(result.getBestModelScore() > 0.0);
}
}
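For the ComputationGraph variant, a hedged sketch of reusing the trained network (features is a hypothetical [batchSize, 784] INDArray of MNIST digits; note that feeding data forward through an AutoEncoder layer yields the encoding, not the reconstruction):

ComputationGraph best = result.getBestModel();
// The graph's single output "0" is the AutoEncoder layer's activation,
// i.e. the learned compressed representation of shape [batchSize, 32]
INDArray encoded = best.output(features)[0];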
Example 5: testAutoEncoderActivationLayer
import org.deeplearning4j.nn.conf.layers.AutoEncoder; // import the required package/class
@Test
public void testAutoEncoderActivationLayer() throws Exception {
int minibatch = 3;
int nIn = 5;
int layerSize = 5;
int nOut = 3;
INDArray next = Nd4j.rand(new int[] {minibatch, nIn});
INDArray labels = Nd4j.zeros(minibatch, nOut);
for (int i = 0; i < minibatch; i++) {
labels.putScalar(i, i % nOut, 1.0);
}
// Run without separate activation layer
Nd4j.getRandom().setSeed(12345);
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
.list()
.layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0)
.activation(Activation.SIGMOID).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY)
.activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
.build())
.backprop(true).pretrain(false).build();
MultiLayerNetwork network = new MultiLayerNetwork(conf);
network.init();
network.fit(next, labels); // labels are necessary for this test: without them, the layer activation function would affect the pretraining results
// Run with separate activation layer
Nd4j.getRandom().setSeed(12345);
MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
.list()
.layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0)
.activation(Activation.IDENTITY).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.ActivationLayer.Builder()
.activation(Activation.SIGMOID).build())
.layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY)
.activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut)
.build())
.backprop(true).pretrain(false).build();
MultiLayerNetwork network2 = new MultiLayerNetwork(conf2);
network2.init();
network2.fit(next, labels);
// check parameters
assertEquals(network.getLayer(0).getParam("W"), network2.getLayer(0).getParam("W"));
assertEquals(network.getLayer(1).getParam("W"), network2.getLayer(2).getParam("W"));
assertEquals(network.getLayer(0).getParam("b"), network2.getLayer(0).getParam("b"));
assertEquals(network.getLayer(1).getParam("b"), network2.getLayer(2).getParam("b"));
// check activations
network.init();
network.setInput(next);
List<INDArray> activations = network.feedForward(true);
network2.init();
network2.setInput(next);
List<INDArray> activations2 = network2.feedForward(true);
assertEquals(activations.get(1).reshape(activations2.get(2).shape()), activations2.get(2));
assertEquals(activations.get(2), activations2.get(3));
}
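The equivalence the test asserts can also be checked directly (a sketch reusing activations2 from above): applying a sigmoid to the identity-activation autoencoder's output reproduces what the fused sigmoid autoencoder computes in one step.

import org.nd4j.linalg.ops.transforms.Transforms;

// index 0 of feedForward() is the input itself, so get(1) is layer 0's output
INDArray aeLinear = activations2.get(1);           // identity AutoEncoder: pre-activation values
INDArray viaActivationLayer = activations2.get(2); // after the separate sigmoid ActivationLayer
assertEquals(Transforms.sigmoid(aeLinear), viaActivationLayer);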