This article collects typical usage examples of the Java method org.deeplearning4j.nn.conf.MultiLayerConfiguration.clone. If you have been wondering what exactly MultiLayerConfiguration.clone does and how to use it, the curated code examples below may help. You can also explore further usage of its declaring class, org.deeplearning4j.nn.conf.MultiLayerConfiguration.
Three code examples of MultiLayerConfiguration.clone are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
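Before the curated examples, here is a minimal standalone sketch (not one of the three examples below; the layer sizes are arbitrary, and the setIterationCount setter on MultiLayerConfiguration is assumed) illustrating the core contract of clone(): it returns an independent deep copy of the configuration, so mutating the copy's training counters leaves the original untouched.

MultiLayerConfiguration original = new NeuralNetConfiguration.Builder()
        .list()
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
        .layer(1, new OutputLayer.Builder().nIn(3).nOut(2).build())
        .build();

MultiLayerConfiguration copy = original.clone(); //independent deep copy

copy.setIterationCount(10);                      //mutate only the copy
assertEquals(0, original.getIterationCount());   //original is unchanged
assertEquals(10, copy.getIterationCount());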
Example 1: testVaePretrainSimple
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testVaePretrainSimple() {
    //Simple sanity check on pretraining
    int nIn = 8;

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp())
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12)
                    .decoderLayerSizes(13).reconstructionDistribution(
                            new GaussianReconstructionDistribution(Activation.IDENTITY))
                    .build())
            .pretrain(true).backprop(false).build();

    //Do training on Spark with one executor, for 3 separate minibatches
    int rddDataSetNumExamples = 10;
    int totalAveragings = 5;
    int averagingFrequency = 3;
    ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(rddDataSetNumExamples)
            .averagingFrequency(averagingFrequency).batchSizePerWorker(rddDataSetNumExamples)
            .saveUpdater(true).workerPrefetchNumBatches(0).build();

    Nd4j.getRandom().setSeed(12345);
    //clone() so that the Spark wrapper gets its own copy of the configuration
    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf.clone(), tm);

    List<DataSet> trainData = new ArrayList<>();
    int nDataSets = numExecutors() * totalAveragings * averagingFrequency;
    for (int i = 0; i < nDataSets; i++) {
        trainData.add(new DataSet(Nd4j.rand(rddDataSetNumExamples, nIn), null));
    }

    JavaRDD<DataSet> data = sc.parallelize(trainData);

    sparkNet.fit(data);
}
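Note the pattern on the SparkDl4jMultiLayer line above: the test hands over conf.clone() rather than conf itself, so the Spark wrapper builds its network from a private copy and the caller's configuration remains unmodified and reusable. The same idiom recurs in the next two examples.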
Example 2: testIterationCounts
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testIterationCounts() throws Exception {
    int dataSetObjSize = 5;
    int batchSizePerExecutor = 25;
    List<DataSet> list = new ArrayList<>();
    int minibatchesPerWorkerPerEpoch = 10;
    DataSetIterator iter = new MnistDataSetIterator(dataSetObjSize,
            batchSizePerExecutor * numExecutors() * minibatchesPerWorkerPerEpoch, false);
    while (iter.hasNext()) {
        list.add(iter.next());
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50)
                    .activation(Activation.TANH).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).nIn(50).nOut(10)
                    .activation(Activation.SOFTMAX).build())
            .pretrain(false).backprop(true).build();

    for (int avgFreq : new int[] {1, 5, 10}) {
        System.out.println("--- Avg freq " + avgFreq + " ---");
        //A fresh clone per run: each network starts with iterationCount == 0.
        //Note: repartionData (sic) is the method's actual name in the DL4J builder API.
        SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf.clone(),
                new ParameterAveragingTrainingMaster.Builder(numExecutors(), dataSetObjSize)
                        .batchSizePerWorker(batchSizePerExecutor).averagingFrequency(avgFreq)
                        .repartionData(Repartition.Always).build());

        sparkNet.setListeners(new ScoreIterationListener(1));

        JavaRDD<DataSet> rdd = sc.parallelize(list);
        assertEquals(0, sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());
        sparkNet.fit(rdd);
        assertEquals(minibatchesPerWorkerPerEpoch,
                sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());
        sparkNet.fit(rdd);
        assertEquals(2 * minibatchesPerWorkerPerEpoch,
                sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());

        sparkNet.getTrainingMaster().deleteTempFiles(sc);
    }
}
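Here the clone happens inside the loop, so each averaging-frequency run builds its network from a fresh copy whose iteration count is zero, which is exactly what the first assertEquals verifies before any fitting. Without the clone, iteration counts accumulated on a shared configuration could leak from one run into the next.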
Example 3: testEpochCounter
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testEpochCounter() throws Exception {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new OutputLayer.Builder().nIn(4).nOut(3).build())
            .build();

    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).build(), "in")
            .setOutputs("out")
            .build();

    DataSetIterator iter = new IrisDataSetIterator(1, 150);
    List<DataSet> l = new ArrayList<>();
    while (iter.hasNext()) {
        l.add(iter.next());
    }

    JavaRDD<DataSet> rdd = sc.parallelize(l);

    int rddDataSetNumExamples = 1;
    int averagingFrequency = 3;
    ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(rddDataSetNumExamples)
            .averagingFrequency(averagingFrequency).batchSizePerWorker(rddDataSetNumExamples)
            .saveUpdater(true).workerPrefetchNumBatches(0).build();

    Nd4j.getRandom().setSeed(12345);
    //Each Spark wrapper receives its own cloned configuration
    SparkDl4jMultiLayer sn1 = new SparkDl4jMultiLayer(sc, conf.clone(), tm);
    SparkComputationGraph sn2 = new SparkComputationGraph(sc, conf2.clone(), tm);

    for (int i = 0; i < 4; i++) {
        assertEquals(i, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount());
        assertEquals(i, sn2.getNetwork().getConfiguration().getEpochCount());
        sn1.fit(rdd);
        sn2.fit(rdd);
        assertEquals(i + 1, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount());
        assertEquals(i + 1, sn2.getNetwork().getConfiguration().getEpochCount());
    }
}
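The same independence can be checked without Spark. Below is a hypothetical local sketch (not from the test above; it assumes the MultiLayerNetwork(MultiLayerConfiguration) constructor and the setEpochCount setter on MultiLayerConfiguration): because each network is built from its own clone, per-network counters such as the epoch count advance independently of each other and of the base configuration.

MultiLayerConfiguration base = new NeuralNetConfiguration.Builder()
        .list()
        .layer(new OutputLayer.Builder().nIn(4).nOut(3).build())
        .build();

MultiLayerNetwork netA = new MultiLayerNetwork(base.clone());
MultiLayerNetwork netB = new MultiLayerNetwork(base.clone());
netA.init();
netB.init();

netA.getLayerWiseConfigurations().setEpochCount(5);                 //touch only netA's copy
assertEquals(5, netA.getLayerWiseConfigurations().getEpochCount());
assertEquals(0, netB.getLayerWiseConfigurations().getEpochCount()); //netB is unaffected
assertEquals(0, base.getEpochCount());                              //so is the base config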