本文整理汇总了Java中org.deeplearning4j.datasets.iterator.DataSetIterator.next方法的典型用法代码示例。如果您正苦于以下问题:Java DataSetIterator.next方法的具体用法?Java DataSetIterator.next怎么用?Java DataSetIterator.next使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.deeplearning4j.datasets.iterator.DataSetIterator
的用法示例。
在下文中一共展示了DataSetIterator.next方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: evaluate
import java.io.UncheckedIOException;

import org.deeplearning4j.datasets.iterator.DataSetIterator; //导入方法依赖的package包/类
/**
 * Evaluates the trained model against the MNIST test set (10,000 images,
 * streamed in mini-batches of 100) and prints the evaluation statistics.
 *
 * @return this, to allow fluent chaining
 * @throws UncheckedIOException if the MNIST test data cannot be loaded
 */
@Override
@SuppressWarnings("rawtypes")
public Model evaluate()
{
    final Evaluation evaluation = new Evaluation(parameters.getOutputSize());
    try
    {
        final DataSetIterator iterator = new MnistDataSetIterator(100, 10000);
        while (iterator.hasNext())
        {
            final DataSet testingData = iterator.next();
            // Score the model's predictions against the known labels.
            evaluation.eval(testingData.getLabels(), model.output(testingData.getFeatureMatrix()));
        }
        System.out.println(evaluation.stats());
    }
    catch (IOException e)
    {
        // Fail loudly instead of swallowing the error: if MNIST cannot be
        // loaded, returning normally would report a silently empty evaluation.
        throw new UncheckedIOException("Failed to load MNIST test data", e);
    }
    return this;
}
示例2: train
import org.deeplearning4j.datasets.iterator.DataSetIterator; //导入方法依赖的package包/类
/**
 * Trains the model autoencoder-style: every batch is fitted with its own
 * feature matrix used as both input and reconstruction target.
 *
 * @return this, to allow fluent chaining
 */
@Override
public Model train()
{
    final DataSetIterator batches = data.getIterator();
    while (batches.hasNext())
    {
        final DataSet batch = batches.next();
        // Identity target: the network learns to reconstruct its input.
        model.fit(new DataSet(batch.getFeatureMatrix(), batch.getFeatureMatrix()));
    }
    return this;
}
示例3: main
import org.deeplearning4j.datasets.iterator.DataSetIterator; //导入方法依赖的package包/类
/**
 * Trains a softmax-regression classifier on the full MNIST training set
 * (60,000 images of 28 x 28 pixels) and evaluates it on the 10,000-image
 * test set, logging the evaluation statistics.
 */
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    final int outputNum = 10;     // ten digit classes
    final int numSamples = 60000; // full MNIST training set
    final int batchSize = 100;
    final int iterations = 10;
    final int seed = 123;
    final int listenerFreq = batchSize / 5;

    log.info("Load data....");
    final DataSetIterator iter =
            new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    final MultiLayerNetwork model =
            softMaxRegression(seed, iterations, numRows, numColumns, outputNum);
    model.init();
    // Log the training score every listenerFreq iterations.
    model.setListeners(Collections.singletonList(
            (IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(iter); // achieves end to end pre-training

    log.info("Evaluate model....");
    final Evaluation eval = new Evaluation(outputNum);
    final DataSetIterator testIter = new MnistDataSetIterator(100, 10000);
    while (testIter.hasNext()) {
        final DataSet testBatch = testIter.next();
        final INDArray predictions = model.output(testBatch.getFeatureMatrix());
        eval.eval(testBatch.getLabels(), predictions);
    }
    log.info(eval.stats());
    log.info("****************Example finished********************");
}
开发者ID:PacktPublishing,项目名称:Machine-Learning-End-to-Endguide-for-Java-developers,代码行数:44,代码来源:NeuralNetworks.java
示例4: main
import org.deeplearning4j.datasets.iterator.DataSetIterator; //导入方法依赖的package包/类
/**
 * Builds, pre-trains and evaluates a four-layer stacked denoising
 * autoencoder (1000 -> 500 -> 250 -> 125 -> 50) topped with a softmax
 * output layer on MNIST, then logs the evaluation statistics.
 */
public static void main(String[] args) throws Exception {
    // final int numRows = 28;
    // final int numColumns = 28;
    int outputNum = 10;   // ten digit classes
    // NOTE(review): MnistDataSetIterator emits 28 x 28 = 784 features, but
    // the first layer declares nIn(1000) -- confirm this value is intended.
    int inputNum = 1000;
    int numSamples = 60000;
    int batchSize = 1024;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = batchSize / 5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(iterations)
            // Momentum warm-up: 0.5 initially, raised to 0.9 after epoch 3.
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list()
            .layer(0, new AutoEncoder.Builder()
                    .nIn(inputNum)
                    .nOut(500)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3)
                    .build())
            .layer(1, new AutoEncoder.Builder()
                    .nIn(500)
                    .nOut(250)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3)
                    .build())
            .layer(2, new AutoEncoder.Builder()
                    .nIn(250)
                    .nOut(125)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3)
                    .build())
            .layer(3, new AutoEncoder.Builder()
                    .nIn(125)
                    .nOut(50)
                    .weightInit(WeightInit.XAVIER).lossFunction(LossFunction.RMSE_XENT)
                    .corruptionLevel(0.3)
                    .build())
            .layer(4, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation("softmax")
                    // Bug fix: must equal the previous layer's nOut (50).
                    // The original value of 75 made the layer sizes
                    // inconsistent.
                    .nIn(50)
                    .nOut(outputNum)
                    .build())
            // Unsupervised layer-wise pre-training only; no fine-tuning pass.
            .pretrain(true)
            .backprop(false)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList(
            (IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(iter);

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    DataSetIterator testIter = new MnistDataSetIterator(100, 10000);
    while (testIter.hasNext()) {
        DataSet testMnist = testIter.next();
        INDArray predict2 = model.output(testMnist.getFeatureMatrix());
        eval.eval(testMnist.getLabels(), predict2);
    }
    log.info(eval.stats());
}
示例5: main
import org.deeplearning4j.datasets.iterator.DataSetIterator; //导入方法依赖的package包/类
/**
 * Trains a single RBM layer (Gaussian visible units, rectified hidden
 * units) on the Iris data set with LBFGS and logs the learned weights.
 */
public static void main(String[] args) throws IOException {
// Print ND4J arrays without truncation and fail fast on NaN/Inf values.
Nd4j.MAX_SLICES_TO_PRINT = -1;
Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
// 4 x 1 input = the four Iris feature columns.
final int numRows = 4;
final int numColumns = 1;
// NOTE(review): outputNum is the RBM hidden-unit count, not a class count
// (Iris has 3 classes) -- confirm 10 hidden units is intended.
int outputNum = 10;
int numSamples = 150;
// batchSize == numSamples: the whole data set is processed as one batch.
int batchSize = 150;
int iterations = 100;
int seed = 123;
int listenerFreq = iterations/2;
log.info("Load data....");
DataSetIterator iter = new IrisDataSetIterator(batchSize, numSamples);
DataSet iris = iter.next();
// Standardise features; Gaussian visible units expect zero-mean input.
iris.normalizeZeroMeanZeroUnitVariance();
log.info("Build model....");
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().regularization(true)
.miniBatch(true)
.layer(new RBM.Builder().l2(1e-1).l1(1e-3)
.nIn(numRows * numColumns)
.nOut(outputNum)
.activation("relu")
.weightInit(WeightInit.RELU)
// k(3): three Gibbs sampling steps per contrastive-divergence update.
.lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).k(3)
.hiddenUnit(HiddenUnit.RECTIFIED).visibleUnit(VisibleUnit.GAUSSIAN)
.updater(Updater.ADAGRAD).gradientNormalization(GradientNormalization.ClipL2PerLayer)
.build())
.seed(seed)
.iterations(iterations)
.learningRate(1e-3)
.optimizationAlgo(OptimizationAlgorithm.LBFGS)
.build();
Layer model = LayerFactories.getFactory(conf.getLayer()).create(conf);
model.setListeners(new ScoreIterationListener(listenerFreq));
log.info("Evaluate weights....");
// Inspect the randomly initialised weight matrix before training.
INDArray w = model.getParam(DefaultParamInitializer.WEIGHT_KEY);
log.info("Weights: " + w);
log.info("Scaling the dataset");
iris.scale();
log.info("Train model....");
// Unsupervised training: fit on the feature matrix only, for 20 epochs.
for(int i = 0; i < 20; i++) {
log.info("Epoch "+i+":");
model.fit(iris.getFeatureMatrix());
}
}