This article collects typical usage examples of the Java class org.deeplearning4j.datasets.fetchers.MnistDataFetcher. If you have been wondering what MnistDataFetcher is for, or how to use it, the curated class examples below may help.
The MnistDataFetcher class belongs to the org.deeplearning4j.datasets.fetchers package. Ten code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
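Before the examples, here is a minimal sketch of the fetch-then-next pattern the class follows. This is a hedged illustration based on the DL4J 0.9.x-era API used throughout this page, not code taken from any of the examples below; loadFirstBatch is a name invented for illustration.

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher;
import org.nd4j.linalg.dataset.DataSet;
import java.io.IOException;

public static DataSet loadFirstBatch() throws IOException {
    MnistDataFetcher fetcher = new MnistDataFetcher(true); // true = binarize pixel values
    fetcher.fetch(100);    // pull 100 MNIST training examples into the fetcher
    return fetcher.next(); // hand them back as a DataSet (features + labels)
}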
Example 1: testAutoEncoder

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

@Test
public void testAutoEncoder() throws Exception {
    MnistDataFetcher fetcher = new MnistDataFetcher(true); // binarized MNIST
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).updater(new Sgd(0.1))
            .layer(new org.deeplearning4j.nn.conf.layers.AutoEncoder.Builder().nIn(784).nOut(600)
                    .corruptionLevel(0.6)
                    .lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).build())
            .build();

    fetcher.fetch(100); // load 100 examples
    DataSet d2 = fetcher.next();

    INDArray input = d2.getFeatureMatrix();
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    AutoEncoder da = (AutoEncoder) conf.getLayer().instantiate(conf,
            Arrays.<IterationListener>asList(new ScoreIterationListener(1)), 0, params, true);
    assertEquals(da.params(), da.params()); // sanity check: params() returns a consistent view
    assertEquals(471784, da.params().length()); // 784*600 weights + 600 + 784 biases
    da.setParams(da.params());
    da.setBackpropGradientsViewArray(Nd4j.create(1, params.length()));
    da.fit(input);
}
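The asserted parameter count follows directly from the layer shape: 784 × 600 = 470,400 weights, plus 600 hidden-unit biases and 784 visible-unit biases, gives 471,784 parameters in total.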
Example 2: testBackProp

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

@Test
public void testBackProp() throws Exception {
    MnistDataFetcher fetcher = new MnistDataFetcher(true);
    // LayerFactory layerFactory = LayerFactories.getFactory(new org.deeplearning4j.nn.conf.layers.AutoEncoder());
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .updater(new Sgd(0.1))
            .layer(new org.deeplearning4j.nn.conf.layers.AutoEncoder.Builder().nIn(784).nOut(600)
                    .corruptionLevel(0.6)
                    .lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).build())
            .build();

    fetcher.fetch(100);
    DataSet d2 = fetcher.next();

    INDArray input = d2.getFeatureMatrix();
    int numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.create(1, numParams);
    AutoEncoder da = (AutoEncoder) conf.getLayer().instantiate(conf, null, 0, params, true);
    Gradient g = new DefaultGradient();
    g.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY,
            da.decode(da.activate(input)).sub(input)); // store the reconstruction residual
}
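Note that what gets stored under WEIGHT_KEY here is just the reconstruction residual decode(activate(input)) - input, whose shape matches the input batch rather than the weight matrix; the test exercises the Gradient API rather than computing a real weight gradient.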
Example 3: main

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = Math.max(1, iterations / 5); // avoid a zero listener frequency when iterations < 5

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(8)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(2000).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(500).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())  // encoding stops
            .layer(4, new RBM.Builder().nIn(30).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())  // decoding starts
            .layer(5, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(6, new RBM.Builder().nIn(1000).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(2000).nOut(numRows * numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        // autoencoder training: features serve as both input and target
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
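A possible follow-up, not part of the original example and assuming the model, iter, and log variables defined above: once training finishes, the autoencoder can be sanity-checked by reconstructing a batch and measuring the mean squared pixel error.

iter.reset();
INDArray features = iter.next().getFeatureMatrix();
INDArray reconstruction = model.output(features);        // full forward pass: encode, then decode
INDArray diff = reconstruction.sub(features);
double mse = diff.mul(diff).meanNumber().doubleValue();  // mean squared reconstruction error
log.info("Reconstruction MSE: " + mse);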
Example 4: mnist

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

public static DataSet mnist(int num) {
    try {
        MnistDataFetcher fetcher = new MnistDataFetcher();
        fetcher.fetch(num);     // load the first `num` examples
        return fetcher.next();  // return them as a single DataSet
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
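A quick usage sketch for this helper (illustrative only; the commented shapes assume the classic flattened 28 × 28 MNIST layout):

DataSet first100 = mnist(100);
System.out.println(Arrays.toString(first100.getFeatureMatrix().shape())); // e.g. [100, 784]
System.out.println(Arrays.toString(first100.getLabels().shape()));        // e.g. [100, 10]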
Example 5: DeepAutoEncoderExample

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

// iterator, model, modelFile, numberOfRows and numberOfColumns are fields of the enclosing class
public DeepAutoEncoderExample() {
    try {
        int seed = 123;
        int numberOfIterations = 1;
        iterator = new MnistDataSetIterator(1000, MnistDataFetcher.NUM_EXAMPLES, true);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(numberOfIterations)
                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                .list()
                .layer(0, new RBM.Builder().nIn(numberOfRows * numberOfColumns)
                        .nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(1, new RBM.Builder().nIn(1000).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(2, new RBM.Builder().nIn(500).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(3, new RBM.Builder().nIn(250).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(4, new RBM.Builder().nIn(100).nOut(30)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // encoding stops
                .layer(5, new RBM.Builder().nIn(30).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // decoding starts
                .layer(6, new RBM.Builder().nIn(100).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(7, new RBM.Builder().nIn(250).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(8, new RBM.Builder().nIn(500).nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(9, new OutputLayer.Builder(
                        LossFunctions.LossFunction.RMSE_XENT).nIn(1000)
                        .nOut(numberOfRows * numberOfColumns).build())
                .pretrain(true).backprop(true)
                .build();

        model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(Collections.singletonList(
                (IterationListener) new ScoreIterationListener()));

        while (iterator.hasNext()) {
            DataSet dataSet = iterator.next();
            // autoencoder training: reconstruct the input features
            model.fit(new DataSet(dataSet.getFeatureMatrix(),
                    dataSet.getFeatureMatrix()));
        }

        modelFile = new File("savedModel");
        ModelSerializer.writeModel(model, modelFile, true); // true = also save the updater state
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}
Author: PacktPublishing | Project: Machine-Learning-End-to-Endguide-for-Java-developers | Source: DeepAutoEncoderExample.java
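As a hedged follow-up, not part of the original class: the file written above can be read back with the same ModelSerializer utility.

MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(new File("savedModel"));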
Example 6: main

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = Math.max(1, iterations / 5); // avoid a zero listener frequency when iterations < 5

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(10)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())  // encoding stops
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())  // decoding starts
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT).nIn(1000).nOut(numRows * numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
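The stack is symmetric around the 30-unit layer: layers 0-4 compress each 784-pixel image down to a 30-dimensional code, and layers 5-9 mirror them to reconstruct the 784 outputs.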
Example 7: main

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = Math.max(1, iterations / 5); // avoid a zero listener frequency when iterations < 5

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list()
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(1000).nOut(numRows * numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
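Compared with Example 6, this variant changes only the loss functions: the RBM layers pretrain with KL_DIVERGENCE rather than RMSE_XENT, and the output layer uses MSE with a sigmoid activation.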
Example 8: MnistManager

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

/**
 * Constructs an instance managing the two given data files. Supports a
 * <code>null</code> value for one of the arguments in case reading only one
 * of the files (images or labels) is required.
 *
 * @param imagesFile may be <code>null</code>; in that case all future
 *                   operations using that file will fail
 * @param labelsFile may be <code>null</code>; in that case all future
 *                   operations using that file will fail
 * @param train      if true, use the training-set example count, otherwise the test-set count
 * @throws IOException
 */
public MnistManager(String imagesFile, String labelsFile, boolean train) throws IOException {
    this(imagesFile, labelsFile, train ? MnistDataFetcher.NUM_EXAMPLES : MnistDataFetcher.NUM_EXAMPLES_TEST);
}
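A hypothetical usage sketch; the file names below are the standard MNIST archive names, used as placeholders rather than paths taken from the source.

// Placeholder paths: point these at local copies of the MNIST files.
MnistManager trainManager = new MnistManager("train-images-idx3-ubyte", "train-labels-idx1-ubyte", true);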
Example 9: MnistDataSetIterator

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

/**
 * Constructor to get the full MNIST data set (either the train or the test set) without
 * binarization (i.e., just normalization into the range 0 to 1), with shuffling based on
 * a random seed.
 *
 * @param batchSize mini-batch size
 * @param train     if true, iterate the training set, otherwise the test set
 * @param seed      random seed used for shuffling
 * @throws IOException
 */
public MnistDataSetIterator(int batchSize, boolean train, int seed) throws IOException {
    this(batchSize, (train ? MnistDataFetcher.NUM_EXAMPLES : MnistDataFetcher.NUM_EXAMPLES_TEST), false, train,
            true, seed);
}
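For instance, an illustrative call (not from the source) that iterates the full shuffled training set in batches of 128 with a fixed seed for reproducibility:

DataSetIterator mnistTrain = new MnistDataSetIterator(128, true, 42);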
Example 10: RawMnistDataSetIterator

import org.deeplearning4j.datasets.fetchers.MnistDataFetcher; // import the required package/class

public RawMnistDataSetIterator(int batch, int numExamples) throws IOException {
    super(batch, numExamples, new MnistDataFetcher(false));
}