This article collects typical usage examples of the Java method MultiLayerNetwork.init from org.deeplearning4j.nn.multilayer.MultiLayerNetwork. If you are wondering what MultiLayerNetwork.init does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the enclosing class org.deeplearning4j.nn.multilayer.MultiLayerNetwork for more context.
Fifteen code examples of MultiLayerNetwork.init are shown below, sorted by popularity by default.
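Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the layer sizes are arbitrary) of the usual pattern: build a MultiLayerConfiguration, wrap it in a MultiLayerNetwork, and call init() once to allocate and initialize the parameters before any training or inference.
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

// Build a one-layer configuration with arbitrary sizes
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .list()
        .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                .nIn(4).nOut(3).activation(Activation.TANH).build())
        .build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init(); // must be called before fit()/output(); allocates and initializes parameters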
Example 1: getOutput
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
private INDArray getOutput(InputStream isModel, INDArray image) {
    org.deeplearning4j.nn.api.Model dl4jModel;
    try {
        // won't use the model guesser at the moment because it is trying to load a keras model?
        // dl4jModel = ModelGuesser.loadModelGuess(isModel);
        dl4jModel = loadModel(isModel);
    } catch (Exception e) {
        throw new IllegalArgumentException("Not able to load model.", e);
    }
    if (dl4jModel instanceof MultiLayerNetwork) {
        MultiLayerNetwork multiLayerNetwork = (MultiLayerNetwork) dl4jModel;
        multiLayerNetwork.init();
        return multiLayerNetwork.output(image);
    } else {
        ComputationGraph graph = (ComputationGraph) dl4jModel;
        graph.init();
        return graph.output(image)[0];
    }
}
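The loadModel helper is not shown in this snippet. Below is a hypothetical sketch of what it might look like, assuming the stream holds a model saved with DL4J's ModelSerializer; the MultiLayerNetwork-then-ComputationGraph fallback is an assumption, not the original implementation.
import org.deeplearning4j.util.ModelSerializer; // assumed import
import org.apache.commons.io.IOUtils;           // assumed import

// Hypothetical sketch of the loadModel helper, assuming a ModelSerializer-saved model
private org.deeplearning4j.nn.api.Model loadModel(InputStream isModel) throws Exception {
    // Buffer the stream so we can retry with a different restore method
    byte[] bytes = IOUtils.toByteArray(isModel);
    try {
        return ModelSerializer.restoreMultiLayerNetwork(new ByteArrayInputStream(bytes));
    } catch (Exception e) {
        return ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(bytes));
    }
}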
Example 2: createNetwork
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
private MultiLayerNetwork createNetwork(int numLabels) {
    MultiLayerNetwork network = null;
    switch (modelType) {
        case "LeNet":
            network = lenetModel(numLabels);
            break;
        case "AlexNet":
            network = alexnetModel(numLabels);
            break;
        case "custom":
            network = customModel(numLabels);
            break;
        default:
            throw new InvalidInputTypeException("Incorrect model provided.");
    }
    network.init();
    network.setListeners(new ScoreIterationListener(listenerFreq));
    return network;
}
Example 3: getInitialModel
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Override
public MultiLayerNetwork getInitialModel() {
    // This method will be called ONLY once, in the master thread
    NetBroadcastTuple tuple = broadcastModel.getValue();
    if (tuple.getConfiguration() != null) {
        MultiLayerConfiguration conf = tuple.getConfiguration();
        MultiLayerNetwork network = new MultiLayerNetwork(conf);
        network.init();
        if (tuple.getParameters() != null)
            network.setParams(tuple.getParameters());
        // restore the updater state directly, without recreating the updater
        if (tuple.getUpdaterState() != null)
            network.getUpdater().getStateViewArray().assign(tuple.getUpdaterState());
        return network;
    } else {
        return null;
    }
}
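Note the three-step restore: rebuild the network from the broadcast configuration, call init() to allocate the parameter arrays, then overwrite them with the broadcast parameters and updater state. init() must come first, because setParams() and the updater-state assign() need those views to exist.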
Example 4: testListenersViaModel
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testListenersViaModel() {
    TestListener.clearCounts();

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10)
                    .activation(Activation.TANH).build());
    MultiLayerConfiguration conf = builder.build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    StatsStorage ss = new InMemoryStatsStorage();
    model.setListeners(new TestListener(), new StatsListener(ss));

    testListenersForModel(model, null);

    assertEquals(1, ss.listSessionIDs().size());
    assertEquals(2, ss.listWorkerIDsForSession(ss.listSessionIDs().get(0)).size());
}
Example 5: testOptimizersBasicMLPBackprop
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testOptimizersBasicMLPBackprop() {
    // Basic tests of the 'does it throw an exception' variety.
    DataSetIterator iter = new IrisDataSetIterator(5, 50);

    OptimizationAlgorithm[] toTest =
            {OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT, OptimizationAlgorithm.LINE_GRADIENT_DESCENT,
             OptimizationAlgorithm.CONJUGATE_GRADIENT, OptimizationAlgorithm.LBFGS
             //OptimizationAlgorithm.HESSIAN_FREE //Known to not work
            };

    for (OptimizationAlgorithm oa : toTest) {
        MultiLayerNetwork network = new MultiLayerNetwork(getMLPConfigIris(oa));
        network.init();
        iter.reset();
        network.fit(iter);
    }
}
Example 6: checkInitializationFF
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void checkInitializationFF() {
    // Actually create a network with a custom layer; check initialization and forward pass
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
            .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).build())
            .layer(1, new CustomLayer(3.14159)) // hard-coded nIn/nOut of 10
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(11).build())
            .pretrain(false).backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    // After init(), each layer should hold nIn*nOut weights plus nOut biases
    assertEquals(9 * 10 + 10, net.getLayer(0).numParams());
    assertEquals(10 * 10 + 10, net.getLayer(1).numParams());
    assertEquals(10 * 11 + 11, net.getLayer(2).numParams());

    // Check for exceptions...
    net.output(Nd4j.rand(1, 9));
    net.fit(new DataSet(Nd4j.rand(1, 9), Nd4j.rand(1, 11)));
}
Example 7: main
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public static void main(String[] args) {
    // Generate the training data
    DataSetIterator iterator = getTrainingData(batchSize, rng);

    // Create the network
    int numInput = 2;
    int numOutputs = 1;
    int nHidden = 10;
    MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInput).nOut(nHidden)
                    .activation(Activation.TANH)
                    .build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(nHidden).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build()
    );
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    // Train the network on the full data set, and evaluate it periodically
    for (int i = 0; i < nEpochs; i++) {
        iterator.reset();
        net.fit(iterator);
    }

    // Test the addition of 2 numbers (try different numbers here)
    final INDArray input = Nd4j.create(new double[]{0.111111, 0.3333333333333}, new int[]{1, 2});
    INDArray out = net.output(input, false);
    System.out.println(out);
}
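The getTrainingData helper and the fields seed, iterations, learningRate, batchSize, nEpochs and rng are defined elsewhere in the source class. Below is a hedged sketch of a compatible getTrainingData, assuming the usual sum-of-two-numbers setup: random pairs as features, their sum as the label. The sample count is an assumption.
import org.deeplearning4j.datasets.iterator.impl.ListDataSetIterator; // assumed import; other imports as in the example above

// Hypothetical sketch of getTrainingData for the sum example above
private static DataSetIterator getTrainingData(int batchSize, Random rng) {
    int nSamples = 1000; // assumed number of training examples
    double[] sum = new double[nSamples];
    double[] input1 = new double[nSamples];
    double[] input2 = new double[nSamples];
    for (int i = 0; i < nSamples; i++) {
        input1[i] = rng.nextDouble() * 0.5; // keep inputs in [0, 0.5] so sums stay in [0, 1]
        input2[i] = rng.nextDouble() * 0.5;
        sum[i] = input1[i] + input2[i];
    }
    INDArray features = Nd4j.hstack(
            Nd4j.create(input1, new int[]{nSamples, 1}),
            Nd4j.create(input2, new int[]{nSamples, 1}));
    INDArray labels = Nd4j.create(sum, new int[]{nSamples, 1});

    List<DataSet> list = new DataSet(features, labels).asList();
    Collections.shuffle(list, rng);
    return new ListDataSetIterator(list, batchSize);
}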
Example 8: buildModel
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public void buildModel() {
    // Create the network
    int numInput = 2;
    int numOutputs = 1;
    int nHidden = 10;
    mNetwork = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
            .seed(mSeed)
            .iterations(ITERATIONS)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(LEARNING_RATE)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInput).nOut(nHidden)
                    .activation(Activation.TANH)
                    .name("input")
                    .build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .name("output")
                    .nIn(nHidden).nOut(numOutputs).build())
            .pretrain(false)
            .backprop(true)
            .build()
    );
    mNetwork.init();
    mNetwork.setListeners(mIterationListener);
}
Example 9: main
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public static void main(final String[] args) {
    // Switch these two options to fit different functions with different networks
    final MathFunction fn = new SinXDivXMathFunction();
    final MultiLayerConfiguration conf = getDeepDenseLayerNetworkConfiguration();

    // Generate the training data
    final INDArray x = Nd4j.linspace(-10, 10, nSamples).reshape(nSamples, 1);
    final DataSetIterator iterator = getTrainingData(x, fn, batchSize, rng);

    // Create the network
    final MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    // Train the network on the full data set, and record its predictions periodically
    final INDArray[] networkPredictions = new INDArray[nEpochs / plotFrequency];
    for (int i = 0; i < nEpochs; i++) {
        iterator.reset();
        net.fit(iterator);
        if ((i + 1) % plotFrequency == 0) networkPredictions[i / plotFrequency] = net.output(x, false);
    }

    // Plot the target data and the network predictions
    plot(fn, x, fn.getFunctionValues(x), networkPredictions);
}
Example 10: main
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = iterations / 5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(8)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(2000).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(500).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(4, new RBM.Builder().nIn(30).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(5, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(6, new RBM.Builder().nIn(1000).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(2000).nOut(numRows * numColumns).build())
            .pretrain(true).backprop(true)
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
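Note that this is a deep autoencoder: the fit() call passes the MNIST feature matrix as both features and labels, so the 2000-1000-500-30-500-1000-2000 RBM stack learns to reconstruct its own 784-pixel input through a 30-unit bottleneck.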
Example 11: build
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public Model build()
{
    model = new MultiLayerNetwork(getConfiguration());
    model.init();
    model.setListeners(
            Arrays.asList((IterationListener) new ScoreIterationListener(parameters.getListenerFrequency())));
    return this;
}
Example 12: testLearningRateByParam
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testLearningRateByParam() {
    double lr = 0.01;
    double biasLr = 0.02;
    int[] nIns = {4, 3, 3};
    int[] nOuts = {3, 3, 3};
    int oldScore = 1;
    int newScore = 1;
    int iteration = 3;
    INDArray gradientW = Nd4j.ones(nIns[0], nOuts[0]);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)).list()
            .layer(0, new DenseLayer.Builder().nIn(nIns[0]).nOut(nOuts[0])
                    .updater(new Sgd(lr)).biasUpdater(new Sgd(biasLr)).build())
            .layer(1, new BatchNormalization.Builder().nIn(nIns[1]).nOut(nOuts[1]).updater(new Sgd(0.7)).build())
            .layer(2, new OutputLayer.Builder().nIn(nIns[2]).nOut(nOuts[2]).build())
            .backprop(true).pretrain(false).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    ConvexOptimizer opt = new StochasticGradientDescent(net.getDefaultConfiguration(),
            new NegativeDefaultStepFunction(), null, net);
    opt.checkTerminalConditions(gradientW, oldScore, newScore, iteration);

    assertEquals(lr, ((Sgd) net.getLayer(0).conf().getLayer().getUpdaterByParam("W")).getLearningRate(), 1e-4);
    assertEquals(biasLr, ((Sgd) net.getLayer(0).conf().getLayer().getUpdaterByParam("b")).getLearningRate(), 1e-4);
    assertEquals(0.7, ((Sgd) net.getLayer(1).conf().getLayer().getUpdaterByParam("gamma")).getLearningRate(), 1e-4);
    assertEquals(0.3, ((Sgd) net.getLayer(2).conf().getLayer().getUpdaterByParam("W")).getLearningRate(), 1e-4); // from global LR
}
Example 13: getNet2
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
private MultiLayerNetwork getNet2() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .convolutionMode(ConvolutionMode.Same)
            .activation(Activation.TANH)
            .weightInit(WeightInit.XAVIER)
            .updater(new Sgd(0.1))
            .list()
            .layer(new GravesLSTM.Builder().nOut(8).build())
            .layer(new LSTM.Builder().nOut(8).build())
            .layer(new RnnOutputLayer.Builder().nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
            .setInputType(InputType.recurrent(5))
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    for (int i = 0; i < 3; i++) {
        INDArray f = Nd4j.rand(new int[]{8, 5, 10});
        INDArray l = Nd4j.rand(new int[]{8, 10, 10});
        net.fit(f, l);
    }
    return net;
}
Example 14: testCNNActivationsFrozen
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
@Test
public void testCNNActivationsFrozen() throws Exception {
    int nChannels = 1;
    int outputNum = 10;
    int batchSize = 64;
    int nEpochs = 10;
    int seed = 123;

    log.info("Load data....");
    DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345);

    log.info("Build model....");
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
            .l2(0.0005)
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9)).list()
            .layer(0, new FrozenLayer(new ConvolutionLayer.Builder(5, 5)
                    // nIn and nOut specify depth. nIn here is nChannels and nOut is the number of filters to be applied
                    .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build()))
            .layer(1, new FrozenLayer(new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(2, 2)
                    .stride(2, 2).build()))
            .layer(2, new FrozenLayer(new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()))
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(outputNum).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutionalFlat(28, 28, nChannels));

    MultiLayerConfiguration conf = builder.build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    log.info("Train model....");
    model.setListeners(new ConvolutionalIterationListener(1));
    for (int i = 0; i < nEpochs; i++) {
        model.fit(mnistTrain);
    }
}
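Because layers 0-2 are wrapped in FrozenLayer, their parameters are excluded from updates during fit(); only the softmax output layer is actually trained here.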
Example 15: main
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10;
    int numSamples = 60000;
    int batchSize = 100;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = batchSize / 5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerNetwork model = softMaxRegression(seed, iterations, numRows, numColumns, outputNum);
    // Alternative models:
    // MultiLayerNetwork model = deepBeliefNetwork(seed, iterations, numRows, numColumns, outputNum);
    // MultiLayerNetwork model = deepConvNetwork(seed, iterations, numRows, numColumns, outputNum);
    model.init();
    model.setListeners(Collections.singletonList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(iter); // achieves end-to-end training

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    DataSetIterator testIter = new MnistDataSetIterator(100, 10000);
    while (testIter.hasNext()) {
        DataSet testMnist = testIter.next();
        INDArray predict2 = model.output(testMnist.getFeatureMatrix());
        eval.eval(testMnist.getLabels(), predict2);
    }
    log.info(eval.stats());
    log.info("****************Example finished********************");
}
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 44, Source file: NeuralNetworks.java