This article collects and organizes typical usage examples of the Java class org.deeplearning4j.nn.multilayer.MultiLayerNetwork. If you are wondering what MultiLayerNetwork is used for, how to use it, or where to find working examples, the selected class code examples below should help.
The MultiLayerNetwork class belongs to the org.deeplearning4j.nn.multilayer package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: softMaxRegression
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
private static MultiLayerNetwork softMaxRegression(int seed,
        int iterations, int numRows, int numColumns, int outputNum) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(iterations)
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(1)
            .layer(0, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation("softmax")
                    .nIn(numColumns * numRows).nOut(outputNum)
                    .build())
            .pretrain(true).backprop(false)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    return model;
}
Developer: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 25, Source file: NeuralNetworks.java
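Example 1 only builds the network; it is neither initialized nor trained here. Below is a minimal usage sketch, assuming the MNIST iterator from deeplearning4j-datasets is on the classpath and that init() is called explicitly before training; the seed, iteration count, and batch size are illustrative.

import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

// Hypothetical driver: build the single-layer softmax model and fit it on MNIST.
private static void trainSoftMaxRegression() throws Exception {
    int numRows = 28, numColumns = 28, outputNum = 10;
    MultiLayerNetwork model = softMaxRegression(123, 10, numRows, numColumns, outputNum);
    model.init();                                       // allocate parameters before training
    model.setListeners(new ScoreIterationListener(1));  // log the score after every iteration
    DataSetIterator mnistTrain = new MnistDataSetIterator(64, true, 12345);
    model.fit(mnistTrain);                              // one pass over the training data
}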
Example 2: getOutput
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
private INDArray getOutput(InputStream isModel, INDArray image) {
org.deeplearning4j.nn.api.Model dl4jModel;
try {
// won't use the model guesser at the moment because it is trying to load a keras model?
// dl4jModel = ModelGuesser.loadModelGuess(isModel);
dl4jModel = loadModel(isModel);
} catch (Exception e) {
throw new IllegalArgumentException("Not able to load model.", e);
}
if(dl4jModel instanceof MultiLayerNetwork) {
MultiLayerNetwork multiLayerNetwork = (MultiLayerNetwork) dl4jModel;
multiLayerNetwork.init();
return multiLayerNetwork.output(image);
} else {
ComputationGraph graph = (ComputationGraph) dl4jModel;
graph.init();
return graph.output(image)[0];
}
}
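The loadModel helper called above is not part of this example. A minimal sketch of what it could look like, assuming the stream holds a standard DL4J model archive and that Apache Commons IO is available for buffering the bytes so both restore paths can be attempted:

import org.apache.commons.io.IOUtils;
import org.deeplearning4j.util.ModelSerializer;
import java.io.ByteArrayInputStream;
import java.io.InputStream;

// Hypothetical helper: restore either a MultiLayerNetwork or a ComputationGraph from the stream.
// The bytes are buffered so the stream can be re-read if the first restore attempt fails.
private org.deeplearning4j.nn.api.Model loadModel(InputStream isModel) throws Exception {
    byte[] bytes = IOUtils.toByteArray(isModel);
    try {
        return ModelSerializer.restoreMultiLayerNetwork(new ByteArrayInputStream(bytes), true);
    } catch (Exception e) {
        return ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(bytes), true);
    }
}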
Example 3: testRunIteration
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testRunIteration() {
DataSet dataSet = new IrisDataSetIterator(5,5).next();
List<DataSet> list = dataSet.asList();
JavaRDD<DataSet> data = sc.parallelize(list);
SparkDl4jMultiLayer sparkNetCopy = new SparkDl4jMultiLayer(sc,getBasicConf(),new ParameterAveragingTrainingMaster(true,numExecutors(),1,5,1,0));
MultiLayerNetwork networkCopy = sparkNetCopy.fit(data);
INDArray expectedParams = networkCopy.params();
SparkDl4jMultiLayer sparkNet = getBasicNetwork();
MultiLayerNetwork network = sparkNet.fit(data);
INDArray actualParams = network.params();
assertEquals(expectedParams.size(1), actualParams.size(1));
}
Developer: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines of code: 19, Source file: TestSparkMultiLayerParameterAveraging.java
Example 4: testUpdaters
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testUpdaters() {
SparkDl4jMultiLayer sparkNet = getBasicNetwork();
MultiLayerNetwork netCopy = sparkNet.getNetwork().clone();
netCopy.fit(data);
Updater expectedUpdater = netCopy.conf().getLayer().getUpdater();
double expectedLR = netCopy.conf().getLayer().getLearningRate();
double expectedMomentum = netCopy.conf().getLayer().getMomentum();
Updater actualUpdater = sparkNet.getNetwork().conf().getLayer().getUpdater();
sparkNet.fit(sparkData);
double actualLR = sparkNet.getNetwork().conf().getLayer().getLearningRate();
double actualMomentum = sparkNet.getNetwork().conf().getLayer().getMomentum();
assertEquals(expectedUpdater, actualUpdater);
assertEquals(expectedLR, actualLR, 0.01);
assertEquals(expectedMomentum, actualMomentum, 0.01);
}
Developer: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines of code: 21, Source file: TestSparkMultiLayerParameterAveraging.java
Example 5: testEvaluation
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testEvaluation(){
SparkDl4jMultiLayer sparkNet = getBasicNetwork();
MultiLayerNetwork netCopy = sparkNet.getNetwork().clone();
Evaluation evalExpected = new Evaluation();
INDArray outLocal = netCopy.output(input, Layer.TrainingMode.TEST);
evalExpected.eval(labels, outLocal);
Evaluation evalActual = sparkNet.evaluate(sparkData);
assertEquals(evalExpected.accuracy(), evalActual.accuracy(), 1e-3);
assertEquals(evalExpected.f1(), evalActual.f1(), 1e-3);
assertEquals(evalExpected.getNumRowCounter(), evalActual.getNumRowCounter(), 1e-3);
assertMapEquals(evalExpected.falseNegatives(),evalActual.falseNegatives());
assertMapEquals(evalExpected.falsePositives(), evalActual.falsePositives());
assertMapEquals(evalExpected.trueNegatives(), evalActual.trueNegatives());
assertMapEquals(evalExpected.truePositives(),evalActual.truePositives());
assertEquals(evalExpected.precision(), evalActual.precision(), 1e-3);
assertEquals(evalExpected.recall(), evalActual.recall(), 1e-3);
assertEquals(evalExpected.getConfusionMatrix(), evalActual.getConfusionMatrix());
}
Developer: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines of code: 24, Source file: TestSparkMultiLayerParameterAveraging.java
Example 6: evalMnistTestSet
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
private static void evalMnistTestSet(MultiLayerNetwork leNetModel) throws Exception {
log.info("Load test data....");
int batchSize = 64;
DataSetIterator mnistTest = new MnistDataSetIterator(batchSize,false,12345);
log.info("Evaluate model....");
int outputNum = 10;
Evaluation eval = new Evaluation(outputNum);
while(mnistTest.hasNext()){
DataSet dataSet = mnistTest.next();
INDArray output = leNetModel.output(dataSet.getFeatureMatrix(), false);
eval.eval(dataSet.getLabels(), output);
}
log.info(eval.stats());
}
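evalMnistTestSet expects an already trained LeNet model. A minimal caller sketch, assuming the model was previously saved with ModelSerializer; the file name is an assumption:

import org.deeplearning4j.util.ModelSerializer;
import java.io.File;

// Hypothetical caller: restore a previously saved LeNet model and evaluate it on the MNIST test set.
public static void main(String[] args) throws Exception {
    File modelFile = new File("lenet-mnist-model.zip");  // assumed location of the saved model
    MultiLayerNetwork leNetModel = ModelSerializer.restoreMultiLayerNetwork(modelFile);
    evalMnistTestSet(leNetModel);
}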
Example 7: loadDL4JNetworkParameters
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
public static void loadDL4JNetworkParameters(MultiLayerNetwork savedNetwork, String baseModelFilePath) throws IOException {
// String jsonFilePath = baseModelFilePath + "dl4j_model_conf.json";
String parametersFilePath = baseModelFilePath + "dl4j_model.parameters";
/*
String jsonBuffer = "";
MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson( jsonBuffer );
*/
DataInputStream dis = new DataInputStream(new FileInputStream( parametersFilePath ));
INDArray newParams = Nd4j.read( dis );
dis.close();
//MultiLayerNetwork savedNetwork = new MultiLayerNetwork( confFromJson );
//savedNetwork.init();
savedNetwork.setParameters(newParams);
//System.out.println("Original network params " + model.params());
//System.out.println(savedNetwork.params());
//return savedNetwork;
}
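Example 7 restores parameters that were written separately from the JSON configuration. A minimal sketch of the matching save step, assuming the same file-name convention (dl4j_model_conf.json and dl4j_model.parameters):

import org.nd4j.linalg.factory.Nd4j;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

// Hypothetical counterpart: persist a trained network as a JSON configuration plus a raw parameter file.
public static void saveDL4JNetworkParameters(MultiLayerNetwork network, String baseModelFilePath) throws IOException {
    // write the architecture as JSON
    String json = network.getLayerWiseConfigurations().toJson();
    Files.write(Paths.get(baseModelFilePath + "dl4j_model_conf.json"), json.getBytes(StandardCharsets.UTF_8));
    // write the flattened parameter vector
    try (DataOutputStream dos = new DataOutputStream(new FileOutputStream(baseModelFilePath + "dl4j_model.parameters"))) {
        Nd4j.write(network.params(), dos);
    }
}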
Example 8: iterationDone
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Override
public void iterationDone(Model model, int i) {
if (printIterations <= 0)
printIterations = 1;
if (iterCount % printIterations == 0) {
iter.reset();
double cost = 0;
double count = 0;
while(iter.hasNext()) {
DataSet minibatch = iter.next(miniBatchSize);
cost += ((MultiLayerNetwork)model).scoreExamples(minibatch, false).sumNumber().doubleValue();
count += minibatch.getLabelsMaskArray().sumNumber().doubleValue();
}
log.info(String.format("Iteration %5d test set score: %.4f", iterCount, cost/count));
}
iterCount++;
}
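The fields used in iterationDone (iter, printIterations, miniBatchSize, iterCount) belong to the enclosing listener class, which is not shown. A minimal skeleton under that assumption; which listener interface it implements depends on the DL4J version in use, so only the state and constructor are sketched:

import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

// Hypothetical enclosing class for the iterationDone callback shown in Example 8.
public class TestSetScoreListener /* implements IterationListener */ {
    private final DataSetIterator iter;   // held-out data to score against
    private int printIterations;          // how often to log the test-set score
    private final int miniBatchSize;      // batch size used when pulling test examples
    private int iterCount = 0;            // running iteration counter

    public TestSetScoreListener(DataSetIterator iter, int printIterations, int miniBatchSize) {
        this.iter = iter;
        this.printIterations = printIterations;
        this.miniBatchSize = miniBatchSize;
    }

    // the iterationDone(Model, int) method shown in Example 8 goes here
}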
Example 9: main
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
/**
* args[0] input: word2vec file name
* args[1] input: sentiment model file name
* args[2] input: parent folder of the test data
*
* @param args
* @throws Exception
*/
public static void main (final String[] args) throws Exception {
if (args.length < 3)
System.exit(1);
WordVectors wvec = WordVectorSerializer.loadTxtVectors(new File(args[0]));
MultiLayerNetwork model = ModelSerializer.restoreMultiLayerNetwork(args[1],false);
DataSetIterator test = new AsyncDataSetIterator(
new SentimentRecurrentIterator(args[2],wvec,100,300,false),1);
Evaluation evaluation = new Evaluation();
while(test.hasNext()) {
DataSet t = test.next();
INDArray features = t.getFeatures();
INDArray labels = t.getLabels();
INDArray inMask = t.getFeaturesMaskArray();
INDArray outMask = t.getLabelsMaskArray();
INDArray predicted = model.output(features,false,inMask,outMask);
evaluation.evalTimeSeries(labels, predicted, outMask);
}
System.out.println(evaluation.stats());
}
Example 10: createNetwork
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
private MultiLayerNetwork createNetwork(int numLabels) {
MultiLayerNetwork network = null;
switch (modelType) {
case "LeNet":
network = lenetModel(numLabels);
break;
case "AlexNet":
network = alexnetModel(numLabels);
break;
case "custom":
network = customModel(numLabels);
break;
default:
throw new InvalidInputTypeException("Incorrect model provided.");
}
network.init();
network.setListeners(new ScoreIterationListener(listenerFreq));
return network;
}
Example 11: testWriteMLNModel
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testWriteMLNModel() throws Exception {
int nIn = 5;
int nOut = 6;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01)
.l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list()
.layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build())
.layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder()
.lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build())
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
File tempFile = File.createTempFile("tsfs", "fdfsdf");
tempFile.deleteOnExit();
ModelSerializer.writeModel(net, tempFile, true);
MultiLayerNetwork network = ModelSerializer.restoreMultiLayerNetwork(tempFile);
assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson());
assertEquals(net.params(), network.params());
assertEquals(net.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
Example 12: getDenseMLNConfig
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
private static MultiLayerNetwork getDenseMLNConfig(boolean backprop, boolean pretrain) {
int numInputs = 4;
int outputNum = 3;
long seed = 6;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed)
.updater(new Sgd(1e-3)).l1(0.3).l2(1e-3).list()
.layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(numInputs).nOut(3)
.activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(3).nOut(2)
.activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.weightInit(WeightInit.XAVIER).nIn(2).nOut(outputNum).build())
.backprop(backprop).pretrain(pretrain).build();
MultiLayerNetwork model = new MultiLayerNetwork(conf);
model.init();
return model;
}
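A minimal usage sketch for this helper, assuming the Iris iterator from deeplearning4j-datasets and backprop-only training; the epoch count is illustrative:

import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

// Hypothetical usage: build the dense network with backprop enabled and fit it on the Iris data set.
public static void fitOnIris() {
    MultiLayerNetwork model = getDenseMLNConfig(true, false);  // backprop on, no layer-wise pretraining
    DataSetIterator iris = new IrisDataSetIterator(150, 150);  // full data set in a single batch
    for (int epoch = 0; epoch < 100; epoch++) {
        iris.reset();
        model.fit(iris);
    }
}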
Example 13: testListenersViaModel
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testListenersViaModel() {
TestListener.clearCounts();
MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list().layer(0,
new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10)
.activation(Activation.TANH).build());
MultiLayerConfiguration conf = builder.build();
MultiLayerNetwork model = new MultiLayerNetwork(conf);
model.init();
StatsStorage ss = new InMemoryStatsStorage();
model.setListeners(new TestListener(), new StatsListener(ss));
testListenersForModel(model, null);
assertEquals(1, ss.listSessionIDs().size());
assertEquals(2, ss.listWorkerIDsForSession(ss.listSessionIDs().get(0)).size());
}
Example 14: testDeconvolution2DUnsupportedSameModeNetwork
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test(expected = IllegalArgumentException.class)
public void testDeconvolution2DUnsupportedSameModeNetwork() {
/*
* When convolution mode Same is set for the network and a deconvolution layer is added
* then only layer activation will fail. Suboptimal, but I don't think we want special
* logic for NNC in this case.
*/
NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345)
.updater(new NoOp())
.activation(Activation.SIGMOID)
.convolutionMode(Same)
.list()
.layer(new Deconvolution2D.Builder().name("deconvolution")
.nIn(3).nOut(2).build());
MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.activation(Activation.SOFTMAX).nOut(2).build())
.setInputType(InputType.convolutionalFlat(7, 7, 3)).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
net.getLayer(0).activate(Nd4j.rand(10, 7 * 7 * 3));
}
Example 15: testIterationListener
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; // import the required package/class
@Test
public void testIterationListener() {
MultiLayerNetwork model1 = new MultiLayerNetwork(getConf());
model1.init();
model1.setListeners(Collections.singletonList((IterationListener) new ScoreIterationListener(1)));
MultiLayerNetwork model2 = new MultiLayerNetwork(getConf());
model2.setListeners(Collections.singletonList((IterationListener) new ScoreIterationListener(1)));
model2.init();
Layer[] l1 = model1.getLayers();
for (int i = 0; i < l1.length; i++)
assertTrue(l1[i].getListeners() != null && l1[i].getListeners().size() == 1);
Layer[] l2 = model2.getLayers();
for (int i = 0; i < l2.length; i++)
assertTrue(l2[i].getListeners() != null && l2[i].getListeners().size() == 1);
}