This article rounds up typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.DenseLayer. If you are wondering what DenseLayer is for and how to use it, the curated class examples below should help.
DenseLayer belongs to the org.deeplearning4j.nn.conf.layers package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
Example 1: getDeepDenseLayerNetworkConfiguration
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
/** Returns the network configuration: 2 hidden DenseLayers of size 50. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 50;
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
}
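A minimal training sketch (not part of the original example), assuming the enclosing class defines the seed, iterations, learningRate, numInputs and numOutputs fields referenced above, plus a hypothetical DataSetIterator named trainIter and an epoch count nEpochs:

MultiLayerNetwork net = new MultiLayerNetwork(getDeepDenseLayerNetworkConfiguration());
net.init();
net.setListeners(new ScoreIterationListener(100)); // log the score every 100 iterations

for (int epoch = 0; epoch < nEpochs; epoch++) {
    net.fit(trainIter);   // one pass over the training data
    trainIter.reset();
}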
Example 2: getConfiguration
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).learningRate(parameters.getLearningRate()).l2(0.001)
            .list(4)
            .layer(0,
                    new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(1,
                    new DenseLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(2,
                    new DenseLayer.Builder().nIn(10).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(3,
                    new OutputLayer.Builder().nIn(250).nOut(parameters.getInputSize()).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu")
                            .lossFunction(LossFunctions.LossFunction.MSE).build())
            .pretrain(false).backprop(true).build();
}
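Note the 250-10-250 shape: this configuration is an autoencoder, with the 10-unit layer acting as a bottleneck and the MSE output layer reconstructing the input. A hypothetical reconstruction check (not in the original), assuming a trained MultiLayerNetwork net and a single-row INDArray features of shape [1, inputSize]:

INDArray reconstruction = net.output(features);
// mean squared reconstruction error over all input elements
double mse = reconstruction.squaredDistance(features) / features.length();
System.out.println("Reconstruction MSE: " + mse);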
Example 3: net
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
private static MultiLayerConfiguration net(int nIn, int nOut) {
    return new NeuralNetConfiguration.Builder()
            .seed(42)
            .iterations(1)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .learningRate(0.1)
            .regularization(true).l2(1e-4)
            .list(
                    new DenseLayer.Builder().nIn(nIn).nOut(3).build(),
                    new DenseLayer.Builder().nIn(3).nOut(3).build(),
                    new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .activation(Activation.SOFTMAX)
                            .nIn(3)
                            .nOut(nOut)
                            .build())
            .build();
}
Example 4: getOriginalNet
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
public static MultiLayerNetwork getOriginalNet(int seed){
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .convolutionMode(ConvolutionMode.Same)
            .updater(new Sgd(0.3))
            .list()
            .layer(new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build())
            .layer(new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build())
            .layer(new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build())
            .layer(new DenseLayer.Builder().nOut(64).build()) // nIn inferred via setInputType below
            .layer(new DenseLayer.Builder().nIn(64).nOut(64).build())
            .layer(new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
            .setInputType(InputType.convolutionalFlat(28,28,1))
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    return net;
}
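Because the configuration declares InputType.convolutionalFlat(28,28,1), the network expects flattened 28x28 single-channel rows. A quick smoke-test sketch (not in the original source), assuming Nd4j is on the classpath:

MultiLayerNetwork net = getOriginalNet(12345);
INDArray input = Nd4j.rand(1, 28 * 28);   // one flattened 28x28 image
INDArray out = net.output(input);         // output activations, shape [1, 10]
System.out.println(java.util.Arrays.toString(out.shape()));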
Example 5: getOriginalGraph
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
public static ComputationGraph getOriginalGraph(int seed){
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .convolutionMode(ConvolutionMode.Same)
            .updater(new Sgd(0.3))
            .graphBuilder()
            .addInputs("in")
            .layer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2,2).stride(1,1).build(), "in")
            .layer("1", new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build(), "0")
            .layer("2", new ConvolutionLayer.Builder().nIn(3).nOut(3).kernelSize(2,2).stride(1,1).build(), "1")
            .layer("3", new DenseLayer.Builder().nOut(64).build(), "2")
            .layer("4", new DenseLayer.Builder().nIn(64).nOut(64).build(), "3")
            .layer("5", new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build(), "4")
            .setOutputs("5")
            .setInputTypes(InputType.convolutionalFlat(28,28,1))
            .build();
    ComputationGraph net = new ComputationGraph(conf);
    net.init();
    return net;
}
Example 6: testJsonComputationGraph
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void testJsonComputationGraph() {
    //ComputationGraph with a custom layer; check JSON and YAML config actually works...
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new CustomLayer(3.14159), "0")
            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10).build(), "1")
            .setOutputs("2").pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);

    ComputationGraphConfiguration confFromJson = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    ComputationGraphConfiguration confFromYaml = ComputationGraphConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
Example 7: checkInitializationFF
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void checkInitializationFF() {
    //Actually create a network with a custom layer; check initialization and forward pass
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
            .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).build())
            .layer(1, new CustomLayer(3.14159)) //hard-coded nIn/nOut of 10
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(11).build())
            .pretrain(false).backprop(true).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(9 * 10 + 10, net.getLayer(0).numParams());    // weights + biases
    assertEquals(10 * 10 + 10, net.getLayer(1).numParams());
    assertEquals(10 * 11 + 11, net.getLayer(2).numParams());

    //Check for exceptions...
    net.output(Nd4j.rand(1, 9));
    net.fit(new DataSet(Nd4j.rand(1, 9), Nd4j.rand(1, 11)));
}
Example 8: testMultiCNNLayer
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void testMultiCNNLayer() throws Exception {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list()
            .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER)
                    .activation(Activation.RELU).build())
            .layer(1, new LocalResponseNormalization.Builder().build())
            .layer(2, new DenseLayer.Builder().nOut(2).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(10)
                    .build())
            .backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    DataSetIterator iter = new MnistDataSetIterator(2, 2);
    DataSet next = iter.next();
    network.fit(next);
}
Example 9: getDenseMLNConfig
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
private static MultiLayerNetwork getDenseMLNConfig(boolean backprop, boolean pretrain) {
    int numInputs = 4;
    int outputNum = 3;
    long seed = 6;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed)
            .updater(new Sgd(1e-3)).l1(0.3).l2(1e-3).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(numInputs).nOut(3)
                    .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(3).nOut(2)
                    .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .weightInit(WeightInit.XAVIER).nIn(2).nOut(outputNum).build())
            .backprop(backprop).pretrain(pretrain).build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    return model;
}
Example 10: getGraph
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
private ComputationGraph getGraph(int numLabels, double lambda) {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
            .graphBuilder().addInputs("input1")
            .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.RELU).build(),
                    "input1")
            .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(numLabels)
                    .lambda(lambda).activation(Activation.SOFTMAX).build(), "l1")
            .setOutputs("lossLayer").pretrain(false).backprop(true).build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    return graph;
}
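A hypothetical fitting sketch (not part of the original test), assuming Nd4j is available; the MCXENT/center-loss output layer expects one-hot labels:

ComputationGraph graph = getGraph(3, 0.5);
INDArray features = Nd4j.rand(10, 4);
INDArray labels = Nd4j.zeros(10, 3);
for (int i = 0; i < 10; i++) {
    labels.putScalar(i, i % 3, 1.0);   // one-hot class label per row
}
graph.fit(new INDArray[]{features}, new INDArray[]{labels});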
Example 11: getIrisMLPSimpleConfig
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
/** Very simple back-prop config set up for Iris.
 * Learning rate = 0.1
 * No regularization, no AdaGrad, no momentum etc. One iteration.
 */
private static MultiLayerConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes,
        Activation activationFunction) {
    NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1))
            .seed(12345L).list();

    for (int i = 0; i < hiddenLayerSizes.length; i++) {
        int nIn = (i == 0 ? 4 : hiddenLayerSizes[i - 1]);   // 4 input features for Iris
        lb.layer(i, new DenseLayer.Builder().nIn(nIn).nOut(hiddenLayerSizes[i]).weightInit(WeightInit.XAVIER)
                .activation(activationFunction).build());
    }

    lb.layer(hiddenLayerSizes.length,
            new OutputLayer.Builder(LossFunction.MCXENT).nIn(hiddenLayerSizes[hiddenLayerSizes.length - 1])
                    .nOut(3).weightInit(WeightInit.XAVIER)
                    .activation(activationFunction.equals(Activation.IDENTITY) ? Activation.IDENTITY
                            : Activation.SOFTMAX)
                    .build());
    lb.pretrain(false).backprop(true);
    return lb.build();
}
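A hypothetical call (not in the original test) that builds a 4-5-5-3 Iris classifier from this helper:

MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[]{5, 5}, Activation.TANH);
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
System.out.println(net.summary());   // layer sizes: 4 -> 5 -> 5 -> 3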
Example 12: testJSONBasic
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void testJSONBasic() {
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
            .graphBuilder().addInputs("input")
            .addLayer("firstLayer",
                    new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
                    "input")
            .addLayer("outputLayer",
                    new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
                    "firstLayer")
            .setOutputs("outputLayer").pretrain(false).backprop(true).build();

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(json, conf2.toJson());
    assertEquals(conf, conf2);
}
Example 13: getNetwork
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
private MultiLayerNetwork getNetwork() {
    int nIn = 5;
    int nOut = 6;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01).l2(0.01)
            .updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list()
            .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build())
            .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build())
            .layer(2, new OutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    return net;
}
Example 14: testWriteCGModel
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void testWriteCGModel() throws Exception {
    ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1))
            .graphBuilder().addInputs("in")
            .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in")
            .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3).build(),
                    "dense")
            .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph cg = new ComputationGraph(config);
    cg.init();

    File tempFile = File.createTempFile("tsfs", "fdfsdf");
    tempFile.deleteOnExit();
    ModelSerializer.writeModel(cg, tempFile, true);   // true: also save the updater state

    ComputationGraph network = ModelSerializer.restoreComputationGraph(tempFile);
    assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson());
    assertEquals(cg.params(), network.params());
    assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
}
Example 15: testWriteCGModelInputStream
import org.deeplearning4j.nn.conf.layers.DenseLayer; // import the required package/class
@Test
public void testWriteCGModelInputStream() throws Exception {
    ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1))
            .graphBuilder().addInputs("in")
            .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in")
            .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3).build(),
                    "dense")
            .setOutputs("out").pretrain(false).backprop(true).build();

    ComputationGraph cg = new ComputationGraph(config);
    cg.init();

    File tempFile = File.createTempFile("tsfs", "fdfsdf");
    tempFile.deleteOnExit();
    ModelSerializer.writeModel(cg, tempFile, true);

    // Restore from a stream rather than a File; try-with-resources closes the stream
    try (FileInputStream fis = new FileInputStream(tempFile)) {
        ComputationGraph network = ModelSerializer.restoreComputationGraph(fis);
        assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson());
        assertEquals(cg.params(), network.params());
        assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray());
    }
}