This page collects typical usage examples of the Java class org.deeplearning4j.nn.weights.WeightInit. If you are wondering what the WeightInit class does, how to use it, or where to find real-world examples of it, the curated code examples below may help.
The WeightInit class belongs to the org.deeplearning4j.nn.weights package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
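WeightInit is an enum that selects the weight-initialization scheme for a layer or for a whole network (XAVIER, RELU, UNIFORM, DISTRIBUTION, and so on). As a minimal sketch of the typical call site, assuming the pre-1.0 Deeplearning4j builder API that the examples below use, the value is passed to weightInit() either network-wide or per layer:

// Xavier initialization as the network-wide default, overridden for one layer
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .weightInit(WeightInit.XAVIER)           // default for every layer
        .list()
        .layer(0, new DenseLayer.Builder().nIn(10).nOut(5)
                .weightInit(WeightInit.RELU)     // per-layer override
                .build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nIn(5).nOut(3).build())
        .build();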
Example 1: getDeepDenseLayerNetworkConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
/** Returns the network configuration: 2 hidden DenseLayers of size 50. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 50;
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .weightInit(WeightInit.XAVIER)
            .updater(Updater.NESTEROVS).momentum(0.9)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .activation(Activation.TANH).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
}
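The method above only builds a configuration; nothing is trained yet. A minimal usage sketch, assuming the same pre-1.0 API and a hypothetical DataSetIterator named trainIter supplying the training data:

MultiLayerNetwork net = new MultiLayerNetwork(getDeepDenseLayerNetworkConfiguration());
net.init();
net.setListeners(new ScoreIterationListener(100)); // log the score every 100 iterations
net.fit(trainIter);                                // trainIter is a hypothetical DataSetIterator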
Example 2: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
protected MultiLayerConfiguration getConfiguration()
{
    int hiddenLayerNodes = parameters.getHiddeLayerNodes()[0];
    final RBM hiddenLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize()).nOut(hiddenLayerNodes).weightInit(WeightInit.XAVIER).k(1)
            .activation("relu").lossFunction(LossFunctions.LossFunction.RMSE_XENT).updater(Updater.ADAGRAD)
            .dropOut(0.5).build();
    final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(hiddenLayerNodes)
            .nOut(parameters.getOutputSize()).activation("softmax").build();
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate()).optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4).regularization(true).momentum(0.9).useDropConnect(true).list(2).layer(0, hiddenLayer)
            .layer(1, outputLayer).build();
}
Example 3: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).learningRate(parameters.getLearningRate()).l2(0.001)
            .list(4)
            .layer(0,
                    new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(1,
                    new DenseLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(2,
                    new DenseLayer.Builder().nIn(10).nOut(250).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu").build())
            .layer(3,
                    new OutputLayer.Builder().nIn(250).nOut(parameters.getInputSize()).weightInit(WeightInit.XAVIER)
                            .updater(Updater.ADAGRAD).activation("relu")
                            .lossFunction(LossFunctions.LossFunction.MSE).build())
            .pretrain(false).backprop(true).build();
}
Example 4: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0).iterations(parameters.getIterations()).momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list(4)
            .layer(0,
                    new AutoEncoder.Builder().nIn(parameters.getInputSize()).nOut(500).weightInit(WeightInit.XAVIER)
                            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
            .layer(1, new AutoEncoder.Builder().nIn(500).nOut(250).weightInit(WeightInit.XAVIER)
                    .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3)
                    .build())
            .layer(2,
                    new AutoEncoder.Builder().nIn(250).nOut(200).weightInit(WeightInit.XAVIER)
                            .lossFunction(LossFunction.RMSE_XENT).corruptionLevel(0.3).build())
            .layer(3, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD).activation("softmax").nIn(200)
                    .nOut(parameters.getOutputSize()).build())
            .pretrain(true).backprop(false).build();
}
Example 5: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(2)
            .layer(0,
                    new ConvolutionLayer.Builder(new int[] { 1, 1 }).nIn(parameters.getInputSize()).nOut(1000)
                            .activation("relu").weightInit(WeightInit.RELU).build())
            .layer(1,
                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nOut(parameters.getOutputSize())
                            .weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);
    // Wires up layer sizes and input preprocessors for the convolutional layers from the image dimensions
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
Example 6: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
    final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations())
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(3)
            .layer(0,
                    new ConvolutionLayer.Builder(10, 10).stride(2, 2).nIn(parameters.getChannels()).nOut(6)
                            .weightInit(WeightInit.XAVIER).activation("relu").build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(parameters.getOutputSize()).weightInit(WeightInit.XAVIER).activation("softmax").build())
            .backprop(true).pretrain(false);
    new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
    return builder.build();
}
Example 7: testSerialization
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Test
public void testSerialization() throws IOException, ClassNotFoundException {
    NeuralNetConfiguration nnc = new NeuralNetConfiguration();
    nnc.setSeed(42);
    nnc.builder()
            .learningRate(5)
            .weightInit(WeightInit.UNIFORM)
            .biasLearningRate(5)
            .l1(5)
            .l2(5)
            .updater(new AdaDelta())
            .build();
    final File output = Paths.get(System.getProperty("java.io.tmpdir"), "nnc.object").toFile();
    ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(output));
    oos.writeObject(nnc);
    oos.close();
    ObjectInputStream ois = new ObjectInputStream(new FileInputStream(output));
    NeuralNetConfiguration nnc2 = (NeuralNetConfiguration) ois.readObject();
    ois.close(); // close the input stream as well, so the temp file can be deleted
    Assert.assertEquals(nnc, nnc2);
    output.delete();
}
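The same round trip can also be written with try-with-resources, so the streams are closed even when an assertion fails; a sketch under the same assumptions as the test above:

try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(output))) {
    oos.writeObject(nnc);
}
try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(output))) {
    Assert.assertEquals(nnc, ois.readObject());
}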
Example 8: net
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
private static MultiLayerConfiguration net(int nIn, int nOut) {
    return new NeuralNetConfiguration.Builder()
            .seed(42)
            .iterations(1)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .learningRate(0.1)
            .regularization(true).l2(1e-4)
            .list(
                    new DenseLayer.Builder().nIn(nIn).nOut(3).build(),
                    new DenseLayer.Builder().nIn(3).nOut(3).build(),
                    new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .activation(Activation.SOFTMAX)
                            .nIn(3)
                            .nOut(nOut)
                            .build()
            )
            .build();
}
Example 9: getConfiguration
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .constrainGradientToUnitNorm(true)
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(rows * columns).nOut(600).build())
            .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(600).nOut(250).build())
            .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(250).nOut(200).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                    .weightInit(WeightInit.XAVIER)
                    .activation("softmax")
                    .nIn(200).nOut(AUTOMATIC).build())
            .pretrain(true).backprop(false)
            .build();
    return conf;
}
Example 10: testInitializers
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Test
public void testInitializers() throws Exception {
    Integer keras1 = 1;
    Integer keras2 = 2;
    String[] keras1Inits = initializers(conf1);
    String[] keras2Inits = initializers(conf2);
    WeightInit[] dl4jInits = dl4jInitializers();
    Distribution[] dl4jDistributions = dl4jDistributions();
    for (int i = 0; i < dl4jInits.length - 1; i++) {
        initilizationDenseLayer(conf1, keras1, keras1Inits[i], dl4jInits[i], dl4jDistributions[i]);
        initilizationDenseLayer(conf2, keras2, keras2Inits[i], dl4jInits[i], dl4jDistributions[i]);
        initilizationDenseLayer(conf2, keras2, keras2Inits[dl4jInits.length - 1],
                dl4jInits[dl4jInits.length - 1], dl4jDistributions[dl4jInits.length - 1]);
    }
}
Example 11: dl4jInitializers
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
private WeightInit[] dl4jInitializers() {
    return new WeightInit[] {
            WeightInit.XAVIER,
            WeightInit.XAVIER_UNIFORM,
            WeightInit.LECUN_NORMAL,
            WeightInit.LECUN_UNIFORM,
            WeightInit.DISTRIBUTION,
            WeightInit.RELU,
            WeightInit.RELU_UNIFORM,
            WeightInit.ONES,
            WeightInit.ZERO,
            WeightInit.IDENTITY,
            WeightInit.DISTRIBUTION,
            WeightInit.DISTRIBUTION,
            WeightInit.DISTRIBUTION,
            WeightInit.VAR_SCALING_NORMAL_FAN_IN};
}
Example 12: getConf
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
private static MultiLayerConfiguration getConf() {
    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder().seed(12345L)
                    .list().layer(0,
                            new DenseLayer.Builder().nIn(4).nOut(3)
                                    .weightInit(WeightInit.DISTRIBUTION) // DISTRIBUTION is paired with an explicit .dist(...)
                                    .dist(new NormalDistribution(0, 1))
                                    .build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                            LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX).nIn(3).nOut(3)
                            .weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1)).build())
                    .build();
    return conf;
}
Example 13: getGraph
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
private ComputationGraph getGraph(int numLabels, double lambda) {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
            .graphBuilder().addInputs("input1")
            .addLayer("l1", new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.RELU).build(),
                    "input1")
            .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(numLabels)
                    .lambda(lambda).activation(Activation.SOFTMAX).build(), "l1")
            .setOutputs("lossLayer").pretrain(false).backprop(true).build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();
    return graph;
}
Example 14: testCifarDataSetIteratorReset
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
@Ignore // use when checking cifar dataset iterator
@Test
public void testCifarDataSetIteratorReset() {
    int epochs = 2;
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .weightInit(WeightInit.XAVIER).seed(12345L).list()
            .layer(0, new DenseLayer.Builder().nIn(400).nOut(50).activation(Activation.RELU).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(50).nOut(10).build())
            .pretrain(false).backprop(true)
            .inputPreProcessor(0, new CnnToFeedForwardPreProcessor(20, 20, 1)).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    net.setListeners(new ScoreIterationListener(1));
    MultipleEpochsIterator ds =
            new MultipleEpochsIterator(epochs, new CifarDataSetIterator(10, 20, new int[] {20, 20, 1}));
    net.fit(ds);
    assertEquals(epochs, ds.epochs);
    assertEquals(2, ds.batch);
}
Example 15: complete
import org.deeplearning4j.nn.weights.WeightInit; // import the required package/class
public MultiLayerConfiguration.Builder complete() {
    final int numRows = 28;
    final int numColumns = 28;
    int nChannels = 1;
    int outputNum = 10;
    int seed = 123;
    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10},
                    new int[] {2, 2}).nIn(nChannels).nOut(6).build())
            .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                    .build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(5 * 5 * 1 * 6) // = 150, matching the 5x5x6 output of the subsampling layer
                    .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                    .build())
            .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
            .inputPreProcessor(2, new CnnToFeedForwardPreProcessor(5, 5, 6)).backprop(true).pretrain(false);
    return builder;
}
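Note that complete() returns an unfinished builder rather than a configuration; a caller would presumably finish and use it along these lines (a sketch, assuming the same API as the examples above):

MultiLayerNetwork net = new MultiLayerNetwork(complete().build());
net.init();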