This article collects typical usage examples of the Java class org.nd4j.linalg.lossfunctions.LossFunctions. If you are wondering what LossFunctions is, how to use it, or where to find working examples, the curated snippets below should help.
The LossFunctions class belongs to the org.nd4j.linalg.lossfunctions package. 15 code examples are shown below, sorted by popularity by default.
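Before the examples, a minimal sketch of the two ways the snippets below reference a loss function: passing a LossFunctions.LossFunction constant to the OutputLayer builder constructor, or setting it through lossFunction(...). The nIn/nOut values here are arbitrary placeholders.
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class LossFunctionUsageSketch {
    public static void main(String[] args) {
        // Option 1: pass the loss function to the builder constructor (classification).
        OutputLayer classification = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nIn(5).nOut(3).build();
        // Option 2: set it via lossFunction(...) (regression); the two forms are equivalent.
        OutputLayer regression = new OutputLayer.Builder()
                .lossFunction(LossFunctions.LossFunction.MSE)
                .activation(Activation.IDENTITY).nIn(5).nOut(1).build();
        System.out.println(classification + "\n" + regression);
    }
}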
Example 1: getDeepDenseLayerNetworkConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
/** Returns the network configuration: 2 hidden DenseLayers of size 50. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
final int numHiddenNodes = 50;
return new NeuralNetConfiguration.Builder()
.seed(seed)
.iterations(iterations)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.learningRate(learningRate)
.weightInit(WeightInit.XAVIER)
.updater(Updater.NESTEROVS).momentum(0.9)
.list()
.layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
.activation(Activation.TANH).build())
.layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
.activation(Activation.TANH).build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
.activation(Activation.IDENTITY)
.nIn(numHiddenNodes).nOut(numOutputs).build())
.pretrain(false).backprop(true).build();
}
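A hedged usage sketch for the configuration above; seed, iterations, learningRate, numInputs, and numOutputs are assumed to be fields of the surrounding class, and the listener frequency is arbitrary.
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;

MultiLayerNetwork net = new MultiLayerNetwork(getDeepDenseLayerNetworkConfiguration());
net.init(); // allocate and initialize the parameters before training
net.setListeners(new ScoreIterationListener(100)); // log the score every 100 iterations
// net.fit(trainIter); // trainIter: a DataSetIterator supplied by the caller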
Example 2: makeLayer
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
public static FeedForwardLayer makeLayer(Config layerConfig){
Type layerType = Type.valueOf(layerConfig.getString("type"));
switch (layerType) {
case GravesLSTM:
return new GravesLSTM.Builder()
.activation(layerConfig.getString("activation"))
.nIn(layerConfig.getInt("nIn"))
.nOut(layerConfig.getInt("nOut")).build();
case RnnOutputLayer:
return new RnnOutputLayer.Builder()
.activation(layerConfig.getString("activation"))
.lossFunction(LossFunctions.LossFunction.valueOf(layerConfig.getString("lossFunction")))
.nIn(layerConfig.getInt("nIn"))
.nOut(layerConfig.getInt("nOut")).build();
default:
throw new IllegalArgumentException("Unsupported layer type: " + layerType);
}
}
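The valueOf calls above map configuration strings directly onto enum constants. A standalone sketch of that parsing step (the string literal is illustrative):
import org.nd4j.linalg.lossfunctions.LossFunctions;

// Enum names must match exactly, e.g. "MCXENT", "MSE", "NEGATIVELOGLIKELIHOOD".
LossFunctions.LossFunction loss = LossFunctions.LossFunction.valueOf("MCXENT");
// An unknown name throws IllegalArgumentException, so validate configuration values early.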
Example 3: getConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
protected MultiLayerConfiguration getConfiguration()
{
int hiddenLayerNodes = parameters.getHiddeLayerNodes()[0];
final RBM hiddenLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
.nIn(parameters.getInputSize()).nOut(hiddenLayerNodes).weightInit(WeightInit.XAVIER).k(1)
.activation("relu").lossFunction(LossFunctions.LossFunction.RMSE_XENT).updater(Updater.ADAGRAD)
.dropOut(0.5).build();
final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(hiddenLayerNodes)
.nOut(parameters.getOutputSize()).activation("softmax").build();
return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
.learningRate(parameters.getLearningRate()).optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
.l2(2e-4).regularization(true).momentum(0.9).useDropConnect(true).list(2).layer(0, hiddenLayer)
.layer(1, outputLayer).build();
}
Example 4: getConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).learningRate(parameters.getLearningRate()).l2(0.001)
.list(4)
.layer(0,
new DenseLayer.Builder().nIn(parameters.getInputSize()).nOut(250).weightInit(WeightInit.XAVIER)
.updater(Updater.ADAGRAD).activation("relu").build())
.layer(1,
new DenseLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER)
.updater(Updater.ADAGRAD).activation("relu").build())
.layer(2,
new DenseLayer.Builder().nIn(10).nOut(250).weightInit(WeightInit.XAVIER)
.updater(Updater.ADAGRAD).activation("relu").build())
.layer(3,
new OutputLayer.Builder().nIn(250).nOut(parameters.getInputSize()).weightInit(WeightInit.XAVIER)
.updater(Updater.ADAGRAD).activation("relu")
.lossFunction(LossFunctions.LossFunction.MSE).build())
.pretrain(false).backprop(true).build();
}
Example 5: getConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
.iterations(parameters.getIterations())
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(2)
.layer(0,
new ConvolutionLayer.Builder(new int[] { 1, 1 }).nIn(parameters.getInputSize()).nOut(1000)
.activation("relu").weightInit(WeightInit.RELU).build())
.layer(1,
new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nOut(parameters.getOutputSize())
.weightInit(WeightInit.XAVIER).activation("softmax").build())
.backprop(true).pretrain(false);
new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
return builder.build();
}
Example 6: getConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
final ConvulationalNetParameters parameters = (ConvulationalNetParameters) this.parameters;
final MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
.iterations(parameters.getIterations())
.gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list(3)
.layer(0,
new ConvolutionLayer.Builder(10, 10).stride(2, 2).nIn(parameters.getChannels()).nOut(6)
.weightInit(WeightInit.XAVIER).activation("relu").build())
.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] { 2, 2 }).build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
.nOut(parameters.getOutputSize()).weightInit(WeightInit.XAVIER).activation("softmax").build())
.backprop(true).pretrain(false);
new ConvolutionLayerSetup(builder, parameters.getRows(), parameters.getColumns(), parameters.getChannels());
return builder.build();
}
Example 7: net
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
private static MultiLayerConfiguration net(int nIn, int nOut) {
return new NeuralNetConfiguration.Builder()
.seed(42)
.iterations(1)
.activation(Activation.RELU)
.weightInit(WeightInit.XAVIER)
.learningRate(0.1)
.regularization(true).l2(1e-4)
.list(
new DenseLayer.Builder().nIn(nIn).nOut(3).build(),
new DenseLayer.Builder().nIn(3).nOut(3).build(),
new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
.activation(Activation.SOFTMAX)
.nIn(3)
.nOut(nOut)
.build()
)
.build();
}
Example 8: getConfiguration
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.constrainGradientToUnitNorm(true)
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
.list(4)
.layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
.weightInit(WeightInit.XAVIER)
.nIn(rows * columns).nOut(600).build())
.layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
.weightInit(WeightInit.XAVIER)
.nIn(600).nOut(250).build())
.layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
.weightInit(WeightInit.XAVIER)
.nIn(250).nOut(200).build())
.layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
.weightInit(WeightInit.XAVIER)
.activation("softmax")
.nIn(200).nOut(AUTOMATIC).build())
.pretrain(true).backprop(false)
.build();
return conf;
}
Example 9: testCNNBNActivationCombo
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Test
public void testCNNBNActivationCombo() throws Exception {
DataSetIterator iter = new MnistDataSetIterator(2, 2);
DataSet next = iter.next();
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123)
.list()
.layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER)
.activation(Activation.IDENTITY).build())
.layer(1, new BatchNormalization.Builder().build())
.layer(2, new ActivationLayer.Builder().activation(Activation.RELU).build())
.layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build())
.backprop(true).pretrain(false).setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
MultiLayerNetwork network = new MultiLayerNetwork(conf);
network.init();
network.fit(next);
assertNotEquals(null, network.getLayer(0).getParam("W"));
assertNotEquals(null, network.getLayer(0).getParam("b"));
}
Example 10: incompleteMnistLenet
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
public MultiLayerConfiguration.Builder incompleteMnistLenet() {
MultiLayerConfiguration.Builder builder =
new NeuralNetConfiguration.Builder().seed(3)
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list()
.layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
new int[] {5, 5}).nIn(1).nOut(20).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
new int[] {2, 2}, new int[] {2, 2}).build())
.layer(2, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
new int[] {5, 5}).nIn(20).nOut(50).build())
.layer(3, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder(
new int[] {2, 2}, new int[] {2, 2}).build())
.layer(4, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(500)
.build())
.layer(5, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
.activation(Activation.SOFTMAX).nOut(10)
.build());
return builder;
}
Example 11: getNetworkConf
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
private MultiLayerConfiguration getNetworkConf(boolean useTBPTT) {
MultiLayerConfiguration.Builder builder =
new NeuralNetConfiguration.Builder()
.updater(new AdaGrad(0.1)).l2(0.0025)
.stepFunction(new NegativeDefaultStepFunction())
.list()
.layer(0, new GravesLSTM.Builder().weightInit(WeightInit.DISTRIBUTION)
.dist(new NormalDistribution(0.0, 0.01)).nIn(nIn)
.nOut(layerSize).activation(Activation.TANH).build())
.layer(1, new OutputLayer.Builder(
LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nIn(layerSize)
.nOut(nIn).activation(Activation.SOFTMAX)
.build())
.inputPreProcessor(1, new RnnToFeedForwardPreProcessor()).backprop(true)
.pretrain(false);
if (useTBPTT) {
builder.backpropType(BackpropType.TruncatedBPTT);
builder.tBPTTBackwardLength(window / 3);
builder.tBPTTForwardLength(window / 3);
}
return builder.build();
}
Example 12: testDeconvolution2DUnsupportedSameModeNetwork
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Test(expected = IllegalArgumentException.class)
public void testDeconvolution2DUnsupportedSameModeNetwork() {
/*
 * When convolution mode Same is set for the network and a deconvolution layer is added,
 * the failure only surfaces when the layer is activated. Suboptimal, but special-casing
 * this in NeuralNetConfiguration (NNC) does not seem worthwhile.
 */
NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345)
.updater(new NoOp())
.activation(Activation.SIGMOID)
.convolutionMode(Same)
.list()
.layer(new Deconvolution2D.Builder().name("deconvolution")
.nIn(3).nOut(2).build());
MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.activation(Activation.SOFTMAX).nOut(2).build())
.setInputType(InputType.convolutionalFlat(7, 7, 3)).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
net.getLayer(0).activate(Nd4j.rand(10, 7 * 7 * 3));
}
Example 13: getGraphConfCNN
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
private static ComputationGraphConfiguration getGraphConfCNN(int seed, IUpdater updater) {
Nd4j.getRandom().setSeed(seed);
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.weightInit(WeightInit.XAVIER).updater(updater).seed(seed).graphBuilder()
.addInputs("in")
.addLayer("0", new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1)
.padding(0, 0).activation(Activation.TANH).build(), "in")
.addLayer("1", new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1)
.padding(0, 0).activation(Activation.TANH).build(), "0")
.addLayer("2", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nOut(10)
.build(), "1")
.setOutputs("2").setInputTypes(InputType.convolutional(10, 10, 3)).pretrain(false)
.backprop(true).build();
return conf;
}
Developer: deeplearning4j; Project: deeplearning4j; Lines: 17; Source: TestCompareParameterAveragingSparkVsSingleMachine.java
Example 14: testJSONBasic
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
@Test
public void testJSONBasic() {
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0, 1)).updater(new NoOp())
.graphBuilder().addInputs("input")
.addLayer("firstLayer",
new DenseLayer.Builder().nIn(4).nOut(5).activation(Activation.TANH).build(),
"input")
.addLayer("outputLayer",
new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
.activation(Activation.SOFTMAX).nIn(5).nOut(3).build(),
"firstLayer")
.setOutputs("outputLayer").pretrain(false).backprop(true).build();
String json = conf.toJson();
ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
assertEquals(json, conf2.toJson());
assertEquals(conf, conf2);
}
Example 15: complete
import org.nd4j.linalg.lossfunctions.LossFunctions; // import the required package/class
public MultiLayerConfiguration.Builder complete() {
final int numRows = 28;
final int numColumns = 28;
int nChannels = 1;
int outputNum = 10;
int seed = 123;
MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
.optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list()
.layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10},
new int[] {2, 2}).nIn(nChannels).nOut(6).build())
.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
.build())
.layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
.nIn(5 * 5 * 1 * 6) // 150: the 5x5 output of the subsampling layer times 6 feature maps
.nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
.build())
.inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
.inputPreProcessor(2, new CnnToFeedForwardPreProcessor(5, 5, 6)).backprop(true).pretrain(false);
return builder;
}
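A hedged sketch of finishing an incomplete builder such as the ones in Examples 10 and 15; the variable names are illustrative.
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

MultiLayerConfiguration conf = complete().build(); // finalize the partially specified builder
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();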