

Java MultiLayerConfiguration.toJson Method Code Examples

This article collects typical usage examples of the Java method org.deeplearning4j.nn.conf.MultiLayerConfiguration.toJson. If you are wondering what MultiLayerConfiguration.toJson does, how to call it, or where to find working examples, the hand-picked code samples below should help. You can also browse further usage examples of the enclosing class, org.deeplearning4j.nn.conf.MultiLayerConfiguration.


Eleven code examples of MultiLayerConfiguration.toJson are shown below, ordered by popularity.
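
All of the examples follow the same round-trip pattern: build a MultiLayerConfiguration, serialize it with toJson() (or toYaml()), then restore it with MultiLayerConfiguration.fromJson() (or fromYaml()) and assert that the restored configuration equals the original. As a quick orientation, here is a minimal, self-contained sketch of that pattern; it uses only API calls that appear in the examples below (the DL4J 0.9.x-era API), and the class name and layer sizes are illustrative, not taken from any of the tests:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ToJsonRoundTrip {
    public static void main(String[] args) {
        // Build a small two-layer configuration (sizes are illustrative)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(10).nOut(10)
                        .activation(Activation.TANH).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                .build();

        // Serialize the configuration to a JSON string
        String json = conf.toJson();

        // Restore it and verify the round trip preserves the configuration
        MultiLayerConfiguration restored = MultiLayerConfiguration.fromJson(json);
        System.out.println(conf.equals(restored)); // expected: true
    }
}

The assertEquals(conf, fromJson) calls in the tests below are the JUnit form of this same comparison.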

Example 1: testRngInitMLN

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testRngInitMLN() {
    Nd4j.getRandom().setSeed(12345);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(2,
                                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                    .build();

    String json = conf.toJson();

    MultiLayerNetwork net1 = new MultiLayerNetwork(conf);
    net1.init();

    MultiLayerNetwork net2 = new MultiLayerNetwork(conf);
    net2.init();

    assertEquals(net1.params(), net2.params());

    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json);

    Nd4j.getRandom().setSeed(987654321);
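    // With a different global RNG seed, parameter init should still match net1: init uses the config's seed(12345)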
    MultiLayerNetwork net3 = new MultiLayerNetwork(fromJson);
    net3.init();

    assertEquals(net1.params(), net3.params());
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 31, Source: RandomTests.java

Example 2: testSerialization

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testSerialization() {

    final MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .updater(new AdaGrad(0.1))
                    .l2(0.001)
                    .seed(12345).list().pretrain(false)
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                                    .activation(Activation.TANH).nIn(2).nOut(2).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new UniformDistribution(-0.05, 0.05)).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                                    .activation(Activation.TANH).nIn(2).nOut(2).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new UniformDistribution(-0.05, 0.05)).build())
                    .layer(2, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder()
                                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(2)
                                    .activation(Activation.TANH).build())
                    .backprop(true).build();


    final String json1 = conf1.toJson();

    final MultiLayerConfiguration conf2 = MultiLayerConfiguration.fromJson(json1);

    final String json2 = conf2.toJson();


    TestCase.assertEquals(json1, json2);
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 30, Source: GravesBidirectionalLSTMTest.java

Example 3: testCustomActivationFn

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomActivationFn() {

    //First: Ensure that the CustomActivation class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();

    AnnotatedClass ac = AnnotatedClass.construct(IActivation.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        if (nt.getType() == CustomActivation.class)
            found = true;
    }

    assertTrue("CustomActivation: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML configs actually work...

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(new CustomActivation()).build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10).build())
                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 39, Source: TestCustomActivation.java

Example 4: testJsonYaml

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testJsonYaml() {

    MultiLayerConfiguration config = new NeuralNetConfiguration.Builder().seed(12345).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.IDENTITY))
                                    .nIn(3).nOut(4).encoderLayerSizes(5).decoderLayerSizes(6).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.TANH))
                                    .nIn(7).nOut(8).encoderLayerSizes(9).decoderLayerSizes(10).build())
                    .layer(2, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .reconstructionDistribution(new BernoulliReconstructionDistribution()).nIn(11)
                                    .nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
                    .layer(3, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .reconstructionDistribution(new ExponentialReconstructionDistribution(Activation.TANH))
                                    .nIn(11).nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
                    .layer(4, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .lossFunction(new ActivationTanH(), LossFunctions.LossFunction.MSE).nIn(11)
                                    .nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
                    .layer(5, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                                    .reconstructionDistribution(new CompositeReconstructionDistribution.Builder()
                                                    .addDistribution(5, new GaussianReconstructionDistribution())
                                                    .addDistribution(5,
                                                                    new GaussianReconstructionDistribution(Activation.TANH))
                                                    .addDistribution(5, new BernoulliReconstructionDistribution())
                                                    .build())
                                    .nIn(15).nOut(16).encoderLayerSizes(17).decoderLayerSizes(18).build())
                    .layer(6, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(18)
                                    .nOut(19).activation(new ActivationTanH()).build())
                    .pretrain(true).backprop(true).build();

    String asJson = config.toJson();
    String asYaml = config.toYaml();

    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson);
    MultiLayerConfiguration fromYaml = MultiLayerConfiguration.fromYaml(asYaml);

    assertEquals(config, fromJson);
    assertEquals(config, fromYaml);
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 41, Source: TestVAE.java

Example 5: testCnn1DWithZeroPadding1D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCnn1DWithZeroPadding1D() {
    Nd4j.getRandom().setSeed(1337);

    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;


    int[] kernels = {1, 2, 4};
    int stride = 1;
    int pnorm = 2;

    int padding = 0;
    int zeroPadding = 2;
    int paddedLength = length + 2 * zeroPadding;

    Activation[] activations = {Activation.SIGMOID};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
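                    // Labels cover the padded length: ZeroPadding1DLayer adds 2*zeroPadding time steps,
                    // and ConvolutionMode.Same preserves sequence length through the conv/subsampling layers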
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, paddedLength);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < paddedLength; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(zeroPadding).build())
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(0).build())
                            .layer(new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                            .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                            .setInputType(InputType.recurrent(convNIn)).build();

                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn + ", kernel = " + kernel;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 80, Source: CNN1DGradientCheckTest.java

Example 6: testCnn1DWithSubsampling1D

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCnn1DWithSubsampling1D() {
    Nd4j.getRandom().setSeed(12345);

    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;

    int[] kernels = {1, 2, 4};
    int stride = 1;
    int padding = 0;
    int pnorm = 2;

    Activation[] activations = {Activation.SIGMOID, Activation.TANH};
    SubsamplingLayer.PoolingType[] poolingTypes =
                    new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};

    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, length);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < length; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                    .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                                    .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                                    .layer(0, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                                    .build())
                                    .layer(1, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                                    .build())
                                    .layer(2, new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                                    .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                                    .setInputType(InputType.recurrent(convNIn)).build();

                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                                    + afn + ", kernel = " + kernel;

                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }

                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 74, Source: CNN1DGradientCheckTest.java

Example 7: testCustomUpdater

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomUpdater() {

    //Create a simple custom updater, equivalent to SGD updater

    double lr = 0.03;

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345)
                    .activation(Activation.TANH).updater(new CustomIUpdater(lr)) //Specify custom IUpdater
                    .list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
                                    .lossFunction(LossFunctions.LossFunction.MSE).build())
                    .build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345)
                    .activation(Activation.TANH).updater(new Sgd(lr)).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder()
                                    .nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
                    .build();

    //First: Check updater config
    assertTrue(((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater() instanceof Sgd);
    assertTrue(((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater() instanceof Sgd);

    CustomIUpdater u0_0 = (CustomIUpdater) ((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater();
    CustomIUpdater u0_1 = (CustomIUpdater) ((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u0_0.getLearningRate(), 1e-6);
    assertEquals(lr, u0_1.getLearningRate(), 1e-6);

    Sgd u1_0 = (Sgd) ((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater();
    Sgd u1_1 = (Sgd) ((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u1_0.getLearningRate(), 1e-6);
    assertEquals(lr, u1_1.getLearningRate(), 1e-6);


    //Second: check JSON
    String asJson = conf1.toJson();
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson);
    assertEquals(conf1, fromJson);

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
    net1.init();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();


    //Third: check gradients are equal
    INDArray in = Nd4j.rand(5, 10);
    INDArray labels = Nd4j.rand(5, 10);

    net1.setInput(in);
    net2.setInput(in);

    net1.setLabels(labels);
    net2.setLabels(labels);

    net1.computeGradientAndScore();
    net2.computeGradientAndScore();

    assertEquals(net1.getFlattenedGradients(), net2.getFlattenedGradients());
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 69, Source: TestCustomUpdater.java

Example 8: testJsonMultiLayerNetwork

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testJsonMultiLayerNetwork() {
    //First: Ensure that the CustomLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();

    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        if (nt.getType() == CustomLayer.class)
            found = true;
    }

    assertTrue("CustomLayer: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML configs actually work...

    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new CustomLayer(3.14159)).layer(2,
                                                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 42, Source: TestCustomLayers.java

Example 9: testCustomOutputLayerMLN

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomOutputLayerMLN() {
    //First: Ensure that the CustomOutputLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();

    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        if (nt.getType() == CustomOutputLayer.class)
            found = true;
    }

    assertTrue("CustomOutputLayer: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML configs actually work...
    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().seed(12345).list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

    //Third: check initialization
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl);

    //Fourth: compare to an equivalent standard output layer (should be identical)
    MultiLayerConfiguration conf2 =
                    new NeuralNetConfiguration.Builder().seed(12345).weightInit(WeightInit.XAVIER)
                                    .list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1,
                                                    new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();

    assertEquals(net2.params(), net.params());

    INDArray testFeatures = Nd4j.rand(1, 10);
    INDArray testLabels = Nd4j.zeros(1, 10);
    testLabels.putScalar(0, 3, 1.0);
    DataSet ds = new DataSet(testFeatures, testLabels);

    assertEquals(net2.output(testFeatures), net.output(testFeatures));
    assertEquals(net2.score(ds), net.score(ds), 1e-6);
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 69, Source: TestCustomLayers.java

Example 10: testFrozenLayerInstantiation

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testFrozenLayerInstantiation() {
    //We need to be able to instantiate frozen layers from JSON etc., and have them be the same as if
    // they were initialized via the builder
    MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                    .weightInit(WeightInit.XAVIER).build())
                    .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10)
                                                    .nOut(10).build())
                    .build();

    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).list().layer(0,
                    new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(new DenseLayer.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build()))
                    .layer(1, new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(
                                    new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                                                    .weightInit(WeightInit.XAVIER).build()))
                    .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10)
                                                    .nOut(10).build())
                    .build();

    MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
    net1.init();
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();

    assertEquals(net1.params(), net2.params());


    String json = conf2.toJson();
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json);

    assertEquals(conf2, fromJson);

    MultiLayerNetwork net3 = new MultiLayerNetwork(fromJson);
    net3.init();

    INDArray input = Nd4j.rand(10, 10);

    INDArray out2 = net2.output(input);
    INDArray out3 = net3.output(input);

    assertEquals(out2, out3);
}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 49, Source: FrozenLayerTest.java

Example 11: testCustomPreprocessor

import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomPreprocessor() {
    //First: Ensure that the CustomLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();

    AnnotatedClass ac = AnnotatedClass.construct(InputPreProcessor.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        if (nt.getType() == MyCustomPreprocessor.class) {
            found = true;
            break;
        }
    }

    assertTrue("MyCustomPreprocessor: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML configs actually work...
    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10)
                                                    .nOut(10).build())
                                    .inputPreProcessor(0, new MyCustomPreprocessor()).pretrain(false).backprop(true)
                                    .build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

    assertTrue(confFromJson.getInputPreProcess(0) instanceof MyCustomPreprocessor);

}
 
Developer ID: deeplearning4j, Project: deeplearning4j, Lines of code: 44, Source: CustomPreprocessorTest.java


Note: the org.deeplearning4j.nn.conf.MultiLayerConfiguration.toJson examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.