This article collects and summarizes typical usage examples of the Java method org.deeplearning4j.nn.conf.MultiLayerConfiguration.fromJson. If you are wondering what MultiLayerConfiguration.fromJson does, how to use it, or what real-world usages look like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.deeplearning4j.nn.conf.MultiLayerConfiguration.
The following presents 15 code examples of the MultiLayerConfiguration.fromJson method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
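Before the examples, here is a minimal, self-contained round-trip sketch (assuming the DL4J 0.9.x-era API used throughout the examples below; the class name FromJsonRoundTrip is illustrative): toJson() serializes a configuration to a JSON string, and fromJson() reconstructs an equal configuration from that string.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class FromJsonRoundTrip {
    public static void main(String[] args) {
        // Build a trivial single-layer configuration.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .nIn(4).nOut(2).build())
                .build();
        // Serialize to JSON, then parse it back; the round trip preserves equality.
        String json = conf.toJson();
        MultiLayerConfiguration restored = MultiLayerConfiguration.fromJson(json);
        System.out.println(conf.equals(restored)); // prints: true
    }
}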
Example 1: getVaeLayer
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Override
public VariationalAutoencoder getVaeLayer() {
    MultiLayerNetwork network =
            new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);
    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                "Cannot use VaeReconstructionProbWithKeyFunction on network that doesn't have a VAE "
                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
Example 2: getVaeLayer
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Override
public VariationalAutoencoder getVaeLayer() {
    MultiLayerNetwork network =
            new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();
    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);
    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                "Cannot use VaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE "
                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
Example 3: getModelFromJson
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
private MultiLayerNetwork getModelFromJson() throws IOException {
    String path = "./src/main/resources/models/d2v/" + language.getName() + "-model.json";
    byte[] encoded = Files.readAllBytes(Paths.get(path));
    String json = new String(encoded, StandardCharsets.UTF_8);
    System.out.println(json);
    return new MultiLayerNetwork(MultiLayerConfiguration.fromJson(json));
}
Example 4: loadConfigGuess
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
/**
 * Load the configuration from the given file path, trying each supported format in turn.
 * @param path the path of the file to "guess"
 *
 * @return the loaded configuration
 * @throws Exception if the file cannot be read or matches no known configuration format
 */
public static Object loadConfigGuess(String path) throws Exception {
    String input = FileUtils.readFileToString(new File(path));
    //note here that we load json BEFORE YAML. YAML
    //turns out to load just fine *accidentally*
    try {
        return MultiLayerConfiguration.fromJson(input);
    } catch (Exception e) {
        log.warn("Tried multi layer config from json", e);
        try {
            return KerasModelImport.importKerasModelConfiguration(path);
        } catch (Exception e1) {
            log.warn("Tried keras model config", e1);
            try {
                return KerasModelImport.importKerasSequentialConfiguration(path);
            } catch (Exception e2) {
                log.warn("Tried keras sequence config", e2);
                try {
                    return ComputationGraphConfiguration.fromJson(input);
                } catch (Exception e3) {
                    log.warn("Tried computation graph from json", e3);
                    try {
                        return MultiLayerConfiguration.fromYaml(input);
                    } catch (Exception e4) {
                        log.warn("Tried multi layer configuration from yaml", e4);
                        try {
                            return ComputationGraphConfiguration.fromYaml(input);
                        } catch (Exception e5) {
                            throw new ModelGuesserException("Unable to load configuration from path " + path
                                    + " (invalid config file or not a known config type)");
                        }
                    }
                }
            }
        }
    }
}
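Because loadConfigGuess returns a plain Object, callers have to branch on the concrete configuration type. A hypothetical caller might look like this (the file path and variable names are illustrative, not from the original source):

Object cfg = loadConfigGuess("/path/to/model-conf.json"); // illustrative path
if (cfg instanceof MultiLayerConfiguration) {
    // Plain layer stack: wrap in a MultiLayerNetwork.
    MultiLayerNetwork net = new MultiLayerNetwork((MultiLayerConfiguration) cfg);
    net.init();
} else if (cfg instanceof ComputationGraphConfiguration) {
    // Arbitrary directed graph of layers: wrap in a ComputationGraph.
    ComputationGraph graph = new ComputationGraph((ComputationGraphConfiguration) cfg);
    graph.init();
}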
Example 5: testRngInitMLN
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testRngInitMLN() {
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH)
            .weightInit(WeightInit.XAVIER).list()
            .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
            .build();
    String json = conf.toJson();
    MultiLayerNetwork net1 = new MultiLayerNetwork(conf);
    net1.init();
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf);
    net2.init();
    assertEquals(net1.params(), net2.params());
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json);
    Nd4j.getRandom().setSeed(987654321);
    MultiLayerNetwork net3 = new MultiLayerNetwork(fromJson);
    net3.init();
    assertEquals(net1.params(), net3.params());
}
Example 6: testSerialization
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testSerialization() {
    final MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new AdaGrad(0.1))
            .l2(0.001)
            .seed(12345).list().pretrain(false)
            .layer(0, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                    .activation(Activation.TANH).nIn(2).nOut(2).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.05, 0.05)).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder()
                    .activation(Activation.TANH).nIn(2).nOut(2).weightInit(WeightInit.DISTRIBUTION)
                    .dist(new UniformDistribution(-0.05, 0.05)).build())
            .layer(2, new org.deeplearning4j.nn.conf.layers.RnnOutputLayer.Builder()
                    .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(2).nOut(2)
                    .activation(Activation.TANH).build())
            .backprop(true).build();
    final String json1 = conf1.toJson();
    final MultiLayerConfiguration conf2 = MultiLayerConfiguration.fromJson(json1);
    final String json2 = conf2.toJson();
    TestCase.assertEquals(json1, json2);
}
Example 7: testCustomActivationFn
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomActivationFn() {
    //First: Ensure that the CustomActivation class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(IActivation.class,
            mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
            mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        if (nt.getType() == CustomActivation.class)
            found = true;
    }
    assertTrue("CustomActivation: not registered with NeuralNetConfiguration mapper", found);
    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML config actually work...
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list()
            .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(new CustomActivation()).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10).build())
            .pretrain(false).backprop(true).build();
    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);
    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);
    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
Example 8: testJsonYaml
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testJsonYaml() {
    MultiLayerConfiguration config = new NeuralNetConfiguration.Builder().seed(12345).list()
            .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.IDENTITY))
                    .nIn(3).nOut(4).encoderLayerSizes(5).decoderLayerSizes(6).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.TANH))
                    .nIn(7).nOut(8).encoderLayerSizes(9).decoderLayerSizes(10).build())
            .layer(2, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .reconstructionDistribution(new BernoulliReconstructionDistribution()).nIn(11)
                    .nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
            .layer(3, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .reconstructionDistribution(new ExponentialReconstructionDistribution(Activation.TANH))
                    .nIn(11).nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
            .layer(4, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .lossFunction(new ActivationTanH(), LossFunctions.LossFunction.MSE).nIn(11)
                    .nOut(12).encoderLayerSizes(13).decoderLayerSizes(14).build())
            .layer(5, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder()
                    .reconstructionDistribution(new CompositeReconstructionDistribution.Builder()
                            .addDistribution(5, new GaussianReconstructionDistribution())
                            .addDistribution(5, new GaussianReconstructionDistribution(Activation.TANH))
                            .addDistribution(5, new BernoulliReconstructionDistribution())
                            .build())
                    .nIn(15).nOut(16).encoderLayerSizes(17).decoderLayerSizes(18).build())
            .layer(6, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(18)
                    .nOut(19).activation(new ActivationTanH()).build())
            .pretrain(true).backprop(true).build();
    String asJson = config.toJson();
    String asYaml = config.toYaml();
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson);
    MultiLayerConfiguration fromYaml = MultiLayerConfiguration.fromYaml(asYaml);
    assertEquals(config, fromJson);
    assertEquals(config, fromYaml);
}
Example 9: call
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Override
public Iterable<Tuple2<Integer, Double>> call(Iterator<DataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0, 0.0));
    }
    DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate
    MultiLayerNetwork network = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(json));
    network.init();
    INDArray val = params.value().unsafeDuplication(); //.value() object will be shared by all executors on each machine -> OK, as params are not modified by the score function
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParameters(val);
    List<Tuple2<Integer, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        DataSet ds = iter.next();
        double score = network.score(ds, false);
        int numExamples = ds.getFeatureMatrix().size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }
    Nd4j.getExecutioner().commit();
    return out;
}
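Each emitted tuple carries the example count and the score pre-weighted by that count, so a driver can recover the overall per-example score without knowing minibatch boundaries. A minimal driver-side sketch (collectedScores is a hypothetical list of the tuples gathered from all partitions):

long totalExamples = 0;
double totalScore = 0.0;
for (Tuple2<Integer, Double> t : collectedScores) {
    totalExamples += t._1();   // examples in this minibatch
    totalScore += t._2();      // score already weighted by example count
}
double averageScore = totalScore / totalExamples; // overall per-example score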
Example 10: loadDL4JNetwork
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
public static MultiLayerNetwork loadDL4JNetwork(String baseModelFilePath) throws IOException {
    String jsonFilePath = baseModelFilePath + "dl4j_model_conf.json";
    String parametersFilePath = baseModelFilePath + "dl4j_model.parameters";
    // read the JSON configuration into a buffer
    String jsonBuffer = readFile(jsonFilePath, StandardCharsets.UTF_8);
    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(jsonBuffer);
    DataInputStream dis = new DataInputStream(new FileInputStream(parametersFilePath));
    INDArray newParams = Nd4j.read(dis);
    dis.close();
    MultiLayerNetwork savedNetwork = new MultiLayerNetwork(confFromJson);
    savedNetwork.init();
    savedNetwork.setParameters(newParams);
    //System.out.println("Original network params " + model.params());
    //System.out.println(savedNetwork.params());
    return savedNetwork;
}
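For completeness, files in this layout could be produced by a matching save routine. A minimal sketch, assuming commons-io's FileUtils and ND4J's binary writer (saveDL4JNetwork is a hypothetical name, not part of the original source):

public static void saveDL4JNetwork(MultiLayerNetwork model, String baseModelFilePath) throws IOException {
    // Write the configuration JSON produced by toJson(), mirroring what fromJson() reads back.
    FileUtils.writeStringToFile(new File(baseModelFilePath + "dl4j_model_conf.json"),
            model.getLayerWiseConfigurations().toJson(), StandardCharsets.UTF_8);
    // Write the flattened parameter vector in ND4J's binary format (read back via Nd4j.read).
    try (DataOutputStream dos = new DataOutputStream(
            new FileOutputStream(baseModelFilePath + "dl4j_model.parameters"))) {
        Nd4j.write(model.params(), dos);
    }
}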
Example 11: testCnn1DWithZeroPadding1D
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCnn1DWithZeroPadding1D() {
    Nd4j.getRandom().setSeed(1337);
    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;
    int[] kernels = {1, 2, 4};
    int stride = 1;
    int pnorm = 2;
    int padding = 0;
    int zeroPadding = 2;
    int paddedLength = length + 2 * zeroPadding;
    Activation[] activations = {Activation.SIGMOID};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};
    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, paddedLength);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < paddedLength; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }
                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(zeroPadding).build())
                            .layer(new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                    .build())
                            .layer(new ZeroPadding1DLayer.Builder(0).build())
                            .layer(new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                            .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                            .setInputType(InputType.recurrent(convNIn)).build();
                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);
                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();
                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn + ", kernel = " + kernel;
                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }
                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);
                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
Example 12: testCnn1DWithSubsampling1D
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCnn1DWithSubsampling1D() {
    Nd4j.getRandom().setSeed(12345);
    int[] minibatchSizes = {1, 3};
    int length = 7;
    int convNIn = 2;
    int convNOut1 = 3;
    int convNOut2 = 4;
    int finalNOut = 4;
    int[] kernels = {1, 2, 4};
    int stride = 1;
    int padding = 0;
    int pnorm = 2;
    Activation[] activations = {Activation.SIGMOID, Activation.TANH};
    SubsamplingLayer.PoolingType[] poolingTypes =
            new SubsamplingLayer.PoolingType[] {SubsamplingLayer.PoolingType.MAX,
                    SubsamplingLayer.PoolingType.AVG, SubsamplingLayer.PoolingType.PNORM};
    for (Activation afn : activations) {
        for (SubsamplingLayer.PoolingType poolingType : poolingTypes) {
            for (int minibatchSize : minibatchSizes) {
                for (int kernel : kernels) {
                    INDArray input = Nd4j.rand(new int[] {minibatchSize, convNIn, length});
                    INDArray labels = Nd4j.zeros(minibatchSize, finalNOut, length);
                    for (int i = 0; i < minibatchSize; i++) {
                        for (int j = 0; j < length; j++) {
                            labels.putScalar(new int[] {i, i % finalNOut, j}, 1.0);
                        }
                    }
                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .updater(new NoOp()).weightInit(WeightInit.DISTRIBUTION)
                            .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list()
                            .layer(0, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNIn).nOut(convNOut1)
                                    .build())
                            .layer(1, new Convolution1DLayer.Builder().activation(afn).kernelSize(kernel)
                                    .stride(stride).padding(padding).nIn(convNOut1).nOut(convNOut2)
                                    .build())
                            .layer(2, new Subsampling1DLayer.Builder(poolingType).kernelSize(kernel)
                                    .stride(stride).padding(padding).pnorm(pnorm).build())
                            .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                            .setInputType(InputType.recurrent(convNIn)).build();
                    String json = conf.toJson();
                    MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
                    assertEquals(conf, c2);
                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();
                    String msg = "PoolingType=" + poolingType + ", minibatch=" + minibatchSize + ", activationFn="
                            + afn + ", kernel = " + kernel;
                    if (PRINT_RESULTS) {
                        System.out.println(msg);
                        for (int j = 0; j < net.getnLayers(); j++)
                            System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                    }
                    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);
                    assertTrue(msg, gradOK);
                }
            }
        }
    }
}
Example 13: testCustomUpdater
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomUpdater() {
    //Create a simple custom updater, equivalent to SGD updater
    double lr = 0.03;
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345)
            .activation(Activation.TANH).updater(new CustomIUpdater(lr)) //Specify custom IUpdater
            .list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
                    .lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345)
            .activation(Activation.TANH).updater(new Sgd(lr)).list()
            .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
            .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
                    .lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();
    //First: Check updater config
    assertTrue(((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater() instanceof CustomIUpdater);
    assertTrue(((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater() instanceof Sgd);
    assertTrue(((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater() instanceof Sgd);
    CustomIUpdater u0_0 = (CustomIUpdater) ((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater();
    CustomIUpdater u0_1 = (CustomIUpdater) ((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u0_0.getLearningRate(), 1e-6);
    assertEquals(lr, u0_1.getLearningRate(), 1e-6);
    Sgd u1_0 = (Sgd) ((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater();
    Sgd u1_1 = (Sgd) ((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater();
    assertEquals(lr, u1_0.getLearningRate(), 1e-6);
    assertEquals(lr, u1_1.getLearningRate(), 1e-6);
    //Second: check JSON
    String asJson = conf1.toJson();
    MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson);
    assertEquals(conf1, fromJson);
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
    net1.init();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();
    //Third: check gradients are equal
    INDArray in = Nd4j.rand(5, 10);
    INDArray labels = Nd4j.rand(5, 10);
    net1.setInput(in);
    net2.setInput(in);
    net1.setLabels(labels);
    net2.setLabels(labels);
    net1.computeGradientAndScore();
    net2.computeGradientAndScore();
    assertEquals(net1.getFlattenedGradients(), net2.getFlattenedGradients());
}
Example 14: testJsonMultiLayerNetwork
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testJsonMultiLayerNetwork() {
    //First: Ensure that the CustomLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
            mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
            mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    Set<Class<?>> registeredSubtypes = new HashSet<>();
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        // registeredSubtypes.add(nt.getType());
        if (nt.getType() == CustomLayer.class)
            found = true;
    }
    assertTrue("CustomLayer: not registered with NeuralNetConfiguration mapper", found);
    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML config actually work...
    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder().list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new CustomLayer(3.14159))
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .nIn(10).nOut(10).build())
                    .pretrain(false).backprop(true).build();
    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);
    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);
    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
Example 15: testCustomOutputLayerMLN
import org.deeplearning4j.nn.conf.MultiLayerConfiguration; //import the package/class the method depends on
@Test
public void testCustomOutputLayerMLN() {
    //First: Ensure that the CustomOutputLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
            mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
            mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    Set<Class<?>> registeredSubtypes = new HashSet<>();
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        // registeredSubtypes.add(nt.getType());
        if (nt.getType() == CustomOutputLayer.class)
            found = true;
    }
    assertTrue("CustomOutputLayer: not registered with NeuralNetConfiguration mapper", found);
    //Second: let's create a MultiLayerConfiguration with one, and check that the JSON and YAML config actually work...
    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder().seed(12345).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .nIn(10).nOut(10).build())
                    .pretrain(false).backprop(true).build();
    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);
    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);
    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
    //Third: check initialization
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl);
    //Fourth: compare to an equivalent standard output layer (should be identical)
    MultiLayerConfiguration conf2 =
            new NeuralNetConfiguration.Builder().seed(12345).weightInit(WeightInit.XAVIER)
                    .list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .nIn(10).nOut(10).build())
                    .pretrain(false).backprop(true).build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();
    assertEquals(net2.params(), net.params());
    INDArray testFeatures = Nd4j.rand(1, 10);
    INDArray testLabels = Nd4j.zeros(1, 10);
    testLabels.putScalar(0, 3, 1.0);
    DataSet ds = new DataSet(testFeatures, testLabels);
    assertEquals(net2.output(testFeatures), net.output(testFeatures));
    assertEquals(net2.score(ds), net.score(ds), 1e-6);
}