This article collects typical usage examples of the Java method org.deeplearning4j.nn.conf.NeuralNetConfiguration.mapper. If you are unsure what NeuralNetConfiguration.mapper does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other methods of the enclosing class, org.deeplearning4j.nn.conf.NeuralNetConfiguration.
Eight code examples of NeuralNetConfiguration.mapper are shown below, ordered by popularity by default.
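Before diving into the examples, here is the basic pattern they all share: NeuralNetConfiguration.mapper() returns a Jackson ObjectMapper pre-configured for DL4J's configuration classes (registered subtypes, custom serializers and deserializers), which you can use to round-trip configuration objects through JSON. The minimal sketch below assumes the shaded Jackson package (org.nd4j.shade.jackson) that recent DL4J versions ship; older releases used plain com.fasterxml.jackson, so adjust the ObjectMapper import to your version.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.distribution.Distribution;
import org.deeplearning4j.nn.conf.distribution.NormalDistribution;
import org.nd4j.shade.jackson.databind.ObjectMapper;

public class MapperQuickStart {
    public static void main(String[] args) throws Exception {
        // Obtain the mapper pre-configured for DL4J configuration classes
        ObjectMapper om = NeuralNetConfiguration.mapper();

        // Serialize a config object to JSON and read it back
        Distribution dist = new NormalDistribution(0.0, 1.0);
        String json = om.writeValueAsString(dist);
        Distribution back = om.readValue(json, Distribution.class);
        System.out.println(json + " -> " + back);
    }
}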
Example 1: testDistributionDeserializer
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testDistributionDeserializer() throws Exception {
    //Test current format:
    Distribution[] distributions =
                    new Distribution[] {new NormalDistribution(3, 0.5), new UniformDistribution(-2, 1),
                                    new GaussianDistribution(2, 1.0), new BinomialDistribution(10, 0.3)};
    ObjectMapper om = NeuralNetConfiguration.mapper();
    for (Distribution d : distributions) {
        String json = om.writeValueAsString(d);
        Distribution fromJson = om.readValue(json, Distribution.class);
        assertEquals(d, fromJson);
    }
}
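The same round-trip works for YAML via the companion method NeuralNetConfiguration.mapperYaml() (present in the DL4J versions these tests come from; verify against your own version), e.g.:

ObjectMapper yamlMapper = NeuralNetConfiguration.mapperYaml();
String yaml = yamlMapper.writeValueAsString(new NormalDistribution(3, 0.5));
Distribution fromYaml = yamlMapper.readValue(yaml, Distribution.class);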
Example 2: testCustomActivationFn
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testCustomActivationFn() {
    //First: Ensure that the CustomActivation class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(IActivation.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        if (nt.getType() == CustomActivation.class)
            found = true;
    }
    assertTrue("CustomActivation: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check JSON and YAML config actually works...
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list()
                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(new CustomActivation()).build())
                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10).build())
                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
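For context, a custom activation function like the CustomActivation referenced above is typically written by extending BaseActivationFunction from ND4J. The identity activation below is a hypothetical stand-in, not DL4J's actual test fixture, and the Pair import path has moved between ND4J versions, so treat it as a sketch to adapt:

import org.nd4j.linalg.activations.BaseActivationFunction;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.primitives.Pair; // package varies by ND4J version

// Hypothetical identity activation, shown only to illustrate the extension points
public class MyIdentityActivation extends BaseActivationFunction {

    @Override
    public INDArray getActivation(INDArray in, boolean training) {
        return in; // f(x) = x
    }

    @Override
    public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
        return new Pair<>(epsilon, null); // f'(x) = 1, so dL/dx = epsilon
    }
}

Because the mapper resolves subtypes for IActivation (which is exactly what the first half of the test asserts), a class like this serializes into the network JSON the same way CustomActivation does.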
Example 3: deserialize
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Override
public ComputationGraphConfiguration deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
    long charOffsetStart = jp.getCurrentLocation().getCharOffset();
    ComputationGraphConfiguration conf = (ComputationGraphConfiguration) defaultDeserializer.deserialize(jp, ctxt);

    //Updater configuration changed after 0.8.0 release
    //Previously: enumerations and fields. Now: classes
    //Here, we manually create the appropriate Updater instances, if the IUpdater field is empty
    List<Layer> layerList = new ArrayList<>();
    Map<String, GraphVertex> vertices = conf.getVertices();
    for (Map.Entry<String, GraphVertex> entry : vertices.entrySet()) {
        if (entry.getValue() instanceof LayerVertex) {
            LayerVertex lv = (LayerVertex) entry.getValue();
            layerList.add(lv.getLayerConf().getLayer());
        }
    }
    Layer[] layers = layerList.toArray(new Layer[layerList.size()]);

    //Now, check if we need to manually handle IUpdater deserialization from legacy format
    boolean attemptIUpdaterFromLegacy = requiresIUpdaterFromLegacy(layers);
    if (attemptIUpdaterFromLegacy) {
        JsonLocation endLocation = jp.getCurrentLocation();
        long charOffsetEnd = endLocation.getCharOffset();
        String jsonSubString = endLocation.getSourceRef().toString()
                        .substring((int) charOffsetStart - 1, (int) charOffsetEnd);

        ObjectMapper om = NeuralNetConfiguration.mapper();
        JsonNode rootNode = om.readTree(jsonSubString);

        ObjectNode verticesNode = (ObjectNode) rootNode.get("vertices");
        Iterator<JsonNode> iter = verticesNode.elements();
        int layerIdx = 0;
        while (iter.hasNext()) {
            JsonNode next = iter.next();
            ObjectNode confNode = null;
            if (next.has("LayerVertex")) {
                next = next.get("LayerVertex");
                if (next.has("layerConf")) {
                    confNode = (ObjectNode) next.get("layerConf");
                    next = confNode.get("layer").elements().next();
                } else {
                    continue;
                }

                if (layers[layerIdx] instanceof BaseLayer && ((BaseLayer) layers[layerIdx]).getIUpdater() == null) {
                    handleUpdaterBackwardCompatibility((BaseLayer) layers[layerIdx], (ObjectNode) next);
                }

                if (layers[layerIdx].getIDropout() == null) {
                    //Check for legacy dropout
                    if (next.has("dropOut")) {
                        double d = next.get("dropOut").asDouble();
                        if (!Double.isNaN(d)) {
                            //Might be dropout or dropconnect...
                            if (layers[layerIdx] instanceof BaseLayer && confNode.has("useDropConnect")
                                            && confNode.get("useDropConnect").asBoolean(false)) {
                                ((BaseLayer) layers[layerIdx]).setWeightNoise(new DropConnect(d));
                            } else {
                                layers[layerIdx].setIDropout(new Dropout(d));
                            }
                        }
                    }
                }

                layerIdx++;
            }
        }
    }

    return conf;
}
Developer: deeplearning4j | Project: deeplearning4j | Source: ComputationGraphConfigurationDeserializer.java
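To make the tree walk above concrete: the node path it follows is "vertices" -> "LayerVertex" -> "layerConf" -> "layer" -> first child, with "useDropConnect" read from the layerConf node and "dropOut" from the layer node. Those field names come straight from the code; the vertex name and layer-type key in the fragment below are invented placeholders, not verified against a real pre-0.9 config:

// Hedged sketch of the legacy vertex JSON shape the deserializer walks
String legacyVertexJson =
          "{ \"vertices\" : {\n"
        + "    \"dense0\" : {\n"                                           // hypothetical vertex name
        + "      \"LayerVertex\" : {\n"
        + "        \"layerConf\" : {\n"
        + "          \"useDropConnect\" : true,\n"                         // checked on the layerConf node
        + "          \"layer\" : { \"dense\" : { \"dropOut\" : 0.5 } }\n"  // checked on the layer node
        + "        }\n"
        + "      }\n"
        + "    }\n"
        + "} }";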
Example 4: deserialize
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Override
public MultiLayerConfiguration deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
    long charOffsetStart = jp.getCurrentLocation().getCharOffset();
    MultiLayerConfiguration conf = (MultiLayerConfiguration) defaultDeserializer.deserialize(jp, ctxt);

    Layer[] layers = new Layer[conf.getConfs().size()];
    for (int i = 0; i < layers.length; i++) {
        layers[i] = conf.getConf(i).getLayer();
    }

    //Now, check if we need to manually handle IUpdater deserialization from legacy format
    boolean attemptIUpdaterFromLegacy = requiresIUpdaterFromLegacy(layers);
    if (attemptIUpdaterFromLegacy) {
        JsonLocation endLocation = jp.getCurrentLocation();
        long charOffsetEnd = endLocation.getCharOffset();
        String jsonSubString = endLocation.getSourceRef().toString()
                        .substring((int) charOffsetStart - 1, (int) charOffsetEnd);

        ObjectMapper om = NeuralNetConfiguration.mapper();
        JsonNode rootNode = om.readTree(jsonSubString);

        ArrayNode confsNode = (ArrayNode) rootNode.get("confs");
        for (int i = 0; i < layers.length; i++) {
            ObjectNode on = (ObjectNode) confsNode.get(i);
            ObjectNode confNode = null;
            if (layers[i] instanceof BaseLayer && ((BaseLayer) layers[i]).getIUpdater() == null) {
                //layer -> (first/only child) -> updater
                if (on.has("layer")) {
                    confNode = on;
                    on = (ObjectNode) on.get("layer");
                } else {
                    continue;
                }
                on = (ObjectNode) on.elements().next();
                handleUpdaterBackwardCompatibility((BaseLayer) layers[i], on);
            }

            if (layers[i].getIDropout() == null) {
                //Check for legacy dropout/dropconnect
                if (on.has("dropOut")) {
                    double d = on.get("dropOut").asDouble();
                    if (!Double.isNaN(d)) {
                        //Might be dropout or dropconnect...
                        if (confNode != null && layers[i] instanceof BaseLayer && confNode.has("useDropConnect")
                                        && confNode.get("useDropConnect").asBoolean(false)) {
                            ((BaseLayer) layers[i]).setWeightNoise(new DropConnect(d));
                        } else {
                            if (d > 0.0) {
                                layers[i].setIDropout(new Dropout(d));
                            }
                        }
                    }
                }
            }
        }
    }

    return conf;
}
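Both deserialize overrides above delegate to a defaultDeserializer field. In Jackson, that delegating pattern is normally wired up with a BeanDeserializerModifier, roughly as sketched below. This assumes a constructor MultiLayerConfigurationDeserializer(JsonDeserializer<?>) matching the delegation seen above; DL4J's real registration code may differ in detail:

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.nd4j.shade.jackson.databind.*;
import org.nd4j.shade.jackson.databind.deser.BeanDeserializerModifier;
import org.nd4j.shade.jackson.databind.module.SimpleModule;

public class LegacyModuleSketch {
    // Sketch: wrap Jackson's default bean deserializer so it becomes the
    // "defaultDeserializer" the custom deserializer delegates to
    public static void install(ObjectMapper mapper) {
        SimpleModule module = new SimpleModule();
        module.setDeserializerModifier(new BeanDeserializerModifier() {
            @Override
            public JsonDeserializer<?> modifyDeserializer(DeserializationConfig config,
                            BeanDescription beanDesc, JsonDeserializer<?> deserializer) {
                if (MultiLayerConfiguration.class.isAssignableFrom(beanDesc.getBeanClass())) {
                    return new MultiLayerConfigurationDeserializer(deserializer);
                }
                return deserializer;
            }
        });
        mapper.registerModule(module);
    }
}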
Example 5: testDistributionDeserializerLegacyFormat
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testDistributionDeserializerLegacyFormat() throws Exception {
    //Legacy format: the distribution type is given by the wrapper-object field name
    ObjectMapper om = NeuralNetConfiguration.mapper();

    String normalJson = "{\n" + " \"normal\" : {\n" + " \"mean\" : 0.1,\n"
                    + " \"std\" : 1.2\n" + " }\n" + " }";
    Distribution nd = om.readValue(normalJson, Distribution.class);
    assertTrue(nd instanceof NormalDistribution);
    NormalDistribution normDist = (NormalDistribution) nd;
    assertEquals(0.1, normDist.getMean(), 1e-6);
    assertEquals(1.2, normDist.getStd(), 1e-6);

    String uniformJson = "{\n" + " \"uniform\" : {\n" + " \"lower\" : -1.1,\n"
                    + " \"upper\" : 2.2\n" + " }\n" + " }";
    Distribution ud = om.readValue(uniformJson, Distribution.class);
    assertTrue(ud instanceof UniformDistribution);
    UniformDistribution uniDist = (UniformDistribution) ud;
    assertEquals(-1.1, uniDist.getLower(), 1e-6);
    assertEquals(2.2, uniDist.getUpper(), 1e-6);

    String gaussianJson = "{\n" + " \"gaussian\" : {\n" + " \"mean\" : 0.1,\n"
                    + " \"std\" : 1.2\n" + " }\n" + " }";
    Distribution gd = om.readValue(gaussianJson, Distribution.class);
    assertTrue(gd instanceof GaussianDistribution);
    GaussianDistribution gDist = (GaussianDistribution) gd;
    assertEquals(0.1, gDist.getMean(), 1e-6);
    assertEquals(1.2, gDist.getStd(), 1e-6);

    //Despite the variable name, this is the binomial legacy format
    String bernoulliJson =
                    "{\n" + " \"binomial\" : {\n" + " \"numberOfTrials\" : 10,\n"
                                    + " \"probabilityOfSuccess\" : 0.3\n" + " }\n"
                                    + " }";
    Distribution bd = om.readValue(bernoulliJson, Distribution.class);
    assertTrue(bd instanceof BinomialDistribution);
    BinomialDistribution binDist = (BinomialDistribution) bd;
    assertEquals(10, binDist.getNumberOfTrials());
    assertEquals(0.3, binDist.getProbabilityOfSuccess(), 1e-6);
}
Example 6: testJsonMultiLayerNetwork
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testJsonMultiLayerNetwork() {
    //First: Ensure that the CustomLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    Set<Class<?>> registeredSubtypes = new HashSet<>();
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        // registeredSubtypes.add(nt.getType());
        if (nt.getType() == CustomLayer.class)
            found = true;
    }
    assertTrue("CustomLayer: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check JSON and YAML config actually works...
    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new CustomLayer(3.14159))
                                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
Example 7: testCustomOutputLayerMLN
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testCustomOutputLayerMLN() {
    //First: Ensure that the CustomOutputLayer class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(Layer.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    Set<Class<?>> registeredSubtypes = new HashSet<>();
    boolean found = false;
    for (NamedType nt : types) {
        System.out.println(nt);
        // registeredSubtypes.add(nt.getType());
        if (nt.getType() == CustomOutputLayer.class)
            found = true;
    }
    assertTrue("CustomOutputLayer: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check JSON and YAML config actually works...
    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().seed(12345).list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();

    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

    //Third: check initialization
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl);

    //Fourth: compare to an equivalent standard output layer (should be identical)
    MultiLayerConfiguration conf2 =
                    new NeuralNetConfiguration.Builder().seed(12345).weightInit(WeightInit.XAVIER)
                                    .list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .nIn(10).nOut(10).build())
                                    .pretrain(false).backprop(true).build();
    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();

    assertEquals(net2.params(), net.params());

    INDArray testFeatures = Nd4j.rand(1, 10);
    INDArray testLabels = Nd4j.zeros(1, 10);
    testLabels.putScalar(0, 3, 1.0);
    DataSet ds = new DataSet(testFeatures, testLabels);

    assertEquals(net2.output(testFeatures), net.output(testFeatures));
    assertEquals(net2.score(ds), net.score(ds), 1e-6);
}
Example 8: testCustomPreprocessor
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; // import the class the example depends on
@Test
public void testCustomPreprocessor() {
    //First: Ensure that the MyCustomPreprocessor class is registered
    ObjectMapper mapper = NeuralNetConfiguration.mapper();
    AnnotatedClass ac = AnnotatedClass.construct(InputPreProcessor.class,
                    mapper.getSerializationConfig().getAnnotationIntrospector(), null);
    Collection<NamedType> types = mapper.getSubtypeResolver().collectAndResolveSubtypes(ac,
                    mapper.getSerializationConfig(), mapper.getSerializationConfig().getAnnotationIntrospector());
    boolean found = false;
    for (NamedType nt : types) {
        // System.out.println(nt);
        if (nt.getType() == MyCustomPreprocessor.class) {
            found = true;
            break;
        }
    }
    assertTrue("MyCustomPreprocessor: not registered with NeuralNetConfiguration mapper", found);

    //Second: let's create a MultiLayerConfiguration with one, and check JSON and YAML config actually works...
    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder().list()
                                    .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
                                    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10)
                                                    .nOut(10).build())
                                    .inputPreProcessor(0, new MyCustomPreprocessor())
                                    .pretrain(false).backprop(true)
                                    .build();

    String json = conf.toJson();
    String yaml = conf.toYaml();
    System.out.println(json);

    MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

    assertTrue(confFromJson.getInputPreProcess(0) instanceof MyCustomPreprocessor);
}
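For reference, an InputPreProcessor like the MyCustomPreprocessor above is usually written by extending BaseInputPreProcessor, which supplies clone() and mask handling. The identity implementation below is a hypothetical stand-in for the actual fixture; note that newer DL4J versions add a LayerWorkspaceMgr parameter to preProcess/backprop, so match the signatures to the version you build against:

import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.preprocessor.BaseInputPreProcessor;
import org.nd4j.linalg.api.ndarray.INDArray;

// Hypothetical identity preprocessor, not DL4J's actual test fixture
public class MyCustomPreprocessor extends BaseInputPreProcessor {

    @Override
    public INDArray preProcess(INDArray input, int miniBatchSize) {
        return input; // pass activations through unchanged
    }

    @Override
    public INDArray backprop(INDArray output, int miniBatchSize) {
        return output; // gradients pass through unchanged as well
    }

    @Override
    public InputType getOutputType(InputType inputType) {
        return inputType; // output shape equals input shape
    }
}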