This article collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.EmbeddingLayer. If you have been wondering what the EmbeddingLayer class is for, how to use it, or where to find usage examples, the curated class code examples below may help.
The EmbeddingLayer class belongs to the org.deeplearning4j.nn.conf.layers package. Ten code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
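Before the examples, here is a minimal, self-contained sketch of the pattern they all share: an EmbeddingLayer maps a column of integer class indices to dense vectors, in place of an explicit one-hot encoding. The vocabulary size, embedding width, and output size below are illustrative assumptions, not values taken from any of the examples.
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class EmbeddingLayerSketch {
    public static void main(String[] args) {
        int vocabSize = 100;   // hypothetical number of distinct input classes (nIn)
        int embeddingDim = 16; // hypothetical width of the learned embedding vectors (nOut)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH).list()
                .layer(0, new EmbeddingLayer.Builder().nIn(vocabSize).nOut(embeddingDim).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(embeddingDim).nOut(3).activation(Activation.SOFTMAX).build())
                .pretrain(false).backprop(true)
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        // Input is one integer index per row, NOT a one-hot matrix:
        INDArray in = Nd4j.create(new double[][]{{3}, {17}, {42}});
        INDArray out = net.output(in);
        System.out.println(out);
    }
}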
Example 1: testEmbeddingLayerConfig
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
@Test
public void testEmbeddingLayerConfig() {
for(boolean hasBias : new boolean[]{true, false}){
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
.layer(0, new EmbeddingLayer.Builder().hasBias(hasBias).nIn(10).nOut(5).build())
.layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
Layer l0 = net.getLayer(0);
assertEquals(org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer.class, l0.getClass());
assertEquals(10, ((FeedForwardLayer) l0.conf().getLayer()).getNIn());
assertEquals(5, ((FeedForwardLayer) l0.conf().getLayer()).getNOut());
INDArray weights = l0.getParam(DefaultParamInitializer.WEIGHT_KEY);
INDArray bias = l0.getParam(DefaultParamInitializer.BIAS_KEY);
assertArrayEquals(new int[]{10, 5}, weights.shape());
if(hasBias){
assertArrayEquals(new int[]{1, 5}, bias.shape());
}
}
}
Example 2: isSequenceCompatibleLayer
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
/**
 * Check whether the given layer is compatible with sequence data (only embedding and RNN layers are allowed for now).
 *
 * @param layer Layer to check
 * @return True if compatible
 */
protected boolean isSequenceCompatibleLayer(Layer layer) {
return layer instanceof EmbeddingLayer
|| layer instanceof AbstractLSTM
|| layer instanceof RnnOutputLayer
|| layer instanceof GlobalPoolingLayer;
}
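As a hypothetical illustration (the validation method and its name are assumptions, not from the source), a subclass might use this guard before assembling a sequence network:
// Sketch: reject architectures containing layers that cannot handle sequence data.
protected void validateSequenceLayers(Layer... candidate) {
    for (Layer l : candidate) {
        if (!isSequenceCompatibleLayer(l)) {
            throw new IllegalArgumentException("Layer not usable with sequence data: " + l);
        }
    }
}
An EmbeddingLayer or a GravesLSTM (a subclass of AbstractLSTM) would pass this check; a DenseLayer or ConvolutionLayer would not.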
Example 3: KerasEmbedding
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
/**
 * Constructor from a parsed Keras layer configuration dictionary.
 *
 * @param layerConfig           dictionary containing Keras layer configuration
 * @param enforceTrainingConfig whether to enforce training-related configuration options
 * @throws InvalidKerasConfigurationException     if the Keras layer configuration is invalid
 * @throws UnsupportedKerasConfigurationException if the Keras layer configuration is unsupported
 */
public KerasEmbedding(Map<String, Object> layerConfig, boolean enforceTrainingConfig)
throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
super(layerConfig, enforceTrainingConfig);
int inputDim = getInputDimFromConfig(layerConfig);
int[] inputShapeOld = this.inputShape;
this.inputShape = new int[inputShapeOld.length + 1];
this.inputShape[0] = inputShapeOld[0];
this.inputShape[1] = inputDim;
boolean hasZeroMasking = KerasLayerUtils.getZeroMaskingFromConfig(layerConfig, conf);
if (hasZeroMasking)
log.warn("Masking in keras and DL4J work differently. We do not support mask_zero flag" +
"on Embedding layers. If you want to have this behaviour for your imported model" +
"in DL4J, apply masking as a pre-processing step to your input." +
"See https://deeplearning4j.org/usingrnns#masking for more on this.");
Pair<WeightInit, Distribution> init = getWeightInitFromConfig(layerConfig, conf.getLAYER_FIELD_EMBEDDING_INIT(),
enforceTrainingConfig, conf, kerasMajorVersion);
WeightInit weightInit = init.getFirst();
Distribution distribution = init.getSecond();
LayerConstraint embeddingConstraint = KerasConstraintUtils.getConstraintsFromConfig(
layerConfig, conf.getLAYER_FIELD_EMBEDDINGS_CONSTRAINT(), conf, kerasMajorVersion);
EmbeddingLayer.Builder builder = new EmbeddingLayer.Builder().name(this.layerName).nIn(inputDim)
.nOut(getNOutFromConfig(layerConfig, conf)).dropOut(this.dropout).activation(Activation.IDENTITY)
.weightInit(weightInit)
.biasInit(0.0)
.l1(this.weightL1Regularization).l2(this.weightL2Regularization).hasBias(false);
if (distribution != null)
builder.dist(distribution);
if (embeddingConstraint != null)
builder.constrainWeights(embeddingConstraint);
this.layer = builder.build();
}
Example 4: buildEmbeddingLayer
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
void buildEmbeddingLayer(KerasLayerConfiguration conf, Integer kerasVersion) throws Exception {
Map<String, Object> layerConfig = new HashMap<String, Object>();
layerConfig.put(conf.getLAYER_FIELD_CLASS_NAME(), conf.getLAYER_CLASS_NAME_EMBEDDING());
Map<String, Object> config = new HashMap<String, Object>();
Integer inputDim = 10;
Integer outputDim = 10;
config.put(conf.getLAYER_FIELD_INPUT_DIM(), inputDim);
config.put(conf.getLAYER_FIELD_OUTPUT_DIM(), outputDim);
ArrayList<Integer> inputShape = new ArrayList<Integer>() {{
for (int i : INPUT_SHAPE) add(i);
}};
config.put(conf.getLAYER_FIELD_BATCH_INPUT_SHAPE(), inputShape);
config.put(conf.getLAYER_FIELD_NAME(), LAYER_NAME);
layerConfig.put(conf.getLAYER_FIELD_CONFIG(), config);
layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion);
if (kerasVersion == 1) {
config.put(conf.getLAYER_FIELD_EMBEDDING_INIT(), INIT_KERAS);
} else {
Map<String, Object> init = new HashMap<String, Object>();
init.put("class_name", conf.getINIT_GLOROT_NORMAL());
config.put(conf.getLAYER_FIELD_EMBEDDING_INIT(), init);
}
KerasEmbedding kerasEmbedding = new KerasEmbedding(layerConfig, false);
assertEquals(1, kerasEmbedding.getNumParams());
EmbeddingLayer layer = kerasEmbedding.getEmbeddingLayer();
assertEquals(LAYER_NAME, layer.getLayerName());
}
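The tests in this class presumably invoke the helper once per Keras version. A plausible driver looks like the following (Keras1LayerConfiguration and Keras2LayerConfiguration are the DL4J Keras-import configuration classes; the test name itself is an assumption):
@Test
public void testEmbeddingLayer() throws Exception {
    buildEmbeddingLayer(new Keras1LayerConfiguration(), 1);
    buildEmbeddingLayer(new Keras2LayerConfiguration(), 2);
}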
Example 5: testStackVertexEmbedding
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
@Test
public void testStackVertexEmbedding() {
Nd4j.getRandom().setSeed(12345);
GraphVertex unstack = new StackVertex(null, "", -1); // constructed directly here but unused; the graph below adds its own StackVertex
INDArray in1 = Nd4j.zeros(5, 1);
INDArray in2 = Nd4j.zeros(5, 1);
for (int i = 0; i < 5; i++) {
in1.putScalar(i, 0, i);
in2.putScalar(i, 0, i);
}
INDArray l = Nd4j.rand(5, 5);
MultiDataSet ds = new org.nd4j.linalg.dataset.MultiDataSet(new INDArray[] {in1, in2}, new INDArray[] {l, l},
null, null);
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in1", "in2")
.addVertex("stack", new org.deeplearning4j.nn.conf.graph.StackVertex(), "in1", "in2")
.addLayer("1", new EmbeddingLayer.Builder().nIn(5).nOut(5).build(), "stack")
.addVertex("unstack1", new org.deeplearning4j.nn.conf.graph.UnstackVertex(0, 2), "1")
.addVertex("unstack2", new org.deeplearning4j.nn.conf.graph.UnstackVertex(0, 2), "1")
.addLayer("out1", new OutputLayer.Builder().activation(Activation.TANH)
.lossFunction(LossFunctions.LossFunction.L2).nIn(5).nOut(5).build(), "unstack1")
.addLayer("out2", new OutputLayer.Builder().activation(Activation.TANH)
.lossFunction(LossFunctions.LossFunction.L2).nIn(5).nOut(5).build(), "unstack2")
.setOutputs("out1", "out2").build();
ComputationGraph g = new ComputationGraph(conf);
g.init();
g.feedForward(new INDArray[] {in1, in2}, false);
g.fit(ds);
}
Example 6: createComputationGraph
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
/**
 * Configure and initialize the computation graph. This is done once at the
 * beginning, to prepare the computation graph for training.
 */
public static ComputationGraph createComputationGraph(Map<String, Double> dict) {
final NeuralNetConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
.iterations(1)
.learningRate(LEARNING_RATE)
.rmsDecay(RMS_DECAY)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.miniBatch(true)
.updater(Updater.RMSPROP)
.weightInit(WeightInit.XAVIER)
.gradientNormalization(GradientNormalization.RenormalizeL2PerLayer);
final ComputationGraphConfiguration.GraphBuilder graphBuilder = builder.graphBuilder()
.addInputs("inputLine", "decoderInput")
.setInputTypes(InputType.recurrent(dict.size()), InputType.recurrent(dict.size()))
.addLayer("embeddingEncoder",
new EmbeddingLayer.Builder()
.nIn(dict.size())
.nOut(EMBEDDING_WIDTH)
.build(),
"inputLine")
.addLayer("encoder",
new GravesLSTM.Builder()
.nIn(EMBEDDING_WIDTH)
.nOut(HIDDEN_LAYER_WIDTH)
.activation(Activation.TANH)
.gateActivationFunction(Activation.HARDSIGMOID)
.build(),
"embeddingEncoder")
.addVertex("thoughtVector",
new LastTimeStepVertex("inputLine"),
"encoder")
.addVertex("dup",
new DuplicateToTimeSeriesVertex("decoderInput"),
"thoughtVector")
.addVertex("merge",
new MergeVertex(),
"decoderInput",
"dup")
.addLayer("decoder",
new GravesLSTM.Builder()
.nIn(dict.size() + HIDDEN_LAYER_WIDTH)
.nOut(HIDDEN_LAYER_WIDTH)
.activation(Activation.TANH)
.gateActivationFunction(Activation.HARDSIGMOID) // gate activations always use a (hard) sigmoid
.build(),
"merge")
.addLayer("output",
new RnnOutputLayer.Builder()
.nIn(HIDDEN_LAYER_WIDTH)
.nOut(dict.size())
.activation(Activation.SOFTMAX)
.lossFunction(LossFunctions.LossFunction.MCXENT) // multi-class cross entropy
.build(),
"decoder")
.setOutputs("output")
.backpropType(BackpropType.Standard) // standard backprop; with BackpropType.TruncatedBPTT the tBPTT lengths below would take effect
.tBPTTForwardLength(TBPTT_SIZE)
.tBPTTBackwardLength(TBPTT_SIZE)
.pretrain(false)
.backprop(true);
ComputationGraph net = new ComputationGraph(graphBuilder.build());
net.init();
return net;
}
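A usage sketch for this factory, assuming a toy dictionary (in the full program the dictionary maps corpus tokens to indices, and training data would come from a MultiDataSetIterator; both are assumptions here):
Map<String, Double> dict = new HashMap<>();
dict.put("hello", 0.0);
dict.put("world", 1.0);
dict.put("<eos>", 2.0);
ComputationGraph net = createComputationGraph(dict);
// net.fit(trainIterator); // features: {inputLine, decoderInput}; labels: the target sequence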
Example 7: testEmbeddingForwardPass
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
@Test
public void testEmbeddingForwardPass() {
// With the same parameters, the embedding layer should produce the same activations
// as the equivalent one-hot representation fed through a DenseLayer
int nClassesIn = 10;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
.layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build())
.layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
.build();
MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
.layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build())
.layer(1, new OutputLayer.Builder().nIn(5).nOut(4).build()).pretrain(false).backprop(true)
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
net.init();
net2.init();
net2.setParams(net.params().dup());
int batchSize = 3;
INDArray inEmbedding = Nd4j.create(batchSize, 1);
INDArray inOneHot = Nd4j.create(batchSize, nClassesIn);
Random r = new Random(12345);
for (int i = 0; i < batchSize; i++) {
int classIdx = r.nextInt(nClassesIn);
inEmbedding.putScalar(i, classIdx);
inOneHot.putScalar(new int[] {i, classIdx}, 1.0);
}
List<INDArray> activationsEmbedding = net.feedForward(inEmbedding, false);
List<INDArray> activationsDense = net2.feedForward(inOneHot, false);
for (int i = 1; i < 3; i++) {
INDArray actE = activationsEmbedding.get(i);
INDArray actD = activationsDense.get(i);
assertEquals(actE, actD);
}
}
Example 8: testEmbeddingBackwardPass
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
@Test
public void testEmbeddingBackwardPass() {
// With the same parameters, the embedding layer should produce the same activations
// as the equivalent one-hot representation fed through a DenseLayer
int nClassesIn = 10;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
.layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build()).layer(1,
new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4)
.activation(Activation.SOFTMAX).build())
.pretrain(false).backprop(true).build();
MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH)
.weightInit(WeightInit.XAVIER).list()
.layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build()).layer(1,
new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4)
.activation(Activation.SOFTMAX).build())
.pretrain(false).backprop(true).build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
net.init();
net2.init();
net2.setParams(net.params().dup());
int batchSize = 3;
INDArray inEmbedding = Nd4j.create(batchSize, 1);
INDArray inOneHot = Nd4j.create(batchSize, nClassesIn);
INDArray outLabels = Nd4j.create(batchSize, 4);
Random r = new Random(12345);
for (int i = 0; i < batchSize; i++) {
int classIdx = r.nextInt(nClassesIn);
inEmbedding.putScalar(i, classIdx);
inOneHot.putScalar(new int[] {i, classIdx}, 1.0);
int labelIdx = r.nextInt(4);
outLabels.putScalar(new int[] {i, labelIdx}, 1.0);
}
net.setInput(inEmbedding);
net2.setInput(inOneHot);
net.setLabels(outLabels);
net2.setLabels(outLabels);
net.computeGradientAndScore();
net2.computeGradientAndScore();
System.out.println(net.score() + "\t" + net2.score());
assertEquals(net2.score(), net.score(), 1e-6);
Map<String, INDArray> gradient = net.gradient().gradientForVariable();
Map<String, INDArray> gradient2 = net2.gradient().gradientForVariable();
assertEquals(gradient.size(), gradient2.size());
for (String s : gradient.keySet()) {
assertEquals(gradient2.get(s), gradient.get(s));
}
}
Example 9: testEmbeddingLayerRNN
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
@Test
public void testEmbeddingLayerRNN() {
int nClassesIn = 10;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list()
.layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build())
.layer(1, new GravesLSTM.Builder().nIn(5).nOut(7).activation(Activation.SOFTSIGN).build())
.layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(7).nOut(4)
.activation(Activation.SOFTMAX).build())
.inputPreProcessor(0, new RnnToFeedForwardPreProcessor())
.inputPreProcessor(1, new FeedForwardToRnnPreProcessor()).pretrain(false).backprop(true)
.build();
MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH)
.weightInit(WeightInit.XAVIER).list()
.layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).build())
.layer(1, new GravesLSTM.Builder().nIn(5).nOut(7).activation(Activation.SOFTSIGN).build())
.layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(7).nOut(4)
.activation(Activation.SOFTMAX).build())
.inputPreProcessor(0, new RnnToFeedForwardPreProcessor())
.inputPreProcessor(1, new FeedForwardToRnnPreProcessor()).pretrain(false).backprop(true)
.build();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
net.init();
net2.init();
net2.setParams(net.params().dup());
int batchSize = 3;
int timeSeriesLength = 8;
INDArray inEmbedding = Nd4j.create(batchSize, 1, timeSeriesLength);
INDArray inOneHot = Nd4j.create(batchSize, nClassesIn, timeSeriesLength);
INDArray outLabels = Nd4j.create(batchSize, 4, timeSeriesLength);
Random r = new Random(12345);
for (int i = 0; i < batchSize; i++) {
for (int j = 0; j < timeSeriesLength; j++) {
int classIdx = r.nextInt(nClassesIn);
inEmbedding.putScalar(new int[] {i, 0, j}, classIdx);
inOneHot.putScalar(new int[] {i, classIdx, j}, 1.0);
int labelIdx = r.nextInt(4);
outLabels.putScalar(new int[] {i, labelIdx, j}, 1.0);
}
}
net.setInput(inEmbedding);
net2.setInput(inOneHot);
net.setLabels(outLabels);
net2.setLabels(outLabels);
net.computeGradientAndScore();
net2.computeGradientAndScore();
System.out.println(net.score() + "\t" + net2.score());
assertEquals(net2.score(), net.score(), 1e-6);
Map<String, INDArray> gradient = net.gradient().gradientForVariable();
Map<String, INDArray> gradient2 = net2.gradient().gradientForVariable();
assertEquals(gradient.size(), gradient2.size());
for (String s : gradient.keySet()) {
assertEquals(gradient2.get(s), gradient.get(s));
}
}
Example 10: getEmbeddingLayer
import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; // import the required package/class
/**
 * Get the DL4J EmbeddingLayer.
 *
 * @return EmbeddingLayer
 */
public EmbeddingLayer getEmbeddingLayer() {
return (EmbeddingLayer) this.layer;
}