This article collects typical usage examples of the Python method keras.layers.embeddings.Embedding: what embeddings.Embedding does, how to call it, and how it is used in real projects. The curated examples below should help; you can also explore the rest of the keras.layers.embeddings module.
The following presents 15 code examples of embeddings.Embedding, ordered by popularity by default.
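Before the project-specific examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the basic pattern they all share: an Embedding layer maps integer token ids to dense float vectors.

import numpy as np
from keras.models import Sequential
from keras.layers.embeddings import Embedding

model = Sequential()
# vocabulary of 1000 ids, 64-dimensional vectors, sequences of length 10
model.add(Embedding(input_dim=1000, output_dim=64, input_length=10))
model.compile('rmsprop', 'mse')

token_ids = np.random.randint(1000, size=(32, 10))  # a batch of 32 sequences
vectors = model.predict(token_ids)                  # shape (32, 10, 64)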
Example 1: textual_embedding
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def textual_embedding(self, language_model, mask_zero):
    """
    Note:
    * mask_zero only makes sense if embedding is learnt
    """
    if self._config.textual_embedding_dim > 0:
        print('Textual Embedding is on')
        language_model.add(Embedding(
            self._config.input_dim,
            self._config.textual_embedding_dim,
            mask_zero=mask_zero))
    else:
        print('Textual Embedding is off')
        language_model.add(Reshape(
            input_shape=(self._config.max_input_time_steps, self._config.input_dim),
            dims=(self._config.max_input_time_steps, self._config.input_dim)))
        if mask_zero:
            language_model.add(Masking(0))
    return language_model
Example 2: textual_embedding_fixed_length
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def textual_embedding_fixed_length(self, language_model, mask_zero):
    """
    In contrast to textual_embedding, it produces a fixed length output.
    """
    if self._config.textual_embedding_dim > 0:
        print('Textual Embedding with fixed length is on')
        language_model.add(Embedding(
            self._config.input_dim,
            self._config.textual_embedding_dim,
            input_length=self._config.max_input_time_steps,
            mask_zero=mask_zero))
    else:
        print('Textual Embedding with fixed length is off')
        language_model.add(Reshape(
            input_shape=(self._config.max_input_time_steps, self._config.input_dim),
            dims=(self._config.max_input_time_steps, self._config.input_dim)))
        if mask_zero:
            language_model.add(Masking(0))
    return language_model
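Note that Reshape(dims=...) in examples 1 and 2 is an older Keras spelling; from Keras 1 onward the argument is target_shape. A sketch of the modern form, keeping the names from the snippets above:

# hypothetical rewrite for Keras 1+; the original projects used an older release
language_model.add(Reshape(
    target_shape=(self._config.max_input_time_steps, self._config.input_dim),
    input_shape=(self._config.max_input_time_steps, self._config.input_dim)))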
Example 3: create
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def create(self):
    assert self._config.textual_embedding_dim == 0, \
        'Embedding cannot be learnt but must be fixed'
    language_forward = Sequential()
    language_forward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
    self.language_forward = language_forward

    language_backward = Sequential()
    language_backward.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim, return_sequences=False,
        go_backwards=True,
        input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
    self.language_backward = language_backward

    self.add(Merge([language_forward, language_backward]))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example 4: gen_model
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
              hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s" %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    for i in xrange(num_hidden_layers):
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                            inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid',
                        inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with following config:\n%s" % json.dumps(model.get_config(), indent=4))
    # `optimizer` is a module-level global in the original project.
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
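The snippet above targets Python 2 and an early Keras release (xrange, output_dim=, inner_activation=, TimeDistributedDense). A rough Keras 2 / Python 3 spelling of the same recurrent stack, assuming identical hyperparameters, would be:

from keras.layers import LSTM, Dense, Dropout, TimeDistributed

for i in range(num_hidden_layers):
    model.add(LSTM(hidden_layer_size, activation='sigmoid',
                   recurrent_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
model.add(LSTM(output_size, activation='sigmoid',
               recurrent_activation='hard_sigmoid', return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(output_size, activation='softmax')))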
Example 5: test_embedding
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def test_embedding():
    # layer_test comes from keras.utils.test_utils; K is the Keras backend.
    layer_test(Embedding,
               kwargs={'output_dim': 4, 'input_dim': 10, 'input_length': 2},
               input_shape=(3, 2),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True},
               input_shape=(3, 2),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True},
               input_shape=(3, 2, 5),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
    layer_test(Embedding,
               kwargs={'output_dim': 4, 'input_dim': 10, 'mask_zero': True,
                       'input_length': (None, 5)},
               input_shape=(3, 2, 5),
               input_dtype='int32',
               expected_output_dtype=K.floatx())
Example 6: test_masking_correctness
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def test_masking_correctness(layer_class):
    # Check masking: output with left padding and right padding
    # should be the same.
    # (embedding_num, embedding_dim, timesteps, num_samples and units are
    # module-level constants in the surrounding test file.)
    model = Sequential()
    model.add(embeddings.Embedding(embedding_num, embedding_dim,
                                   mask_zero=True,
                                   input_length=timesteps,
                                   batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(units, return_sequences=False)
    model.add(layer)
    model.compile(optimizer='sgd', loss='mse')

    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)

    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)

    assert_allclose(out7, out6, atol=1e-5)
Example 7: build_embedding_layer
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def build_embedding_layer(word2index, emb_type='glove', embedding_dim=300, max_len=40, trainable=True):
    vocab_size = len(word2index) + 1
    if 'glove' in emb_type:
        word2vec_map = utils.load_vectors(filename='glove.6B.%dd.txt' % embedding_dim)
        emb_layer = pretrained_embedding_layer(word2vec_map, word2index, embedding_dim, vocab_size, trainable=trainable)
    elif 'emoji' in emb_type:
        emoji2vec_map = utils.load_vectors(filename='emoji_embeddings_%dd.txt' % embedding_dim)
        emb_layer = pretrained_embedding_layer(emoji2vec_map, word2index, embedding_dim, vocab_size, trainable=trainable)
    elif 'random' in emb_type:
        words = word2index.keys()
        random2vec_map = utils.build_random_word2vec(words, embedding_dim=embedding_dim, variance=1)
        emb_layer = pretrained_embedding_layer(random2vec_map, word2index, embedding_dim, vocab_size, trainable=trainable)
    else:
        emb_layer = Embedding(vocab_size, embedding_dim, input_length=max_len, trainable=trainable)
        emb_layer.build((None,))
    return emb_layer
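pretrained_embedding_layer is a project helper that is not shown here. A plausible sketch (an assumption, modeled on the common pattern for loading pretrained vectors) builds the weight matrix row by row and hands it to an Embedding layer:

import numpy as np
from keras.layers.embeddings import Embedding

# hypothetical helper -- the real project version may differ
def pretrained_embedding_layer(word2vec_map, word2index, embedding_dim, vocab_size, trainable=False):
    emb_matrix = np.zeros((vocab_size, embedding_dim))  # row 0 stays zero for padding
    for word, index in word2index.items():
        vector = word2vec_map.get(word)
        if vector is not None:
            emb_matrix[index] = vector
    emb_layer = Embedding(vocab_size, embedding_dim, trainable=trainable)
    emb_layer.build((None,))  # the layer must be built before set_weights
    emb_layer.set_weights([emb_matrix])
    return emb_layer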
Example 8: create
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def create(inputtokens, vocabsize, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')
    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding, input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)
    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units))(net)
    net = Dropout(dropout)(net)
    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)
    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)
    return model
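get_available_gpus and make_parallel are utilities from the surrounding project. Since Keras 2.0.9 the library ships an equivalent wrapper, so (assuming a TensorFlow backend) the data-parallel step could also be written as:

from keras.utils import multi_gpu_model

if ngpus > 1:
    model = multi_gpu_model(model, gpus=ngpus)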
Example 9: setUp
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def setUp(self):
    self.embs = np.array([
        [0, 0, 0],
        [1, 10, 100],
        [2, 20, 200],
        [3, 30, 300],
        [4, 40, 400],
        [5, 50, 500],
        [6, 60, 600],
        [7, 70, 700],
        [8, 80, 800],
        [9, 90, 900]],
        dtype='float32')
    self.emb_dim = self.embs.shape[1]
    self.token_emb = Embedding(
        input_dim=self.embs.shape[0],
        output_dim=self.emb_dim,
        weights=[self.embs],
        mask_zero=False,  # Reshape layer does not support masking.
        trainable=True,
        name='token_emb')
    self.gather_layer = Lambda(gather3, output_shape=gather_output_shape3)
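gather3 and gather_output_shape3 come from the project under test and are not shown. A plausible sketch (an assumption, consistent with how the Lambda is used here: selecting embedding rows by integer index) might look like:

import keras.backend as K

# hypothetical helpers -- the real project versions may differ
def gather3(inputs):
    reference, indices = inputs  # reference: (rows, dim); indices: integer ids
    return K.gather(reference, K.cast(indices, 'int32'))

def gather_output_shape3(input_shapes):
    ref_shape, idx_shape = input_shapes
    return tuple(idx_shape) + (ref_shape[-1],)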
Example 10: visual_embedding
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def visual_embedding(self, visual_model, input_dimensionality):
    if self._config.visual_embedding_dim > 0:
        print('Visual Embedding is on')
        visual_model.add(Dense(
            self._config.visual_embedding_dim,
            input_shape=(input_dimensionality,)))
    return visual_model
Example 11: make_embedding
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def make_embedding(vocab_size, wv_size, init=None, fixed=False, constraint=ConstNorm(3.0, True), **kwargs):
    '''
    Takes parameters and makes a word vector embedding.

    Args:
        vocab_size: integer -- how many words are in your vocabulary
        wv_size: how big you want the word vectors
        init: initial word vectors -- defaults to None. If specified,
            must be an np.array of shape (vocab_size, wv_size)
        fixed: boolean -- whether the word vectors should be fixed

    Returns:
        a Keras Embedding layer
    '''
    if (init is not None) and len(init.shape) == 2:
        emb = Embedding(vocab_size, wv_size, weights=[init], W_constraint=constraint)  # Keras needs a list for initial weights
    else:
        emb = Embedding(vocab_size, wv_size, W_constraint=constraint)
    if fixed:
        emb.trainable = False
        # emb.params = []
    return emb
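W_constraint is the Keras 1.x argument name, and ConstNorm is a constraint defined in the surrounding project. In Keras 2 the argument was renamed, so a rough equivalent using the built-in max-norm constraint (an approximation of ConstNorm(3.0, True), which may behave differently) would be:

from keras.constraints import max_norm
from keras.layers.embeddings import Embedding

# sketch for Keras 2; max_norm stands in for the project's ConstNorm
emb = Embedding(vocab_size, wv_size,
                weights=[init] if init is not None else None,
                embeddings_constraint=max_norm(3.0),
                trainable=not fixed)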
Example 12: test_unitnorm_constraint
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def test_unitnorm_constraint(self):
    lookup = Sequential()
    lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
    lookup.add(Flatten())
    lookup.add(Dense(2, 1))  # old Keras 0.x signature: Dense(input_dim, output_dim)
    lookup.add(Activation('sigmoid'))
    lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
    lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
    norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
    self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
Example 13: get_model
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def get_model(self, num_classes, activation='sigmoid'):
    max_len = opt.max_len
    voca_size = opt.unigram_hash_size + 1
    with tf.device('/gpu:0'):
        embd = Embedding(voca_size,
                         opt.embd_size,
                         name='uni_embd')

        t_uni = Input((max_len,), name="input_1")
        t_uni_embd = embd(t_uni)  # token embeddings

        w_uni = Input((max_len,), name="input_2")
        w_uni_mat = Reshape((max_len, 1))(w_uni)  # per-token weights

        # weighted sum of the token embeddings over the time axis
        uni_embd_mat = dot([t_uni_embd, w_uni_mat], axes=1)
        uni_embd = Reshape((opt.embd_size,))(uni_embd_mat)

        embd_out = Dropout(rate=0.5)(uni_embd)
        relu = Activation('relu', name='relu1')(embd_out)
        outputs = Dense(num_classes, activation=activation)(relu)

        model = Model(inputs=[t_uni, w_uni], outputs=outputs)
        optm = keras.optimizers.Nadam(opt.lr)
        model.compile(loss='binary_crossentropy',
                      optimizer=optm,
                      metrics=[top1_acc])
        model.summary(print_fn=lambda x: self.logger.info(x))
    return model
Example 14: build_MLP_model
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def build_MLP_model(self):
    NUM_WINDOW_FEATURES = 2
    left_token_input = Input(name='left_token_input', shape=(NUM_WINDOW_FEATURES,))
    left_token_embedding = Embedding(output_dim=self.preprocessor.embedding_dims,
                                     input_dim=self.preprocessor.max_features,
                                     input_length=NUM_WINDOW_FEATURES)(left_token_input)
    left_token_embedding = Flatten(name="left_token_embedding")(left_token_embedding)

    n_PoS_tags = len(self.tag_names)
    left_PoS_input = Input(name='left_PoS_input', shape=(n_PoS_tags,))
    #target_token_input = Input(name='target_token_input', shape=(1,))

    right_token_input = Input(name='right_token_input', shape=(NUM_WINDOW_FEATURES,))
    right_token_embedding = Embedding(output_dim=self.preprocessor.embedding_dims,
                                      input_dim=self.preprocessor.max_features,
                                      input_length=NUM_WINDOW_FEATURES)(right_token_input)
    right_PoS_input = Input(name='right_PoS_input', shape=(n_PoS_tags,))
    right_token_embedding = Flatten(name="right_token_embedding")(right_token_embedding)

    other_features_input = Input(name='other_feature_inputs', shape=(4,))

    x = merge([left_token_embedding,  #target_token_input,
               right_token_embedding,
               left_PoS_input, right_PoS_input, other_features_input],
              mode='concat', concat_axis=1)
    x = Dense(128, name="hidden1", activation='relu')(x)
    x = Dropout(.2)(x)
    x = Dense(64, name="hidden2", activation='relu')(x)
    output = Dense(1, name="prediction", activation='sigmoid')(x)

    self.model = Model([left_token_input, left_PoS_input,  #target_token_input,
                        right_token_input, right_PoS_input, other_features_input],
                       output=[output])
    self.model.compile(optimizer="adam", loss="binary_crossentropy")
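merge (lowercase) with mode='concat' is the Keras 1.x functional interface and was removed in Keras 2. The same concatenation step would now read (a sketch, keeping the tensors from the snippet above):

from keras.layers import concatenate

x = concatenate([left_token_embedding, right_token_embedding,
                 left_PoS_input, right_PoS_input, other_features_input],
                axis=1)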
Example 15: gen_model_brnn
# Required import: from keras.layers import embeddings [as alias]
# Or: from keras.layers.embeddings import Embedding [as alias]
def gen_model_brnn(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
                   hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s" %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True),
                       name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True),
                       name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i + 1))
        # the dual-input Dropout node concatenates the forward and backward outputs
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i + 1))
    model.add_node(TimeDistributedDense(output_size, activation="softmax"), name="tdd", input=dropout_name)
    logger.info("Added TimeDistributedDense node")
    model.add_output(name="output", input="tdd")
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    # `optimizer` is a module-level global in the original project.
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {"output": 'categorical_crossentropy'})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
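The Graph container used above dates from Keras 0.x and was deprecated and later removed. A rough functional-API sketch of the same network (an assumption: identical hyperparameters, with Bidirectional's default concat merge standing in for the Graph's dual-input Dropout node, and 'adam' standing in for the module-level optimizer):

from keras.models import Model
from keras.layers import Input, Dropout, Dense, LSTM, TimeDistributed, Bidirectional
from keras.layers.embeddings import Embedding

inp = Input(shape=(maxlen,), dtype='int32')
x = Embedding(vocab_size, embedding_size, input_length=maxlen)(inp)
x = Dropout(0.5)(x)
for _ in range(num_hidden_layers):
    x = Bidirectional(LSTM(hidden_layer_size, return_sequences=True), merge_mode='concat')(x)
    x = Dropout(0.5)(x)
out = TimeDistributed(Dense(output_size, activation='softmax'))(x)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy')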