This page collects typical usage examples of keras.layers.TimeDistributed in Python. If you have been wondering what layers.TimeDistributed is for, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, keras.layers.

The following 15 code examples of layers.TimeDistributed are listed, ordered roughly by popularity.
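
Before the examples, a minimal sketch of what TimeDistributed does, assuming standalone Keras 2.x: it applies the same inner layer, with shared weights, independently at every timestep of a sequence input.

from keras.layers import Input, Dense, TimeDistributed
from keras.models import Model

# A batch of sequences: 10 timesteps, 16 features per step.
x = Input(shape=(10, 16))
# The same Dense(8) weights are applied independently at each timestep.
y = TimeDistributed(Dense(8))(x)
model = Model(x, y)
print(model.output_shape)  # (None, 10, 8)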

Example 1: create_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
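
The snippet assumes several module-level constants from the original project, and CRF is presumably keras_contrib.layers.CRF, whose loss_function and accuracy attributes are passed to compile. A hypothetical configuration that would make it runnable might look like the following; the names match the code above, but every value is illustrative:

# Illustrative values only; the real project defines its own.
EMBED_DIM = 200                       # embedding size shared by both branches
DROPOUT_RATE = 0.5
FILTERS = 128                         # Conv1D filter count
HALF_WIN_SIZE = 2                     # kernel_size = 2 * 2 + 1 = 5
DENSE_DIM = 100
length = 100                          # padded sequence length
vocab = {}                            # token-to-id vocabulary
chunk_tags = ['O', 'B-PER', 'I-PER', 'B-LOC', 'I-LOC']  # example tag set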

Example 2: get_audio_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model

Example 3: get_bimodal_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model

Example 4: classifier

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # Compile times on Theano tend to be very high, so smaller ROI pooling
    # regions are used as a workaround.
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # Note: no regression target for the background class.
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
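
TimeDistributed is what lets the same classification head run over every ROI: RoiPoolingConv returns one pooled feature map per region, and the wrapped Flatten/Dense layers are applied to each ROI independently with shared weights. A minimal shape check under assumed sizes (32 ROIs, VGG-style 7x7x512 features):

from keras import backend as K
from keras.layers import Input, Flatten, Dense, TimeDistributed

rois = Input(shape=(32, 7, 7, 512))       # (num_rois, rows, cols, channels)
flat = TimeDistributed(Flatten())(rois)   # -> (None, 32, 25088)
fc = TimeDistributed(Dense(4096))(flat)   # same fc weights for every ROI
print(K.int_shape(fc))                    # (None, 32, 4096)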

Example 5: __build_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def __build_model(self):
    model = Sequential()
    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)
    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)
    model.add(TimeDistributed(Dense(256, activation="relu")))
    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)
    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()
    return model
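
The frozen Embedding layer requires a pre-trained matrix whose row count equals input_dim, with row 0 conventionally reserved for padding; crf_loss and crf_viterbi_accuracy are presumably the keras_contrib helpers. A hedged sketch of the assumed shapes, with placeholder vocabulary and dimension:

import numpy as np

embedding_dim = 100                   # placeholder; must match self.embedding_dim
vocab = {'我': 1, '们': 2}            # placeholder token-to-id map
# One row per token id, plus row 0 for padding -- hence len(vocab) + 1.
embedding_mat = np.zeros((len(vocab) + 1, embedding_dim), dtype='float32')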

Example 6: classifier

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # Compile times on Theano tend to be very high, so smaller ROI pooling
    # regions are used as a workaround.
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        # Changed the input shape to 1088 from 1024 because nn_base's output
        # is 1088. Not sure if this is correct.
        input_shape = (num_rois, 14, 14, 1088)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 1024, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = classifier_layers(out_roi_pool, input_shape=input_shape, trainable=True)
    out = TimeDistributed(Flatten())(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # Note: no regression target for the background class.
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

Example 7: classifier

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # Compile times on Theano tend to be very high, so smaller ROI pooling
    # regions are used as a workaround.
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # Note: no regression target for the background class.
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

Example 8: classifier_layers

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def classifier_layers(x, input_shape, trainable=False):
    # Compile times on Theano tend to be very high, so smaller ROI pooling
    # regions are used as a workaround (hence a smaller stride in the region
    # that follows the ROI pool).
    x = TimeDistributed(SeparableConv2D(1536, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv1')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = TimeDistributed(SeparableConv2D(2048, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv2')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    # Global average pooling over each ROI's spatial dimensions.
    x = TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)
    return x

Example 9: classifier

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def classifier(base_layers, input_rois, num_rois, nb_classes, trainable=True):
    """
    The final classifier, matching the original VGG-16 implementation.
    The only difference is that the RoiPooling layer uses TensorFlow's
    bilinear interpolation.
    """
    pooling_regions = 7
    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois, trainable=trainable)([base_layers, input_rois])
    out = TimeDistributed(Flatten(), name="flatten", trainable=trainable)(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', trainable=trainable), name="fc1", trainable=trainable)(out)
    out = TimeDistributed(Dropout(0.5), name="drop_out1", trainable=trainable)(out)  # dropout to match the original implementation
    out = TimeDistributed(Dense(4096, activation='relu', trainable=trainable), name="fc2", trainable=trainable)(out)
    out = TimeDistributed(Dropout(0.5), name="drop_out2", trainable=trainable)(out)  # dropout to match the original implementation
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero', trainable=trainable), name='dense_class_{}'.format(nb_classes), trainable=trainable)(out)
    # Note: no regression target for the background class.
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero', trainable=trainable), name='dense_regress_{}'.format(nb_classes), trainable=trainable)(out)
    return [out_class, out_regr]

Example 10: create_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def create_model(maxlen, chars, word_size, infer=False):
    """
    :param infer: if True, skip compiling (e.g. when loading weights for inference)
    :param maxlen: padded sequence length
    :param chars: character vocabulary
    :param word_size: embedding dimension
    :return: the (optionally compiled) Keras model
    """
    sequence = Input(shape=(maxlen,), dtype='int32')
    embedded = Embedding(len(chars) + 1, word_size, input_length=maxlen, mask_zero=True)(sequence)
    blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)
    output = TimeDistributed(Dense(5, activation='softmax'))(blstm)
    model = Model(inputs=sequence, outputs=output)
    if not infer:
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
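
The five softmax units per timestep suggest a character tagging scheme (for example BMES plus padding in Chinese word segmentation). A hedged usage sketch; chars, maxlen, and word_size below are placeholders:

# Illustrative call; the real chars/maxlen come from the training data.
chars = list('abcdefg')                    # placeholder character vocabulary
model = create_model(maxlen=50, chars=chars, word_size=128, infer=True)
print(model.output_shape)                  # (None, 50, 5): per-step tag scores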

Example 11: create_lstm

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def create_lstm(hidden_units=[50], dropout=0.05, bidirectional=True):
    model = Sequential()
    if bidirectional:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True),
                                        input_shape=(None, config.N_MELS)))
            else:
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True)))
    else:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                model.add(LSTM(unit, dropout=dropout, return_sequences=True,
                               input_shape=(None, config.N_MELS)))
            else:
                model.add(LSTM(unit, dropout=dropout, return_sequences=True))
    model.add(TimeDistributed(Dense(config.CLASSES, activation='sigmoid')))
    return model
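
Because the per-frame output is sigmoid rather than softmax, this is a multi-label classifier, so a matching compile step would use binary cross-entropy. A hedged usage sketch; config.N_MELS and config.CLASSES come from the project's config module, and the hidden_units values are illustrative:

# Two stacked BiLSTM layers of 50 units each; the time axis is unconstrained.
model = create_lstm(hidden_units=[50, 50], dropout=0.05, bidirectional=True)
model.compile(loss='binary_crossentropy', optimizer='adam')
# Input:  (batch, n_frames, config.N_MELS)
# Output: (batch, n_frames, config.CLASSES), one sigmoid score per class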

Example 12: AlternativeRNNModel

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector per image, which we feed to the RNN model.
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector per image, which we feed to the RNN model.
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: captions are zero-padded to the same length; the mask makes
    # the model ignore those padded steps, purely an efficiency measure.
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    # Since the next word is predicted from the previous words (whose number
    # grows at every step through the caption), return_sequences must be True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merge the two branches and add a softmax classifier.
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
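
RepeatVector is what aligns the single image vector with the per-word caption features: it tiles the image embedding across max_len timesteps so the two branches can be concatenated. A shape sketch with assumed sizes:

from keras import backend as K
from keras.layers import Input, Dense, RepeatVector

img = Input(shape=(2048,))                 # e.g. InceptionV3 features
feat = Dense(256, activation='relu')(img)  # embedding_size = 256 (assumed)
tiled = RepeatVector(40)(feat)             # max_len = 40 (assumed)
print(K.int_shape(tiled))                  # (None, 40, 256): one copy per timestep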

Example 13: build

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def build(self, input_shape):
    self._validate_input_shape(input_shape)

    d_k = self._d_k if self._d_k else input_shape[1][-1]
    d_model = self._d_model if self._d_model else input_shape[1][-1]
    d_v = self._d_v

    if type(d_k) == tf.Dimension:
        d_k = d_k.value
    if type(d_model) == tf.Dimension:
        d_model = d_model.value

    self._q_layers = []
    self._k_layers = []
    self._v_layers = []
    self._sdp_layer = ScaledDotProductAttention(return_attention=self._return_attention)

    # One independent query/key/value projection per attention head.
    for _ in range(self._h):
        self._q_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)
            )
        )
        self._k_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)
            )
        )
        self._v_layers.append(
            TimeDistributed(
                Dense(d_v, activation=self._activation, use_bias=False)
            )
        )

    self._output = TimeDistributed(Dense(d_model))
    # if self._return_attention:
    #     self._output = Concatenate()

Example 14: creat_model

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def creat_model(input_shape, num_class):
    init = initializers.Orthogonal(gain=args.norm)
    sequence_input = Input(shape=input_shape)
    mask = Masking(mask_value=0.)(sequence_input)
    if args.aug:
        mask = augmentaion()(mask)
    X = Noise(0.075)(mask)
    if args.model[0:2] == 'VA':
        # View-adaptation subnetwork: predict a per-frame translation and
        # rotation, then transform the masked input.
        trans = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True, implementation=2, recurrent_initializer=init)(X)
        trans = Dropout(0.5)(trans)
        trans = TimeDistributed(Dense(3, kernel_initializer='zeros'))(trans)

        rot = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True, implementation=2, recurrent_initializer=init)(X)
        rot = Dropout(0.5)(rot)
        rot = TimeDistributed(Dense(3, kernel_initializer='zeros'))(rot)

        transform = Concatenate()([rot, trans])
        X = VA()([mask, transform])

    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True, implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True, implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True, implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = TimeDistributed(Dense(num_class))(X)
    X = MeanOverTime()(X)
    X = Activation('softmax')(X)
    model = Model(sequence_input, X)
    return model
Author: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Source file: va-rnn.py

Example 15: set_trainable

# Required import: from keras import layers
# Or: from keras.layers import TimeDistributed
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
    """Sets model layers as trainable if their names match
    the given regular expression.
    """
    # Print message on the first call (but not on recursive calls)
    if verbose > 0 and keras_model is None:
        log("Selecting layers to train")

    keras_model = keras_model or self.keras_model

    # In multi-GPU training, we wrap the model. Get layers
    # of the inner model because they have the weights.
    layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
        else keras_model.layers

    for layer in layers:
        # Is the layer a model?
        if layer.__class__.__name__ == 'Model':
            print("In model: ", layer.name)
            self.set_trainable(
                layer_regex, keras_model=layer, indent=indent + 4)
            continue

        if not layer.weights:
            continue
        # Is it trainable?
        trainable = bool(re.fullmatch(layer_regex, layer.name))
        # Update layer. If layer is a container, update inner layer.
        if layer.__class__.__name__ == 'TimeDistributed':
            layer.layer.trainable = trainable
        else:
            layer.trainable = trainable
        # Print trainable layer names
        if trainable and verbose > 0:
            log("{}{:20} ({})".format(" " * indent, layer.name,
                                      layer.__class__.__name__))
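
A hedged usage sketch, assuming model is an instance of the class that defines set_trainable; the regex is illustrative, and the actual layer names depend on the model:

# Unfreeze only the classification and regression heads; everything else
# (including layers wrapped in TimeDistributed) stays frozen.
model.set_trainable(r"(dense_class.*)|(dense_regress.*)")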