This article collects typical usage examples of the Python attribute keras.layers.GRU. If you have been asking yourself what layers.GRU does, how to use it, or where to find working examples, the curated snippets below may help. You can also explore keras.layers, the module this attribute belongs to, for further usage examples.
Fifteen code examples of the layers.GRU attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
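Before diving into the collected examples, here is a minimal, self-contained sketch of the layer's basic API (the unit count, shapes, and training setup are arbitrary choices for illustration, not taken from any example below):

import numpy as np
from keras.models import Sequential
from keras.layers import GRU, Dense

# GRU consumes input of shape (batch, timesteps, features) and, with
# return_sequences=False (the default), emits one vector per sequence.
model = Sequential()
model.add(GRU(32, input_shape=(10, 8)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')

# Dummy data, just to show the expected shapes.
x = np.random.rand(4, 10, 8)
y = np.random.randint(0, 2, size=(4, 1))
model.fit(x, y, epochs=1, verbose=0)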
Example 1: __middle_hidden_layer
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def __middle_hidden_layer(self, return_sequences):
    if self.current_params["layer_type"] == "GRU":
        layer = GRU(self.current_params["hidden_neurons"],
                    return_sequences=return_sequences,
                    kernel_initializer=self.current_params["kernel_initializer"],
                    recurrent_initializer=self.current_params["recurrent_initializer"],
                    recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                    bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                    dropout=self.current_params["dropout"],
                    recurrent_dropout=self.current_params["recurrent_dropout"])
    else:
        layer = LSTM(self.current_params["hidden_neurons"],
                     return_sequences=return_sequences,
                     kernel_initializer=self.current_params["kernel_initializer"],
                     recurrent_initializer=self.current_params["recurrent_initializer"],
                     recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                     bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                     dropout=self.current_params["dropout"],
                     recurrent_dropout=self.current_params["recurrent_dropout"])
    return layer
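The helper __generate_regulariser is not shown in this excerpt. Judging from how it is called, with an L1 and an L2 coefficient, a plausible sketch (an assumption, not the original project's code) is:

from keras import regularizers

def __generate_regulariser(self, l1_value, l2_value):
    # Assumed helper: returns a keras L1/L2 regulariser,
    # or None when both coefficients are zero or missing.
    if l1_value and l2_value:
        return regularizers.l1_l2(l1=l1_value, l2=l2_value)
    if l1_value:
        return regularizers.l1(l1_value)
    if l2_value:
        return regularizers.l2(l2_value)
    return None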
Example 2: test_tiny_no_sequence_gru_random
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_tiny_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example 3: test_small_no_sequence_gru_random
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_small_no_sequence_gru_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
Example 4: test_medium_no_sequence_gru_random
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_medium_no_sequence_gru_random(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10
    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )
    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example 5: test_gru_seq
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_gru_seq(self):
    np.random.seed(1988)
    input_dim = 11
    input_length = 5
    # Define a model
    model = Sequential()
    model.add(
        GRU(20, input_shape=(input_length, input_dim), return_sequences=False)
    )
    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )
    # Test the keras model
    self._test_model(model)
Example 6: test_tiny_mcrnn_music_tagger
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_tiny_mcrnn_music_tagger(self):
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name="bn_0_freq")(x)
    # Conv block 1
    x = Conv2D(2, (3, 3), padding="same", name="conv1")(x)
    x = BatchNormalization(axis=3, name="bn1")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
    # Conv block 2
    x = Conv2D(4, (3, 3), padding="same", name="conv2")(x)
    x = BatchNormalization(axis=3, name="bn2")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)
    # Should get you (1, 1, 2, 4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name="gru1")(x)
    x = GRU(32, return_sequences=False, name="gru2")(x)
    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, mode="random_zero_mean", delta=1e-2)
Example 7: interp_net
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def interp_net():
    # gpu_num, num_features, timestamp, ref_points, hours_look_ahead and hid
    # are module-level globals defined elsewhere in the originating project.
    if gpu_num > 1:
        dev = "/cpu:0"
    else:
        dev = "/gpu:0"
    with tf.device(dev):
        main_input = Input(shape=(4 * num_features, timestamp), name='input')
        sci = single_channel_interp(ref_points, hours_look_ahead)
        cci = cross_channel_interp()
        interp = cci(sci(main_input))
        reconst = cci(sci(main_input, reconstruction=True),
                      reconstruction=True)
        aux_output = Lambda(lambda x: x, name='aux_output')(reconst)
        z = Permute((2, 1))(interp)
        z = GRU(hid, activation='tanh', recurrent_dropout=0.2, dropout=0.2)(z)
        main_output = Dense(1, activation='sigmoid', name='main_output')(z)
        orig_model = Model([main_input], [main_output, aux_output])
    if gpu_num > 1:
        model = multi_gpu_model(orig_model, gpus=gpu_num)
    else:
        model = orig_model
    orig_model.summary()  # summary() prints directly and returns None
    return model
Example 8: test_temporal_regression
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of LSTM units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(layers.LSTM(y_train.shape[-1],
                          input_shape=(x_train.shape[1], x_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(x_train, y_train, epochs=5, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert history.history['loss'][-1] < 1.
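The test above exercises an LSTM; an equivalent GRU variant, in keeping with this article's topic, would only swap the recurrent layer (a minimal sketch, reusing the same data and loss):

model = Sequential()
model.add(layers.GRU(y_train.shape[-1],
                     input_shape=(x_train.shape[1], x_train.shape[2])))
model.compile(loss='hinge', optimizer='adam')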
Example 9: bidLstm
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def bidLstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    #x = Dropout(dropout_rate)(x)
    x = Attention(maxlen)(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    #print('len(x):', len(x))
    #x = AttentionWeightedAverage(maxlen)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# conv+GRU with embeddings
Example 10: cnn
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units)(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example 11: cnn2_best
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example 12: cnn2
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example 13: ctpn
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
    """
    The CTPN network.
    :param base_features: (B,H,W,C)
    :param num_anchors: number of anchors
    :param rnn_units:
    :param fc_units:
    :return:
    """
    x = layers.Conv2D(512, kernel_size=(3, 3), padding='same', name='pre_fc')(base_features)  # [B,H,W,512]
    # Run the RNN along the width dimension
    rnn_forward = layers.TimeDistributed(layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal'),
                                         name='gru_forward')(x)
    rnn_backward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal', go_backwards=True),
        name='gru_backward')(x)
    rnn_output = layers.Concatenate(name='gru_concat')([rnn_forward, rnn_backward])  # (B,H,W,256)
    # Fully connected layer implemented as a 1x1 convolution
    fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu', name='fc_output')(
        rnn_output)  # (B,H,W,512)
    # Classification
    class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='cls')(fc_output)
    class_logits = layers.Reshape(target_shape=(-1, 2), name='cls_reshape')(class_logits)
    # Regression of the vertical centre coordinate and the height
    predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='deltas')(fc_output)
    predict_deltas = layers.Reshape(target_shape=(-1, 2), name='deltas_reshape')(predict_deltas)
    # Side refinement (only the x offset needs to be predicted)
    predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1), name='side_deltas')(fc_output)
    predict_side_deltas = layers.Reshape(target_shape=(-1, 1), name='side_deltas_reshape')(
        predict_side_deltas)
    return class_logits, predict_deltas, predict_side_deltas
Example 14: buildModel_RNN
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def buildModel_RNN(word_index, embeddings_index, nClasses, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM):
    '''
    buildModel_RNN(word_index, embeddings_index, nClasses, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM):
    word_index is the word index,
    embeddings_index is the embeddings index; see data_helper.py
    nClasses is the number of classes,
    MAX_SEQUENCE_LENGTH is the maximum length of text sequences,
    EMBEDDING_DIM is an int value for the dimension of word embeddings; see data_helper.py
    output: RNN model
    '''
    model = Sequential()
    embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # Words not found in the embedding index keep their random initialisation.
            embedding_matrix[i] = embedding_vector
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))
    model.add(GRU(100, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(nClasses, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    return model
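A hypothetical call, with toy stand-ins for the real vocabulary and pretrained embedding index (every name and size here is an illustrative assumption, not taken from the project's data_helper.py):

# Toy inputs: a two-word vocabulary and a single pretrained 50-d vector.
word_index = {'hello': 1, 'world': 2}
embeddings_index = {'hello': np.zeros(50)}
model = buildModel_RNN(word_index, embeddings_index,
                       nClasses=3, MAX_SEQUENCE_LENGTH=20, EMBEDDING_DIM=50)
model.summary()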
Example 15: __input_layer
# Required import: from keras import layers [as alias]
# Alternatively: from keras.layers import GRU [as alias]
def __input_layer(self, dims, return_sequences):
    """ Returns a GRU or LSTM input layer """
    if self.current_params["bidirectional"]:
        return Bidirectional(self.__middle_hidden_layer(return_sequences), input_shape=dims)
    else:
        if self.current_params["layer_type"] == "GRU":
            return GRU(self.current_params["hidden_neurons"],
                       input_shape=dims,
                       return_sequences=return_sequences,
                       kernel_initializer=self.current_params["kernel_initializer"],
                       recurrent_initializer=self.current_params["recurrent_initializer"],
                       recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                       bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                       dropout=self.current_params["dropout"],
                       recurrent_dropout=self.current_params["recurrent_dropout"])
        return LSTM(self.current_params["hidden_neurons"],
                    input_shape=dims,
                    return_sequences=return_sequences,
                    kernel_initializer=self.current_params["kernel_initializer"],
                    recurrent_initializer=self.current_params["recurrent_initializer"],
                    recurrent_regularizer=self.__generate_regulariser(self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                    bias_regularizer=self.__generate_regulariser(self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                    dropout=self.current_params["dropout"],
                    recurrent_dropout=self.current_params["recurrent_dropout"])