This article collects typical usage examples of the Python method keras.layers.concatenate. If you are unsure how layers.concatenate works, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore the other members of the keras.layers module.
The following presents 15 code examples of layers.concatenate, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
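Before diving into the examples, here is a minimal, self-contained sketch of the method itself (variable names are arbitrary): keras.layers.concatenate takes a list of tensors and merges them along a given axis, returning a single tensor.

# Minimal sketch: merging two Dense branches with keras.layers.concatenate
from keras.layers import Input, Dense, concatenate
from keras.models import Model

inp = Input(shape=(8,))
a = Dense(4, activation='relu')(inp)
b = Dense(4, activation='relu')(inp)
merged = concatenate([a, b])  # concatenated on the last axis: shape (None, 8)
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=inp, outputs=out)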
Example 1: RNNModel
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image, which is fed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image, which is fed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
    image_model = Dense(embedding_size, activation='relu')(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero=True: captions are zero-padded to the same length; the mask tells downstream
    # layers to ignore the padding, which is both correct and more efficient.
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
    caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

    # Merge the image and caption branches and add a softmax classifier over the vocabulary
    final_model_1 = concatenate([image_model, caption_model])
    final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
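A rough usage sketch for the function above; the configuration keys match the snippet, but the specific values, vocabulary size and caption length are made-up placeholders.

# Hypothetical configuration; all values are illustrative only
rnn_config = {'embedding_size': 256, 'dropout': 0.5, 'LSTM_units': 256, 'dense_units': 256}
model = RNNModel(vocab_size=7500, max_len=34, rnnConfig=rnn_config, model_type='vgg16')
model.summary()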
Example 2: weather_l2
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def weather_l2(hidden_nums=100, l2=0.01):
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    # Reconstruction head
    out_u = Dense(37, activation='sigmoid',
                  name='ae_part')(hn)
    # Prediction head
    out_sig = Dense(37, activation='linear',
                    name='pred_part')(hn)
    # Concatenate both heads so a single custom loss can see them together
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')
    # weather_model = Model(input_img, outputs=[out_ae, out_pred])
    mve_model = Model(input_img, outputs=[out_both])
    # mve_loss is a custom loss defined elsewhere in the project
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])
    return mve_model
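mve_loss is not shown in this snippet. Purely for illustration, a mean-variance-estimation style loss over the concatenated output could be structured as follows; this is an assumption about its general form, not the project's actual definition.

# Illustrative sketch of an MVE-style loss over the concatenated 74-dimensional output
# (assumption; the project's real mve_loss is defined elsewhere)
from keras import backend as K

def mve_loss_sketch(y_true, y_pred):
    mean = y_pred[:, :37]     # 'ae_part' head
    log_var = y_pred[:, 37:]  # 'pred_part' head, read here as a log-variance
    target = y_true[:, :37]   # assume the 37 true values sit in the first half of y_true
    return K.mean(0.5 * log_var + 0.5 * K.square(target - mean) / K.exp(log_var))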
Example 3: build_discriminator
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def build_discriminator(self):
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)
Example 4: create_model
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')

    # BiLSTM branch
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)

    # CNN branch
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)

    # Concatenate the two branches along the feature axis and decode with a CRF
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)

    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
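CRF is not part of core Keras; it typically comes from the keras_contrib package, and the globals used above (length, vocab, chunk_tags, EMBED_DIM, DROPOUT_RATE, FILTERS, HALF_WIN_SIZE, DENSE_DIM) are defined elsewhere in the original file. The imports this snippet most likely relies on, stated as an assumption:

# Assumed imports for this snippet (the original file defines/configures the rest elsewhere)
from keras.models import Model
from keras.layers import (Input, Embedding, Bidirectional, LSTM, Dropout,
                          Conv1D, TimeDistributed, Dense)
from keras.optimizers import Adam
from keras_contrib.layers import CRF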
Example 5: fire_module
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire' + str(fire_id) + '/'
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    # Squeeze: 1x1 convolution that reduces the number of channels
    x = Conv2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    # Expand: parallel 1x1 and 3x3 convolutions
    left = Conv2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Conv2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    # Concatenate the two expand branches along the channel axis
    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x
# Original SqueezeNet from paper.
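The name fragments sq1x1, exp1x1, exp3x3 and relu used above are module-level string constants from the surrounding SqueezeNet implementation; they are not shown in the snippet. In the commonly used Keras SqueezeNet port they are defined roughly as follows (an assumption, included only so the snippet reads standalone):

# Assumed layer-name constants (not shown in the snippet above)
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"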
Example 6: preprocess
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def preprocess(x):
    return K.concatenate([
        x[:, :, 0:1] / 360.0,
        x[:, :, 1:3],
        x[:, :, 3:4] / 360.0,
        x[:, :, 4:6],
        x[:, :, 6:18] / 360.0,
        x[:, :, 18:19] - x[:, :, 1:2],
        x[:, :, 19:22],
        x[:, :, 28:29] - x[:, :, 1:2],
        x[:, :, 29:30],
        x[:, :, 30:31] - x[:, :, 1:2],
        x[:, :, 31:32],
        x[:, :, 32:33] - x[:, :, 1:2],
        x[:, :, 33:34],
        x[:, :, 34:35] - x[:, :, 1:2],
        x[:, :, 35:41],
    ], axis=2)
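This preprocess function works on backend tensors with K.concatenate rather than the concatenate layer, so it would normally be applied through a Lambda layer. A minimal sketch, assuming inputs with 41 features per time step (a width inferred from the slicing above):

# Minimal sketch: applying the backend-level preprocess inside a model
from keras.layers import Input, Lambda
from keras.models import Model

seq_in = Input(shape=(None, 41))        # (time steps, 41 features) -- assumed width
normalized = Lambda(preprocess)(seq_in)  # drops/rescales columns as defined above
feature_model = Model(seq_in, normalized)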
Example 7: _pad
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def _pad(self, input):
    """
    Pads the network output so y_pred and y_true have the same dimensions.
    :param input: previous layer
    :return: layer, with the last dimension padded by 4
    """
    # pad = K.placeholder((None, self.config.ANCHORS, 4))
    # pad = np.zeros((self.config.BATCH_SIZE, self.config.ANCHORS, 4))
    # return K.concatenate([input, pad], axis=-1)
    padding = np.zeros((3, 2))
    padding[2, 1] = 4  # append 4 zeros at the end of the last dimension
    return tf.pad(input, padding, "CONSTANT")

# loss function to optimize
Example 8: fire_module
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire' + str(fire_id) + '/'
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x
# Original SqueezeNet from paper.
Example 9: unet
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def unet(x_in, pose_in, nf_enc, nf_dec):
    # Encoder: downsample the source image and inject the pose maps early on
    x0 = my_conv(x_in, nf_enc[0], ks=7)  # 256
    x1 = my_conv(x0, nf_enc[1], strides=2)  # 128
    x2 = concatenate([x1, pose_in])
    x3 = my_conv(x2, nf_enc[2])
    x4 = my_conv(x3, nf_enc[3], strides=2)  # 64
    x5 = my_conv(x4, nf_enc[4])
    x6 = my_conv(x5, nf_enc[5], strides=2)  # 32
    x7 = my_conv(x6, nf_enc[6])
    x8 = my_conv(x7, nf_enc[7], strides=2)  # 16
    x9 = my_conv(x8, nf_enc[8])
    x10 = my_conv(x9, nf_enc[9], strides=2)  # 8
    x = my_conv(x10, nf_enc[10])

    # Decoder: upsample and concatenate with the matching encoder skip connection
    skips = [x9, x7, x5, x3, x0]
    filters = [nf_enc[10], nf_dec[0], nf_dec[1], nf_dec[2], nf_enc[3]]
    for i in range(5):
        out_sz = 8 * (2 ** (i + 1))
        x = Lambda(interp_upsampling, output_shape=(out_sz, out_sz, filters[i]))(x)
        x = concatenate([x, skips[i]])
        x = my_conv(x, nf_dec[i])

    return x
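my_conv and interp_upsampling are helper functions defined elsewhere in the same project. Purely as an illustration of how such a helper might look (an assumption, not the project's actual code), my_conv could be a Conv2D block with 'same' padding and a configurable activation:

# Hypothetical stand-in for the project's my_conv helper (assumption only)
from keras.layers import Conv2D, LeakyReLU, Activation

def my_conv_sketch(x_in, nf, ks=3, strides=1, activation='lrelu'):
    x = Conv2D(nf, kernel_size=ks, padding='same', strides=strides)(x_in)
    if activation == 'lrelu':
        x = LeakyReLU(0.2)(x)
    else:
        x = Activation(activation)(x)
    return x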
Example 10: network_unet
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def network_unet(param):
    n_joints = param['n_joints']
    pose_dn = param['posemap_downsample']
    img_h = param['IMG_HEIGHT']
    img_w = param['IMG_WIDTH']

    src_in = Input(shape=(img_h, img_w, 3))
    # Integer division so the pose-map shape stays integral under Python 3
    pose_src = Input(shape=(img_h // pose_dn, img_w // pose_dn, n_joints))
    pose_tgt = Input(shape=(img_h // pose_dn, img_w // pose_dn, n_joints))

    # Source and target pose maps are concatenated and fed into the U-Net together with the image
    x = unet(src_in, concatenate([pose_src, pose_tgt]), [64] + [128] * 3 + [256] * 7,
             [256, 256, 256, 128, 64])
    y = my_conv(x, 3, activation='tanh')

    model = Model(inputs=[src_in, pose_src, pose_tgt], outputs=[y])
    return model
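A rough usage sketch for network_unet; the dictionary keys match the snippet, while the values are made-up placeholders.

# Hypothetical parameter dictionary; values are illustrative only
param = {'n_joints': 14, 'posemap_downsample': 2, 'IMG_HEIGHT': 256, 'IMG_WIDTH': 256}
model = network_unet(param)
model.summary()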
Example 11: CapsuleNet
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example 12: CapsuleNet_v2
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example 13: test_tiny_concat_random
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def test_tiny_concat_random(self):
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6

    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = concatenate([x2, x3])
    x5 = Dense(num_channels)(x4)
    model = Model(inputs=[input_tensor], outputs=[x5])

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model)
Example 14: test_tiny_concat_seq_random
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def test_tiny_concat_seq_random(self):
    np.random.seed(1988)
    max_features = 10
    embedding_dims = 4
    seq_len = 5
    num_channels = 6

    # Define a model
    input_tensor = Input(shape=(seq_len,))
    x1 = Embedding(max_features, embedding_dims)(input_tensor)
    x2 = Embedding(max_features, embedding_dims)(input_tensor)
    x3 = concatenate([x1, x2], axis=1)
    model = Model(inputs=[input_tensor], outputs=[x3])

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model, one_dim_seq_flags=[True])
Example 15: test_shared_vision
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import concatenate [as alias]
def test_shared_vision(self):
    digit_input = Input(shape=(27, 27, 1))
    x = Conv2D(64, (3, 3))(digit_input)
    x = Conv2D(64, (3, 3))(x)
    out = Flatten()(x)
    vision_model = Model(inputs=[digit_input], outputs=[out])

    # then define the tell-digits-apart model
    digit_a = Input(shape=(27, 27, 1))
    digit_b = Input(shape=(27, 27, 1))

    # the vision model will be shared, weights and all
    out_a = vision_model(digit_a)
    out_b = vision_model(digit_b)
    concatenated = concatenate([out_a, out_b])
    out = Dense(1, activation="sigmoid")(concatenated)

    model = Model(inputs=[digit_a, digit_b], outputs=out)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model)