This page collects typical usage examples of the Python method tensorflow.keras.layers.concatenate. If you are unsure what layers.concatenate does or how to call it, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.keras.layers.
The following presents 15 code examples of layers.concatenate, drawn from open-source projects and ordered by popularity.
Example 1: up_stage
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (GroupNormalization is taken
# here from tensorflow-addons; the source project may define its own):
from tensorflow.keras.layers import (Conv3D, SpatialDropout3D,
                                     UpSampling3D, concatenate)
from tensorflow_addons.layers import GroupNormalization

def up_stage(inputs, skip, filters, kernel_size=3,
             activation="relu", padding="SAME"):
    # Upsample, reduce depth, then fuse with the encoder skip connection
    up = UpSampling3D()(inputs)
    up = Conv3D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)
    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(merge)
    conv = GroupNormalization()(conv)
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    # training=True keeps dropout active at inference time (Monte Carlo dropout)
    conv = SpatialDropout3D(0.5)(conv, training=True)
    return conv
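Because SpatialDropout3D is called with training=True, dropout stays stochastic at inference time, which enables Monte Carlo uncertainty estimates. A minimal sketch, where unet3d stands for any hypothetical model built from these stages:

import numpy as np

# Repeated stochastic forward passes; the spread approximates model uncertainty
mc_samples = np.stack([unet3d.predict(volume_batch) for _ in range(20)])
mean_pred = mc_samples.mean(axis=0)
uncertainty = mc_samples.std(axis=0)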
Example 2: up_stage
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (2D analog of Example 1):
from tensorflow.keras.layers import (Conv2D, SpatialDropout2D,
                                     UpSampling2D, concatenate)
from tensorflow_addons.layers import GroupNormalization

def up_stage(inputs, skip, filters, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling2D()(inputs)
    up = Conv2D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)
    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(merge)
    conv = GroupNormalization()(conv)
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    # training=True keeps dropout active at inference time (Monte Carlo dropout)
    conv = SpatialDropout2D(0.5)(conv, training=True)
    return conv
Example 3: up_stage
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet:
import tensorflow_probability as tfp
from tensorflow.keras.layers import UpSampling2D, concatenate
from tensorflow_addons.layers import GroupNormalization

def up_stage(inputs, skip, filters, prior_fn, kernel_size=3,
             activation="relu", padding="SAME"):
    # Bayesian variant: Flipout convolutions sample weights from a posterior
    up = UpSampling2D()(inputs)
    up = tfp.layers.Convolution2DFlipout(filters, 2,
                                         activation=activation,
                                         padding=padding,
                                         kernel_prior_fn=prior_fn)(up)
    up = GroupNormalization()(up)
    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)
    conv = tfp.layers.Convolution2DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(merge)
    conv = GroupNormalization()(conv)
    conv = tfp.layers.Convolution2DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(conv)
    conv = GroupNormalization()(conv)
    return conv
Example 4: up_stage
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (3D analog of Example 3):
import tensorflow_probability as tfp
from tensorflow.keras.layers import UpSampling3D, concatenate
from tensorflow_addons.layers import GroupNormalization

def up_stage(inputs, skip, filters, prior_fn, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling3D()(inputs)
    up = tfp.layers.Convolution3DFlipout(filters, 2,
                                         activation=activation,
                                         padding=padding,
                                         kernel_prior_fn=prior_fn)(up)
    up = GroupNormalization()(up)
    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)
    conv = tfp.layers.Convolution3DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(merge)
    conv = GroupNormalization()(conv)
    conv = tfp.layers.Convolution3DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(conv)
    conv = GroupNormalization()(conv)
    return conv
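The prior_fn passed to the Flipout layers in Examples 3 and 4 must follow the signature tfp.layers expects for kernel_prior_fn. A minimal sketch of a fixed standard-normal prior, modeled on tfp.layers.default_multivariate_normal_fn:

import tensorflow as tf
import tensorflow_probability as tfp

def prior_fn(dtype, shape, name, trainable, add_variable_fn):
    # A fixed (non-trainable) standard-normal prior over the kernel weights
    del name, trainable, add_variable_fn  # unused by a fixed prior
    dist = tfp.distributions.Normal(loc=tf.zeros(shape, dtype),
                                    scale=dtype.as_numpy_dtype(1))
    return tfp.distributions.Independent(
        dist, reinterpreted_batch_ndims=tf.size(dist.batch_shape_tensor()))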
Example 5: attention_3d_block
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet:
from tensorflow.keras.layers import Activation, Dense, Lambda, concatenate, dot

def attention_3d_block(hidden_states):
    """
    Many-to-one attention mechanism for Keras.
    @param hidden_states: 3D tensor with shape (batch_size, time_steps, input_dim).
    @return: 2D tensor with shape (batch_size, 128)
    @author: felixhao28.
    """
    hidden_size = int(hidden_states.shape[2])
    # Inside dense layer: hidden_states dot W => score_first_part
    # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size)
    #     => (batch_size, time_steps, hidden_size)
    # W is the trainable weight matrix of attention (Luong's multiplicative style score)
    score_first_part = Dense(hidden_size, use_bias=False,
                             name='attention_score_vec')(hidden_states)
    # score_first_part dot last_hidden_state => attention_weights
    # (batch_size, time_steps, hidden_size) dot (batch_size, hidden_size)
    #     => (batch_size, time_steps)
    h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,),
                 name='last_hidden_state')(hidden_states)
    score = dot([score_first_part, h_t], [2, 1], name='attention_score')
    attention_weights = Activation('softmax', name='attention_weight')(score)
    # (batch_size, time_steps, hidden_size) dot (batch_size, time_steps)
    #     => (batch_size, hidden_size)
    context_vector = dot([hidden_states, attention_weights], [1, 1],
                         name='context_vector')
    pre_activation = concatenate([context_vector, h_t], name='attention_output')
    attention_vector = Dense(128, use_bias=False, activation='tanh',
                             name='attention_vector')(pre_activation)
    return attention_vector
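A hypothetical wiring of the block: attend over the hidden states of a recurrent encoder that returns full sequences.

from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.models import Model

inp = Input(shape=(20, 8))                     # (time_steps, input_dim)
states = LSTM(64, return_sequences=True)(inp)  # (batch, 20, 64)
att = attention_3d_block(states)               # (batch, 128)
out = Dense(1, activation='sigmoid')(att)
model = Model(inp, out)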
Example 6: test_shape_1
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (get_activations is presumably
# keract's; the test appears to come from that project's suite):
import numpy as np
from tensorflow.keras.layers import Dense, Input, concatenate
from tensorflow.keras.models import Model
from keract import get_activations

def test_shape_1(self):
    # model definition
    i1 = Input(shape=(10,), name='i1')
    i2 = Input(shape=(10,), name='i2')
    a = Dense(1, name='fc1')(i1)
    b = Dense(1, name='fc2')(i2)
    c = concatenate([a, b], name='concat')
    d = Dense(1, name='out')(c)
    model = Model(inputs=[i1, i2], outputs=[d])
    # inputs to the model
    x = [np.random.uniform(size=(32, 10)),
         np.random.uniform(size=(32, 10))]
    # call to fetch the activations of the model
    activations = get_activations(model, x, auto_compile=True)
    # activations is an OrderedDict, so .values() preserves layer order
    self.assertListEqual([a.shape for a in activations.values()],
                         [(32, 10), (32, 10), (32, 1), (32, 1), (32, 2), (32, 1)])
Example 7: test_inputs_order
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (get_activations is presumably
# keract's, as in Example 6):
import numpy as np
from tensorflow.keras.layers import Dense, Input, concatenate
from tensorflow.keras.models import Model
from keract import get_activations

def test_inputs_order(self):
    # Model inputs are declared in a different order than the Input layers
    i10 = Input(shape=(10,), name='i1')
    i40 = Input(shape=(40,), name='i4')
    i30 = Input(shape=(30,), name='i3')
    i20 = Input(shape=(20,), name='i2')
    a = Dense(1, name='fc1')(concatenate([i10, i40, i30, i20], name='concat'))
    model = Model(inputs=[i40, i30, i20, i10], outputs=[a])
    x = [
        np.random.uniform(size=(1, 40)),
        np.random.uniform(size=(1, 30)),
        np.random.uniform(size=(1, 20)),
        np.random.uniform(size=(1, 10))
    ]
    acts = get_activations(model, x)
    # Activations must be keyed by layer name regardless of input order
    self.assertListEqual(list(acts['i1'].shape), [1, 10])
    self.assertListEqual(list(acts['i2'].shape), [1, 20])
    self.assertListEqual(list(acts['i3'].shape), [1, 30])
    self.assertListEqual(list(acts['i4'].shape), [1, 40])
Example 8: expanding_layer_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet:
from tensorflow.keras.layers import (BatchNormalization, Conv2D,
                                     Conv2DTranspose, concatenate)

def expanding_layer_2D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    # Upsample via transposed convolution, then fuse with the contracting-path link
    up = concatenate([Conv2DTranspose(neurons, (2, 2), strides=(2, 2),
                                      padding='same')(input),
                      concatenate_link], axis=-1)
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(up)
    if ba_norm:
        conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conc1 = concatenate([up, conv1], axis=-1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conc1)
    if ba_norm:
        conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    conc2 = concatenate([up, conv2], axis=-1)
    return conc2
Example 9: expanding_layer_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet:
from tensorflow.keras.layers import (BatchNormalization, Conv2D,
                                     Conv2DTranspose, add, concatenate)

def expanding_layer_2D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    # Upsample via transposed convolution, then fuse with the contracting-path link
    up = concatenate([Conv2DTranspose(neurons, (2, 2), strides=(2, 2),
                                      padding='same')(input),
                      concatenate_link], axis=-1)
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(up)
    if ba_norm:
        conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conv1)
    if ba_norm:
        conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    # A 1x1 projection of the fused tensor serves as a residual shortcut
    shortcut = Conv2D(neurons, (1, 1), activation='relu', padding="same")(up)
    add_layer = add([shortcut, conv2])
    return add_layer
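Examples 8 and 9 differ only in how the expanded features are fused: Example 8 re-concatenates the upsampled tensor after every convolution (a dense, DenseNet-style linkage that grows the channel count), while Example 9 projects the upsampled tensor through a 1x1 convolution and adds it to the convolution output (a residual shortcut that keeps the channel count fixed).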
Example 10: fire_module
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (sq1x1, relu, exp1x1 and exp3x3
# are module-level name constants; see the sketch after this example):
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, Conv2D, concatenate

def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire' + str(fire_id) + '/'
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3
    # Squeeze: 1x1 bottleneck
    x = Conv2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)
    # Expand: parallel 1x1 and 3x3 branches, concatenated along channels
    left = Conv2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)
    right = Conv2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)
    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x
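The name fragments sq1x1, relu, exp1x1 and exp3x3 are module-level constants in the original SqueezeNet code; plausible definitions (an assumption matching common Keras SqueezeNet ports), plus a stacking sketch:

sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"

# Fire modules are typically stacked, widening as the network deepens
x = fire_module(x, fire_id=2, squeeze=16, expand=64)
x = fire_module(x, fire_id=3, squeeze=16, expand=64)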
Example 11: rel_to_abs
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion import assumed by this snippet (the function is a method of an
# attention layer class, hence `self`):
from tensorflow.keras import backend as K

def rel_to_abs(self, x):
    # Convert relative-position logits (B, Nh, L, 2L - 1) into
    # absolute-position logits (B, Nh, L, L) via padding and reshaping
    shape = K.shape(x)
    shape = [shape[i] for i in range(3)]
    B, Nh, L = shape
    col_pad = K.zeros(K.stack([B, Nh, L, 1]))
    x = K.concatenate([x, col_pad], axis=3)
    flat_x = K.reshape(x, [B, Nh, L * 2 * L])
    flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
    flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
    final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
    final_x = final_x[:, :, :L, L - 1:]
    return final_x
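The padding-and-reshape trick recovers absolute-position logits from relative ones without an explicit gather. A shape trace for L = 3 (B and Nh arbitrary):

# x:              (B, Nh, 3, 5)   # 2L - 1 = 5 relative offsets
# after col_pad:  (B, Nh, 3, 6)
# flat_x:         (B, Nh, 18)     # L * 2L = 18
# after flat_pad: (B, Nh, 20)     # padded by L - 1 = 2
# final reshape:  (B, Nh, 4, 5)   # (L + 1, 2L - 1)
# final slice:    (B, Nh, 3, 3)   # absolute-position logits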
Example 12: augmented_conv2d
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (_conv_layer, _normalize_depth_vars
# and AttentionAugmentation2D are helpers defined in the same source module):
from tensorflow.keras import backend as K
from tensorflow.keras.layers import BatchNormalization, concatenate

def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8,
                     relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)
    # Standard convolution branch supplies the remaining output filters
    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)
    # Augmented attention branch: one 1x1 conv computes q, k and v jointly
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads,
                                       relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)
    output = concatenate([conv_out, attn_out], axis=channel_axis)
    output = BatchNormalization()(output)
    return output
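A hypothetical call, assuming _conv_layer, _normalize_depth_vars and AttentionAugmentation2D from the same source module are in scope:

from tensorflow.keras.layers import Input

ip = Input(shape=(32, 32, 3))
x = augmented_conv2d(ip, filters=32, kernel_size=(3, 3),
                     depth_k=0.25, depth_v=0.25, num_heads=4)
# depth_k and depth_v each resolve to 8 filters (0.25 * 32), and
# 8 // 4 heads = 2 > 0, satisfying the num_heads constraint.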
Example 13: build_generator
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (the `gan` helper module comes
# from the same project):
from tensorflow.keras.layers import BatchNormalization, Dense, concatenate
from tensorflow.keras.models import Model
import gan

def build_generator(latent_codes, image_size, feature1_dim=256):
    """Build Generator Model sub networks

    Two sub networks: 1) Class and noise to feature1
        (intermediate feature)
        2) feature1 to image

    # Arguments
        latent_codes (Layers): discrete code (labels),
            noise and feature1 features
        image_size (int): Target size of one side
            (assuming square image)
        feature1_dim (int): feature1 dimensionality

    # Returns
        gen0, gen1 (Models): Description below
    """
    # Latent codes and network parameters
    labels, z0, z1, feature1 = latent_codes
    # image_resize = image_size // 4
    # kernel_size = 5
    # layer_filters = [128, 64, 32, 1]

    # gen1 inputs
    inputs = [labels, z1]      # 10 + 50 = 62-dim
    x = concatenate(inputs, axis=1)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    fake_feature1 = Dense(feature1_dim, activation='relu')(x)
    # gen1: classes and noise (feature2 + z1) to feature1
    gen1 = Model(inputs, fake_feature1, name='gen1')
    # gen0: feature1 + z0 to feature0 (image)
    gen0 = gan.generator(feature1, image_size, codes=z0)
    return gen0, gen1
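A plausible way to assemble latent_codes for the call above; the 10- and 50-dimensional splits follow the "10 + 50 = 62-dim" comment in the code, while the remaining sizes (and image_size=28, MNIST-like) are assumptions:

from tensorflow.keras.layers import Input

labels = Input(shape=(10,), name='labels')       # discrete class code
z0 = Input(shape=(50,), name='z0')               # noise fed to gen0
z1 = Input(shape=(50,), name='z1')               # noise fed to gen1
feature1 = Input(shape=(256,), name='feature1')  # matches feature1_dim
gen0, gen1 = build_generator([labels, z0, z1, feature1], image_size=28)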
Example 14: decoder_layer
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (InstanceNormalization is taken
# here from tensorflow-addons; the source project may use another provider):
from tensorflow.keras.layers import (Activation, Conv2DTranspose,
                                     LeakyReLU, concatenate)
from tensorflow_addons.layers import InstanceNormalization

def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2DTranspose-IN-LeakyReLU.
    IN is optional, LeakyReLU may be replaced by ReLU.

    Arguments: (partial)
        inputs (tensor): the decoder layer input
        paired_inputs (tensor): the encoder layer output
            provided by U-Net skip connection &
            concatenated to inputs.
    """
    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')
    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x
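A sketch of how decoder_layer chains skip connections in a U-Net style generator; the encoder tensors below (bottleneck, enc1 to enc3) are hypothetical:

# Each call upsamples 2x and fuses the matching encoder output
d1 = decoder_layer(bottleneck, enc3, filters=128)
d2 = decoder_layer(d1, enc2, filters=64)
d3 = decoder_layer(d2, enc1, filters=32, instance_norm=False)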
Example 15: create_model_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import concatenate [as alias]
# Companion imports assumed by this snippet (contracting_layer_2D,
# middle_layer_2D and expanding_layer_2D are subroutines from the same
# source module; the function is a method of an architecture class):
from tensorflow.keras.layers import Conv2D, Input
from tensorflow.keras.models import Model

def create_model_2D(self, input_shape, n_labels=2):
    # Input layer
    inputs = Input(input_shape)
    # Start the CNN model chain with the inputs as the first tensor
    cnn_chain = inputs
    # Cache contracting normalized conv layers
    # for later copy & concatenate links
    contracting_convs = []
    # Contracting layers
    for i in range(0, self.depth):
        neurons = self.n_filters * 2**i
        cnn_chain, last_conv = contracting_layer_2D(cnn_chain, neurons,
                                                    self.ba_norm,
                                                    self.ba_norm_momentum)
        contracting_convs.append(last_conv)
    # Middle layer
    neurons = self.n_filters * 2**self.depth
    cnn_chain = middle_layer_2D(cnn_chain, neurons, self.ba_norm,
                                self.ba_norm_momentum)
    # Expanding layers
    for i in reversed(range(0, self.depth)):
        neurons = self.n_filters * 2**i
        cnn_chain = expanding_layer_2D(cnn_chain, neurons,
                                       contracting_convs[i], self.ba_norm,
                                       self.ba_norm_momentum)
    # Output layer
    conv_out = Conv2D(n_labels, (1, 1),
                      activation=self.activation)(cnn_chain)
    # Create the model with its associated input and output layers
    model = Model(inputs=[inputs], outputs=[conv_out])
    return model
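A usage sketch, assuming create_model_2D is a method of an architecture class exposing the attributes it reads (depth, n_filters, ba_norm, ba_norm_momentum, activation); the class name here is hypothetical:

arch = StandardUNet(depth=4, n_filters=32, ba_norm=True,
                    ba_norm_momentum=0.99, activation='softmax')
model = arch.create_model_2D(input_shape=(256, 256, 1), n_labels=3)
model.summary()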