本文整理汇总了Python中tensorflow.keras.layers.Concatenate方法的典型用法代码示例。如果您正苦于以下问题:Python layers.Concatenate方法的具体用法?Python layers.Concatenate怎么用?Python layers.Concatenate使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.keras.layers
的用法示例。
在下文中一共展示了layers.Concatenate方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_single_ddpg_input
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def test_single_ddpg_input():
    """Smoke-test DDPGAgent end-to-end with a single (non-list) observation input."""
    n_actions = 2

    # Actor: flatten the (2, 3) observation and map it straight to actions.
    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(n_actions))

    # Critic: scores the action together with the flattened observation.
    action_in = Input(shape=(n_actions,), name='action_input')
    obs_in = Input(shape=(2, 3), name='observation_input')
    merged = Concatenate()([action_in, Flatten()(obs_in)])
    q_value = Dense(1)(merged)
    critic = Model(inputs=[action_in, obs_in], outputs=q_value)

    agent = DDPGAgent(
        actor=actor,
        critic=critic,
        critic_action_input=action_in,
        memory=SequentialMemory(limit=10, window_length=2),
        nb_actions=2,
        nb_steps_warmup_critic=5,
        nb_steps_warmup_actor=5,
        batch_size=4,
    )
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
示例2: _build_q_NN
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def _build_q_NN(self):
    """Build and compile the Q-network: (state, action) -> scalar advantage."""
    state_in = Input(shape=(self.observation_size,))
    action_in = Input(shape=(self.action_size,))

    # Join state and action into a single feature vector, then apply
    # three ReLU-activated dense layers of decreasing/fixed width.
    hidden = Concatenate()([state_in, action_in])
    for width in (self.observation_size, self.observation_size, 2 * self.action_size):
        hidden = Dense(width)(hidden)
        hidden = Activation('relu')(hidden)

    advantage = Dense(1, activation='linear')(hidden)

    model = Model(inputs=[state_in, action_in], outputs=[advantage])
    model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    return model
示例3: incorporate_embeddings
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def incorporate_embeddings(self, x):
    """Puts relevant data through embedding layers and then concatenates the
    result with the rest of the data, ready for the hidden layers."""
    # Embed each designated column with its matching embedding layer.
    embedded_parts = [
        self.embedding_layers[layer_ix](x[:, column])
        for layer_ix, column in enumerate(self.columns_of_data_to_be_embedded)
    ]
    # Merge per-column embeddings into one tensor (no-op for a single column).
    if len(embedded_parts) > 1:
        embedded = Concatenate(axis=1)(embedded_parts)
    else:
        embedded = embedded_parts[0]

    plain_columns = [
        col for col in range(x.shape[1])
        if col not in self.columns_of_data_to_be_embedded
    ]
    if not plain_columns:
        return embedded
    # Keep the non-embedded columns (cast to float) alongside the embeddings.
    plain = tf.gather(x, plain_columns, axis=1)
    return Concatenate(axis=1)([tf.dtypes.cast(plain, float), embedded])
示例4: incorporate_embeddings
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def incorporate_embeddings(self, x):
    """Puts relevant data through embedding layers and then concatenates the
    result with the rest of the data, ready for the hidden layers.

    Sequence variant: x is indexed as [batch, timestep, column] and the
    concatenation happens along the feature axis (axis=2).
    """
    # Embed each designated column with its matching embedding layer.
    embedded_parts = [
        self.embedding_layers[layer_ix](x[:, :, column])
        for layer_ix, column in enumerate(self.columns_of_data_to_be_embedded)
    ]
    # Merge per-column embeddings into one tensor (no-op for a single column).
    if len(embedded_parts) > 1:
        embedded = Concatenate(axis=2)(embedded_parts)
    else:
        embedded = embedded_parts[0]

    plain_columns = [
        col for col in range(x.shape[2])
        if col not in self.columns_of_data_to_be_embedded
    ]
    if not plain_columns:
        return embedded
    # Keep the non-embedded columns (cast to float) alongside the embeddings.
    plain = tf.gather(x, plain_columns, axis=2)
    return Concatenate(axis=2)([tf.dtypes.cast(plain, float), embedded])
示例5: process_output_layers
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def process_output_layers(self, x, restricted_to_final_seq):
    """Puts the data x through all the output layers and concatenates
    their activated results into a single output tensor."""
    out = None
    for layer_ix, layer in enumerate(self.output_layers):
        # Dense heads only see the final timestep when configured to return
        # the final sequence element; slice once, then remember we did.
        if (type(layer) == Dense
                and self.return_final_seq_only
                and not restricted_to_final_seq):
            x = x[:, -1, :]
            restricted_to_final_seq = True

        piece = layer(x)
        activation = self.get_activation(self.output_activation, layer_ix)
        piece = activation(piece)

        if out is None:
            out = piece
        else:
            # Concatenate along the feature axis: axis 1 once the sequence
            # dimension has been sliced away, axis 2 otherwise.
            axis = 1 if restricted_to_final_seq else 2
            out = Concatenate(axis=axis)([out, piece])
    return out
示例6: load
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def load(input_shape, output_shape, cfg):
    """Build an LSTM regression model whose head emits [std, mu].

    The std branch is passed through the activation named in
    cfg['activation_function'] before being concatenated with mu.
    """
    n_lstm_states = int(cfg['nb_lstm_states'])

    inputs = KL.Input(shape=input_shape)
    x = KL.CuDNNLSTM(units=n_lstm_states, unit_forget_bias=True)(inputs)

    # Two fully-connected blocks with increasing dropout.
    for units, drop_rate in ((512, 0.2), (256, 0.3)):
        x = KL.Dense(units)(x)
        x = KL.Activation('relu')(x)
        x = KL.Dropout(drop_rate)(x)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)
    act_fn = get_activation_function_by_name(cfg['activation_function'])
    std = KL.Activation(act_fn, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])
    return KM.Model(inputs=[inputs], outputs=[output])
示例7: __init__
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def __init__(self, growth_rate=64, bottleneck_factor=1, **kwargs):
    """Dense-block convolution unit: a 1x1 bottleneck followed by a 3x3 conv.

    Args:
        growth_rate: number of output filters of the 3x3 convolution.
        bottleneck_factor: multiplier on growth_rate for the 1x1
            bottleneck's filter count.
        **kwargs: forwarded to the base Layer constructor (name, dtype, ...).
    """
    # Bug fix: the original commented-out call passed `self` twice
    # (`super(DenseConv2D, self).__init__(self, **kwargs)`) and so the base
    # Layer was never initialized; without it Keras cannot track this
    # layer's name, weights, or sublayers.
    super().__init__(**kwargs)
    self.concat = Concatenate()
    bottleneck_filters = int(np.round(growth_rate * bottleneck_factor))
    # 1x1 conv reduces channel count before the more expensive 3x3 conv.
    self.bottleneck_1x1 = layers.Conv2D(
        bottleneck_filters,
        (1, 1),
        padding="same",
        activation="selu",
        kernel_initializer="lecun_normal",
    )
    self.conv_3x3 = layers.Conv2D(
        growth_rate,
        (3, 3),
        padding="same",
        activation="selu",
        kernel_initializer="lecun_normal",
    )
示例8: call
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def call(self, inputs):
    """Forward pass: initial 7x7 conv + pool, then a chain of dense blocks
    with transition-down stages; a compressed residual from every stage is
    concatenated into the final output."""
    stage = [self.pool_input(inputs), self.conv_7x7(inputs)]
    residuals = []

    for idx in range(self.n_downsample - 1):
        stage = self.dense_conv[idx](stage)
        stage = [Concatenate()(stage)]
        # Pool this stage's features so they match the final spatial size.
        pooled = self.pooled_outputs[idx](stage)
        residuals.append(Concatenate()(pooled))
        stage = self.transition_down[idx](stage)

    # Final dense block is not followed by a transition-down.
    stage = self.dense_conv[-1](stage)
    residuals.append(Concatenate()(stage))

    # Channel-compress every residual before the final merge.
    residuals = [
        Compression(self.compression_factor)(res) for res in residuals
    ]
    return [Concatenate()(residuals)]
示例9: build
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def build(self, hp, inputs=None):
    """Merge the input nodes into a single tensor by addition or concatenation.

    The merge type is either fixed via self.merge_type or tuned as the
    'merge_type' hyperparameter. Shape-incompatible inputs are flattened
    first; addition is only used when all shapes match exactly.
    """
    inputs = nest.flatten(inputs)
    if len(inputs) == 1:
        return inputs
    merge_type = self.merge_type or hp.Choice('merge_type',
                                              ['add', 'concatenate'],
                                              default='add')
    # Flatten everything when the shapes cannot be merged directly.
    if not all([shape_compatible(input_node.shape, inputs[0].shape) for
                input_node in inputs]):
        new_inputs = []
        for input_node in inputs:
            new_inputs.append(Flatten().build(hp, input_node))
        inputs = new_inputs
    # TODO: Even inputs have different shape[-1], they can still be Add(
    # ) after another layer. Check if the inputs are all of the same
    # shape
    if all([input_node.shape == inputs[0].shape for input_node in inputs]):
        if merge_type == 'add':
            # Bug fix: instantiate the layer, then call it on the tensors.
            # The original `layers.Add(inputs)` passed the tensor list as
            # constructor arguments and never applied the op.
            return layers.Add()(inputs)
    return layers.Concatenate()(inputs)
示例10: yolo2_predictions
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def yolo2_predictions(feature_maps, feature_channel_nums, num_anchors, num_classes):
    """Build the YOLOv2 prediction head from two backbone feature maps."""
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    # Deep path: two 3x3 conv blocks on the coarse feature map f1.
    deep_path = compose(
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)),
        DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3)))(f1)

    # Passthrough path: squeeze f2 down to f2_channel_num//8 first, then
    # space_to_depth expands it back to f2_channel_num//2 at f1's resolution.
    passthrough = DarknetConv2D_BN_Leaky(f2_channel_num//8, (1, 1))(f2)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    passthrough = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(passthrough)

    merged = Concatenate()([passthrough, deep_path])
    merged = DarknetConv2D_BN_Leaky(f1_channel_num, (3, 3))(merged)
    return DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                         name='predict_conv')(merged)
示例11: resblock_body
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def resblock_body(x, num_filters, num_blocks, all_narrow=True):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    branch_filters = num_filters//2 if all_narrow else num_filters

    # Darknet uses left and top padding instead of 'same' mode.
    x = ZeroPadding2D(((1,0),(1,0)))(x)
    x = DarknetConv2D_BN_Mish(num_filters, (3,3), strides=(2,2))(x)

    # CSP split: one 1x1 branch bypasses the residual stack entirely.
    shortcut = DarknetConv2D_BN_Mish(branch_filters, (1,1))(x)
    x = DarknetConv2D_BN_Mish(branch_filters, (1,1))(x)

    for _ in range(num_blocks):
        residual = compose(
            DarknetConv2D_BN_Mish(num_filters//2, (1,1)),
            DarknetConv2D_BN_Mish(branch_filters, (3,3)))(x)
        x = Add()([x, residual])

    x = DarknetConv2D_BN_Mish(branch_filters, (1,1))(x)
    x = Concatenate()([x, shortcut])
    return DarknetConv2D_BN_Mish(num_filters, (1,1))(x)
示例12: residual_block_id
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def residual_block_id(self, tensor, feature_n, name=None):
    """Downsampling residual block with a channel-padded identity shortcut.

    Main path: 3x3 stride-2 depthwise conv -> 1x1 conv to feature_n channels.
    Shortcut:  stride-2 max-pool of the input, channel-padded by
               concatenating a zero-initialized stride-2 conv of
               feature_n // 2 channels.

    Args:
        tensor: input feature map.
        feature_n: number of output channels of the main path.
        name: optional prefix for the named conv layers.

    Returns:
        ReLU activation of (shortcut + main path).
    """
    if name is not None:
        depconv_1 = DepthwiseConv2D(3, 2, padding='same', name=name+"/dconv")(tensor)
        conv_2 = Conv2D(feature_n, 1, name=name+"/conv")(depconv_1)
    else:
        depconv_1 = DepthwiseConv2D(3, 2, padding='same')(tensor)
        conv_2 = Conv2D(feature_n, 1)(depconv_1)
    maxpool_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(tensor)
    # Bug fix: Conv2D's `filters` must be an integer — `feature_n / 2` is a
    # float in Python 3; use floor division instead.
    conv_zeros = Conv2D(feature_n // 2, 2, strides=2, use_bias=False,
                        kernel_initializer=tf.zeros_initializer())(tensor)
    padding_1 = Concatenate(axis=-1)([maxpool_1, conv_zeros])
    add = Add()([padding_1, conv_2])
    relu = ReLU()(add)
    return relu
#def feature_padding(self,tensor,channels_n=0):
# #pad = tf.keras.layers.ZeroPadding2D(((0,0),(0,0),(0,tensor.shape[3])))(tensor)
# return Concatenate(axis=3)([tensor,pad])
示例13: call
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def call(self, x):
    """UNet++ forward pass over the nested skip-connection grid.

    Node x{i}_{j} at depth i and column j fuses all same-depth predecessors
    x{i}_{0..j-1} with the upsampled node from one level deeper.
    """
    # Encoder backbone node at depth 0.
    n00 = self.conv0_0(x)

    # Each step: go one level deeper, then fill the anti-diagonal of the
    # grid back up to depth 0 using dense skip connections.
    n10 = self.conv1_0(self.pool(n00))
    n01 = self.conv0_1(Concatenate()([n00, self.Up(n10)]))

    n20 = self.conv2_0(self.pool(n10))
    n11 = self.conv1_1(Concatenate()([n10, self.Up(n20)]))
    n02 = self.conv0_2(Concatenate()([n00, n01, self.Up(n11)]))

    n30 = self.conv3_0(self.pool(n20))
    n21 = self.conv2_1(Concatenate()([n20, self.Up(n30)]))
    n12 = self.conv1_2(Concatenate()([n10, n11, self.Up(n21)]))
    n03 = self.conv0_3(Concatenate()([n00, n01, n02, self.Up(n12)]))

    n40 = self.conv4_0(self.pool(n30))
    n31 = self.conv3_1(Concatenate()([n30, self.Up(n40)]))
    n22 = self.conv2_2(Concatenate()([n20, n21, self.Up(n31)]))
    n13 = self.conv1_3(Concatenate()([n10, n11, n12, self.Up(n22)]))
    n04 = self.conv0_4(Concatenate()([n00, n01, n02, n03, self.Up(n13)]))

    return self.final(n04)
示例14: create_discriminator
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def create_discriminator(self):
    """Build the conditional discriminator: the data and conditional inputs
    are concatenated and scored through a small dense network ending in a
    sigmoid probability."""
    data_in = Input(self.get_data_input_shapes()[0])
    cond_in = Input(self.get_conditional_input_shapes()[0])
    model_inputs = [data_in, cond_in]

    merged = Concatenate(axis=1)(model_inputs)
    hidden = Dense(10, activation=tf.nn.relu)(merged)
    score = Dense(1, activation=tf.sigmoid)(hidden)
    return tf.keras.Model(inputs=model_inputs, outputs=score)
示例15: test_clone_graph_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import Concatenate [as 别名]
def test_clone_graph_model():
    """A cloned multi-input functional model must produce the same
    predictions (shape and values) as the original."""
    input_a = Input(shape=(2,))
    input_b = Input(shape=(3,))
    merged = Dense(8)(Concatenate()([input_a, input_b]))
    original = Model([input_a, input_b], merged)
    original.compile(optimizer='sgd', loss='mse')

    duplicate = clone_model(original)
    duplicate.compile(optimizer='sgd', loss='mse')

    batch = [np.random.random((4, 2)), np.random.random((4, 3))]
    pred_original = original.predict_on_batch(batch)
    pred_duplicate = duplicate.predict_on_batch(batch)
    assert pred_original.shape == pred_duplicate.shape
    assert_allclose(pred_original, pred_duplicate)