This article collects typical usage examples of the Python method tensorflow.keras.layers.add. If you are wondering what layers.add does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from the containing module, tensorflow.keras.layers.
Fifteen code examples of layers.add are shown below, sorted by popularity by default.
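Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below; the layer names are illustrative) of what layers.add does: it sums a list of same-shape tensors element-wise, which is the core of the residual connections used throughout these examples.

import tensorflow as tf
from tensorflow.keras import layers, models

# Two branches with matching output shapes, summed element-wise by layers.add.
inputs = layers.Input(shape=(32,))
branch_a = layers.Dense(16, activation='relu')(inputs)
branch_b = layers.Dense(16, activation='relu')(inputs)
summed = layers.add([branch_a, branch_b])  # element-wise sum; shapes must match
outputs = layers.Dense(1)(summed)
model = models.Model(inputs=inputs, outputs=outputs)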
Example 1: channel_spatial_squeeze_excite
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def channel_spatial_squeeze_excite(input_tensor, ratio=16):
    """Create a concurrent channel and spatial squeeze-excite block.

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio of the channel squeeze-excite block

    Returns: a Keras tensor

    References
    - [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    - [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    cse = squeeze_excite_block(input_tensor, ratio)
    sse = spatial_squeeze_excite_block(input_tensor)
    x = add([cse, sse])
    return x
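The two helpers referenced above are not shown on this page. A minimal sketch of both, following the cited papers; the implementation in the original repository may differ in detail:

from tensorflow.keras.layers import (GlobalAveragePooling2D, Reshape, Dense,
                                     Conv2D, multiply)

def squeeze_excite_block(input_tensor, ratio=16):
    # Channel squeeze-excite (cSE): global pool -> bottleneck MLP -> channel gate.
    filters = input_tensor.shape[-1]
    se = GlobalAveragePooling2D()(input_tensor)
    se = Reshape((1, 1, filters))(se)
    se = Dense(filters // ratio, activation='relu', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', use_bias=False)(se)
    return multiply([input_tensor, se])

def spatial_squeeze_excite_block(input_tensor):
    # Spatial squeeze-excite (sSE): a 1x1 conv produces a per-pixel gate.
    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False)(input_tensor)
    return multiply([input_tensor, se])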
Example 2: add
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def add(self, s, a, r, d, s2):
    """Add an experience to the buffer."""
    # s is the current state, a the action, r the reward,
    # d whether the episode is done, and s2 the next state.
    if np.any(~np.isfinite(s)) or np.any(~np.isfinite(s2)):
        # TODO proper handling of infinite values somewhere !!!!
        return
    experience = (s, a, r, d, s2)
    if self.count < self.buffer_size:
        self.buffer.append(experience)
        self.count += 1
    else:
        self.buffer.popleft()
        self.buffer.append(experience)
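Note that this example matches only by method name (a replay buffer's add), not tensorflow.keras.layers.add. For context, a minimal sketch of the class this method assumes (a deque-backed buffer with the attributes used above); the sample method is a hypothetical helper, not from the original repository:

import random
from collections import deque
import numpy as np

class ReplayBuffer:
    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self.count = 0
        self.buffer = deque()

    def sample(self, batch_size):
        # Uniformly sample a batch of (s, a, r, d, s2) tuples.
        batch = random.sample(list(self.buffer), min(batch_size, self.count))
        s, a, r, d, s2 = map(np.array, zip(*batch))
        return s, a, r, d, s2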
Example 3: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def __init__(self, action_size, observation_size, lr=1e-5,
             training_param=TrainingParam()):
    RLQvalue.__init__(self, action_size, observation_size, lr, training_param)
    # TODO add as meta param the number of "Q" you want to use (here 2)
    # TODO add as meta param size and types of the networks
    self.average_reward = 0
    self.life_spent = 1
    self.qvalue_evolution = np.zeros((0,))
    self.Is_nan = False
    self.model_value_target = None
    self.model_value = None
    self.model_Q = None
    self.model_Q2 = None
    self.model_policy = None
    self.construct_q_network()
Example 4: expanding_layer_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def expanding_layer_2D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv2DTranspose(neurons, (2, 2), strides=(2, 2),
                     padding='same')(input), concatenate_link], axis=-1)
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    shortcut = Conv2D(neurons, (1, 1), activation='relu', padding='same')(up)
    add_layer = add([shortcut, conv2])
    return add_layer
Example 5: residual
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def residual(x, num_filters,
             kernel_size=(3, 3),
             activation='relu',
             pool_strides=(2, 2),
             max_pooling=True):
    """Residual block."""
    if max_pooling:
        res = layers.Conv2D(num_filters, kernel_size=(1, 1),
                            strides=pool_strides, padding='same')(x)
    elif num_filters != keras.backend.int_shape(x)[-1]:
        res = layers.Conv2D(num_filters, kernel_size=(1, 1), padding='same')(x)
    else:
        res = x
    x = sep_conv(x, num_filters, kernel_size, activation)
    x = sep_conv(x, num_filters, kernel_size, activation)
    if max_pooling:
        x = layers.MaxPooling2D(kernel_size, strides=pool_strides,
                                padding='same')(x)
    x = layers.add([x, res])
    return x
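The sep_conv helper is assumed here. A plausible minimal version (pre-activation followed by a depthwise-separable convolution and batch norm, in the Xception style); the original project's version may differ:

from tensorflow import keras
from tensorflow.keras import layers

def sep_conv(x, num_filters, kernel_size=(3, 3), activation='relu'):
    # Activation first (pre-activation), then a depthwise-separable convolution.
    x = layers.Activation(activation)(x)
    x = layers.SeparableConv2D(num_filters, kernel_size, padding='same')(x)
    x = layers.BatchNormalization()(x)
    return x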
Example 6: channel_spatial_squeeze_excite
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def channel_spatial_squeeze_excite(input, ratio=16):
    '''Create a concurrent channel and spatial squeeze-excite block.

    Args:
        input: input tensor
        ratio: reduction ratio of the channel squeeze-excite block

    Returns: a Keras tensor

    References
    - [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    - [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    '''
    cse = squeeze_excite_block(input, ratio)
    sse = spatial_squeeze_excite_block(input)
    x = add([cse, sse])
    return x
Example 7: bottleneck_block
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    init = input
    grouped_channels = int(filters / cardinality)
    # Project the shortcut if its channel count does not match the block output.
    if init.shape[-1] != 2 * filters:
        init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                      use_bias=False, kernel_initializer='he_normal',
                      kernel_regularizer=l2(weight_decay))(init)
        init = BatchNormalization(axis=3)(init)
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)
    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=3)(x)
    x = add([init, x])
    x = Activation('relu')(x)
    return x
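The grouped_convolution_block (the ResNeXt grouped 3×3 convolution) is not shown. Below is a common way to emulate grouped convolutions in Keras, sketched under the same argument names; the original repository's version may differ:

from tensorflow.keras.layers import (Conv2D, BatchNormalization, Activation,
                                     Lambda, concatenate)
from tensorflow.keras.regularizers import l2

def grouped_convolution_block(input, grouped_channels, cardinality, strides,
                              weight_decay=5e-4):
    # Split the channels (channels-last assumed) into `cardinality` groups,
    # convolve each group separately, then concatenate the results.
    groups = []
    for c in range(cardinality):
        group = Lambda(lambda z, idx=c: z[:, :, :,
                       idx * grouped_channels:(idx + 1) * grouped_channels])(input)
        groups.append(Conv2D(grouped_channels, (3, 3), padding='same',
                             strides=(strides, strides), use_bias=False,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay))(group))
    x = concatenate(groups, axis=-1)
    x = BatchNormalization(axis=3)(x)
    return Activation('relu')(x)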
Example 8: construct_q_network
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def construct_q_network(self):
    # Uses the network architecture found in the DeepMind paper, with the
    # input and output sizes changed and the convolutions replaced by dense layers.
    input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
    lay1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
    lay1 = Activation('relu')(lay1)
    lay2 = Dense(self.observation_size)(lay1)
    lay2 = Activation('relu')(lay2)
    lay3 = Dense(2 * self.action_size)(lay2)
    lay3 = Activation('relu')(lay3)
    fc1 = Dense(self.action_size)(lay3)
    advantage = Dense(self.action_size)(fc1)
    fc2 = Dense(self.action_size)(lay3)
    value = Dense(1)(fc2)
    # Dueling aggregation: subtract the mean advantage, then add the state value.
    meaner = Lambda(lambda x: K.mean(x, axis=1))
    mn_ = meaner(advantage)
    tmp = subtract([advantage, mn_])
    policy = add([tmp, value])
    self.model = Model(inputs=[input_layer], outputs=[policy])
    self.model.compile(loss='mse', optimizer=Adam(learning_rate=self.lr_))
    self.target_model = Model(inputs=[input_layer], outputs=[policy])
    self.target_model.compile(loss='mse', optimizer=Adam(learning_rate=self.lr_))
    print("Successfully constructed networks.")
Example 9: identity_block
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def identity_block(self, input_tensor, kernel_size, filters, stage, block):
    conv_name_base = f'res{stage}_{block}_branch'
    x = Conv2D(filters,
               kernel_size=kernel_size,
               strides=1,
               activation=None,
               padding='same',
               kernel_initializer='glorot_uniform',
               kernel_regularizer=regularizers.l2(l=0.0001),
               name=conv_name_base + '_2a')(input_tensor)
    x = BatchNormalization(name=conv_name_base + '_2a_bn')(x)
    x = self.clipped_relu(x)
    x = Conv2D(filters,
               kernel_size=kernel_size,
               strides=1,
               activation=None,
               padding='same',
               kernel_initializer='glorot_uniform',
               kernel_regularizer=regularizers.l2(l=0.0001),
               name=conv_name_base + '_2b')(x)
    x = BatchNormalization(name=conv_name_base + '_2b_bn')(x)
    x = self.clipped_relu(x)
    x = layers.add([x, input_tensor])
    x = self.clipped_relu(x)
    return x
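The block relies on a clipped_relu helper that is not shown. In the speech models this kind of block comes from, it is typically ReLU capped at a constant (20 in Deep Speech 2); a plausible sketch in method form, matching the call sites above (the cap value is an assumption):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Lambda

def clipped_relu(self, inputs):
    # min(max(x, 0), 20): ReLU with an assumed upper clip of 20.
    return Lambda(lambda y: K.minimum(K.maximum(y, 0), 20))(inputs)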
Example 10: build_model
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def build_model(board_size=4, board_layers=16, outputs=4, filters=64, residual_blocks=4):
    # Functional API model
    inputs = layers.Input(shape=(board_size * board_size * board_layers,))
    x = layers.Reshape((board_size, board_size, board_layers))(inputs)
    # Initial convolutional block
    x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    # Residual blocks
    for i in range(residual_blocks):
        # x at the start of a block
        temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
        temp_x = layers.BatchNormalization()(temp_x)
        temp_x = layers.Activation('relu')(temp_x)
        temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(temp_x)
        temp_x = layers.BatchNormalization()(temp_x)
        x = layers.add([x, temp_x])
        x = layers.Activation('relu')(x)
    # Policy head
    x = layers.Conv2D(filters=2, kernel_size=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Flatten()(x)
    predictions = layers.Dense(outputs, activation='softmax')(x)
    # Create model
    return models.Model(inputs=inputs, outputs=predictions)
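A quick way to sanity-check the builder (with the defaults above, the flattened board input is 4 × 4 × 16 = 256 values and the policy head emits 4 move probabilities):

model = build_model()
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()  # expects input shape (None, 256), outputs (None, 4)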
Example 11: contracting_layer_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def contracting_layer_2D(input, neurons, ba_norm, ba_norm_momentum):
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    shortcut = Conv2D(neurons, (1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv2])
    pool = MaxPooling2D(pool_size=(2, 2))(add_layer)
    return pool, add_layer
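A note on imports: the U-Net style subroutines in Examples 4 and 11–14 (and Example 15 below) call layer constructors directly by name; presumably they come from tensorflow.keras.layers, e.g.:

from tensorflow.keras.layers import (Conv2D, Conv3D, Conv2DTranspose,
                                     MaxPooling2D, MaxPooling3D, Activation,
                                     BatchNormalization, concatenate, add)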
Example 12: middle_layer_2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def middle_layer_2D(input, neurons, ba_norm, ba_norm_momentum):
    conv_m1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv_m1 = BatchNormalization(momentum=ba_norm_momentum)(conv_m1)
    conv_m2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conv_m1)
    if ba_norm: conv_m2 = BatchNormalization(momentum=ba_norm_momentum)(conv_m2)
    shortcut = Conv2D(neurons, (1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv_m2])
    return add_layer
Example 13: contracting_layer_3D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def contracting_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    shortcut = Conv3D(neurons, (1, 1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv2])
    pool = MaxPooling3D(pool_size=(2, 2, 2))(add_layer)
    return pool, add_layer
Example 14: middle_layer_3D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def middle_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv_m1 = BatchNormalization(momentum=ba_norm_momentum)(conv_m1)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv_m1)
    if ba_norm: conv_m2 = BatchNormalization(momentum=ba_norm_momentum)(conv_m2)
    shortcut = Conv3D(neurons, (1, 1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv_m2])
    return add_layer
Example 15: MultiResBlock_3D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import add [as alias]
def MultiResBlock_3D(U, inp, alpha=1.67):
    '''MultiRes Block.

    Arguments:
        U {int} -- number of filters in the corresponding UNet stage
        inp {keras layer} -- input layer

    Returns:
        [keras layer] -- output layer
    '''
    W = alpha * U
    shortcut = inp
    shortcut = conv3d_bn(shortcut, int(W*0.167) + int(W*0.333) + int(W*0.5),
                         1, 1, 1, activation=None, padding='same')
    conv3x3 = conv3d_bn(inp, int(W*0.167), 3, 3, 3, activation='relu', padding='same')
    conv5x5 = conv3d_bn(conv3x3, int(W*0.333), 3, 3, 3, activation='relu', padding='same')
    conv7x7 = conv3d_bn(conv5x5, int(W*0.5), 3, 3, 3, activation='relu', padding='same')
    out = concatenate([conv3x3, conv5x5, conv7x7], axis=4)
    out = BatchNormalization(axis=4)(out)
    out = add([shortcut, out])
    out = Activation('relu')(out)
    out = BatchNormalization(axis=4)(out)
    return out
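Example 15 also depends on a conv3d_bn helper. A minimal sketch consistent with the call sites above (filters, then the three kernel dimensions, with an optional activation); the original MultiResUNet implementation may differ in detail:

from tensorflow.keras.layers import Conv3D, BatchNormalization, Activation

def conv3d_bn(x, filters, num_row, num_col, num_z, activation='relu',
              padding='same', strides=(1, 1, 1)):
    # Conv3D -> BatchNorm, with an optional activation on top.
    x = Conv3D(filters, (num_row, num_col, num_z), strides=strides,
               padding=padding, use_bias=False)(x)
    x = BatchNormalization(axis=4)(x)
    if activation is not None:
        x = Activation(activation)(x)
    return x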