This article collects typical usage examples of the Python method tensorflow.keras.backend.relu. If you have been wondering what backend.relu does, how to call it, or how it is used in practice, the curated code examples below should help. You can also browse further usage examples from the tensorflow.keras.backend module.

Nine code examples of backend.relu are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: hinge_d
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def hinge_d(y_true, y_pred):
    return K.mean(K.relu(1.0 + (y_true * y_pred)))
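A quick demo of this loss on toy tensors (the sample values are ours, not from the source). Note the sign convention: this variant penalizes y_true * y_pred above -1, mirroring the relu(1 + ...) hinge terms used for GAN discriminators in Example 9 below.

import tensorflow as tf
from tensorflow.keras import backend as K

def hinge_d(y_true, y_pred):
    return K.mean(K.relu(1.0 + (y_true * y_pred)))

y_true = tf.constant([1.0, -1.0])
y_pred = tf.constant([0.5, -2.0])
# relu(1 + 0.5) = 1.5 and relu(1 + 2.0) = 3.0, so the mean is 2.25
print(hinge_d(y_true, y_pred).numpy())  # 2.25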
Example 2: relu6
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def relu6(x):
    return K.relu(x, max_value=6)
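relu6 simply caps the standard ReLU at 6 (the activation used in the MobileNet family). A quick check with values we picked:

import tensorflow as tf
from tensorflow.keras import backend as K

def relu6(x):
    return K.relu(x, max_value=6)

x = tf.constant([-2.0, 3.0, 8.0])
print(relu6(x).numpy())  # [0. 3. 6.] -- negatives zeroed, output capped at 6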
Example 3: odd_shifted_relu
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def odd_shifted_relu(x, shift=-0.5, scale=2.0):
    """Odd shifted ReLU.

    For x > 0 this is a shifted ReLU; for x < 0 it is the negative
    mirror image, making the function odd: f(-x) = -f(x).
    """
    shift = float(shift)
    scale = float(scale)
    return scale * K.relu(x - shift) - scale * K.relu(-x - shift)
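With the default shift=-0.5 and scale=2.0, both ReLU branches are active on (-0.5, 0.5), where the function reduces to 4x; outside that band only one branch fires. The check below (our own, not from the source) confirms the odd symmetry:

import tensorflow as tf
from tensorflow.keras import backend as K

def odd_shifted_relu(x, shift=-0.5, scale=2.0):
    return scale * K.relu(x - float(shift)) - scale * K.relu(-x - float(shift))

x = tf.constant([-1.0, -0.25, 0.0, 0.25, 1.0])
print(odd_shifted_relu(x).numpy())  # [-3. -1.  0.  1.  3.] -- f(-x) = -f(x)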
Example 4: __call__
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def __call__(self, x):
    # _sigmoid and _round_through are helpers defined in the enclosing module.
    non_sign_bits = self.bits - (self.negative_slope != 0)
    m = K.cast_to_floatx(pow(2, non_sign_bits))
    m_i = K.cast_to_floatx(pow(2, self.integer))
    x_uq = tf.where(
        x <= m_i, K.relu(x, alpha=self.negative_slope), tf.ones_like(x) * m_i)
    if self.use_sigmoid:
        p = _sigmoid(x / m_i) * m
        xq = m_i * tf.keras.backend.clip(
            2.0 * (_round_through(p, self.use_stochastic_rounding) / m) - 1.0,
            0.0, 1.0 - 1.0 / m)
        if self.negative_slope > 0:
            neg_factor = 1 / (self.negative_slope * m)
            xq = xq + m_i * self.negative_slope * tf.keras.backend.clip(
                2.0 * (_round_through(p * self.negative_slope,
                                      self.use_stochastic_rounding) * neg_factor) - 1.0,
                -1.0, 0.0)
    else:
        p = x * m / m_i
        xq = m_i * tf.keras.backend.clip(
            _round_through(p, self.use_stochastic_rounding) / m, 0.0,
            1.0 - 1.0 / m)
        if self.negative_slope > 0:
            neg_factor = 1 / (self.negative_slope * m)
            xq = xq + m_i * self.negative_slope * (tf.keras.backend.clip(
                _round_through(p * self.negative_slope,
                               self.use_stochastic_rounding) * neg_factor, -1.0, 0.0))
    # Straight-through estimator: the forward pass returns the quantized xq,
    # while gradients flow through the unquantized x_uq.
    return x_uq + tf.stop_gradient(-x_uq + xq)
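The closing line is the straight-through estimator pattern: because the difference between the quantized and unquantized values is wrapped in tf.stop_gradient, the forward pass sees the quantized output while the backward pass differentiates the smooth surrogate. A minimal, self-contained sketch of the same trick (the ste_round name is ours, not from the source):

import tensorflow as tf

def ste_round(x):
    # Forward pass: tf.round(x); backward pass: identity gradient.
    return x + tf.stop_gradient(tf.round(x) - x)

x = tf.Variable([0.2, 0.7, 1.4])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(ste_round(x))
print(ste_round(x).numpy())         # [0. 1. 1.]
print(tape.gradient(y, x).numpy())  # [1. 1. 1.] -- gradient passes straight through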
Example 5: softplus2
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def softplus2(x):
    """Softplus shifted so that the function is 0 at x = 0:

        out = log(exp(x) + 1) - log(2)

    The implementation is written to avoid overflow for large x.

    Args:
        x: (Tensor) input tensor

    Returns:
        (Tensor) output tensor
    """
    return kb.relu(x) + kb.log(0.5 * kb.exp(-kb.abs(x)) + 0.5)
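Since softplus2 is advertised as an overflow-safe rewrite of log(exp(x) + 1) - log(2), both claims are easy to sanity-check numerically (this check is ours, not part of the source):

import numpy as np
import tensorflow.keras.backend as kb

x = kb.constant([-3.0, 0.0, 3.0])
stable = kb.relu(x) + kb.log(0.5 * kb.exp(-kb.abs(x)) + 0.5)
naive = kb.log(kb.exp(x) + 1.0) - kb.log(kb.constant(2.0))
print(np.allclose(kb.eval(stable), kb.eval(naive)))  # True

big = kb.constant([100.0])
print(kb.eval(kb.log(kb.exp(big) + 1.0)))  # [inf] -- float32 exp(100) overflows
print(kb.eval(kb.relu(big) + kb.log(0.5 * kb.exp(-kb.abs(big)) + 0.5)))  # ~[99.31]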
Example 6: _relu6
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def _relu6(self, x):
    """ReLU capped at 6."""
    return K.relu(x, max_value=6.0)
Example 7: _hard_swish
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def _hard_swish(self, x):
    """Hard swish activation."""
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0
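Hard swish is the piecewise-linear approximation of swish (x * sigmoid(x)) introduced with MobileNetV3; it avoids the sigmoid while staying close to the smooth curve. The comparison below is our own quick check:

import tensorflow as tf
from tensorflow.keras import backend as K

def hard_swish(x):
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0

x = tf.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
print(hard_swish(x).numpy())        # [ 0.    -0.333  0.     0.667  4.   ]
print((x * tf.sigmoid(x)).numpy())  # [-0.072 -0.269  0.     0.731  3.928] (swish)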
Example 8: _squeeze
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def _squeeze(self, inputs):
    """Squeeze-and-Excitation.

    This function defines the squeeze structure.

    # Arguments
        inputs: Tensor, input tensor of a conv layer.
    """
    # Assumes: from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)
    return x
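The method above only builds the "squeeze" branch; the per-channel weights it returns are meant to rescale the original feature map. A minimal sketch of how such a branch is typically wired up (the Multiply step is assumed here; in the original class it lives elsewhere):

import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, GlobalAveragePooling2D,
                                     Dense, Reshape, Multiply)

inputs = Input(shape=(32, 32, 16))
features = Conv2D(16, 3, padding='same')(inputs)
se = GlobalAveragePooling2D()(features)
se = Dense(16, activation='relu')(se)
se = Dense(16, activation='hard_sigmoid')(se)
se = Reshape((1, 1, 16))(se)
outputs = Multiply()([features, se])  # channel-wise reweighting
model = tf.keras.Model(inputs, outputs)
model.summary()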
Example 9: train_step
# Required import: from tensorflow.keras import backend [as an alias]
# Or: from tensorflow.keras.backend import relu [as an alias]
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly perturb the W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from the perturbed W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Distance after the perturbation (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Compute gradients for the generator and the discriminator
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
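The gradient_penalty helper called above is not part of this excerpt. For orientation, here is a plausible R1-style implementation under the same signature; this is our sketch, not the repository's code, and it assumes train_step runs under tf.function (graph mode), where K.gradients is valid:

from tensorflow.keras import backend as K

def gradient_penalty(samples, output, weight):
    # R1 penalty: squared norm of discriminator gradients at the real samples.
    gradients = K.gradients(output, samples)[0]
    gradients_sqr = K.square(gradients)
    penalty = K.sum(gradients_sqr, axis=[1, 2, 3])  # sum over H, W, C
    return weight * K.mean(penalty)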