This article collects typical usage examples of the Python method tensorflow.keras.backend.tanh. If you have been wondering exactly what backend.tanh does or how to use it, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.keras.backend.

Six code examples of backend.tanh are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
def __init__(self,
             ratio,
             return_mask=False,
             sigmoid_gating=False,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.ratio = ratio
    self.return_mask = return_mask
    self.sigmoid_gating = sigmoid_gating
    # Choose the gating nonlinearity: sigmoid maps into (0, 1), tanh into (-1, 1).
    self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
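
For context, here is a minimal sketch (not from the original source) of how the two gating choices differ on the same scores, assuming K is tensorflow.keras.backend:

import tensorflow as tf
from tensorflow.keras import backend as K

scores = tf.constant([-2.0, 0.0, 2.0])
print(K.sigmoid(scores).numpy())  # approx. [0.119 0.5 0.881] -- gates in (0, 1)
print(K.tanh(scores).numpy())     # approx. [-0.964 0. 0.964] -- gates in (-1, 1)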
Example 2: logtanh
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
def logtanh(x, a=1):
    """
    log * tanh
    See Also: arcsinh
    """
    return K.tanh(x) * K.log(2 + a * abs(x))
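
A quick sanity check of logtanh (a sketch using the function defined above): near zero it behaves like tanh scaled by log 2, but for large |x| the log factor keeps growing instead of saturating at +/-1:

import tensorflow as tf

for v in [0.1, 1.0, 10.0, 100.0]:
    print(v, float(logtanh(tf.constant(v))))
# 0.1 -> ~0.074, 1.0 -> ~0.837, 10.0 -> ~2.485, 100.0 -> ~4.625 (unbounded growth)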
Example 3: gelu_tanh
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
# (also requires: import numpy as np)
def gelu_tanh(x):
    """GELU activation computed via the tanh approximation.
    """
    cdf = 0.5 * (
        1.0 + K.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))))
    )
    return x * cdf
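
To see how close the approximation is, the sketch below compares it against an erf-based GELU. Note that gelu_erf here is defined inline for the comparison (one common exact form, an assumption rather than code from the original source):

import numpy as np
import tensorflow as tf

def gelu_erf(x):
    # Exact GELU: x * Phi(x), where Phi is the standard normal CDF.
    return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0)))

x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])
print(np.abs(gelu_erf(x).numpy() - gelu_tanh(x).numpy()).max())
# the two agree to within roughly 1e-3 on this range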
Example 4: set_gelu
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
# (also requires: the keras module and the gelu_erf / gelu_tanh functions)
def set_gelu(version):
    """Select which gelu implementation to register.
    """
    version = version.lower()
    assert version in ['erf', 'tanh'], 'gelu version must be erf or tanh'
    if version == 'erf':
        keras.utils.get_custom_objects()['gelu'] = gelu_erf
    else:
        keras.utils.get_custom_objects()['gelu'] = gelu_tanh
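
A hedged usage sketch (assuming gelu_erf and gelu_tanh are defined as above, and a Keras version in which string activation lookups consult get_custom_objects()):

from tensorflow import keras

set_gelu('tanh')  # register the tanh approximation under the name 'gelu'
layer = keras.layers.Dense(64, activation='gelu')  # should now resolve to gelu_tanh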
Example 5: mish
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
def mish(x):
    # Mish activation: x * tanh(softplus(x)).
    return x * K.tanh(K.softplus(x))
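
Since mish is a plain function of a tensor, it can be passed directly as a layer activation. A minimal sketch (assuming mish as defined above):

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(32, activation=mish, input_shape=(16,)),
    keras.layers.Dense(1),
])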
Example 6: __call__
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tanh [as alias]
# (also requires: six, numpy as np, tensorflow as tf, tf_utils, and the
#  module's _round_through and _get_scale helpers)
def __call__(self, x):
    if isinstance(self.alpha, six.string_types):
        assert self.alpha in ["auto", "auto_po2"]
    if self.alpha is None:
        scale = self.default_alpha
    elif isinstance(self.alpha, six.string_types):
        scale = 1.0
    elif isinstance(self.alpha, np.ndarray):
        scale = self.alpha
    else:
        scale = float(self.alpha)
    if self.use_stochastic_rounding:
        len_axis = len(x.shape.as_list())
        if len_axis == 1:
            axis = None
        elif K.image_data_format() == "channels_last":
            axis = list(range(len_axis - 1))
        else:
            axis = list(range(1, len_axis))
        # When rounding stochastically, scale the values so that the
        # rounding precision is small enough. This is especially
        # important when the range of x is very small, as it is right
        # after weight initialization.
        m = K.max(tf.abs(x), axis=axis, keepdims=True)
        m = tf.where(m > 1.0, tf.ones_like(m), m)
        f = 2 * m
        x = tf_utils.smart_cond(
            K.learning_phase(),
            lambda: f * _round_through(
                x / f, use_stochastic_rounding=True, precision=0.125),
            lambda: x)
    k_sign = tf.sign(x)
    if self.use_stochastic_rounding:
        # During training, ties at zero are broken randomly to +/-1;
        # at inference, a biased "1" is used for now.
        k_sign += (1.0 - tf.abs(k_sign)) * tf_utils.smart_cond(
            K.learning_phase(),
            lambda: 2.0 * tf.round(tf.random.uniform(tf.shape(x))) - 1.0,
            lambda: tf.ones_like(tf.shape(x), dtype=K.floatx()))
    # If any zeros still remain, just make them positive for now.
    k_sign += (1.0 - tf.abs(k_sign))
    if self.use_01:
        k_sign = (k_sign + 1.0) / 2.0
    # Approximate the binary output by tanh(x), since tanh is limited
    # to the range (-1, +1).
    if self.alpha is None:
        x = K.tanh(x)
    scale = _get_scale(self.alpha, x, k_sign)
    self.scale = scale
    return x + tf.stop_gradient(-x + scale * k_sign)
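
The closing line is the straight-through estimator (STE) trick: the forward pass emits scale * k_sign, while the backward pass differentiates through x (here tanh(x)) as if no quantization had happened. A stripped-down illustration of the same pattern (a sketch, not code from the original source):

import tensorflow as tf

def binary_ste(x):
    # Forward: sign(x); backward: identity, because the stop_gradient
    # term contributes nothing to the derivative.
    return x + tf.stop_gradient(tf.sign(x) - x)

x = tf.Variable([-0.7, 0.3, 1.5])
with tf.GradientTape() as tape:
    y = binary_ste(x)
    loss = tf.reduce_sum(y)
print(y.numpy())                       # [-1.  1.  1.]
print(tape.gradient(loss, x).numpy())  # [1. 1. 1.] -- gradient passes through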