本文整理匯總了Python中tensorflow.keras.backend.pow方法的典型用法代碼示例。如果您正苦於以下問題:Python backend.pow方法的具體用法?Python backend.pow怎麽用?Python backend.pow使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorflow.keras.backend
的用法示例。
在下文中一共展示了backend.pow方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: call
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def call(self, x):
    """Compute a mel-scaled spectrogram for a batch of audio.

    The parent layer's ``call`` produces a power spectrogram; this method
    maps its frequency axis onto the mel scale via ``self.freq2mel``, then
    optionally rescales the power and converts to decibels.
    """
    power_spectrogram = super(Melspectrogram, self).call(x)
    # now, channels_first: (batch_sample, n_ch, n_freq, n_time)
    # channels_last: (batch_sample, n_freq, n_time, n_ch)
    if self.image_data_format == 'channels_first':
        power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
    else:
        power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
    # now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
    output = K.dot(power_spectrogram, self.freq2mel)
    # Permute back to the layout implied by image_data_format.
    if self.image_data_format == 'channels_first':
        output = K.permute_dimensions(output, [0, 1, 3, 2])
    else:
        output = K.permute_dimensions(output, [0, 3, 2, 1])
    # sqrt recovers amplitude from the power spectrogram, then the
    # requested power is applied.
    if self.power_melgram != 2.0:
        output = K.pow(K.sqrt(output), self.power_melgram)
    if self.return_decibel_melgram:
        output = backend_keras.amplitude_to_decibel(output)
    return output
示例2: focal_loss_binary
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def focal_loss_binary(y_true, y_pred, gamma=2.0, alpha=0.25):
    """Binary cross-entropy focal loss (Lin et al., 2017).

    Generalized from the original hard-coded constants: ``gamma`` and
    ``alpha`` are now keyword parameters with the same default values,
    so existing two-argument calls behave identically.

    Args:
        y_true: Ground-truth binary labels (0 or 1).
        y_pred: Predicted probabilities of the positive class.
        gamma: Focusing parameter; larger values down-weight easy examples.
        alpha: Class-balance weight applied to the positive class.

    Returns:
        Scalar focal loss, summed over all elements.
    """
    # Probability where the label is 1; elsewhere 1 so its log term is 0.
    pt_1 = tf.where(tf.equal(y_true, 1),
                    y_pred,
                    tf.ones_like(y_pred))
    # Probability where the label is 0; elsewhere 0 so its log term is 0.
    pt_0 = tf.where(tf.equal(y_true, 0),
                    y_pred,
                    tf.zeros_like(y_pred))
    epsilon = K.epsilon()
    # clip to prevent NaN and Inf from log(0)
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
    weight = alpha * K.pow(1. - pt_1, gamma)
    fl1 = -K.sum(weight * K.log(pt_1))
    weight = (1 - alpha) * K.pow(pt_0, gamma)
    fl0 = -K.sum(weight * K.log(1. - pt_0))
    return fl1 + fl0
示例3: focal_loss_categorical
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def focal_loss_categorical(y_true, y_pred, gamma=2.0, alpha=0.25):
    """Categorical cross-entropy focal loss.

    Generalized from the original hard-coded constants: ``gamma`` and
    ``alpha`` are now keyword parameters with the same default values,
    so existing two-argument calls behave identically.

    Args:
        y_true: One-hot ground-truth class labels.
        y_pred: Predicted class probabilities.
        gamma: Focusing parameter; larger values down-weight easy examples.
        alpha: Weighting factor applied to the modulating term.

    Returns:
        Per-sample focal loss, summed over the class axis.
    """
    # scale to ensure sum of prob is 1.0
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip the prediction value to prevent NaN and Inf
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    # calculate cross entropy
    cross_entropy = -y_true * K.log(y_pred)
    # calculate focal loss
    weight = alpha * K.pow(1 - y_pred, gamma)
    cross_entropy *= weight
    return K.sum(cross_entropy, axis=-1)
示例4: convert_pow
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def convert_pow(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Pow layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    # Bug fix: the original `assert AttributeError(...)` asserted a truthy
    # exception *instance*, so the check could never fail. Raise instead.
    if len(node.input) != 2:
        raise AttributeError('More than 2 inputs for pow layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
    power = ensure_numpy_type(layers[node.input[1]])

    def target_layer(x, a=power):
        # Imported inside the lambda so the Lambda layer stays serializable.
        import tensorflow.keras.backend as K
        return K.pow(x, a)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    lambda_func[keras_name] = target_layer
示例5: gelu_tanh
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def gelu_tanh(x):
    """GELU activation computed with the tanh approximation."""
    coeff = np.sqrt(2 / np.pi)
    inner = coeff * (x + 0.044715 * K.pow(x, 3))
    cdf = 0.5 * (1.0 + K.tanh(inner))
    return x * cdf
示例6: customLoss
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def customLoss(y_true, y_pred):
    """Focal-style weighted binary cross-entropy.

    Positive examples are weighted 1.5 and modulated by (1 - p)^2;
    negative examples are weighted 0.5 and modulated by p^2. The small
    1e-9 offset keeps the logs finite.
    """
    pos_term = 1.5 * y_true * K.log(y_pred + 1e-9) * K.pow(1 - y_pred, 2)
    neg_term = 0.5 * (1 - y_true) * K.log((1 - y_pred) + 1e-9) * K.pow(y_pred, 2)
    return -K.sum(K.mean(neg_term + pos_term, axis=0))
示例7: _get_scale
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def _get_scale(alpha, x, q):
    """Gets scaling factor for scaling the tensor per channel.

    Arguments:
      alpha: A float or string. When it is a string, it should be either
        "auto" or "auto_po2", and
        scale = sum(x * q, axis=all but last) / sum(q * q, axis=all but last)
      x: A tensor object. Its elements are in float.
      q: A tensor object. Its elements are in quantized format of x.

    Returns:
      A scaling factor tensor or scalar for scaling tensor per channel.
    """
    if isinstance(alpha, six.string_types) and "auto" in alpha:
        assert alpha in ["auto", "auto_po2"]
        x_shape = x.shape.as_list()
        len_axis = len(x_shape)
        if len_axis > 1:
            # Reduce over every axis except the channel axis.
            if K.image_data_format() == "channels_last":
                axis = list(range(len_axis - 1))
            else:
                axis = list(range(1, len_axis))
            qx = K.mean(tf.math.multiply(x, q), axis=axis, keepdims=True)
            qq = K.mean(tf.math.multiply(q, q), axis=axis, keepdims=True)
        else:
            # 1-D tensor: reduce over the single axis.
            qx = K.mean(x * q, axis=0, keepdims=True)
            qq = K.mean(q * q, axis=0, keepdims=True)
        # Least-squares scale; epsilon guards against division by zero.
        scale = qx / (qq + K.epsilon())
        if alpha == "auto_po2":
            # Round the scale to the nearest power of two.
            scale = K.pow(2.0,
                          tf.math.round(K.log(scale + K.epsilon()) / np.log(2.0)))
    elif alpha is None:
        scale = 1.0
    elif isinstance(alpha, np.ndarray):
        scale = alpha
    else:
        scale = float(alpha)
    return scale
示例8: stochastic_round_po2
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def stochastic_round_po2(x):
    """Performs stochastic rounding for the power of two."""
    # TODO(hzhuang): test stochastic_round_po2 and constraint.
    # because quantizer is applied after constraint.
    y = tf.abs(x)
    eps = tf.keras.backend.epsilon()
    log2 = tf.keras.backend.log(2.0)
    # Nearest power-of-two exponent of |x|.
    x_log2 = tf.round(tf.keras.backend.log(y + eps) / log2)
    po2 = tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
    # Bracketing exponents so that 2**left_val <= y <= 2**right_val.
    left_val = tf.where(po2 > y, x_log2 - 1, x_log2)
    right_val = tf.where(po2 > y, x_log2, x_log2 + 1)
    # sampling in [2**left_val, 2**right_val].
    minval = 2 ** left_val
    maxval = 2 ** right_val
    val = tf.random.uniform(tf.shape(y), minval=minval, maxval=maxval)
    # use y as a threshold to keep the probability [2**left_val, y, 2**right_val]
    # so that the mean value of the sample should be y
    # NOTE(review): x_po2 is the rounded *exponent* (left_val/right_val), not
    # 2**exponent, and the sign of x is dropped — confirm callers expect that.
    x_po2 = tf.where(y < val, left_val, right_val)
    # NOTE(review): the string literal below is dead, commented-out code kept
    # from the original source; it is never executed.
    """
    x_log2 = stochastic_round(tf.keras.backend.log(y + eps) / log2)
    sign = tf.sign(x)
    po2 = (
    tf.sign(x) *
    tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
    )
    """
    return x_po2
示例9: __call__
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def __call__(self, x):
    """Quantize a ReLU-style activation to fixed-point levels.

    Saturates the input at m_i = 2**self.integer, quantizes with optional
    sigmoid shaping and stochastic rounding, and returns the result via a
    straight-through estimator so gradients flow through the unquantized
    saturated value.

    NOTE(review): assumes self.bits, self.integer, self.negative_slope,
    self.use_sigmoid and self.use_stochastic_rounding are set by __init__
    (not visible here) — confirm against the class definition.
    """
    # One bit is consumed by the sign when a negative slope is allowed.
    non_sign_bits = self.bits - (self.negative_slope != 0)
    m = K.cast_to_floatx(pow(2, non_sign_bits))    # number of quantization levels
    m_i = K.cast_to_floatx(pow(2, self.integer))   # saturation value
    # Saturated, unquantized activation used for the straight-through gradient.
    x_uq = tf.where(
        x <= m_i, K.relu(x, alpha=self.negative_slope), tf.ones_like(x) * m_i)
    if self.use_sigmoid:
        # Sigmoid-shaped mapping of x into [0, m) before rounding.
        p = _sigmoid(x / m_i) * m
        xq = m_i * tf.keras.backend.clip(
            2.0 * (_round_through(p, self.use_stochastic_rounding) / m) - 1.0,
            0.0, 1.0 - 1.0 / m)
        if self.negative_slope > 0:
            # Add the quantized negative (leaky) branch.
            neg_factor = 1 / (self.negative_slope * m)
            xq = xq + m_i * self.negative_slope * tf.keras.backend.clip(
                2.0 * (_round_through(p * self.negative_slope,
                                      self.use_stochastic_rounding) * neg_factor) - 1.0,
                -1.0, 0.0)
    else:
        # Linear mapping of x into [0, m) before rounding.
        p = x * m / m_i
        xq = m_i * tf.keras.backend.clip(
            _round_through(p, self.use_stochastic_rounding) / m, 0.0,
            1.0 - 1.0 / m)
        if self.negative_slope > 0:
            # Add the quantized negative (leaky) branch.
            neg_factor = 1 / (self.negative_slope * m)
            xq = xq + m_i * self.negative_slope * (tf.keras.backend.clip(
                _round_through(p * self.negative_slope,
                               self.use_stochastic_rounding) * neg_factor, -1.0, 0.0))
    # Straight-through estimator: forward pass yields xq, backward pass x_uq.
    return x_uq + tf.stop_gradient(-x_uq + xq)
示例10: call
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def call(self, x):
    """Per-interval conditional survival probability.

    The conditional probability of surviving each time interval (given
    survival to the start of the interval) is driven by the input data,
    following eq. 18.13 in Harrell F., Regression Modeling Strategies,
    2nd ed. (available free online).
    """
    base_prob = K.sigmoid(self.kernel)
    exponent = K.exp(x)
    return K.pow(base_prob, exponent)
示例11: focal_loss
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def focal_loss(y_true, y_pred, gamma=2, alpha=0.95):
    """Binary focal loss with hard clipping of probabilities to [1e-3, .999]."""
    # Prediction where the label is 1; elsewhere 1 so its log term vanishes.
    prob_pos = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    # Prediction where the label is 0; elsewhere 0 so its log term vanishes.
    prob_neg = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    prob_pos = K.clip(prob_pos, 1e-3, .999)
    prob_neg = K.clip(prob_neg, 1e-3, .999)
    loss_pos = -K.sum(alpha * K.pow(1. - prob_pos, gamma) * K.log(prob_pos))
    loss_neg = -K.sum((1 - alpha) * K.pow(prob_neg, gamma) * K.log(1. - prob_neg))
    return loss_pos + loss_neg
示例12: k_focal_loss
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def k_focal_loss(gamma=2, alpha=0.75):
    """Factory returning a binary focal loss with fixed gamma/alpha."""
    # from github.com/atomwh/focalloss_keras
    def focal_loss_fixed(y_true, y_pred):  # with tensorflow
        eps = 1e-12  # improve the stability of the focal loss
        y_pred = K.clip(y_pred, eps, 1. - eps)
        # Prediction at positive labels (1 elsewhere, so log term vanishes).
        prob_if_pos = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        # Prediction at negative labels (0 elsewhere, so log term vanishes).
        prob_if_neg = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        pos_loss = K.sum(alpha * K.pow(1. - prob_if_pos, gamma) * K.log(prob_if_pos))
        neg_loss = K.sum((1 - alpha) * K.pow(prob_if_neg, gamma) * K.log(1. - prob_if_neg))
        return -pos_loss - neg_loss
    return focal_loss_fixed
示例13: focal_loss
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import pow [as 別名]
def focal_loss(gamma=2., alpha=.25):
    """Factory for a mean-reduced binary focal loss closed over gamma/alpha."""
    def focal_loss_fixed(y_true, y_pred):
        # Predictions at positive / negative labels; neutral fill values
        # (1 and 0) make the respective log terms vanish elsewhere.
        pos = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        neg = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        pos_term = K.mean(alpha * K.pow(1. - pos, gamma) * K.log(pos))
        neg_term = K.mean((1 - alpha) * K.pow(neg, gamma) * K.log(1. - neg))
        return -pos_term - neg_term
    return focal_loss_fixed