This article collects typical usage examples of the math_ops.pow method from the Python module tensorflow.python.ops. If you are wondering what math_ops.pow does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse the containing module, tensorflow.python.ops.math_ops, for more usage examples.
Below are 15 code examples of the math_ops.pow method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
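Before the examples, here is a minimal sketch of calling math_ops.pow directly, assuming a TensorFlow 1.x graph-mode session (in public code the same op is usually reached as tf.pow):

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1.0, 2.0, 3.0])
y = math_ops.pow(x, 2.0)  # element-wise x ** 2; tf.pow is the public alias

with tf.Session() as sess:
    print(sess.run(y))  # [1. 4. 9.]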
Example 1: _PowGrad
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
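_PowGrad encodes the identities d(x^y)/dx = y * x^(y-1) and d(x^y)/dy = x^y * log(x). A quick numeric sanity check of those identities with tf.gradients (a sketch, again assuming TF 1.x graph mode):

import tensorflow as tf

x = tf.constant(3.0)
y = tf.constant(2.0)
z = tf.pow(x, y)
gx, gy = tf.gradients(z, [x, y])

with tf.Session() as sess:
    # Expect gx = y * x**(y - 1) = 6.0 and gy = z * log(x) = 9 * ln(3) ≈ 9.888.
    print(sess.run([gx, gy]))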
Example 2: _PowGrad
(An older-TF variant of Example 1: math_ops.select was the pre-1.0 name of what is now array_ops.where.)
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = math_ops.select(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = math_ops.select(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Example 3: _discounted_cumulative_gain
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def _discounted_cumulative_gain(labels, weights=None):
  """Computes discounted cumulative gain (DCG).

  DCG = SUM((2^label - 1) / log(1 + rank)), where log is the natural
  logarithm.

  Args:
    labels: The relevance `Tensor` of shape [batch_size, list_size]. For the
      ideal ranking, the examples are sorted by relevance in reverse order.
    weights: A `Tensor` of the same shape as labels or [batch_size, 1]. The
      former case is per-example and the latter case is per-list.

  Returns:
    A `Tensor` as the weighted discounted cumulative gain per-list. The
    tensor shape is [batch_size, 1].
  """
  list_size = array_ops.shape(labels)[1]
  position = math_ops.to_float(math_ops.range(1, list_size + 1))
  denominator = math_ops.log(position + 1)
  numerator = math_ops.pow(2.0, math_ops.to_float(labels)) - 1.0
  return math_ops.reduce_sum(
      weights * numerator / denominator, 1, keepdims=True)
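Note that the discount uses the natural logarithm; Example 15 below multiplies the result by log1p(1.0) = ln 2 to recover the conventional log2 discount. A NumPy hand check of the formula with hypothetical toy labels:

import numpy as np

labels = np.array([3.0, 2.0, 0.0])      # per-position relevance, best first
ranks = np.arange(1, labels.size + 1)   # 1-based positions
dcg = np.sum((2.0 ** labels - 1.0) / np.log(1.0 + ranks))
print(dcg)  # 7/ln(2) + 3/ln(3) + 0 ≈ 12.83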
Example 4: focal_loss
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def focal_loss(labels, logits, gamma=2.0):
  r"""Multi-class focal loss implementation: https://arxiv.org/abs/1708.02002

  :param labels: [batch_size, ] - Tensor of the correct class ids
  :param logits: [batch_size, num_classes] - Unscaled logits
  :param gamma: focal loss focusing parameter
  :return: [batch_size, ] - Tensor of per-example focal loss costs, summed
      over classes
  """
  num_classes = array_ops.shape(logits)[1]
  onehot_labels = array_ops.one_hot(labels, num_classes, dtype=logits.dtype)
  p = nn_ops.softmax(logits)
  p = clip_ops.clip_by_value(p, 1e-7, 1.0 - 1e-7)
  f_loss = (- onehot_labels * math_ops.pow(1.0 - p, gamma) * math_ops.log(p)
            - (1 - onehot_labels) * math_ops.pow(p, gamma) * math_ops.log(1.0 - p))
  cost = math_ops.reduce_sum(f_loss, axis=1)
  return cost
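A usage sketch with hypothetical toy inputs (TF 1.x style); for gamma = 0 the expression reduces to a per-class binary cross-entropy summed over classes:

import tensorflow as tf

labels = tf.constant([0, 2])                  # correct class ids, batch of 2
logits = tf.constant([[2.0, 0.5, -1.0],
                      [0.1, 0.2, 3.0]])
loss = focal_loss(labels, logits, gamma=2.0)  # shape [batch_size]

with tf.Session() as sess:
    print(sess.run(loss))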
Example 5: LRSchedule
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def LRSchedule(global_step, d_model, warmup_steps=4000):
  if global_step is None:
    raise ValueError("global_step is required for learning_rate_schedule.")

  def deal_lr(global_step, d_model, warmup_steps):
    d_model = ops.convert_to_tensor(d_model, dtype=tf.float32)
    dtype = d_model.dtype
    warmup_steps = math_ops.cast(warmup_steps, dtype)
    global_step_recomp = math_ops.cast(global_step, dtype)
    arg1 = math_ops.rsqrt(global_step_recomp)
    arg2 = math_ops.multiply(global_step_recomp, math_ops.pow(warmup_steps, -1.5))
    return math_ops.multiply(math_ops.rsqrt(d_model), math_ops.minimum(arg1, arg2))

  return functools.partial(deal_lr, global_step, d_model, warmup_steps)
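This is the Transformer learning-rate schedule from "Attention Is All You Need": lr = d_model^(-0.5) * min(step^(-0.5), step * warmup_steps^(-1.5)), which warms up linearly and then decays as the inverse square root of the step. A plain-Python sketch of the same curve with hypothetical values:

def transformer_lr(step, d_model=512, warmup_steps=4000):
    arg1 = step ** -0.5
    arg2 = step * warmup_steps ** -1.5
    return d_model ** -0.5 * min(arg1, arg2)

for step in (100, 4000, 40000):
    print(step, transformer_lr(step))  # peaks at step == warmup_steps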
Example 6: get_drop_fraction
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def get_drop_fraction(self, global_step, is_mask_update_iter_op):
  """Returns a constant or annealing drop_fraction op."""
  if self._drop_fraction_anneal == 'constant':
    drop_frac = self._drop_fraction_initial_value
  elif self._drop_fraction_anneal == 'cosine':
    decay_steps = self._end_step - self._begin_step
    drop_frac = learning_rate_decay.cosine_decay(
        self._drop_fraction_initial_value, global_step, decay_steps,
        name='cosine_drop_fraction')
  elif self._drop_fraction_anneal.startswith('exponential'):
    exponent = extract_number(self._drop_fraction_anneal)
    div_dtype = self._drop_fraction_initial_value.dtype
    power = math_ops.divide(
        math_ops.cast(global_step - self._begin_step, div_dtype),
        math_ops.cast(self._end_step - self._begin_step, div_dtype),
    )
    drop_frac = math_ops.multiply(
        self._drop_fraction_initial_value,
        math_ops.pow(1 - power, exponent),
        name='%s_drop_fraction' % self._drop_fraction_anneal)
  else:
    raise ValueError('drop_fraction_anneal: %s is not valid' %
                     self._drop_fraction_anneal)
  return array_ops.where(is_mask_update_iter_op, drop_frac,
                         array_ops.zeros_like(drop_frac))
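In the 'exponentialK' branch the fraction decays as initial_value * (1 - t)^K, with t running from 0 to 1 between begin_step and end_step. A plain-Python illustration with hypothetical values (extract_number is assumed to parse K out of the anneal string):

initial_value, exponent = 0.3, 3.0            # as if anneal == 'exponential3'
begin_step, end_step, global_step = 0, 1000, 250

t = (global_step - begin_step) / float(end_step - begin_step)
print(initial_value * (1.0 - t) ** exponent)  # 0.3 * 0.75**3 ≈ 0.1266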
Example 7: pow
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def pow(x, a):
  """Element-wise exponentiation.

  Arguments:
      x: Tensor or variable.
      a: Python integer.

  Returns:
      A tensor.
  """
  return math_ops.pow(x, a)
Example 8: setUp
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])
  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
Example 9: dropout_selu
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
  """Dropout to a value with rescaling."""

  def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
    keep_prob = 1.0 - rate
    x = ops.convert_to_tensor(x, name="x")
    if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
      raise ValueError("keep_prob must be a scalar tensor or a float in the "
                       "range (0, 1], got %g" % keep_prob)
    keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
    keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
    alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    if tensor_util.constant_value(keep_prob) == 1:
      return x

    noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
    binary_tensor = math_ops.floor(random_tensor)
    ret = x * binary_tensor + alpha * (1 - binary_tensor)
    # Affine correction chosen so the output keeps the SELU fixed-point
    # mean/variance after dropping units to alpha.
    a = math_ops.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    ret = a * ret + b
    ret.set_shape(x.get_shape())
    return ret

  with ops.name_scope(name, "dropout", [x]) as name:
    return utils.smart_cond(
        training,
        lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
        lambda: array_ops.identity(x))
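The affine correction (a, b) is chosen so that, when the inputs are at the SELU fixed point (mean 0, variance 1), the output keeps that mean and variance despite units being dropped to alpha. A Monte-Carlo check of that property in plain NumPy (a sketch with a hypothetical rate):

import numpy as np

alpha, rate = -1.7580993408473766, 0.2
q = 1.0 - rate                              # keep probability
x = np.random.randn(1000000)                # inputs at the SELU fixed point

mask = (np.random.rand(x.size) < q).astype(x.dtype)
ret = x * mask + alpha * (1 - mask)
a = np.sqrt(1.0 / (q * ((1 - q) * alpha ** 2 + 1.0)))
b = -a * (1 - q) * alpha
ret = a * ret + b
print(ret.mean(), ret.var())                # both close to 0 and 1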
Example 10: dropout_selu
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
  """Dropout to a value with rescaling."""

  def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
    keep_prob = 1.0 - rate
    x = ops.convert_to_tensor(x, name="x")
    if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
      raise ValueError("keep_prob must be a scalar tensor or a float in the "
                       "range (0, 1], got %g" % keep_prob)
    keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
    keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
    alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    if tensor_util.constant_value(keep_prob) == 1:
      return x

    noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
    binary_tensor = math_ops.floor(random_tensor)
    ret = x * binary_tensor + alpha * (1 - binary_tensor)
    a = math_ops.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    ret = a * ret + b
    ret.set_shape(x.get_shape())
    return ret

  with ops.name_scope(name, "dropout", [x]) as name:
    return utils.smart_cond(
        training,
        lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
        lambda: array_ops.identity(x))
Example 11: _phi
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def _phi(r, order):
  """Coordinate-wise nonlinearity used to define the order of the interpolation.

  See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

  Args:
    r: input op
    order: interpolation order

  Returns:
    phi_k evaluated coordinate-wise on r, for k = order
  """
  # using EPSILON prevents log(0), sqrt(0), etc.
  # sqrt(0) is well-defined, but its gradient is not
  with ops.name_scope('phi'):
    if order == 1:
      r = math_ops.maximum(r, EPSILON)
      r = math_ops.sqrt(r)
      return r
    elif order == 2:
      return 0.5 * r * math_ops.log(math_ops.maximum(r, EPSILON))
    elif order == 4:
      return 0.5 * math_ops.square(r) * math_ops.log(
          math_ops.maximum(r, EPSILON))
    elif order % 2 == 0:
      r = math_ops.maximum(r, EPSILON)
      return 0.5 * math_ops.pow(r, 0.5 * order) * math_ops.log(r)
    else:
      r = math_ops.maximum(r, EPSILON)
      return math_ops.pow(r, 0.5 * order)
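Since r here is a squared distance, _phi matches the standard polyharmonic kernels phi_k(d) = d^k for odd k and d^k * log(d) for even k: r^(k/2) = d^k and 0.5 * r^(k/2) * log(r) = d^k * log(d). A NumPy check for orders 1 and 2 (a sketch, ignoring the EPSILON clamp):

import numpy as np

d = np.array([0.5, 1.5, 3.0])    # distances
r = d ** 2                       # _phi expects squared distances

print(np.sqrt(r), d)                            # order 1: identical
print(0.5 * r * np.log(r), d ** 2 * np.log(d))  # order 2: identical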
Example 12: _prob
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def _prob(self, x):
  y = (x - self.mu) / self.sigma
  half_df = 0.5 * self.df
  return (math_ops.exp(math_ops.lgamma(0.5 + half_df) -
                       math_ops.lgamma(half_df)) /
          (math_ops.sqrt(self.df) * math.sqrt(math.pi) * self.sigma) *
          math_ops.pow(1. + math_ops.square(y) / self.df, -(0.5 + half_df)))
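This is the location-scale Student's t density; a cross-check of the same expression against scipy.stats.t (a sketch, assuming SciPy is available):

import math
from scipy import stats

df, mu, sigma, x = 3.0, 1.0, 2.0, 0.5
y = (x - mu) / sigma
half_df = 0.5 * df
pdf = (math.exp(math.lgamma(0.5 + half_df) - math.lgamma(half_df))
       / (math.sqrt(df) * math.sqrt(math.pi) * sigma)
       * (1.0 + y ** 2 / df) ** -(0.5 + half_df))
print(pdf, stats.t.pdf(x, df, loc=mu, scale=sigma))  # should agree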
Example 13: expected_reciprocal_rank
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def expected_reciprocal_rank(
    labels, predictions, weights=None, topn=None, name=None):
  """Computes expected reciprocal rank (ERR).

  Args:
    labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
      relevant example.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape as predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted expected reciprocal rank of the batch.
  """
  with ops.name_scope(name, 'expected_reciprocal_rank',
                      (labels, predictions, weights)):
    labels, predictions, weights, topn = _prepare_and_validate_params(
        labels, predictions, weights, topn)
    sorted_labels, sorted_weights = utils.sort_by_scores(
        predictions, [labels, weights], topn=topn)
    _, list_size = array_ops.unstack(array_ops.shape(sorted_labels))
    relevance = (math_ops.pow(2.0, sorted_labels) - 1) / math_ops.pow(
        2.0, RankingMetricKey.MAX_LABEL)
    non_rel = tf.math.cumprod(1.0 - relevance, axis=1) / (1.0 - relevance)
    reciprocal_rank = 1.0 / math_ops.to_float(math_ops.range(1, list_size + 1))
    mask = math_ops.to_float(
        math_ops.greater_equal(reciprocal_rank, 1.0 / (topn + 1)))
    reciprocal_rank = reciprocal_rank * mask
    # ERR has a shape of [batch_size, 1].
    err = math_ops.reduce_sum(
        relevance * non_rel * reciprocal_rank * sorted_weights,
        axis=1, keepdims=True)
    return math_ops.reduce_mean(err)
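The computation implements ERR = SUM_r (1/r) * R_r * PROD_{i<r} (1 - R_i) with R = (2^label - 1) / 2^max_label; dividing the inclusive cumprod by its own term yields the exclusive product, i.e. the probability that no earlier result already satisfied the user. A NumPy hand check with hypothetical labels and max label 4:

import numpy as np

labels = np.array([3.0, 1.0, 0.0])       # already sorted by score
relevance = (2.0 ** labels - 1.0) / 2.0 ** 4
ranks = np.arange(1, labels.size + 1)
non_rel = np.cumprod(1.0 - relevance) / (1.0 - relevance)  # exclusive cumprod
print(np.sum(relevance * non_rel / ranks))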
Example 14: normalized_discounted_cumulative_gain
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def normalized_discounted_cumulative_gain(labels,
                                          predictions,
                                          weights=None,
                                          topn=None,
                                          name=None):
  """Computes normalized discounted cumulative gain (NDCG).

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape as predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted normalized discounted cumulative gain of the
    batch.
  """
  with ops.name_scope(name, 'normalized_discounted_cumulative_gain',
                      (labels, predictions, weights)):
    labels, predictions, weights, topn = _prepare_and_validate_params(
        labels, predictions, weights, topn)
    sorted_labels, sorted_weights = utils.sort_by_scores(
        predictions, [labels, weights], topn=topn)
    dcg = _discounted_cumulative_gain(sorted_labels, sorted_weights)
    # Sort by the weighted labels to get the ideal ranking.
    ideal_sorted_labels, ideal_sorted_weights = utils.sort_by_scores(
        weights * labels, [labels, weights], topn=topn)
    ideal_dcg = _discounted_cumulative_gain(ideal_sorted_labels,
                                            ideal_sorted_weights)
    per_list_ndcg = _safe_div(dcg, ideal_dcg)
    per_list_weights = _per_example_weights_to_per_list_weights(
        weights=weights,
        relevance=math_ops.pow(2.0, math_ops.to_float(labels)) - 1.0)
    return math_ops.reduce_mean(per_list_ndcg * per_list_weights)
Example 15: discounted_cumulative_gain
# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import pow [as alias]
def discounted_cumulative_gain(labels,
                               predictions,
                               weights=None,
                               topn=None,
                               name=None):
  """Computes discounted cumulative gain (DCG).

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape as predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    topn: A cutoff for how many examples to consider for this metric.
    name: A string used as the name for this metric.

  Returns:
    A metric for the weighted discounted cumulative gain of the batch.
  """
  with ops.name_scope(name, 'discounted_cumulative_gain',
                      (labels, predictions, weights)):
    labels, predictions, weights, topn = _prepare_and_validate_params(
        labels, predictions, weights, topn)
    sorted_labels, sorted_weights = utils.sort_by_scores(
        predictions, [labels, weights], topn=topn)
    # log1p(1.0) = ln(2) rescales the natural-log discount inside
    # _discounted_cumulative_gain to the conventional log2 discount.
    dcg = _discounted_cumulative_gain(sorted_labels,
                                      sorted_weights) * math_ops.log1p(1.0)
    per_list_weights = _per_example_weights_to_per_list_weights(
        weights=weights,
        relevance=math_ops.pow(2.0, math_ops.to_float(labels)) - 1.0)
    return math_ops.reduce_mean(
        _safe_div(dcg, per_list_weights) * per_list_weights)
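A quick NumPy check of the base-conversion identity behind the log1p(1.0) factor, ln(2) / ln(1 + rank) = 1 / log2(1 + rank):

import numpy as np

rank = 3.0
print(np.log1p(1.0) / np.log1p(rank))  # ln(2) / ln(1 + rank)
print(1.0 / np.log2(1.0 + rank))       # identical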